Here are four common activation functions: Sigmoid, Tanh, ReLU, and Leaky ReLU. The following code plots each of them:
import matplotlib.pyplot as plt
import numpy as np

x = np.linspace(-10, 10)
y_sigmoid = 1 / (1 + np.exp(-x))
y_tanh = (np.exp(x) - np.exp(-x)) / (np.exp(x) + np.exp(-x))

fig = plt.figure()

# Plot sigmoid
ax = fig.add_subplot(221)
ax.plot(x, y_sigmoid)
ax.grid()
ax.set_title('(a) Sigmoid')

# Plot tanh
ax = fig.add_subplot(222)
ax.plot(x, y_tanh)
ax.grid()
ax.set_title('(b) Tanh')

# Plot ReLU
ax = fig.add_subplot(223)
y_relu = np.array([0 * item if item < 0 else item for item in x])
ax.plot(x, y_relu)
ax.grid()
ax.set_title('(c) ReLU')

# Plot Leaky ReLU
ax = fig.add_subplot(224)
y_leaky_relu = np.array([0.2 * item if item < 0 else item for item in x])
ax.plot(x, y_leaky_relu)
ax.grid()
ax.set_title('(d) Leaky ReLU')

plt.tight_layout()
plt.show()
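As an aside, the list comprehensions above for ReLU and Leaky ReLU can also be written with NumPy's vectorized primitives; a minimal, equivalent sketch (reusing the same x):

y_relu = np.maximum(0, x)                   # ReLU: elementwise max(0, x)
y_leaky_relu = np.where(x < 0, 0.2 * x, x)  # Leaky ReLU, slope 0.2 for x < 0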
Let's look at a small example. Define man as 1 and woman as 0. Given height and weight as inputs, the network predicts man or woman, so it has two inputs and one output; we use a single hidden layer with two neurons (h1 and h2).
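Concretely, writing the sigmoid function as σ, the forward pass of this network is:

h1 = σ(w1·x1 + w2·x2 + b1)
h2 = σ(w3·x1 + w4·x2 + b2)
o1 = σ(w5·h1 + w6·h2 + b3)

where x1 and x2 are the two inputs and o1 is the predicted output. These are exactly the expressions computed in feedforward() in the code below.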
The code is as follows:
import numpy as np
def sigmoid(x):
    # Sigmoid activation function: f(x) = 1 / (1 + e^(-x))
    return 1 / (1 + np.exp(-x))
# Derivative of the sigmoid
def deriv_sigmoid(x):
    # Derivative of sigmoid: f'(x) = f(x) * (1 - f(x))
    fx = sigmoid(x)
    return fx * (1 - fx)

# Loss function
def mse_loss(y_true, y_pred):
    # y_true and y_pred are numpy arrays of the same length.
    return ((y_true - y_pred) ** 2).mean()

# Neural network
class OurNeuralNetwork:
    '''
    A neural network with:
      - 2 inputs
      - a hidden layer with 2 neurons (h1, h2)
      - an output layer with 1 neuron (o1)

    *** DISCLAIMER ***:
    The code below is intended to be simple and educational, NOT optimal.
    Real neural net code looks nothing like this. DO NOT use this code.
    Instead, read/run it to understand how this specific network works.
    '''
    def __init__(self):
        # Weights
        self.w1 = np.random.normal()
        self.w2 = np.random.normal()
        self.w3 = np.random.normal()
        self.w4 = np.random.normal()
        self.w5 = np.random.normal()
        self.w6 = np.random.normal()
        # Biases
        self.b1 = np.random.normal()
        self.b2 = np.random.normal()
        self.b3 = np.random.normal()

    def feedforward(self, x):
        # x is a numpy array with 2 elements.
        h1 = sigmoid(self.w1 * x[0] + self.w2 * x[1] + self.b1)
        h2 = sigmoid(self.w3 * x[0] + self.w4 * x[1] + self.b2)
        o1 = sigmoid(self.w5 * h1 + self.w6 * h2 + self.b3)
        return o1

    def train(self, data, all_y_trues):
        '''
        - data is a (n x 2) numpy array, n = # of samples in the dataset.
        - all_y_trues is a numpy array with n elements.
          Elements in all_y_trues correspond to those in data.
        '''
        # Learning rate
        learn_rate = 0.1
        epochs = 1000  # number of times to loop through the entire dataset

        for epoch in range(epochs):
            for x, y_true in zip(data, all_y_trues):
                # --- Do a feedforward (we'll need these values later)
                sum_h1 = self.w1 * x[0] + self.w2 * x[1] + self.b1
                h1 = sigmoid(sum_h1)

                sum_h2 = self.w3 * x[0] + self.w4 * x[1] + self.b2
                h2 = sigmoid(sum_h2)

                sum_o1 = self.w5 * h1 + self.w6 * h2 + self.b3
                o1 = sigmoid(sum_o1)
                y_pred = o1

                # --- Calculate partial derivatives.
                # --- Naming: d_L_d_w1 represents "partial L / partial w1"
                d_L_d_ypred = -2 * (y_true - y_pred)

                # Neuron o1
                d_ypred_d_w5 = h1 * deriv_sigmoid(sum_o1)
                d_ypred_d_w6 = h2 * deriv_sigmoid(sum_o1)
                d_ypred_d_b3 = deriv_sigmoid(sum_o1)

                d_ypred_d_h1 = self.w5 * deriv_sigmoid(sum_o1)
                d_ypred_d_h2 = self.w6 * deriv_sigmoid(sum_o1)

                # Neuron h1
                d_h1_d_w1 = x[0] * deriv_sigmoid(sum_h1)
                d_h1_d_w2 = x[1] * deriv_sigmoid(sum_h1)
                d_h1_d_b1 = deriv_sigmoid(sum_h1)

                # Neuron h2
                d_h2_d_w3 = x[0] * deriv_sigmoid(sum_h2)
                d_h2_d_w4 = x[1] * deriv_sigmoid(sum_h2)
                d_h2_d_b2 = deriv_sigmoid(sum_h2)

                # --- Update weights and biases
                # Neuron h1
                self.w1 -= learn_rate * d_L_d_ypred * d_ypred_d_h1 * d_h1_d_w1
                self.w2 -= learn_rate * d_L_d_ypred * d_ypred_d_h1 * d_h1_d_w2
                self.b1 -= learn_rate * d_L_d_ypred * d_ypred_d_h1 * d_h1_d_b1

                # Neuron h2
                self.w3 -= learn_rate * d_L_d_ypred * d_ypred_d_h2 * d_h2_d_w3
                self.w4 -= learn_rate * d_L_d_ypred * d_ypred_d_h2 * d_h2_d_w4
                self.b2 -= learn_rate * d_L_d_ypred * d_ypred_d_h2 * d_h2_d_b2

                # Neuron o1
                self.w5 -= learn_rate * d_L_d_ypred * d_ypred_d_w5
                self.w6 -= learn_rate * d_L_d_ypred * d_ypred_d_w6
                self.b3 -= learn_rate * d_L_d_ypred * d_ypred_d_b3

            # --- Calculate total loss at the end of each epoch
            if epoch % 10 == 0:
                y_preds = np.apply_along_axis(self.feedforward, 1, data)
                loss = mse_loss(all_y_trues, y_preds)
                print("Epoch %d loss: %.3f" % (epoch, loss))

# Define dataset
data = np.array([
    [-2, -1],   # Alice
    [25, 6],    # Bob
    [17, 4],    # Charlie
    [-15, -6],  # Diana
])
all_y_trues = np.array([
    0,  # Alice
    1,  # Bob
    1,  # Charlie
    0,  # Diana
])
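Every weight update in train() above is just a chain-rule product combined with one step of stochastic gradient descent. For w1, for example:

∂L/∂w1 = (∂L/∂y_pred) · (∂y_pred/∂h1) · (∂h1/∂w1)
       = -2(y_true - y_pred) · w5·σ'(sum_o1) · x1·σ'(sum_h1)

and the update is w1 ← w1 - learn_rate · ∂L/∂w1. The same pattern applies to every other weight and bias.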
# Train our neural network!
network = OurNeuralNetwork()
network.train(data, all_y_trues)

# Make some predictions
emily = np.array([-7, -3])  # 128 pounds, 63 inches
frank = np.array([20, 2])   # 155 pounds, 68 inches
print("Emily: %.3f" % network.feedforward(emily))
print("Frank: %.3f" % network.feedforward(frank))
Running this prints the loss every 10 epochs and, finally, the two predictions:

Emily: 0.035
Frank: 0.961
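Both outputs line up with the labels we defined (man = 1, woman = 0). One natural way to turn these outputs into labels is a 0.5 cutoff; a minimal sketch (the predict_label helper and the 0.5 threshold are hypothetical additions, not part of the original code):

# Hypothetical helper: threshold the sigmoid output at 0.5
def predict_label(network, x):
    return "man" if network.feedforward(x) >= 0.5 else "woman"

print(predict_label(network, emily))  # -> woman (0.035 < 0.5)
print(predict_label(network, frank))  # -> man   (0.961 >= 0.5)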