import numpy as np

alpha = 0.0251 # learning rate for gradient descent, found by trial and error (see below)
def nonlinear(x, deriv=False):
  # Sigmoid activation. When deriv=True, x is assumed to already be the
  # sigmoid output, so x*(1-x) gives the derivative at that point.
  if deriv:
    return x*(1-x)
  return 1/(1+np.exp(-x))

#seed the random number generator so runs are reproducible
np.random.seed(1)

#testing sample
test_x = np.array([[251,497,-246],
              [299,249,50],
              [194,180,14],
              [140,148,-8],
              [210,140,70]])
#Input Array - This input will be taken directly from a Pong game
X = np.array([[198,200,-2],
          [90, 280,-190],
          [84, 256,-172],
          [140,240,-100],
          [114,216,-102],
          [72, 95,-23],
          [99, 31, 68],
          [144, 20, 124],
          [640, 216,424],
          [32, 464,-432],
          [176, 64,112],
          [754, 506,248],
          [107, 104,3],
          [116,101,15]])

#output array - if ball_pos - paddle > 0 move up else move down
Y = np.array([[0,0,0,0,0,0,1,1,1,0,1,1,1,1,]]).T

#randomly initialize the weight matrices with values in [-1, 1)
syn0 = 2*np.random.random((3,14))-1
syn1 = 2*np.random.random((14,14))-1

for j in range(60000):

  #forward propagation
  l0 = X
  l1 = nonlinear(np.dot(l0, syn0))
  l2 = nonlinear(np.dot(l1, syn1))

  #how much did we miss
  l2_error = Y - l2

  #multiply how much we missed by the slope of the sigmoid at the values in l2
  l2_delta = l2_error * nonlinear(l2, True)

  #how much did l1 contribute to l2 error
  #(according to the weights)
  l1_error = l2_delta.dot(syn1.T)

  #in what direction is the target l1?
  #(scaled by the sigmoid slope, so confident units are changed less)
  l1_delta = l1_error*nonlinear(l1,True)

  #update weight
  syn1 += alpha * (l1.T.dot(l2_delta))
  syn0 += alpha * (l0.T.dot(l1_delta))

  # display the error every 10000 iterations
  if j % 10000 == 0:
    print("ERROR: " + str(np.mean(np.abs(l2_error))))


#Testing Forward propagation
l0_test = test_x
l1_test = nonlinear(np.dot(l0_test,syn0))
l2_test = nonlinear(np.dot(l1_test,syn1))

#Dress up the array (make it look nice)
l2_test_output = []
for x in range(len(l2_test)):
  l2_test_output.append(l2_test[x][0])

print("Test Output")
print(l2_test_output)

#Put all the l2 data in a way I can see it: just the first column of probabilities
#(l2 still holds the values from the last training iteration)
l2_output = []
for x in range(len(l2)):
  l2_output.append(l2[x][0])

print("Output")
print(l2_output)


The code is supposed to take a set of three numbers [(value_1), (value_2), (value_1 - value_2)] and return 0 if the difference between the first and second values is negative, or 1 if the difference is positive. So far it actually works reasonably well.
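For reference, that labeling rule can be written directly against the X array above; this is a small illustrative check on my part, not part of the original program:

# Illustrative only: the label follows the sign of the third column (value_1 - value_2)
labels = (X[:, 2] > 0).astype(int).reshape(-1, 1)
print(labels.T)  # [[0 0 0 0 0 0 1 1 1 0 1 1 1 1]], matching Y above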

Here is the output:

ERROR: 0.497132186092
ERROR: 0.105081486632
ERROR: 0.102115299177
ERROR: 0.100813655802
ERROR: 0.100042420179
ERROR: 0.0995185781466
Test Output
[0.0074706006801269686, 0.66687458928464094, 0.66687458928463983, 0.66686236694464551, 0.98341439176739631]
Output
[0.66687459245609326, 0.00083944690766060215, 0.00083946471285455484, 0.0074706634783305243, 0.0074706634765733968, 0.007480987498372226, 0.99646513183073093, 0.99647100131874755, 0.99646513180692531, 0.00083944572383107523, 0.99646513180692531, 0.98324165810211861, 0.66687439729829612, 0.66687459321626519]

As you can see, with alpha = 0.0251 (the gradient-descent learning rate, found through trial and error), the error settles at only about 9.95%.

Since writing this program I have learned that leaky ReLU is a better alternative to the sigmoid function, because it optimizes and learns faster than sigmoid. I would like to implement the leaky ReLU function in this program using numpy, but I am not sure where to start, much less what its derivative is.

How do I implement leaky ReLU in this neural network?

Best Answer

I would like to add here that there are in fact many ReLU-like activation functions that can be used instead of the standard ReLU activation:


You have already mentioned Leaky ReLU yourself (parameterized by alpha); a numpy sketch of it (and of ELU) follows after this list.
Parametric Rectified Linear Unit (PReLU). The formula is the same as for Leaky ReLU, but the coefficient alpha is learned rather than fixed. See also this discussion.
Exponential Linear Unit (ELU), which tries to push the mean activation toward zero and thereby speed up learning:


[image: the ELU formula, f(x) = x for x > 0 and alpha*(e^x - 1) otherwise]


The Scaled Exponential Linear Unit (SELU) was published very recently. It is an extension of ELU with a specific choice of parameters, which has an additional normalizing effect and helps the network learn faster.
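For concreteness, here is a minimal numpy sketch of the Leaky ReLU and ELU variants mentioned in the list above, together with their derivatives. The alpha defaults are illustrative choices of mine, not something the original answer prescribes, and this alpha is the activation's slope parameter, unrelated to the learning-rate variable alpha in the question's code.

import numpy as np

def leaky_relu(x, alpha=0.01):
  # x for positive inputs, a small negative slope alpha*x otherwise
  return np.where(x > 0, x, alpha * x)

def leaky_relu_deriv(x, alpha=0.01):
  # Slope is 1 for positive inputs and alpha otherwise. Because leaky ReLU
  # preserves the sign of its input (for alpha > 0), this also works when x is
  # the activation output rather than the pre-activation.
  return np.where(x > 0, 1.0, alpha)

def elu(x, alpha=1.0):
  # x for positive inputs, alpha*(e^x - 1) otherwise
  return np.where(x > 0, x, alpha * (np.exp(x) - 1))

def elu_deriv(x, alpha=1.0):
  # Derivative with respect to the pre-activation x
  return np.where(x > 0, 1.0, alpha * np.exp(x))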


Here is the list of all the activations and their derivatives.
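And here is one way the question's training loop could be adapted to use leaky ReLU. This is a sketch under my own assumptions, not the poster's exact method: it keeps the sigmoid on the output layer so l2 stays in [0, 1] to match the 0/1 targets, and uses leaky ReLU only on the hidden layer.

for j in range(60000):
  l0 = X
  z1 = np.dot(l0, syn0)                  # pre-activation of the hidden layer
  l1 = leaky_relu(z1)                    # leaky ReLU hidden layer
  l2 = nonlinear(np.dot(l1, syn1))       # sigmoid output layer, as before

  l2_error = Y - l2
  l2_delta = l2_error * nonlinear(l2, True)

  l1_error = l2_delta.dot(syn1.T)
  l1_delta = l1_error * leaky_relu_deriv(z1)   # derivative taken at the pre-activation

  syn1 += alpha * l1.T.dot(l2_delta)
  syn0 += alpha * l0.T.dot(l1_delta)

Note that with raw inputs in the hundreds, scaling X (for example dividing it by a few hundred) may be needed for this to train stably; that is an assumption on my part, not something the original post covers.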

A related question on Stack Overflow: https://stackoverflow.com/questions/46697840/
