def check_NNGradients(ilambda=0):
    """ 
	
		使用一个小型神经网络进行梯度检测, 比较分析梯度与数值梯度的差距
	只进行一次传播就行, 只是验算是否正确, 不需要进行梯度下降, 训练时才需要
	
	"""
    # Neural network parameters
    input_layer_size = 3
    hidden_layer_size = 5
    num_labels = 3
    m = 5

    # Randomly generate the weights
    Theta1 = debug_initialize_weights(hidden_layer_size, input_layer_size)
    Theta2 = debug_initialize_weights(num_labels, hidden_layer_size)
    # Randomly generate a training set of shape m x input_layer_size
    X = debug_initialize_weights(m, input_layer_size - 1)
    y = np.mod(np.arange(0, m), num_labels).reshape(m, 1)

    # Unroll the parameters into a single column vector
    nn_params = np.vstack((Theta1.reshape(Theta1.size,
                                          1), Theta2.reshape(Theta2.size, 1)))
    my_nn = nn(nn_params, input_layer_size, hidden_layer_size, num_labels, X,
               y, ilambda)

    # Compute the analytical gradient
    cost, grad = my_nn.cost_function()
    # Compute the numerical gradient
    numgrad = compute_numerical_gradient(my_nn, nn_params)

    # Display the numerically approximated gradients next to the analytical ones
    for num, analyze in zip(numgrad, grad):  # zip iterates several iterables in lockstep
        print('{:.6f} {:.6f}'.format(float(num), float(analyze)))
    print('\nThe two columns above should be very close.\n'
          '(Left: numerical gradient, Right: analytical gradient)\n')

    # Evaluate the norm of the difference between the two solutions.
    # With a correct implementation, and assuming EPSILON = 0.0001 in
    # compute_numerical_gradient, diff below should be less than 1e-9.
    diff = np.linalg.norm(numgrad - grad) / np.linalg.norm(numgrad + grad)

    print('If your backpropagation implementation is correct, the relative '
          'difference will be very small (less than 1e-9).\n'
          'Relative difference: {:.6e}'.format(diff))
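
# compute_numerical_gradient is not shown in this example; below is a minimal
# central-difference sketch of it, assuming the model re-reads its parameters
# from a (hypothetical) `nn_params` attribute on every cost_function() call:
import numpy as np

def compute_numerical_gradient(model, params, epsilon=1e-4):
    numgrad = np.zeros(params.shape)
    perturb = np.zeros(params.shape)
    for i in range(params.size):
        perturb.flat[i] = epsilon
        model.nn_params = params - perturb   # theta - epsilon
        loss1, _ = model.cost_function()
        model.nn_params = params + perturb   # theta + epsilon
        loss2, _ = model.cost_function()
        # Two-sided difference quotient approximates each partial derivative
        numgrad.flat[i] = (loss2 - loss1) / (2 * epsilon)
        perturb.flat[i] = 0
    return numgrad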
Example No. 2
def nnOutput(train1, test, element, var, analyse=False):
    print "starting to run for subset"
    train = train1.copy(deep=True)
    if analyse: analyseData = train.copy(deep=True)
    rawTransformed = getTargetVar(train)
    nnObject = nn(
        listOfMatrix=[np.random.rand(len(var), 8),
                      np.random.rand(8, 3)],
        input=rawTransformed[var].values,  # .as_matrix() was removed from pandas
        output=rawTransformed[['high', 'medium', 'low']].values,
        func=sigmoid,
        funcGradient=sigDeriv,
        variables=var,
        iteration=400)
    d = booster(classifier=nnObject,
                maxIteration=50,
                test=test,
                trainCopy=train1)
    if analyse: return d.weightSelecter(analyse)
    else: return d.nniterate()
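
# sigmoid, sigDeriv and getTargetVar are used above but not defined in this
# excerpt; minimal sketches of what they are presumably meant to be (the
# target column name in getTargetVar is a hypothetical stand-in):
import numpy as np

def sigmoid(z):
    # Logistic activation: maps any real input into (0, 1)
    return 1.0 / (1.0 + np.exp(-z))

def sigDeriv(z):
    # Derivative of the sigmoid, expressed via the activation itself
    s = sigmoid(z)
    return s * (1.0 - s)

def getTargetVar(df, target='rating'):
    # Hypothetical helper: one-hot encode a categorical target column into
    # the 'high' / 'medium' / 'low' indicator columns consumed above
    for label in ('high', 'medium', 'low'):
        df[label] = (df[target] == label).astype(float)
    return df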
Example No. 3
def nnOutput(train, test, element, var, analyse=False):
    print "starting to run for subset"
    if analyse: analyseData = train.copy(deep=True)
    rawTransformed = train
    secondLayer = 8
    d = nn(listOfMatrix=[
        np.random.rand(len(var), secondLayer),
        np.random.rand(secondLayer, 3)
    ],
           input=rawTransformed[var].values,  # .as_matrix() was removed from pandas
           output=rawTransformed[['high', 'medium', 'low']].values,
           func=sigmoid,
           funcGradient=sigDeriv,
           variables=var,
           iteration=500)
    d.findEstimates()
    print(d.cost, d.input.shape[0])
    if analyse:
        analyseData1 = analyseData.copy(deep=True)
        temp = d.analyseObservation(dataSet=analyseData1)
        return temp
    prediction = d.predict(test=test)

    return prediction, d.cost, d.input.shape[0]
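
# Given listOfMatrix shapes (len(var), 8) and (8, 3), the nn class above
# presumably runs a two-layer forward pass along these lines (a sketch only;
# the real class also uses funcGradient during backpropagation):
import numpy as np

def forward(X, listOfMatrix, func):
    # Propagate the inputs through each weight matrix in turn
    a = X
    for W in listOfMatrix:
        a = func(a @ W)
    return a  # m x 3 scores for ('high', 'medium', 'low')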
Example No. 4
    initn_bias_type   = ii.BiasType.ZERO
    cost_type         = cc.Type.QUADRATIC
    traing_type       = tt.Type.GRADIENT_D
    reg_type          = rr.Type.NONE
    reg_lambda        = 0.01
    learng_eta        = 0.03
    epochs            = 5001
    batch_size        = 0  # len(traing_x)

    input_log = True

    traing_qtts_log = True
    traing_acts_log = False
    traing_cost_log = False
    evaltn_cost_log = False

    n_net = nn(layers, actvtn_types,
            initn_weight_type, initn_bias_type,
            cost_type,
            reg_type, reg_lambda,
            traing_type, epochs, batch_size, learng_eta,
            groups,
            traing_x_list, traing_y_list,
            evaltn_x_list, evaltn_y_list,
            input_log,
            traing_qtts_log, traing_acts_log, traing_cost_log,
            evaltn_cost_log)

    n_net.training()
    n_net.predictions()
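
# The QUADRATIC cost and GRADIENT_D training type selected above are the
# usual mean squared error and plain gradient descent; for reference, minimal
# versions of both (a sketch, not this library's implementation):
import numpy as np

def quadratic_cost(a, y):
    # 0.5 * squared error, averaged over the batch
    return 0.5 * np.mean(np.sum((a - y) ** 2, axis=1))

def gradient_descent_step(weights, grads, eta=0.03):
    # Step each weight matrix against its gradient, scaled by the learning rate
    return [W - eta * dW for W, dW in zip(weights, grads)]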
Example No. 5
# Move the last row of load_Theta2 to the front (in the original MATLAB
# weights, label 10 stands for digit 0, so its row belongs at index 0)
q = load_Theta2[-1]
Theta2 = np.vstack((q.reshape(1, q.shape[0]), load_Theta2[:-1]))

# Unroll the parameters into one long vector
nn_params = np.vstack((Theta1.reshape(Theta1.size, 1), Theta2.reshape(Theta2.size, 1)))


## ================ Part 3: Compute the cost (feed_forward) ================
#
print("Computing the cost via the network's feed-forward pass...\n")

# Set the regularization parameter to 0, i.e. no regularization
ilambda = 0

# Create the neural network
my_nn = nn(nn_params, input_layer_size, hidden_layer_size,
           num_labels, X, y, ilambda)

_, _, _, _, J = my_nn.feed_forward()
print('''Cost computed from the parameters loaded from ex4weights: {:.6f}
(this value should be about 0.287629)\n'''.format(J))
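
# For reference, the cost that feed_forward computes here (unregularized when
# ilambda = 0) is the cross-entropy summed over all labels; with h the
# m x num_labels output matrix and Y the one-hot labels, schematically:
#
#     J = (-1 / m) * np.sum(Y * np.log(h) + (1 - Y) * np.log(1 - h))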


## =============== Part 4: Implement regularization ===============
#
print('Checking the regularized cost... \n')

# Use lambda to control how strongly the cost is regularized
ilambda = 1

my_nn_rec = nn(nn_params, input_layer_size, hidden_layer_size,
               num_labels, X, y, ilambda)
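
# With ilambda = 1 the cost gains the usual weight-decay term, with the bias
# columns of each Theta excluded; schematically:
#
#     reg = (ilambda / (2 * m)) * (np.sum(Theta1[:, 1:] ** 2)
#                                  + np.sum(Theta2[:, 1:] ** 2))
#     J = J + reg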