Example #1
0
# Initialize the gradient accumulators with the same shapes as the
# (flattened) parameter arrays.

gradWeights = np.zeros_like(Weights)
gradBiases = np.zeros_like(Biases)

# Debug dump of layer sizes and total parameter counts (Python 2 prints).
print 'numCon', numCon
print 'numDis', numDis
print 'numOut', numOut
print 'Weights', len(Weights)
print 'Bias', len(Biases)
# NOTE(review): the figures below (17940 / 1544) look like parameter counts
# recorded from a previous run -- verify before relying on them.
# 17940
# 1544
#
paramFile = 'paramTest_GCNNPool'
# Persist the freshly initialized parameters one directory above params.xypath.
write_param.write_binary(params.xypath + '../' + paramFile, Weights, Biases)
print 'Parameters have been saved at: ', params.xypath + '../' + paramFile
problem = ''  # NOTE(review): assigned but never read in this snippet -- confirm it is used elsewhere

def InitByNodes(graph, word_dict):
    # Parameter initialization for a graph-convolutional network laid out as:
    # Embedding ---> Mapping ---> Conv1 ---> .... ---> Convn ---> Pooling ---> Fully-Connected ---> Output
    # (Function body continues beyond this excerpt.)
    # Intended consumer, kept from the original for reference:
    # ConstructGraphConvolution(graph, word_dict, numView, numFea, numMap, numCon, numDis, numOut, \
    #                               Wmap, Bmap,
    #                               Wconv_root, Wconv_in, Wconv_out, Bconv, \
    #                               Wdis, Woutput, Bdis, Boutput
    #                               ):

    # word_dict: [dict_view1, dict_view2] -- one vocabulary per view.
    # numView, numFea, numCon, numDis, numOut, \
    # Wconv_root    [View1:[conv1, conv2, ...], View2:[conv1, conv2, ...]]
Example #2
0
# Convolution layer: right-child weight block plus shared bias.
# NOTE(review): InitParam appears to append `num` freshly drawn values to the
# running flat parameter vector and return (vector, new_slice) -- confirm in
# its definition.
Weights, Wconv_right = InitParam(Weights, num=numFea * numCon)

Biases, Bconv = InitParam(Biases, num=numCon)

# discriminative (hidden) layer
Weights, Wdis = InitParam(Weights, num=numPool * numCon * numDis)
Biases, Bdis = InitParam(Biases, num=numDis)

# output layer: small symmetric init range for the weights, zero biases
Weights, Wout = InitParam(Weights,
                          num=numDis * numOut,
                          upper=.0002,
                          lower=-.0002)
Biases, Bout = InitParam(Biases, newWeights=np.zeros((numOut, 1)))

# Flatten both parameter vectors to column shape (N, 1) before saving.
Weights = Weights.reshape((-1, 1))
Biases = Biases.reshape((-1, 1))
print Weights[1], Weights[2], Weights[3], Weights[4], Weights[5]
print 'numDis', numDis
print 'numCon', numCon
print 'numOut', numOut
print 'Weights', len(Weights)
print 'Bias', len(Biases)
# NOTE(review): 17940 / 1544 below look like counts recorded from a prior run.
# 17940
# 1544

# Persist parameters; the file name encodes the conv / hidden layer sizes.
write_param.write_binary(
    '../paramTest_TBCNN_Conv' + str(gl.numCon) + '_Dis' + str(gl.numDis),
    Weights, Biases)
print 'Done!'
Example #3
0
        # output layer: tiny symmetric uniform init for weights, zero biases
        Weights, Wout = InitParam(Weights,
                                  num=numDis * numOut,
                                  upper=.0002,
                                  lower=-.0002)
        Biases, Bout = InitParam(Biases, newWeights=np.zeros((numOut, 1)))

        # Flatten to column vectors before serialization.
        Weights = Weights.reshape((-1, 1))
        Biases = Biases.reshape((-1, 1))

        print len(Weights)
        print len(Biases)
        print Biases[0, 0], '     ', Biases[1, 0]

        write_param.write_binary('../param_pretrain', Weights, Biases)
        # NOTE(review): `dsadssd` below is an undefined name -- it raises a
        # NameError right after the parameters are written, presumably a
        # deliberate "stop here" left in during debugging.  Confirm and remove.
        dsadssd

        tobegin = 0

    elif Config == 'load':

        # Older hard-coded paths, kept from the original for reference:
        #Weights = CP.load(file('param/param_30_Weights'))
        #Biases  = CP.load(file('param/param_30_Biases' ))
        # Python 2 `file()` builtin; `p` is presumably a pickle module -- confirm.
        Weights = p.load(
            file('param_rollback1/param_' + str(tobegin) + '_Weights'))
        Biases = p.load(
            file('param_rollback1/param_' + str(tobegin) + '_Biases'))

    ##################################
    ##################################
Example #4
0
    
    
# Debug peek at the first two pretrained bias values (Python 2 prints).
print Biases_pretrain[0]
print Biases_pretrain[1]

# Seed the flat parameter vectors with the pretrained values first.
Biases, Bpretrain = InitParam(Biases, newWeights = Biases_pretrain)
Weights, Wpretrain = InitParam(Weights, newWeights = Weights_pretrain)

# Convolution layer: one weight block per child position plus a shared bias.
Weights, Wconv_root = InitParam(Weights, num = numFea*numCon)
Weights, Wconv_left = InitParam(Weights, num = numFea*numCon)
Weights, Wconv_right= InitParam(Weights, num = numFea*numCon)
Biases,  Bconv      = InitParam(Biases,  num = numCon)

# discriminative (hidden) layer
Weights, Wdis = InitParam(Weights, num = numPool*numCon*numDis)
Biases,  Bdis = InitParam(Biases,  num = numDis)

# output layer: small symmetric init for weights, zero biases
Weights, Wout = InitParam(Weights, num = numDis*numOut, upper = .0002, lower = -.0002)
Biases,  Bout = InitParam(Biases,  newWeights = np.zeros( (numOut,1) ) )

# Flatten to column vectors before saving.
Weights = Weights.reshape((-1,1))
Biases = Biases.reshape((-1,1))

write_param.write_binary('../param', Weights, Biases)

print len(Weights)
print len(Biases)
Example #5
0
File: mix.py  Project: Lili-Mou/FFNN
# Tag embedded in every preprocessed / parameter file name for this experiment.
fingerprint = 'QC_LSTM'

# Serialize the train / CV / test networks, one file each.
write_nets_in_one_file(X_train, './', 'preprocessed/train_nets_'+fingerprint)
write_nets_in_one_file(X_CV,    './', 'preprocessed/CV_nets_'+ fingerprint )
write_nets_in_one_file(X_test,  './', 'preprocessed/test_nets_' + fingerprint)

serialize.write_labels('preprocessed/labels', y_train, y_CV, y_test)
print "writing para"



##############################
# initialize weights

# Fixed RNG seed for reproducible initialization; the commented seeds were
# alternatives tried during experiments.
#np.random.seed(123)
np.random.seed(435)
#np.random.seed(126)
#np.random.seed(127)
#np.random.seed(787)

Weights, Biases = construct.generateParam(False)

# Flatten to column vectors before writing to disk.
Weights = Weights.reshape((-1,1))
Biases  = Biases.reshape((-1,1))
print len(Weights), len(Biases)

write_param.write_binary('preprocessed/para_'+fingerprint, Weights, Biases)

print 'done!'
Example #6
0
# Per-branch biases for the left / right sub-networks.
Biases, Bleft = InitParam(Biases, num=numLeft)
Biases, Bright = InitParam(Biases, num=numRight)
# joint layer: merges the two branches into one hidden representation
Weights, Wjoint_left = InitParam(Weights, num=numLeft * numJoint)
Weights, Wjoint_right = InitParam(Weights, num=numRight * numJoint)

Biases, Bjoint = InitParam(Biases, num=numJoint)

# discriminative (hidden) layer
Weights, Wdis = InitParam(Weights, num=numJoint * numDis)
Biases, Bdis = InitParam(Biases, num=numDis)

# output layer: small symmetric init range for weights, zero biases
Weights, Wout = InitParam(Weights,
                          num=numDis * numOut,
                          upper=.0002,
                          lower=-.0002)
Biases, Bout = InitParam(Biases, newWeights=np.zeros((numOut, 1)))

# Flatten to column vectors before saving.
Weights = Weights.reshape((-1, 1))
Biases = Biases.reshape((-1, 1))

print 'Weights', len(Weights)
print 'Bias', len(Biases)
# NOTE(review): 17940 / 1544 below look like counts recorded from a prior run.
# 17940
# 1544
#
write_param.write_binary('../MLP_paramTest', Weights, Biases)
print 'Done!'