Example #1
import time
import pickle

VAL_NUM = len(VAL_ANSWER)
VAL_BATCHSIZE = max(int(VAL_NUM / 100), 1)   # aim for ~100 validation batches, at least 1 item each
VAL_INPUT  = [VAL_INPUT[i:i + VAL_BATCHSIZE] for i in range(0, VAL_NUM, VAL_BATCHSIZE)]
VAL_ANSWER = [VAL_ANSWER[i:i + VAL_BATCHSIZE] for i in range(0, VAL_NUM, VAL_BATCHSIZE)]

#pdb.set_trace()

print "Start training......"
totaltime = 0
for epoch in range(MAX_EPOCH):
    tStart = time.time()
    cost = 0
    for batch_x,batch_y in zip(BATCHED_INPUT,BATCHED_OUTPUT):
        cost += nn.train(batch_x,batch_y,
                         LEARNING_RATE,
                         MOMENTUM)
    tEnd = time.time()
    totaltime += tEnd - tStart

    if (epoch+1 != MAX_EPOCH) and ((epoch+1) % L_RATE_DECAY_STEP == 0):
        print "learning rate annealed at epoch {0}".format(epoch+1)
        LEARNING_RATE /= 10

    if epoch+1 != MAX_EPOCH and (epoch+1) % SAVE_MODEL_EPOCH == 0:
        fh = open(MODEL_ROOT+MODEL+"_at_{0}".format(epoch+1),'wb')
        saved_params = (nn.layers, nn.W, nn.b)
        pickle.dump(saved_params, fh)
        fh.close()
    #pdb.set_trace()
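
The loop above checkpoints only the tuple (nn.layers, nn.W, nn.b). A minimal reload sketch, assuming the network object exposes those as plain assignable attributes; the "_at_50" epoch tag is a hypothetical example of a saved checkpoint name:

# Minimal sketch of restoring a checkpoint written by the loop above.
# The "_at_50" tag and the direct attribute assignment are assumptions.
import pickle

fh = open(MODEL_ROOT + MODEL + "_at_50", 'rb')
nn.layers, nn.W, nn.b = pickle.load(fh)
fh.close()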
Example #2
import time
import pickle
import numpy as np

val_x = np.asarray(Val_Feats, dtype='float32').T
val_label = [LABEL_DICT[ID] for ID in Val_IDs]

# phone48to39 = load_dict_48to39()

# pdb.set_trace()

totaltime = 0
print "Start training......"
for epoch in range(100,MAX_EPOCH):
    tStart = time.time()
    cost = 0
    for batched_inputs, batched_outputs in zip(BATCHED_INPUT, BATCHED_OUTPUT):
        cost += nn.train(batched_inputs, batched_outputs,
                         LEARNING_RATE,
                         MOMENTUM,
                         RETAIN_PROB,      # dropout retain probability for hidden units
                         INPUT_R_PROB)     # dropout retain probability for the input layer
    tEnd = time.time()
    totaltime += tEnd - tStart
    if (epoch+1 != MAX_EPOCH) and ((epoch+1) % L_RATE_DECAY_STEP == 0):
        print "learning rate annealed at epoch {0}".format(epoch+1)
        LEARNING_RATE /= 10
    
    if epoch+1 != MAX_EPOCH and (epoch+1) % SAVE_MODEL_EPOCH == 0:
        fh = open(MODEL_ROOT+MODEL+"_at_{0}".format(epoch+1),'wb')
        saved_params = (nn.layers, nn.W, nn.b)
        pickle.dump(saved_params, fh)
        fh.close()
    # Calculate Validation Set Error
    val_batch = 500
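
RETAIN_PROB and INPUT_R_PROB are passed straight into nn.train; judging by the names they are dropout retain probabilities for the hidden and input layers. A sketch of what such an argument typically controls (an illustration of standard inverted dropout, not the actual body of nn.train):

# Inverted dropout with a retain probability: keep each unit with
# probability retain_prob, zero it otherwise, and rescale so the expected
# activation matches the no-dropout forward pass used at test time.
import numpy as np

def apply_dropout(activations, retain_prob):
    mask = np.random.binomial(1, retain_prob, size=activations.shape)
    return activations * mask / retain_prob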
Example #3
print "Start training"

# phone48to39 = load_dict_48to39()

# pdb.set_trace()

p48list = load_list39to48()        # index -> 48-phone symbol, in network-output order
p48to39dict = load_dict_48to39()   # 48-phone -> 39-phone mapping

totaltime = 0
for epoch in range(MAX_EPOCH):
    tStart = time.time()
    cost = 0
    for batched_inputs,batched_outputs in zip(BATCHED_INPUT,BATCHED_OUTPUT):
        cost += nn.train(batched_inputs,batched_outputs)
    tEnd = time.time()
    totaltime += tEnd - tStart

    # Calculate Validation Error
    valsum = 0
    for val in validationNlabel:
        val_x = np.transpose(np.asarray([val[1]], dtype='float32'))
        p_feat = nn.test(val_x)
        pos = np.argmax(p_feat)           # index of the highest-scoring class
        p_48 = p48list[pos]               # its 48-phone symbol
        p_39 = p48to39dict[p_48]          # mapped 39-phone symbol (unused below)
        if val[2] == p_48:
            valsum += 1
        print "validating:", val[2], p_48
    valcorrect = float(valsum) / len(validationNlabel)
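
Note that p_39 is computed but never used: accuracy is scored on the raw 48-phone labels. A hedged variant that scores on the collapsed 39-phone set instead, assuming val[2] is itself a 48-phone label that p48to39dict can map:

# Variant of the loop above, scored after collapsing both the prediction
# and the reference to 39 phones; val[2] being a valid key of
# p48to39dict is an assumption.
valsum39 = 0
for val in validationNlabel:
    val_x = np.transpose(np.asarray([val[1]], dtype='float32'))
    pos = np.argmax(nn.test(val_x))
    if p48to39dict[val[2]] == p48to39dict[p48list[pos]]:
        valsum39 += 1
valcorrect39 = float(valsum39) / len(validationNlabel)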
Example #4
import time
import pickle

# phone48to39 = load_dict_48to39()

# pdb.set_trace()

totaltime = 0
val_batch = 500
val_output = []
print "Start training......"
for epoch in range(MAX_EPOCH):
    tStart = time.time()
    cost = 0
    for idx in range(0, len(BATCHED_INPUT)):
        cost += nn.train(BATCHED_INPUT[idx], BATCHED_OUTPUT[idx],
                         LEARNING_RATE,
                         MOMENTUM,
                         RETAIN_PROB,
                         1)    # input retain probability fixed at 1, i.e. no input dropout
    tEnd = time.time()
    # Free the batch lists to reclaim memory; they must be rebuilt before
    # the next epoch, or the second iteration will fail with a NameError.
    del BATCHED_INPUT
    del BATCHED_OUTPUT
    totaltime += tEnd - tStart
    if (epoch+1 != MAX_EPOCH) and ((epoch+1) % L_RATE_DECAY_STEP == 0):
        print "learning rate annealed at epoch {0}".format(epoch+1)
        LEARNING_RATE *= 0.5
    
    if epoch+1 != MAX_EPOCH and (epoch+1) % SAVE_MODEL_EPOCH == 0:
        fh = open(MODEL_ROOT+MODEL+"_at_{0}".format(epoch+1),'wb')
        saved_params = (nn.layers, nn.W, nn.b)
        pickle.dump(saved_params, fh)
        fh.close()
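
Since the loop deletes BATCHED_INPUT and BATCHED_OUTPUT at the end of every epoch, the full script has to rebuild them before the next pass. A minimal sketch of such a rebuild step; INPUT, OUTPUT, and BATCHSIZE are hypothetical stand-ins for the raw training pairs and batch size used elsewhere in the script:

# Hypothetical per-epoch rebatching helper: reshuffle the raw pairs and
# slice them into fresh batch lists. All three parameters are stand-ins.
import random

def make_batches(INPUT, OUTPUT, BATCHSIZE):
    order = range(len(INPUT))
    random.shuffle(order)
    INPUT = [INPUT[i] for i in order]
    OUTPUT = [OUTPUT[i] for i in order]
    batched_in = [INPUT[i:i + BATCHSIZE] for i in range(0, len(INPUT), BATCHSIZE)]
    batched_out = [OUTPUT[i:i + BATCHSIZE] for i in range(0, len(OUTPUT), BATCHSIZE)]
    return batched_in, batched_out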
Example #5
import time
import pickle
import numpy as np

Val_IDs, Val_Feats = SepIDnFeat(LABELED_VALIDATION_SET)

val_x = np.asarray(Val_Feats, dtype='float32').T
val_label = [LABEL_DICT[ID] for ID in Val_IDs]

# phone48to39 = load_dict_48to39()

# pdb.set_trace()

totaltime = 0
print "Start training......"
for epoch in range(100, MAX_EPOCH):
    tStart = time.time()
    cost = 0
    for batched_inputs, batched_outputs in zip(BATCHED_INPUT, BATCHED_OUTPUT):
        cost += nn.train(batched_inputs, batched_outputs, LEARNING_RATE,
                         MOMENTUM, RETAIN_PROB, INPUT_R_PROB)
    tEnd = time.time()
    totaltime += tEnd - tStart
    if (epoch + 1 != MAX_EPOCH) and ((epoch + 1) % L_RATE_DECAY_STEP == 0):
        print "learning rate annealed at epoch {0}".format(epoch + 1)
        LEARNING_RATE /= 10

    if epoch + 1 != MAX_EPOCH and (epoch + 1) % SAVE_MODEL_EPOCH == 0:
        fh = open(MODEL_ROOT + MODEL + "_at_{0}".format(epoch + 1), 'wb')
        saved_params = (nn.layers, nn.W, nn.b)
        pickle.dump(saved_params, fh)
        fh.close()
    # Calculate Validation Set Error
    val_batch = 500
    val_output = []
    for i in xrange((val_x.shape[1] - 1) / val_batch + 1):
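
The snippet breaks off at the chunk loop header. A hedged sketch of how such a chunked validation pass usually continues; nn.test returning one score column per example and val_label holding comparable class labels are assumptions, not confirmed by the snippet:

# Sketch completing the chunked validation loop; the shapes assumed here
# (features in columns, scores in columns) follow the .T transpose above.
for i in xrange((val_x.shape[1] - 1) / val_batch + 1):
    chunk = val_x[:, i * val_batch:(i + 1) * val_batch]
    val_output.extend(np.argmax(nn.test(chunk), axis=0))
val_err = float(np.sum(np.asarray(val_output) != np.asarray(val_label))) / len(val_label)
print "validation error: {0}".format(val_err)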