# Example #1
   DNN2_net.load_state_dict(checkpoint_load['DNN2_model_par'])



# One RMSprop optimizer per sub-network, all sharing the same
# hyperparameters (learning rate `lr`, alpha=0.95, eps=1e-8).
_rmsprop_kwargs = dict(lr=lr, alpha=0.95, eps=1e-8)
optimizer_CNN = optim.RMSprop(CNN_net.parameters(), **_rmsprop_kwargs)
optimizer_DNN1 = optim.RMSprop(DNN1_net.parameters(), **_rmsprop_kwargs)
optimizer_DNN2 = optim.RMSprop(DNN2_net.parameters(), **_rmsprop_kwargs)

# print("----------------------")
# print(DNN2_net(DNN1_net(CNN_net(3200))))

#for epoch in range(N_epochs):
  
test_flag = 0

# Put every sub-network into training mode (affects dropout/batchnorm layers).
for _net in (CNN_net, DNN1_net, DNN2_net):
    _net.train()

# Per-epoch accumulators for loss and classification error.
loss_sum = 0
err_sum = 0

 # for i in range(N_batches):

#    [inp,lab]=create_batches_rnd(batch_size,data_folder,wav_lst_tr,snt_tr,wlen,lab_dict,0.2)
#    pout=DNN2_net(DNN1_net(CNN_net(inp)))
    
#    pred=torch.max(pout,dim=1)[1]
#    loss = cost(pout, lab.long())
#    err = torch.mean((pred!=lab.long()).float())
    
   
# Example #2
                               eps=1e-8)
# RMSprop optimizer for the second DNN classifier
# (learning rate `lr`, alpha=0.95, eps=1e-8).
optimizer_DNN2 = optim.RMSprop(
    DNN2_net.parameters(), lr=lr, alpha=0.95, eps=1e-8)

# Start training: N_epochs (1500) passes over the training data.

# Main training loop.
for epoch in range(N_epochs):

    test_flag = 0
    CNN_net.train(
    )  # These modules contain dropout and batchnorm, so the mode must be set
    # explicitly: .train() during training, .eval() during validation.
    AttentionModule.train()
    DNN1_net.train()  # NOTE: must be called before the forward pass
    DNN2_net.train()

    # Running totals of loss and error over this epoch's batches.
    loss_sum = 0
    err_sum = 0

    for i in range(N_batches):  # one mini-batch per iteration; 800 batches total
        # Fetch a random training mini-batch (batch_size = 128).
        # NOTE(review): 0.2 looks like an augmentation/amplitude factor for
        # create_batches_rnd — confirm against its definition.
        [inp, lab] = create_batches_rnd(batch_size, data_folder, wav_lst_tr,
                                        snt_tr, wlen, lab_dict, 0.2)

        # Forward pass through the convolutional front-end.
        output = CNN_net(inp)
        # under deprecate
        # print(output.shape) [128, 6420]  128 segments, each of size 6420
        # o1, o3 = output.split([1, 1], dim=1)  # split columns