Exemplo n.º 1
0
#    optimizer_DNN2.step()
    
#    loss_sum=loss_sum+loss.detach()
#    err_sum=err_sum+err.detach()
 

#  loss_tot=loss_sum/N_batches
#  err_tot=err_sum/N_batches
  
 
   
   
# Full Validation  new  
#  if epoch%N_eval_epoch==0:
      
# Switch every sub-network to evaluation mode before validation
# (disables dropout / batch-norm updates).
for _net in (CNN_net, DNN1_net, DNN2_net):
    _net.eval()

test_flag = 1  # mark that we are in the evaluation phase
correct = 0    # running count of correct predictions

with torch.no_grad():  
	for i in range(snt_te):
       
     #[fs,signal]=scipy.io.wavfile.read(data_folder+wav_lst_te[i])
     #signal=signal.astype(float)/32768
Exemplo n.º 2
0
    # print('error total:' + str(err_tot.item()))
    # initially (first epoch):
    # loss_totall:5.845455169677734
    # error total:0.9918749928474426
    # second epoch
    # loss_totall: 5.143251895904541
    # error total: 0.9721777439117432
    # the 4th time
    # loss_totall: 4.186425685882568
    # error total: 0.8866991996765137
    # epoch3, loss_tr = 4.186426  err_tr = 0.886699

    # Full Validation  new
    # Full Validation: run every N_eval_epoch epochs (results are written out;
    # N_eval_epoch is presumably 8 per the original note -- TODO confirm).
    if epoch % N_eval_epoch == 0:
        # os.system("pause")
        # Put all sub-models into evaluation mode (we are validating, not training).
        CNN_net.eval()
        AttentionModule.eval()
        DNN1_net.eval()
        DNN2_net.eval()
        test_flag = 1        # flag: evaluation phase
        loss_sum = 0         # accumulated validation loss
        err_sum = 0          # accumulated frame-level error
        err_sum_snt = 0      # accumulated sentence-level error

        # The pipeline is assembled from several separate models, so wrap the
        # whole pass in no_grad() to disable gradient tracking for all of them.
        with torch.no_grad():
            for i in range(snt_te):  # iterate over the test-utterance list

                # [fs,signal]=scipy.io.wavfile.read(data_folder+wav_lst_te[i])
                # signal=signal.astype(float)/32768

                # sf.read returns (data, samplerate); note the order differs
                # from scipy.io.wavfile.read above.
                [signal, fs] = sf.read(data_folder + wav_lst_te[i])