# Loading label dictionary.
# FIX: allow_pickle=True is required here — class_dict_file holds a pickled
# dict inside a 0-d object array, and np.load defaults to
# allow_pickle=False since NumPy 1.16.3, which would raise ValueError.
lab_dict = np.load(class_dict_file, allow_pickle=True).item()

print(CNN_net.out_dim)  # debug: flattened CNN output size fed into DNN1

# First MLP: maps the CNN feature vector to the embedding space.
DNN1_arch = {
    'input_dim': CNN_net.out_dim,
    'fc_lay': fc_lay,
    'fc_drop': fc_drop,
    'fc_use_batchnorm': fc_use_batchnorm,
    'fc_use_laynorm': fc_use_laynorm,
    'fc_use_laynorm_inp': fc_use_laynorm_inp,
    'fc_use_batchnorm_inp': fc_use_batchnorm_inp,
    'fc_act': fc_act,
}

DNN1_net = MLP(DNN1_arch)
DNN1_net.cuda()

# Second MLP: classification head on top of DNN1's last hidden layer.
DNN2_arch = {
    'input_dim': fc_lay[-1],
    'fc_lay': class_lay,
    'fc_drop': class_drop,
    'fc_use_batchnorm': class_use_batchnorm,
    'fc_use_laynorm': class_use_laynorm,
    'fc_use_laynorm_inp': class_use_laynorm_inp,
    'fc_use_batchnorm_inp': class_use_batchnorm_inp,
    'fc_act': class_act,
}

DNN2_net = MLP(DNN2_arch)
# Label dictionary: class_dict_file stores a pickled dict wrapped in a
# 0-d object array, hence allow_pickle=True followed by .item().
lab_dict = np.load(class_dict_file, allow_pickle=True).item()

# CNN_net.out_dim would be 6420 here; 321 is the feature dimension after
# the attention mechanism (per the original author's note) — TODO confirm.
DNN1_arch = dict(
    input_dim=321,
    fc_lay=fc_lay,
    fc_drop=fc_drop,
    fc_use_batchnorm=fc_use_batchnorm,
    fc_use_laynorm=fc_use_laynorm,
    fc_use_laynorm_inp=fc_use_laynorm_inp,
    fc_use_batchnorm_inp=fc_use_batchnorm_inp,
    fc_act=fc_act,
)
DNN1_net = MLP(DNN1_arch)  # embedding MLP (three layers, per original note)
DNN1_net.to(device)

# fc_lay[-1] is 2048 (per the original debug note): DNN1's last hidden
# width feeds the classifier head below.
DNN2_arch = dict(
    input_dim=fc_lay[-1],
    fc_lay=class_lay,
    fc_drop=class_drop,
    fc_use_batchnorm=class_use_batchnorm,
    fc_use_laynorm=class_use_laynorm,
    fc_use_laynorm_inp=class_use_laynorm_inp,
    fc_use_batchnorm_inp=class_use_batchnorm_inp,
    fc_act=class_act,  # final activation is the softmax output layer
)
DNN2_net = MLP(DNN2_arch)  # classification head (single layer, per original note)
DNN2_net.to(device)
# Build the CNN front-end and the first MLP on top of its flattened output.
CNN_net = CNN(CNN_arch)
CNN_net.to(device)

DNN1_arch = {
    'input_dim': CNN_net.out_dim,
    'fc_lay': fc_lay,
    'fc_drop': fc_drop,
    'fc_use_batchnorm': fc_use_batchnorm,
    'fc_use_laynorm': fc_use_laynorm,
    'fc_use_laynorm_inp': fc_use_laynorm_inp,
    'fc_use_batchnorm_inp': fc_use_batchnorm_inp,
    'fc_act': fc_act
}
DNN1_net = MLP(DNN1_arch)
DNN1_net.to(device)

# Restore pretrained weights; map_location keeps this loadable on hosts
# without the GPU the checkpoint was saved from.
checkpoint_load = torch.load(model_file, map_location=device)

# Set to True only when the checkpoint was saved from an nn.DataParallel
# wrapper, whose state_dict keys all carry a 'module.' prefix that must be
# stripped before load_state_dict on the bare modules.
model_trained_using_data_parallel = False
if model_trained_using_data_parallel:
    new_ckpt = {}
    for part_name, state in checkpoint_load.items():
        stripped = collections.OrderedDict()
        for key, tensor in state.items():
            # FIX: was `assert False`, which is stripped under `python -O`;
            # raise explicitly so a malformed checkpoint always fails loudly.
            if not key.startswith('module.'):
                raise ValueError(
                    'unexpected non-DataParallel key in checkpoint: ' + key)
            # Drop the 'module.' prefix (equivalent to the original
            # split('.')/join('.') dance, but direct and readable).
            stripped[key[len('module.'):]] = tensor
        new_ckpt[part_name] = stripped
# NOTE(review): bare `except:` — its matching `try:` is above this chunk;
# presumably it guards against output_folder already existing. Narrowing to
# `except FileExistsError:` (or os.makedirs(..., exist_ok=True)) would be
# safer — confirm against the try body.
except:
    os.mkdir(output_folder)

# setting seed (reproducibility for both torch and numpy RNGs)
torch.manual_seed(seed)
np.random.seed(seed)

# Loading label dictionary (pickled dict stored in a 0-d object array,
# hence allow_pickle=True and .item()).
lab_dict = np.load(class_dict_file, allow_pickle=True).item()

# Load the pretrained checkpoint and restore each sub-network's weights.
checkpoint = torch.load('exp/SincNet_TIMIT/model_raw')
CNN_net = CNN({})
CNN_net.load_state_dict(checkpoint['CNN_model_par'])
DNN1_net = MLP({})
DNN1_net.load_state_dict(checkpoint['DNN1_model_par'])
DNN2_net = MLP({})
DNN2_net.load_state_dict(checkpoint['DNN2_model_par'])

# Accumulate EER over N_batches random test batches.
eer = 0
for i in range(N_batches):
    # Draw a random batch of test signals.
    # TODO: original note says data_folder is the test dataset here —
    # confirm against the caller.
    [inp, lab] = create_batches_rnd(batch_size, data_folder, wav_lst_te, snt_te, wlen, lab_dict, 0.2)
    # Forward pass through the full pipeline: CNN -> DNN1 -> DNN2.
    pout = DNN2_net(DNN1_net(CNN_net(inp)))
    # Arg-max over class scores gives the predicted speaker index.
    pred = torch.max(pout, dim=1)[1]
    # NOTE(review): feeding hard multiclass predictions to roc_curve with
    # pos_label=1 looks questionable for an EER metric — confirm this is
    # the intended computation (EER is usually taken over scores).
    fpr, tpr, thresholds = roc_curve(lab, pred, pos_label=1)
    # EER: the operating point where FPR == 1 - TPR, located by root-finding
    # on the interpolated ROC curve.
    eer += brentq(lambda x: 1. - x - interp1d(fpr, tpr)(x), 0., 1.)