예제 #1
0
CNN_net = CNN(CNN_arch)
CNN_net.to(device)

# First MLP stage, stacked on top of the CNN feature extractor.
DNN1_arch = {
    'input_dim': CNN_net.out_dim,
    'fc_lay': fc_lay,
    'fc_drop': fc_drop,
    'fc_use_batchnorm': fc_use_batchnorm,
    'fc_use_laynorm': fc_use_laynorm,
    'fc_use_laynorm_inp': fc_use_laynorm_inp,
    'fc_use_batchnorm_inp': fc_use_batchnorm_inp,
    'fc_act': fc_act
}

DNN1_net = MLP(DNN1_arch)
DNN1_net.to(device)

# Load the checkpoint directly onto the target device.
checkpoint_load = torch.load(model_file, map_location=device)

# Set True when the checkpoint was saved from an nn.DataParallel-wrapped
# model: its state_dict keys then carry a leading 'module.' that must be
# stripped before loading into an unwrapped model.
model_trained_using_data_parallel = False
if model_trained_using_data_parallel:
    new_ckpt = {}
    for k, v in checkpoint_load.items():
        new_v = collections.OrderedDict()
        for kk, vv in v.items():
            # Every key must carry the DataParallel prefix; a missing prefix
            # means the checkpoint was not saved from DataParallel after all.
            # (Was a bare `assert False`, which is stripped under `python -O`
            # and carries no message; raise an explicit error instead.)
            if not kk.startswith('module.'):
                raise ValueError(
                    "expected DataParallel key starting with 'module.', "
                    "got %r in state dict %r" % (kk, k))
            new_v[kk[len('module.'):]] = vv
        new_ckpt[k] = new_v
예제 #2
0
# Loading label dictionary
# Loading label dictionary.
# allow_pickle=True is required since NumPy 1.16.3 to load a pickled dict
# (consistent with how this dictionary is loaded elsewhere in the project).
lab_dict = np.load(class_dict_file, allow_pickle=True).item()

print(CNN_net.out_dim)

# First MLP stage on top of the CNN features.
DNN1_arch = {'input_dim': CNN_net.out_dim,
          'fc_lay': fc_lay,
          'fc_drop': fc_drop, 
          'fc_use_batchnorm': fc_use_batchnorm,
          'fc_use_laynorm': fc_use_laynorm,
          'fc_use_laynorm_inp': fc_use_laynorm_inp,
          'fc_use_batchnorm_inp':fc_use_batchnorm_inp,
          'fc_act': fc_act,
          }

DNN1_net=MLP(DNN1_arch)
DNN1_net.cuda()


# Classifier head: consumes the last hidden layer of DNN1 (fc_lay[-1]).
DNN2_arch = {'input_dim':fc_lay[-1] ,
          'fc_lay': class_lay,
          'fc_drop': class_drop, 
          'fc_use_batchnorm': class_use_batchnorm,
          'fc_use_laynorm': class_use_laynorm,
          'fc_use_laynorm_inp': class_use_laynorm_inp,
          'fc_use_batchnorm_inp':class_use_batchnorm_inp,
          'fc_act': class_act,
          }


DNN2_net=MLP(DNN2_arch)
예제 #3
0
    'cnn_act': ["leaky_relu", "leaky_relu", "leaky_relu"],
    'cnn_drop': [0.0, 0.0, 0.0],
}
CNN_net = CNN(CNN_arch)

# Hidden MLP: three 2048-unit leaky-ReLU layers with batchnorm and 20% dropout;
# the input to the stack is layer-normalized.
DNN1_arch = dict(
    input_dim=CNN_net.out_dim,
    fc_lay=[2048, 2048, 2048],
    fc_drop=[0.2, 0.2, 0.2],
    fc_use_batchnorm=[True, True, True],
    fc_use_laynorm=[False, False, False],
    fc_use_laynorm_inp=True,
    fc_use_batchnorm_inp=False,
    fc_act=["leaky_relu", "leaky_relu", "leaky_relu"],
)
DNN1_net = MLP(DNN1_arch)

# Classifier head: a single softmax layer over 8 classes.
DNN2_arch = dict(
    input_dim=2048,
    fc_lay=[8],
    fc_drop=[0.2],
    fc_use_batchnorm=[False],
    fc_use_laynorm=[False],
    fc_use_laynorm_inp=False,
    fc_use_batchnorm_inp=False,
    fc_act=["softmax"],
)
DNN2_net = MLP(DNN2_arch)

# Full pipeline: raw input -> CNN features -> hidden MLP -> classifier.
model = nn.Sequential(CNN_net, DNN1_net, DNN2_net)
예제 #4
0
# Loading label dictionary
# Loading label dictionary.
# allow_pickle=True is required since NumPy 1.16.3 to load a pickled dict
# (consistent with how this dictionary is loaded elsewhere in the project).
lab_dict = np.load(class_dict_file, allow_pickle=True).item()

# First MLP stage on top of the CNN features.
DNN1_arch = {
    'input_dim': CNN_net.out_dim,
    'fc_lay': fc_lay,
    'fc_drop': fc_drop,
    'fc_use_batchnorm': fc_use_batchnorm,
    'fc_use_laynorm': fc_use_laynorm,
    'fc_use_laynorm_inp': fc_use_laynorm_inp,
    'fc_use_batchnorm_inp': fc_use_batchnorm_inp,
    'fc_act': fc_act,
}

DNN1_net = MLP(DNN1_arch)
DNN1_net.cuda()

# Classifier head: consumes the last hidden layer of DNN1 (fc_lay[-1]).
DNN2_arch = {
    'input_dim': fc_lay[-1],
    'fc_lay': class_lay,
    'fc_drop': class_drop,
    'fc_use_batchnorm': class_use_batchnorm,
    'fc_use_laynorm': class_use_laynorm,
    'fc_use_laynorm_inp': class_use_laynorm_inp,
    'fc_use_batchnorm_inp': class_use_batchnorm_inp,
    'fc_act': class_act,
}

DNN2_net = MLP(DNN2_arch)
DNN2_net.cuda()
예제 #5
0
# Loading label dictionary
# Loading label dictionary.
# allow_pickle=True is required since NumPy 1.16.3 to load a pickled dict
# (consistent with how this dictionary is loaded elsewhere in the project).
lab_dict = np.load(class_dict_file, allow_pickle=True).item()

# First MLP stage on top of the CNN features.
DNN1_arch = {
    'input_dim': CNN_net.out_dim,
    'fc_lay': fc_lay,
    'fc_drop': fc_drop,
    'fc_use_batchnorm': fc_use_batchnorm,
    'fc_use_laynorm': fc_use_laynorm,
    'fc_use_laynorm_inp': fc_use_laynorm_inp,
    'fc_use_batchnorm_inp': fc_use_batchnorm_inp,
    'fc_act': fc_act,
}

DNN1_net = MLP(DNN1_arch)
DNN1_net.cuda()

# Classifier head: consumes the last hidden layer of DNN1 (fc_lay[-1]).
DNN2_arch = {
    'input_dim': fc_lay[-1],
    'fc_lay': class_lay,
    'fc_drop': class_drop,
    'fc_use_batchnorm': class_use_batchnorm,
    'fc_use_laynorm': class_use_laynorm,
    'fc_use_laynorm_inp': class_use_laynorm_inp,
    'fc_use_batchnorm_inp': class_use_batchnorm_inp,
    'fc_act': class_act,
}

DNN2_net = MLP(DNN2_arch)
DNN2_net.cuda()
예제 #6
0
# Label dictionary is stored as a pickled object array, hence allow_pickle.
lab_dict = np.load(class_dict_file, allow_pickle=True).item()

# Hidden MLP input is 321-dim: the feature dimension after the attention
# mechanism, not CNN_net.out_dim (which would be 6420).
DNN1_arch = dict(
    input_dim=321,
    fc_lay=fc_lay,
    fc_drop=fc_drop,
    fc_use_batchnorm=fc_use_batchnorm,
    fc_use_laynorm=fc_use_laynorm,
    fc_use_laynorm_inp=fc_use_laynorm_inp,
    fc_use_batchnorm_inp=fc_use_batchnorm_inp,
    fc_act=fc_act,
)

DNN1_net = MLP(DNN1_arch)  # hidden stack (three layers per the original note)
DNN1_net.to(device)

# Classifier head: input width is the last hidden layer, fc_lay[-1] (2048);
# per the original note, its final activation (class_act) is softmax.
DNN2_arch = dict(
    input_dim=fc_lay[-1],
    fc_lay=class_lay,
    fc_drop=class_drop,
    fc_use_batchnorm=class_use_batchnorm,
    fc_use_laynorm=class_use_laynorm,
    fc_use_laynorm_inp=class_use_laynorm_inp,
    fc_use_batchnorm_inp=class_use_batchnorm_inp,
    fc_act=class_act,
)

DNN2_net = MLP(DNN2_arch)  # single-layer head
DNN2_net.to(device)
예제 #7
0
except:
    os.mkdir(output_folder)

# setting seed
torch.manual_seed(seed)
np.random.seed(seed)

# Loading label dictionary
lab_dict = np.load(class_dict_file, allow_pickle=True).item()

# Load the trained model checkpoint.
# NOTE(review): path is hard-coded; presumably should come from the config.
checkpoint = torch.load('exp/SincNet_TIMIT/model_raw')

# NOTE(review): the networks are constructed with empty arch dicts ({}) —
# this assumes CNN/MLP tolerate missing options (or that load_state_dict
# restores everything needed); confirm against their constructors.
CNN_net = CNN({})
CNN_net.load_state_dict(checkpoint['CNN_model_par'])
DNN1_net = MLP({})
DNN1_net.load_state_dict(checkpoint['DNN1_model_par'])
DNN2_net = MLP({})
DNN2_net.load_state_dict(checkpoint['DNN2_model_par'])
eer = 0
for i in range(N_batches):
    # Build a random evaluation batch from the test list.
    # TODO: data_folder is presumably the test dataset root — confirm.
    [inp, lab] = create_batches_rnd(batch_size, data_folder, wav_lst_te,
                                    snt_te, wlen, lab_dict, 0.2)

    # Forward pass through the full pipeline: CNN -> hidden MLP -> classifier.
    pout = DNN2_net(DNN1_net(CNN_net(inp)))

    # Hard class prediction: index of the maximum output per sample.
    pred = torch.max(pout, dim=1)[1]
    # NOTE(review): roc_curve is fed hard argmax predictions rather than
    # scores, so the ROC has a single non-trivial operating point; EER is
    # normally computed from the positive-class posterior — verify intent.
    fpr, tpr, thresholds = roc_curve(lab, pred, pos_label=1)
    # Interpolate the ROC and solve TPR = 1 - FPR for the equal-error rate.
    eer += brentq(lambda x: 1. - x - interp1d(fpr, tpr)(x), 0., 1.)