Code example #1
0
        # NOTE(review): fragment starts mid if/elif chain — the leading `if`
        # branch and the enclosing loop are outside this view.
        elif fea_pase_dev[snt].shape[0]-lab_dev[snt].shape[0]==1:
            # Features are exactly one frame longer than the labels:
            # drop the last feature frame so lengths match.
            fea_lst_dev.append(fea_pase_dev[snt][:-1])
            lab_lst_dev.append(lab_dev[snt])
        else:
            # Any other feature/label length mismatch is unrecoverable
            # for this utterance — abort the whole run.
            print('length error')
            sys.exit(0)
    else:

        # presumably the length difference is 2 on this path — TODO confirm
        # against the enclosing (unseen) condition.
        fea_lst_dev.append(fea_pase_dev[snt][:-2])
        lab_lst_dev.append(lab_dev[snt])
    
    

# Stack the per-utterance feature matrices into one array and widen each
# frame with `left`/`right` context frames (train and dev separately).
fea_conc = context_window(np.concatenate(fea_lst), left, right)
fea_conc_dev = context_window(np.concatenate(fea_lst_dev), left, right)

# Z-score each feature dimension; note train and dev use their OWN
# statistics rather than sharing the training mean/std.
fea_conc = (fea_conc - fea_conc.mean(axis=0)) / fea_conc.std(axis=0)
fea_conc_dev = (fea_conc_dev - fea_conc_dev.mean(axis=0)) / fea_conc_dev.std(axis=0)

# Stack the per-utterance label vectors the same way.
lab_conc = np.concatenate(lab_lst)
lab_conc_dev = np.concatenate(lab_lst_dev)

if right>0:
Code example #2
0
File: run_IEMOCAP_fast.py — Project: ys1998/maso
# Seed the NumPy and PyTorch RNGs for reproducibility.
np.random.seed(seed)
torch.manual_seed(seed)

print("Data Preparation...")

# Gather per-utterance feature matrices; each utterance also gets a
# constant frame-level label vector whose value is looked up by the
# utterance-id prefix before the first '_'.
fea_lst = [fea_pase[snt] for snt in fea_pase]
lab_lst = [np.zeros(fea_pase[snt].shape[0]) + lab[snt.split('_')[0]]
           for snt in fea_pase]

# Training feature matrix, widened with context frames.
fea_conc = context_window(np.concatenate(fea_lst), left, right)

# Per-dimension statistics, then z-score normalization.
mean = np.mean(fea_conc, axis=0)
std = np.std(fea_conc, axis=0)
fea_conc = (fea_conc - mean) / std

# Move the stats to the compute device — presumably reused later
# (outside this fragment) to normalize dev/test features.
mean = torch.from_numpy(mean).float().to(device)
std = torch.from_numpy(std).float().to(device)

# Frame-level label matrix.
lab_conc = np.concatenate(lab_lst)

if right > 0:
Code example #3
0
                # NOTE(review): fragment starts mid if/elif chain; the first
                # branch condition is outside this view.
                fea_lst_dev.append(fea_pase_dev[snt][:-3])
                lab_lst_dev.append(lab_dev[snt])
            elif fea_pase_dev[snt].shape[0] - lab_dev[snt].shape[0] == 1:
                # Features one frame longer than labels: trim the last frame.
                fea_lst_dev.append(fea_pase_dev[snt][:-1])
                lab_lst_dev.append(lab_dev[snt])
            else:
                # Unexpected feature/label length mismatch — abort the run.
                print('length error')
                sys.exit(0)
        else:

            # presumably the length difference is 2 on this path — TODO
            # confirm against the enclosing (unseen) condition.
            fea_lst_dev.append(fea_pase_dev[snt][:-2])
            lab_lst_dev.append(lab_dev[snt])

    # Concatenate the per-utterance features and widen each frame with
    # `left`/`right` context frames (train and dev).
    fea_conc = context_window(np.concatenate(fea_lst), left, right)
    fea_conc_dev = context_window(np.concatenate(fea_lst_dev), left, right)

    # Concatenate the frame-level labels.
    lab_conc = np.concatenate(lab_lst)
    lab_conc_dev = np.concatenate(lab_lst_dev)

    # Trim the labels to line up with the context-windowed features —
    # presumably context_window drops `left` frames at the start and
    # `right` at the end (TODO confirm against its definition).
    tail = -right if right > 0 else None
    lab_conc = lab_conc[left:tail]
    lab_conc_dev = lab_conc_dev[left:tail]
Code example #4
0
    # compute per-speaker mean and variance
    if avg_spk:
        # Collapse each speaker's list of per-utterance stat vectors into a
        # single vector: stack along a new dim and average over it.
        for spk_id in mean_spk_dev.keys():
            mean_spk_dev[spk_id] = torch.mean(torch.stack(
                mean_spk_dev[spk_id]),
                                              dim=0)
            std_spk_dev[spk_id] = torch.mean(torch.stack(std_spk_dev[spk_id]),
                                             dim=0)

        # apply speaker normalization
        # NOTE(review): only mean subtraction is applied; division by the
        # per-speaker std is deliberately(?) commented out below.
        for snt_id in fea_dev.keys():
            # speaker id is the utterance-id prefix before the first '_'
            spk_id = snt_id.split('_')[0]
            fea_pase_dev[snt_id] = (fea_pase_dev[snt_id] - mean_spk_dev[spk_id]
                                    )  #/std_spk_dev[spk_id]
            fea_pase_dev[snt_id] = context_window(fea_pase_dev[snt_id], left,
                                                  right)

    # Network initialization
    # NOTE(review): relies on `snt_id` leaking out of the loop above; when
    # avg_spk is false, `snt_id` must be bound by earlier (unseen) code and
    # the features may not have had context_window applied — in which case
    # the (left + right + 1) factor here would be inconsistent. TODO confirm.
    inp_dim = fea_pase_dev[snt_id].shape[1] * (left + right + 1)
    nnet = MLP(options, inp_dim)
    nnet.to(device)

    # Restore the trained weights and switch to inference mode.
    nnet.load_state_dict(torch.load(model_file))
    nnet.eval()

    # Open the posterior output stream — presumably a Kaldi-style ark
    # writer; confirm open_or_fd semantics.
    post_file = open_or_fd(ark_file, output_folder, 'wb')

    for snt_id in fea_dev.keys():
        pout = nnet(torch.from_numpy(fea_pase_dev[snt_id]).to(device).float())
        # TO DO IT!!
        #pout=pout-log_counts