eval('b' + str(session_list[2])), eval('b' + str(session_list[3]))))
# --- Training data ---
# T_tr / nT_tr are the stacked target / non-target training trials assembled
# above (out of view) from per-session arrays via eval('a'/'b' + session id).
y_T_tr = np.ones(T_tr.shape[0])  # label 1 for every target trial
y_nT_tr = np.zeros(nT_tr.shape[0])  # label 0 for every non-target trial
X_train = np.concatenate((T_tr, nT_tr), axis=0)
y_train = np.concatenate((y_T_tr, y_nT_tr), axis=0)

# creating test data
# NOTE(review): eval-based variable lookup ('a<sess>' / 'b<sess>') is fragile;
# a dict keyed on session id would be safer. Left unchanged here.
T_te = eval('a' + str(sess))  # target trials of the held-out session
nT_te = eval('b' + str(sess))  # non-target trials of the held-out session
y_T_te = np.ones(T_te.shape[0])
y_nT_te = np.zeros(nT_te.shape[0])
X_test = np.concatenate((T_te, nT_te), axis=0)
y_test = np.concatenate((y_T_te, y_nT_te), axis=0)

# Shuffle and reshape the data to fit in model
# (data_import is defined elsewhere in the project.)
X_train, y_train = data_import(X_train, y_train)
X_test, y_test = data_import(X_test, y_test)

# Cast to the dtypes the downstream model expects.
X_train = X_train.astype('float32')
y_train = y_train.astype('int32')
X_test = X_test.astype('float32')
y_test = y_test.astype('int32')

# With the data prepared, set up where the results will be written.
# dir2: root output directory for subject-wise results.
dir2 = '/home/guest/PycharmProjects/sharaj_works/NSRE/rsvp_gen_results/subject_wise/'
# Run_tag names the per-run sub-directory: subject id + held-out test session.
Run_tag = 'D_C_subject_{}_te_session_{}'.format(sub, sess)
print(Run_tag)

# output_dir & output_dir1 (created further below) hold per-subject, per-session results.
# ===== 示例 #2 (Example #2) =====
import numpy as np
#loadmat
from scipy.io import loadmat, savemat

# Framing / feature-extraction constants.
t_feed = 10  # feed (hop) time -- presumably ms, matching t_frame; confirm
t_frame = 20  # frame length in ms
sample_rate = 16000  # audio sample rate (Hz)
fs = sample_rate / 1000  # samples per millisecond
# Builtin int() instead of np.int(): np.int was a deprecated alias for the
# builtin int (NumPy 1.20) and was removed entirely in NumPy 1.24.
L_value = int(fs * t_frame)  # samples per frame
NFFT = 512  # FFT size
nfilt = 22  # number of mel filterbank channels

audio_path = "D:\\LAB\\workspace\\lab\\patRecDat\\forStudents\\timit\\test"
#audio_path = "/Users/Mata/Documents/2017/学习/ws2017:18/PUL/forStudents/timit/test"

# Load the TIMIT test recordings; data_import is defined elsewhere in the
# project and returns a dict mapping speaker name -> recordings. The 0 flag
# presumably selects registered (known) speakers -- TODO confirm against
# data_import's definition.
dataset = data_import(audio_path, 0)  #samples is a dictionary of 172 persons

feature_all_set = {}  # speaker name -> features extracted in the loop below
print("feature engineering start")
# ShowProcess (defined elsewhere) renders a console progress bar, one tick per speaker.
process_bar = ShowProcess(len(dataset.keys()))
for name in dataset.keys():
    process_bar.show_process()
    #print("make the feature of "+ name)
    single_data = dataset.get(name,
                              'no such file name')  # samples of one person
    features_set = []
    for samples in single_data:
        if name in ['yuxin', 'qianqian',
                    'shanqi']:  #custom voice has dimension error
            samples = samples[:, 0]
        else:
# ===== 示例 #3 (Example #3) =====
#loadmat
from scipy.io import loadmat, savemat

# Framing / feature-extraction constants.
t_feed = 10  # feed (hop) time -- presumably ms, matching t_frame; confirm
t_frame = 20  # frame length in ms
sample_rate = 16000  # audio sample rate (Hz)
fs = sample_rate / 1000  # samples per millisecond
# Builtin int() instead of np.int(): np.int was a deprecated alias removed in
# NumPy 1.24 -- and numpy is not even imported in this snippet, so the
# original line would raise NameError.
L_value = int(fs * t_frame)  # samples per frame
NFFT = 512  # FFT size
nfilt = 22  # number of mel filterbank channels

audio_path = "D:\\LAB\\workspace\\lab\\patRecDat\\forStudents\\timit\\test"
#audio_path = "/Users/Mata/Documents/2017/学习/ws2017:18/PUL/forStudents/timit/test"

# Load the TIMIT test recordings; data_import is defined elsewhere in the
# project and returns a dict mapping speaker name -> recordings.
dataset = data_import(
    audio_path,
    0)  # 0 for registered(known) people, 1 for unregistered(unknown)people

#samples is a dictionary of 170 persons

feature_all_set = {}  # speaker name -> features extracted in the loop below
print("feature engineering start")
# ShowProcess (defined elsewhere) renders a console progress bar, one tick per speaker.
process_bar = ShowProcess(len(dataset.keys()))
for name in dataset.keys():
    process_bar.show_process()
    #print("make the feature of "+ name)
    single_data = dataset.get(name,
                              'no such file name')  # samples of one person
    features_set = []
    for samples in single_data:
        if name in ['yuxin', 'qianqian',