import csv


def main():
    static_feature_path = '../resource/data_v2/baseline.csv'
    dynamic_feature_path = '../resource/data_v2/dynamic.csv'
    label_path = '../resource/data_v2/label.csv'
    treatment_path = '../resource/data_v2/treatment.csv'
    side_effect_name_list = 'side_effect_1', 'side_effect_2', 'side_effect_3', 'side_effect_4'
    treatment_name_list = 'treatment_1', 'treatment_2', 'treatment_3', 'treatment_4'
    training_step = 2000
    hidden_size = 16

    # Header row of the result CSV.
    output = [[
        'rnn', 'i', 'side_effect_name', 'j',
        'eval_info_g', 'auc_g', 'tn_g', 'fp_g', 'fn_g', 'tp_g', 'optimal_cut_g',
        'eval_info_1', 'auc_1', 'tn_1', 'fp_1', 'fn_1', 'tp_1', 'optimal_cut_1',
        'eval_info_2', 'auc_2', 'tn_2', 'fp_2', 'fn_2', 'tp_2', 'optimal_cut_2',
        'eval_info_3', 'auc_3', 'tn_3', 'fp_3', 'fn_3', 'tp_3', 'optimal_cut_3',
        'eval_info_4', 'auc_4', 'tn_4', 'fp_4', 'fn_4', 'tp_4', 'optimal_cut_4'
    ]]

    for i in range(2):
        for side_effect_name, treatment_name in zip(side_effect_name_list, treatment_name_list):
            print('Iteration: {}'.format(i))
            print(side_effect_name)
            print(treatment_name)

            label = read_label(label_path)
            dynamic_data = read_dynamic_data(dynamic_feature_path)
            static_data = read_static_data(static_feature_path)
            treatment_data = read_treatment(treatment_path)
            dynamic_data, treatment_data, label = data_shift(
                dynamic_data, treatment_data, label)

            cross_validation = FiveFoldCrossValidation(label, dynamic_data,
                                                       static_data, treatment_data)
            dataset = cross_validation.generate_five_fold(
                treatment_name, side_effect_name)
            result = train(dataset, hidden_size, training_step)

            # One result tuple per fold: a general evaluation plus four variant-specific ones.
            for j in range(5):
                general, v1, v2, v3, v4 = result[j]
                eval_info_g, auc_g, tn_g, fp_g, fn_g, tp_g, optimal_cut_g = general
                eval_info_1, auc_1, tn_1, fp_1, fn_1, tp_1, optimal_cut_1 = v1
                eval_info_2, auc_2, tn_2, fp_2, fn_2, tp_2, optimal_cut_2 = v2
                eval_info_3, auc_3, tn_3, fp_3, fn_3, tp_3, optimal_cut_3 = v3
                eval_info_4, auc_4, tn_4, fp_4, fn_4, tp_4, optimal_cut_4 = v4
                output.append([
                    'rnn', i, side_effect_name, j,
                    eval_info_g, auc_g, tn_g, fp_g, fn_g, tp_g, optimal_cut_g,
                    eval_info_1, auc_1, tn_1, fp_1, fn_1, tp_1, optimal_cut_1,
                    eval_info_2, auc_2, tn_2, fp_2, fn_2, tp_2, optimal_cut_2,
                    eval_info_3, auc_3, tn_3, fp_3, fn_3, tp_3, optimal_cut_3,
                    eval_info_4, auc_4, tn_4, fp_4, fn_4, tp_4, optimal_cut_4
                ])

    with open('../resource/rnn.csv', 'w', encoding='utf-8-sig', newline='') as f:
        csv.writer(f).writerows(output)
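# The per-fold rows above are unpacked field by field. Assuming each of
# general, v1..v4 is the same 7-tuple (eval_info, auc, tn, fp, fn, tp,
# optimal_cut), as the unpacking above implies, an equivalent and more compact
# way to build a row is a small flattening helper. This helper is illustrative
# only and not part of the original code.
from itertools import chain


def make_row(i, side_effect_name, j, fold_result):
    """Flatten the five 7-tuples of one fold into a single CSV row."""
    return ['rnn', i, side_effect_name, j] + list(chain.from_iterable(fold_result))

# e.g. output.append(make_row(i, side_effect_name, j, result[j]))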
def train_wrap(label_path, dynamic_feature_path, static_feature_path, treatment_path,
               treatment_name, side_effect_name, model_name, data_num, feature_list=None):
    label = read_label(label_path)
    dynamic_data = read_dynamic_data(dynamic_feature_path)
    static_data = read_static_data(static_feature_path)
    treatment_data = read_treatment(treatment_path)
    dynamic_data, treatment_data, label = data_shift(dynamic_data, treatment_data, label)
    cross_validation = FiveFoldCrossValidation(label, dynamic_data, static_data, treatment_data)
    dataset = cross_validation.generate_five_fold(treatment_name, side_effect_name)
    result = train(dataset, model_name, data_num, feature_list)
    return result
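# Example invocation of train_wrap, reusing the CSV paths from main() above.
# The model_name and data_num values ('rnn', 5) and the feature subset are
# illustrative assumptions, not values taken from the original project;
# substitute whatever the train() implementation expects.
result = train_wrap(
    label_path='../resource/data_v2/label.csv',
    dynamic_feature_path='../resource/data_v2/dynamic.csv',
    static_feature_path='../resource/data_v2/baseline.csv',
    treatment_path='../resource/data_v2/treatment.csv',
    treatment_name='treatment_1',
    side_effect_name='side_effect_1',
    model_name='rnn',
    data_num=5,
    feature_list=None,
)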
def main():
    orig_path = '/home/step5/MLDS_Data/MLDS_HW1_RELEASE_v1/'
    # feature_path = '../data/train_100000.ark'
    # label_path = '../data/train_100000.lab'
    feature_path = orig_path + 'fbank/train.ark'
    label_path = orig_path + 'label/train_sorted.lab'
    submit_feature_path = orig_path + 'fbank/test.ark'
    submit_feature_path_2 = '../data/train_100000.ark'
    # phone_map_path = '../data/phone_map'
    p48_39_path = '../data/48_39.map'

    DATA_SIZE = 300000
    X = read_data.read_feature(feature_path, DATA_SIZE)
    Y = read_data.read_label(label_path, p48_39_path, DATA_SIZE)
    # Drop the first 100,000 frames and keep the remaining 200,000.
    X = X[100000:, :]
    Y = Y[100000:]

    # Shuffle the first half of the remaining frames; keep the second half in order.
    train_size = int(len(Y) * 0.5)
    perm = np.random.permutation(train_size)
    perm = np.concatenate((perm, list(range(train_size, len(Y)))))
    X = X[perm, :]
    Y = Y[perm]
    print(X.shape, Y.shape)

    X_train = X[:train_size, :]
    X_test = X[train_size:, :]
    Y_train = Y[:train_size]
    Y_test = Y[train_size:]

    # Alpha, Beta, Gamma = mnist.load_data('mnist3.pkl.gz')
    # X_train, Y_train = Alpha
    # X_test, Y_test = Gamma

    Aval, model = train_experiment(X_train, Y_train, X_test, Y_test, 2000)
    predict_submit(model, submit_feature_path, 'submit.csv', p48_39_path)
    predict_submit(model, submit_feature_path_2, 'test.csv', p48_39_path)
# load data
# 16,196
# T = "all_bonafid_split_1s"
T = "TIMIT_split_1s"
X_s = read_data.read_dataset(r'D:\GYK\WaveNet\data\{}'.format(T))
# 14,663
F = "TIMIT_WavNet_split_1s"
X_c = read_data.read_dataset(r'D:\GYK\WaveNet\data\{}'.format(F))
X = np.vstack((X_s, X_c))

# Write the label file: one label per X_s sample (m) and per X_c sample (n).
m = X_s.shape[0]
n = X_c.shape[0]
label.creat_label(m, n)
y = read_data.read_label(r'.\label.txt')

# data preprocess
if isSplit:
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)
else:
    X_train = X
    y_train = y
    # Load a separate test set instead of splitting.
    t = "TIMIT_split_1s"
    X_s = read_data.read_dataset(r'D:\GYK\WaveNet\data\{}'.format(t))
    # 14,663
    f = "TIMIT_wavnet_split_low2"
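# The scripts above call label.creat_label(m, n) and then read back label.txt.
# The project's own helper is not shown in this section; below is a minimal
# sketch of what such a helper could look like, assuming bona fide clips are
# labelled 1 and WaveNet-generated clips 0, one label per line. Both the
# polarity and the file layout are assumptions, not confirmed by the source.
def creat_label(m, n, path='label.txt'):
    """Write m positive (bona fide) labels followed by n negative (spoofed) labels."""
    with open(path, 'w') as fh:
        for _ in range(m):
            fh.write('1\n')
        for _ in range(n):
            fh.write('0\n')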
def result(data, label, classes, model_name=None):
    x_train1, x_test1, y_train1, y_test1 = train_test_split(data, label,
                                                            test_size=0.2, shuffle=True)
    model = temp_model.multi_output(x_train1, y_train1, classes)
    # Expose the intermediate "output_layer" of the trained model for prediction.
    new_model = Model(inputs=model.get_layer("input_1").input,
                      outputs=model.get_layer("output_layer").output)
    if model_name is not None:
        model.save(model_name)
    pred = new_model.predict(x_test1)
    results = analysis.confusion_matrix(y_test1, pred, classes)
    return results


if __name__ == "__main__":
    inter = read_data.read_inter("./datas/inter-4-150s-upsampled.npy", time_stamp=5)
    # f_domain = read_data.read_frequency("./datas/inter-4-150s-upsampled.npy", time_stamp=5)
    # np.save("./datas/f_domain-4-150s-upsampled.npy", f_domain)
    # f_domain = np.load("./datas/f_domain-4-150s-upsampled.npy")
    label = read_data.read_label("./datas/label-4-150s-upsampled.npy")
    inter_result = result(inter, label, classes=4)
    # inter_result = result(inter, label, classes=4, model_name="./models/inter-4-150-64-up-bn")
    # f_result = result(f_domain, label, classes=4)
    # f_result = result(f_domain, label, classes=4, model_name="./models/f_domain-4-150-up")
    # f_result = result_multi(f_domain, label, classes=4, model_name="./models/f_domain-4-150-32-up-multiout")
    print(f"inter result: {inter_result}")
    # print(f"\n\nfrequency result: {f_result}")
def load_data(dataset):
    ''' Loads the dataset

    :type dataset: string
    :param dataset: the path to the dataset (here MNIST)
    '''

    #############
    # LOAD DATA #
    #############

    # Download the MNIST dataset if it is not present
    # data_dir, data_file = os.path.split(dataset)
    # if data_dir == "" and not os.path.isfile(dataset):
    #     # Check if dataset is in the data directory.
    #     new_path = os.path.join(
    #         os.path.split(__file__)[0],
    #         "..",
    #         "data",
    #         dataset
    #     )
    #     if os.path.isfile(new_path) or data_file == 'mnist.pkl.gz':
    #         dataset = new_path
    # if (not os.path.isfile(dataset)) and data_file == 'mnist.pkl.gz':
    #     import urllib
    #     origin = (
    #         'http://www.iro.umontreal.ca/~lisa/deep/data/mnist/mnist.pkl.gz'
    #     )
    #     print('Downloading data from %s' % origin)
    #     urllib.urlretrieve(origin, dataset)

    print('... loading data')

    # Load the dataset
    # f = gzip.open(dataset, 'rb')
    # train_set, valid_set, test_set = cPickle.load(f)
    # f.close()

    orig_path = '/home/step5/MLDS_Data/MLDS_HW1_RELEASE_v1/'
    feature_path = orig_path + 'fbank/train.ark'
    label_path = orig_path + 'label/train_sorted.lab'
    p48_39_path = '../data/48_39.map'
    DATA_SIZE = 100000
    X = read_data.read_feature(feature_path, DATA_SIZE)
    Y = read_data.read_label(label_path, p48_39_path, DATA_SIZE)

    # train_set, valid_set, test_set format: tuple(input, target)
    # input is a numpy.ndarray of 2 dimensions (a matrix) where each row
    # corresponds to an example. target is a numpy.ndarray of 1 dimension
    # (a vector) that has the same length as the number of rows in the input.
    # It gives the target to the example with the same index in the input.

    def shared_dataset(data_xy, borrow=True):
        """ Function that loads the dataset into shared variables

        The reason we store our dataset in shared variables is to allow
        Theano to copy it into the GPU memory (when code is run on GPU).
        Since copying data into the GPU is slow, copying a minibatch every
        time it is needed (the default behaviour if the data is not in a
        shared variable) would lead to a large decrease in performance.
        """
        data_x, data_y = data_xy
        shared_x = theano.shared(numpy.asarray(data_x,
                                               dtype=theano.config.floatX),
                                 borrow=borrow)
        shared_y = theano.shared(numpy.asarray(data_y,
                                               dtype=theano.config.floatX),
                                 borrow=borrow)
        # When storing data on the GPU it has to be stored as floats,
        # therefore we will store the labels as ``floatX`` as well
        # (``shared_y`` does exactly that). But during our computations
        # we need them as ints (we use labels as indices, and if they are
        # floats it doesn't make sense), therefore instead of returning
        # ``shared_y`` we will have to cast it to int. This little hack
        # lets us get around this issue.
        return shared_x, T.cast(shared_y, 'int32')

    # 80% train / 10% validation / 10% test split.
    train_size = int(len(Y) * 0.8)
    valid_size = int(len(Y) * 0.9)
    train_set = (X[:train_size, :], Y[:train_size])
    valid_set = (X[train_size:valid_size, :], Y[train_size:valid_size])
    test_set = (X[valid_size:, :], Y[valid_size:])

    test_set_x, test_set_y = shared_dataset(test_set)
    valid_set_x, valid_set_y = shared_dataset(valid_set)
    train_set_x, train_set_y = shared_dataset(train_set)

    rval = [(train_set_x, train_set_y), (valid_set_x, valid_set_y),
            (test_set_x, test_set_y)]
    return rval
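# A minimal sketch of consuming load_data's return value with Theano minibatch
# slicing, in the style of the DeepLearning tutorials that this function is
# adapted from. The symbolic variables below (index, x, y) and the placeholder
# "cost" are hypothetical names, not part of this project's model code, and the
# example assumes the fbank files above are available on disk.
import theano
import theano.tensor as T

datasets = load_data('mnist.pkl.gz')  # the path argument is unused by this version
(train_set_x, train_set_y), (valid_set_x, valid_set_y), (test_set_x, test_set_y) = datasets

batch_size = 500
n_train_batches = train_set_x.get_value(borrow=True).shape[0] // batch_size

index = T.lscalar('index')   # minibatch index
x = T.matrix('x')            # fbank feature rows
y = T.ivector('y')           # phone labels

# Placeholder cost so the example compiles; a real classifier's cost would go here.
cost = T.mean(x) + 0 * T.sum(y)

train_model = theano.function(
    inputs=[index],
    outputs=cost,
    givens={
        x: train_set_x[index * batch_size:(index + 1) * batch_size],
        y: train_set_y[index * batch_size:(index + 1) * batch_size],
    },
)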
from keras.callbacks import EarlyStopping, ReduceLROnPlateau, ModelCheckpoint
from sklearn.metrics import confusion_matrix
from keras.models import load_model
from keras.optimizers import Adam, RMSprop, Adamax
import matplotlib.pyplot as plt

# load data
# 16,196
X_s = read_data.read_dataset(r'E:\GYK\google_tts\TIMIT_split_1s')
# 14,663
X_c = read_data.read_dataset(r'E:\GYK\google_tts\TIMIT_wavnet_split_low2')
X = np.vstack((X_s, X_c))
y = read_data.read_label(r'.\label.txt')

# data preprocess
X_train, X_test, y_train, y_test_1 = train_test_split(X, y, test_size=0.2, random_state=0)
# Add a channel dimension for the 1-D convolutional model.
X_train = X_train.reshape(-1, X.shape[1], 1)
X_test = X_test.reshape(-1, X.shape[1], 1)
y_train = np_utils.to_categorical(y_train, num_classes=2)
y_test = np_utils.to_categorical(y_test_1, num_classes=2)
print('...', y_test_1)
print('...', X_train.shape)

# Build model
# model = S_ResNet.s_res()
# model = mymodel.model_1(X)
# model = load_model('model.h5')
model = mymodel.origin(X)
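# The callbacks and optimizers imported above are not used in this snippet
# itself. Below is a minimal sketch of how they are typically wired up for a
# binary spoof-detection model like this one. The hyperparameters (learning
# rate, patience, epochs, batch size) are illustrative assumptions, not values
# taken from the original project.
model.compile(optimizer=Adam(lr=1e-3),
              loss='categorical_crossentropy',
              metrics=['accuracy'])

callbacks = [
    EarlyStopping(monitor='val_loss', patience=5),
    ReduceLROnPlateau(monitor='val_loss', factor=0.5, patience=3),
    ModelCheckpoint('model.h5', monitor='val_loss', save_best_only=True),
]

history = model.fit(X_train, y_train,
                    validation_data=(X_test, y_test),
                    epochs=50, batch_size=64,
                    callbacks=callbacks)

# Confusion matrix on the held-out split, using the label vector kept before
# one-hot encoding (y_test_1).
pred = np.argmax(model.predict(X_test), axis=1)
print(confusion_matrix(y_test_1, pred))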
from sklearn.metrics import confusion_matrix

# load data
T = "TIMIT_split_1s"
X_s = read_data.read_dataset_mfc(r'D:\GYK\WaveNet\data\{}'.format(T))
print('read X_s done...')
F = "TIMIT_WavNet_split_1s"
X_c = read_data.read_dataset_mfc(r'D:\GYK\WaveNet\data\{}'.format(F))
print('read X_c done...')
X = np.vstack((X_s, X_c))

m = X_s.shape[0]
n = X_c.shape[0]
label.creat_label(m, n)
y = read_data.read_label(r'D:\GYK\WaveNet\cnn\label.txt')

# T = "all_bonafid_split_1s"
# X_s = read_data.read_dataset_mfc(r'E:\GYK\google_tts\data\{}'.format(T))
# print('read X_s done...')
# F = "all_SS_1_split_1s"
# X_c = read_data.read_dataset_mfc(r'E:\GYK\google_tts\data\{}'.format(F))
# print('read X_c done...')
# X_test = np.vstack((X_s, X_c))
# m = X_s.shape[0]
# n = X_c.shape[0]
# label.creat_label(m, n)
# y_test = read_data.read_label(r'E:\GYK\google_tts\cnn\label.txt')