def load_data_new(main_path, main_file, train_samples_id, dimension, step=2, train=False, preprocess=False):
    """Load the samples selected by *train_samples_id* and stack them for training.

    Reads the index file via ``load_file``, then loads each selected sample with
    ``load_sample`` and concatenates everything into a single pair of arrays.

    Parameters
    ----------
    main_path : str
        Root directory prepended to each per-sample path from the index file.
    main_file : str
        Index (CSV) file name, relative to ``main_path``.
    train_samples_id : iterable of int
        Row indices (into the index file) of the samples to load.
    dimension : int
        Length of each 1-D signal window produced by ``load_sample``.
    step, train, preprocess
        Forwarded unchanged to ``load_sample``.

    Returns
    -------
    (train_x, train_y) : tuple of np.ndarray
        ``train_x`` has shape ``(n, dimension, 1)`` (trailing channel axis added),
        ``train_y`` has shape ``(n_labels, 1)``.
    """
    ids, paths, names, sampling_rates, labels, explanations, partitions, intervals = load_file(
        main_path, main_file)

    # Accumulate per-sample chunks in lists and concatenate once at the end.
    # The original code grew the arrays with np.append inside the loop, which
    # reallocates and copies the whole array every iteration (O(n^2) total).
    x_chunks = [np.empty((0, dimension))]
    y_chunks = [np.empty((0,))]
    for i in train_samples_id:
        sample_x, sample_y = load_sample(main_path + paths[i], names[i], labels[i],
                                         sampling_rates[i], explanations[i], intervals[i],
                                         dimension=dimension, step=step,
                                         train=train, preprocess=preprocess)
        x_chunks.append(sample_x)
        # np.append without an axis flattens its inputs; np.ravel reproduces that.
        y_chunks.append(np.ravel(sample_y))
        #print('%d label = %s, #sample = %d %d' %(i,labels[i], sample_x.shape[0], sample_y.shape[0]))
    train_x = np.concatenate(x_chunks, axis=0)
    train_y = np.concatenate(y_chunks)

    # Add a trailing singleton channel axis: (n, dimension) -> (n, dimension, 1).
    train_x = np.reshape(train_x, [train_x.shape[0], dimension, 1])
    train_y = np.reshape(train_y, [train_y.shape[0], 1])
    # NOTE(review): despite the "#sample0/#sample1" wording, this prints the x and
    # y row counts (which should match) — kept byte-identical for compatibility.
    print('Total load result: #sample0=%d, #sample1=%d' % (train_x.shape[0], train_y.shape[0]))
    return train_x, train_y
def load_data(main_path, main_file, train_samples_id, dimension1, dimension2, step=2, train=False, preprocess=False):
    """Load R-based samples selected by *train_samples_id* and stack them.

    Reads the index file via ``load_file``, loads each selected sample with
    ``load_sample_rbased``, concatenates the results, and prints per-sample and
    global label/sample counters.

    Parameters
    ----------
    main_path : str
        Root directory prepended to each per-sample path from the index file.
    main_file : str
        Index (CSV) file name, relative to ``main_path``.
    train_samples_id : iterable of int
        Row indices (into the index file) of the samples to load.
    dimension1, dimension2 : int
        Per-window dimensions forwarded to ``load_sample_rbased``; samples are
        accumulated with shape ``(n, dimension1, dimension2)``.
    step, train
        Forwarded unchanged to ``load_sample_rbased``.
    preprocess : bool
        Accepted for signature compatibility with ``load_data_new``; not used here.

    Returns
    -------
    (train_x, train_y) : tuple of np.ndarray
        ``train_x`` has shape ``(n, dimension2, dimension1)`` after the final
        reshape; ``train_y`` is the flat concatenation of all sample labels.
    """
    global_counter_label0 = 0
    global_counter_label1 = 0
    global_counter_sample0 = 0
    global_counter_sample1 = 0
    ids, paths, names, sampling_rates, labels, explanations, partitions, intervals = load_file(
        main_path, main_file)

    # Accumulate chunks and concatenate once: the original np.append-in-a-loop
    # copies the full arrays on every iteration (O(n^2) total).
    x_chunks = [np.empty((0, dimension1, dimension2))]
    y_chunks = [np.empty((0,))]
    for i in train_samples_id:
        sample_x, sample_y, counter_label0, counter_label1, counter_sample0, counter_sample1 = load_sample_rbased(
            main_path + paths[i], names[i], labels[i], sampling_rates[i], explanations[i],
            dimension1=dimension1, dimension2=dimension2, step=step, train=train)
        x_chunks.append(sample_x)
        # np.append without an axis flattens its inputs; np.ravel reproduces that.
        y_chunks.append(np.ravel(sample_y))
        print('%d #label0 = %d, #label1=%d, #sample0=%d, #sample1=%d' %
              (i, counter_label0, counter_label1, counter_sample0, counter_sample1))
        global_counter_label0 += counter_label0
        global_counter_label1 += counter_label1
        global_counter_sample0 += counter_sample0
        global_counter_sample1 += counter_sample1
    train_x = np.concatenate(x_chunks, axis=0)
    train_y = np.concatenate(y_chunks)

    # NOTE(review): this reshape swaps the sizes of the last two axes WITHOUT
    # transposing the data (reshape reorders memory, transpose does not) — the
    # original "Hey hey check this line!" comment flagged the same concern.
    # Kept byte-equivalent to preserve existing behavior; confirm intent.
    train_x = np.reshape(train_x, [train_x.shape[0], dimension2, dimension1])
    # Bug fix: the original format string started with '%#label0' — '%#l' is an
    # invalid printf-style conversion and raised ValueError at runtime.
    print('#label0 = %d, #label1=%d, #sample0=%d, #sample1=%d' %
          (global_counter_label0, global_counter_label1, global_counter_sample0, global_counter_sample1))
    return train_x, train_y
71, 173, 331, 327, 336, 159, 35, 178, 38, 66, 240, 18, 261, 371, 200, 238, 372, 126, 135, 256, 242, 107, 258, 32, 152, 153, 277, 286, 1, 232, 165, 223, 202, 333, 123, 300, 99, 53, 292, 63, 282, 212, 376, 183, 6, 12, 309, 56, 175, 14, 194, 218, 145, 297, 325, 114, 278, 298, 230, 79, 206, 203, 125, 7 ])) ''' main_path = '/Users/Zeynab/' #main_file = 'My data/In use/Data_v960213.csv' main_file = 'My data/In use/Data_v960412.csv' ''' main_path = '/home/mll/Golgooni/' main_file = 'My data/In use/Data_v960412.csv' ids, paths, names, sampling_rates, labels, explanations, partitions, intervals = load_file( main_path, main_file) ############################# Set parameters ################################# raw_dimension = 4000 mother_wavelet = 'db4' wav_level = 8 num_coefficient = 4 wav_dimension = 0 #wav_dimension = 22 + 22 + 38 + 69 rnn_layer = 'LSTM' rnn_hidden_node = 3 rnn_dropout = 0.4 batch_size = 1 epochs = 20