def generate(self, path_list):
    iter = 0
    epoch = 0
    pointer = 0
    path = path_list[epoch]
    n_file = len(path_list)
    data = h5py.File(path, 'r')
    x = data['x']
    y = data['y']
    batch_size = self._batch_size_
    n_samples = len(x)
    index = np.arange(n_samples)
    np.random.shuffle(index)
    while True:
        if (self._type_ == 'test') and (self._te_max_iter_ is not None):
            if iter == self._te_max_iter_:
                break
        iter += 1
        if pointer >= n_samples:
            # Advance to the next h5 file, wrapping around at the end.
            epoch += 1
            if epoch == n_file:
                epoch = 0
            path = path_list[epoch]
            print("start %s" % path)
            n_file = len(path_list)
            data = h5py.File(path, 'r')
            x = data['x']
            y = data['y']
            if (self._type_ == 'test') and (epoch == n_file - 1):
                break
            pointer = 0
            # Re-derive the shuffle index in case files differ in length.
            n_samples = len(x)
            index = np.arange(n_samples)
            np.random.shuffle(index)
        # h5py fancy indexing requires sorted (increasing) indices.
        batch_idx = index[pointer: min(pointer + batch_size, n_samples)]
        pointer += batch_size
        yield (pp_data.scale_on_3d(x[sorted(batch_idx)], self._scaler_),
               pp_data.scale_on_2d(y[sorted(batch_idx)], self._scaler_))
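# ---------------------------------------------------------------------------
# Added illustration (not from the original repo): a self-contained sketch of
# the batching pattern generate() implements — shuffle an index once, then
# slice chunks of it and sort them, since h5py fancy indexing requires
# increasing indices. The file name "demo.h5" and the (7, 257) frame layout
# are assumptions for the demo only.
# ---------------------------------------------------------------------------
if __name__ == "__main__" and False:  # flip to True to run the demo
    import numpy as np
    import h5py

    with h5py.File("demo.h5", "w") as f:  # tiny stand-in data file
        f["x"] = np.random.randn(10, 7, 257).astype("float32")
        f["y"] = np.random.randn(10, 257).astype("float32")

    with h5py.File("demo.h5", "r") as data:
        x, y = data["x"], data["y"]
        index = np.arange(len(x))
        np.random.shuffle(index)
        for pointer in range(0, len(x), 4):  # batch_size = 4
            batch_idx = sorted(index[pointer: pointer + 4])
            print(x[batch_idx].shape, y[batch_idx].shape)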
def train(args):
    """Train the neural network. Write out model every several iterations.

    Args:
      workspace: str, path of workspace.
      tr_snr: float, training SNR.
      te_snr: float, testing SNR.
      lr: float, learning rate.
    """
    print(args)
    workspace = args.workspace
    tr_snr = args.tr_snr
    te_snr = args.te_snr
    lr = args.lr
    iteration = args.iter

    # Load data.
    t1 = time.time()
    tr_hdf5_path = os.path.join(workspace, "packed_features", "spectrogram",
                                "train", "%ddb" % int(tr_snr), "data.h5")
    te_hdf5_path = os.path.join(workspace, "packed_features", "spectrogram",
                                "test", "%ddb" % int(te_snr), "data.h5")
    (tr_x1, tr_x2, tr_y1, tr_y2) = pp_data.load_hdf5(tr_hdf5_path)
    (te_x1, te_x2, te_y1, te_y2) = pp_data.load_hdf5(te_hdf5_path)
    print(tr_x1.shape, tr_y1.shape, tr_x2.shape, tr_y2.shape)
    print(te_x1.shape, te_y1.shape, te_x2.shape, te_y2.shape)
    print("Load data time: %s s" % (time.time() - t1,))

    batch_size = 500
    print("%d iterations / epoch" % int(tr_x1.shape[0] / batch_size))

    # Scale data. (Disabled: this variant consumes the features unscaled.)
    if False:
        t1 = time.time()
        scaler_path = os.path.join(workspace, "packed_features", "spectrogram",
                                   "train", "%ddb" % int(tr_snr), "scaler.p")
        scaler = pickle.load(open(scaler_path, 'rb'))
        tr_x1 = pp_data.scale_on_3d(tr_x1, scaler)
        tr_y1 = pp_data.scale_on_2d(tr_y1, scaler)
        te_x1 = pp_data.scale_on_3d(te_x1, scaler)
        te_y1 = pp_data.scale_on_2d(te_y1, scaler)
        tr_x2 = pp_data.scale_on_2d(tr_x2, scaler)
        tr_y2 = pp_data.scale_on_2d(tr_y2, scaler)
        te_x2 = pp_data.scale_on_2d(te_x2, scaler)
        te_y2 = pp_data.scale_on_2d(te_y2, scaler)
        print("Scale data time: %s s" % (time.time() - t1,))

    # Debug plot. (`pause` is an intentional NameError that halts execution.)
    if False:
        plt.matshow(tr_x[0:1000, 0, :].T, origin='lower', aspect='auto', cmap='jet')
        plt.show()
        pause

    # Build model.
    (_, n_concat, n_freq) = tr_x1.shape
    n_hid = 2048
    input_dim1 = (257 + 40 + 30) * 2
    input_dim2 = (257 + 40 + 30)
    out_dim1 = (257 + 40 + 30) * 2
    out_dim1_irm = 257 + 40 + 64
    out_dim2 = (257 + 40 + 30)
    out_dim2_irm = (257 + 40 + 64)

    # model = Sequential()
    # model.add(Flatten(input_shape=(n_concat, n_freq)))
    # model.add(Dense(n_hid, activation='relu'))
    # model.add(Dropout(0.2))
    # model.add(Dense(n_hid, activation='relu'))
    # model.add(Dropout(0.2))
    # model.add(Dense(n_hid, activation='relu'))
    # model.add(Dropout(0.2))
    # model.add(Dense(n_freq, activation='linear'))

    # Stage 1: predict linear and IRM targets from the context window.
    input1 = Input(shape=(n_concat, input_dim1), name='input1')
    layer = Flatten(name='flatten')(input1)
    layer = Dense(n_hid, activation='relu', name='dense1')(layer)
    layer = Dropout(0.2)(layer)
    layer = Dense(n_hid, activation='relu', name='dense2')(layer)
    layer = Dropout(0.2)(layer)
    partial_out1 = Dense(out_dim1, name='1_out_linear')(layer)
    partial_out1_irm = Dense(out_dim1_irm, name='1_out_irm', activation='sigmoid')(layer)
    out1 = concatenate([partial_out1, partial_out1_irm], name='out1')

    # Stage 2: refine using the stage-1 outputs plus the center frame.
    input2 = Input(shape=(input_dim2,), name='input2')
    layer = concatenate([input2, out1], name='merge')
    layer = Dense(n_hid, activation='relu', name='dense3')(layer)
    layer = Dropout(0.2)(layer)
    layer = Dense(n_hid, activation='relu', name='dense4')(layer)
    layer = Dropout(0.2)(layer)
    partial_out2 = Dense(out_dim2, name='2_out_linear')(layer)
    partial_out2_irm = Dense(out_dim2_irm, name='2_out_irm', activation='sigmoid')(layer)
    out2 = concatenate([partial_out2, partial_out2_irm], name='out2')

    model = Model(inputs=[input1, input2], outputs=[out1, out2])
    model.summary()
    sys.stdout.flush()
    model.compile(loss='mean_absolute_error',
                  optimizer=Adam(lr=lr, epsilon=1e-03))

    # Data generator.
    tr_gen = DataGenerator(batch_size=batch_size, type='train')
    eval_te_gen = DataGenerator(batch_size=batch_size, type='test', te_max_iter=100)
    eval_tr_gen = DataGenerator(batch_size=batch_size, type='test', te_max_iter=100)

    # Directories for saving models and training stats.
    model_dir = os.path.join(workspace, "models", "%ddb" % int(tr_snr))
    pp_data.create_folder(model_dir)
    stats_dir = os.path.join(workspace, "training_stats", "%ddb" % int(tr_snr))
    pp_data.create_folder(stats_dir)

    # Print loss before training.
    iter = 0
    tr_loss = eval(model, eval_tr_gen, tr_x1, tr_x2, tr_y1, tr_y2)
    te_loss = eval(model, eval_te_gen, te_x1, te_x2, te_y1, te_y2)
    print("Iteration: %d, tr_loss: %f, te_loss: %f" % (iter, tr_loss, te_loss))

    # Save out training stats.
    stat_dict = {'iter': iter,
                 'tr_loss': tr_loss,
                 'te_loss': te_loss, }
    stat_path = os.path.join(stats_dir, "%diters.p" % iter)
    cPickle.dump(stat_dict, open(stat_path, 'wb'), protocol=cPickle.HIGHEST_PROTOCOL)

    # Train.
    t1 = time.time()
    for (batch_x, batch_y) in tr_gen.generate(xs=[tr_x1, tr_x2], ys=[tr_y1, tr_y2]):
        loss = model.train_on_batch(batch_x, batch_y)
        iter += 1

        # Validate and save training stats.
        if iter % 100 == 0:
            tr_loss = eval(model, eval_tr_gen, tr_x1, tr_x2, tr_y1, tr_y2)
            te_loss = eval(model, eval_te_gen, te_x1, te_x2, te_y1, te_y2)
            print("Iteration: %d, tr_loss: %f, te_loss: %f" % (iter, tr_loss, te_loss))
            sys.stdout.flush()

            # Save out training stats.
            stat_dict = {'iter': iter,
                         'tr_loss': tr_loss,
                         'te_loss': te_loss, }
            stat_path = os.path.join(stats_dir, "%diters.p" % iter)
            cPickle.dump(stat_dict, open(stat_path, 'wb'), protocol=cPickle.HIGHEST_PROTOCOL)

        # Save model 20 times over the run (integer division avoids a float modulus).
        if iter % (iteration // 20) == 0:
            model_path = os.path.join(model_dir, "md_%diters.h5" % iter)
            model.save(model_path)
            print("Saved model to %s" % model_path)

        if iter == iteration + 1:
            break

    print("Training time: %s s" % (time.time() - t1,))
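# ---------------------------------------------------------------------------
# The eval() helper called by the training loops is defined elsewhere in the
# repo. A hedged sketch of what its call signature implies: draw the
# generator's te_max_iter batches and average the model's loss without
# updating weights. "eval_sketch" is a hypothetical name; treat this as an
# assumption about, not a copy of, the real helper.
# ---------------------------------------------------------------------------
import numpy as np

def eval_sketch(model, gen, x1, x2, y1, y2):
    losses = []
    for (batch_x, batch_y) in gen.generate(xs=[x1, x2], ys=[y1, y2]):
        # test_on_batch returns the (possibly multi-output) loss values.
        losses.append(np.mean(model.test_on_batch(batch_x, batch_y)))
    return float(np.mean(losses))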
tr_y = []
te_x = []
te_y = []
for i in h5_test_list:
    te_x_t, te_y_t = pp.load_hdf5(os.path.join(conf1.data_test_dir, i))
    te_x.append(te_x_t)
    te_y.append(te_y_t)
te_x = np.concatenate(te_x, axis=0)
te_y = np.concatenate(te_y, axis=0)

# Scale test data.
scaler = pickle.load(
    open(os.path.join(conf1.packed_feature_dir, 'test', 'scaler.p'), 'rb'))
te_x = pp.scale_on_3d(te_x, scaler)
te_y = pp.scale_on_2d(te_y, scaler)
print("Scale data time: %s s" % (time.time() - t1,))
print("Load data time: %s s" % (time.time() - t1,))

# conf.batch_size = 512
# print("%d iterations / epoch" % int(tr_x.shape[0] / conf1.batch_size))

# Load the first training chunk.
tr_x, tr_y = pp.load_hdf5(os.path.join(conf1.data_train_dir, h5_train_list[0]))

# Debug plot.
# if False:
#     plt.matshow(tr_x[0: 1000, 0, :].T, origin='lower', aspect='auto', cmap='jet')
#     plt.show()
#     pause
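# ---------------------------------------------------------------------------
# pp.scale_on_2d / pp.scale_on_3d live in the repo's pp_data module. A hedged
# sketch of their usual semantics in this code family: apply a fitted sklearn
# StandardScaler per frequency bin, flattening 3-D (n, n_concat, n_freq)
# input to 2-D first. The "_sketch" names mark these as illustrations.
# ---------------------------------------------------------------------------
import numpy as np

def scale_on_2d_sketch(x2d, scaler):
    # x2d: (n_samples, n_freq) -> standardized per frequency bin.
    return scaler.transform(x2d)

def scale_on_3d_sketch(x3d, scaler):
    # x3d: (n_samples, n_concat, n_freq); scale each frame independently.
    n, t, f = x3d.shape
    return scaler.transform(x3d.reshape(n * t, f)).reshape(n, t, f)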
def train(args):
    """Train the neural network. Write out model every several iterations.

    Args:
      workspace: str, path of workspace.
      tr_snr: float, training SNR.
      te_snr: float, testing SNR.
      lr: float, learning rate.
    """
    print(args)
    workspace = args.workspace
    tr_snr = args.tr_snr
    te_snr = args.te_snr
    lr = args.lr
    iteration = args.iter

    # Load data.
    t1 = time.time()
    tr_hdf5_path = os.path.join(workspace, "packed_features", "spectrogram",
                                "train", "%ddb" % int(tr_snr), "data.h5")
    te_hdf5_path = os.path.join(workspace, "packed_features", "spectrogram",
                                "test", "%ddb" % int(te_snr), "data.h5")

    tr_adapt_utt_path = os.path.join(workspace, "adaptive_utterance", "train",
                                     "adaptive_utterance_spec.p")
    te_adapt_utt_path = os.path.join(workspace, "adaptive_utterance", "test",
                                     "adaptive_utterance_spec.p")
    tr_adapt_utt = cPickle.load(open(tr_adapt_utt_path, 'rb'))
    te_adapt_utt = cPickle.load(open(te_adapt_utt_path, 'rb'))

    tr_adapt_utt_len_path = os.path.join(workspace, "adaptive_utterance", "train",
                                         "adaptive_utterance_max_len.p")
    te_adapt_utt_len_path = os.path.join(workspace, "adaptive_utterance", "test",
                                         "adaptive_utterance_max_len.p")
    tr_adapt_utt_len = cPickle.load(open(tr_adapt_utt_len_path, 'rb'))
    te_adapt_utt_len = cPickle.load(open(te_adapt_utt_len_path, 'rb'))
    max_len = max(tr_adapt_utt_len, te_adapt_utt_len)

    (tr_x1, tr_x2, tr_y1, tr_y2, tr_name) = pp_data.load_hdf5(tr_hdf5_path)
    (te_x1, te_x2, te_y1, te_y2, te_name) = pp_data.load_hdf5(te_hdf5_path)
    print(tr_x1.shape, tr_y1.shape, tr_x2.shape, tr_y2.shape)
    print(te_x1.shape, te_y1.shape, te_x2.shape, te_y2.shape)
    print("Load data time: %s s" % (time.time() - t1,))

    batch_size = 500
    print("%d iterations / epoch" % int(tr_x1.shape[0] / batch_size))

    # Scale data. (Disabled: this variant consumes the features unscaled.)
    if False:
        t1 = time.time()
        scaler_path = os.path.join(workspace, "packed_features", "spectrogram",
                                   "train", "%ddb" % int(tr_snr), "scaler.p")
        scaler = pickle.load(open(scaler_path, 'rb'))
        tr_x1 = pp_data.scale_on_3d(tr_x1, scaler)
        tr_y1 = pp_data.scale_on_2d(tr_y1, scaler)
        te_x1 = pp_data.scale_on_3d(te_x1, scaler)
        te_y1 = pp_data.scale_on_2d(te_y1, scaler)
        tr_x2 = pp_data.scale_on_2d(tr_x2, scaler)
        tr_y2 = pp_data.scale_on_2d(tr_y2, scaler)
        te_x2 = pp_data.scale_on_2d(te_x2, scaler)
        te_y2 = pp_data.scale_on_2d(te_y2, scaler)
        print("Scale data time: %s s" % (time.time() - t1,))

    # Debug plot.
    if False:
        plt.matshow(tr_x[0: 1000, 0, :].T, origin='lower', aspect='auto', cmap='jet')
        plt.show()
        pause

    # Build model.
    (_, n_concat, n_freq) = tr_x1.shape
    n_hid = 2048
    input_dim1 = (257 + 40 + 30) * 2
    input_dim2 = (257 + 40 + 30)
    out_dim1 = (257 + 40 + 30) * 2
    out_dim1_irm = 257 + 40 + 64
    out_dim2 = (257 + 40 + 30)
    out_dim2_irm = (257 + 40 + 64)
    num_factorize = 30

    def multiplication(pair_tensors):
        """Weighted sum of factorized hidden layers.

        :param pair_tensors:
            x: (num_factorize,)
            y: (num_factorize, n_hid)
        :return: (n_hid,) computed as sum(x[i] * y[i, :]) over i
        """
        x, y = pair_tensors
        return K.sum(tf.multiply(y, K.expand_dims(x, -1)), axis=1)

    # Adaptation branch: summarize the utterance spectrogram into a
    # num_factorize-way softmax weighting.
    adapt_input = Input(shape=(None,), name='adapt_input')
    layer = Reshape((-1, 257), name='reshape')(adapt_input)
    layer = Dense(512, activation='relu', name='adapt_dense1')(layer)
    layer = Dense(512, activation='relu', name='adapt_dense2')(layer)
    layer = Dense(num_factorize, activation='softmax', name='adapt_out')(layer)
    alpha = Lambda(lambda x: K.sum(x, axis=1), output_shape=(num_factorize,),
                   name='sequence_sum')(layer)

    # Stage 1 with a factorized first layer modulated by alpha.
    input1 = Input(shape=(n_concat, input_dim1), name='input1')
    layer = Flatten(name='flatten')(input1)
    layer = Dense(n_hid * num_factorize, name='dense0')(layer)
    layer = Reshape((num_factorize, n_hid), name='reshape2')(layer)
    layer = Lambda(multiplication, name='multiply')([alpha, layer])
    layer = Dense(n_hid, activation='relu', name='dense1')(layer)
    layer = Dropout(0.2)(layer)
    layer = Dense(n_hid, activation='relu', name='dense2')(layer)
    layer = Dropout(0.2)(layer)
    partial_out1 = Dense(out_dim1, name='1_out_linear')(layer)
    partial_out1_irm = Dense(out_dim1_irm, name='1_out_irm', activation='sigmoid')(layer)
    out1 = concatenate([partial_out1, partial_out1_irm], name='out1')

    # Stage 2.
    input2 = Input(shape=(input_dim2,), name='input2')
    layer = concatenate([input2, out1], name='merge')
    layer = Dense(n_hid, activation='relu', name='dense3')(layer)
    layer = Dropout(0.2)(layer)
    layer = Dense(n_hid, activation='relu', name='dense4')(layer)
    layer = Dropout(0.2)(layer)
    partial_out2 = Dense(out_dim2, name='2_out_linear')(layer)
    partial_out2_irm = Dense(out_dim2_irm, name='2_out_irm', activation='sigmoid')(layer)
    out2 = concatenate([partial_out2, partial_out2_irm], name='out2')

    model = Model(inputs=[input1, input2, adapt_input], outputs=[out1, out2])
    model.summary()
    sys.stdout.flush()
    model.compile(loss='mean_absolute_error',
                  optimizer=Adam(lr=lr, epsilon=1e-03))

    # Data generator.
    tr_gen = DataGenerator(batch_size=batch_size, type='train', max_len=max_len)
    eval_te_gen = DataGenerator(batch_size=batch_size, type='test', te_max_iter=100, max_len=max_len)
    eval_tr_gen = DataGenerator(batch_size=batch_size, type='test', te_max_iter=100, max_len=max_len)

    # Directories for saving models and training stats.
    model_dir = os.path.join(workspace, "models", "%ddb" % int(tr_snr))
    pp_data.create_folder(model_dir)
    stats_dir = os.path.join(workspace, "training_stats", "%ddb" % int(tr_snr))
    pp_data.create_folder(stats_dir)

    # Print loss before training.
    iter = 0
    tr_loss = eval(model, eval_tr_gen, tr_x1, tr_x2, tr_y1, tr_y2, tr_name, tr_adapt_utt)
    te_loss = eval(model, eval_te_gen, te_x1, te_x2, te_y1, te_y2, te_name, te_adapt_utt)
    print("Iteration: %d, tr_loss: %f, te_loss: %f" % (iter, tr_loss, te_loss))

    # Save out training stats.
    stat_dict = {'iter': iter,
                 'tr_loss': tr_loss,
                 'te_loss': te_loss, }
    stat_path = os.path.join(stats_dir, "%diters.p" % iter)
    cPickle.dump(stat_dict, open(stat_path, 'wb'), protocol=cPickle.HIGHEST_PROTOCOL)

    # Train.
    t1 = time.time()
    for (batch_x, batch_y) in tr_gen.generate([tr_x1, tr_x2, tr_name],
                                              [tr_y1, tr_y2], tr_adapt_utt):
        loss = model.train_on_batch(batch_x, batch_y)
        iter += 1

        # Validate and save training stats.
        if iter % 100 == 0:
            tr_loss = eval(model, eval_tr_gen, tr_x1, tr_x2, tr_y1, tr_y2, tr_name, tr_adapt_utt)
            te_loss = eval(model, eval_te_gen, te_x1, te_x2, te_y1, te_y2, te_name, te_adapt_utt)
            print("Iteration: %d, tr_loss: %f, te_loss: %f" % (iter, tr_loss, te_loss))
            sys.stdout.flush()

            # Save out training stats.
            stat_dict = {'iter': iter,
                         'tr_loss': tr_loss,
                         'te_loss': te_loss, }
            stat_path = os.path.join(stats_dir, "%diters.p" % iter)
            cPickle.dump(stat_dict, open(stat_path, 'wb'), protocol=cPickle.HIGHEST_PROTOCOL)

        # Save model 20 times over the run (integer division avoids a float modulus).
        if iter % (iteration // 20) == 0:
            model_path = os.path.join(model_dir, "md_%diters.h5" % iter)
            model.save(model_path)
            print("Saved model to %s" % model_path)

        if iter == iteration + 1:
            break

    print("Training time: %s s" % (time.time() - t1,))
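# ---------------------------------------------------------------------------
# Added sanity check (not in the original file) for the multiplication()
# Lambda above. Per sample, with alpha of shape (num_factorize,) and a weight
# stack W of shape (num_factorize, n_hid), the layer computes
# sum_i alpha[i] * W[i, :]; in the Keras graph the summed axis is axis=1
# because axis 0 is the batch. Pure NumPy, runnable standalone.
# ---------------------------------------------------------------------------
if __name__ == "__main__" and False:  # flip to True to run the check
    import numpy as np

    num_factorize, n_hid = 30, 2048
    alpha = np.random.rand(num_factorize)
    W = np.random.randn(num_factorize, n_hid)
    out = np.sum(W * alpha[:, None], axis=0)  # what the Lambda computes
    assert np.allclose(out, np.einsum("i,ij->j", alpha, W))
    print("multiplication() semantics check passed:", out.shape)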
def train(args):
    """Train the neural network. Write out model every several iterations.

    Args:
      workspace: str, path of workspace.
      tr_snr: float, training SNR.
      te_snr: float, testing SNR.
      lr: float, learning rate.
    """
    print(args)
    workspace = args.workspace
    tr_snr = args.tr_snr
    te_snr = args.te_snr
    lr = args.lr

    # Load data.
    t1 = time.time()
    tr_hdf5_path = os.path.join(workspace, "packed_features", "spectrogram",
                                "train", "%ddb" % int(tr_snr), "data.h5")
    te_hdf5_path = os.path.join(workspace, "packed_features", "spectrogram",
                                "test", "%ddb" % int(te_snr), "data.h5")
    (tr_x, tr_y) = pp_data.load_hdf5(tr_hdf5_path)
    (te_x, te_y) = pp_data.load_hdf5(te_hdf5_path)
    print(tr_x.shape, tr_y.shape)
    print(te_x.shape, te_y.shape)
    print("Load data time: %s s" % (time.time() - t1,))

    batch_size = 500
    print("%d iterations / epoch" % int(tr_x.shape[0] / batch_size))

    # Scale data.
    if True:
        t1 = time.time()
        scaler_path = os.path.join(workspace, "packed_features", "spectrogram",
                                   "train", "%ddb" % int(tr_snr), "scaler.p")
        scaler = pickle.load(open(scaler_path, 'rb'))
        tr_x = pp_data.scale_on_3d(tr_x, scaler)
        tr_y = pp_data.scale_on_2d(tr_y, scaler)
        te_x = pp_data.scale_on_3d(te_x, scaler)
        te_y = pp_data.scale_on_2d(te_y, scaler)
        print("Scale data time: %s s" % (time.time() - t1,))

    # Debug plot.
    if False:
        plt.matshow(tr_x[0:1000, 0, :].T, origin='lower', aspect='auto', cmap='jet')
        plt.show()
        pause

    # Build model.
    (_, n_concat, n_freq) = tr_x.shape
    n_hid = 2048

    model = Sequential()
    model.add(Flatten(input_shape=(n_concat, n_freq)))
    model.add(Dense(n_hid, activation='relu'))
    model.add(Dropout(0.2))
    model.add(Dense(n_hid, activation='relu'))
    model.add(Dropout(0.2))
    model.add(Dense(n_hid, activation='relu'))
    model.add(Dropout(0.2))
    model.add(Dense(n_freq, activation='linear'))
    model.summary()

    model.compile(loss='mean_absolute_error', optimizer=Adam(lr=lr))

    # Data generator.
    tr_gen = DataGenerator(batch_size=batch_size, type='train')
    eval_te_gen = DataGenerator(batch_size=batch_size, type='test', te_max_iter=100)
    eval_tr_gen = DataGenerator(batch_size=batch_size, type='test', te_max_iter=100)

    # Directories for saving models and training stats.
    model_dir = os.path.join(workspace, "models", "%ddb" % int(tr_snr))
    pp_data.create_folder(model_dir)
    stats_dir = os.path.join(workspace, "training_stats", "%ddb" % int(tr_snr))
    pp_data.create_folder(stats_dir)

    # Print loss before training.
    iter = 0
    tr_loss = eval(model, eval_tr_gen, tr_x, tr_y)
    te_loss = eval(model, eval_te_gen, te_x, te_y)
    print("Iteration: %d, tr_loss: %f, te_loss: %f" % (iter, tr_loss, te_loss))

    # Save out training stats.
    stat_dict = {'iter': iter,
                 'tr_loss': tr_loss,
                 'te_loss': te_loss, }
    stat_path = os.path.join(stats_dir, "%diters.p" % iter)
    cPickle.dump(stat_dict, open(stat_path, 'wb'), protocol=cPickle.HIGHEST_PROTOCOL)

    # Train.
    t1 = time.time()
    for (batch_x, batch_y) in tr_gen.generate(xs=[tr_x], ys=[tr_y]):
        loss = model.train_on_batch(batch_x, batch_y)
        iter += 1

        # Validate and save training stats.
        if iter % 1000 == 0:
            tr_loss = eval(model, eval_tr_gen, tr_x, tr_y)
            te_loss = eval(model, eval_te_gen, te_x, te_y)
            print("Iteration: %d, tr_loss: %f, te_loss: %f" % (iter, tr_loss, te_loss))

            # Save out training stats.
            stat_dict = {'iter': iter,
                         'tr_loss': tr_loss,
                         'te_loss': te_loss, }
            stat_path = os.path.join(stats_dir, "%diters.p" % iter)
            cPickle.dump(stat_dict, open(stat_path, 'wb'), protocol=cPickle.HIGHEST_PROTOCOL)

        # Save model.
        if iter % 5000 == 0:
            model_path = os.path.join(model_dir, "md_%diters.h5" % iter)
            model.save(model_path)
            print("Saved model to %s" % model_path)

        # Stop just after 10000 iterations.
        if iter == 10001:
            break

    print("Training time: %s s" % (time.time() - t1,))
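# ---------------------------------------------------------------------------
# Hedged inference sketch (added): load one of the checkpoints written above
# and run it on a dummy batch. The checkpoint path and the (7, 257) input
# layout are assumptions; substitute the shapes printed during training.
# ---------------------------------------------------------------------------
if __name__ == "__main__" and False:  # flip on once a training run exists
    import numpy as np
    from keras.models import load_model

    model = load_model("workspace/models/0db/md_10000iters.h5")
    dummy = np.random.randn(1, 7, 257).astype("float32")  # (batch, n_concat, n_freq)
    enhanced = model.predict(dummy)
    # The network predicts scaled log-magnitudes; invert with the training
    # scaler (scaler.inverse_transform) before waveform reconstruction.
    print(enhanced.shape)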
def train(args):
    """Train the neural network. Write out model every several iterations.

    Args:
      workspace: str, path of workspace.
      tr_snr: float, training SNR.
      te_snr: float, testing SNR.
      lr: float, learning rate.
    """
    print(args)
    workspace = args.workspace
    tr_snr = args.tr_snr
    te_snr = args.te_snr
    lr = args.lr
    snr_arr = [0, 5, 10, 15]
    """
    workspace = "workspace"
    tr_snr = 0
    te_snr = 0
    lr = 1e-4
    """

    # Load data for every SNR and concatenate.
    t1 = time.time()
    for i in snr_arr:
        tr_snr = i
        te_snr = i
        tr_hdf5_path = os.path.join(workspace, "packed_features", "spectrogram",
                                    "train", "%ddb" % int(tr_snr), "data.h5")
        te_hdf5_path = os.path.join(workspace, "packed_features", "spectrogram",
                                    "test", "%ddb" % int(te_snr), "data.h5")
        (tr_x, tr_y, tr_n) = pp_data.load_hdf5(tr_hdf5_path)  # zxy tr_n
        (te_x, te_y, te_n) = pp_data.load_hdf5(te_hdf5_path)  # zxy te_n
        print(tr_x.shape, tr_y.shape)

        # Scale data.
        if True:
            t2 = time.time()
            scaler_path = os.path.join(workspace, "packed_features", "spectrogram",
                                       "train", "%ddb" % int(tr_snr), "scaler.p")
            scaler = pickle.load(open(scaler_path, 'rb'))
            tr_x = pp_data.scale_on_3d(tr_x, scaler)
            tr_y = pp_data.scale_on_2d(tr_y, scaler)
            # tr_n = pp_data.scale_on_2d(tr_n, scaler)  # zxy
            te_x = pp_data.scale_on_3d(te_x, scaler)
            te_y = pp_data.scale_on_2d(te_y, scaler)
            # te_n = pp_data.scale_on_2d(te_n, scaler)  # zxy
            print("Scale data(%sdb) time: %s s" % (tr_snr, time.time() - t2,))

        # Append data.
        if i == 0:
            tr_x_all = tr_x
            tr_y_all = tr_y
            te_x_all = te_x
            te_y_all = te_y
        else:
            tr_x_all = np.concatenate((tr_x_all, tr_x), axis=0)
            tr_y_all = np.concatenate((tr_y_all, tr_y), axis=0)
            te_x_all = np.concatenate((te_x_all, te_x), axis=0)
            te_y_all = np.concatenate((te_y_all, te_y), axis=0)

    print(tr_x_all.shape, tr_y_all.shape)  # zxy tr_n.shape
    print(te_x_all.shape, te_y_all.shape)  # zxy te_n.shape
    print("Load data time: %s s" % (time.time() - t1,))

    batch_size = 100
    print("%d iterations / epoch" % int(tr_x_all.shape[0] / batch_size))

    # Debug plot.
    if False:
        plt.matshow(tr_x[0: 1000, 0, :].T, origin='lower', aspect='auto', cmap='jet')
        plt.show()
        pause

    # Build model.
    (_, n_concat, n_freq) = tr_x.shape

    # 1. Load pre-model by Xu.
    model_path = os.path.join("premodel",
                              "sednn_keras_logMag_Relu2048layer1_1outFr_7inFr_dp0.2_weights.75-0.00.hdf5")
    pre_model = load_model(model_path)
    # pre_model.summary()

    # 2. Build train model.
    n_hid = 2048

    # Input: feature_x.
    main_input = Input(shape=(n_concat, n_freq), name='main_input')
    x = Flatten(input_shape=(n_concat, n_freq))(main_input)

    # 2.1 Pre-train to get feature_x // should be called transfer learning, 2018-7-8 experiment13.
    # x = pre_model(x)
    # x = (pre_model.get_layer('input_1'))(x)
    # x = (pre_model.get_layer('dense_1'))(x)
    # x = (Dense(n_hid, activation='linear'))(x)
    ##
    # model_mid = Model(inputs=pre_model.input, outputs=pre_model.get_layer('dense_1').output)
    # model_mid.summary()
    ##
    # x = model_mid(x)
    x = (Dense(n_hid, activation='linear'))(x)
    """
    x = (LSTM(n_hid, activation='tanh', recurrent_activation='hard_sigmoid',
              use_bias=True, kernel_initializer='glorot_uniform',
              recurrent_initializer='orthogonal', bias_initializer='zeros',
              unit_forget_bias=True, kernel_regularizer=None,
              recurrent_regularizer=None, bias_regularizer=None,
              activity_regularizer=None, kernel_constraint=None,
              recurrent_constraint=None, bias_constraint=None,
              dropout=0.0, recurrent_dropout=0.3))(main_input)
    x = (LSTM(n_hid, activation='tanh', recurrent_activation='hard_sigmoid',
              use_bias=True, kernel_initializer='glorot_uniform',
              recurrent_initializer='orthogonal', bias_initializer='zeros',
              unit_forget_bias=True, kernel_regularizer=None,
              recurrent_regularizer=None, bias_regularizer=None,
              activity_regularizer=None, kernel_constraint=None,
              recurrent_constraint=None, bias_constraint=None,
              dropout=0.0, recurrent_dropout=0.3))(x)
    x = (LSTM(n_hid, activation='tanh', recurrent_activation='hard_sigmoid',
              use_bias=True, kernel_initializer='glorot_uniform',
              recurrent_initializer='orthogonal', bias_initializer='zeros',
              unit_forget_bias=True, kernel_regularizer=None,
              recurrent_regularizer=None, bias_regularizer=None,
              activity_regularizer=None, kernel_constraint=None,
              recurrent_constraint=None, bias_constraint=None,
              dropout=0.0, recurrent_dropout=0.3))(x)
    """
    # Hidden 1.
    x = (Dense(n_hid, name='hidden_1'))(x)
    x = LeakyReLU(alpha=0.3)(x)
    x = Dropout(0.3)(x)
    x = (Dense(n_hid, activation='linear'))(x)
    # Hidden 2.
    x = (Dense(n_hid, name='hidden_2'))(x)
    x = LeakyReLU(alpha=0.3)(x)
    x = Dropout(0.3)(x)
    """
    x = (Dense(n_hid, activation='linear'))(x)
    # Hidden 3.
    x = (Dense(n_hid, name='hidden_3'))(x)
    x = LeakyReLU(alpha=0.3)(x)
    x = Dropout(0.3)(x)
    # x = (Dense(n_hid, activation='linear'))(x)
    # Hidden 4.
    x = (Dense(n_hid, name='hidden_4'))(x)
    x = LeakyReLU(alpha=0.3)(x)
    x = Dropout(0.5)(x)
    """
    # Output 1: estimated speech.
    output_y = Dense(n_freq, activation='linear', name='out_y')(x)

    # Define the noisy-to-speech model.
    model = Model(inputs=main_input, outputs=output_y)

    # Compile the model.
    model.compile(optimizer=Adam(lr=lr), loss='mae', metrics=['accuracy'])

    # Show model summary.
    model.summary()

    # Data generator.
    tr_gen = DataGenerator(batch_size=batch_size, type='train')
    eval_te_gen = DataGenerator(batch_size=batch_size, type='test', te_max_iter=100)
    eval_tr_gen = DataGenerator(batch_size=batch_size, type='test', te_max_iter=100)

    # Directories for saving models and training stats.
    model_dir = os.path.join(workspace, "models")  # , "%ddb" % int(tr_snr))
    pp_data.create_folder(model_dir)
    stats_dir = os.path.join(workspace, "training_stats")  # , "%ddb" % int(tr_snr))
    pp_data.create_folder(stats_dir)

    # Print loss before training. Use the concatenated multi-SNR data built
    # above, not just the last-loaded SNR.
    iter = 0
    tr_loss = eval(model, eval_tr_gen, tr_x_all, tr_y_all)
    te_loss = eval(model, eval_te_gen, te_x_all, te_y_all)
    print("Iteration: %d, tr_loss: %f, te_loss: %f" % (iter, tr_loss, te_loss))
    # tr_n_loss = eval(model, eval_tr_gen, tr_x, tr_n)  # zxy0523
    # te_n_loss = eval(model, eval_te_gen, te_x, te_n)
    # print("Iteration: %d, tr_n_loss: %f, te_n_loss: %f" % (iter, tr_n_loss, te_n_loss))

    # Save out training stats.
    stat_dict = {'iter': iter,
                 'tr_loss': tr_loss,
                 'te_loss': te_loss, }
    stat_path = os.path.join(stats_dir, "%diters.p" % iter)
    cPickle.dump(stat_dict, open(stat_path, 'wb'), protocol=cPickle.HIGHEST_PROTOCOL)

    # Train.
    t1 = time.time()
    for (batch_x, batch_y) in tr_gen.generate(xs=[tr_x_all], ys=[tr_y_all]):
        loss = model.train_on_batch(batch_x, batch_y)
        iter += 1

        # Validate and save training stats.
        if iter % 50 == 0:
            tr_loss = eval(model, eval_tr_gen, tr_x_all, tr_y_all)
            te_loss = eval(model, eval_te_gen, te_x_all, te_y_all)
            print("Iteration: %d, tr_loss: %f, te_loss: %f" % (iter, tr_loss, te_loss))

            # Save out training stats.
            stat_dict = {'iter': iter,
                         'tr_loss': tr_loss,
                         'te_loss': te_loss, }
            stat_path = os.path.join(stats_dir, "%diters.p" % iter)
            cPickle.dump(stat_dict, open(stat_path, 'wb'), protocol=cPickle.HIGHEST_PROTOCOL)

        # Save model.
        if iter % 3000 == 0:
            model_path = os.path.join(model_dir, "md_dnn2_%diters.h5" % iter)
            model.save(model_path)
            print("Saved model to %s" % model_path)

        if iter == 3001:
            break

    # zxy: final evaluation.
    resultz = model.evaluate(tr_x_all, tr_y_all)
    print("\nTrain Acc:")
    print(resultz)
    resultz = model.evaluate(te_x_all, te_y_all)
    print("\nTest Acc:")
    print(resultz)
    print(model.metrics_names)
    # zxy

    print("Training time: %s s" % (time.time() - t1,))
def train_noise(args):
    """Train the neural network to predict noise. Write out model every several iterations.

    Args:
      workspace: str, path of workspace.
      tr_snr: float, training SNR.
      te_snr: float, testing SNR.
      lr: float, learning rate.
    """
    print(args)
    workspace = args.workspace
    tr_snr = args.tr_snr
    te_snr = args.te_snr
    lr = args.lr
    """
    workspace = "workspace"
    tr_snr = 0
    te_snr = 0
    lr = 1e-4
    """

    # Load data.
    t1 = time.time()
    tr_hdf5_path = os.path.join(workspace, "packed_features", "spectrogram",
                                "train", "%ddb" % int(tr_snr), "data.h5")
    te_hdf5_path = os.path.join(workspace, "packed_features", "spectrogram",
                                "test", "%ddb" % int(te_snr), "data.h5")
    (tr_x, tr_y, tr_n) = pp_data.load_hdf5(tr_hdf5_path)  # zxy tr_n
    (te_x, te_y, te_n) = pp_data.load_hdf5(te_hdf5_path)  # zxy te_n
    print(tr_x.shape, tr_y.shape, tr_n.shape)  # zxy tr_n.shape
    print(te_x.shape, te_y.shape, te_n.shape)  # zxy te_n.shape
    print("Load data time: %s s" % (time.time() - t1,))

    batch_size = 500
    print("%d iterations / epoch" % int(tr_x.shape[0] / batch_size))

    # Scale data.
    if True:
        t1 = time.time()
        scaler_path = os.path.join(workspace, "packed_features", "spectrogram",
                                   "train", "%ddb" % int(tr_snr), "scaler.p")
        scaler = pickle.load(open(scaler_path, 'rb'))
        tr_x = pp_data.scale_on_3d(tr_x, scaler)
        # tr_y = pp_data.scale_on_2d(tr_y, scaler)
        tr_n = pp_data.scale_on_2d(tr_n, scaler)  # zxy
        te_x = pp_data.scale_on_3d(te_x, scaler)
        # te_y = pp_data.scale_on_2d(te_y, scaler)
        te_n = pp_data.scale_on_2d(te_n, scaler)  # zxy
        print("Scale data time: %s s" % (time.time() - t1,))

    # Debug plot.
    if False:
        plt.matshow(tr_x[0: 1000, 0, :].T, origin='lower', aspect='auto', cmap='jet')
        plt.show()
        pause

    # Build model.
    (_, n_concat, n_freq) = tr_x.shape

    # 1. Load pre-model by Xu.
    model_path = os.path.join("premodel",
                              "sednn_keras_logMag_Relu2048layer1_1outFr_7inFr_dp0.2_weights.75-0.00.hdf5")
    pre_model = load_model(model_path)

    # 2. Build train model.
    n_hid = 2048

    # Input: feature_x.
    main_input = Input(shape=(n_concat, n_freq), name='main_input')
    x = Flatten(input_shape=(n_concat, n_freq))(main_input)

    # 2.1 Pre-trained front end to get feature_x.
    x = pre_model(x)
    # Hidden 1.
    x = (Dense(n_hid))(x)
    x = LeakyReLU(alpha=0.3)(x)
    x = Dropout(0.3)(x)
    # Hidden 2.
    x = (Dense(n_hid))(x)
    x = LeakyReLU(alpha=0.3)(x)
    x = Dropout(0.3)(x)
    # Hidden 3.
    x = (Dense(n_hid))(x)
    x = LeakyReLU(alpha=0.3)(x)
    x = Dropout(0.3)(x)
    # Output 1: estimated noise.
    output_y = Dense(n_freq, activation='linear', name='out_y')(x)

    # Define the noisy-to-noise model.
    model = Model(inputs=main_input, outputs=output_y)

    # Compile the model.
    model.compile(optimizer=Adam(lr=lr), loss='mae', metrics=['accuracy'])

    # Show model summary.
    model.summary()

    # Data generator.
    tr_gen = DataGenerator(batch_size=batch_size, type='train')
    eval_te_gen = DataGenerator(batch_size=batch_size, type='test', te_max_iter=100)
    eval_tr_gen = DataGenerator(batch_size=batch_size, type='test', te_max_iter=100)

    # Directories for saving models and training stats.
    model_dir = os.path.join(workspace, "models", "%ddb_n" % int(tr_snr))
    pp_data.create_folder(model_dir)
    stats_dir = os.path.join(workspace, "training_stats", "%ddb_n" % int(tr_snr))
    pp_data.create_folder(stats_dir)

    # Print loss before training.
    iter = 0
    tr_loss = eval(model, eval_tr_gen, tr_x, tr_n)
    te_loss = eval(model, eval_te_gen, te_x, te_n)
    print("Iteration: %d, tr_loss: %f, te_loss: %f" % (iter, tr_loss, te_loss))
    # tr_n_loss = eval(model, eval_tr_gen, tr_x, tr_n)  # zxy0523
    # te_n_loss = eval(model, eval_te_gen, te_x, te_n)
    # print("Iteration: %d, tr_n_loss: %f, te_n_loss: %f" % (iter, tr_n_loss, te_n_loss))

    # Save out training stats.
    stat_dict = {'iter': iter,
                 'tr_loss': tr_loss,
                 'te_loss': te_loss, }
    stat_path = os.path.join(stats_dir, "%diters.p" % iter)
    cPickle.dump(stat_dict, open(stat_path, 'wb'), protocol=cPickle.HIGHEST_PROTOCOL)

    # Train.
    t1 = time.time()
    for (batch_x, batch_n) in tr_gen.generate(xs=[tr_x], ys=[tr_n]):
        loss = model.train_on_batch(batch_x, batch_n)
        iter += 1

        # Validate and save training stats.
        if iter % 100 == 0:
            tr_loss = eval(model, eval_tr_gen, tr_x, tr_n)
            te_loss = eval(model, eval_te_gen, te_x, te_n)
            print("Iteration: %d, tr_loss: %f, te_loss: %f" % (iter, tr_loss, te_loss))

            # Save out training stats.
            stat_dict = {'iter': iter,
                         'tr_loss': tr_loss,
                         'te_loss': te_loss, }
            stat_path = os.path.join(stats_dir, "%diters.p" % iter)
            cPickle.dump(stat_dict, open(stat_path, 'wb'), protocol=cPickle.HIGHEST_PROTOCOL)

        # Save model.
        if iter % 1000 == 0:
            model_path = os.path.join(model_dir, "md_%diters.h5" % iter)
            model.save(model_path)
            print("Saved model to %s" % model_path)

        if iter == 3001:
            break

    # zxy: final evaluation.
    resultz = model.evaluate(tr_x, tr_n)
    print("\nTrain Acc:")
    print(resultz)
    resultz = model.evaluate(te_x, te_n)
    print("\nTest Acc:")
    print(resultz)
    print(model.metrics_names)
    # zxy

    print("Training time: %s s" % (time.time() - t1,))
def train(workspace, tr_snr, te_snr, lr, model_name=None, force=False, iters=100000):
    """Train the neural network. Write out model every several iterations.

    Args:
      workspace: str, path of workspace.
      tr_snr: iterable of float, training SNRs.
      te_snr: float, testing SNR.
      lr: float, learning rate.
    """
    # Directories for saving models and training stats.
    if model_name is None:
        model_name = '_'.join([str(snr) for snr in tr_snr]) + 'ddbs'
    model_dir = os.path.join(workspace, "models", model_name)
    pp_data.create_folder(model_dir)
    stats_dir = os.path.join(workspace, "training_stats", model_name)
    pp_data.create_folder(stats_dir)

    model_path = os.path.join(model_dir, f"md_{iters}iters.h5")
    if os.path.isfile(model_path) and not force:
        print(f'Model already trained ({model_path})')
        return

    # Load data for every training SNR and concatenate.
    t1 = time.time()
    tr_x = None
    tr_y = None
    for snr in tr_snr:
        tr_hdf5_path = os.path.join(workspace, "packed_features", "spectrogram",
                                    "train", "%ddb" % int(snr), "data.h5")
        (X, y) = pp_data.load_hdf5(tr_hdf5_path)
        if tr_x is None:
            tr_x = X
            tr_y = y
        else:
            tr_x = np.concatenate((tr_x, X))
            tr_y = np.concatenate((tr_y, y))
    te_hdf5_path = os.path.join(workspace, "packed_features", "spectrogram",
                                "test", "%ddb" % int(te_snr), "data.h5")
    (te_x, te_y) = pp_data.load_hdf5(te_hdf5_path)
    print(tr_x.shape, tr_y.shape)
    print(te_x.shape, te_y.shape)
    print("Load data time: %s s" % (time.time() - t1,))

    batch_size = 500
    print("%d iterations / epoch" % int(tr_x.shape[0] / batch_size))

    # Scale data (inputs only; the targets stay in the original domain).
    if True:
        t1 = time.time()
        scaler = read_combined_scaler(workspace, tr_snr)
        tr_x = pp_data.scale_on_3d(tr_x, scaler)
        # tr_y = pp_data.scale_on_2d(tr_y, scaler)
        te_x = pp_data.scale_on_3d(te_x, scaler)
        # te_y = pp_data.scale_on_2d(te_y, scaler)
        print("Scale data time: %s s" % (time.time() - t1,))

    # Debug plot.
    if False:
        plt.matshow(tr_x[0:1000, 0, :].T, origin='lower', aspect='auto', cmap='jet')
        plt.show()
        pause

    print(tf.test.is_gpu_available())

    # Build model.
    (_, n_concat, n_freq) = tr_x.shape
    n_hid = 2048

    model = Sequential()
    model.add(Flatten(input_shape=(n_concat, n_freq)))
    model.add(Dense(n_hid, activation='relu'))
    model.add(Dropout(0.2))
    model.add(Dense(n_hid, activation='relu'))
    model.add(Dropout(0.2))
    model.add(Dense(n_hid, activation='relu'))
    model.add(Dropout(0.2))
    model.add(Dense(n_freq, activation='linear'))
    model.summary()

    model.compile(loss='mean_absolute_error', optimizer=Adam(lr=lr))

    # Data generator.
    tr_gen = DataGenerator(batch_size=batch_size, type='train')
    eval_te_gen = DataGenerator(batch_size=batch_size, type='test', te_max_iter=50)
    eval_tr_gen = DataGenerator(batch_size=batch_size, type='test', te_max_iter=50)

    # Print loss before training.
    iter = 0
    tr_loss = eval(model, eval_tr_gen, tr_x, tr_y)
    te_loss = eval(model, eval_te_gen, te_x, te_y)
    print("Iteration: %d, tr_loss: %f, te_loss: %f" % (iter, tr_loss, te_loss))

    # Save out training stats.
    stat_dict = {'iter': iter,
                 'tr_loss': tr_loss,
                 'te_loss': te_loss, }
    stat_path = os.path.join(stats_dir, "%diters.p" % iter)
    pickle.dump(stat_dict, open(stat_path, 'wb'), protocol=pickle.HIGHEST_PROTOCOL)

    # Train.
    t1 = time.time()
    for (batch_x, batch_y) in tr_gen.generate(xs=[tr_x], ys=[tr_y]):
        loss = model.train_on_batch(batch_x, batch_y)
        iter += 1

        # Validate and save training stats.
        if iter % 1000 == 0:
            tr_loss = eval(model, eval_tr_gen, tr_x, tr_y)
            te_loss = eval(model, eval_te_gen, te_x, te_y)
            print("Iteration: %d, tr_loss: %f, te_loss: %f" % (iter, tr_loss, te_loss))

            # Save out training stats.
            stat_dict = {'iter': iter,
                         'tr_loss': tr_loss,
                         'te_loss': te_loss, }
            stat_path = os.path.join(stats_dir, "%diters.p" % iter)
            pickle.dump(stat_dict, open(stat_path, 'wb'), protocol=pickle.HIGHEST_PROTOCOL)

        # Save model.
        if iter % 10000 == 0:
            model_path = os.path.join(model_dir, "md_%diters.h5" % iter)
            model.save(model_path)
            print("Saved model to %s" % model_path)

        if iter == iters + 1:
            break

    print("Training time: %s s" % (time.time() - t1,))
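# ---------------------------------------------------------------------------
# read_combined_scaler() used in train() above is defined elsewhere. A hedged
# sketch of one plausible implementation: load the per-SNR scaler pickles
# written during feature packing and average their statistics. Equal-sized
# training sets per SNR are assumed; a weighted merge would be more exact.
# ---------------------------------------------------------------------------
import os
import pickle
import numpy as np

def read_combined_scaler_sketch(workspace, tr_snr):
    scalers = []
    for snr in tr_snr:
        p = os.path.join(workspace, "packed_features", "spectrogram",
                         "train", "%ddb" % int(snr), "scaler.p")
        scalers.append(pickle.load(open(p, 'rb')))
    combined = scalers[0]
    # sklearn StandardScaler keeps its fitted stats in mean_ / scale_.
    combined.mean_ = np.mean([s.mean_ for s in scalers], axis=0)
    combined.scale_ = np.mean([s.scale_ for s in scalers], axis=0)
    return combined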
def continue_train_tfrecord():
    workspace = "workspace"
    lr = 1e-5
    iter = 220000
    data_type = "IRM"

    # Load model.
    if data_type == "DM":
        model_path = os.path.join(workspace, "models", "elu_mixdb", "md_%diters.h5" % iter)
    else:
        model_path = os.path.join(workspace, "models", "mask_mixdb", "md_%diters.h5" % iter)
    model = load_model(model_path)
    # model = multi_gpu_model(model, 4)
    model.compile(loss='mean_absolute_error', optimizer=Adam(lr=lr, beta_1=0.2))

    # Load data.
    if data_type == "DM":
        tr_hdf5_dir = os.path.join(workspace, "tfrecords", "train", "mixdb")
        tr_hdf5_names = os.listdir(tr_hdf5_dir)
        tr_path_list = [os.path.join(tr_hdf5_dir, i) for i in tr_hdf5_names]
        te_hdf5_path = os.path.join(workspace, "packed_features", "spectrogram",
                                    "test", "mixdb", "data.h5")
    else:
        tr_hdf5_dir = os.path.join(workspace, "tfrecords", "train", "mask_mixdb")
        tr_hdf5_names = os.listdir(tr_hdf5_dir)
        tr_path_list = [os.path.join(tr_hdf5_dir, i) for i in tr_hdf5_names]
        te_hdf5_path = os.path.join(workspace, "packed_features", "spectrogram",
                                    "test", "mask_mixdb", "data.h5")

    # (tr_x1, tr_y1) = pp_data.load_hdf5("workspace/packed_features/spectrogram/train/mixdb/data100000.h5")
    (te_x, te_y) = pp_data.load_hdf5(te_hdf5_path)

    t1 = time.time()
    scaler_path = os.path.join(workspace, "packed_features", "spectrogram",
                               "train", "mixdb", "scaler.p")
    scaler = pickle.load(open(scaler_path, 'rb'))
    te_x = pp_data.scale_on_3d(te_x, scaler)
    # tr_x1 = pp_data.scale_on_3d(tr_x1, scaler)
    if data_type == "DM":
        te_y = pp_data.scale_on_2d(te_y, scaler)
        # tr_y1 would need scaling too, but its load above is commented out.
        # tr_y1 = pp_data.scale_on_2d(tr_y1, scaler)
    print("Scale data time: %s s" % (time.time() - t1,))

    # Directories for saving models and training stats.
    if data_type == "DM":
        model_dir = os.path.join(workspace, "models", "elu_mixdb", "continue")
        stats_dir = os.path.join(workspace, "training_stats", "elu_mixdb", "continue")
    else:
        model_dir = os.path.join(workspace, "models", "mask_mixdb", "continue")
        stats_dir = os.path.join(workspace, "training_stats", "mask_mixdb", "continue")
    pp_data.create_folder(model_dir)
    pp_data.create_folder(stats_dir)

    # Print loss before training.
    batch_size = 1024 * 4
    # eval_tr_gen = DataGenerator(batch_size=batch_size, type='test', te_max_iter=100)
    eval_te_gen = DataGenerator(batch_size=batch_size, type='test', te_max_iter=100)
    # tr_loss = eval(model, eval_tr_gen, tr_x1, tr_y1)
    tr_loss = 0
    te_loss = eval(model, eval_te_gen, te_x, te_y)
    print("Iteration: %d, tr_loss: %f, te_loss: %f" % (iter, tr_loss, te_loss))

    # Save out training stats.
    stat_dict = {'iter': iter,
                 'tr_loss': tr_loss,
                 'te_loss': te_loss, }
    stat_path = os.path.join(stats_dir, "%diters.p" % iter)
    cPickle.dump(stat_dict, open(stat_path, 'wb'), protocol=cPickle.HIGHEST_PROTOCOL)

    # Train.
    sess = tf.Session()
    x, y = load_tfrecord(batch=batch_size, repeat=100000, data_path=tr_path_list)
    t1 = time.time()
    for count in range(1000000000):
        [tr_x, tr_y] = sess.run([x, y])
        loss = model.train_on_batch(tr_x, tr_y)
        iter += 1

        # Validate and save training stats.
        if iter % 1000 == 0:
            # tr_loss = eval(model, eval_tr_gen, tr_x1, tr_y1)
            te_loss = eval(model, eval_te_gen, te_x, te_y)
            print("Iteration: %d, tr_loss: %f, te_loss: %f" % (iter, tr_loss, te_loss))

            # Save out training stats.
            stat_dict = {'iter': iter,
                         'tr_loss': tr_loss,
                         'te_loss': te_loss, }
            stat_path = os.path.join(stats_dir, "%diters.p" % iter)
            cPickle.dump(stat_dict, open(stat_path, 'wb'), protocol=cPickle.HIGHEST_PROTOCOL)

        # Save model.
        if iter % 5000 == 0:
            model_path = os.path.join(model_dir, "md_%diters.h5" % iter)
            model.save(model_path)
            print("Saved model to %s" % model_path)

        # Note: iter starts at 220000 above, so this condition is never met;
        # in practice the loop runs until the tfrecord iterator is exhausted.
        if iter == 100001:
            break

    print("Training time: %s s" % (time.time() - t1,))
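# ---------------------------------------------------------------------------
# load_tfrecord() used by continue_train_tfrecord() is defined elsewhere. A
# hedged TF1-style sketch of what the call site implies: parse (x, y) float
# features, shuffle, repeat, batch, and return iterator tensors suitable for
# sess.run(). The feature keys and the (7, 257) shapes are assumptions.
# ---------------------------------------------------------------------------
import tensorflow as tf

def load_tfrecord_sketch(batch, repeat, data_path, n_concat=7, n_freq=257):
    def _parse(serialized):
        feats = tf.parse_single_example(
            serialized,
            features={
                'x': tf.FixedLenFeature([n_concat * n_freq], tf.float32),
                'y': tf.FixedLenFeature([n_freq], tf.float32),
            })
        return tf.reshape(feats['x'], [n_concat, n_freq]), feats['y']

    dataset = (tf.data.TFRecordDataset(data_path)
               .map(_parse)
               .shuffle(buffer_size=10000)
               .repeat(repeat)
               .batch(batch))
    return dataset.make_one_shot_iterator().get_next()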
def continue_train(args):
    workspace = args.workspace
    lr = args.lr
    iter = args.iteration
    data_type = "IRM"

    # Load model.
    if data_type == "DM":
        model_path = os.path.join(workspace, "models", "mixdb", "md_%diters.h5" % iter)
    else:
        model_path = os.path.join(workspace, "models", "mask_mixdb", "md_%diters.h5" % iter)
    model = load_model(model_path)
    # model = multi_gpu_model(model, 4)
    model.compile(loss='mean_absolute_error', optimizer=Adam(lr=lr, beta_1=0.2))

    # Load data.
    t1 = time.time()
    if data_type == "DM":
        tr_hdf5_path = os.path.join(workspace, "packed_features", "spectrogram",
                                    "train", "mixdb", "data.h5")
        te_hdf5_path = os.path.join(workspace, "packed_features", "spectrogram",
                                    "test", "mixdb", "data.h5")
    else:
        tr_hdf5_path = os.path.join(workspace, "packed_features", "spectrogram",
                                    "train", "mask_mixdb", "data.h5")
        te_hdf5_path = os.path.join(workspace, "packed_features", "spectrogram",
                                    "test", "mask_mixdb", "data.h5")
    tr_hdf5_dir = os.path.join(workspace, "packed_features", "spectrogram",
                               "train", "mask_mixdb")
    tr_hdf5_names = os.listdir(tr_hdf5_dir)
    tr_hdf5_names = [i for i in tr_hdf5_names if i.endswith(".h5")]
    tr_path_list = [os.path.join(tr_hdf5_dir, i) for i in tr_hdf5_names]

    (tr_x, tr_y) = pp_data.load_hdf5(tr_hdf5_path)
    (te_x, te_y) = pp_data.load_hdf5(te_hdf5_path)
    print(tr_x.shape, tr_y.shape)
    print(te_x.shape, te_y.shape)
    print("Load data time: %s s" % (time.time() - t1,))

    batch_size = 2048
    print("%d iterations / epoch" % int(tr_x.shape[0] / batch_size))

    # Scale data.
    if True:
        t1 = time.time()
        scaler_path = os.path.join(workspace, "packed_features", "spectrogram",
                                   "train", "mixdb", "scaler.p")
        scaler = pickle.load(open(scaler_path, 'rb'))
        tr_x = pp_data.scale_on_3d(tr_x, scaler)
        te_x = pp_data.scale_on_3d(te_x, scaler)
        if data_type == "DM":
            tr_y = pp_data.scale_on_2d(tr_y, scaler)
            te_y = pp_data.scale_on_2d(te_y, scaler)
        print("Scale data time: %s s" % (time.time() - t1,))

    # scaler_path = os.path.join(workspace, "packed_features", "spectrogram", "train", "mixdb", "scaler.p")
    # scaler = pickle.load(open(scaler_path, 'rb'))

    tr_gen = DataGenerator(batch_size=batch_size, type='train')
    eval_te_gen = DataGenerator(batch_size=batch_size, type='test', te_max_iter=100)
    eval_tr_gen = DataGenerator(batch_size=batch_size, type='test', te_max_iter=100)
    # tr_gen = DataGenerator_h5py(batch_size=batch_size, type='train', scaler=scaler)
    # eval_te_gen = DataGenerator_h5py(batch_size=batch_size, type='test', te_max_iter=100, scaler=scaler)
    # eval_tr_gen = DataGenerator_h5py(batch_size=batch_size, type='test', te_max_iter=100, scaler=scaler)

    # Directories for saving models and training stats.
    if data_type == "DM":
        model_dir = os.path.join(workspace, "models", "chinese_mixdb", "continue")
        stats_dir = os.path.join(workspace, "training_stats", "chinese_mixdb", "continue")
    else:
        model_dir = os.path.join(workspace, "models", "mask_mixdb", "continue")
        stats_dir = os.path.join(workspace, "training_stats", "mask_mixdb", "continue")
    pp_data.create_folder(model_dir)
    pp_data.create_folder(stats_dir)

    # Print loss before training. (iter restarts at 0 for the continued run.)
    iter = 0
    tr_loss = eval(model, eval_tr_gen, tr_x, tr_y)
    te_loss = eval(model, eval_te_gen, te_x, te_y)
    # tr_loss = eval_h5py(model, eval_tr_gen, tr_path_list)
    # te_loss = eval_h5py(model, eval_te_gen, [te_hdf5_path])
    print("Iteration: %d, tr_loss: %f, te_loss: %f" % (iter, tr_loss, te_loss))

    # Save out training stats.
    stat_dict = {'iter': iter,
                 'tr_loss': tr_loss,
                 'te_loss': te_loss, }
    stat_path = os.path.join(stats_dir, "%diters.p" % iter)
    cPickle.dump(stat_dict, open(stat_path, 'wb'), protocol=cPickle.HIGHEST_PROTOCOL)

    # Train.
    t1 = time.time()
    for (batch_x, batch_y) in tr_gen.generate(xs=[tr_x], ys=[tr_y]):
        # for (batch_x, batch_y) in tr_gen.generate(tr_path_list):
        loss = model.train_on_batch(batch_x, batch_y)
        iter += 1

        # Validate and save training stats.
        if iter % 500 == 0:
            tr_loss = eval(model, eval_tr_gen, tr_x, tr_y)
            te_loss = eval(model, eval_te_gen, te_x, te_y)
            # tr_loss = eval_h5py(model, eval_tr_gen, tr_path_list)
            # te_loss = eval_h5py(model, eval_te_gen, [te_hdf5_path])
            print("Iteration: %d, tr_loss: %f, te_loss: %f" % (iter, tr_loss, te_loss))

            # Save out training stats.
            stat_dict = {'iter': iter,
                         'tr_loss': tr_loss,
                         'te_loss': te_loss, }
            stat_path = os.path.join(stats_dir, "%diters.p" % iter)
            cPickle.dump(stat_dict, open(stat_path, 'wb'), protocol=cPickle.HIGHEST_PROTOCOL)

        # Save model.
        if iter % 5000 == 0:
            model_path = os.path.join(model_dir, "md_%diters.h5" % iter)
            model.save(model_path)
            print("Saved model to %s" % model_path)

        if iter == 100001:
            break

    print("Training time: %s s" % (time.time() - t1,))
def train(args):
    """Train the neural network. Write out model every several iterations.

    Args:
      workspace: str, path of workspace.
      tr_snr: float, training SNR.
      te_snr: float, testing SNR.
      lr: float, learning rate.
    """
    print(args)
    workspace = args.workspace
    model_name = args.model_name
    lr = args.lr
    tr_dir_name = args.tr_dir_name
    va_dir_name = args.va_dir_name
    iter_training = args.iteration
    dropout = args.dropout

    # Load data.
    t1 = time.time()
    tr_hdf5_path = os.path.join(workspace, "packed_features", "spectrogram",
                                "train", tr_dir_name, "data.h5")
    # va_hdf5_path = os.path.join(workspace, "packed_features", "spectrogram", "validation", va_dir_name, "data.h5")
    (tr_x, tr_y) = pp_data.load_hdf5(tr_hdf5_path)
    # (va_x, va_y) = pp_data.load_hdf5(va_hdf5_path)
    print(tr_x.shape, tr_y.shape)
    # print(va_x.shape, va_y.shape)
    print("Load data time: %s s" % (time.time() - t1,))

    batch_size = 500
    print("%d iterations / epoch" % int(tr_x.shape[0] / batch_size))

    # Scale data.
    if True:
        t1 = time.time()
        scaler_path = os.path.join(workspace, "packed_features", "spectrogram",
                                   "train", tr_dir_name, "scaler.p")
        scaler = pickle.load(open(scaler_path, 'rb'))
        tr_x = pp_data.scale_on_3d(tr_x, scaler)
        tr_y = pp_data.scale_on_2d(tr_y, scaler)
        # va_x = pp_data.scale_on_3d(va_x, scaler)
        # va_y = pp_data.scale_on_2d(va_y, scaler)
        print("Scale data time: %s s" % (time.time() - t1,))

    # Debug plot.
    if False:
        plt.matshow(tr_x[0:1000, 0, :].T, origin='lower', aspect='auto', cmap='jet')
        plt.show()
        pause

    # Build model.
    (_, n_concat, n_freq) = tr_x.shape
    n_hid = 2048

    with tf.Session() as sess:
        model = DNN(sess, lr, batch_size, (n_concat, n_freq), n_freq,
                    dropouts=dropout, training=True)
        model.build()
        sess.run(tf.global_variables_initializer())
        merge_op = tf.summary.merge_all()

        # Data generator.
        tr_gen = DataGenerator(batch_size=batch_size, type='train')
        # eval_te_gen = DataGenerator(batch_size=batch_size, type='test', te_max_iter=100)
        eval_tr_gen = DataGenerator(batch_size=batch_size, type='test', te_max_iter=100)

        # Directories for saving models and training stats.
        model_dir = os.path.join(workspace, "models", model_name)
        pp_data.create_folder(model_dir)
        stats_dir = os.path.join(workspace, "training_stats", model_name)
        pp_data.create_folder(stats_dir)

        # Print loss before training.
        iter = 0
        tr_loss = eval(sess, model, eval_tr_gen, tr_x, tr_y)
        # te_loss = eval(model, eval_te_gen, te_x, te_y)
        # print("Iteration: %d, tr_loss: %f, te_loss: %f" % (iter, tr_loss, te_loss))
        print("Iteration: %d, tr_loss: %f" % (iter, tr_loss))

        # Save out training stats.
        stat_dict = {'iter': iter,
                     'tr_loss': tr_loss, }  # 'te_loss': te_loss,
        stat_path = os.path.join(stats_dir, "%diters.p" % iter)
        pickle.dump(stat_dict, open(stat_path, 'wb'), protocol=pickle.HIGHEST_PROTOCOL)

        # Train.
        t1 = time.time()
        for (batch_x, batch_y) in tr_gen.generate(xs=[tr_x], ys=[tr_y]):
            feed_dict = {model.x_noisy: batch_x, model.y_clean: batch_y}
            _, loss, summary_str = sess.run(
                [model.optimizer, model.loss, merge_op], feed_dict=feed_dict)
            iter += 1

            # Validate and save training stats.
            if iter % 1000 == 0:
                tr_loss = eval(sess, model, eval_tr_gen, tr_x, tr_y)
                # te_loss = eval(model, eval_te_gen, te_x, te_y)
                print("Iteration: %d, tr_loss: %f" % (iter, tr_loss))
                # print("Iteration: %d, tr_loss: %f, te_loss: %f" % (iter, tr_loss, te_loss))

                # Save out training stats.
                stat_dict = {'iter': iter,
                             'tr_loss': tr_loss, }  # 'te_loss': te_loss,
                stat_path = os.path.join(stats_dir, "%diters.p" % iter)
                pickle.dump(stat_dict, open(stat_path, 'wb'), protocol=pickle.HIGHEST_PROTOCOL)

            # Save model.
            if iter % 5000 == 0:
                ckpt_file_path = os.path.join(model_dir, model_name)
                # if os.path.isdir(model_dir) is False:
                #     os.makedirs(model_dir)
                tf.train.Saver().save(sess, ckpt_file_path, write_meta_graph=True)
                print("Saved model to %s" % ckpt_file_path)

            if iter == iter_training + 1:
                break

        print("Training time: %s s" % (time.time() - t1,))
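# ---------------------------------------------------------------------------
# Hedged restore sketch (added) for the checkpoint saved above: rebuild the
# graph from the .meta file and restore the weights. The checkpoint prefix
# assumes model_name was 'dnn'; adjust to the path printed during training.
# ---------------------------------------------------------------------------
if __name__ == "__main__" and False:  # illustration only
    import os
    import tensorflow as tf

    ckpt = os.path.join("workspace", "models", "dnn", "dnn")
    with tf.Session() as sess:
        saver = tf.train.import_meta_graph(ckpt + ".meta")
        saver.restore(sess, ckpt)
        # Tensors can then be fetched by the names used in the DNN class,
        # e.g. tf.get_default_graph().get_tensor_by_name(...).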
def train(args):
    """Train the neural network. Write out model every several iterations.

    Args:
      workspace: str, path of workspace.
      tr_snr: float, training SNR.
      te_snr: float, testing SNR.
      lr: float, learning rate.
    """
    class MetricsHistory(Callback):
        def on_epoch_end(self, epoch, logs={}):
            file_logger.write([str(epoch),
                               str(logs['loss']),
                               str(logs['val_loss'])])

    print(args)
    workspace = args.workspace
    # tr_snr = args.tr_snr
    # te_snr = args.te_snr
    lr = args.lr
    # TF = args.TF
    model_name = args.model_name
    # model_save_dir = os.path.join(args.workspace, 'saved_models')

    # Load data.
    t1 = time.time()
    print("Loading the train and validation datasets")
    tr_hdf5_path = os.path.join(workspace, "packed_features", "train", "mag.h5")
    te_hdf5_path = os.path.join(workspace, "packed_features", "val", "mag.h5")
    (tr_x, tr_y) = pp_data.load_hdf5(tr_hdf5_path)
    (te_x, te_y) = pp_data.load_hdf5(te_hdf5_path)
    print('train_x shape:')
    print(tr_x.shape, tr_y.shape)
    print('test_x shape:')
    print(te_x.shape, te_y.shape)
    print("Load data time: %f s" % (time.time() - t1))
    print('\n')

    # Scale data.
    if True:
        print("Scaling the train and test datasets. This will take some time, please wait patiently...")
        t1 = time.time()
        scaler_path = os.path.join(workspace, "packed_features", "train", "mag_scaler.p")
        scaler = pickle.load(open(scaler_path, 'rb'))
        tr_x = pp_data.scale_on_3d(tr_x, scaler)
        tr_y = pp_data.scale_on_2d(tr_y, scaler)
        te_x = pp_data.scale_on_3d(te_x, scaler)
        te_y = pp_data.scale_on_2d(te_y, scaler)
        print("Scale data time: %f s" % (time.time() - t1))

    # Debug plot.
    if False:
        plt.matshow(tr_x[0: 1000, 0, :].T, origin='lower', aspect='auto', cmap='jet')
        plt.show()
        # time.sleep(secs)
        os.system("pause")

    # Build model.
    batch_size = 150
    epoch = 100
    print("The neural network you have chosen is %s" % model_name)
    print("The training batch is set to %d and the %s will be trained for at most %d epochs"
          % (batch_size, model_name.upper(), epoch))
    print("======iterations of one epoch======")
    iter_each_epoch = int(tr_x.shape[0] / batch_size)
    # val_each_epoch = int(te_x.shape[0] / batch_size)
    print("There are %d iterations / epoch" % iter_each_epoch)

    log_save_dir = os.path.join(workspace, 'log')
    if not os.path.isdir(log_save_dir):
        os.makedirs(log_save_dir)
    log_path = os.path.join(log_save_dir, 'out_{}.csv'.format(model_name))
    # log_path = os.path.join(log_save_dir, 'out_%ddb_%s.csv' % (int(snr[0]), model_name))
    file_logger = FileLogger(log_path, ['epoch', 'train_loss', 'val_loss'])

    (_, n_concat, n_freq) = tr_x.shape
    # temp_tr_x = tr_x[:, 3, :][:, np.newaxis, :]
    # print(temp_tr_x.shape)
    n_hid = 2048
    # data_gen = DataGenerator(batch_size=batch_size, type='train')
    # tr_gen = data_gen.generate(xs=[tr_x], ys=[tr_y])
    # te_gen = data_gen.generate(xs=[te_x], ys=[te_y])
    # temp_tr_x = tr_gen[:, 3, :][:, np.newaxis, :]
    '''
    model = Sequential()
    model.add(Flatten(input_shape=(n_concat, n_freq)))
    model.add(BatchNormalization())
    model.add(Dense(n_hid, activation='relu', kernel_regularizer=regularizers.l2(l=0.0001)))
    model.add(Dropout(0.2))
    model.add(BatchNormalization())
    model.add(Dense(n_hid, activation='relu', kernel_regularizer=regularizers.l2(l=0.0001)))
    model.add(Dropout(0.2))
    model.add(BatchNormalization())
    model.add(Dense(n_hid, activation='relu', kernel_regularizer=regularizers.l2(l=0.0001)))
    model.add(Dropout(0.2))
    model.add(Dense(n_freq, activation='linear'))
    # model.summary()
    '''
    model = None  # guard: the selection chain below must set this
    print('Model selected:', model_name.lower())
    if model_name == 'dnn':
        model = dnn(n_hid,
                    n_concat, n_freq)
    elif model_name == 'sdnn1':
        model = sdnn1(n_hid, n_concat, n_freq)
    elif model_name == 'sdnn2':
        model = sdnn2(n_hid, n_concat, n_freq)
    elif model_name == 'sdnn3':
        model = sdnn3(n_hid, n_concat, n_freq)
    elif model_name == 'fcn':
        model = fcn(n_concat, n_freq)
    elif model_name == 'fcn1':
        model = fcn1(n_concat, n_freq)
    elif model_name == 'fcn1_re':  # was a duplicate 'fcn1' branch, which made it unreachable
        model = fcn1_re(n_concat, n_freq)
    elif model_name == 'fcn2':
        model = fcn2(n_concat, n_freq)
    elif model_name == 'fcn3':
        model = fcn3(n_concat, n_freq)
    elif model_name == 'fcn4':
        model = fcn4(n_concat, n_freq)
    elif model_name == 'm_vgg':
        model = m_vgg(n_concat, n_freq)
    elif model_name == 'm_vgg1':
        model = m_vgg1(n_concat, n_freq)
    elif model_name == 'm_vgg2':
        model = m_vgg2(n_concat, n_freq)
    elif model_name == 'm_vgg3':
        model = m_vgg3(n_concat, n_freq)
    elif model_name == 'm_vgg4':
        model = m_vgg3(n_concat, n_freq)  # note: maps to m_vgg3 in the original source
    elif model_name == 'CapsNet':
        model = CapsNet(n_concat, n_freq, 3)
    elif model_name == 'brnn':
        recur_layers = 7
        unit = 256
        output_dim = n_freq
        model = brnn(n_concat, n_freq, unit, recur_layers, output_dim)
    elif model_name == 'rnn':
        output_dim = n_freq
        model = rnn(n_concat, n_freq, output_dim)
    elif model_name == 'tcn':
        input_dim = n_freq
        model = tcn(n_concat, input_dim)
    if model is None:
        exit('Please choose a valid model: [dnn, sdnn, sdnn1, cnn, scnn1]')

    # mean_squared_error
    model.compile(loss='mean_squared_error', optimizer=Adam(lr=lr))
    print(model.summary())

    # Plot model.
    # plot_model(model, to_file=args.save_dir + '/model.png', show_shapes=True)
    # plot_model(model, to_file='%s/%s_model.png' % (log_save_dir, model_name), show_shapes=True)

    # Save model and weights.
    model_save_dir = os.path.join(workspace, 'saved_models', "%s" % model_name)
    model_save_name = "weights-checkpoint-{epoch:02d}-{val_loss:.2f}.h5"
    if not os.path.isdir(model_save_dir):
        os.makedirs(model_save_dir)
    model_path = os.path.join(model_save_dir, model_save_name)
    checkpoint = ModelCheckpoint(model_path, monitor='val_loss', verbose=1,
                                 save_best_only=True, mode='min')
    print('Saving trained models under %s' % model_save_dir)

    # reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.5, patience=4, min_lr=0.00001, verbose=1)
    lr_decay = LearningRateScheduler(schedule=lambda epoch: lr * (0.9 ** epoch))
    metrics_history = MetricsHistory()
    hist = model.fit(x=tr_x,
                     y=tr_y,
                     batch_size=batch_size,
                     epochs=epoch,
                     verbose=1,
                     shuffle=True,
                     validation_data=(te_x, te_y),
                     # validation_split=0.1,
                     callbacks=[metrics_history, checkpoint, lr_decay])
    '''
    hist = model.fit_generator(tr_gen,
                               steps_per_epoch=iter_each_epoch,
                               epochs=epoch,
                               verbose=1,
                               validation_data=te_gen,
                               validation_steps=val_each_epoch,
                               callbacks=[metrics_history, checkpoint, reduce_lr])
    '''
    # List all data in history.
    print(hist.history.keys())
    '''
    # Summarize history for accuracy.
    plt.plot(hist.history['acc'])
    plt.plot(hist.history['val_acc'])
    plt.title('model accuracy')
    plt.ylabel('accuracy')
    plt.xlabel('epoch')
    plt.legend(['train', 'test'], loc='upper left')
    plt.show()
    '''
    # Summarize history for loss.
    model_png = "train_test_loss"
    loss_fig_dir = os.path.join(log_save_dir, '%s_%s.png' % (model_name, model_png))
    plt.plot(hist.history['loss'])
    plt.plot(hist.history['val_loss'])
    plt.title('model loss')
    plt.ylabel('loss')
    plt.xlabel('epoch')
    plt.legend(['train', 'val'], loc='upper right')
    plt.savefig(loss_fig_dir)
    # plt.show()
    '''
    fig = plt.gcf()
    plt.show()
    fig.savefig('tessstttyyy.png', dpi=100)
    '''
    file_logger.close()
    '''
    # Data generator.
    tr_gen = DataGenerator(batch_size=batch_size, type='train')
    eval_te_gen = DataGenerator(batch_size=batch_size, type='test', te_max_iter=100)
    eval_tr_gen = DataGenerator(batch_size=batch_size, type='test', te_max_iter=100)

    # Directories for saving models and training stats.
    model_dir = os.path.join(workspace, "models", "%ddb" % int(tr_snr))
    pp_data.create_folder(model_dir)
    stats_dir = os.path.join(workspace, "training_stats", "%ddb" % int(tr_snr))
    pp_data.create_folder(stats_dir)

    # Print loss before training.
    iter = 0
    tr_loss = eval(model, eval_tr_gen, tr_x, tr_y)
    te_loss = eval(model, eval_te_gen, te_x, te_y)
    print("Iteration: %d, tr_loss: %f, te_loss: %f" % (iter, tr_loss, te_loss))

    # Save out training stats.
    stat_dict = {'iter': iter,
                 'tr_loss': tr_loss,
                 'te_loss': te_loss, }
    stat_path = os.path.join(stats_dir, "%diters.p" % iter)
    cPickle.dump(stat_dict, open(stat_path, 'wb'), protocol=cPickle.HIGHEST_PROTOCOL)

    # Train.
    t1 = time.time()
    for (batch_x, batch_y) in tr_gen.generate(xs=[tr_x], ys=[tr_y]):
        # loss = model.train_on_batch(batch_x, batch_y)
        if iter % 2000 == 0:
            lr *= 0.1
        model.train_on_batch(batch_x, batch_y)
        iter += 1

        # Validate and save training stats.
        if iter % 1000 == 0:
            tr_loss = eval(model, eval_tr_gen, tr_x, tr_y)
            te_loss = eval(model, eval_te_gen, te_x, te_y)
            print("Iteration: %d, tr_loss: %f, te_loss: %f" % (iter, tr_loss, te_loss))

            # Save out training stats.
            stat_dict = {'iter': iter,
                         'tr_loss': tr_loss,
                         'te_loss': te_loss, }
            stat_path = os.path.join(stats_dir, "%diters.p" % iter)
            cPickle.dump(stat_dict, open(stat_path, 'wb'), protocol=cPickle.HIGHEST_PROTOCOL)

        # Save model.
        if iter % 5000 == 0:
            model_path = os.path.join(model_dir, "md_%diters.h5" % iter)
            model.save(model_path)
            print("Saved model to %s" % model_path)

        if iter == 10001:
            break
    '''

    print("Training time: %s s" % (time.time() - t1,))
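# ---------------------------------------------------------------------------
# FileLogger used in the Keras train() above is an external helper. A hedged
# sketch matching how it is called there: FileLogger(path, columns) writes a
# CSV header, .write(row) appends one row per epoch, .close() releases the
# file. "FileLoggerSketch" is a hypothetical stand-in name.
# ---------------------------------------------------------------------------
import csv

class FileLoggerSketch(object):
    def __init__(self, path, columns):
        self._f = open(path, 'w', newline='')
        self._writer = csv.writer(self._f)
        self._writer.writerow(columns)  # header row

    def write(self, row):
        self._writer.writerow(row)
        self._f.flush()  # survive interrupted runs

    def close(self):
        self._f.close()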