def train():
    # load data
    tr_X2d_mix, tr_X3d_mix, tr_y2d_chn0, tr_y2d_chn1, tr_y3d_chn0, tr_y3d_chn1 = pp_data.LoadData(
        cfg.fe_fft_fd, n_time, hop, na_list=cfg.tr_list)

    # build model
    lay_in0 = InputLayer(in_shape=(n_time, n_freq), name='in1')
    lay_a0 = Flatten()(lay_in0)
    lay_a1 = Dense(n_hid, act='relu', name='a1')(lay_a0)
    lay_a2 = Dropout(0.2, name='a2')(lay_a1)
    lay_a3 = Dense(n_hid, act='relu', name='a3')(lay_a2)
    lay_a4 = Dropout(0.2, name='a4')(lay_a3)
    lay_a5 = Dense(n_hid, act='relu', name='a5')(lay_a4)
    lay_a6 = Dropout(0.2, name='a6')(lay_a5)
    lay_b1 = Dense(n_freq, act='sigmoid', name='a7')(lay_a6)   # mask_left, shape: (N, n_freq)
    lay_c1 = Dense(n_freq, act='sigmoid', name='a8')(lay_a6)   # mask_right, shape: (N, n_freq)
    lay_out_b = Lambda(mul, name='out_b')([lay_b1, lay_in0])   # out_left, shape: (N, n_freq)
    lay_out_c = Lambda(mul, name='out_c')([lay_c1, lay_in0])   # out_right, shape: (N, n_freq)

    md = Model(in_layers=[lay_in0],
               out_layers=[lay_out_b, lay_out_c],
               any_layers=[lay_in0, lay_b1, lay_c1])
    md.summary()

    # validation
    validation = Validation(tr_x=[np.abs(tr_y3d_chn0) + np.abs(tr_y3d_chn1)],
                            tr_y=[np.abs(tr_y2d_chn0), np.abs(tr_y2d_chn1)],
                            batch_size=100,
                            metrics=[loss_func],
                            call_freq=1,
                            dump_path=None)

    # save model
    if not os.path.exists(cfg.md_fd):
        os.makedirs(cfg.md_fd)
    save_model = SaveModel(dump_fd=cfg.md_fd, call_freq=2)

    # callbacks
    callbacks = [validation, save_model]

    # optimizer
    optimizer = Adam(1e-3)

    # fit model
    md.fit([np.abs(tr_y3d_chn0) + np.abs(tr_y3d_chn1)],
           [np.abs(tr_y2d_chn0), np.abs(tr_y2d_chn1)],
           batch_size=100,
           n_epochs=100,
           loss_func='mse',
           optimizer=optimizer,
           callbacks=callbacks,
           verbose=1)
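# The `mul` function passed to Lambda above is not defined in this snippet. A minimal
# sketch (assumption): it multiplies the predicted mask with the mixture spectrogram
# element-wise, which is how the two masked outputs would be formed.
def mul(inputs):
    # inputs: [mask, mixture], both of shape (N, n_freq)
    mask, mix = inputs
    return mask * mix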
# sparse label to 1-of-K categorical label
tr_y = sparse_to_categorical(tr_y, n_out)
va_y = sparse_to_categorical(va_y, n_out)
te_y = sparse_to_categorical(te_y, n_out)

### Build model
lay_in = InputLayer(in_shape=(n_in,))
a = Dense(n_out=n_hid, act='relu', name='dense1')(lay_in)
a = Dropout(p_drop=0.2)(a)
a = Dense(n_out=n_hid, act='relu', name='dense2')(a)
a = Dropout(p_drop=0.2)(a)
lay_out = Dense(n_out=n_out, act='softmax')(a)

md = Model(in_layers=[lay_in], out_layers=[lay_out])
md.compile()
md.summary()

# observe forward
observe_nodes = [md.find_layer('dense1').output_,
                 md.find_layer('dense2').output_]
f_forward = md.get_observe_forward_func(observe_nodes)
print md.run_function(func=f_forward, z=[te_x], batch_size=500, tr_phase=0.)

# observe backward
md.set_gt_nodes(target_dim_list=[2])
loss_node = obj.categorical_crossentropy(md.out_nodes_[0], md.gt_nodes_[0])
gparams = K.grad(loss_node + md.reg_value_, md.params_)
def train():
    # create empty folders in workspace
    create_folders()

    # get dev & eva data
    tr_X, tr_y, _, _, _, _ = pp_dev_data.GetSegData(dev_fe_fd, agg_num, hop, fold=None)
    te_X, te_na_list = pp_eva_data.GetEvaSegData(eva_fe_fd, agg_num, hop)

    [n_songs, n_chunks, _, n_in] = tr_X.shape
    print tr_X.shape, tr_y.shape
    print te_X.shape

    # model
    lay_in0 = InputLayer((n_chunks, agg_num, n_in), name='in0')   # shape: (n_songs, n_chunk, agg_num, n_in)
    lay_a1 = Flatten(3, name='a1')(lay_in0)                       # shape: (n_songs, n_chunk, agg_num*n_in)
    lay_a2 = Dense(n_hid, act='relu')(lay_a1)                     # shape: (n_songs, n_chunk, n_hid)
    lay_a3 = Dropout(0.2)(lay_a2)
    lay_a4 = Dense(n_hid, act='relu')(lay_a3)
    lay_a5 = Dropout(0.2)(lay_a4)
    lay_a6 = Dense(n_hid, act='relu')(lay_a5)
    lay_a7 = Dropout(0.2)(lay_a6)
    lay_a8 = Dense(n_out, act='sigmoid', b_init=-1, name='a8')(lay_a7)   # shape: (n_songs, n_chunk, n_out)

    md = Model(in_layers=[lay_in0], out_layers=[lay_a8], any_layers=[])
    md.compile()
    md.summary()

    # callback, write out detection scores to .txt each epoch
    dump_fd = cfg.scrap_fd + '/Results_eva/bob_eer'
    print_scores = cb_eer.PrintScoresBagOfBlocks(te_X, te_na_list, dump_fd, call_freq=1)

    # callback, print loss each epoch
    validation = Validation(tr_x=tr_X, tr_y=tr_y,
                            va_x=None, va_y=None,
                            te_x=None, te_y=None,
                            metrics=[loss_func],
                            call_freq=1,
                            dump_path=None)

    # callback, save model every N epochs
    save_model = SaveModel(dump_fd=cfg.scrap_fd + '/Md_eva_bob', call_freq=10)

    # combine all callbacks
    callbacks = [validation, save_model, print_scores]

    # optimizer
    optimizer = Adam(2e-4)

    # fit model
    md.fit(x=tr_X, y=tr_y,
           batch_size=10,
           n_epochs=301,
           loss_func=loss_func,
           optimizer=optimizer,
           callbacks=callbacks)
def train(args):
    workspace = cfg.workspace
    te_fold = cfg.te_fold
    n_events = args.n_events
    snr = args.snr

    feature_dir = os.path.join(workspace, "features", "logmel", "n_events=%d" % n_events)
    yaml_dir = os.path.join(workspace, "mixed_audio", "n_events=%d" % n_events)

    (tr_x, tr_at_y, tr_sed_y, tr_na_list,
     te_x, te_at_y, te_sed_y, te_na_list) = pp_data.load_data(feature_dir=feature_dir,
                                                              yaml_dir=yaml_dir,
                                                              te_fold=te_fold,
                                                              snr=snr,
                                                              is_scale=is_scale)
    print(tr_x.shape, tr_at_y.shape)
    print(te_x.shape, te_at_y.shape)
    (_, n_time, n_freq) = tr_x.shape
    n_out = len(cfg.events)

    if False:
        for e in tr_x:
            plt.matshow(e.T, origin='lower', aspect='auto')
            plt.show()

    # Build model.
    lay_in = InputLayer(in_shape=(n_time, n_freq))
    a = Reshape((1, n_time, n_freq))(lay_in)
    a = Conv2D(n_outfmaps=64, n_row=3, n_col=5, act='linear', strides=(1, 1), border_mode=(1, 2))(a)
    a = BN(axis=(0, 2, 3))(a)
    a = Activation('relu')(a)
    a = Conv2D(n_outfmaps=64, n_row=3, n_col=5, act='linear', strides=(1, 1), border_mode=(1, 2))(a)
    a = BN(axis=(0, 2, 3))(a)
    a = Activation('relu')(a)
    a = Dropout(p_drop=0.2)(a)
    a = Conv2D(n_outfmaps=64, n_row=3, n_col=5, act='linear', strides=(1, 1), border_mode=(1, 2))(a)
    a = BN(axis=(0, 2, 3))(a)
    a = Activation('relu')(a)
    a = Conv2D(n_outfmaps=64, n_row=3, n_col=5, act='linear', strides=(1, 1), border_mode=(1, 2))(a)
    a = BN(axis=(0, 2, 3))(a)
    a = Activation('relu')(a)
    a = Dropout(p_drop=0.2)(a)
    a = Conv2D(n_outfmaps=64, n_row=3, n_col=5, act='linear', strides=(1, 1), border_mode=(1, 2))(a)
    a = BN(axis=(0, 2, 3))(a)
    a = Activation('relu')(a)
    a = Conv2D(n_outfmaps=64, n_row=3, n_col=5, act='linear', strides=(1, 1), border_mode=(1, 2))(a)
    a = BN(axis=(0, 2, 3))(a)
    a = Activation('relu')(a)
    a = Dropout(p_drop=0.2)(a)
    a = Conv2D(n_outfmaps=n_out, n_row=1, n_col=1, act='sigmoid', border_mode=(0, 0), name='seg_masks')(a)
    a8 = Lambda(_global_avg_pooling, name='a8')(a)

    md = Model([lay_in], [a8])
    md.compile()
    md.summary(is_logging=True)

    # Callbacks.
    md_dir = os.path.join(workspace, "models", pp_data.get_filename(__file__),
                          "n_events=%d" % n_events, "fold=%d" % te_fold, "snr=%d" % snr)
    pp_data.create_folder(md_dir)
    save_model = SaveModel(md_dir, call_freq=50, type='iter', is_logging=True)
    validation = Validation(te_x=te_x, te_y=te_at_y,
                            batch_size=50,
                            call_freq=50,
                            metrics=['binary_crossentropy'],
                            dump_path=None,
                            is_logging=True)
    callbacks = [save_model, validation]

    observe_nodes = [md.find_layer('seg_masks').output_]
    f_forward = md.get_observe_forward_func(observe_nodes)

    # Generator.
    tr_gen = DataGenerator(batch_size=32, type='train')
    eva_gen = DataGenerator2(batch_size=32, type='test')

    # Train.
    loss_ary = []
    t1 = time.time()
    optimizer = Adam(1e-3)
    for (batch_x, batch_y) in tr_gen.generate(xs=[tr_x], ys=[tr_at_y]):
        if md.iter_ % 50 == 0:
            logging.info("iter: %d tr_loss: %f time: %s" % (
                md.iter_, np.mean(loss_ary), time.time() - t1,))
            t1 = time.time()
            loss_ary = []
        # if md.iter_ % 200 == 0:
        #     write_out_at_sed(md, eva_gen, f_forward, te_x, te_at_y, te_sed_y, n_events, snr, te_fold)
        if md.iter_ == 5001:
            break
        loss = md.train_on_batch(batch_x, batch_y,
                                 loss_func='binary_crossentropy',
                                 optimizer=optimizer,
                                 callbacks=callbacks)
        loss_ary.append(loss)
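# `_global_avg_pooling` is referenced above but not defined in this snippet. A minimal
# sketch (assumption): average the sigmoid segmentation masks over the time and
# frequency axes to obtain one clip-level probability per event class.
def _global_avg_pooling(input):
    # input shape: (N, n_out, n_time, n_freq) -> output shape: (N, n_out)
    return K.mean(K.mean(input, axis=3), axis=2)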
# sparse label to 1-of-K categorical label
tr_y = sparse_to_categorical(tr_y, n_out)
va_y = sparse_to_categorical(va_y, n_out)
te_y = sparse_to_categorical(te_y, n_out)

### Build model
lay_in1 = InputLayer(in_shape=(n_in,))
lay_in2 = InputLayer(in_shape=(n_in,))
a1 = Dense(n_out=n_hid, act='relu')(lay_in1)
a2 = Dense(n_out=n_hid, act='relu')(lay_in2)
b = Lambda(merge)([a1, a2])
b = Dense(n_out=n_hid, act='relu')(b)
lay_out1 = Dense(n_out=n_out, act='softmax')(b)
lay_out2 = Dense(n_out=n_out, act='softmax')(b)

md = Model(in_layers=[lay_in1, lay_in2], out_layers=[lay_out1, lay_out2])
md.compile()
md.summary()

# validate model every n epoch (optional)
validation = Validation(tr_x=[tr_x, tr_x], tr_y=[tr_y, tr_y],
                        va_x=None, va_y=None,
                        te_x=[te_x, te_x], te_y=[te_y, te_y],
                        batch_size=500,
                        metrics=[your_metric],
                        call_freq=1)

# callbacks function
callbacks = [validation]

### train model
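# `merge` is the function handed to Lambda above; it is not defined in this snippet.
# A minimal sketch (assumption): combine the two hidden branches element-wise, here
# by summation (concatenation would also work if the backend supports it).
def merge(inputs):
    # inputs: [a1, a2], both of shape (N, n_hid)
    return inputs[0] + inputs[1]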
x0 = InputLayer(in_shape=(3, 32, 32))
x1 = Convolution2D(n_outfmaps=64, n_row=3, n_col=3, act='relu', border_mode=(1, 1))(x0)
x2 = add_n_blocks(x1, n_outfmaps=64, n_repeat=3, is_first_layer=True)
x3 = add_n_blocks(x2, n_outfmaps=128, n_repeat=4, is_first_layer=False)
x4 = add_n_blocks(x3, n_outfmaps=256, n_repeat=6, is_first_layer=False)
x5 = add_n_blocks(x4, n_outfmaps=512, n_repeat=3, is_first_layer=False)
y1 = Lambda(mean_pool)(x5)
y2 = Flatten()(y1)
y3 = Dense(n_out, act='softmax')(y2)

md = Model([x0], [y3])

# print summary info of model
md.summary()

### optimization method
optimizer = Adam(1e-3)

### callbacks (optional)
# save model every n epoch (optional)
if not os.path.exists('Md'):
    os.makedirs('Md')   # create folder
save_model = SaveModel(dump_fd='Md', call_freq=1)

# validate model every n epoch (optional)
validation = Validation(tr_x=tr_X, tr_y=tr_y,
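# `mean_pool` used in the Lambda above is not shown here. A minimal sketch
# (assumption): global average pooling over the spatial dimensions of the last
# residual feature maps before the softmax classifier.
def mean_pool(input):
    # input shape: (N, n_fmaps, n_row, n_col) -> output shape: (N, n_fmaps)
    return K.mean(K.mean(input, axis=3), axis=2)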
import numpy as np
np.random.seed(1515)
import os

from hat.models import Model
from hat.layers.core import InputLayer, Dense, Dropout
from hat.callbacks import SaveModel, Validation
from hat.preprocessing import sparse_to_categorical
from hat.optimizers import SGD, Adam
from hat import serializations

# init params
n_in = 784
n_hid = 500
n_out = 10

lay_in = InputLayer(in_shape=(n_in,))
a = Dense(n_out=n_hid, act='relu')(lay_in)
a = Dropout(p_drop=0.2)(a)
a = Dense(n_out=n_hid, act='relu')(a)
a = Dropout(p_drop=0.2)(a)
lay_out = Dense(n_out=n_out, act='softmax')(a)

md = Model(in_layers=[lay_in], out_layers=[lay_out])
md.compile()
md.summary()

# Save model
md_path = 'model.p'
serializations.save(md=md, path=md_path)

# Load model
md_load = serializations.load(md_path)
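# Usage sketch: the reloaded model behaves like the original. `te_x` below is an
# assumed (N, 784) array of test data (not defined in this snippet).
# assert np.allclose(md.predict(te_x), md_load.predict(te_x))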
def train():
    _loss_func = _jdc_loss_func0

    # load data
    t1 = time.time()
    dict = cPickle.load(open(cfg.scrap_fd + '/denoise_enhance_pool_fft_all0.p', 'rb'))
    tr_X, tr_mask, tr_y, tr_na_list, te_X, te_mask, te_y, te_na_list = \
        dict['tr_X'], dict['tr_mask'], dict['tr_y'], dict['tr_na_list'], \
        dict['te_X'], dict['te_mask'], dict['te_y'], dict['te_na_list']
    t2 = time.time()

    tr_X = pp_data.wipe_click(tr_X, tr_na_list)
    te_X = pp_data.wipe_click(te_X, te_na_list)

    # balance data
    tr_X, tr_mask, tr_y = pp_data.BalanceData2(tr_X, tr_mask, tr_y)
    te_X, te_mask, te_y = pp_data.BalanceData2(te_X, te_mask, te_y)
    print tr_X.shape, tr_y.shape, te_X.shape, te_y.shape

    [n_songs, n_chunks, n_freq] = te_X.shape
    tr_y = tr_y.reshape((len(tr_y), 1))
    te_y = te_y.reshape((len(te_y), 1))

    # jdc model
    # classifier
    lay_z0 = InputLayer((n_chunks,))                      # shape: (n_songs, n_chunks), keep the length of songs
    lay_in0 = InputLayer((n_chunks, n_freq), name='in0')  # shape: (n_songs, n_chunk, n_freq)
    lay_a1 = lay_in0
    # lay_a1 = Lambda(_conv2d)(lay_a1)
    lay_a1 = Lambda(_reshape_3d_to_4d)(lay_a1)
    lay_a1 = Convolution2D(32, 3, 3, act='relu', init_type='glorot_uniform',
                           border_mode=(1, 1), strides=(1, 1), name='a11')(lay_a1)
    lay_a1 = Dropout(0.2)(lay_a1)
    lay_a1 = MaxPool2D(pool_size=(1, 2))(lay_a1)
    lay_a1 = Convolution2D(64, 3, 3, act='relu', init_type='glorot_uniform',
                           border_mode=(1, 1), strides=(1, 1), name='a12')(lay_a1)
    lay_a1 = Dropout(0.2)(lay_a1)
    lay_a1 = MaxPool2D(pool_size=(1, 2))(lay_a1)
    lay_a1 = Lambda(_reshape_4d_to_3d)(lay_a1)
    lay_a1 = Dense(n_hid, act='relu', name='a2')(lay_a1)   # shape: (n_songs, n_chunk, n_hid)
    lay_a1 = Dropout(0.2)(lay_a1)
    lay_a1 = Dense(n_hid, act='relu', name='a4')(lay_a1)
    lay_a1 = Dropout(0.2)(lay_a1)
    lay_a1 = Dense(n_hid, act='relu', name='a6')(lay_a1)
    lay_a1 = Dropout(0.2)(lay_a1)
    lay_a8 = Dense(n_out, act='sigmoid', init_type='zeros', b_init=0, name='a8')(lay_a1)   # shape: (n_songs, n_chunk, n_out)

    # detector
    lay_b1 = lay_in0                                       # shape: (n_songs, n_chunk, n_freq)
    lay_b2 = Lambda(_conv2d)(lay_b1)                       # shape: (n_songs, n_chunk, n_freq)
    lay_b2 = Lambda(_reshape_3d_to_4d)(lay_b1)
    lay_b2 = MaxPool2D(pool_size=(1, 2))(lay_b2)
    lay_b2 = Lambda(_reshape_4d_to_3d)(lay_b2)
    lay_b8 = Dense(n_out, act='hard_sigmoid', init_type='zeros', b_init=-2.3, name='b8')(lay_b2)

    md = Model(in_layers=[lay_in0, lay_z0], out_layers=[lay_a8, lay_b8], any_layers=[])

    # print summary info of model
    md.summary()

    # callbacks (optional)
    # save model every n epoch (optional)
    pp_data.CreateFolder(cfg.wbl_dev_md_fd)
    pp_data.CreateFolder(cfg.wbl_dev_md_fd + '/cnn_fft')
    save_model = SaveModel(dump_fd=cfg.wbl_dev_md_fd + '/cnn_fft', call_freq=20, type='iter')
    validation = Validation(tr_x=None, tr_y=None,
                            va_x=None, va_y=None,
                            te_x=[te_X, te_mask], te_y=te_y,
                            batch_size=100,
                            metrics=[_loss_func],
                            call_freq=20,
                            dump_path=None,
                            type='iter')

    # callbacks function
    callbacks = [save_model, validation]

    # EM training
    md.set_gt_nodes(tr_y)
    md.find_layer('a11').set_trainable_params(['W', 'b'])
    md.find_layer('a12').set_trainable_params(['W', 'b'])
    md.find_layer('a2').set_trainable_params(['W', 'b'])
    md.find_layer('a4').set_trainable_params(['W', 'b'])
    md.find_layer('a6').set_trainable_params(['W', 'b'])
    md.find_layer('a8').set_trainable_params(['W', 'b'])
    md.find_layer('b8').set_trainable_params([])
    opt_classifier = Adam(1e-3)
    f_classify = md.get_optimization_func(loss_func=_loss_func, optimizer=opt_classifier, clip=None)
    md.find_layer('a11').set_trainable_params([])
    md.find_layer('a12').set_trainable_params([])
    md.find_layer('a2').set_trainable_params([])
    md.find_layer('a4').set_trainable_params([])
    md.find_layer('a6').set_trainable_params([])
    md.find_layer('a8').set_trainable_params([])
    md.find_layer('b8').set_trainable_params(['W', 'b'])
    opt_detector = Adam(1e-3)
    f_detector = md.get_optimization_func(loss_func=_loss_func, optimizer=opt_detector, clip=None)

    _x, _y = md.preprocess_data([tr_X, tr_mask], tr_y, shuffle=True)

    for i1 in xrange(500):
        print '-----------------------'
        opt_classifier.reset()
        md.do_optimization_func_iter_wise(f_classify, _x, _y,
                                          batch_size=100,
                                          n_iters=80,
                                          callbacks=callbacks,
                                          verbose=1)
        print '-----------------------'
        opt_detector.reset()
        md.do_optimization_func_iter_wise(f_detector, _x, _y,
                                          batch_size=100,
                                          n_iters=20,
                                          callbacks=callbacks,
                                          verbose=1)
def train(args):
    cpickle_dir = args.cpickle_dir
    workspace = args.workspace

    # Path of hdf5 data
    bal_train_hdf5_path = os.path.join(cpickle_dir, "bal_train.h5")
    unbal_train_hdf5_path = os.path.join(cpickle_dir, "unbal_train.h5")
    eval_hdf5_path = os.path.join(cpickle_dir, "eval.h5")

    # Load data
    t1 = time.time()
    (tr_x1, tr_y1, tr_id_list1) = pp_data.load_data(bal_train_hdf5_path)
    (tr_x2, tr_y2, tr_id_list2) = pp_data.load_data(unbal_train_hdf5_path)
    tr_x = np.concatenate((tr_x1, tr_x2))
    tr_y = np.concatenate((tr_y1, tr_y2))
    tr_id_list = tr_id_list1 + tr_id_list2
    (te_x, te_y, te_id_list) = pp_data.load_data(eval_hdf5_path)
    logging.info("Loading data time: %s s" % (time.time() - t1))
    logging.info(tr_x1.shape, tr_x2.shape)
    logging.info("tr_x.shape: %s" % (tr_x.shape,))
    (_, n_time, n_freq) = tr_x.shape

    # Build model
    n_hid = 500
    n_out = tr_y.shape[1]

    lay_in = InputLayer(in_shape=(n_time, n_freq))
    a = Dense(n_out=n_hid, act='relu')(lay_in)
    a = Dropout(p_drop=0.2)(a)
    a = Dense(n_out=n_hid, act='relu')(a)
    a = Dropout(p_drop=0.2)(a)
    a = Dense(n_out=n_hid, act='relu')(a)
    a = Dropout(p_drop=0.2)(a)
    cla = Dense(n_out=n_out, act='sigmoid', name='cla')(a)
    att = Dense(n_out=n_out, act='softmax', name='att')(a)

    # Attention
    lay_out = Lambda(_attention)([cla, att])

    # Compile model
    md = Model(in_layers=[lay_in], out_layers=[lay_out])
    md.compile()
    md.summary(is_logging=True)

    # Save model every several iterations
    call_freq = 1000
    dump_fd = os.path.join(workspace, "models", pp_data.get_filename(__file__))
    pp_data.create_folder(dump_fd)
    save_model = SaveModel(dump_fd=dump_fd, call_freq=call_freq, type='iter', is_logging=True)

    # Callbacks function
    callbacks = [save_model]

    batch_size = 500
    tr_gen = RatioDataGenerator(batch_size=batch_size, type='train')

    # Optimization method
    optimizer = Adam(lr=args.lr)

    # Train
    stat_dir = os.path.join(workspace, "stats", pp_data.get_filename(__file__))
    pp_data.create_folder(stat_dir)
    prob_dir = os.path.join(workspace, "probs", pp_data.get_filename(__file__))
    pp_data.create_folder(prob_dir)

    tr_time = time.time()
    for (tr_batch_x, tr_batch_y) in tr_gen.generate(xs=[tr_x], ys=[tr_y]):
        # Compute stats every several iterations
        if md.iter_ % call_freq == 0:
            # Stats of evaluation dataset
            t1 = time.time()
            te_err = eval(md=md, x=te_x, y=te_y,
                          out_dir=os.path.join(stat_dir, "test"),
                          out_probs_dir=os.path.join(prob_dir, "test"))
            logging.info("Evaluate test time: %s" % (time.time() - t1,))

            # Stats of training dataset
            t1 = time.time()
            tr_bal_err = eval(md=md, x=tr_x1, y=tr_y1,
                              out_dir=os.path.join(stat_dir, "train_bal"),
                              out_probs_dir=None)
            logging.info("Evaluate tr_bal time: %s" % (time.time() - t1,))

        # Update params
        (tr_batch_x, tr_batch_y) = pp_data.transform_data(tr_batch_x, tr_batch_y)
        md.train_on_batch(batch_x=tr_batch_x, batch_y=tr_batch_y,
                          loss_func='binary_crossentropy',
                          optimizer=optimizer,
                          callbacks=callbacks)

        # Stop training when the maximum iteration is reached
        if md.iter_ == call_freq * 31:
            break
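# `_attention` is used in the Lambda above but not defined in this snippet. A minimal
# sketch (assumption): attention-weighted average of the classification probabilities
# over the time axis; it assumes the backend exposes K.sum analogous to the K.mean
# used elsewhere in these examples.
def _attention(inputs):
    # inputs: [cla, att], each of shape (N, n_time, n_out)
    [cla, att] = inputs
    return K.sum(cla * att, axis=1) / K.sum(att, axis=1)   # shape: (N, n_out)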
def train(args):
    workspace = args.workspace
    cla_mapping = args.cla_mapping

    # Load data.
    t1 = time.time()
    tr_pack_path = os.path.join(workspace, "packed_features", "logmel", "training.h5")
    te_pack_path = os.path.join(workspace, "packed_features", "logmel", "testing.h5")

    with h5py.File(tr_pack_path, 'r') as hf:
        tr_na_list = list(hf.get('na_list'))
        tr_x = np.array(hf.get('x'))
        tr_y = np.array(hf.get('y'))
    with h5py.File(te_pack_path, 'r') as hf:
        te_na_list = list(hf.get('na_list'))
        te_x = np.array(hf.get('x'))
        te_y = np.array(hf.get('y'))
    logging.info("Loading data time: %s" % (time.time() - t1,))

    # Scale.
    t1 = time.time()
    scaler_path = os.path.join(workspace, "scalers", "logmel", "training.scaler")
    scaler = pickle.load(open(scaler_path, 'rb'))
    tr_x = pp_data.do_scaler_on_x3d(tr_x, scaler)
    te_x = pp_data.do_scaler_on_x3d(te_x, scaler)
    logging.info("Scale time: %s" % (time.time() - t1,))

    logging.info("tr_x: %s %s" % (tr_x.shape, tr_x.dtype))
    logging.info("tr_y: %s %s" % (tr_y.shape, tr_y.dtype))
    logging.info("y: 1-of-4 representation: %s" % (cfg.events + ['bg'],))

    # Build model.
    (_, n_time, n_freq) = tr_x.shape
    n_out = len(cfg.events) + 1

    in0 = InputLayer(in_shape=(n_time, n_freq))
    a1 = Reshape((1, n_time, n_freq))(in0)
    a1 = Conv2D(n_outfmaps=64, n_row=3, n_col=3, act='linear', border_mode=(1, 1))(a1)
    a1 = BN(axis=(0, 2, 3))(a1)
    a1 = Activation('relu')(a1)
    a1 = Conv2D(n_outfmaps=64, n_row=3, n_col=3, act='linear', border_mode=(1, 1))(a1)
    a1 = BN(axis=(0, 2, 3))(a1)
    a1 = Activation('relu')(a1)
    a1 = Dropout(0.3)(a1)
    a1 = Conv2D(n_outfmaps=64, n_row=3, n_col=3, act='linear', border_mode=(1, 1))(a1)
    a1 = BN(axis=(0, 2, 3))(a1)
    a1 = Activation('relu')(a1)
    a1 = Conv2D(n_outfmaps=64, n_row=3, n_col=3, act='linear', border_mode=(1, 1))(a1)
    a1 = BN(axis=(0, 2, 3))(a1)
    a1 = Activation('relu')(a1)
    a1 = Dropout(0.3)(a1)
    a1 = Conv2D(n_outfmaps=64, n_row=3, n_col=3, act='linear', border_mode=(1, 1))(a1)
    a1 = BN(axis=(0, 2, 3))(a1)
    a1 = Activation('relu')(a1)
    a1 = Conv2D(n_outfmaps=64, n_row=3, n_col=3, act='linear', border_mode=(1, 1))(a1)
    a1 = BN(axis=(0, 2, 3))(a1)
    a1 = Activation('relu')(a1)
    a1 = Dropout(0.3)(a1)
    a1 = Conv2D(n_outfmaps=64, n_row=3, n_col=3, act='linear', border_mode=(1, 1))(a1)
    a1 = BN(axis=(0, 2, 3))(a1)
    a1 = Activation('relu')(a1)
    a1 = Conv2D(n_outfmaps=64, n_row=3, n_col=3, act='linear', border_mode=(1, 1))(a1)
    a1 = BN(axis=(0, 2, 3))(a1)
    a1 = Activation('relu')(a1)
    a1 = Dropout(0.3)(a1)

    # Segmentation mask for 'babycry', 'glassbreak' and 'gunshot'.
    a1 = Conv2D(n_outfmaps=len(cfg.events), n_row=1, n_col=1, act='sigmoid', border_mode=(0, 0))(a1)

    # Extend segmentation mask to 'babycry', 'glassbreak', 'gunshot' and 'background'.
    a1 = Lambda(_seg_mask_ext_bg, name='seg_masks')(a1)

    # Classification mapping.
    cla_mapping = args.cla_mapping
    if cla_mapping == 'global_rank_pooling':
        weight1d = np.power(r * np.ones(120 * 64), np.arange(120 * 64))
        a8 = Lambda(_global_rank_pooling, weight1d=weight1d, name='a5')(a1)
    elif cla_mapping == 'global_max_pooling':
        a8 = Lambda(_global_max_pooling)(a1)
    elif cla_mapping == 'global_avg_pooling':
        a8 = Lambda(_global_avg_pooling)(a1)
    else:
        raise Exception("Incorrect cla_mapping!")

    md = Model([in0], [a8])
    md.compile()
    md.summary(is_logging=True)

    # Callbacks.
    md_dir = os.path.join(workspace, "models", pp_data.get_filename(__file__))
    pp_data.create_folder(md_dir)
    save_model = SaveModel(md_dir, call_freq=100, type='iter')
    validation = Validation(te_x=te_x, te_y=te_y,
                            batch_size=100,
                            call_freq=50,
                            metrics=['binary_crossentropy'],
                            dump_path=None,
                            is_logging=True)
    callbacks = [save_model, validation]

    # Train.
    generator = DataGenerator(batch_size=20, type='train')
    loss_ary = []
    t1 = time.time()
    optimizer = Adam(1e-4)
    for (batch_x, batch_y) in generator.generate(xs=[tr_x], ys=[tr_y]):
        np.set_printoptions(threshold=np.nan, linewidth=1000, precision=2, suppress=True)
        loss = md.train_on_batch(batch_x, batch_y,
                                 loss_func='binary_crossentropy',
                                 optimizer=optimizer,
                                 callbacks=callbacks)
        loss_ary.append(loss)

        if md.iter_ % 50 == 0:
            # Evaluate training loss every several iterations.
            logging.info("iter: %d, tr loss: %f" % (md.iter_, np.mean(loss_ary)))
            logging.info("time: %s" % (time.time() - t1,))
            t1 = time.time()
            loss_ary = []
        if md.iter_ == 10001:
            # Stop after several iterations.
            break
# sparse label to 1-of-K categorical label
tr_y = sparse_to_categorical(tr_y, n_out)
va_y = sparse_to_categorical(va_y, n_out)
te_y = sparse_to_categorical(te_y, n_out)

### Build model
lay_in = InputLayer(in_shape=(n_in,))
a = Dense(n_out=n_hid, act='relu')(lay_in)
a = Dropout(p_drop=0.2)(a)
a = Dense(n_out=n_hid, act='relu')(a)
a = Dropout(p_drop=0.2)(a)
lay_out = Dense(n_out=n_out, act='softmax')(a)

md = Model(in_layers=[lay_in], out_layers=[lay_out])
md.compile()

# print summary info of model
md.summary()

### callbacks (optional)
# save model every n epoch (optional)
dump_fd = 'mnist_dnn_models'
if not os.path.exists(dump_fd):
    os.makedirs(dump_fd)
save_model = SaveModel(dump_fd=dump_fd, call_freq=2)

# validate model every n epoch (optional)
validation = Validation(tr_x=tr_x, tr_y=tr_y, va_x=None,
def train():
    # create empty folders in workspace
    create_folders()

    # prepare data
    tr_X, tr_y, _, va_X, va_y, va_na_list = pp_dev_data.GetSegData(fe_fd, agg_num, hop, fold)
    [n_songs, n_chunks, _, n_in] = tr_X.shape
    print tr_X.shape, tr_y.shape
    print va_X.shape, va_y.shape

    # model
    # classifier
    lay_in0 = InputLayer((n_chunks, agg_num, n_in), name='in0')   # shape: (n_songs, n_chunk, agg_num, n_in)
    lay_a1 = Flatten(3, name='a1')(lay_in0)                       # shape: (n_songs, n_chunk, agg_num*n_in)
    lay_a2 = Dense(n_hid, act='relu')(lay_a1)                     # shape: (n_songs, n_chunk, n_hid)
    lay_a3 = Dropout(0.2)(lay_a2)
    lay_a4 = Dense(n_hid, act='relu')(lay_a3)
    lay_a5 = Dropout(0.2)(lay_a4)
    lay_a6 = Dense(n_hid, act='relu')(lay_a5)
    lay_a7 = Dropout(0.2)(lay_a6)
    lay_a8 = Dense(n_out, act='sigmoid', b_init=-1, name='a8')(lay_a7)   # shape: (n_songs, n_chunk, n_out)

    # detector
    lay_b1 = Lambda(mean_pool)(lay_in0)                           # shape: (n_songs, n_chunk, n_out)
    lay_b8 = Dense(n_out, act='sigmoid', name='b4')(lay_b1)       # shape: (n_songs, n_chunk, n_out)

    md = Model(in_layers=[lay_in0], out_layers=[lay_a8, lay_b8], any_layers=[])
    md.compile()
    md.summary()

    # callback, write out detection scores to .txt each epoch
    dump_fd = cfg.scrap_fd + '/Results_dev/jdc_eer/fold' + str(fold)
    print_scores = cb_eer.PrintScoresDetectionClassification(va_X, va_na_list, dump_fd, call_freq=1)

    # callback, print loss each epoch
    validation = Validation(tr_x=tr_X, tr_y=tr_y,
                            va_x=va_X, va_y=va_y,
                            te_x=None, te_y=None,
                            metrics=[loss_func],
                            call_freq=1,
                            dump_path=None)

    # callback, save model every N epochs
    save_model = SaveModel(dump_fd=cfg.scrap_fd + '/Md_dev_jdc', call_freq=10)

    # combine all callbacks
    callbacks = [validation, save_model, print_scores]

    # optimizer
    # optimizer = SGD( 0.01, 0.95 )
    optimizer = Adam(2e-4)

    # fit model
    md.fit(x=tr_X, y=tr_y,
           batch_size=10,
           n_epochs=301,
           loss_func=loss_func,
           optimizer=optimizer,
           callbacks=callbacks)
def train():
    # load data
    batch_size = 128
    tr_X, tr_y, va_X, va_y, te_X, te_y = pp_data.load_data()
    n_batches = int(tr_X.shape[0] / batch_size)

    # normalize data between [-1, 1]
    tr_X = (tr_X - 0.5) * 2
    tr_X = tr_X.reshape((50000, 1, 28, 28))
    print tr_X.shape

    # generator
    a0 = InputLayer(100)
    a1 = Dense(128 * 7 * 7, act='linear')(a0)
    a1 = BN(axis=0)(a1)
    a1 = Reshape(out_shape=(128, 7, 7))(a1)
    a1 = Convolution2D(64, 5, 5, act='linear', border_mode=(2, 2))(a1)
    a1 = BN(axis=(0, 2, 3))(a1)
    a1 = Activation('leaky_relu')(a1)
    a1 = UpSampling2D(size=(2, 2))(a1)
    a1 = Convolution2D(32, 5, 5, act='linear', border_mode=(2, 2))(a1)
    a1 = BN(axis=(0, 2, 3))(a1)
    a1 = Activation('leaky_relu')(a1)
    a1 = UpSampling2D(size=(2, 2))(a1)
    a8 = Convolution2D(1, 5, 5, act='tanh', border_mode=(2, 2), name='a8')(a1)
    g = Model([a0], [a8])
    g.compile()
    g.summary()

    # discriminator
    b0 = InputLayer((1, 28, 28), name='b0')
    b1 = Convolution2D(64, 5, 5, act='relu', border_mode=(0, 0), name='b1')(b0)
    b1 = MaxPooling2D(pool_size=(2, 2))(b1)
    b1 = Convolution2D(128, 5, 5, act='relu', border_mode=(0, 0))(b1)
    b1 = MaxPooling2D(pool_size=(2, 2))(b1)
    b1 = Flatten()(b1)
    b8 = Dense(1, act='sigmoid')(b1)
    d = Model([b0], [b8])
    d.compile()
    d.summary()

    # discriminator on generator
    d_on_g = Model()
    d.set_trainability(False)
    d_on_g.add_models([g, d])
    d.set_trainability(True)
    d_on_g.joint_models('a8', 'b0')
    d_on_g.compile()
    d_on_g.summary()

    # optimizer
    opt_d = Adam(1e-4)
    opt_g = Adam(1e-4)

    # optimization function
    f_train_d = d.get_optimization_func(target_dims=[2],
                                        loss_func='binary_crossentropy',
                                        optimizer=opt_d,
                                        clip=None)
    f_train_g = d_on_g.get_optimization_func(target_dims=[2],
                                             loss_func='binary_crossentropy',
                                             optimizer=opt_g,
                                             clip=None)

    noise = np.zeros((batch_size, 100))
    for epoch in range(100):
        print epoch
        for index in range(n_batches):
            # concatenate generated img and real img to train discriminator
            noise = np.random.uniform(-1, 1, (batch_size, 100))
            batch_x = tr_X[index * batch_size:(index + 1) * batch_size]
            batch_gx = g.predict(noise)
            batch_x_all = np.concatenate((batch_x, batch_gx))

            # assign real img label as 1, generated img label as 0
            batch_y_all = np.array([1] * batch_size + [0] * batch_size)
            batch_y_all = batch_y_all.reshape((batch_y_all.shape[0], 1))

            # save out generated img
            if index % 50 == 0:
                image = pp_data.combine_images(batch_gx)
                image = image * 127.5 + 127.5
                if not os.path.exists("img_dcgan"):
                    os.makedirs("img_dcgan")
                Image.fromarray(image.astype(np.uint8)).save(
                    "img_dcgan/" + str(epoch) + "_" + str(index) + ".png")

            # train discriminator
            d_loss = d.train_on_batch(f_train_d, batch_x_all, batch_y_all)

            # assign generated img label as 1, so as to deceive discriminator
            noise = np.random.uniform(-1, 1, (batch_size, 100))
            batch_y_all = np.array([1] * batch_size)
            batch_y_all = batch_y_all.reshape((batch_y_all.shape[0], 1))

            # train generator
            g_loss = d_on_g.train_on_batch(f_train_g, noise, batch_y_all)
            print index, "d_loss:", d_loss, "\tg_loss:", g_loss
# sparse target to categorical target
tr_y = sparse_to_categorical(tr_y, n_out)
te_y = sparse_to_categorical(te_y, n_out)

### build model
def mean_pool(input):
    return K.mean(input, axis=1)

lay_in = InputLayer(in_shape=(max_len,))
a = Embedding(n_words, n_proj)(lay_in)
a = LSTM(n_out=n_hid, act='tanh', return_sequences=True)(a)
a = LSTM(n_out=n_hid, act='tanh', return_sequences=True)(a)
a = Lambda(mean_pool)(a)
lay_out = Dense(n_out=n_out, act='softmax')(a)

md = Model(in_layers=[lay_in], out_layers=[lay_out])
md.compile()

# print summary info of model
md.summary()

### callbacks (optional)
# save model every n epoch (optional)
dump_fd = 'imdb_lstm_models'
if not os.path.exists(dump_fd):
    os.makedirs(dump_fd)
save_model = SaveModel(dump_fd=dump_fd, call_freq=2)

# validate model every n epoch (optional)
validation = Validation(tr_x=tr_x, tr_y=tr_y,
                        va_x=None, va_y=None,
                        te_x=te_x, te_y=te_y,
n_out = 10

# Sparse label to 1-of-K categorical label
tr_y = sparse_to_categorical(tr_y, n_out)
va_y = sparse_to_categorical(va_y, n_out)
te_y = sparse_to_categorical(te_y, n_out)

# Build model
lay_in = InputLayer(in_shape=(n_in,))
a = Dense(n_out=n_hid, act='relu')(lay_in)
a = Dropout(p_drop=0.2)(a)
a = Dense(n_out=n_hid, act='relu')(a)
a = Dropout(p_drop=0.2)(a)
lay_out = Dense(n_out=n_out, act='softmax')(a)

md = Model(in_layers=[lay_in], out_layers=[lay_out])
md.compile()
md.summary()

# Callbacks
dump_fd = 'train_on_batch_models'
if not os.path.exists(dump_fd):
    os.makedirs(dump_fd)
save_model = SaveModel(dump_fd=dump_fd, call_freq=200, type='iter')
validation = Validation(tr_x=tr_x, tr_y=tr_y,
                        va_x=None, va_y=None,
                        te_x=te_x, te_y=te_y,
                        batch_size=500,
                        metrics=['categorical_error'],
                        call_freq=200,
                        type='iter')
n_out = 10

# sparse label to 1-of-K categorical label
tr_y = sparse_to_categorical(tr_y, n_out)
va_y = sparse_to_categorical(va_y, n_out)
te_y = sparse_to_categorical(te_y, n_out)

### Build model
lay_in = InputLayer(in_shape=(n_in,))
a = Dense(n_out=n_hid, act='relu')(lay_in)
lay_mid = Dense(n_out=n_out, act='relu')(a)
a = Dense(n_out=n_hid, act='relu')(lay_mid)
lay_out = Dense(n_out=n_out, act='softmax')(a)

md = Model(in_layers=[lay_in], out_layers=[lay_out], any_layers=[lay_mid])
md.compile()
md.summary()

# validate model every n epoch (optional)
validation = Validation(tr_x=tr_x, tr_y=tr_y,
                        va_x=None, va_y=None,
                        te_x=te_x, te_y=te_y,
                        batch_size=500,
                        metrics=['categorical_error', your_metric],
                        call_freq=1)

# callbacks function