def train():
    # prepare data
    tr_X, tr_y, _, te_X, te_y, te_na_list = pp_dev_data.GetAllData(
        fe_fd, agg_num, hop, fold)
    [batch_num, n_time, n_freq] = tr_X.shape
    print tr_X.shape, tr_y.shape
    print te_X.shape, te_y.shape

    # build model
    seq = Sequential()
    seq.add(InputLayer((n_time, n_freq)))
    seq.add(Flatten())  # flatten each sample from 2-d (n_time, n_freq) to 1-d (n_time*n_freq,)
    seq.add(Dropout(0.1))
    seq.add(Dense(n_hid, act=act))
    seq.add(Dropout(0.1))
    seq.add(Dense(n_hid, act=act))
    seq.add(Dropout(0.1))
    seq.add(Dense(n_hid, act=act))
    seq.add(Dropout(0.1))
    seq.add(Dense(n_out, act='sigmoid'))
    md = seq.compile()
    md.summary()

    # optimizer
    optimizer = Adam(1e-4)

    # callbacks
    # tr_err, te_err are frame-based. To get event-based err, run recognize.py
    validation = Validation(tr_x=tr_X, tr_y=tr_y,
                            va_x=None, va_y=None,
                            te_x=te_X, te_y=te_y,
                            batch_size=2000,
                            metrics=['binary_crossentropy'],
                            call_freq=1,
                            dump_path=None)

    # save model
    pp_dev_data.CreateFolder(cfg.dev_md_fd)
    save_model = SaveModel(dump_fd=cfg.dev_md_fd, call_freq=10)

    # callbacks
    callbacks = [validation, save_model]

    # fit model
    md.fit(x=tr_X, y=tr_y,
           batch_size=2000,
           n_epochs=100,
           loss_func='binary_crossentropy',
           optimizer=optimizer,
           callbacks=callbacks,
           verbose=1)
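# The GetAllData call above segments each 2-d feature matrix (n_frames, n_freq)
# into overlapping chunks of agg_num frames with stride hop. A minimal numpy
# sketch of that aggregation (illustrative only; the pp_dev_data helper is the
# authoritative implementation):
import numpy as np

def aggregate_frames(x2d, agg_num, hop):
    # x2d: (n_frames, n_freq) -> (n_chunks, agg_num, n_freq)
    chunks = [x2d[i:i + agg_num] for i in xrange(0, len(x2d) - agg_num + 1, hop)]
    return np.array(chunks)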
def train():
    # load data
    tr_X2d_mix, tr_X3d_mix, tr_y2d_chn0, tr_y2d_chn1, tr_y3d_chn0, tr_y3d_chn1 = pp_data.LoadData(
        cfg.fe_fft_fd, n_time, hop, na_list=cfg.tr_list)

    # build model
    lay_in0 = InputLayer(in_shape=(n_time, n_freq), name='in1')
    lay_a0 = Flatten()(lay_in0)
    lay_a1 = Dense(n_hid, act='relu', name='a1')(lay_a0)
    lay_a2 = Dropout(0.2, name='a2')(lay_a1)
    lay_a3 = Dense(n_hid, act='relu', name='a3')(lay_a2)
    lay_a4 = Dropout(0.2, name='a4')(lay_a3)
    lay_a5 = Dense(n_hid, act='relu', name='a5')(lay_a4)
    lay_a6 = Dropout(0.2, name='a6')(lay_a5)
    lay_b1 = Dense(n_freq, act='sigmoid', name='a7')(lay_a6)  # mask_left, shape: (N, n_freq)
    lay_c1 = Dense(n_freq, act='sigmoid', name='a8')(lay_a6)  # mask_right, shape: (N, n_freq)
    lay_out_b = Lambda(mul, name='out_b')([lay_b1, lay_in0])  # out_left, shape: (N, n_freq)
    lay_out_c = Lambda(mul, name='out_c')([lay_c1, lay_in0])  # out_right, shape: (N, n_freq)

    md = Model(in_layers=[lay_in0],
               out_layers=[lay_out_b, lay_out_c],
               any_layers=[lay_in0, lay_b1, lay_c1])
    md.summary()

    # validation
    validation = Validation(tr_x=[np.abs(tr_y3d_chn0) + np.abs(tr_y3d_chn1)],
                            tr_y=[np.abs(tr_y2d_chn0), np.abs(tr_y2d_chn1)],
                            batch_size=100,
                            metrics=[loss_func],
                            call_freq=1,
                            dump_path=None)

    # save model
    if not os.path.exists(cfg.md_fd):
        os.makedirs(cfg.md_fd)
    save_model = SaveModel(dump_fd=cfg.md_fd, call_freq=2)

    # callbacks
    callbacks = [validation, save_model]

    # optimizer
    optimizer = Adam(1e-3)

    # fit model
    md.fit([np.abs(tr_y3d_chn0) + np.abs(tr_y3d_chn1)],
           [np.abs(tr_y2d_chn0), np.abs(tr_y2d_chn1)],
           batch_size=100,
           n_epochs=100,
           loss_func='mse',
           optimizer=optimizer,
           callbacks=callbacks,
           verbose=1)
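# The Lambda layers above assume a `mul` function that applies each predicted
# mask to the mixture input. A minimal sketch, assuming hat's Lambda receives
# its input layers as a list of tensors:
def mul(inputs):
    mask, mix = inputs
    return mask * mix  # element-wise masking (broadcast if shapes differ)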
tr_X, va_X, te_X = reshapeX(tr_X), reshapeX(va_X), reshapeX(te_X)

# init params
n_in = 784
n_hid = 500
n_out = 10

# sparse label to 1-of-K categorical label
tr_y = sparse_to_categorical(tr_y, n_out)
va_y = sparse_to_categorical(va_y, n_out)
te_y = sparse_to_categorical(te_y, n_out)

### Build model
act = 'relu'
seq = Sequential()
seq.add(InputLayer(in_shape=(1, 28, 28)))
seq.add(Convolution2D(n_outfmaps=32, n_row=3, n_col=3, act='relu'))
seq.add(MaxPool2D(pool_size=(2, 2)))
seq.add(Convolution2D(n_outfmaps=32, n_row=3, n_col=3, act='relu'))
seq.add(MaxPool2D(pool_size=(2, 2)))
seq.add(Dropout(0.2))
seq.add(Flatten())
seq.add(Dense(n_hid, act='relu'))
seq.add(Dropout(0.5))
seq.add(Dense(n_hid, act='relu'))
seq.add(Dense(n_out, act='softmax'))
md = seq.combine()

# print summary info of model
md.summary()
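# reshapeX above converts flat MNIST vectors into image tensors for the CNN.
# A minimal sketch, assuming the loader yields (n_samples, 784) arrays:
def reshapeX(X):
    return X.reshape((len(X), 1, 28, 28))  # (n, 1 channel, 28 rows, 28 cols)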
# load & prepare data
tr_x, tr_y, va_x, va_y, te_x, te_y = load_mnist()

# init params
n_in = 784
n_hid = 500
n_out = 10

# sparse label to 1-of-K categorical label
tr_y = sparse_to_categorical(tr_y, n_out)
va_y = sparse_to_categorical(va_y, n_out)
te_y = sparse_to_categorical(te_y, n_out)

### Build model
lay_in = InputLayer(in_shape=(n_in,))
a = Dense(n_out=n_hid, act='relu', name='dense1')(lay_in)
a = Dropout(p_drop=0.2)(a)
a = Dense(n_out=n_hid, act='relu', name='dense2')(a)
a = Dropout(p_drop=0.2)(a)
lay_out = Dense(n_out=n_out, act='softmax')(a)

md = Model(in_layers=[lay_in], out_layers=[lay_out])
md.compile()
md.summary()

# observe forward
observe_nodes = [
    md.find_layer('dense1').output_,
    md.find_layer('dense2').output_
]
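# sparse_to_categorical one-hot encodes integer labels; hat ships its own
# version. A minimal numpy equivalent for reference (name is illustrative):
import numpy as np

def one_hot(y, n_out):
    out = np.zeros((len(y), n_out))
    out[np.arange(len(y)), np.asarray(y, dtype=int)] = 1.
    return out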
act = 'relu'
n_hid = 500
fold = 1
n_out = len(cfg.labels)

# prepare data
tr_X, tr_y, _ = pp_dev_data.GetAllData(cfg.dev_fe_mel_fd, agg_num, hop, fold=None)
[batch_num, n_time, n_freq] = tr_X.shape
print tr_X.shape, tr_y.shape

# build model
seq = Sequential()
seq.add(InputLayer((n_time, n_freq)))
seq.add(Flatten())  # flatten each sample from 2-d (n_time, n_freq) to 1-d (n_time*n_freq,)
seq.add(Dropout(0.1))
seq.add(Dense(n_hid, act=act))
seq.add(Dropout(0.1))
seq.add(Dense(n_hid, act=act))
seq.add(Dropout(0.1))
seq.add(Dense(n_hid, act=act))
seq.add(Dropout(0.1))
seq.add(Dense(n_out, act='sigmoid'))
md = seq.compile()
md.summary()

# validation
# tr_err, te_err are frame-based. To get event-based err, run recognize.py
validation = Validation(tr_x=tr_X,
def train():
    # create empty folders in workspace
    create_folders()

    # get dev & eva data
    tr_X, tr_y, _, _, _, _ = pp_dev_data.GetSegData(dev_fe_fd, agg_num, hop, fold=None)
    te_X, te_na_list = pp_eva_data.GetEvaSegData(eva_fe_fd, agg_num, hop)
    [n_songs, n_chunks, _, n_in] = tr_X.shape
    print tr_X.shape, tr_y.shape
    print te_X.shape

    # model
    lay_in0 = InputLayer((n_chunks, agg_num, n_in), name='in0')  # shape: (n_songs, n_chunk, agg_num, n_in)
    lay_a1 = Flatten(3, name='a1')(lay_in0)  # shape: (n_songs, n_chunk, agg_num*n_in)
    lay_a2 = Dense(n_hid, act='relu')(lay_a1)  # shape: (n_songs, n_chunk, n_hid)
    lay_a3 = Dropout(0.2)(lay_a2)
    lay_a4 = Dense(n_hid, act='relu')(lay_a3)
    lay_a5 = Dropout(0.2)(lay_a4)
    lay_a6 = Dense(n_hid, act='relu')(lay_a5)
    lay_a7 = Dropout(0.2)(lay_a6)
    lay_a8 = Dense(n_out, act='sigmoid', b_init=-1, name='a8')(lay_a7)  # shape: (n_songs, n_chunk, n_out)

    md = Model(in_layers=[lay_in0], out_layers=[lay_a8], any_layers=[])
    md.compile()
    md.summary()

    # callback, write out detection scores to .txt each epoch
    dump_fd = cfg.scrap_fd + '/Results_eva/bob_eer'
    print_scores = cb_eer.PrintScoresBagOfBlocks(te_X, te_na_list, dump_fd, call_freq=1)

    # callback, print loss each epoch
    validation = Validation(tr_x=tr_X, tr_y=tr_y,
                            va_x=None, va_y=None,
                            te_x=None, te_y=None,
                            metrics=[loss_func],
                            call_freq=1,
                            dump_path=None)

    # callback, save model every N epochs
    save_model = SaveModel(dump_fd=cfg.scrap_fd + '/Md_eva_bob', call_freq=10)

    # combine all callbacks
    callbacks = [validation, save_model, print_scores]

    # optimizer
    optimizer = Adam(2e-4)

    # fit model
    md.fit(x=tr_X, y=tr_y,
           batch_size=10,
           n_epochs=301,
           loss_func=loss_func,
           optimizer=optimizer,
           callbacks=callbacks)
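# Flatten(3, ...) above collapses the 4-d song tensor to 3-d, merging the
# agg_num and n_in axes while keeping the song and chunk axes. An illustrative
# numpy equivalent of the shape change:
import numpy as np
x = np.zeros((4, 10, 11, 40))        # (n_songs, n_chunks, agg_num, n_in)
y = x.reshape(x.shape[:2] + (-1,))   # -> (4, 10, 440)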
# reshape X to shape: (n_pictures, n_fmaps=3, n_row=32, n_col=32)
tr_X = reshape_img_for_cnn(tr_X)
te_X = reshape_img_for_cnn(te_X)

# init params
n_out = 10

# sparse label to 1-of-K categorical label
tr_y = sparse_to_categorical(tr_y, n_out)
te_y = sparse_to_categorical(te_y, n_out)
print tr_X.shape
print tr_y.shape

### Build model
seq = Sequential()
seq.add(InputLayer(in_shape=(3, 32, 32)))
seq.add(Convolution2D(n_outfmaps=64, n_row=3, n_col=3, act='linear', border_mode=(1, 1)))
seq.add(BN(axes=(0, 2, 3)))
seq.add(Activation('relu'))
seq.add(Convolution2D(n_outfmaps=64, n_row=3, n_col=3, act='linear', border_mode=(1, 1)))
                       n_row=3,
                       n_col=3,
                       act='linear',
                       border_mode=(1, 1),
                       strides=(1, 1))(a3)
    a5 = BN(axes=(0, 2, 3))(a4)
    a6 = Activation('relu')(a5)
    a7 = Lambda(add_layers)([shortcut, a6])
    a0 = a7
    return a0

x0 = InputLayer(in_shape=(3, 32, 32))
x1 = Convolution2D(n_outfmaps=64, n_row=3, n_col=3, act='relu', border_mode=(1, 1))(x0)
x2 = add_n_blocks(x1, n_outfmaps=64, n_repeat=3, is_first_layer=True)
x3 = add_n_blocks(x2, n_outfmaps=128, n_repeat=4, is_first_layer=False)
x4 = add_n_blocks(x3, n_outfmaps=256, n_repeat=6, is_first_layer=False)
x5 = add_n_blocks(x4, n_outfmaps=512, n_repeat=3, is_first_layer=False)
y1 = Lambda(mean_pool)(x5)
y2 = Flatten()(y1)
y3 = Dense(n_out, act='softmax')(y2)
md = Model([x0], [y3])
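# The Lambdas above assume an `add_layers` function for the residual sum and a
# `mean_pool` for global average pooling. Minimal sketches, assuming the
# Theano-style backend K used elsewhere in these examples:
def add_layers(inputs):
    a, b = inputs
    return a + b                       # element-wise residual addition

def mean_pool(input):
    return K.mean(input, axis=(2, 3))  # average over the two spatial axes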
# load & prepare data
tr_x, tr_y, va_x, va_y, te_x, te_y = load_mnist()

# init params
n_in = 784
n_hid = 500
n_out = 10

# sparse label to 1-of-K categorical label
tr_y = sparse_to_categorical(tr_y, n_out)
va_y = sparse_to_categorical(va_y, n_out)
te_y = sparse_to_categorical(te_y, n_out)

### Build model
lay_in1 = InputLayer(in_shape=(n_in,))
lay_in2 = InputLayer(in_shape=(n_in,))
a1 = Dense(n_out=n_hid, act='relu')(lay_in1)
a2 = Dense(n_out=n_hid, act='relu')(lay_in2)
b = Lambda(merge)([a1, a2])
b = Dense(n_out=n_hid, act='relu')(b)
lay_out1 = Dense(n_out=n_out, act='softmax')(b)
lay_out2 = Dense(n_out=n_out, act='softmax')(b)

md = Model(in_layers=[lay_in1, lay_in2], out_layers=[lay_out1, lay_out2])
md.compile()
md.summary()

# validate model every n epoch (optional)
validation = Validation(tr_x=[tr_x, tr_x],
                        tr_y=[tr_y, tr_y],
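# `merge` above joins the two branch activations into one tensor. One plausible
# definition (an assumption; element-wise summation and concatenation are both
# common choices here):
def merge(inputs):
    a1, a2 = inputs
    return a1 + a2  # element-wise sum of the two hidden activations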
def train_cv_model():
    # init path
    if type == 'home':
        fe_fd = cfg.dev_fe_mel_home_fd
        labels = cfg.labels_home
        lb_to_id = cfg.lb_to_id_home
        tr_txt = cfg.dev_evaluation_fd + '/home_fold' + str(fold) + '_train.txt'
        te_txt = cfg.dev_evaluation_fd + '/home_fold' + str(fold) + '_evaluate.txt'
    if type == 'resi':
        fe_fd = cfg.dev_fe_mel_resi_fd
        labels = cfg.labels_resi
        lb_to_id = cfg.lb_to_id_resi
        tr_txt = cfg.dev_evaluation_fd + '/residential_area_fold' + str(fold) + '_train.txt'
        te_txt = cfg.dev_evaluation_fd + '/residential_area_fold' + str(fold) + '_evaluate.txt'

    n_out = len(labels)

    # load data to list
    tr_X, tr_y = pp_dev_data.LoadAllData(fe_fd, tr_txt, lb_to_id, agg_num, hop)
    tr_y = sparse_to_categorical(tr_y, n_out)
    print tr_X.shape
    print tr_y.shape
    n_freq = tr_X.shape[2]

    # build model
    seq = Sequential()
    seq.add(InputLayer((agg_num, n_freq)))
    seq.add(Flatten())
    seq.add(Dense(n_hid, act='relu'))
    seq.add(Dropout(0.1))
    seq.add(Dense(n_hid, act='relu'))
    seq.add(Dropout(0.1))
    seq.add(Dense(n_hid, act='relu'))
    seq.add(Dropout(0.1))
    seq.add(Dense(n_out, 'sigmoid'))
    md = seq.combine()

    # print summary info of model
    md.summary()

    # optimization method
    optimizer = Adam(1e-3)

    # callbacks (optional)
    # save model every n epoch
    pp_dev_data.CreateFolder(cfg.dev_md_fd)
    save_model = SaveModel(dump_fd=cfg.dev_md_fd, call_freq=5)

    # validate model every n epoch
    validation = Validation(tr_x=tr_X, tr_y=tr_y,
                            va_x=None, va_y=None,
                            te_x=None, te_y=None,
                            metrics=['binary_crossentropy'],
                            call_freq=1,
                            dump_path=None)

    # callbacks function
    callbacks = [validation, save_model]

    # train model
    md.fit(x=tr_X, y=tr_y,
           batch_size=20,
           n_epochs=100,
           loss_func='binary_crossentropy',
           optimizer=optimizer,
           callbacks=callbacks)
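# lb_to_id maps each label string to the integer class index consumed by
# sparse_to_categorical. The cfg module defines the real mappings; one
# plausible construction, for reference:
lb_to_id_home = {lb: i for i, lb in enumerate(cfg.labels_home)}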
### load & prepare data
tr_X, tr_y, va_X, va_y, te_X, te_y = load_data()

# init params
n_in = 784
n_hid = 500
n_out = 10

# sparse label to 1-of-K categorical label
tr_y = sparse_to_categorical(tr_y, n_out)
va_y = sparse_to_categorical(va_y, n_out)
te_y = sparse_to_categorical(te_y, n_out)

### Build model
seq = Sequential()
seq.add(InputLayer(n_in))
seq.add(Dense(n_hid, act='relu'))
seq.add(Dropout(p_drop=0.2))
seq.add(Dense(n_hid, act='relu'))
seq.add(Dropout(p_drop=0.2))
seq.add(Dense(n_out, act='softmax'))
md = seq.combine()

# print summary info of model
md.summary()
# md.plot_connection()

### optimization method
optimizer = Adam(lr=0.001)  # Try SGD, Adagrad, Rmsprop, etc. instead

### callbacks (optional)
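# As the inline comment suggests, other hat optimizers drop in the same way;
# Rmsprop, for instance, is used in the LSTM example at the end of this section:
# optimizer = Rmsprop(0.001)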
def train():
    _loss_func = _jdc_loss_func0

    # load data
    t1 = time.time()
    data = cPickle.load(open(cfg.scrap_fd + '/denoise_enhance_pool_fft_all0.p', 'rb'))
    tr_X, tr_mask, tr_y, tr_na_list, te_X, te_mask, te_y, te_na_list = \
        data['tr_X'], data['tr_mask'], data['tr_y'], data['tr_na_list'], \
        data['te_X'], data['te_mask'], data['te_y'], data['te_na_list']
    t2 = time.time()

    tr_X = pp_data.wipe_click(tr_X, tr_na_list)
    te_X = pp_data.wipe_click(te_X, te_na_list)

    # balance data
    tr_X, tr_mask, tr_y = pp_data.BalanceData2(tr_X, tr_mask, tr_y)
    te_X, te_mask, te_y = pp_data.BalanceData2(te_X, te_mask, te_y)
    print tr_X.shape, tr_y.shape, te_X.shape, te_y.shape
    [n_songs, n_chunks, n_freq] = te_X.shape

    tr_y = tr_y.reshape((len(tr_y), 1))
    te_y = te_y.reshape((len(te_y), 1))

    # jdc model
    # classifier
    lay_z0 = InputLayer((n_chunks,))  # shape: (n_songs, n_chunks); keeps the lengths of songs
    lay_in0 = InputLayer((n_chunks, n_freq), name='in0')  # shape: (n_songs, n_chunk, n_freq)
    lay_a1 = lay_in0
    # lay_a1 = Lambda(_conv2d)(lay_a1)
    lay_a1 = Lambda(_reshape_3d_to_4d)(lay_a1)
    lay_a1 = Convolution2D(32, 3, 3, act='relu', init_type='glorot_uniform',
                           border_mode=(1, 1), strides=(1, 1), name='a11')(lay_a1)
    lay_a1 = Dropout(0.2)(lay_a1)
    lay_a1 = MaxPool2D(pool_size=(1, 2))(lay_a1)
    lay_a1 = Convolution2D(64, 3, 3, act='relu', init_type='glorot_uniform',
                           border_mode=(1, 1), strides=(1, 1), name='a12')(lay_a1)
    lay_a1 = Dropout(0.2)(lay_a1)
    lay_a1 = MaxPool2D(pool_size=(1, 2))(lay_a1)
    lay_a1 = Lambda(_reshape_4d_to_3d)(lay_a1)
    lay_a1 = Dense(n_hid, act='relu', name='a2')(lay_a1)  # shape: (n_songs, n_chunk, n_hid)
    lay_a1 = Dropout(0.2)(lay_a1)
    lay_a1 = Dense(n_hid, act='relu', name='a4')(lay_a1)
    lay_a1 = Dropout(0.2)(lay_a1)
    lay_a1 = Dense(n_hid, act='relu', name='a6')(lay_a1)
    lay_a1 = Dropout(0.2)(lay_a1)
    lay_a8 = Dense(n_out, act='sigmoid', init_type='zeros', b_init=0, name='a8')(lay_a1)  # shape: (n_songs, n_chunk, n_out)

    # detector
    lay_b1 = lay_in0  # shape: (n_songs, n_chunk, n_freq)
    lay_b2 = Lambda(_conv2d)(lay_b1)  # shape: (n_songs, n_chunk, n_freq)
    lay_b2 = Lambda(_reshape_3d_to_4d)(lay_b1)
    lay_b2 = MaxPool2D(pool_size=(1, 2))(lay_b2)
    lay_b2 = Lambda(_reshape_4d_to_3d)(lay_b2)
    lay_b8 = Dense(n_out, act='hard_sigmoid', init_type='zeros', b_init=-2.3, name='b8')(lay_b2)

    md = Model(in_layers=[lay_in0, lay_z0], out_layers=[lay_a8, lay_b8], any_layers=[])

    # print summary info of model
    md.summary()

    # callbacks (optional)
    # save model every n epoch (optional)
    pp_data.CreateFolder(cfg.wbl_dev_md_fd)
    pp_data.CreateFolder(cfg.wbl_dev_md_fd + '/cnn_fft')
    save_model = SaveModel(dump_fd=cfg.wbl_dev_md_fd + '/cnn_fft', call_freq=20, type='iter')
    validation = Validation(tr_x=None, tr_y=None,
                            va_x=None, va_y=None,
                            te_x=[te_X, te_mask], te_y=te_y,
                            batch_size=100,
                            metrics=[_loss_func],
                            call_freq=20,
                            dump_path=None,
                            type='iter')

    # callbacks function
    callbacks = [save_model, validation]

    # EM training
    md.set_gt_nodes(tr_y)
    md.find_layer('a11').set_trainable_params(['W', 'b'])
    md.find_layer('a12').set_trainable_params(['W', 'b'])
    md.find_layer('a2').set_trainable_params(['W', 'b'])
    md.find_layer('a4').set_trainable_params(['W', 'b'])
    md.find_layer('a6').set_trainable_params(['W', 'b'])
    md.find_layer('a8').set_trainable_params(['W', 'b'])
    md.find_layer('b8').set_trainable_params([])
    opt_classifier = Adam(1e-3)
    f_classify = md.get_optimization_func(loss_func=_loss_func,
                                          optimizer=opt_classifier,
                                          clip=None)
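# _reshape_3d_to_4d / _reshape_4d_to_3d insert and remove a feature-map axis so
# the 3-d song tensor can pass through Conv2D. Minimal sketches, assuming the
# Lambda body receives a raw Theano tensor (hat is Theano-backed):
def _reshape_3d_to_4d(x):
    # (n_songs, n_chunks, n_freq) -> (n_songs, 1, n_chunks, n_freq)
    return x.dimshuffle(0, 'x', 1, 2)

def _reshape_4d_to_3d(x):
    # (n_songs, n_fmaps, n_chunks, n_freq) -> (n_songs, n_chunks, n_fmaps*n_freq)
    return x.dimshuffle(0, 2, 1, 3).flatten(3)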
    md.find_layer('a11').set_trainable_params([])
    md.find_layer('a12').set_trainable_params([])
    md.find_layer('a2').set_trainable_params([])
    md.find_layer('a4').set_trainable_params([])
    md.find_layer('a6').set_trainable_params([])
    md.find_layer('a8').set_trainable_params([])
    md.find_layer('b8').set_trainable_params(['W', 'b'])
    opt_detector = Adam(1e-3)
    f_detector = md.get_optimization_func(loss_func=_loss_func,
                                          optimizer=opt_detector,
                                          clip=None)

    _x, _y = md.preprocess_data([tr_X, tr_mask], tr_y, shuffle=True)
    for i1 in xrange(500):
        print '-----------------------'
        opt_classifier.reset()
        md.do_optimization_func_iter_wise(f_classify, _x, _y,
                                          batch_size=100,
                                          n_iters=80,
                                          callbacks=callbacks,
                                          verbose=1)
        print '-----------------------'
        opt_detector.reset()
        md.do_optimization_func_iter_wise(f_detector, _x, _y,
                                          batch_size=100,
                                          n_iters=20,
                                          callbacks=callbacks,
                                          verbose=1)
tr_x = reshape(tr_x)
va_x = reshape(va_x)
te_x = reshape(te_x)

# init params
n_in = 784
n_hid = 500
n_out = 10

# sparse label to 1-of-K categorical label
tr_y = sparse_to_categorical(tr_y, n_out)
va_y = sparse_to_categorical(va_y, n_out)
te_y = sparse_to_categorical(te_y, n_out)

### Build model
lay_in = InputLayer(in_shape=(1, 28, 28))
a = Conv2D(n_outfmaps=32, n_row=3, n_col=3, act='linear',
           strides=(1, 1), border_mode=(1, 1))(lay_in)
a = BN(axis=(0, 2, 3))(a)
a = Activation('relu')(a)
a = MaxPooling2D(pool_size=(2, 2))(a)
a = Dropout(p_drop=0.2)(a)
a = Conv2D(n_outfmaps=64, n_row=3, n_col=3, act='linear',
           strides=(1, 1), border_mode=(1, 1))(a)
a = BN(axis=(0, 2, 3))(a)
a = Activation('relu')(a)
a = MaxPooling2D(pool_size=(2, 2))(a)
a = Dropout(p_drop=0.2)(a)
a = Conv2D(n_outfmaps=128, n_row=3, n_col=3, act='linear',
           strides=(1, 1), border_mode=(1, 1))(a)
a = BN(axis=(0, 2, 3))(a)
def train():
    # create empty folders in workspace
    create_folders()

    # prepare data
    tr_X, tr_y, _, va_X, va_y, va_na_list = pp_dev_data.GetSegData(fe_fd, agg_num, hop, fold)
    [n_songs, n_chunks, _, n_in] = tr_X.shape
    print tr_X.shape, tr_y.shape
    print va_X.shape, va_y.shape

    # model
    # classifier
    lay_in0 = InputLayer((n_chunks, agg_num, n_in), name='in0')  # shape: (n_songs, n_chunk, agg_num, n_in)
    lay_a1 = Flatten(3, name='a1')(lay_in0)  # shape: (n_songs, n_chunk, agg_num*n_in)
    lay_a2 = Dense(n_hid, act='relu')(lay_a1)  # shape: (n_songs, n_chunk, n_hid)
    lay_a3 = Dropout(0.2)(lay_a2)
    lay_a4 = Dense(n_hid, act='relu')(lay_a3)
    lay_a5 = Dropout(0.2)(lay_a4)
    lay_a6 = Dense(n_hid, act='relu')(lay_a5)
    lay_a7 = Dropout(0.2)(lay_a6)
    lay_a8 = Dense(n_out, act='sigmoid', b_init=-1, name='a8')(lay_a7)  # shape: (n_songs, n_chunk, n_out)

    # detector
    lay_b1 = Lambda(mean_pool)(lay_in0)  # shape: (n_songs, n_chunk, n_in)
    lay_b8 = Dense(n_out, act='sigmoid', name='b4')(lay_b1)  # shape: (n_songs, n_chunk, n_out)

    md = Model(in_layers=[lay_in0], out_layers=[lay_a8, lay_b8], any_layers=[])
    md.compile()
    md.summary()

    # callback, write out detection scores to .txt each epoch
    dump_fd = cfg.scrap_fd + '/Results_dev/jdc_eer/fold' + str(fold)
    print_scores = cb_eer.PrintScoresDetectionClassification(va_X, va_na_list, dump_fd, call_freq=1)

    # callback, print loss each epoch
    validation = Validation(tr_x=tr_X, tr_y=tr_y,
                            va_x=va_X, va_y=va_y,
                            te_x=None, te_y=None,
                            metrics=[loss_func],
                            call_freq=1,
                            dump_path=None)

    # callback, save model every N epochs
    save_model = SaveModel(dump_fd=cfg.scrap_fd + '/Md_dev_jdc', call_freq=10)

    # combine all callbacks
    callbacks = [validation, save_model, print_scores]

    # optimizer
    # optimizer = SGD(0.01, 0.95)
    optimizer = Adam(2e-4)

    # fit model
    md.fit(x=tr_X, y=tr_y,
           batch_size=10,
           n_epochs=301,
           loss_func=loss_func,
           optimizer=optimizer,
           callbacks=callbacks)
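# `mean_pool` here should average out the agg_num frame axis before the
# detector's Dense layer. A minimal sketch, assuming hat's backend K (the LSTM
# example at the end of this section defines the 3-d analogue over axis 1):
def mean_pool(input):
    # (n_songs, n_chunk, agg_num, n_in) -> (n_songs, n_chunk, n_in)
    return K.mean(input, axis=2)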
def train(tr_fe_fd, tr_csv_file, te_fe_fd, te_csv_file, n_concat, hop, scaler, out_md_fd):
    # Prepare data
    tr_x, tr_y = pp_data.get_matrix_format_data(fe_fd=tr_fe_fd,
                                                csv_file=tr_csv_file,
                                                n_concat=n_concat,
                                                hop=hop,
                                                scaler=scaler)
    te_x, te_y = pp_data.get_matrix_format_data(fe_fd=te_fe_fd,
                                                csv_file=te_csv_file,
                                                n_concat=n_concat,
                                                hop=hop,
                                                scaler=scaler)
    n_freq = tr_x.shape[2]
    print 'tr_x.shape:', tr_x.shape  # (n_samples, n_concat, n_freq)
    print 'tr_y.shape:', tr_y.shape  # (n_samples, n_labels)

    # Build model
    n_out = len(cfg.labels)
    seq = Sequential()
    seq.add(InputLayer((n_concat, n_freq)))
    seq.add(Flatten())
    seq.add(Dropout(0.2))
    seq.add(Dense(200, act='relu'))
    seq.add(Dropout(0.2))
    seq.add(Dense(200, act='relu'))
    seq.add(Dropout(0.2))
    seq.add(Dense(n_out, act='softmax'))
    md = seq.compile()
    md.summary()

    # Validation.
    # tr_err, te_err are frame-based. To get event-based err, run recognize.py
    validation = Validation(tr_x=tr_x, tr_y=tr_y,
                            va_x=None, va_y=None,
                            te_x=te_x, te_y=te_y,
                            batch_size=500,
                            call_freq=1,
                            dump_path=None)

    # Save model
    pp_data.create_folder(out_md_fd)
    save_model = SaveModel(out_md_fd, call_freq=2)

    # Callbacks
    callbacks = [validation, save_model]

    # Optimizer
    optimizer = Adam(1e-3)

    # fit model
    md.fit(x=tr_x, y=tr_y,
           batch_size=100,
           n_epochs=101,
           loss_func='categorical_crossentropy',
           optimizer=optimizer,
           callbacks=callbacks)
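# The `scaler` argument is applied inside get_matrix_format_data to normalize
# each feature frame. One plausible way to fit it (an assumption; a sklearn
# StandardScaler over all training frames is a common choice, and
# `all_tr_frames` below is a hypothetical (n_frames, n_freq) array):
from sklearn import preprocessing
scaler = preprocessing.StandardScaler().fit(all_tr_frames)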
# pad & truncate sequences
tr_X = pad_trunc_seqs(tr_X, max_len, pad_type='pre')
te_X = pad_trunc_seqs(te_X, max_len, pad_type='pre')

# sparse target to categorical target
tr_y = sparse_to_categorical(tr_y, n_out)
te_y = sparse_to_categorical(te_y, n_out)

### build model
def mean_pool(input):
    return K.mean(input, axis=1)

seq = Sequential()
seq.add(InputLayer(max_len))
seq.add(Embedding(n_words, n_proj))
seq.add(LSTM(n_hid, 'tanh', return_sequence=True))  # Try RNN, GRU instead
seq.add(Lambda(mean_pool))
seq.add(Dense(n_out, 'softmax'))
md = seq.combine()

# print summary info of model
md.summary()

### optimization method
optimizer = Rmsprop(0.001)

### callbacks (optional)
# save model every n epoch (optional)
if not os.path.exists('Md'):
    os.makedirs('Md')  # create folder
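# pad_trunc_seqs forces every token sequence to exactly max_len, padding or
# truncating at the front when pad_type='pre'. A minimal numpy sketch of the
# idea (illustrative only; hat's preprocessing module provides the real helper):
import numpy as np

def pad_trunc_pre(seqs, max_len):
    out = np.zeros((len(seqs), max_len), dtype=int)
    for i, s in enumerate(seqs):
        s = s[-max_len:]               # keep the last max_len tokens
        out[i, max_len - len(s):] = s  # left-pad with zeros
    return out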