def train():
    """Train a fully-connected DNN tagger on dev-fold data.

    Relies on module-level globals: fe_fd, agg_num, hop, fold, n_hid,
    n_out, act, cfg, pp_dev_data and the model-toolkit layer classes.
    Side effects: creates cfg.dev_md_fd and dumps model snapshots there.
    """
    # prepare data
    tr_X, tr_y, _, te_X, te_y, te_na_list = pp_dev_data.GetAllData(
        fe_fd, agg_num, hop, fold)
    # assumes tr_X is 3-D: (batch_num, n_time, n_freq) — unpacked below
    [batch_num, n_time, n_freq] = tr_X.shape
    print(tr_X.shape, tr_y.shape)
    print(te_X.shape, te_y.shape)

    # build model: flattened input -> 3 hidden layers -> sigmoid output
    seq = Sequential()
    seq.add(InputLayer((n_time, n_freq)))
    seq.add(Flatten())  # flatten 2d (n_time, n_freq) to 1d (n_time*n_freq)
    seq.add(Dropout(0.1))
    seq.add(Dense(n_hid, act=act))
    seq.add(Dropout(0.1))
    seq.add(Dense(n_hid, act=act))
    seq.add(Dropout(0.1))
    seq.add(Dense(n_hid, act=act))
    seq.add(Dropout(0.1))
    seq.add(Dense(n_out, act='sigmoid'))
    md = seq.compile()
    md.summary()

    # optimizer
    optimizer = Adam(1e-4)

    # callbacks
    # tr_err, te_err are frame based. To get event based err, run recognize.py
    validation = Validation(tr_x=tr_X, tr_y=tr_y,
                            va_x=None, va_y=None,
                            te_x=te_X, te_y=te_y,
                            batch_size=2000,
                            metrics=['binary_crossentropy'],
                            call_freq=1,
                            dump_path=None)

    # save model every 10 epochs
    pp_dev_data.CreateFolder(cfg.dev_md_fd)
    save_model = SaveModel(dump_fd=cfg.dev_md_fd, call_freq=10)

    callbacks = [validation, save_model]

    # fit model
    md.fit(x=tr_X, y=tr_y,
           batch_size=2000,
           n_epochs=100,
           loss_func='binary_crossentropy',
           optimizer=optimizer,
           callbacks=callbacks,
           verbose=1)
def train():
    """Train a two-output masking DNN for source (channel) separation.

    The network predicts two sigmoid masks from the mixture magnitude
    spectrum and multiplies each mask with the input to estimate the two
    channels. Relies on module-level globals: n_time, n_freq, n_hid, hop,
    loss_func, mul, cfg, pp_data, np and the model-toolkit layer classes.
    Side effects: creates cfg.md_fd and dumps model snapshots there.
    """
    # load data
    tr_X2d_mix, tr_X3d_mix, tr_y2d_chn0, tr_y2d_chn1, tr_y3d_chn0, tr_y3d_chn1 = \
        pp_data.LoadData(cfg.fe_fft_fd, n_time, hop, na_list=cfg.tr_list)

    # build model
    lay_in0 = InputLayer(in_shape=(n_time, n_freq), name='in1')
    lay_a0 = Flatten()(lay_in0)
    lay_a1 = Dense(n_hid, act='relu', name='a1')(lay_a0)
    lay_a2 = Dropout(0.2, name='a2')(lay_a1)
    lay_a3 = Dense(n_hid, act='relu', name='a3')(lay_a2)
    lay_a4 = Dropout(0.2, name='a4')(lay_a3)
    lay_a5 = Dense(n_hid, act='relu', name='a5')(lay_a4)
    lay_a6 = Dropout(0.2, name='a6')(lay_a5)
    lay_b1 = Dense(n_freq, act='sigmoid', name='a7')(lay_a6)  # mask_left, shape: (N, n_freq)
    lay_c1 = Dense(n_freq, act='sigmoid', name='a8')(lay_a6)  # mask_right, shape: (N, n_freq)
    lay_out_b = Lambda(mul, name='out_b')([lay_b1, lay_in0])  # out_left, shape: (N, n_freq)
    lay_out_c = Lambda(mul, name='out_c')([lay_c1, lay_in0])  # out_right, shape: (N, n_freq)

    md = Model(in_layers=[lay_in0],
               out_layers=[lay_out_b, lay_out_c],
               any_layers=[lay_in0, lay_b1, lay_c1])
    md.summary()

    # validation: input is the sum of both channel magnitudes (the mixture),
    # targets are the individual channel magnitudes
    validation = Validation(tr_x=[np.abs(tr_y3d_chn0) + np.abs(tr_y3d_chn1)],
                            tr_y=[np.abs(tr_y2d_chn0), np.abs(tr_y2d_chn1)],
                            batch_size=100,
                            metrics=[loss_func],
                            call_freq=1,
                            dump_path=None)

    # save model every 2 epochs
    if not os.path.exists(cfg.md_fd):
        os.makedirs(cfg.md_fd)
    save_model = SaveModel(dump_fd=cfg.md_fd, call_freq=2)

    callbacks = [validation, save_model]

    # optimizer
    optimizer = Adam(1e-3)

    # fit model
    md.fit([np.abs(tr_y3d_chn0) + np.abs(tr_y3d_chn1)],
           [np.abs(tr_y2d_chn0), np.abs(tr_y2d_chn1)],
           batch_size=100,
           n_epochs=100,
           loss_func='mse',
           optimizer=optimizer,
           callbacks=callbacks,
           verbose=1)
# sparse label to 1-of-K categorical label
# NOTE(review): tr_y/va_y/te_y and n_out are defined earlier, outside this view
tr_y = sparse_to_categorical(tr_y, n_out)
va_y = sparse_to_categorical(va_y, n_out)
te_y = sparse_to_categorical(te_y, n_out)

### Build model: 2x (conv + max-pool) -> dense head, softmax classifier
# input shape (1, 28, 28) — presumably MNIST-style images; verify upstream
act = 'relu'
seq = Sequential()
seq.add(InputLayer(in_shape=(1, 28, 28)))
seq.add(Convolution2D(n_outfmaps=32, n_row=3, n_col=3, act='relu'))
seq.add(MaxPool2D(pool_size=(2, 2)))
seq.add(Convolution2D(n_outfmaps=32, n_row=3, n_col=3, act='relu'))
seq.add(MaxPool2D(pool_size=(2, 2)))
seq.add(Dropout(0.2))
seq.add(Flatten())
seq.add(Dense(n_hid, act='relu'))
seq.add(Dropout(0.5))
seq.add(Dense(n_hid, act='relu'))
seq.add(Dense(n_out, act='softmax'))
md = seq.combine()

# print summary info of model
md.summary()

# optimization method
optimizer = Adam(lr=0.001)

### callbacks (optional)
# save model every n epoch (optional)
if not os.path.exists('Md'):
    os.makedirs('Md')
n_hid = 500 fold = 1 n_out = len(cfg.labels) # prepare data tr_X, tr_y, _ = pp_dev_data.GetAllData(cfg.dev_fe_mel_fd, agg_num, hop, fold=None) [batch_num, n_time, n_freq] = tr_X.shape print tr_X.shape, tr_y.shape # build model seq = Sequential() seq.add(InputLayer((n_time, n_freq))) seq.add(Flatten()) # flatten to 2d: (n_time, n_freq) to 1d:(n_time*n_freq) seq.add(Dropout(0.1)) seq.add(Dense(n_hid, act=act)) seq.add(Dropout(0.1)) seq.add(Dense(n_hid, act=act)) seq.add(Dropout(0.1)) seq.add(Dense(n_hid, act=act)) seq.add(Dropout(0.1)) seq.add(Dense(n_out, act='sigmoid')) md = seq.compile() md.summary() # validation # tr_err, te_err are frame based. To get event based err, run recognize.py validation = Validation(tr_x=tr_X, tr_y=tr_y,
def train():
    """Train the bag-of-blocks DNN on dev data and score eva data.

    Relies on module-level globals: dev_fe_fd, eva_fe_fd, agg_num, hop,
    n_hid, n_out, loss_func, cfg, pp_dev_data, pp_eva_data, cb_eer and
    the model-toolkit layer classes. Side effects: writes per-epoch
    detection scores and model snapshots under cfg.scrap_fd.
    """
    # create empty folders in workspace
    create_folders()

    # get dev & eva data
    tr_X, tr_y, _, _, _, _ = pp_dev_data.GetSegData(dev_fe_fd, agg_num, hop, fold=None)
    te_X, te_na_list = pp_eva_data.GetEvaSegData(eva_fe_fd, agg_num, hop)
    # assumes tr_X is 4-D: (n_songs, n_chunks, agg_num, n_in)
    [n_songs, n_chunks, _, n_in] = tr_X.shape
    print(tr_X.shape, tr_y.shape)
    print(te_X.shape)

    # model: flatten each chunk, 3 hidden layers, sigmoid tag output
    lay_in0 = InputLayer((n_chunks, agg_num, n_in), name='in0')  # shape: (n_songs, n_chunk, agg_num, n_in)
    lay_a1 = Flatten(3, name='a1')(lay_in0)  # shape: (n_songs, n_chunk, agg_num*n_in)
    lay_a2 = Dense(n_hid, act='relu')(lay_a1)  # shape: (n_songs, n_chunk, n_hid)
    lay_a3 = Dropout(0.2)(lay_a2)
    lay_a4 = Dense(n_hid, act='relu')(lay_a3)
    lay_a5 = Dropout(0.2)(lay_a4)
    lay_a6 = Dense(n_hid, act='relu')(lay_a5)
    lay_a7 = Dropout(0.2)(lay_a6)
    lay_a8 = Dense(n_out, act='sigmoid', b_init=-1, name='a8')(lay_a7)  # shape: (n_songs, n_chunk, n_out)

    md = Model(in_layers=[lay_in0], out_layers=[lay_a8], any_layers=[])
    md.compile()
    md.summary()

    # callback: write out detection scores to .txt each epoch
    dump_fd = cfg.scrap_fd + '/Results_eva/bob_eer'
    print_scores = cb_eer.PrintScoresBagOfBlocks(te_X, te_na_list, dump_fd, call_freq=1)

    # callback: print loss each epoch
    validation = Validation(tr_x=tr_X, tr_y=tr_y,
                            va_x=None, va_y=None,
                            te_x=None, te_y=None,
                            metrics=[loss_func],
                            call_freq=1,
                            dump_path=None)

    # callback: save model every N epochs
    save_model = SaveModel(dump_fd=cfg.scrap_fd + '/Md_eva_bob', call_freq=10)

    # combine all callbacks
    callbacks = [validation, save_model, print_scores]

    # optimizer
    optimizer = Adam(2e-4)

    # fit model
    md.fit(x=tr_X, y=tr_y,
           batch_size=10,
           n_epochs=301,
           loss_func=loss_func,
           optimizer=optimizer,
           callbacks=callbacks)
return a0 x0 = InputLayer(in_shape=(3, 32, 32)) x1 = Convolution2D(n_outfmaps=64, n_row=3, n_col=3, act='relu', border_mode=(1, 1))(x0) x2 = add_n_blocks(x1, n_outfmaps=64, n_repeat=3, is_first_layer=True) x3 = add_n_blocks(x2, n_outfmaps=128, n_repeat=4, is_first_layer=False) x4 = add_n_blocks(x3, n_outfmaps=256, n_repeat=6, is_first_layer=False) x5 = add_n_blocks(x4, n_outfmaps=512, n_repeat=3, is_first_layer=False) y1 = Lambda(mean_pool)(x5) y2 = Flatten()(y1) y3 = Dense(n_out, act='softmax')(y2) md = Model([x0], [y3]) # print summary info of model md.summary() ### optimization method optimizer = Adam(1e-3) ### callbacks (optional) # save model every n epoch (optional) if not os.path.exists('Md'): os.makedirs('Md') # create folder save_model = SaveModel(dump_fd='Md', call_freq=1) # validate model every n epoch (optional)
def train_cv_model():
    """Train one cross-validation fold for the 'home' or 'resi' scene task.

    Relies on module-level globals: type (scene selector, shadows the
    builtin — NOTE(review): consider renaming upstream), fold, n_hid,
    agg_num, hop, cfg, pp_dev_data and the model-toolkit layer classes.
    Side effects: creates cfg.dev_md_fd and dumps model snapshots there.

    Raises:
        ValueError: if `type` is neither 'home' nor 'resi' (previously
        this surfaced as an opaque NameError on fe_fd).
    """
    # init paths per scene type
    if type == 'home':
        fe_fd = cfg.dev_fe_mel_home_fd
        labels = cfg.labels_home
        lb_to_id = cfg.lb_to_id_home
        tr_txt = cfg.dev_evaluation_fd + '/home_fold' + str(fold) + '_train.txt'
        te_txt = cfg.dev_evaluation_fd + '/home_fold' + str(fold) + '_evaluate.txt'
    elif type == 'resi':
        fe_fd = cfg.dev_fe_mel_resi_fd
        labels = cfg.labels_resi
        lb_to_id = cfg.lb_to_id_resi
        tr_txt = cfg.dev_evaluation_fd + '/residential_area_fold' + str(fold) + '_train.txt'
        te_txt = cfg.dev_evaluation_fd + '/residential_area_fold' + str(fold) + '_evaluate.txt'
    else:
        raise ValueError("Unknown scene type: %r (expected 'home' or 'resi')" % (type,))

    n_out = len(labels)

    # load data to list
    tr_X, tr_y = pp_dev_data.LoadAllData(fe_fd, tr_txt, lb_to_id, agg_num, hop)
    tr_y = sparse_to_categorical(tr_y, n_out)
    print(tr_X.shape)
    print(tr_y.shape)
    n_freq = tr_X.shape[2]

    # build model: flattened input -> 3 hidden layers -> sigmoid output
    seq = Sequential()
    seq.add(InputLayer((agg_num, n_freq)))
    seq.add(Flatten())
    seq.add(Dense(n_hid, act='relu'))
    seq.add(Dropout(0.1))
    seq.add(Dense(n_hid, act='relu'))
    seq.add(Dropout(0.1))
    seq.add(Dense(n_hid, act='relu'))
    seq.add(Dropout(0.1))
    seq.add(Dense(n_out, act='sigmoid'))
    md = seq.combine()

    # print summary info of model
    md.summary()

    # optimization method
    optimizer = Adam(1e-3)

    # callbacks (optional)
    # save model every n epoch
    pp_dev_data.CreateFolder(cfg.dev_md_fd)
    save_model = SaveModel(dump_fd=cfg.dev_md_fd, call_freq=5)

    # validate model every n epoch
    validation = Validation(tr_x=tr_X, tr_y=tr_y,
                            va_x=None, va_y=None,
                            te_x=None, te_y=None,
                            metrics=['binary_crossentropy'],
                            call_freq=1,
                            dump_path=None)

    # callbacks function
    callbacks = [validation, save_model]

    # train model
    md.fit(x=tr_X, y=tr_y,
           batch_size=20,
           n_epochs=100,
           loss_func='binary_crossentropy',
           optimizer=optimizer,
           callbacks=callbacks)
# NOTE(review): fragment — `a` and `lay_in` are defined earlier, outside this
# view. Continues a conv stack: two more (conv -> BN -> relu -> pool -> drop)
# stages, then a dense head ending in a softmax classifier.
a = MaxPooling2D(pool_size=(2, 2))(a)
a = Dropout(p_drop=0.2)(a)

a = Conv2D(n_outfmaps=64, n_row=3, n_col=3, act='linear',
           strides=(1, 1), border_mode=(1, 1))(a)
a = BN(axis=(0, 2, 3))(a)  # batch-norm over all axes except feature maps
a = Activation('relu')(a)
a = MaxPooling2D(pool_size=(2, 2))(a)
a = Dropout(p_drop=0.2)(a)

a = Conv2D(n_outfmaps=128, n_row=3, n_col=3, act='linear',
           strides=(1, 1), border_mode=(1, 1))(a)
a = BN(axis=(0, 2, 3))(a)
a = Activation('relu')(a)
a = MaxPooling2D(pool_size=(2, 2))(a)
a = Dropout(p_drop=0.2)(a)

# dense head
a = Flatten()(a)
a = Dense(n_out=n_hid, act='linear')(a)
a = BN(axis=0)(a)
a = Activation('relu')(a)
a = Dropout(p_drop=0.2)(a)
lay_out = Dense(n_out=n_out, act='softmax')(a)

md = Model(in_layers=[lay_in], out_layers=[lay_out])
md.compile()

# print summary info of model
md.summary()

### callbacks (optional)
# save model every n epoch (optional)
def train():
    """Train the joint detection-classification (JDC) model on dev folds.

    Builds two heads over the same input: a classifier (a-branch) and a
    detector (b-branch), trained jointly. Relies on module-level globals:
    fe_fd, agg_num, hop, fold, n_hid, n_out, loss_func, mean_pool, cfg,
    pp_dev_data, cb_eer and the model-toolkit layer classes. Side effects:
    writes per-epoch detection scores and model snapshots under cfg.scrap_fd.
    """
    # create empty folders in workspace
    create_folders()

    # prepare data
    tr_X, tr_y, _, va_X, va_y, va_na_list = pp_dev_data.GetSegData(
        fe_fd, agg_num, hop, fold)
    # assumes tr_X is 4-D: (n_songs, n_chunks, agg_num, n_in)
    [n_songs, n_chunks, _, n_in] = tr_X.shape
    print(tr_X.shape, tr_y.shape)
    print(va_X.shape, va_y.shape)

    # model
    # classifier branch
    lay_in0 = InputLayer((n_chunks, agg_num, n_in), name='in0')  # shape: (n_songs, n_chunk, agg_num, n_in)
    lay_a1 = Flatten(3, name='a1')(lay_in0)  # shape: (n_songs, n_chunk, agg_num*n_in)
    lay_a2 = Dense(n_hid, act='relu')(lay_a1)  # shape: (n_songs, n_chunk, n_hid)
    lay_a3 = Dropout(0.2)(lay_a2)
    lay_a4 = Dense(n_hid, act='relu')(lay_a3)
    lay_a5 = Dropout(0.2)(lay_a4)
    lay_a6 = Dense(n_hid, act='relu')(lay_a5)
    lay_a7 = Dropout(0.2)(lay_a6)
    lay_a8 = Dense(n_out, act='sigmoid', b_init=-1, name='a8')(lay_a7)  # shape: (n_songs, n_chunk, n_out)

    # detector branch
    lay_b1 = Lambda(mean_pool)(lay_in0)  # shape: (n_songs, n_chunk, n_out)
    lay_b8 = Dense(n_out, act='sigmoid', name='b4')(lay_b1)  # shape: (n_songs, n_chunk, n_out)

    md = Model(in_layers=[lay_in0], out_layers=[lay_a8, lay_b8], any_layers=[])
    md.compile()
    md.summary()

    # callback: write out detection scores to .txt each epoch
    dump_fd = cfg.scrap_fd + '/Results_dev/jdc_eer/fold' + str(fold)
    print_scores = cb_eer.PrintScoresDetectionClassification(
        va_X, va_na_list, dump_fd, call_freq=1)

    # callback: print loss each epoch
    validation = Validation(tr_x=tr_X, tr_y=tr_y,
                            va_x=va_X, va_y=va_y,
                            te_x=None, te_y=None,
                            metrics=[loss_func],
                            call_freq=1,
                            dump_path=None)

    # callback: save model every N epochs
    save_model = SaveModel(dump_fd=cfg.scrap_fd + '/Md_dev_jdc', call_freq=10)

    # combine all callbacks
    callbacks = [validation, save_model, print_scores]

    # optimizer
    # optimizer = SGD( 0.01, 0.95 )
    optimizer = Adam(2e-4)

    # fit model
    md.fit(x=tr_X, y=tr_y,
           batch_size=10,
           n_epochs=301,
           loss_func=loss_func,
           optimizer=optimizer,
           callbacks=callbacks)
def train(tr_fe_fd, tr_csv_file, te_fe_fd, te_csv_file, n_concat, hop, scaler,
          out_md_fd):
    """Train a DNN scene classifier on concatenated feature frames.

    Args:
        tr_fe_fd, te_fe_fd: feature folders for train / test data.
        tr_csv_file, te_csv_file: csv files listing the audio clips.
        n_concat: number of frames concatenated per sample.
        hop: hop size between samples.
        scaler: feature scaler applied by get_matrix_format_data.
        out_md_fd: folder where model snapshots are dumped (created here).

    Relies on module-level globals: cfg, pp_data and the model-toolkit
    layer classes.
    """
    # Prepare data
    tr_x, tr_y = pp_data.get_matrix_format_data(fe_fd=tr_fe_fd,
                                                csv_file=tr_csv_file,
                                                n_concat=n_concat,
                                                hop=hop,
                                                scaler=scaler)
    te_x, te_y = pp_data.get_matrix_format_data(fe_fd=te_fe_fd,
                                                csv_file=te_csv_file,
                                                n_concat=n_concat,
                                                hop=hop,
                                                scaler=scaler)
    n_freq = tr_x.shape[2]
    print('tr_x.shape:', tr_x.shape)  # (n_samples, n_concat, n_freq)
    print('tr_y.shape:', tr_y.shape)  # (n_samples, n_labels)

    # Build model: flattened input -> 2 hidden layers -> softmax output
    n_out = len(cfg.labels)
    seq = Sequential()
    seq.add(InputLayer((n_concat, n_freq)))
    seq.add(Flatten())
    seq.add(Dropout(0.2))
    seq.add(Dense(200, act='relu'))
    seq.add(Dropout(0.2))
    seq.add(Dense(200, act='relu'))
    seq.add(Dropout(0.2))
    seq.add(Dense(n_out, act='softmax'))
    md = seq.compile()
    md.summary()

    # Validation.
    # tr_err, te_err are frame based. To get event based err, run recognize.py
    validation = Validation(tr_x=tr_x, tr_y=tr_y,
                            va_x=None, va_y=None,
                            te_x=te_x, te_y=te_y,
                            batch_size=500,
                            call_freq=1,
                            dump_path=None)

    # Save model every 2 epochs
    pp_data.create_folder(out_md_fd)
    save_model = SaveModel(out_md_fd, call_freq=2)

    # Callbacks
    callbacks = [validation, save_model]

    # Optimizer
    optimizer = Adam(1e-3)

    # fit model
    md.fit(x=tr_x, y=tr_y,
           batch_size=100,
           n_epochs=101,
           loss_func='categorical_crossentropy',
           optimizer=optimizer,
           callbacks=callbacks)