Code example #1
0
def train():
    """Train a stereo source-separation DNN.

    A shared fully connected trunk predicts one sigmoid mask per output
    channel; each mask is multiplied element-wise with the network input
    to give the separated channel.  Trained with MSE against the clean
    channel magnitudes.

    NOTE(review): depends on module-level names (cfg, pp_data, n_time,
    n_freq, n_hid, hop, loss_func, mul, and the Hat layers) defined
    elsewhere in the file -- confirm when moving this function.
    """
    # load data; *_2d appear to be frame-level (N, n_freq) and *_3d
    # segment-level (N, n_time, n_freq) arrays -- TODO confirm against
    # pp_data.LoadData
    tr_X2d_mix, tr_X3d_mix, tr_y2d_chn0, tr_y2d_chn1, tr_y3d_chn0, tr_y3d_chn1 = pp_data.LoadData(
        cfg.fe_fft_fd, n_time, hop, na_list=cfg.tr_list)

    # build model: shared trunk, then two sigmoid mask heads
    lay_in0 = InputLayer(in_shape=(n_time, n_freq), name='in1')
    lay_a0 = Flatten()(lay_in0)
    lay_a1 = Dense(n_hid, act='relu', name='a1')(lay_a0)
    lay_a2 = Dropout(0.2, name='a2')(lay_a1)
    lay_a3 = Dense(n_hid, act='relu', name='a3')(lay_a2)
    lay_a4 = Dropout(0.2, name='a4')(lay_a3)
    lay_a5 = Dense(n_hid, act='relu', name='a5')(lay_a4)
    lay_a6 = Dropout(0.2, name='a6')(lay_a5)
    lay_b1 = Dense(n_freq, act='sigmoid',
                   name='a7')(lay_a6)  # mask_left, shape: (N, n_freq)
    lay_c1 = Dense(n_freq, act='sigmoid',
                   name='a8')(lay_a6)  # mask_right, shape: (N, n_freq)
    lay_out_b = Lambda(mul,
                       name='out_b')([lay_b1,
                                      lay_in0])  # out_left, shape: (N, n_freq)
    lay_out_c = Lambda(mul, name='out_c')([lay_c1, lay_in0
                                           ])  # out_right, shape: (N, n_freq)

    md = Model(in_layers=[lay_in0],
               out_layers=[lay_out_b, lay_out_c],
               any_layers=[lay_in0, lay_b1, lay_c1])
    # FIX: build the computation graph before summary/fit, consistent with
    # every other training example in this file
    md.compile()
    md.summary()

    # compute the (expensive) magnitude arrays once instead of twice
    # NOTE(review): tr_X3d_mix is loaded but unused -- the mixture is
    # reconstructed as |chn0| + |chn1|; confirm this is intended
    tr_in = [np.abs(tr_y3d_chn0) + np.abs(tr_y3d_chn1)]
    tr_out = [np.abs(tr_y2d_chn0), np.abs(tr_y2d_chn1)]

    # validation: report loss_func on the training data every epoch
    validation = Validation(tr_x=tr_in,
                            tr_y=tr_out,
                            batch_size=100,
                            metrics=[loss_func],
                            call_freq=1,
                            dump_path=None)

    # save model snapshots every 2 epochs
    if not os.path.exists(cfg.md_fd):
        os.makedirs(cfg.md_fd)
    save_model = SaveModel(dump_fd=cfg.md_fd, call_freq=2)

    # callbacks
    callbacks = [validation, save_model]

    # optimizer
    optimizer = Adam(1e-3)

    # fit model
    md.fit(tr_in, tr_out,
           batch_size=100,
           n_epochs=100,
           loss_func='mse',
           optimizer=optimizer,
           callbacks=callbacks,
           verbose=1)
Code example #2
0
def train():

    # create empty folders in workspace
    create_folders()

    # get dev & eva data
    tr_X, tr_y, _, _, _, _ = pp_dev_data.GetSegData(dev_fe_fd,
                                                    agg_num,
                                                    hop,
                                                    fold=None)
    te_X, te_na_list = pp_eva_data.GetEvaSegData(eva_fe_fd, agg_num, hop)

    [n_songs, n_chunks, _, n_in] = tr_X.shape
    print tr_X.shape, tr_y.shape
    print te_X.shape

    # model
    lay_in0 = InputLayer(
        (n_chunks, agg_num, n_in),
        name='in0')  # shape: (n_songs, n_chunk, agg_num, n_in)
    lay_a1 = Flatten(3, name='a1')(
        lay_in0)  # shape: (n_songs, n_chunk, agg_num*n_in)
    lay_a2 = Dense(n_hid,
                   act='relu')(lay_a1)  # shape: (n_songs, n_chunk, n_hid)
    lay_a3 = Dropout(0.2)(lay_a2)
    lay_a4 = Dense(n_hid, act='relu')(lay_a3)
    lay_a5 = Dropout(0.2)(lay_a4)
    lay_a6 = Dense(n_hid, act='relu')(lay_a5)
    lay_a7 = Dropout(0.2)(lay_a6)
    lay_a8 = Dense(n_out, act='sigmoid', b_init=-1,
                   name='a8')(lay_a7)  # shape: (n_songs, n_chunk, n_out)

    md = Model(in_layers=[lay_in0], out_layers=[lay_a8], any_layers=[])
    md.compile()
    md.summary()

    # callback, write out dection scores to .txt each epoch
    dump_fd = cfg.scrap_fd + '/Results_eva/bob_eer'
    print_scores = cb_eer.PrintScoresBagOfBlocks(te_X,
                                                 te_na_list,
                                                 dump_fd,
                                                 call_freq=1)

    # callback, print loss each epoch
    validation = Validation(tr_x=tr_X,
                            tr_y=tr_y,
                            va_x=None,
                            va_y=None,
                            te_x=None,
                            te_y=None,
                            metrics=[loss_func],
                            call_freq=1,
                            dump_path=None)

    # callback, save model every N epochs
    save_model = SaveModel(dump_fd=cfg.scrap_fd + '/Md_eva_bob', call_freq=10)

    # combine all callbacks
    callbacks = [validation, save_model, print_scores]

    # optimizer
    optimizer = Adam(2e-4)

    # fit model
    md.fit(x=tr_X,
           y=tr_y,
           batch_size=10,
           n_epochs=301,
           loss_func=loss_func,
           optimizer=optimizer,
           callbacks=callbacks)
Code example #3
0
# Two-input / two-output functional model: each input passes through its own
# dense branch, the branches are combined by a user-supplied `merge` Lambda,
# and two softmax heads are trained jointly on the merged representation.
# NOTE(review): n_in, n_hid, n_out, merge, tr_x/tr_y, te_x/te_y, your_metric
# and your_loss are assumed to be defined earlier in the file -- confirm.
lay_in1 = InputLayer(in_shape=(n_in,))
lay_in2 = InputLayer(in_shape=(n_in,))
a1 = Dense(n_out=n_hid, act='relu')(lay_in1)
a2 = Dense(n_out=n_hid, act='relu')(lay_in2)
b = Lambda(merge)([a1, a2])
b = Dense(n_out=n_hid, act='relu')(b)
lay_out1 = Dense(n_out=n_out, act='softmax')(b)
lay_out2 = Dense(n_out=n_out, act='softmax')(b)

md = Model(in_layers=[lay_in1, lay_in2], out_layers=[lay_out1, lay_out2])
md.compile()
md.summary()

# validate model every n epoch (optional); the same arrays are duplicated for
# both inputs/outputs here, matching the md.fit() call below
validation = Validation(tr_x=[tr_x, tr_x], tr_y=[tr_y, tr_y], 
                        va_x=None, va_y=None, 
                        te_x=[te_x, te_x], te_y=[te_y, te_y], 
                        batch_size=500, 
                        metrics=[your_metric], 
                        call_freq=1)

# callbacks function
callbacks = [validation]

### train model
# optimization method
optimizer = Adam(lr=0.001)        # Try SGD, Adagrad, Rmsprop, etc. instead

md.fit(x=[tr_x, tr_x], y=[tr_y, tr_y], batch_size=500, n_epochs=101, 
       loss_func=your_loss, optimizer=optimizer, 
       callbacks=callbacks)
Code example #4
0
File: main_resnet34.py  Project: Python3pkg/Hat
# Training/prediction tail of the script: the compiled model `md`, the data
# arrays tr_X/tr_y/te_X/te_y and the optimizer are built above this excerpt.
# NOTE(review): confirm those names against the full file.
# save model every n epoch (optional)
if not os.path.exists('Md'): os.makedirs('Md')  # create folder
save_model = SaveModel(dump_fd='Md', call_freq=1)

# validate model every n epoch (optional); reports categorical error on the
# train and test sets each epoch and dumps the history to validation.p
validation = Validation(tr_x=tr_X,
                        tr_y=tr_y,
                        va_x=None,
                        va_y=None,
                        te_x=te_X,
                        te_y=te_y,
                        batch_size=100,
                        metrics=['categorical_error'],
                        call_freq=1,
                        dump_path='validation.p')

# callbacks function
callbacks = [validation, save_model]

### train model
md.fit(x=tr_X,
       y=tr_y,
       batch_size=100,
       n_epochs=10,
       loss_func='categorical_crossentropy',
       optimizer=optimizer,
       callbacks=callbacks,
       verbose=2)

### predict using model
pred_y = md.predict(te_X, batch_size=100)
Code example #5
0
File: imdb_lstm.py  Project: MohanKrishna-RC/Hat
# Training/prediction tail of the IMDB LSTM script: the model `md` and the
# arrays tr_x/tr_y/te_x/te_y are built above this excerpt.
# NOTE(review): confirm those names against the full file.
# print summary info of model
md.summary()

### callbacks (optional)
# save model every n epoch (optional)
dump_fd = 'imdb_lstm_models'
if not os.path.exists(dump_fd): os.makedirs(dump_fd)
save_model = SaveModel(dump_fd=dump_fd, call_freq=2)

# validate model every n epoch (optional); reports error and cross-entropy on
# the train and test sets each epoch
validation = Validation(tr_x=tr_x, tr_y=tr_y, 
                        va_x=None, va_y=None, 
                        te_x=te_x, te_y=te_y, 
                        batch_size=500, 
                        metrics=['categorical_error', 'categorical_crossentropy'], 
                        call_freq=1)

# callbacks function
callbacks = [validation, save_model]

### train model
# optimization method
optimizer = Rmsprop(0.001)

md.fit(x=tr_x, y=tr_y, batch_size=32, n_epochs=20, 
       loss_func='categorical_crossentropy', optimizer=optimizer, 
       callbacks=callbacks)

### predict using model
pred_y = md.predict(te_x)
Code example #6
0
# Tail of a two-input / two-output model script; the layers b, lay_in1,
# lay_in2 and lay_out1, plus tr_x/tr_y/te_x/te_y, your_metric and your_loss,
# are defined above this excerpt.
# NOTE(review): confirm those names against the full file.
lay_out2 = Dense(n_out=n_out, act='softmax')(b)

md = Model(in_layers=[lay_in1, lay_in2], out_layers=[lay_out1, lay_out2])
md.compile()
md.summary()

# validate model every n epoch (optional); the same arrays are duplicated for
# both inputs/outputs, matching the md.fit() call below
validation = Validation(tr_x=[tr_x, tr_x],
                        tr_y=[tr_y, tr_y],
                        va_x=None,
                        va_y=None,
                        te_x=[te_x, te_x],
                        te_y=[te_y, te_y],
                        batch_size=500,
                        metrics=[your_metric],
                        call_freq=1)

# callbacks function
callbacks = [validation]

### train model
# optimization method
optimizer = Adam(lr=0.001)  # Try SGD, Adagrad, Rmsprop, etc. instead

md.fit(x=[tr_x, tr_x],
       y=[tr_y, tr_y],
       batch_size=500,
       n_epochs=101,
       loss_func=your_loss,
       optimizer=optimizer,
       callbacks=callbacks)
Code example #7
0
File: mnist_dnn.py  Project: qiuqiangkong/Hat
# Training/prediction tail of the MNIST DNN script: dump_fd, the model `md`
# and the arrays tr_x/tr_y/te_x/te_y are defined above this excerpt.
# NOTE(review): confirm those names against the full file.
if not os.path.exists(dump_fd): os.makedirs(dump_fd)
save_model = SaveModel(dump_fd=dump_fd, call_freq=2)

# validate model every n epoch (optional); reports categorical error on the
# train and test sets each epoch
validation = Validation(tr_x=tr_x,
                        tr_y=tr_y,
                        va_x=None,
                        va_y=None,
                        te_x=te_x,
                        te_y=te_y,
                        batch_size=500,
                        metrics=['categorical_error'],
                        call_freq=1)

# callbacks function
callbacks = [validation, save_model]

### train model
# optimization method
optimizer = Adam(lr=0.001)  # Try SGD, Adagrad, Rmsprop, etc. instead

md.fit(x=tr_x,
       y=tr_y,
       batch_size=500,
       n_epochs=101,
       loss_func='categorical_crossentropy',
       optimizer=optimizer,
       callbacks=callbacks)

### predict using model
pred_y = md.predict(te_x)
Code example #8
0
def train():
    """Train the joint detection-classification model for one CV fold.

    Builds a DNN classifier branch plus a mean-pooled detector branch on
    segmented dev-set features, then fits both outputs jointly while
    dumping EER scores, per-epoch losses and model snapshots via callbacks.

    NOTE(review): uses Python 2 ``print`` statements and module-level
    names (cfg, pp_dev_data, fe_fd, agg_num, hop, fold, n_hid, n_out,
    mean_pool, loss_func, cb_eer) defined elsewhere in the file -- confirm.
    """

    # create empty folders in workspace
    create_folders()

    # prepare data: training split plus validation split for this fold
    tr_X, tr_y, _, va_X, va_y, va_na_list = pp_dev_data.GetSegData(
        fe_fd, agg_num, hop, fold)
    [n_songs, n_chunks, _, n_in] = tr_X.shape

    print tr_X.shape, tr_y.shape
    print va_X.shape, va_y.shape

    # model
    # classifier branch: flatten each chunk, three relu+dropout stages
    lay_in0 = InputLayer(
        (n_chunks, agg_num, n_in),
        name='in0')  # shape: (n_songs, n_chunk, agg_num, n_in)
    lay_a1 = Flatten(3, name='a1')(
        lay_in0)  # shape: (n_songs, n_chunk, agg_num*n_in)
    lay_a2 = Dense(n_hid,
                   act='relu')(lay_a1)  # shape: (n_songs, n_chunk, n_hid)
    lay_a3 = Dropout(0.2)(lay_a2)
    lay_a4 = Dense(n_hid, act='relu')(lay_a3)
    lay_a5 = Dropout(0.2)(lay_a4)
    lay_a6 = Dense(n_hid, act='relu')(lay_a5)
    lay_a7 = Dropout(0.2)(lay_a6)
    lay_a8 = Dense(n_out, act='sigmoid', b_init=-1,
                   name='a8')(lay_a7)  # shape: (n_songs, n_chunk, n_out)

    # detector branch: mean-pool the raw input, then a single sigmoid layer
    lay_b1 = Lambda(mean_pool)(lay_in0)  # shape: (n_songs, n_chunk, n_out)
    lay_b8 = Dense(n_out, act='sigmoid',
                   name='b4')(lay_b1)  # shape: (n_songs, n_chunk, n_out)

    # two outputs: classifier (lay_a8) and detector (lay_b8); their order
    # must match the loss/target pairing used by md.fit below
    md = Model(in_layers=[lay_in0], out_layers=[lay_a8, lay_b8], any_layers=[])
    md.compile()
    md.summary()

    # callback, write out dection scores to .txt each epoch
    dump_fd = cfg.scrap_fd + '/Results_dev/jdc_eer/fold' + str(fold)
    print_scores = cb_eer.PrintScoresDetectionClassification(va_X,
                                                             va_na_list,
                                                             dump_fd,
                                                             call_freq=1)

    # callback, print loss each epoch (train and validation splits)
    validation = Validation(tr_x=tr_X,
                            tr_y=tr_y,
                            va_x=va_X,
                            va_y=va_y,
                            te_x=None,
                            te_y=None,
                            metrics=[loss_func],
                            call_freq=1,
                            dump_path=None)

    # callback, save model every N epochs
    save_model = SaveModel(dump_fd=cfg.scrap_fd + '/Md_dev_jdc', call_freq=10)

    # combine all callbacks
    callbacks = [validation, save_model, print_scores]

    # optimizer
    # optimizer = SGD( 0.01, 0.95 )
    optimizer = Adam(2e-4)

    # fit model
    md.fit(x=tr_X,
           y=tr_y,
           batch_size=10,
           n_epochs=301,
           loss_func=loss_func,
           optimizer=optimizer,
           callbacks=callbacks)