# Code example #1
# Print the compiled model architecture for inspection before training.
model.summary()
# sample_weight_mode="temporal"

# Hold out the last 10% of publication ids for validation.
n_train = int(len(pub_ids) * 0.9)
tr_pubs = pub_ids[:n_train]
val_pubs = pub_ids[n_train:]

# Slice the full dataset into per-split frames by publication id.
train = subdata_getter(tr_pubs, data)
validation = subdata_getter(val_pubs, data)

# Batch generators feeding fit_generator / evaluate_generator below.
tr_generator = DataGenerator(tr_pubs, train)
val_generator = DataGenerator(val_pubs, validation)

# Custom callback — presumably records per-batch logs into
# .train_log / .val_log (read below); confirm against its definition.
history = NBatchLogger()
model.fit_generator(generator=tr_generator, shuffle=False, epochs=10,
                    verbose=0, callbacks=[history])  # ,callbacks=callbacks_list
# Capture the validation metrics instead of silently discarding them.
val_metrics = model.evaluate_generator(generator=val_generator,
                                       use_multiprocessing=True, verbose=0)

# Persist the logged train/validation histories for later analysis.
history_save = [history.train_log, history.val_log]
# Fix: use a context manager so the pickle file is flushed and closed
# (the original `pickle.dump(..., open(...))` leaked the file handle).
with open('history_adam_0.01_d1', 'wb') as f:
    pickle.dump(history_save, f)

# Map words unseen during training to the "UNK" token in the test set.
data_te = pd.read_csv('df_concat_test.csv', encoding="latin1").fillna(method="ffill")
data_te.loc[~data_te['Word'].isin(words), 'Word'] = "UNK"
# data_te = data_te[:20000]

# Unique publication ids present in the test data.
te_pub = list(set(data_te["Pub_id"].values))

# Code example #2
    # Plot training curves (pyplot-style names, apparently star-imported):
    # top panel = loss per epoch, bottom panel = sqrt of MSE ("std").
    subplot(2, 1, 1)
    # NOTE(review): `history` is subscripted directly here but accessed via
    # `.history` below — one of the two is likely wrong; confirm which object
    # `history` actually is (Keras History vs its .history dict).
    plot(history['loss'])
    ylabel('loss')
    xlabel('epochs')
    subplot(2, 1, 2)
    # NOTE(review): `history.history('mse')` CALLS the history dict with
    # parentheses — this would raise TypeError at runtime; almost certainly
    # meant `history.history['mse']`.
    plot(sqrt(history.history('mse')))
    ylabel('std')
    xlabel('epochs')

elif flag_method_of_fitting == 2:
    #(2). Fit using a Generator function:
    number_of_workers = 1
    #    K.set_learning_phase(0)
    # Train from Python generators; validation runs 10 generator steps
    # after each of the 100 training steps per epoch.
    conv_model_time_distributed.fit_generator(
        generator=training_generator,
        steps_per_epoch=100,
        validation_data=validation_generator,
        validation_steps=10,
        workers=number_of_workers)
elif flag_method_of_fitting == 3:
    #(3). Fitting using one fit per loop (more controllable):
    1  # placeholder no-op — branch not implemented

#### KERAS MODEL FUNCTIONS: ####
#Model.build
#Model.compile
#Model.compute_output_shape
#Model.count_params
#Model.evaluate
#Model.fit
#Model.get_layer
#Model.get_weights