def SRCNN_train():
    """Train the SRCNN (x2) model with LR-on-plateau and best-checkpoint callbacks.

    BUG FIX: ReduceLROnPlateau previously monitored 'val_acc', which is never
    logged because the model's only compiled metric is PSNRLoss — the LR
    schedule was therefore a silent no-op. It now monitors 'val_loss' with
    mode='min'.
    """
    SRCNN = model_SRCNN()
    SRCNN.compile(optimizer=adam(lr=0.0003), loss='mse', metrics=[PSNRLoss])
    print(SRCNN.summary())
    data, label = pd.read_training_data("drive/SuperResolution/train.h5")
    val_data, val_label = pd.read_training_data("drive/SuperResolution/val.h5")
    # Shrink the LR by 10% after 25 stagnant epochs; never go below 1e-6.
    reduce_lr = ReduceLROnPlateau(monitor='val_loss', mode='min', factor=0.9,
                                  patience=25, min_lr=0.000001, verbose=1)
    # Keep only the weights with the lowest validation loss seen so far.
    checkpoint = ModelCheckpoint("drive/SuperResolution/SRCNNx2_model.h5",
                                 monitor='val_loss', verbose=1,
                                 save_best_only=True, save_weights_only=False,
                                 mode='min')
    callbacks_list = [reduce_lr, checkpoint]
    SRCNN.fit(data, label, batch_size=64,
              validation_data=(val_data, val_label),
              callbacks=callbacks_list, shuffle=True,
              nb_epoch=200, verbose=1)
def train():
    """Train the SRCNN model, then persist its architecture and weights.

    Requires train.h5 / test.h5 produced by the prepare_data.py module.
    """
    srcnn_model = model()
    print(srcnn_model.summary())
    data, label = pd.read_training_data("./train.h5")
    val_data, val_label = pd.read_training_data("./test.h5")
    # Checkpoint keeps only the full model with the lowest validation loss.
    best_ckpt = ModelCheckpoint("check.h5",
                                monitor='val_loss',
                                verbose=1,
                                save_best_only=True,
                                save_weights_only=False,
                                mode='min')
    srcnn_model.fit(data, label,
                    batch_size=128,
                    validation_data=(val_data, val_label),
                    callbacks=[best_ckpt],
                    shuffle=True,
                    nb_epoch=100,
                    verbose=0)
    # Save the architecture as JSON and the last-epoch weights as HDF5.
    with open("model.json", "w") as json_file:
        json_file.write(srcnn_model.to_json())
    srcnn_model.save_weights("model.h5")
def train():
    """Train SRCNN (scale 3) and export per-epoch PSNR curves to text files."""
    srcnn_model = model()
    print(srcnn_model.summary())
    data, label = pd.read_training_data("./crop_train.h5")
    val_data, val_label = pd.read_training_data("./test.h5")
    checkpoint = ModelCheckpoint("SRCNN_scale3.h5", monitor='val_loss',
                                 verbose=1, save_best_only=True,
                                 save_weights_only=False, mode='min')
    history = srcnn_model.fit(data, label,
                              batch_size=128,
                              validation_data=(val_data, val_label),
                              callbacks=[checkpoint],
                              shuffle=True,
                              nb_epoch=200,
                              verbose=0)
    # Dump the training and validation PSNR histories for later plotting.
    np.savetxt("butterfly_scale3.txt",
               np.array(history.history['PSNRLoss']), delimiter=",")
    np.savetxt("butterfly_val_scale3.txt",
               np.array(history.history['val_PSNRLoss']), delimiter=",")
    print(history.history.keys())
def EES_train():
    """Train the EES (16-filter) model; log the history to CSV and save weights.

    FIX: `print EES.summary()` used Python 2 print-statement syntax, which is
    a SyntaxError under Python 3 and inconsistent with the print() calls used
    throughout the rest of this file.
    """
    EES = model_EES16()
    EES.compile(optimizer=adam(lr=0.0003), loss='mse')
    print(EES.summary())
    data, label = pd.read_training_data("./train.h5")
    val_data, val_label = pd.read_training_data("./val.h5")
    # Keep only the full model with the lowest validation loss.
    checkpoint = ModelCheckpoint("EES_check.h5", monitor='val_loss', verbose=1,
                                 save_best_only=True, save_weights_only=False,
                                 mode='min')
    callbacks_list = [checkpoint]
    history_callback = EES.fit(data, label, batch_size=64,
                               validation_data=(val_data, val_label),
                               callbacks=callbacks_list, shuffle=True,
                               nb_epoch=200, verbose=1)
    # Persist the per-epoch metrics and the final (last-epoch) weights.
    pandas.DataFrame(history_callback.history).to_csv("history.csv")
    EES.save_weights("EES_final.h5")
def train():
    """Train SRCNN, checkpointing the lowest-validation-loss model."""
    net = model()
    print(net.summary())
    data, label = pd.read_training_data("./crop_train.h5")
    val_data, val_label = pd.read_training_data("./test.h5")
    best_ckpt = ModelCheckpoint("SRCNN_check.h5",
                                monitor='val_loss',
                                verbose=1,
                                save_best_only=True,
                                save_weights_only=False,
                                mode='min')
    net.fit(data, label,
            batch_size=128,
            validation_data=(val_data, val_label),
            callbacks=[best_ckpt],
            shuffle=True,
            nb_epoch=300,
            verbose=0)
def train(self, batch_size=128):
    """Fit the combined SRCNN network and save its weights.

    Args:
        batch_size: mini-batch size passed to fit() (default 128).
    """
    data, label = pd.read_training_data("./train.h5")
    val_data, val_label = pd.read_training_data("./test.h5")
    # Snapshot only the full model with the lowest validation loss.
    best_ckpt = ModelCheckpoint("SRCNN_check.h5",
                                monitor='val_loss',
                                verbose=1,
                                save_best_only=True,
                                save_weights_only=False,
                                mode='min')
    self.nn_train.fit(data, label,
                      batch_size=batch_size,
                      validation_data=(val_data, val_label),
                      callbacks=[best_ckpt],
                      shuffle=True,
                      epochs=self.epochs)
    self.nn_train.save_weights("srcnn_combined.h5")
def train():
    """Train SRCNN briefly, then visualize activations for one sample patch."""
    srcnn_model = model()
    print(srcnn_model.summary())
    # NOTE: training data was originally './train.h5'.
    data, label = pd.read_training_data("./crop_train.h5")
    val_data, val_label = pd.read_training_data("./test.h5")
    checkpoint = ModelCheckpoint("SRCNN_check.h5", monitor='val_loss',
                                 verbose=1, save_best_only=True,
                                 save_weights_only=False, mode='min')
    history = srcnn_model.fit(data, label,
                              batch_size=128,
                              validation_data=(val_data, val_label),
                              callbacks=[checkpoint],
                              shuffle=True,
                              nb_epoch=10,
                              verbose=0)
    print(history.history.keys())
    print( "data: ", numpy.size(data[10]))
    # Run sample #10 (assumed a 32x32 single-channel patch — confirm against
    # prepare_data.py) through the net and display its activations.
    activations = srcnn_model.predict(data[10].reshape(1, 32, 32, 1))
    display_activation(activations, 1, 2, 0)
    return activations
def train():
    """Prepare training data, train SRCNN for 30 epochs, and save weights.

    pd.main() (prepare_data.py) is expected to create ./model/train.h5 before
    the model is fit.
    """
    pd.main()
    net = model()
    data, label = pd.read_training_data("./model/train.h5")
    net.fit(data, label, batch_size=128, epochs=30)
    net.save_weights("./model/srcnn_model.h5")
def EEDS_train():
    """Train EEDS; checkpoint the best weights and save the final ones.

    FIX: `print _EEDS.summary()` used Python 2 print-statement syntax, which
    is a SyntaxError under Python 3 and inconsistent with the print() calls
    used throughout the rest of this file.
    """
    _EEDS = model_EEDS()
    print(_EEDS.summary())
    data, label = pd.read_training_data("./train.h5")
    val_data, val_label = pd.read_training_data("./val.h5")
    # Weights-only checkpoint of the lowest validation loss seen so far.
    checkpoint = ModelCheckpoint("EEDS_check.h5", monitor='val_loss',
                                 verbose=1, save_best_only=True,
                                 save_weights_only=True, mode='min')
    callbacks_list = [checkpoint]
    _EEDS.fit(data, label, batch_size=64,
              validation_data=(val_data, val_label),
              callbacks=callbacks_list, shuffle=True,
              nb_epoch=200, verbose=1)
    # Also persist the last-epoch weights (may differ from the best checkpoint).
    _EEDS.save_weights("EEDS_final.h5")
def train():
    """Train the SRCNN wrapper, snapshotting the full model every 50 epochs."""
    srcnn = Model()
    srcnn()  # presumably builds/compiles the underlying network — confirm in Model.__call__
    data, label = pd.read_training_data("./train.h5")
    # One full-model snapshot every 50 epochs, named by epoch number.
    snapshot = ModelCheckpoint("./checkpoint/saved-model-{epoch:02d}.h5",
                               save_best_only=False,
                               save_weights_only=False,
                               mode='min',
                               period=50)
    srcnn.SRCNN.fit(data, label,
                    batch_size=128,
                    callbacks=[snapshot],
                    shuffle=True,
                    epochs=200)
def InceptionResNet_train():
    """Train the Inception-ResNet super-resolution (x2) model.

    BUG FIX: ReduceLROnPlateau previously monitored 'val_acc', which is never
    logged because the model's only compiled metric is PSNRLoss — the LR
    schedule was therefore a silent no-op. It now monitors 'val_loss' with
    mode='min'.
    """
    InceptionResNet = model_InceptionResNet()
    InceptionResNet.compile(optimizer=adam(lr=0.0003), loss='mse',
                            metrics=[PSNRLoss])
    print(InceptionResNet.summary())
    data, label = pd.read_training_data("drive/SuperResolution/onetrain2x.h5")
    val_data, val_label = pd.read_training_data(
        "drive/SuperResolution/oneval2x.h5")
    # Shrink the LR by 10% after 25 stagnant epochs; never go below 1e-6.
    reduce_lr = ReduceLROnPlateau(monitor='val_loss', mode='min', factor=0.9,
                                  patience=25, min_lr=0.000001, verbose=1)
    # Keep only the full model with the lowest validation loss.
    checkpoint = ModelCheckpoint("drive/SuperResolution/oneResNetXx2_model.h5",
                                 monitor='val_loss', verbose=1,
                                 save_best_only=True, save_weights_only=False,
                                 mode='min')
    callbacks_list = [reduce_lr, checkpoint]
    InceptionResNet.fit(data, label, batch_size=64,
                        validation_data=(val_data, val_label),
                        callbacks=callbacks_list, shuffle=True,
                        nb_epoch=200, verbose=1)
def EED_train():
    """Compile EED (Adam lr=3e-4, MSE) and train it for 100 epochs."""
    net = model_EED()
    net.compile(optimizer=adam(lr=0.0003), loss='mse')
    train_x, train_y = pd.read_training_data("./train.h5")
    net.fit(train_x, train_y, batch_size=256, nb_epoch=100)
    net.save_weights("EED_model_adam100.h5")
def train():
    """Train SRCNN on HR patch data, logging loss metrics to metrics.csv.

    Two modes, selected by the local `train_on_batch` flag:
      * True  — manual train_on_batch loop with per-iteration CSV logging and
        a full-model snapshot every `save_model_step` iterations.
      * False — standard fit() run with a best-val-loss checkpoint, writing
        per-epoch loss/val_loss to the CSV afterwards.

    FIX: metrics.csv was opened without a context manager in both branches,
    leaking the file handle if training raised; both opens are now `with`
    blocks so the file is always closed.
    """
    srcnn_model = model()
    print(srcnn_model.summary())
    data, label = pd.read_training_data("train_HR.h5")
    val_data, val_label = pd.read_training_data("test_HR.h5")
    print('data.shape = ' + str(data.shape))
    train_on_batch = True
    if train_on_batch:
        with open(os.path.join(loss_save_dir, 'metrics.csv'), 'w') as f:
            train_ids = np.arange(0, len(data))
            iter_num = 0
            epoches = 10
            batch_size = 128
            save_model_step = 1000
            for e in range(1, epoches + 1):
                print('epoch ' + str(e))
                np.random.shuffle(train_ids)
                train_batchs_ids = np.array_split(
                    train_ids, int((len(data) / batch_size) + 1))
                for tr_batch_ids in train_batchs_ids:
                    train_batch_loss = srcnn_model.train_on_batch(
                        data[tr_batch_ids], label[tr_batch_ids])
                    # Validation loss on a random batch — for logging only.
                    val_batch_ids = np.random.choice(len(val_data), batch_size)
                    val_batch_loss = srcnn_model.test_on_batch(
                        val_data[val_batch_ids], val_label[val_batch_ids])
                    iter_num += 1
                    print(
                        str(datetime.now()) + ' iter ' + str(iter_num) +
                        ', loss: ' + str(train_batch_loss) + ', val_loss: ' +
                        str(val_batch_loss))
                    metrics = str(iter_num) + ',' + str(
                        train_batch_loss) + ',' + str(val_batch_loss)
                    f.write(metrics)
                    f.write('\n')
                    f.flush()
                    # Periodic full-model snapshot, named by iteration count.
                    if (iter_num % save_model_step) == 0:
                        srcnn_model.save(
                            os.path.join(loss_save_dir,
                                         'iter_' + str(iter_num) + '.h5'))
    else:
        filepath_val_loss = os.path.join(
            loss_save_dir, 'val_loss_e_{epoch:02d}_loss_{val_loss:.8f}.h5')
        checkpoint_val_loss = ModelCheckpoint(filepath_val_loss,
                                              monitor='val_loss',
                                              verbose=1,
                                              save_best_only=True,
                                              mode='min')
        history = srcnn_model.fit(data, label,
                                  batch_size=128,
                                  validation_data=(val_data, val_label),
                                  callbacks=[checkpoint_val_loss],
                                  shuffle=True,
                                  nb_epoch=200,
                                  verbose=1)
        print(history.history.keys())
        loss = history.history['loss']
        val_loss = history.history['val_loss']
        with open(os.path.join(loss_save_dir, 'metrics.csv'), 'w') as f:
            for i in range(len(loss)):
                f.write(str(i + 1) + ',' + str(loss[i]) + ',' + str(val_loss[i]))
                f.write('\n')
                f.flush()
def train():
    """Train SRCNN for 30 epochs and save the resulting weights."""
    net = model()
    train_x, train_y = pd.read_training_data("./train.h5")
    net.fit(train_x, train_y, batch_size=128, nb_epoch=30)
    net.save_weights("m_model_adam30.h5")
def EES_train():
    """Train EES on 48x48 patches for 200 epochs and save the weights."""
    net = model_EES(input_col=48, input_row=48)
    train_x, train_y = pd.read_training_data("./train.h5")
    net.fit(train_x, train_y, batch_size=256, nb_epoch=200)
    net.save_weights("EES_model_adam200.h5")
def EEDS_train():
    """Train EEDS on 48x48 patches for 100 epochs and save the weights."""
    net = model_EEDS(input_col=48, input_row=48)
    train_x, train_y = pd.read_training_data("./little_train.h5")
    net.fit(train_x, train_y, batch_size=256, nb_epoch=100)
    net.save_weights("EEDS8_model_adam100.h5")