def model_fitting(model_x, model_y, input_x, input_y, output_x, output_y):
    if browser:
        # To inspect training, run in a terminal: tensorboard --logdir=logs/
        webbrowser.open('http://localhost:6006/', new=1)
        tensorboard = TensorBoard(log_dir="logs\\{}".format(time()))
        print("Fitting X-axis")
        model_x.fit(input_x, output_x, epochs=epochs, callbacks=[tensorboard])
        tensorboard = TensorBoard(log_dir="logs\\{}".format(time()))
        print("Fitting Y-axis")
        model_y.fit(input_y, output_y, epochs=epochs, callbacks=[tensorboard])
    else:
        print("Fitting X-axis")
        model_x.fit(input_x, output_x, epochs=epochs)
        print("------------------------------------------")
        print("")
        print("Fitting Y-axis")
        model_y.fit(input_y, output_y, epochs=epochs)
    print("model_x summary")
    model_x.summary()
    print("")
    print("model_y summary")
    model_y.summary()
    if save:
        model_x.save("x_axis.h5")
        model_y.save("y_axis.h5")
def TensorBoardCallback(model_name=None):
    # Clear old runs:   rm /tmp/tensorboard/*
    # Start the server: tensorboard --logdir /tmp/tensorboard
    # Then browse to:   http://127.0.1.1:6006
    timestamp = datetime.datetime.utcnow().strftime("%Y-%m-%dT%H%M%SZ")
    if model_name:
        return TensorBoard(log_dir=f'/tmp/tensorboard/{timestamp}_{model_name}')
    return TensorBoard(log_dir=f'/tmp/tensorboard/{timestamp}')
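# A minimal usage sketch for the factory above (not from the original source):
# the model and data below are hypothetical stand-ins; the only assumption
# about the snippet itself is that TensorBoard is tf.keras.callbacks.TensorBoard.
import datetime
import numpy as np
import tensorflow as tf
from tensorflow.keras.callbacks import TensorBoard

x = np.random.random((32, 4))
y = np.random.random((32, 1))
model = tf.keras.Sequential([tf.keras.layers.Dense(1, input_shape=(4,))])
model.compile(optimizer='adam', loss='mse')
model.fit(x, y, epochs=2, callbacks=[TensorBoardCallback('baseline')])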
def compile_model(self):
    # init summary writers for tensorboard
    self.callback1 = TensorBoard(self.log_dir + '/discriminator')
    self.callback2 = TensorBoard(self.log_dir + '/generator')
    self.callback3 = TensorBoard(self.log_dir + '/generated_images')

    # model setup
    input_shape = [self.image_size, self.image_size, self.image_channels]
    adam1 = Adam(lr=self.lr)
    adam2 = Adam(lr=self.lr * 2)

    # build the sub-models, adding multi-GPU support when available
    try:
        self.discriminator = multi_gpu_model(self.discriminator(), gpus=self.gpu)
    except Exception:
        self.discriminator = self.discriminator()
    try:
        self.generator = multi_gpu_model(self.generator(), gpus=self.gpu)
    except Exception:
        self.generator = self.generator()

    # compile discriminator
    self.discriminator.compile(loss='binary_crossentropy', optimizer=adam1)

    # compile generator; the discriminator is frozen so only the generator trains here
    input_tensor = Input(shape=input_shape)
    generated_cartoon_tensor = self.generator(input_tensor)
    self.discriminator.trainable = False
    discriminator_output = self.discriminator(generated_cartoon_tensor)
    self.train_generator = Model(
        input_tensor, outputs=[generated_cartoon_tensor, discriminator_output])

    # add multi-GPU support
    try:
        self.train_generator = multi_gpu_model(self.train_generator, gpus=self.gpu)
    except Exception:
        pass
    self.train_generator.compile(
        loss=[self.vgg_loss, 'binary_crossentropy'],
        loss_weights=[float(self.weight), 1.0],
        optimizer=adam2)

    # attach the models to their tensorboard writers
    self.callback1.set_model(self.discriminator)
    self.callback2.set_model(self.train_generator)
    self.callback3.set_model(self.train_generator)
def pretrain(self, x, batch_size=256, epochs=200, optimizer='adam'):
    logger.info('Pretraining...')
    self._autoencoder.compile(optimizer=optimizer, loss='mse')

    STRFTIME = "%Y-%m-%d_%H:%M"
    csv_logger = CSVLogger(os.path.join(
        self._save_dir, f'pretrain_log_{datetime.now().strftime(STRFTIME)}.csv'))
    callback_tensorboard = TensorBoard(
        log_dir=os.path.join(self._log_dir, datetime.now().strftime(STRFTIME)),
        histogram_freq=2,
        batch_size=32,
        write_graph=True,
        write_grads=True,
        write_images=False)

    # begin training
    t0 = time()
    try:
        self._autoencoder.fit(x, x, batch_size=batch_size, epochs=epochs,
                              callbacks=[csv_logger, callback_tensorboard],
                              verbose=False, validation_split=0.1)
    except ValueError:
        # fall back for models whose output matches only the last element of x
        self._autoencoder.fit(x, x[-1], batch_size=batch_size, epochs=epochs,
                              callbacks=[csv_logger, callback_tensorboard],
                              verbose=False, validation_split=0.1)
    logger.info('Pretraining time: {}'.format(time() - t0))
    save_path = os.path.join(self._save_dir, 'pretrain_cae_model.h5')
    self._autoencoder.save(save_path)
    logger.info('Pretrained weights are saved to {}'.format(save_path))
    self._pretrained = True
def fit(self, model: GlowModel, dp: DataProcessor):
    tc = self.config.training
    model.dump_model_internal()
    self.compile(model)
    steps_per_epoch = tc.steps_per_epoch or dp.image_count // tc.batch_size
    callbacks = [
        SamplingCallback(self.config, model),
        TensorBoard(
            str(self.config.resource.tensorboard_dir),
            batch_size=tc.batch_size,
            write_graph=True,
            # histogram_freq=5,
            write_grads=True),
        ReduceLROnPlateau(monitor='loss', factor=tc.lr_decay,
                          patience=tc.lr_patience, verbose=1,
                          min_lr=tc.min_lr),  # tc.min_lr is an assumed config field; the original passed tc.lr_patience here
    ]
    try:
        model.encoder.fit_generator(self.generator_for_fit(dp),
                                    epochs=tc.epochs,
                                    steps_per_epoch=steps_per_epoch,
                                    callbacks=callbacks,
                                    verbose=1)
    except InvalidArgumentError:
        model.dump_model_internal()
        raise
def make_callback(self, log_dir: str, config: ProtocolTrainConfig) -> List[Callback]:
    tensorboard = TensorBoard(log_dir=log_dir, update_freq=16, profile_batch=0)
    callbacks = [tensorboard]

    # region Image Callbacks
    if config.image_callbacks_configs is not None:
        for icc in config.image_callbacks_configs:
            callbacks += icc.to_callbacks(tensorboard, self.dataset_loader)
    # endregion

    # region Checkpoint
    weights_path = os.path.join(log_dir, "weights_{epoch:03d}.hdf5")
    model_checkpoint = ModelCheckpoint(weights_path)
    callbacks.append(model_checkpoint)
    # endregion

    # region Early stopping
    if config.early_stopping_metric is not None:
        early_stopping = EarlyStopping(monitor=config.early_stopping_metric,
                                       mode="min",
                                       patience=5)
        callbacks.append(early_stopping)
    # endregion

    # region AUC
    if config.auc_callbacks_configs is not None:
        for acc in config.auc_callbacks_configs:
            callbacks.append(acc.to_callback(tensorboard, self.dataset_loader))
    # endregion

    return callbacks
def execute(self):
    result_file = os.path.join(self.result_dir,
                               "train_result_{}.txt".format(self.task_index))
    config = tf.ConfigProto(allow_soft_placement=True)
    config.gpu_options.allow_growth = True
    with tf.Session(self.server.target, config=config) as sess:
        K.set_session(sess)
        if self.go_on:
            self.restore_model()
        tb_callback = TensorBoard(log_dir=self.log_dir, write_grads=True,
                                  write_images=True)
        ckpt_callback = ModelCheckpoint(self.checkpoint_path, monitor='loss',
                                        save_weights_only=True)
        reduce_lr = ReduceLROnPlateau(monitor='loss', factor=0.1, patience=3,
                                      verbose=1)
        early_stopping = EarlyStopping(monitor='loss', min_delta=0, patience=10,
                                       verbose=1)
        # save model checkpoints and tensorboard events on worker:0 only
        callbacks = [tb_callback, ckpt_callback] if self.task_index == 0 else []
        callbacks += [reduce_lr, early_stopping]
        his = self.model.fit_generator(self.generate_rdd_data(),
                                       steps_per_epoch=self.steps_per_epoch,
                                       # validation_data=self.val_generate_data(val_data),
                                       # validation_steps=max(1, self.val_num // self.batch_size),
                                       epochs=self.epochs + self.initial_epoch,
                                       initial_epoch=self.initial_epoch,
                                       workers=0,
                                       callbacks=callbacks)
        logger.debug("{}-{}".format(self.task_index, his.history))
        ModelDir.write_result(result_file, self.get_results(his), self.go_on)
        self.save_model()
        self.tf_feed.terminate()
def train(args):
    '''Train the model.'''
    model = build_model()
    X_train, Y1_train, Y2_train, X_test, Y1_test, Y2_test = load_dataset(
        args.dataset_name, args.num_train)
    mc = ModelCheckpoint('best_model.h5',
                         monitor='dense_2_categorical_accuracy',
                         mode='max', verbose=1, save_best_only=True)
    es = EarlyStopping(monitor='loss', mode='min', verbose=1, patience=10)
    print('Fitting model...')
    model.fit(X_train, [Y1_train, Y2_train],
              epochs=args.epochs,
              verbose=1,
              validation_data=(X_test, [Y1_test, Y2_test]),
              callbacks=[TensorBoard(log_dir=TENSORBOARD_DIR), es, mc])
    _, _, _, cat_acc, subcat_acc = model.evaluate(X_test, [Y1_test, Y2_test],
                                                  verbose=0)
    print('Final result is:', subcat_acc)
    model.save("model.h5")
    print("Saved model to disk")
def train(self):
    tbCallBack = TensorBoard(log_dir='./lstm_logs', histogram_freq=0,
                             write_graph=True, write_images=True)

    def modelSave(epoch, logs):
        # checkpoint the model every 5 epochs
        if (epoch + 1) % 5 == 0:
            self.model.save('lstm_saved_model.h5')

    msCallBack = LambdaCallback(on_epoch_end=modelSave)

    texts_raw_indices, texts_raw_without_aspects_indices, texts_left_indices, \
        texts_left_with_aspects_indices, aspects_indices, texts_right_indices, \
        texts_right_with_aspects_indices, polarities_matrix = \
        read_dataset(type=self.DATASET,
                     mode='test',
                     embedding_dim=self.EMBEDDING_DIM,
                     max_seq_len=self.MAX_SEQUENCE_LENGTH,
                     max_aspect_len=self.MAX_ASPECT_LENGTH)

    self.model.fit(self.texts_raw_indices, self.polarities_matrix,
                   validation_data=(texts_raw_indices, polarities_matrix),
                   epochs=self.EPOCHS,
                   batch_size=self.BATCH_SIZE,
                   callbacks=[tbCallBack, msCallBack])
def __init__(self) -> None:
    """Train and evaluate a small dense network on MNIST."""
    mnist = tf.keras.datasets.mnist
    (x_train, y_train), (x_test, y_test) = mnist.load_data()
    # scale pixel values to [0, 1] for both splits
    x_train, x_test = x_train / 255.0, x_test / 255.0
    network = tf.keras.models.Sequential([
        tf.keras.layers.Flatten(input_shape=(28, 28)),
        tf.keras.layers.Dense(512, activation=tf.nn.relu),
        tf.keras.layers.Dropout(0.2),
        tf.keras.layers.Dense(10, activation=tf.nn.softmax)
    ])
    tensorboard = TensorBoard(log_dir='logs/{}'.format(time()))
    network.compile(optimizer='adam',
                    loss='sparse_categorical_crossentropy',
                    metrics=['accuracy'])
    network.fit(x_train, y_train, epochs=5, callbacks=[tensorboard])
    print('Evaluation starts')
    test_loss, test_acc = network.evaluate(x_test, y_test)
    print('Test accuracy:', test_acc)
    print('Test loss:', test_loss)
    network.save('Model/net.h5')
def train_all_layers(model):
    print('Training all layers...')
    for l in model.layers:
        l.trainable = True
    mae_callback = MAECallback()
    early_stopping_callback = EarlyStopping(monitor='val_mae', mode='min',
                                            verbose=1, patience=10)
    model_checkpoint_callback = ModelCheckpoint(
        'saved_models/all_layers_trained_weights.{epoch:02d}-{val_mae:.2f}.h5',
        monitor='val_mae',
        mode='min',
        verbose=1,
        save_best_only=True)
    tensorboard_callback = TensorBoard(log_dir=config.ALL_LAYERS_LOG_DIR,
                                       batch_size=train_generator.batch_size)
    model.compile(loss='mean_squared_error', optimizer='adam')
    model.fit_generator(generator=train_generator,
                        steps_per_epoch=batches_per_epoch,
                        epochs=100,
                        callbacks=[
                            mae_callback,
                            early_stopping_callback,
                            model_checkpoint_callback,
                            tensorboard_callback
                        ])
def fit_model(built_model, x_train, y_train, x_val, y_val, epochs, batch_size):
    model = built_model

    # define callbacks; drop any you don't need from the list passed to fit()
    early_stopping = EarlyStopping(monitor='val_loss', patience=3)
    tensorboard = TensorBoard(log_dir='./logs',
                              histogram_freq=10,
                              write_graph=True,
                              write_grads=False,
                              write_images=True,
                              embeddings_freq=0,
                              embeddings_layer_names=None,
                              embeddings_metadata=None,
                              embeddings_data=None,
                              update_freq='epoch')
    filepath = "weights.{epoch:02d}-{val_loss:.2f}.hdf5"
    checkpoint = ModelCheckpoint(filepath,
                                 monitor='val_acc',
                                 verbose=0,
                                 save_weights_only=False,
                                 save_best_only=False,
                                 mode='max')

    # fit model
    history = model.fit(x=x_train,
                        y=y_train,
                        validation_data=(x_val, y_val),
                        epochs=epochs,
                        batch_size=batch_size,
                        callbacks=[early_stopping, tensorboard, checkpoint])
    return history
def __init__(self, train_data=None, test_data=None, tb_log_dir=None,
             batch_size=128, epochs=100, verbosity=1):
    self.verbose = verbosity
    self.epochs = epochs
    self.batch_size = batch_size
    self.n_timesteps = train_data["X"].shape[1]
    # only the first 3 of train_data["X"].shape[2] features feed the first model
    self.n_features = 3
    self.n_outputs = train_data["y"].shape[1]
    self.train_X = train_data["X"][:, :, :3]
    self.train_y = train_data["y"]
    self.test_X = test_data["X"][:, :, :3]
    self.test_y = test_data["y"]
    # the remaining features feed the second model
    self.train_X2 = train_data["X"][:, :, 3:]
    self.train_y2 = train_data["y"]
    self.test_X2 = test_data["X"][:, :, 3:]
    self.test_y2 = test_data["y"]
    self.callbacks = [
        F1_metrics(),
        TensorBoard(log_dir=tb_log_dir,
                    write_grads=True,
                    write_graph=True,
                    histogram_freq=3,
                    batch_size=self.batch_size)
    ]
    self.model = None
def main():
    # generate random data
    data = np.random.random((1000, 100))
    labels = np.random.randint(2, size=(1000, 1))

    # define neural network model
    model = Sequential()
    model.add(Dense(32, activation='relu', input_dim=100))
    model.add(Dense(1, activation='sigmoid'))
    model.compile(optimizer='rmsprop',
                  loss='binary_crossentropy',
                  metrics=['accuracy'])
    plot_model(model, to_file='model.png')

    # create tensorboard logs
    tensorboard = TensorBoard(log_dir='logs/{}'.format(time()), write_graph=True)

    # train model
    model.fit(data, labels, epochs=10, batch_size=32, callbacks=[tensorboard])

    # evaluate model (no separate test set)
    predictions = model.predict(data)
    loss, accuracy = model.evaluate(data, labels, batch_size=32)
    print("Loss: {}".format(loss))
    print("Accuracy: {}".format(accuracy))
def train(self):
    self.pipeline = self.pipeline_cls(raw_data=self.raw_data,
                                      standard_data_dict=self.standard_data,
                                      processor_cls=self.processor_cls,
                                      dataloader_cls=self.dataloader_cls,
                                      num_classes=self.num_classes,
                                      bert_pretrained_path=self.bert_pretrained_path,
                                      fix_length=self.fix_length,
                                      max_length=self.max_length,
                                      data_trec=self.data_trec)
    tb_callback = TensorBoard(log_dir=self.log_path)
    callbacks = [tb_callback]
    if self.evaluator_cls:
        callbacks.append(self.evaluator_cls(self.pipeline, self.log_path,
                                            self.model_save_path))
    self.pipeline.build(tokenizer=self.tokenizer,
                        batch_size=self.batch_size,
                        data_refresh=self.data_refresh,
                        vocab_size=self.vocab_size,
                        min_freq=self.min_freq)
    self.pipeline.train(epochs=self.epochs, callbacks=callbacks)
    if not self.evaluator_cls:
        self.pipeline.save(self.model_save_path, "bert_model.weights",
                           fields_save=True, weights_only=True)
    self.pipeline.test()
def train(self):
    tbCallBack = TensorBoard(log_dir='./ram_logs', histogram_freq=0,
                             write_graph=True, write_images=True)
    texts_raw_indices, texts_left_indices, aspects_indices, \
        texts_right_indices, polarities_matrix = \
        read_dataset(type=self.DATASET,
                     mode='test',
                     embedding_dim=self.EMBEDDING_DIM,
                     max_seq_len=self.MAX_SEQUENCE_LENGTH,
                     max_aspect_len=self.MAX_ASPECT_LENGTH)

    for i in range(1, self.ITERATION):
        print()
        print('-' * 50)
        print('Iteration', i)
        self.model.fit([self.texts_raw_indices, self.aspects_indices],
                       self.polarities_matrix,
                       validation_data=([texts_raw_indices, aspects_indices],
                                        polarities_matrix),
                       batch_size=self.BATCH_SIZE,
                       callbacks=[tbCallBack])
        if i % 5 == 0:
            self.model.save('ram_saved_model.h5')
            print('model saved')
def train(self, epochs, batch_size, tb_logs_dir=None, on_tpu=None, verbose=False):
    """
    Trains the model. If the initial config file contained training
    parameters, these don't have to be redefined here, but they can
    still be overridden.
    """
    early_stopping_callback = EarlyStopping(monitor="val_loss", patience=5)
    callbacks = [early_stopping_callback]
    if bool(tb_logs_dir):
        # define the tensorboard callback for training
        date_time = datetime.now().strftime('%Y-%m-%d-%H%M%S')
        log_name = os.path.join(tb_logs_dir, "{}_{}".format(self.name, date_time))
        tensorboard_callback = TensorBoard(log_dir=log_name,
                                           write_graph=True,
                                           write_images=True)
        callbacks += [tensorboard_callback]
    # the model has to be compiled differently when running on a TPU
    if bool(on_tpu):
        self.train_on_tpu(on_tpu, epochs, batch_size, callbacks)
    else:
        self.train_on_cpu(epochs, batch_size, callbacks, verbose)
def build_model(self):
    model = Sequential()
    model.add(Conv2D(32, (4, 4),
                     strides=(2, 2),
                     input_shape=(24, 10, 1),
                     kernel_initializer=initializers.glorot_uniform(),
                     activation=activations.relu,
                     kernel_regularizer=regularizers.l2(0.01)))
    model.add(Conv2D(64, (3, 3), strides=(1, 1), activation=activations.relu))
    model.add(Conv2D(128, (2, 2), strides=(1, 1), activation=activations.relu))
    # model.add(Dropout(0.5))
    model.add(Flatten())
    # An alternative head of Dense(512/256/128/64) layers with LeakyReLU and
    # BatchNormalization was tried and left disabled here.
    model.add(Dense(self.action_size, activation=activations.softmax))
    model.compile(loss=losses.categorical_crossentropy,  # 'mse' is another option
                  optimizer=optimizers.RMSprop(lr=self.LEARNING_RATE))  # RMSprop, Adam, Nadam
    self.tensorBoard = TensorBoard('./logs/RLAgent', histogram_freq=0,
                                   write_graph=True, write_images=True)
    model.summary()
    return model
def get_callbacks(file_name=''):
    # with no file name there is nothing to log or checkpoint
    if file_name == '':
        return []
    path_checkpoint = 'checkpoint_keras_' + file_name
    log_dir = 'logs_' + file_name
    callback_checkpoint = ModelCheckpoint(filepath=path_checkpoint,
                                          monitor='val_loss',
                                          verbose=1,
                                          save_weights_only=False,
                                          save_best_only=True,
                                          mode='auto',
                                          period=1)
    callback_early_stopping = EarlyStopping(monitor='val_loss',
                                            patience=5, verbose=1)
    callback_tensorboard = TensorBoard(log_dir=log_dir,
                                       histogram_freq=0,
                                       write_graph=False)
    callback_reduce_lr = ReduceLROnPlateau(monitor='val_loss',
                                           factor=0.1,
                                           min_lr=1e-4,
                                           patience=3,
                                           verbose=1)
    # note: the early-stopping callback is defined but not included below
    return [callback_checkpoint, callback_tensorboard, callback_reduce_lr]
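# A minimal usage sketch for get_callbacks above (not from the original
# source): the model and data are hypothetical stand-ins, and the callback
# classes are assumed to come from tensorflow.keras.callbacks.
import numpy as np
import tensorflow as tf

x = np.random.random((64, 8))
y = np.random.randint(2, size=(64, 1))
model = tf.keras.Sequential([tf.keras.layers.Dense(1, activation='sigmoid',
                                                   input_shape=(8,))])
model.compile(optimizer='adam', loss='binary_crossentropy')
# a validation split is needed because the callbacks monitor val_loss
model.fit(x, y, epochs=3, validation_split=0.2,
          callbacks=get_callbacks(file_name='demo'))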
def __init__(self):
    self.val_params: List[ValParamSet] = create_vals()
    self.time = 0
    self.epochs_done = 0
    self.state_repeats = 0
    self.new_states = 0
    self.epochs_learn = 10
    self.learning_rate = 1
    self.batch_size = 60
    self.validation_batch_size = 20
    self.l1 = 8
    self.l2 = 168
    self.l3 = 54
    self.x_batch = []
    self.y_batch = []
    self.max_epsilon = 1
    self.min_epsilon = 0.01
    self.goodmemes = []
    self.epsilon = 1
    self.pred_plot_memory = []
    self.run_no = 0
    self.greedy_run_no = 0
    self.actual_epoch_index = 0
    self.last_weights = None
    self.not_to_learn_last_epochs = 50
    self.global_rewards_mem = []
    name = "nn_l_rate={} layers={}".format(self.vp().nn_l_rate, self.vp().layers)
    self.tensorboard = TensorBoard(log_dir="logs\\{}".format(name))
    print('TensorBoard logging to logs\\{}'.format(name))
def main():
    (x_train, y_train), (x_test, y_test) = fashion_mnist.load_data()
    x_train = x_train.reshape(60000, 784)
    x_train = x_train / 255
    x_test = x_test.reshape(10000, 784)
    x_test = x_test / 255
    y_train = utils.to_categorical(y_train, 10)
    y_test = utils.to_categorical(y_test, 10)

    model = Sequential()
    model.add(Dense(784, input_dim=784, activation="relu"))
    model.add(Dense(10, activation="softmax"))
    model.compile(loss="categorical_crossentropy",
                  optimizer="SGD",
                  metrics=["accuracy"])
    model.summary()

    callback = [
        TensorBoard(log_dir='logs', histogram_freq=1, write_images=True)
    ]
    model.fit(x_train, y_train,
              batch_size=200,
              epochs=300,
              verbose=1,
              validation_split=0.2,
              callbacks=callback)
    model.save("fashion_model.h5")

    score = model.evaluate(x_test, y_test, verbose=1)
    print("Accuracy on test data is", score[1] * 100, "percent")
def trainCNN(name, X, y, layerSize, convLayer, denseLayer):
    X = X / 255.0

    model = Sequential()
    model.add(Conv2D(layerSize, (3, 3), input_shape=X.shape[1:]))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    for x in range(convLayer - 1):
        model.add(Conv2D(layerSize, (3, 3)))
        model.add(Activation('relu'))
        model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Flatten())
    for i in range(denseLayer):
        model.add(Dense(layerSize))
        model.add(Activation('relu'))
    model.add(Dense(1))
    model.add(Activation('sigmoid'))

    tensorboard = TensorBoard(log_dir="logs/{}".format(name))
    model.compile(loss='binary_crossentropy',
                  optimizer='adam',
                  metrics=['accuracy'])
    model.fit(X, y,
              batch_size=32,
              epochs=10,
              validation_split=0.3,
              callbacks=[tensorboard])
    model.save(str(layerSize) + "x" + str(convLayer) + "-CNN.model")
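# The name/layerSize/convLayer/denseLayer parameters suggest a grid search
# over architectures, with one TensorBoard run per combination so the curves
# can be compared side by side. A sketch of that outer loop (hypothetical,
# not from the original source; assumes `import time` and that X and y are
# already loaded):
import time

for dense_layer in [0, 1, 2]:
    for layer_size in [32, 64, 128]:
        for conv_layer in [1, 2, 3]:
            run_name = "{}-conv-{}-nodes-{}-dense-{}".format(
                conv_layer, layer_size, dense_layer, int(time.time()))
            trainCNN(run_name, X, y, layer_size, conv_layer, dense_layer)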
def fitting(self, model, model_name):
    # alternative run prefixes ("paraview_", "dchp_") were used in other experiments
    name = "size_" + model_name + "_" + str(nepochs) + "-{}".format(int(time.time()))
    callbacks = [
        TensorBoard(
            log_dir="/Users/malavikavijayendravasist/Desktop/mt2/Tensorboard/handpicked_size/{}".format(name),
            batch_size=batch_size),
        ModelCheckpoint(
            '/Users/malavikavijayendravasist/Desktop/mt2/Checkpoints/handpicked_size/{}'.format(name) + '_{epoch:02d}.h5',
            monitor='val_acc',
            verbose=1,
            period=1)
    ]
    model.fit_generator(generator=train_iterator,
                        validation_data=valid_iterator,
                        steps_per_epoch=steps_per_epoch_train,
                        validation_steps=steps_per_epoch_valid,
                        epochs=nepochs,
                        callbacks=callbacks,
                        verbose=1,
                        workers=0)
    # confusion_matrix(valid_iterator.class_indices)
    model.save('/Users/malavikavijayendravasist/Desktop/mt2/Models/handpicked_size/{}'.format(name) + '.hdf5')
def train(args, params):
    '''Train the model.'''
    model = build_model(params)
    X_train, Y1_train, Y2_train, X_test, Y1_test, Y2_test = load_dataset(
        args.dataset_name, args.num_train)
    print('Fitting model...')
    model.fit(X_train, [Y1_train, Y2_train],
              epochs=args.epochs,
              verbose=1,
              validation_data=(X_test, [Y1_test, Y2_test]),
              callbacks=[SendMetrics(), TensorBoard(log_dir=TENSORBOARD_DIR)])
    _, _, _, cat_acc, subcat_acc = model.evaluate(X_test, [Y1_test, Y2_test],
                                                  verbose=0)
    LOG.debug('Final result is: %s', subcat_acc)
    nni.report_final_result(subcat_acc)
    print('Final result is:', subcat_acc)

    model_id = nni.get_sequence_id()
    # serialize model to JSON
    model_json = model.to_json()
    with open("model-{}.json".format(model_id), "w") as json_file:
        json_file.write(model_json)
    # serialize weights to HDF5
    model.save_weights("model-{}.h5".format(model_id))
    print("Saved model to disk")
def register_std_callbacks(self, tensorboard_logs_folder=None, checkpoint_path=None):
    self.require_model_loaded()
    run_id = str(time())
    if self.desc is not None:
        run_id += "_" + self.desc
    folder_id = os.path.join(self.id, run_id)
    if tensorboard_logs_folder is not None:
        self.registered_callbacks.append(
            TensorBoard(log_dir=os.path.join(tensorboard_logs_folder, folder_id),
                        histogram_freq=0,
                        write_graph=True,
                        write_images=True))
    if checkpoint_path is not None:
        store_path = os.path.join(checkpoint_path, folder_id)
        if not os.path.exists(store_path):
            os.makedirs(store_path)
        store_path = os.path.join(store_path,
                                  'e{epoch:02d}-l{loss:.4f}-v{val_loss:.4f}.ckpt')
        print("Storing to %s" % store_path)
        self.registered_callbacks.append(
            ModelCheckpoint(store_path,
                            monitor='val_loss',
                            verbose=1,
                            period=1,
                            save_best_only=False,
                            mode='min'))
def train(self, x, y, batch_size, epochs, verbose):
    def exp_decay(epoch):
        # exponential learning-rate decay: lr = lr0 * exp(-k * epoch)
        initial_lrate = self.learning_rate
        k = 0.1
        lrate = initial_lrate * np.exp(-k * epoch)
        return lrate

    callbacks = [
        EarlyStopping(patience=20, monitor='val_loss', restore_best_weights=True),
        LearningRateScheduler(exp_decay, verbose=0),
        ReduceLROnPlateau(monitor='val_loss', factor=0.1, patience=10, verbose=0),
        TensorBoard(log_dir=self.model_dir),
        TerminateOnNaN()
    ]
    history = self.ed_model.fit(x=x,
                                y=y,
                                batch_size=batch_size,
                                epochs=epochs,
                                callbacks=callbacks,
                                shuffle=False,
                                validation_split=0.2,
                                verbose=verbose)
    return history.history
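# To see what the exp_decay schedule above does: with initial_lrate = 0.01 and
# k = 0.1, the rate falls to about 0.01 * exp(-1) ~= 0.00368 by epoch 10 and
# about 0.01 * exp(-2) ~= 0.00135 by epoch 20. A quick standalone check
# (the 0.01 starting rate is illustrative, not from the original source):
import numpy as np

initial_lrate, k = 0.01, 0.1
for epoch in (0, 10, 20):
    print(epoch, initial_lrate * np.exp(-k * epoch))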
def train(self, x_train, x_test):
    self.vae.fit(x_train, x_train,
                 epochs=50,
                 batch_size=128,
                 shuffle=True,
                 validation_data=(x_test, x_test),
                 callbacks=[TensorBoard(log_dir='tmp/VAE')])
def get_callbacks(output_dir, model_name, optimizer, model_weights_path):
    logdir = os.path.join(output_dir, optimizer, 'logs')
    # note: chkpt_filepath is built but the checkpoint writes to model_weights_path
    chkpt_filepath = model_name + '--{epoch:02d}--{loss:.3f}--{val_loss:.3f}.h5'
    callbacks = [
        EarlyStopping(monitor='val_loss', min_delta=0.0001, patience=4, verbose=1),
        ModelCheckpoint(filepath=model_weights_path,
                        monitor='val_loss',
                        save_best_only=True,
                        save_weights_only=True,
                        verbose=1),
        TensorBoard(log_dir=logdir)
    ]
    # reduce the learning rate on plateau for the non-adaptive optimizers
    if optimizer in ["sgd", "rmsprop"]:
        callbacks.append(
            ReduceLROnPlateau(monitor='val_loss',
                              factor=0.1,
                              patience=2,
                              verbose=1,
                              mode='min',
                              min_delta=0.01,
                              cooldown=0,
                              min_lr=0))
    return callbacks
def main(net, epochs, batch_size):
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    sess = tf.Session(config=config)
    set_session(sess)

    (x_train, y_train), (x_test, y_test) = cifar10.load_data()
    x_train = x_train.astype('float32') / 255
    x_test = x_test.astype('float32') / 255
    # center the data with the training-set mean
    mean = np.mean(x_train, axis=0)
    x_train -= mean
    x_test -= mean

    datagen = ImageDataGenerator(width_shift_range=0.1,
                                 height_shift_range=0.1,
                                 horizontal_flip=True)
    datagen.fit(x_train)

    model = make_resnet(net)
    model.summary()
    model.fit_generator(datagen.flow(x_train, y_train, batch_size=batch_size),
                        validation_data=(x_test, y_test),
                        epochs=epochs,
                        callbacks=[
                            ReduceLROnPlateau(verbose=1, patience=20),
                            TensorBoard(observer.dir)
                        ])
def fitting(self, model):
    f = self.feat.split(' ')[0]
    self.name = f + '_' + self.model_name + '_' + str(self.nepochs) + '_' + str(int(time.time()))
    print(self.name)

    # count the digits of nepochs so checkpoint epoch numbers are zero-padded
    l = self.nepochs
    c = 0
    while l > 0:
        l //= 10
        c += 1
    a = "_{epoch:0" + str(c) + "d}.h5"

    callbacks = [
        TensorBoard(log_dir=self.TBfolder + self.name, batch_size=self.batch_size),
        ModelCheckpoint(self.CPfolder + self.name + a,
                        monitor='val_acc',
                        verbose=1,
                        period=1)
    ]
    print('validation steps ', self.steps_per_epoch_valid)
    model.fit_generator(generator=self.train_iterator,
                        validation_data=self.valid_iterator,
                        steps_per_epoch=self.steps_per_epoch_train,
                        validation_steps=self.steps_per_epoch_valid,
                        epochs=self.nepochs,
                        callbacks=callbacks,
                        verbose=1,
                        workers=0)
    model.save(self.Modelfolder + self.name + '.hdf5')
    return self.name