        tbcb.on_epoch_end(epoch, logs=logs)

        if epoch % 5 == 0:
            t1 = time.time() - t0
            T += t1
            print('========== Evaluating ==========')
            t_test = evaluate(model, emb, dataset, max_len)
            t_valid = evaluate_valid(model, emb, dataset, max_len)
            print('Epoch: {:03d}, Time: {:f}, valid (NDCG@10: {:.4f}, HR@10: {:.4f}), '
                  'test (NDCG@10: {:.4f}, HR@10: {:.4f})'.format(
                      epoch, T, t_valid[0], t_valid[1], t_test[0], t_test[1]))
            f.write(str(t_valid) + ' ' + str(t_test) + '\n')
            f.flush()
            t0 = time.time()

            # Stop early if the lowest loss seen so far is more than 10 epochs old.
            # if np.array(loss_history)[::-1].argsort().argsort()[0] > 3:
            if epoch - np.array(loss_history).argsort()[0] > 10:
                break

    tbcb.on_train_end()
except Exception as e:
    print(e)
    tbcb.on_train_end()

f.close()
sampler.close()
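# The loop above drives a Keras TensorBoard callback by hand instead of going
# through model.fit(). A minimal, self-contained sketch of that pattern is
# given below; the toy model, data, and tag names are illustrative
# assumptions, not part of the code above.
import numpy as np
import tensorflow as tf

toy_model = tf.keras.Sequential([tf.keras.layers.Dense(1, input_shape=(4,))])
toy_model.compile(optimizer='adam', loss='mse')

tb = tf.keras.callbacks.TensorBoard(log_dir='logs/manual_loop')
tb.set_model(toy_model)  # must be attached before the hooks are called

x = np.random.rand(64, 4).astype('float32')
y = np.random.rand(64, 1).astype('float32')

tb.on_train_begin()
for epoch in range(3):
    loss = toy_model.train_on_batch(x, y)
    # the keys of `logs` become the scalar tags shown in TensorBoard
    tb.on_epoch_end(epoch, logs={'loss': float(loss)})
tb.on_train_end()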
class Trainer:
    """Class object to set up and carry out the training.

    Takes as input a generator that produces SR images.
    Conditionally, also a discriminator network and a feature extractor
    to build the components of the perceptual loss.
    Compiles the model(s) and trains in a GANS fashion if a discriminator
    is provided, otherwise carries out a regular ISR training.

    Args:
        generator: Keras model, the super-scaling, or generator, network.
        discriminator: Keras model, the discriminator network for the adversarial
            component of the perceptual loss.
        feature_extractor: Keras model, feature extractor network for the deep
            features component of the perceptual loss function.
        lr_train_dir: path to the directory containing the Low-Res images for training.
        hr_train_dir: path to the directory containing the High-Res images for training.
        lr_valid_dir: path to the directory containing the Low-Res images for validation.
        hr_valid_dir: path to the directory containing the High-Res images for validation.
        learning_rate: dictionary with 'initial_value', 'decay_frequency' and
            'decay_factor' keys, controlling the step-decay schedule.
        loss_weights: dictionary, used to weigh the components of the loss function.
            Contains 'generator' for the generator loss component, and can contain
            'discriminator' and 'feature_extractor' for the discriminator and deep
            features components respectively.
        log_dirs: dictionary with 'logs' and 'weights' keys, the directories where
            the tensorboard logs and the weights are saved.
        dataname: string, used to identify what dataset is used for the training session.
        weights_generator: path to the pre-trained generator's weights, for transfer learning.
        weights_discriminator: path to the pre-trained discriminator's weights, for transfer learning.
        n_validation: integer, number of validation samples used at training from the validation set.
        flatness: dictionary. Determines the 'flatness' threshold level for the
            training patches. See the TrainerHelper class for more details.

    Methods:
        train: combines the networks and triggers training with the specified settings.
""" def __init__( self, generator, discriminator, feature_extractor, lr_train_dir, hr_train_dir, lr_valid_dir, hr_valid_dir, loss_weights={ 'generator': 1.0, 'discriminator': 0.003, 'feature_extractor': 1 / 12 }, log_dirs={ 'logs': 'logs', 'weights': 'weights' }, fallback_save_every_n_epochs=2, dataname=None, weights_generator=None, weights_discriminator=None, n_validation=None, flatness={ 'min': 0.0, 'increase_frequency': None, 'increase': 0.0, 'max': 0.0 }, learning_rate={ 'initial_value': 0.0004, 'decay_frequency': 100, 'decay_factor': 0.5 }, adam_optimizer={ 'beta1': 0.9, 'beta2': 0.999, 'epsilon': None }, losses={ 'generator': 'mae', 'discriminator': 'binary_crossentropy', 'feature_extractor': 'mse', }, metrics={'generator': 'PSNR_Y'}, ): self.generator = generator self.discriminator = discriminator self.feature_extractor = feature_extractor self.scale = generator.scale self.lr_patch_size = generator.patch_size self.learning_rate = learning_rate self.loss_weights = loss_weights self.weights_generator = weights_generator self.weights_discriminator = weights_discriminator self.adam_optimizer = adam_optimizer self.dataname = dataname self.flatness = flatness self.n_validation = n_validation self.losses = losses self.log_dirs = log_dirs self.metrics = metrics if self.metrics['generator'] == 'PSNR_Y': self.metrics['generator'] = PSNR_Y elif self.metrics['generator'] == 'PSNR': self.metrics['generator'] = PSNR self._parameters_sanity_check() self.model = self._combine_networks() self.settings = {} self.settings['training_parameters'] = locals() self.settings['training_parameters'][ 'lr_patch_size'] = self.lr_patch_size self.settings = self.update_training_config(self.settings) self.logger = get_logger(__name__) self.helper = TrainerHelper( generator=self.generator, weights_dir=log_dirs['weights'], logs_dir=log_dirs['logs'], lr_train_dir=lr_train_dir, feature_extractor=self.feature_extractor, discriminator=self.discriminator, dataname=dataname, weights_generator=self.weights_generator, weights_discriminator=self.weights_discriminator, fallback_save_every_n_epochs=fallback_save_every_n_epochs, ) self.train_dh = DataHandler( lr_dir=lr_train_dir, hr_dir=hr_train_dir, patch_size=self.lr_patch_size, scale=self.scale, n_validation_samples=None, ) self.valid_dh = DataHandler( lr_dir=lr_valid_dir, hr_dir=hr_valid_dir, patch_size=self.lr_patch_size, scale=self.scale, n_validation_samples=n_validation, ) def _parameters_sanity_check(self): """ Parameteres sanity check. """ if self.discriminator: assert self.lr_patch_size * self.scale == self.discriminator.patch_size self.adam_optimizer if self.feature_extractor: assert self.lr_patch_size * self.scale == self.feature_extractor.patch_size check_parameter_keys( self.learning_rate, needed_keys=['initial_value'], optional_keys=['decay_factor', 'decay_frequency'], default_value=None, ) check_parameter_keys( self.flatness, needed_keys=[], optional_keys=['min', 'increase_frequency', 'increase', 'max'], default_value=0.0, ) check_parameter_keys( self.adam_optimizer, needed_keys=['beta1', 'beta2'], optional_keys=['epsilon'], default_value=None, ) check_parameter_keys(self.log_dirs, needed_keys=['logs', 'weights']) def _combine_networks(self): """ Constructs the combined model which contains the generator network, as well as discriminator and geature extractor, if any are defined. 
""" lr = Input(shape=(self.lr_patch_size, ) * 2 + (3, )) sr = self.generator.model(lr) outputs = [sr] losses = [self.losses['generator']] loss_weights = [self.loss_weights['generator']] if self.discriminator: self.discriminator.model.trainable = False validity = self.discriminator.model(sr) outputs.append(validity) losses.append(self.losses['discriminator']) loss_weights.append(self.loss_weights['discriminator']) if self.feature_extractor: self.feature_extractor.model.trainable = False sr_feats = self.feature_extractor.model(sr) outputs.extend([*sr_feats]) losses.extend([self.losses['feature_extractor']] * len(sr_feats)) loss_weights.extend( [self.loss_weights['feature_extractor'] / len(sr_feats)] * len(sr_feats)) combined = Model(inputs=lr, outputs=outputs) # https://stackoverflow.com/questions/42327543/adam-optimizer-goes-haywire-after-200k-batches-training-loss-grows optimizer = Adam( beta_1=self.adam_optimizer['beta1'], beta_2=self.adam_optimizer['beta2'], lr=self.learning_rate['initial_value'], epsilon=self.adam_optimizer['epsilon'], ) combined.compile(loss=losses, loss_weights=loss_weights, optimizer=optimizer, metrics=self.metrics) return combined def _lr_scheduler(self, epoch): """ Scheduler for the learning rate updates. """ n_decays = epoch // self.learning_rate['decay_frequency'] lr = self.learning_rate['initial_value'] * ( self.learning_rate['decay_factor']**n_decays) # no lr below minimum control 10e-7 return max(1e-7, lr) def _flatness_scheduler(self, epoch): if self.flatness['increase']: n_increases = epoch // self.flatness['increase_frequency'] else: return self.flatness['min'] f = self.flatness['min'] + n_increases * self.flatness['increase'] return min(self.flatness['max'], f) def _load_weights(self): """ Loads the pretrained weights from the given path, if any is provided. If a discriminator is defined, does the same. """ if self.weights_generator: self.model.get_layer('generator').load_weights( self.weights_generator) if self.discriminator: if self.weights_discriminator: self.model.get_layer('discriminator').load_weights( self.weights_discriminator) self.discriminator.model.load_weights( self.weights_discriminator) def _format_losses(self, prefix, losses, model_metrics): """ Creates a dictionary for tensorboard tracking. """ return dict(zip([prefix + m for m in model_metrics], losses)) def update_training_config(self, settings): """ Summarizes training setting. 
""" _ = settings['training_parameters'].pop('weights_generator') _ = settings['training_parameters'].pop('self') _ = settings['training_parameters'].pop('generator') _ = settings['training_parameters'].pop('discriminator') _ = settings['training_parameters'].pop('feature_extractor') settings['generator'] = {} settings['generator']['name'] = self.generator.name settings['generator']['parameters'] = self.generator.params settings['generator']['weights_generator'] = self.weights_generator _ = settings['training_parameters'].pop('weights_discriminator') if self.discriminator: settings['discriminator'] = {} settings['discriminator']['name'] = self.discriminator.name settings['discriminator'][ 'weights_discriminator'] = self.weights_discriminator else: settings['discriminator'] = None if self.discriminator: settings['feature_extractor'] = {} settings['feature_extractor']['name'] = self.feature_extractor.name settings['feature_extractor'][ 'layers'] = self.feature_extractor.layers_to_extract else: settings['feature_extractor'] = None return settings def train(self, epochs, steps_per_epoch, batch_size, monitored_metrics): """ Carries on the training for the given number of epochs. Sends the losses to Tensorboard. Args: epochs: how many epochs to train for. steps_per_epoch: how many batches epoch. batch_size: amount of images per batch. monitored_metrics: dictionary, the keys are the metrics that are monitored for the weights saving logic. The values are the mode that trigger the weights saving ('min' vs 'max'). """ self.settings['training_parameters'][ 'steps_per_epoch'] = steps_per_epoch self.settings['training_parameters']['batch_size'] = batch_size starting_epoch = self.helper.initialize_training( self) # load_weights, creates folders, creates basename self.tensorboard = TensorBoard( log_dir=str(self.helper.callback_paths['logs'])) self.tensorboard.set_model(self.model) # validation data validation_set = self.valid_dh.get_validation_set(batch_size) y_validation = [validation_set['hr']] if self.discriminator: discr_out_shape = list( self.discriminator.model.outputs[0].shape)[1:4] valid = np.ones([batch_size] + discr_out_shape) fake = np.zeros([batch_size] + discr_out_shape) validation_valid = np.ones([len(validation_set['hr'])] + discr_out_shape) y_validation.append(validation_valid) if self.feature_extractor: validation_feats = self.feature_extractor.model.predict( validation_set['hr']) y_validation.extend([*validation_feats]) for epoch in range(starting_epoch, epochs): self.logger.info('Epoch {e}/{tot_eps}'.format(e=epoch, tot_eps=epochs)) K.set_value(self.model.optimizer.lr, self._lr_scheduler(epoch=epoch)) self.logger.info('Current learning rate: {}'.format( K.eval(self.model.optimizer.lr))) flatness = self._flatness_scheduler(epoch) if flatness: self.logger.info( 'Current flatness treshold: {}'.format(flatness)) epoch_start = time() for step in tqdm(range(steps_per_epoch)): batch = self.train_dh.get_batch(batch_size, flatness=flatness) y_train = [batch['hr']] training_losses = {} ## Discriminator training if self.discriminator: sr = self.generator.model.predict(batch['lr']) d_loss_real = self.discriminator.model.train_on_batch( batch['hr'], valid) d_loss_fake = self.discriminator.model.train_on_batch( sr, fake) d_loss_fake = self._format_losses( 'train_d_fake_', d_loss_fake, self.discriminator.model.metrics_names) d_loss_real = self._format_losses( 'train_d_real_', d_loss_real, self.discriminator.model.metrics_names) training_losses.update(d_loss_real) training_losses.update(d_loss_fake) 
                    y_train.append(valid)

                ## Generator training
                if self.feature_extractor:
                    hr_feats = self.feature_extractor.model.predict(batch['hr'])
                    y_train.extend([*hr_feats])

                model_losses = self.model.train_on_batch(batch['lr'], y_train)
                model_losses = self._format_losses(
                    'train_', model_losses, self.model.metrics_names)
                training_losses.update(model_losses)

                self.tensorboard.on_epoch_end(epoch * steps_per_epoch + step, training_losses)
                self.logger.debug('Losses at step {s}:\n {l}'.format(s=step, l=training_losses))

            elapsed_time = time() - epoch_start
            self.logger.info('Epoch {} took {:10.1f}s'.format(epoch, elapsed_time))

            validation_losses = self.model.evaluate(
                validation_set['lr'], y_validation, batch_size=batch_size)
            validation_losses = self._format_losses(
                'val_', validation_losses, self.model.metrics_names)

            if epoch == starting_epoch:
                remove_metrics = []
                for metric in monitored_metrics:
                    if (metric not in training_losses) and (metric not in validation_losses):
                        msg = ' '.join([metric, 'is NOT among the model metrics, removing it.'])
                        self.logger.error(msg)
                        remove_metrics.append(metric)
                for metric in remove_metrics:
                    _ = monitored_metrics.pop(metric)

            # should average train metrics
            end_losses = {}
            end_losses.update(validation_losses)
            end_losses.update(training_losses)

            self.helper.on_epoch_end(
                epoch=epoch,
                losses=end_losses,
                generator=self.model.get_layer('generator'),
                discriminator=self.discriminator,
                metrics=monitored_metrics,
            )
            self.tensorboard.on_epoch_end(epoch, validation_losses)
        self.tensorboard.on_train_end(None)
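# A usage sketch for the Trainer above, patterned on the ISR
# (image-super-resolution) package that this class closely follows. The
# RRDN / Discriminator / Cut_VGG19 constructors, the directory paths, and the
# monitored metric tag are assumptions drawn from that project's documented
# examples, not guaranteed by the code above.
from ISR.models import RRDN, Discriminator, Cut_VGG19

lr_patch = 40
scale = 2
hr_patch = lr_patch * scale

rrdn = RRDN(arch_params={'C': 4, 'D': 3, 'G': 64, 'G0': 64, 'T': 10, 'x': scale},
            patch_size=lr_patch)
f_ext = Cut_VGG19(patch_size=hr_patch, layers_to_extract=[5, 9])
discr = Discriminator(patch_size=hr_patch, kernel_size=3)

trainer = Trainer(
    generator=rrdn,
    discriminator=discr,
    feature_extractor=f_ext,
    lr_train_dir='low_res/training/images',
    hr_train_dir='high_res/training/images',
    lr_valid_dir='low_res/validation/images',
    hr_valid_dir='high_res/validation/images',
    dataname='image_dataset',
    n_validation=40,
)
# The metric tag must match an entry of model.metrics_names with the 'val_'
# prefix added by _format_losses.
trainer.train(
    epochs=80,
    steps_per_epoch=500,
    batch_size=16,
    monitored_metrics={'val_generator_PSNR_Y': 'max'},
)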
def train(train_lbld_trios, val_lbld_trios, network, weights, model_path,
          n_epochs, init_lr, optmzr_name, imagenet=False, freeze_until=None):
    """Training function: trains a model of type 'network' over the data.

    Args:
        network (str): String identifying the network architecture to use.
        weights (str): Path string to a .ckpt weights file.
        model_path (str): Path string to a directory to save models in.
        n_epochs (int): Integer representing the number of epochs to run training.
    """
    # Create a folder for saving trained models
    if os.path.isdir(model_path) is False:
        logging.info("Creating a folder to save models at: " + str(model_path))
        os.mkdir(model_path)

    starting_epoch = 0
    if network == 'SiameseNetTriplet':
        siamese_net = SiameseNetTriplet((128, 128, 3),
                                        arch='resnet18',
                                        sliding=True,
                                        imagenet=imagenet,
                                        freeze_until=freeze_until)
    optimizer = Adam(lr=0.0006)
    model = siamese_net.build_model()
    loss_model = siamese_net.loss_model
    single_model = siamese_net.single_model
    if weights:
        print("Loading model at: " + str(weights))
        starting_epoch = int(weights.split('-')[1]) + 1
        model.load_weights(weights)
    model.compile(loss=triplet_loss,
                  optimizer=optimizer,
                  metrics=[cos_sim_pos, cos_sim_neg])

    # Load data and create data generators:
    train_ds = tf.data.Dataset.from_generator(
        image_trio_generator,
        args=[train_lbld_trios, True, False, False, imagenet],
        output_types=((tf.float32, tf.float32, tf.float32), tf.float32, tf.string),
        output_shapes=(((TARGET_WIDTH, TARGET_HEIGHT, 3),
                        (TARGET_WIDTH, TARGET_HEIGHT, 3),
                        (TARGET_WIDTH, TARGET_HEIGHT, 3)), (1, None), (3)))
    batched_train_ds = train_ds.batch(BATCH_SIZE)

    valid_ds = tf.data.Dataset.from_generator(
        image_trio_generator,
        args=[val_lbld_trios, False, False, False, imagenet],
        output_types=((tf.float32, tf.float32, tf.float32), tf.float32, tf.string),
        output_shapes=(((TARGET_WIDTH, TARGET_HEIGHT, 3),
                        (TARGET_WIDTH, TARGET_HEIGHT, 3),
                        (TARGET_WIDTH, TARGET_HEIGHT, 3)), (1, None), (3)))
    batched_valid_ds = valid_ds.batch(BATCH_SIZE)

    logdir = "logs/scalars/" + datetime.now().strftime("%Y%m%d-%H%M%S")
    tensorboard_callback = TensorBoard(log_dir=logdir,
                                       histogram_freq=0,
                                       batch_size=BATCH_SIZE,
                                       write_graph=True,
                                       write_grads=True)
    tensorboard_callback.set_model(model)

    def named_logs(metrics_names, logs):
        """Zips metric names and values into the dict the callback expects."""
        result = {}
        for l in zip(metrics_names, logs):
            result[l[0]] = l[1]
        return result

    # Train model:
    steps_per_epoch = len(train_lbld_trios) // BATCH_SIZE
    val_steps_per_epoch = len(val_lbld_trios) // BATCH_SIZE
    best_val_loss = 1000
    best_train_loss = 1000
    batched_train_iter = iter(batched_train_ds)
    batched_val_iter = iter(batched_valid_ds)

    for epoch in range(starting_epoch, n_epochs):
        cumm_csim_pos_tr = 0
        cumm_csim_neg_tr = 0
        cumm_tr_loss = 0
        cumm_csim_pos_val = 0
        cumm_csim_neg_val = 0
        cumm_val_loss = 0
        print('Epoch #' + str(epoch) + ':')
        for step in tqdm(range(steps_per_epoch)):
            train_inputs, train_y, train_seq_ids = next(batched_train_iter)
            train_x1 = train_inputs[0]
            train_x2 = train_inputs[1]
            train_x3 = train_inputs[2]
            X_dict = {}
            seq_ids = []
            for idx, row in enumerate(train_seq_ids):
                for idx2, class_id in enumerate(row):
                    class_id = class_id.numpy().decode('utf8')
                    if class_id not in seq_ids:
                        seq_ids.append(class_id)
                    if class_id in X_dict:
                        X_dict[class_id].append(train_inputs[idx2][idx])
                    else:
                        X_dict[class_id] = [train_inputs[idx2][idx]]

            triplets = get_batch_hard(model, train_inputs, seq_ids, BATCH_SIZE)
            loss, csim_pos_tr, csim_neg_tr = model.train_on_batch(
                [triplets[0], triplets[1], triplets[2]],
                train_y[:BATCH_SIZE // 2])
            cumm_tr_loss += loss
            cumm_csim_pos_tr += csim_pos_tr
            cumm_csim_neg_tr += csim_neg_tr

        cumm_csim_pos_tr = cumm_csim_pos_tr / steps_per_epoch
        cumm_csim_neg_tr = cumm_csim_neg_tr / steps_per_epoch
        cumm_tr_loss = cumm_tr_loss / steps_per_epoch

        # Evaluate:
        for step in tqdm(range(val_steps_per_epoch)):
            valid_inputs, val_y, val_seq_ids = next(batched_val_iter)
            valid_x1 = valid_inputs[0]
            valid_x2 = valid_inputs[1]
            valid_x3 = valid_inputs[2]
            val_loss, csim_pos_val, csim_neg_val = model.test_on_batch(
                [valid_x1, valid_x2, valid_x3], val_y)
            cumm_val_loss += val_loss
            cumm_csim_pos_val += csim_pos_val
            cumm_csim_neg_val += csim_neg_val

        cumm_csim_pos_val = cumm_csim_pos_val / val_steps_per_epoch
        cumm_csim_neg_val = cumm_csim_neg_val / val_steps_per_epoch
        cumm_val_loss = cumm_val_loss / val_steps_per_epoch

        print('Training loss: ' + str(cumm_tr_loss))
        print('Validation loss: ' + str(cumm_val_loss))
        print('* Cosine sim positive (train) for this epoch: %0.2f' % cumm_csim_pos_tr)
        print('* Cosine sim negative (train) for this epoch: %0.2f' % cumm_csim_neg_tr)
        print('* Cosine sim positive (valid) for this epoch: %0.2f' % cumm_csim_pos_val)
        print('* Cosine sim negative (valid) for this epoch: %0.2f' % cumm_csim_neg_val)

        metrics_names = [
            'tr_loss', 'tr_csim_pos', 'tr_csim_neg',
            'val_loss', 'val_csim_pos', 'val_csim_neg'
        ]
        tensorboard_callback.on_epoch_end(
            epoch,
            named_logs(metrics_names, [
                cumm_tr_loss, cumm_csim_pos_tr, cumm_csim_neg_tr,
                cumm_val_loss, cumm_csim_pos_val, cumm_csim_neg_val
            ]))

        model_filepath = os.path.join(
            model_path,
            "model-{epoch:03d}-{val_loss:.4f}.hdf5".format(
                epoch=epoch, val_loss=cumm_val_loss))
        if cumm_val_loss < best_val_loss * 1.5:
            if cumm_val_loss < best_val_loss:
                best_val_loss = cumm_val_loss
            model.save(model_filepath)  # or model.save_weights()
            print("Best model w/ val loss {} saved to {}".format(
                cumm_val_loss, model_filepath))

    tensorboard_callback.on_train_end(None)
    return
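# The Siamese training above imports `triplet_loss`, `cos_sim_pos`, and
# `cos_sim_neg` from elsewhere. A minimal cosine-similarity version is
# sketched below as an assumption about their intent; the margin and the
# packing of (anchor, positive, negative) embeddings along the last axis are
# illustrative choices, not the original definitions.
import tensorflow as tf
from tensorflow.keras import backend as K

def _cosine(a, b):
    a = K.l2_normalize(a, axis=-1)
    b = K.l2_normalize(b, axis=-1)
    return K.sum(a * b, axis=-1)

def triplet_loss(y_true, y_pred, margin=0.2):
    # y_pred is assumed to be [anchor | positive | negative] embeddings
    anchor, positive, negative = tf.split(y_pred, 3, axis=-1)
    return K.maximum(
        _cosine(anchor, negative) - _cosine(anchor, positive) + margin, 0.0)

def cos_sim_pos(y_true, y_pred):
    anchor, positive, _ = tf.split(y_pred, 3, axis=-1)
    return K.mean(_cosine(anchor, positive))

def cos_sim_neg(y_true, y_pred):
    anchor, _, negative = tf.split(y_pred, 3, axis=-1)
    return K.mean(_cosine(anchor, negative))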
def _run(game, network_params, memory_params, ops):
    """Sets up and runs the gaming simulation.

    Initializes TensorFlow, the training agent, and the game environment.
    The agent plays the game from the starting state for a number of
    episodes set by the user.

    Args:
        game: Name of the gym environment to create.
        network_params: Parameters used to build the agent's network.
        memory_params: Parameters used to build the agent's replay memory.
        ops: The arguments from the command line parsed by _parse_arguments.
    """
    # Set up the TensorBoard writer.
    trial_id = json.loads(
        os.environ.get('TF_CONFIG', '{}')).get('task', {}).get('trial', '')
    output_path = ops.job_dir if not trial_id else ops.job_dir + '/' + trial_id
    tensorboard = TensorBoard(log_dir=output_path)
    hpt = hypertune.HyperTune()

    graph = tf.Graph()
    with graph.as_default():
        env = gym.make(game)
        agent = _create_agent(env, network_params, memory_params)
        rewards = []
        tensorboard.set_model(agent.policy)

        def _train_or_evaluate(print_score, training=False):
            """Runs a gaming simulation and writes results for tensorboard.

            Args:
                print_score (bool): True to print a score to the console.
                training (bool): True if the agent is training, False to eval.

            Returns:
                The total reward for the episode.
            """
            reward = _play(agent, env, training)
            if print_score:
                print(
                    'Train - ',
                    'Episode: {}'.format(episode),
                    'Total reward: {}'.format(reward),
                )
            return reward

        for episode in range(1, ops.episodes + 1):
            print_score = ops.print_rate and episode % ops.print_rate == 0
            get_summary = ops.eval_rate and episode % ops.eval_rate == 0
            rewards.append(_train_or_evaluate(print_score, training=True))

            if get_summary:
                avg_reward = sum(rewards) / len(rewards)
                summary = {'eval_reward': avg_reward}
                tensorboard.on_epoch_end(episode, summary)
                hpt.report_hyperparameter_tuning_metric(
                    hyperparameter_metric_tag='avg_reward',
                    metric_value=avg_reward,
                    global_step=episode)
                print(
                    'Eval - ',
                    'Episode: {}'.format(episode),
                    'Average Reward: {}'.format(avg_reward),
                )
                rewards = []

        tensorboard.on_train_end(None)
        _record_video(env, agent, output_path)
        agent.policy.save(output_path, save_format='tf')
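# `_run` above delegates each episode to a `_play(agent, env, training)`
# helper defined elsewhere. The rollout below is a hypothetical sketch of such
# a helper; `agent.act` and `agent.learn` are assumed method names, not the
# actual agent interface.
def _play(agent, env, training):
    """Plays one episode and returns the total reward collected."""
    state = env.reset()
    done = False
    total_reward = 0.0
    while not done:
        action = agent.act(state)  # hypothetical policy query
        next_state, reward, done, _ = env.step(action)
        if training:
            # hypothetical learning step on the observed transition
            agent.learn(state, action, reward, next_state, done)
        state = next_state
        total_reward += reward
    return total_reward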
    # Reconstructed from the values printed below: the logs dict feeds the
    # manually driven callbacks for this epoch.
    logs = {
        'acc': np.mean(training_acc),
        'loss': np.mean(training_loss),
        'val_loss': np.mean(testing_loss),
        'val_acc': np.mean(testing_acc)
    }
    modelcheckpoint.on_epoch_end(epoch, logs)
    earlystop.on_epoch_end(epoch, logs)
    reduce_lr.on_epoch_end(epoch, logs)
    tensorboard.on_epoch_end(epoch, logs)
    print(
        "accuracy: {}, loss: {}, validation accuracy: {}, validation loss: {}".
        format(np.mean(training_acc), np.mean(training_loss),
               np.mean(testing_acc), np.mean(testing_loss)))
    if model.stop_training:
        break

earlystop.on_train_end()
modelcheckpoint.on_train_end()
reduce_lr.on_train_end()
tensorboard.on_train_end()

# Confusion matrix for the training data
y_train_pred = model.predict(x_train).argmax(axis=1)
conf_mat = confusion_matrix(y_train, y_train_pred)
class_label = [
    "airplane", "automobile", "bird", "cat", "deer", "dog", "frog",
    "horse", "ship", "truck"
]
df = pd.DataFrame(conf_mat, index=class_label, columns=class_label)
sns.heatmap(df, annot=True, cmap="YlGnBu", fmt="d")
plt.title("Confusion Matrix for Training data")
plt.xlabel("Predicted Label")
plt.ylabel("True Label")
plt.show()
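# The epoch loop above presumes that the callbacks were wired up before
# training started: each needs the model attached and its state initialized.
# A setup sketch follows; the filename, monitor keys, and patience values are
# illustrative assumptions.
from tensorflow.keras.callbacks import (EarlyStopping, ModelCheckpoint,
                                        ReduceLROnPlateau, TensorBoard)

modelcheckpoint = ModelCheckpoint('best_model.h5', monitor='val_loss', mode='min')
earlystop = EarlyStopping(monitor='val_loss', patience=10)  # sets model.stop_training
reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.5, patience=5)
tensorboard = TensorBoard(log_dir='logs/manual_cifar10')

for cb in (modelcheckpoint, earlystop, reduce_lr, tensorboard):
    cb.set_model(model)   # attach the compiled model
    cb.on_train_begin()   # let each callback initialize its internal state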