def create_callbacks(training_model, prediction_model, evaluation_generator,
                     validation_generator, logs_path, snapshots_path, config):
    """Create the Keras callbacks used during training.

    Args
        training_model: The model that is used for training (unused here;
            kept for interface parity with the other variants).
        prediction_model: The model used by the evaluation callbacks.
        evaluation_generator: Generator over the training set, used for the
            optional 'train_'-prefixed evaluation pass.
        validation_generator: Generator over the validation set.
        logs_path: Directory for TensorBoard logs; falsy disables TensorBoard.
        snapshots_path: Directory where model checkpoints are written.
        config: Dict with at least 'batch_size' and 'phi'; the optional key
            'train_evaluation' also enables evaluation on the training set.

    Returns
        A list of callbacks to pass to model.fit().
    """
    callbacks = []

    tensorboard_callback = None
    if logs_path:
        tensorboard_callback = tf.keras.callbacks.TensorBoard(
            log_dir=logs_path,
            histogram_freq=0,
            batch_size=config['batch_size'],
            write_graph=True,
            write_grads=False,
            write_images=False,
            embeddings_freq=0,
            embeddings_layer_names=None,
            embeddings_metadata=None)
        callbacks.append(tensorboard_callback)

    from eval.pascal import Evaluate
    val_prefix = 'val_'
    # Validation-set mAP; its metrics (e.g. 'val_mAP') drive checkpointing
    # and early stopping below.
    evaluation = Evaluate(validation_generator,
                          prediction_model,
                          iou_threshold=0.1,
                          score_threshold=0.05,
                          max_detections=100,
                          save_path=None,
                          weighted_average=False,
                          verbose=1,
                          tensorboard=tensorboard_callback,
                          prefix=val_prefix)
    callbacks.append(evaluation)

    # Optionally also evaluate on the training set ('train_'-prefixed metrics).
    if config.get('train_evaluation', False):
        evaluation2 = Evaluate(evaluation_generator,
                               prediction_model,
                               iou_threshold=0.1,
                               score_threshold=0.05,
                               max_detections=100,
                               save_path=None,
                               weighted_average=False,
                               verbose=1,
                               tensorboard=tensorboard_callback,
                               prefix='train_')
        callbacks.append(evaluation2)

    # Save the model.
    # Ensure directory created first; otherwise h5py will error after epoch.
    os.makedirs(snapshots_path, exist_ok=True)
    checkpoint = tf.keras.callbacks.ModelCheckpoint(
        os.path.join(snapshots_path,
                     '{phi}_{{epoch:02d}}.h5'.format(phi=config['phi'])),
        verbose=1,
        save_best_only=True,
        monitor=val_prefix + 'mAP',
        mode='max')
    callbacks.append(checkpoint)

    callbacks.append(
        tf.keras.callbacks.ReduceLROnPlateau(monitor='loss',
                                             factor=0.1,
                                             patience=2,
                                             verbose=1,
                                             mode='auto',
                                             min_delta=0.0001,
                                             cooldown=0,
                                             min_lr=0))

    early_stopping_callback = tf.keras.callbacks.EarlyStopping(
        monitor=val_prefix + 'mAP',
        min_delta=0.003,
        patience=5,
        verbose=0,
        mode='max',
        baseline=None,
        restore_best_weights=False)
    callbacks.append(early_stopping_callback)

    return callbacks
def create_callbacks(training_model, prediction_model, validation_generator, args):
    """Build the callbacks used during training.

    Args
        training_model: The model that is used for training.
        prediction_model: The model that should be used for validation.
        validation_generator: The generator for creating validation data.
        args: parseargs args object.

    Returns
        A list of callbacks used for training.
    """
    result = []

    tb_callback = None
    if args.tensorboard_dir:
        # In TF2 custom scalars are logged through a default summary writer
        # (https://www.tensorflow.org/tensorboard/scalars_and_keras).
        writer = tf.summary.create_file_writer(args.tensorboard_dir + "/metrics")
        writer.set_as_default()
        tb_callback = tf.keras.callbacks.TensorBoard(
            log_dir=args.tensorboard_dir,
            histogram_freq=0,
            write_graph=True,
            write_grads=False,
            write_images=False,
            embeddings_freq=0,
            embeddings_layer_names=None,
            embeddings_metadata=None)
        result.append(tb_callback)

    if args.evaluation and validation_generator:
        # Evaluation always runs against the prediction model.
        if args.dataset_type == 'coco':
            from eval.coco import CocoEval
            evaluator = CocoEval(validation_generator, prediction_model,
                                 tensorboard=tb_callback)
        else:
            from eval.pascal import Evaluate
            evaluator = Evaluate(validation_generator, prediction_model,
                                 tensorboard=tb_callback)
        result.append(evaluator)

    # Save the model.
    if args.snapshots:
        # Create the directory up front; otherwise h5py errors after the epoch.
        makedirs(args.snapshot_path)
        snapshot_name = (
            '{dataset_type}_{{epoch:02d}}_{{loss:.4f}}_{{val_loss:.4f}}.h5'
            .format(dataset_type=args.dataset_type))
        # Saves after every epoch (best-only checkpointing is disabled).
        # NOTE(review): the 'val_loss' placeholder presumably requires fit()
        # to receive validation data — confirm against the caller.
        result.append(tf.keras.callbacks.ModelCheckpoint(
            os.path.join(args.snapshot_path, snapshot_name),
            verbose=1))

    return result
def create_callbacks(training_model, prediction_model, validation_generator, args):
    """Assemble the callbacks to use while training.

    Args
        training_model: The model that is used for training.
        prediction_model: The model that should be used for validation.
        validation_generator: The generator for creating validation data.
        args: parseargs args object.

    Returns
        A list of callbacks used for training.
    """
    result = []

    tb_callback = None
    if args.tensorboard_dir:
        tb_callback = keras.callbacks.TensorBoard(
            log_dir=args.tensorboard_dir,
            histogram_freq=0,
            batch_size=args.batch_size,
            write_graph=True,
            write_grads=False,
            write_images=False,
            embeddings_freq=0,
            embeddings_layer_names=None,
            embeddings_metadata=None)
        result.append(tb_callback)

    if args.evaluation and validation_generator:
        # Evaluation always runs against the prediction model.
        if args.dataset_type == 'coco':
            from eval.coco import CocoEval
            evaluator = CocoEval(validation_generator, prediction_model,
                                 tensorboard=tb_callback)
        else:
            from eval.pascal import Evaluate
            evaluator = Evaluate(validation_generator, prediction_model,
                                 tensorboard=tb_callback)
        result.append(evaluator)

    # Save the model.
    if args.snapshots:
        # Create the directory up front; otherwise h5py errors after the epoch.
        makedirs(args.snapshot_path)
        snapshot_name = '{dataset_type}.h5'.format(dataset_type=args.dataset_type)
        # Weights-only, overwritten every epoch (best-only is disabled).
        result.append(keras.callbacks.ModelCheckpoint(
            os.path.join(args.snapshot_path, snapshot_name),
            verbose=1,
            save_weights_only=True))

    return result