raise FileNotFoundError("ERROR: Config file \"%s\" not found" % (args.config)) else: cfg = get_config(args.config) model_name = args.config.split('/')[-1] model_name = model_name.split('.')[0] cfg.model_name = model_name if args.st: score_threshold = args.st else: score_threshold = cfg.score_thresh_train # make sure keras is the minimum required version check_keras_version() # optionally choose specific GPU os.environ['CUDA_VISIBLE_DEVICES'] = cfg.gpu # create object that stores backbone information backbone = architectures.backbone(cfg.network) # create the generators generators = create_generators(cfg, backbone) test_generator = generators[2] class_to_color = { 'bg': np.array([0, 0, 0]) / 255, 'human': np.array([34, 114, 227]) / 255, 'vehicle.bicycle': np.array([0, 182, 0]) / 255,
def main():
    """Train a RetinaNet-style detector from a config file, then evaluate it.

    Parses ``--config`` from the command line, builds the backbone, the data
    generators and the model, trains via ``fit_generator``, reloads the best
    checkpoint and evaluates it on the test split(s).  For nuScenes data the
    dedicated night and rain splits are evaluated as well.

    Raises:
        FileNotFoundError: if the config file does not exist.
        ValueError: if the config is set to inference mode.
    """
    FILE_DIRECTORY = os.path.dirname(os.path.abspath(__file__))

    # Parse arguments
    parser = argparse.ArgumentParser()
    parser.add_argument('--config', type=str,
                        default=os.path.join(FILE_DIRECTORY, "configs/local.cfg"))
    args = parser.parse_args()

    if not os.path.exists(args.config):
        raise FileNotFoundError("ERROR: Config file \"%s\" not found" % (args.config))
    cfg = get_config(args.config)

    # Derive the model name from the config file name (basename without
    # extension).  os.path is used instead of str.split('/') so Windows
    # path separators are handled as well.
    model_name = os.path.splitext(os.path.basename(args.config))[0]
    cfg.model_name = cfg.runtime + "_" + model_name

    # Explicit raise instead of `assert`: asserts are stripped under
    # `python -O`, which would silently allow training in inference mode.
    if cfg.inference is not False:
        raise ValueError(
            "You are running a training in inference mode. Please check your config!")

    # setting seed
    from .utils.helpers import initialize_seed

    # Set seed to compare trainings and exclude randomness
    initialize_seed(cfg.seed)

    # create object that stores backbone information
    backbone = architectures.backbone(cfg.network)

    # make sure keras is the minimum required version
    check_keras_version()

    # optionally choose specific GPU
    os.environ['CUDA_VISIBLE_DEVICES'] = cfg.gpu
    keras.backend.tensorflow_backend.set_session(get_session(cfg.gpu_mem_usage))

    # create the generators.  Only the nuScenes pipeline provides dedicated
    # test / night / rain splits, so the extra generators default to None.
    # (Previously they were only bound inside the nuScenes branch, so any
    # other data set crashed with a NameError in the evaluation phase below,
    # after the full training run had already completed.)
    test_generator = test_night_generator = test_rain_generator = None
    if 'nuscenes' in cfg.data_set:
        (train_generator, validation_generator, test_generator,
         test_night_generator, test_rain_generator) = create_generators(cfg, backbone)
    else:
        train_generator, validation_generator = create_generators(cfg, backbone)

    # create the model
    weights = None
    if cfg.load_model:
        print('Loading model, this may take a second...')
        model = architectures.load_model(cfg.load_model, backbone_name=cfg.network)
        training_model = model
        prediction_model = retinanet_bbox(
            model=model,
            anchor_params=None,
            class_specific_filter=cfg.class_specific_nms)
    else:
        if cfg.pretrain_basenet:
            weights = backbone.download_imagenet()
        in_shape = (cfg.image_size[0], cfg.image_size[1],
                    len(train_generator.channels))
        print('Creating model, this may take a second...')
        model, training_model, prediction_model = create_models(
            backbone_retinanet=backbone.retinanet,
            num_classes=train_generator.num_classes(),
            weights=weights,
            multi_gpu=0,
            freeze_backbone=False,
            lr=cfg.learning_rate,
            inputs=in_shape,
            cfg=cfg,
            distance=cfg.distance_detection,
            distance_alpha=cfg.distance_alpha)

    # print model summary.  Model.summary() prints itself and returns None,
    # so wrapping it in print() would just emit an extra "None" line.
    model.summary()
    print("Model Parameters: ", model.count_params())

    # this lets the generator compute backbone layer shapes using the actual backbone model
    if 'vgg' in cfg.network or 'densenet' in cfg.network:
        train_generator.compute_shapes = make_shapes_callback(model)
        if validation_generator:
            validation_generator.compute_shapes = train_generator.compute_shapes

    # create the callbacks
    callbacks = create_callbacks(
        model,
        prediction_model,
        validation_generator,
        cfg,
    )

    # Use multiprocessing if cpu_count > 0
    use_multiprocessing = cfg.workers > 0

    # class weights: translate class names from the config into generator labels
    class_weights_labels = {}
    if cfg.class_weights:
        class_weights_names = cfg.class_weights
        for key in class_weights_names.keys():
            class_weights_labels[train_generator.name_to_label(key)] = float(
                class_weights_names[key])

    # Print outputs
    print()
    print("=" * 60)
    print("\t\t##### Parameters #####")
    print("=" * 60)
    descr = cfg.get_description()
    descr = os.linesep.join([s for s in descr.splitlines() if s.strip()])
    print(descr)

    print()
    print("=" * 60)
    print("\t\t##### Start Training #####")
    print("=" * 60)

    ## Start training
    training_model.fit_generator(generator=train_generator,
                                 steps_per_epoch=len(train_generator),
                                 epochs=cfg.epochs,
                                 validation_data=validation_generator,
                                 validation_steps=len(validation_generator),
                                 verbose=1,
                                 callbacks=callbacks,
                                 workers=cfg.workers,
                                 use_multiprocessing=use_multiprocessing,
                                 class_weight=class_weights_labels)

    ## Evaluate on test data_set
    print("=" * 60)
    print("\t\t##### Evaluate Test Set #####")
    print("=" * 60)

    # Load best model
    best_model = keras.models.load_model(
        cfg.save_model + cfg.model_name + '.h5',
        custom_objects=backbone.custom_objects)

    # load anchor parameters, or pass None (so that defaults will be used);
    # the truthiness guard avoids a TypeError when anchor_params is unset.
    if cfg.anchor_params and 'small' in cfg.anchor_params:
        anchor_params = AnchorParameters.small
    else:
        anchor_params = None
    best_prediction_model = retinanet_bbox(model=best_model,
                                           anchor_params=anchor_params,
                                           class_specific_filter=False)

    # Evaluate
    from .utils.eval_test import evaluate_test_set

    # The test splits only exist for nuScenes; skip evaluation otherwise
    # instead of crashing on an unbound generator.
    if test_generator is not None:
        evaluate_test_set(best_prediction_model,
                          test_generator,
                          cfg,
                          mode='all',
                          tensorboard=callbacks[1],
                          verbose=1)

    if test_night_generator is not None:
        print("=" * 60)
        print("\t##### Evaluate Test Set at Night #####")
        print("=" * 60)
        evaluate_test_set(best_prediction_model,
                          test_night_generator,
                          cfg,
                          mode='night',
                          tensorboard=callbacks[1],
                          verbose=1)

    if test_rain_generator is not None:
        print("=" * 60)
        print("\t##### Evaluate Test Set at Rain #####")
        print("=" * 60)
        evaluate_test_set(best_prediction_model,
                          test_rain_generator,
                          cfg,
                          mode='rain',
                          tensorboard=callbacks[1],
                          verbose=1)

    print("=" * 60)
    print("\t######## Finished successfully ########")
    print("=" * 60)