Code example #1
0
def dataset_creator(config):
    """Build SSD train/validation data feeds from Pascal VOC datasets.

    Args:
        config: dict with keys "opt" (parsed options; uses ``with_voc12``
            and ``batch_size``) and "hyper_params" (uses ``img_size``,
            ``feature_map_shapes``, ``aspect_ratios``).

    Returns:
        A ``(train_feed, val_feed)`` tuple of generators that yield
        batches matched against the precomputed prior boxes.
    """
    opt = config["opt"]
    hyper_params = config["hyper_params"]
    img_size = hyper_params["img_size"]

    # VOC 2007 train+validation is the training base; its test split
    # is used for validation.
    train_data, _ = data_utils.get_dataset("voc/2007", "train+validation")
    val_data, _ = data_utils.get_dataset("voc/2007", "test")

    if opt.with_voc12:
        # Optionally enlarge the training set with VOC 2012.
        extra_data, _ = data_utils.get_dataset("voc/2012",
                                               "train+validation")
        train_data = train_data.concatenate(extra_data)

    # Resize every sample; apply augmentation on the training split only.
    train_data = train_data.map(
        lambda sample: data_utils.preprocessing(
            sample, img_size, img_size, augmentation.apply))
    val_data = val_data.map(
        lambda sample: data_utils.preprocessing(sample, img_size, img_size))

    # Pad ragged samples into uniform batches.
    shapes = data_utils.get_data_shapes()
    pad_values = data_utils.get_padding_values()
    train_data = (train_data
                  .shuffle(opt.batch_size * 4)
                  .padded_batch(opt.batch_size,
                                padded_shapes=shapes,
                                padding_values=pad_values))
    val_data = val_data.padded_batch(opt.batch_size,
                                     padded_shapes=shapes,
                                     padding_values=pad_values)

    # Prior boxes depend only on hyper-params, so compute them once and
    # share them between both feeds.
    prior_boxes = bbox_utils.generate_prior_boxes(
        hyper_params["feature_map_shapes"], hyper_params["aspect_ratios"])
    train_feed = train_utils.generator(train_data, prior_boxes, hyper_params)
    val_feed = train_utils.generator(val_data, prior_boxes, hyper_params)

    return train_feed, val_feed
Code example #2
0
                                 padding_values=padding_values)
# Set up the training model (SSD + VGG backbone) and its loss function
# (localization loss + confidence loss, weighted per hyper-params).
ssd_model = get_model(hyper_params)
ssd_custom_losses = CustomLoss(hyper_params["neg_pos_ratio"],
                               hyper_params["loc_loss_alpha"])
# Two-output compile: index 0 is box regression, index 1 is class confidence.
ssd_model.compile(
    optimizer=Adam(learning_rate=1e-3),
    loss=[ssd_custom_losses.loc_loss_fn, ssd_custom_losses.conf_loss_fn])
# Project helper — presumably builds/initializes the model's weights by
# running a forward pass; confirm against its definition.
init_model(ssd_model)

# Resolve checkpoint/log paths from the chosen backbone name, and resume
# from an existing checkpoint when requested.
ssd_model_path = io_utils.get_model_path(backbone)
if load_weights:
    ssd_model.load_weights(ssd_model_path)
ssd_log_path = io_utils.get_log_path(backbone)
# Prior boxes are computed once and reused for all operations because
# every image is resized to the same dimensions.
prior_boxes = bbox_utils.generate_prior_boxes(
    hyper_params["feature_map_shapes"], hyper_params["aspect_ratios"])
# Wrap the batched datasets into generators that pair each batch with the
# shared prior boxes.
ssd_train_feed = train_utils.generator(train_data, prior_boxes, hyper_params)
ssd_val_feed = train_utils.generator(val_data, prior_boxes, hyper_params)

# Callbacks: keep only the best checkpoint (by val_loss, weights only),
# log to TensorBoard, and apply the project's LR schedule.
checkpoint_callback = ModelCheckpoint(ssd_model_path,
                                      monitor="val_loss",
                                      save_best_only=True,
                                      save_weights_only=True)
tensorboard_callback = TensorBoard(log_dir=ssd_log_path)
learning_rate_callback = LearningRateScheduler(train_utils.scheduler,
                                               verbose=0)

# Steps per epoch derived from dataset sizes and the batch size.
step_size_train = train_utils.get_step_size(train_total_items, batch_size)
step_size_val = train_utils.get_step_size(val_total_items, batch_size)
ssd_model.fit(ssd_train_feed,
              steps_per_epoch=step_size_train,