Example #1
0
    # Load dataset metadata for PASCAL VOC 2007 and 2012 (train+validation
    # splits). Only the second tuple element (the info object) is kept here;
    # the data itself is presumably re-created by `dataset_creator` below —
    # TODO confirm against data_utils.get_dataset.
    _, info = data_utils.get_dataset("voc/2007", "train+validation")
    _, voc_2012_info = data_utils.get_dataset("voc/2012", "train+validation")

    # Count examples per split so step sizes can be derived further down.
    voc_2012_total_items = data_utils.get_total_item_size(
        voc_2012_info, "train+validation")
    train_total_items = data_utils.get_total_item_size(info,
                                                       "train+validation")
    # NOTE(review): validation items come from the VOC 2007 "test" split —
    # looks like the standard VOC 07+12 protocol; confirm with the caller.
    val_total_items = data_utils.get_total_item_size(info, "test")
    if args.with_voc12:
        # Optionally fold VOC 2012 train+validation into the training count.
        train_total_items += voc_2012_total_items

    # Prepend a background class so index 0 is "bg"; total_labels therefore
    # includes the background in addition to the dataset's object classes.
    labels = data_utils.get_labels(info)
    labels = ["bg"] + labels
    hyper_params["total_labels"] = len(labels)

    # Steps per epoch for training/validation given the batch size.
    step_size_train = train_utils.get_step_size(train_total_items,
                                                args.batch_size)
    step_size_val = train_utils.get_step_size(val_total_items, args.batch_size)

    # Smoke-test mode caps both loops at 10 steps for a quick sanity run.
    num_train_steps = 10 if args.smoke_test else step_size_train
    num_eval_steps = 10 if args.smoke_test else step_size_val

    # Distributed trainer setup (call continues past this visible chunk).
    trainer = TFTrainer(model_creator=model_creator,
                        data_creator=dataset_creator,
                        num_replicas=args.num_replicas,
                        use_gpu=args.use_gpu,
                        verbose=True,
                        config={
                            "batch_size": args.batch_size,
                            "fit_config": {
                                "steps_per_epoch": num_train_steps,
                            },
Example #2
0
    # Branch body of an `if` whose header is above this visible chunk —
    # presumably the "custom images supplied" path; confirm against caller.
    # Build a dataset that streams images from explicit file paths via a
    # custom generator, resizing each to img_size x img_size.
    total_items = len(img_paths)
    test_data = tf.data.Dataset.from_generator(
        lambda: data_utils.custom_data_generator(img_paths, img_size, img_size
                                                 ), data_types, data_shapes)
else:
    # Dataset already constructed upstream: apply per-example preprocessing
    # (resize, and evaluation-specific handling when `evaluate` is set).
    test_data = test_data.map(lambda x: data_utils.preprocessing(
        x, img_size, img_size, evaluate=evaluate))

# Collect variable-sized examples into fixed-shape, padded batches.
test_data = test_data.padded_batch(
    batch_size, padded_shapes=data_shapes, padding_values=padding_values)

# Build the SSD network and restore the trained weights for this backbone.
ssd_model = get_model(hyper_params)
ssd_model_path = io_utils.get_model_path(backbone)
ssd_model.load_weights(ssd_model_path)

# Wrap the raw network with a decoder head that converts prior-box offsets
# into final boxes, class labels and confidence scores.
prior_boxes = bbox_utils.generate_prior_boxes(
    hyper_params["feature_map_shapes"], hyper_params["aspect_ratios"])
ssd_decoder_model = get_decoder_model(ssd_model, prior_boxes, hyper_params)

# Run inference over the entire test set.
step_size = train_utils.get_step_size(total_items, batch_size)
pred_bboxes, pred_labels, pred_scores = ssd_decoder_model.predict(
    test_data, steps=step_size, verbose=1)

# Score the predictions in evaluation mode; otherwise render them on the
# input images. Both handlers take the same argument list.
consume_predictions = (eval_utils.evaluate_predictions if evaluate
                       else drawing_utils.draw_predictions)
consume_predictions(test_data, pred_bboxes, pred_labels, pred_scores,
                    labels, batch_size)