Example #1
    # Excerpt from an SSD training script (imports and argument parsing omitted).
    if args.smoke_test:
        ray.init(num_cpus=2)
    else:
        ray.init(address=args.address)

    # Pick the backbone implementation at import time.
    if args.backbone == "mobilenet_v2":
        from models.ssd_mobilenet_v2 import get_model, init_model
    else:
        from models.ssd_vgg16 import get_model, init_model
    ssd_log_path = io_utils.get_log_path(args.backbone)
    ssd_model_path = io_utils.get_model_path(args.backbone)
    hyper_params = train_utils.get_hyper_params(args.backbone)
    _, info = data_utils.get_dataset("voc/2007", "train+validation")
    _, voc_2012_info = data_utils.get_dataset("voc/2012", "train+validation")

    voc_2012_total_items = data_utils.get_total_item_size(
        voc_2012_info, "train+validation")
    train_total_items = data_utils.get_total_item_size(info,
                                                       "train+validation")
    val_total_items = data_utils.get_total_item_size(info, "test")
    if args.with_voc12:
        train_total_items += voc_2012_total_items

    labels = data_utils.get_labels(info)
    labels = ["bg"] + labels
    hyper_params["total_labels"] = len(labels)

    step_size_train = train_utils.get_step_size(train_total_items,
                                                args.batch_size)
    step_size_val = train_utils.get_step_size(val_total_items, args.batch_size)

    num_train_steps = 10 if args.smoke_test else step_size_train
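
For reference, get_step_size above presumably converts an item count into steps per epoch. A minimal sketch, assuming plain ceiling division (the actual helper in train_utils may differ):

import math

def get_step_size(total_items, batch_size):
    # Hypothetical sketch: a trailing partial batch still counts as one step.
    return math.ceil(total_items / batch_size)

# e.g. VOC 2007 train+validation (5011 items) at batch size 32 -> 157 steps
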
Example #2
# Excerpt from an SSD inference/evaluation script (imports and argument parsing omitted).
batch_size = 32
evaluate = False
use_custom_images = False
custom_image_path = "data/images/"
backbone = args.backbone
io_utils.is_valid_backbone(backbone)
#
if backbone == "mobilenet_v2":
    from models.ssd_mobilenet_v2 import get_model, init_model
else:
    from models.ssd_vgg16 import get_model, init_model
#
hyper_params = train_utils.get_hyper_params(backbone)
#
test_data, info = data_utils.get_dataset("voc/2007", "test")
total_items = data_utils.get_total_item_size(info, "test")
labels = data_utils.get_labels(info)
labels = ["bg"] + labels
hyper_params["total_labels"] = len(labels)
img_size = hyper_params["img_size"]

data_types = data_utils.get_data_types()
data_shapes = data_utils.get_data_shapes()
padding_values = data_utils.get_padding_values()

if use_custom_images:
    # Override the VOC 2007 test set with user-supplied images when requested.
    img_paths = data_utils.get_custom_imgs(custom_image_path)
    total_items = len(img_paths)
    test_data = tf.data.Dataset.from_generator(
        lambda: data_utils.custom_data_generator(img_paths, img_size, img_size),
        data_types, data_shapes)
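
custom_data_generator is referenced but not shown here. A minimal sketch of what such a generator could yield, assuming an (image, boxes, labels) tuple with empty ground truth for unlabeled custom images; everything below is an assumption, not the project's actual helper:

import tensorflow as tf

def custom_data_generator(img_paths, height, width):
    # Hypothetical sketch: one (image, gt_boxes, gt_labels) tuple per file,
    # with empty ground truth since custom images carry no annotations.
    for img_path in img_paths:
        raw = tf.io.read_file(img_path)
        img = tf.image.decode_jpeg(raw, channels=3)
        img = tf.image.convert_image_dtype(img, tf.float32)
        img = tf.image.resize(img, (height, width))
        yield img, tf.zeros((0, 4), tf.float32), tf.zeros((0,), tf.int32)
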
Example #3
# Excerpt from a BlazeFace inference script (setup above this point omitted).
img_size = hyper_params["img_size"]

data_types = data_utils.get_data_types()
data_shapes = data_utils.get_data_shapes()
padding_values = data_utils.get_padding_values()

if use_custom_images:
    img_paths = data_utils.get_custom_imgs(custom_image_path)
    total_items = len(img_paths)
    test_data = tf.data.Dataset.from_generator(
        lambda: data_utils.custom_data_generator(img_paths, img_size, img_size),
        data_types, data_shapes)
else:
    test_split = "train[80%:]"
    test_data, info = data_utils.get_dataset("the300w_lp", test_split)
    total_items = data_utils.get_total_item_size(info, test_split)
    test_data = test_data.map(
        lambda x: data_utils.preprocessing(x, img_size, img_size))
#
test_data = test_data.padded_batch(batch_size,
                                   padded_shapes=data_shapes,
                                   padding_values=padding_values)

# Build the BlazeFace model and restore trained weights.
model = blazeface.get_model(hyper_params)
model_path = io_utils.get_model_path()
model.load_weights(model_path)

prior_boxes = bbox_utils.generate_prior_boxes(
    hyper_params["feature_map_shapes"], hyper_params["aspect_ratios"])

variances = hyper_params["variances"]
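
generate_prior_boxes is used as a black box above. A minimal sketch of standard SSD-style prior-box generation, assuming normalized [y1, x1, y2, x2] boxes, one scale per feature map, and per-map aspect-ratio lists; the real bbox_utils implementation may lay boxes out differently:

import numpy as np

def generate_prior_boxes(feature_map_shapes, aspect_ratios):
    # Hypothetical sketch: one normalized box per aspect ratio, centered on
    # every cell of every feature map.
    boxes = []
    for k, fm_size in enumerate(feature_map_shapes):
        scale = 1.0 / fm_size  # assumption: box scale tied to map size
        for i in range(fm_size):
            for j in range(fm_size):
                cy, cx = (i + 0.5) / fm_size, (j + 0.5) / fm_size
                for ar in aspect_ratios[k]:
                    h, w = scale / np.sqrt(ar), scale * np.sqrt(ar)
                    boxes.append([cy - h / 2, cx - w / 2,
                                  cy + h / 2, cx + w / 2])
    return np.clip(boxes, 0.0, 1.0).astype(np.float32)
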
Example #4
# Excerpt from a BlazeFace training script (other imports omitted).
import random

args = io_utils.handle_args()
if args.handle_gpu:
    io_utils.handle_gpu_compatibility()

batch_size = 32
epochs = 150
load_weights = False
hyper_params = train_utils.get_hyper_params()

# 300W-LP ships a single "train" split, so carve out the last 20% for validation.
train_split = "train[:80%]"
val_split = "train[80%:]"
train_data, info = data_utils.get_dataset("the300w_lp", train_split)
val_data, _ = data_utils.get_dataset("the300w_lp", val_split)
train_total_items = data_utils.get_total_item_size(info, train_split)
val_total_items = data_utils.get_total_item_size(info, val_split)
#
img_size = hyper_params["img_size"]

train_data = train_data.map(lambda x: data_utils.preprocessing(
    x, img_size, img_size, augmentation.apply))
val_data = val_data.map(
    lambda x: data_utils.preprocessing(x, img_size, img_size))
#
data_shapes = data_utils.get_data_shapes()
padding_values = data_utils.get_padding_values()
train_data = train_data.shuffle(batch_size * 12).padded_batch(
    batch_size, padded_shapes=data_shapes, padding_values=padding_values)
val_data = val_data.padded_batch(batch_size,
                                 padded_shapes=data_shapes,
                                 padding_values=padding_values)
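
The excerpt cuts off after batching. A plausible continuation, mirroring the step-size pattern from Example #1; the optimizer and loss below are placeholders, not the project's actual training code:

step_size_train = train_utils.get_step_size(train_total_items, batch_size)
step_size_val = train_utils.get_step_size(val_total_items, batch_size)

model = blazeface.get_model(hyper_params)
model.compile(optimizer="adam", loss="mse")  # placeholder loss for the sketch
model.fit(train_data,
          validation_data=val_data,
          steps_per_epoch=step_size_train,
          validation_steps=step_size_val,
          epochs=epochs)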