# --- SSD training setup ---------------------------------------------------
# Parse CLI args, bring up Ray, pick the backbone implementation, and
# collect VOC dataset statistics / labels used by the training loop below.

args = io_utils.handle_args()

# Smoke tests run on a tiny local Ray instance; otherwise join the cluster
# at the supplied address.
if args.smoke_test:
    ray.init(num_cpus=2)
else:
    ray.init(address=args.address)

# Backbone selection decides which model module supplies get_model/init_model.
if args.backbone == "mobilenet_v2":
    from models.ssd_mobilenet_v2 import get_model, init_model
else:
    from models.ssd_vgg16 import get_model, init_model

ssd_log_path = io_utils.get_log_path(args.backbone)
ssd_model_path = io_utils.get_model_path(args.backbone)
hyper_params = train_utils.get_hyper_params(args.backbone)

# Only the dataset *info* objects are needed here (item counts and labels);
# the dataset handles themselves are discarded.
_, info = data_utils.get_dataset("voc/2007", "train+validation")
_, voc_2012_info = data_utils.get_dataset("voc/2012", "train+validation")
voc_2012_total_items = data_utils.get_total_item_size(
    voc_2012_info, "train+validation")
train_total_items = data_utils.get_total_item_size(info, "train+validation")
val_total_items = data_utils.get_total_item_size(info, "test")

# Optionally fold the VOC 2012 items into the training count.
if args.with_voc12:
    train_total_items += voc_2012_total_items

# Prepend an explicit background class so index 0 is always "bg".
labels = ["bg"] + data_utils.get_labels(info)
hyper_params["total_labels"] = len(labels)
import tensorflow as tf
from utils import bbox_utils, data_utils, drawing_utils, io_utils, train_utils, landmark_utils
import blazeface

# --- BlazeFace evaluation setup -------------------------------------------
# Parse CLI args, configure the GPU if requested, load hyper-parameters,
# and build the test dataset (either custom images or the 300W-LP split).

args = io_utils.handle_args()
if args.handle_gpu:
    io_utils.handle_gpu_compatibility()

batch_size = 1
# Flip to True to predict on images under custom_image_path instead of
# the 300W-LP test split.
use_custom_images = False
custom_image_path = "data/images/"

hyper_params = train_utils.get_hyper_params()
img_size = hyper_params["img_size"]

data_types = data_utils.get_data_types()
data_shapes = data_utils.get_data_shapes()
padding_values = data_utils.get_padding_values()

if use_custom_images:
    # Wrap the custom images in a generator-backed tf.data pipeline.
    img_paths = data_utils.get_custom_imgs(custom_image_path)
    total_items = len(img_paths)
    test_data = tf.data.Dataset.from_generator(
        lambda: data_utils.custom_data_generator(img_paths, img_size, img_size),
        data_types, data_shapes)
else:
    # Last 20% of the 300W-LP "train" split serves as the test set.
    test_split = "train[80%:]"
    test_data, info = data_utils.get_dataset("the300w_lp", test_split)
    total_items = data_utils.get_total_item_size(info, test_split)
    # NOTE(review): preprocessing is applied only to the TFDS branch here —
    # the custom-image generator appears to emit ready-to-use tensors; confirm.
    test_data = test_data.map(
        lambda x: data_utils.preprocessing(x, img_size, img_size))