def test_custom_open_images_v4_object_detection():
    validation_size = 0.2
    batch_size = 1
    image_size = [256, 128]

    train_dataset = Dummy(batch_size=batch_size,
                          validation_size=validation_size,
                          pre_processor=ResizeWithGtBoxes(image_size))
    validation_dataset = Dummy(batch_size=batch_size,
                               subset="validation",
                               validation_size=validation_size,
                               pre_processor=ResizeWithGtBoxes(image_size))

    num_max_boxes = train_dataset.num_max_boxes
    assert train_dataset.num_max_boxes == Dummy.count_max_boxes()

    assert train_dataset.num_per_epoch == 10 * (1 - validation_size)
    assert validation_dataset.num_per_epoch == 10 * validation_size

    for _ in range(13):
        images, labels = train_dataset.feed()
        # _show_images_with_boxes(images, labels)

        assert isinstance(images, np.ndarray)
        assert images.shape[0] == batch_size
        assert images.shape[1] == image_size[0]
        assert images.shape[2] == image_size[1]
        assert images.shape[3] == 3

        assert isinstance(labels, np.ndarray)
        assert labels.shape[0] == batch_size
        assert labels.shape[1] == num_max_boxes
        assert labels.shape[2] == 5
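# The shape checks above recur in every dataset test below. A hypothetical
# helper (not part of the original suite) could consolidate them; `images`
# is assumed NHWC and `labels` [batch, num_max_boxes, 5], exactly as the
# existing asserts state.
def _assert_feed_shapes(images, labels, batch_size, image_size, num_max_boxes):
    assert isinstance(images, np.ndarray)
    assert images.shape == (batch_size, image_size[0], image_size[1], 3)

    assert isinstance(labels, np.ndarray)
    assert labels.shape == (batch_size, num_max_boxes, 5)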
def test_delta_mark_object_detection_has_validation_path():
    batch_size = 4
    image_size = [256, 128]
    train_data_num = 3
    validation_data_num = 2

    train_dataset = setup_dataset(DummyObjectDetectionHasValidationPath,
                                  subset="train",
                                  batch_size=batch_size,
                                  pre_processor=ResizeWithGtBoxes(image_size))
    validation_dataset = setup_dataset(DummyObjectDetectionHasValidationPath,
                                       subset="validation",
                                       batch_size=batch_size,
                                       pre_processor=ResizeWithGtBoxes(image_size))

    num_max_boxes = train_dataset.num_max_boxes
    assert train_dataset.num_max_boxes == DummyObjectDetectionHasValidationPath.count_max_boxes()
    assert validation_dataset.num_max_boxes == DummyObjectDetectionHasValidationPath.count_max_boxes()

    assert train_dataset.num_per_epoch == train_data_num
    assert validation_dataset.num_per_epoch == validation_data_num

    for _ in range(2):
        images, labels = train_dataset.feed()
        # _show_images_with_boxes(images, labels)

        assert isinstance(images, np.ndarray)
        assert images.shape[0] == batch_size
        assert images.shape[1] == image_size[0]
        assert images.shape[2] == image_size[1]
        assert images.shape[3] == 3

        assert isinstance(labels, np.ndarray)
        assert labels.shape[0] == batch_size
        assert labels.shape[1] == num_max_boxes
        assert labels.shape[2] == 5

    for _ in range(2):
        images, labels = validation_dataset.feed()
        # _show_images_with_boxes(images, labels)

        assert isinstance(images, np.ndarray)
        assert images.shape[0] == batch_size
        assert images.shape[1] == image_size[0]
        assert images.shape[2] == image_size[1]
        assert images.shape[3] == 3

        assert isinstance(labels, np.ndarray)
        assert labels.shape[0] == batch_size
        assert labels.shape[1] == num_max_boxes
        assert labels.shape[2] == 5
def test_pascalvoc_2007_2012_no_skip_difficult():
    batch_size = 3
    image_size = [256, 512]

    dataset = Pascalvoc20072012(
        batch_size=batch_size,
        pre_processor=ResizeWithGtBoxes(image_size),
        skip_difficult=False,
    )
    dataset = DatasetIterator(dataset)

    num_max_boxes = 56
    num_train_val_2007 = 2501 + 2510
    num_train_val_2012 = 5717 + 5823
    num_test_2007 = 4952

    assert dataset.num_max_boxes == num_max_boxes
    assert Pascalvoc20072012.count_max_boxes(skip_difficult=False) == num_max_boxes
    assert dataset.num_per_epoch == num_train_val_2007 + num_train_val_2012

    val_dataset = Pascalvoc20072012(
        subset="validation",
        batch_size=batch_size,
        pre_processor=ResizeWithGtBoxes(image_size),
        skip_difficult=False)
    val_dataset = DatasetIterator(val_dataset)
    assert val_dataset.num_per_epoch == num_test_2007

    for _ in range(STEP_SIZE):
        images, labels = dataset.feed()
        # _show_images_with_boxes(images, labels)

        assert isinstance(images, np.ndarray)
        assert images.shape[0] == batch_size
        assert images.shape[1] == image_size[0]
        assert images.shape[2] == image_size[1]
        assert images.shape[3] == 3

        assert isinstance(labels, np.ndarray)
        assert labels.shape[0] == batch_size
        assert labels.shape[1] == num_max_boxes
        assert labels.shape[2] == 5

    for _ in range(STEP_SIZE):
        images, labels = val_dataset.feed()
        # _show_images_with_boxes(images, labels)

        assert isinstance(images, np.ndarray)
        assert images.shape[0] == batch_size
        assert images.shape[1] == image_size[0]
        assert images.shape[2] == image_size[1]
        assert images.shape[3] == 3

        assert isinstance(labels, np.ndarray)
        assert labels.shape[0] == batch_size
        assert labels.shape[1] == num_max_boxes
        assert labels.shape[2] == 5
def test_mscoco_object_detection_person():
    batch_size = 3
    image_size = [256, 512]
    num_max_boxes = 14
    num_train = 38699
    num_val = 18513

    dataset = MscocoObjectDetectionPerson(
        batch_size=batch_size, pre_processor=ResizeWithGtBoxes(image_size))
    dataset = DatasetIterator(dataset)

    assert MscocoObjectDetectionPerson.count_max_boxes() == num_max_boxes
    assert dataset.num_max_boxes == num_max_boxes
    assert dataset.num_per_epoch == num_train

    val_dataset = MscocoObjectDetectionPerson(
        subset="validation",
        batch_size=batch_size,
        pre_processor=ResizeWithGtBoxes(image_size))
    val_dataset = DatasetIterator(val_dataset)
    num_max_boxes = MscocoObjectDetectionPerson.count_max_boxes()

    assert val_dataset.num_per_epoch == num_val

    for _ in range(STEP_SIZE):
        images, labels = dataset.feed()
        # _show_images_with_boxes(images, labels)

        assert isinstance(images, np.ndarray)
        assert images.shape[0] == batch_size
        assert images.shape[1] == image_size[0]
        assert images.shape[2] == image_size[1]
        assert images.shape[3] == 3

        assert isinstance(labels, np.ndarray)
        assert labels.shape[0] == batch_size
        assert labels.shape[1] == num_max_boxes
        assert labels.shape[2] == 5

    for _ in range(STEP_SIZE):
        images, labels = val_dataset.feed()
        # _show_images_with_boxes(images, labels)

        assert isinstance(images, np.ndarray)
        assert images.shape[0] == batch_size
        assert images.shape[1] == image_size[0]
        assert images.shape[2] == image_size[1]
        assert images.shape[3] == 3

        assert isinstance(labels, np.ndarray)
        assert labels.shape[0] == batch_size
        assert labels.shape[1] == num_max_boxes
        assert labels.shape[2] == 5
def test_bdd100k():
    batch_size = 3
    image_size = [320, 320]
    num_max_boxes = 100
    num_train = 70000
    num_val = 10000

    dataset = BDD100K(batch_size=batch_size,
                      max_boxes=num_max_boxes,
                      pre_processor=ResizeWithGtBoxes(image_size))
    dataset = DatasetIterator(dataset)

    assert dataset.num_max_boxes == num_max_boxes
    assert dataset.num_per_epoch == num_train

    val_dataset = BDD100K(subset="validation",
                          batch_size=batch_size,
                          max_boxes=num_max_boxes,
                          pre_processor=ResizeWithGtBoxes(image_size))
    val_dataset = DatasetIterator(val_dataset)

    assert val_dataset.num_per_epoch == num_val

    for _ in range(STEP_SIZE):
        images, labels = dataset.feed()
        # _show_images_with_boxes(images, labels)

        assert isinstance(images, np.ndarray)
        assert images.shape[0] == batch_size
        assert images.shape[1] == image_size[0]
        assert images.shape[2] == image_size[1]
        assert images.shape[3] == 3

        assert isinstance(labels, np.ndarray)
        assert labels.shape[0] == batch_size
        assert labels.shape[1] == num_max_boxes
        assert labels.shape[2] == 5

    for _ in range(STEP_SIZE):
        images, labels = val_dataset.feed()
        # _show_images_with_boxes(images, labels)

        assert isinstance(images, np.ndarray)
        assert images.shape[0] == batch_size
        assert images.shape[1] == image_size[0]
        assert images.shape[2] == image_size[1]
        assert images.shape[3] == 3

        assert isinstance(labels, np.ndarray)
        assert labels.shape[0] == batch_size
        assert labels.shape[1] == num_max_boxes
        assert labels.shape[2] == 5
def test_custom_has_validation_open_images_v4_object_detection():
    batch_size = 8
    image_size = [196, 128]

    train_dataset = DummyHasValidation(subset="train",
                                       batch_size=batch_size,
                                       pre_processor=ResizeWithGtBoxes(image_size))
    train_dataset = DatasetIterator(train_dataset)
    validation_dataset = DummyHasValidation(subset="validation",
                                            batch_size=batch_size,
                                            pre_processor=ResizeWithGtBoxes(image_size))
    validation_dataset = DatasetIterator(validation_dataset)

    num_max_boxes = validation_dataset.num_max_boxes
    assert validation_dataset.num_max_boxes == DummyHasValidation.count_max_boxes()

    assert train_dataset.num_per_epoch == 10
    assert validation_dataset.num_per_epoch == 16
    assert len(train_dataset.classes) == 44
    assert len(validation_dataset.classes) == 44

    for _ in range(3):
        images, labels = train_dataset.feed()
        # _show_images_with_boxes(images, labels)

        assert isinstance(images, np.ndarray)
        assert images.shape[0] == batch_size
        assert images.shape[1] == image_size[0]
        assert images.shape[2] == image_size[1]
        assert images.shape[3] == 3

        assert isinstance(labels, np.ndarray)
        assert labels.shape[0] == batch_size
        assert labels.shape[1] == num_max_boxes
        assert labels.shape[2] == 5

    for _ in range(3):
        images, labels = validation_dataset.feed()
        # _show_images_with_boxes(images, labels)

        assert isinstance(images, np.ndarray)
        assert images.shape[0] == batch_size
        assert images.shape[1] == image_size[0]
        assert images.shape[2] == image_size[1]
        assert images.shape[3] == 3

        assert isinstance(labels, np.ndarray)
        assert labels.shape[0] == batch_size
        assert labels.shape[1] == num_max_boxes
        assert labels.shape[2] == 5
def test_widerface():
    batch_size = 3
    image_size = [160, 160]
    num_max_boxes = 3
    num_train = 7255
    num_val = 1758

    dataset = WiderFace(batch_size=batch_size,
                        max_boxes=num_max_boxes,
                        pre_processor=ResizeWithGtBoxes(image_size))

    assert dataset.num_max_boxes == num_max_boxes
    assert dataset.num_per_epoch == num_train

    val_dataset = WiderFace(subset="validation",
                            batch_size=batch_size,
                            max_boxes=num_max_boxes,
                            pre_processor=ResizeWithGtBoxes(image_size))

    assert val_dataset.num_per_epoch == num_val

    for _ in range(STEP_SIZE):
        images, labels = dataset.feed()
        # _show_images_with_boxes(images, labels)

        assert isinstance(images, np.ndarray)
        assert images.shape[0] == batch_size
        assert images.shape[1] == image_size[0]
        assert images.shape[2] == image_size[1]
        assert images.shape[3] == 3

        assert isinstance(labels, np.ndarray)
        assert labels.shape[0] == batch_size
        assert labels.shape[1] == num_max_boxes
        assert labels.shape[2] == 5

    for _ in range(STEP_SIZE):
        images, labels = val_dataset.feed()
        # _show_images_with_boxes(images, labels)

        assert isinstance(images, np.ndarray)
        assert images.shape[0] == batch_size
        assert images.shape[1] == image_size[0]
        assert images.shape[2] == image_size[1]
        assert images.shape[3] == 3

        assert isinstance(labels, np.ndarray)
        assert labels.shape[0] == batch_size
        assert labels.shape[1] == num_max_boxes
        assert labels.shape[2] == 5
def test_training():
    """Test that no error is raised."""
    config = EasyDict()
    config.NETWORK_CLASS = YoloV2
    config.DATASET_CLASS = LmThingsOnATable

    config.IS_DEBUG = False
    config.IMAGE_SIZE = [128, 160]
    config.BATCH_SIZE = 2
    config.TEST_STEPS = 1
    config.MAX_STEPS = 2
    config.SAVE_STEPS = 1
    config.SUMMARISE_STEPS = 1
    config.IS_PRETRAIN = False
    config.IS_DISTRIBUTION = False

    # network model config
    config.NETWORK = EasyDict()
    config.NETWORK.OPTIMIZER_CLASS = tf.train.AdamOptimizer
    config.NETWORK.OPTIMIZER_KWARGS = {"learning_rate": 0.001}
    config.NETWORK.IMAGE_SIZE = config.IMAGE_SIZE
    config.NETWORK.BATCH_SIZE = config.BATCH_SIZE
    config.NETWORK.DATA_FORMAT = "NCHW"

    # dataset config
    config.DATASET = EasyDict()
    config.DATASET.PRE_PROCESSOR = ResizeWithGtBoxes(config.IMAGE_SIZE)
    config.DATASET.BATCH_SIZE = config.BATCH_SIZE
    config.DATASET.DATA_FORMAT = "NCHW"

    environment.init("test_yolo_v2")
    prepare_dirs(recreate=True)
    start_training(config)
def test_open_images_v4_object_detection():
    batch_size = 1
    image_size = [256, 256]
    dataset = OpenImagesV4BoundingBox(batch_size=batch_size,
                                      pre_processor=ResizeWithGtBoxes(image_size))
    dataset = DatasetIterator(dataset)

    num_max_boxes = dataset.num_max_boxes
    assert dataset.num_max_boxes == OpenImagesV4BoundingBox.count_max_boxes()

    for _ in range(5):
        images, labels = dataset.feed()
        # _show_images_with_boxes(images, labels)

        assert isinstance(images, np.ndarray)
        assert images.shape[0] == batch_size
        assert images.shape[1] == image_size[0]
        assert images.shape[2] == image_size[1]
        assert images.shape[3] == 3

        assert isinstance(labels, np.ndarray)
        assert labels.shape[0] == batch_size
        assert labels.shape[1] == num_max_boxes
        assert labels.shape[2] == 5
def test_training():
    """Test that no error is raised."""
    config = EasyDict()
    config.NETWORK_CLASS = YoloV1
    config.DATASET_CLASS = LmThingsOnATable

    config.IS_DEBUG = False
    config.IMAGE_SIZE = [70, 70]
    config.BATCH_SIZE = 4
    config.TEST_STEPS = 1
    config.MAX_STEPS = 2
    config.SAVE_CHECKPOINT_STEPS = 1
    config.KEEP_CHECKPOINT_MAX = 5
    config.SUMMARISE_STEPS = 1
    config.IS_PRETRAIN = False
    config.TASK = Tasks.OBJECT_DETECTION

    # network model config
    config.NETWORK = EasyDict()
    config.NETWORK.IMAGE_SIZE = config.IMAGE_SIZE
    config.NETWORK.BATCH_SIZE = config.BATCH_SIZE

    # dataset config
    config.DATASET = EasyDict()
    config.DATASET.PRE_PROCESSOR = ResizeWithGtBoxes(config.IMAGE_SIZE)
    config.DATASET.BATCH_SIZE = config.BATCH_SIZE

    environment.init("test_yolov_1")
    prepare_dirs(recreate=True)
    start_training(config)
def test_training():
    """Test that no error is raised."""
    config = EasyDict()
    config.NETWORK_CLASS = YoloV2
    config.DATASET_CLASS = Pascalvoc2007

    config.IS_DEBUG = False
    config.IMAGE_SIZE = [128, 160]
    config.BATCH_SIZE = 2
    config.TEST_STEPS = 1
    config.MAX_STEPS = 2
    config.SAVE_CHECKPOINT_STEPS = 1
    config.KEEP_CHECKPOINT_MAX = 5
    config.SUMMARISE_STEPS = 1
    config.IS_PRETRAIN = False
    config.TASK = Tasks.OBJECT_DETECTION

    # network model config
    config.NETWORK = EasyDict()
    config.NETWORK.OPTIMIZER_CLASS = tf.train.AdamOptimizer
    config.NETWORK.OPTIMIZER_KWARGS = {"learning_rate": 0.001}
    config.NETWORK.IMAGE_SIZE = config.IMAGE_SIZE
    config.NETWORK.BATCH_SIZE = config.BATCH_SIZE
    config.NETWORK.DATA_FORMAT = "NHWC"

    # dataset config
    config.DATASET = EasyDict()
    config.DATASET.PRE_PROCESSOR = ResizeWithGtBoxes(config.IMAGE_SIZE)
    config.DATASET.BATCH_SIZE = config.BATCH_SIZE
    config.DATASET.DATA_FORMAT = "NHWC"

    environment.init("test_yolo_v2")
    prepare_dirs(recreate=True)
    start_training(config)
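# The training configs above select a tensor layout via DATA_FORMAT ("NHWC"
# or "NCHW"). A hedged numpy illustration of what that choice means for a
# batch; the arrays here are stand-ins, not taken from the tests.
def _layout_example():
    nchw = np.zeros((2, 3, 128, 160), dtype=np.float32)  # (batch, channels, height, width)
    nhwc = np.transpose(nchw, (0, 2, 3, 1))              # (batch, height, width, channels)
    assert nhwc.shape == (2, 128, 160, 3)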
def test_pascalvoc_2012():
    batch_size = 3
    image_size = [256, 512]
    dataset = Pascalvoc2012(batch_size=batch_size,
                            pre_processor=ResizeWithGtBoxes(image_size))
    num_max_boxes = 39
    dataset = DatasetIterator(dataset)

    assert dataset.num_max_boxes == num_max_boxes
    assert Pascalvoc2012.count_max_boxes() == num_max_boxes
    assert dataset.num_per_epoch == 5717

    val_dataset = Pascalvoc2012(subset="validation",
                                batch_size=batch_size,
                                pre_processor=ResizeWithGtBoxes(image_size))
    val_dataset = DatasetIterator(val_dataset)
    assert val_dataset.num_per_epoch == 5823

    for _ in range(STEP_SIZE):
        images, labels = dataset.feed()
        # _show_images_with_boxes(images, labels)

        assert isinstance(images, np.ndarray)
        assert images.shape[0] == batch_size
        assert images.shape[1] == image_size[0]
        assert images.shape[2] == image_size[1]
        assert images.shape[3] == 3

        assert isinstance(labels, np.ndarray)
        assert labels.shape[0] == batch_size
        assert labels.shape[1] == num_max_boxes
        assert labels.shape[2] == 5

    for _ in range(STEP_SIZE):
        images, labels = val_dataset.feed()
        # _show_images_with_boxes(images, labels)

        assert isinstance(images, np.ndarray)
        assert images.shape[0] == batch_size
        assert images.shape[1] == image_size[0]
        assert images.shape[2] == image_size[1]
        assert images.shape[3] == 3

        assert isinstance(labels, np.ndarray)
        assert labels.shape[0] == batch_size
        assert labels.shape[1] == num_max_boxes
        assert labels.shape[2] == 5
def test_delta_mark_object_detection():
    validation_size = 1 / 3
    batch_size = 3
    image_size = [256, 128]
    all_data_num = 3

    train_dataset = setup_dataset(DummyObjectDetectio,
                                  subset="train",
                                  validation_size=validation_size,
                                  batch_size=batch_size,
                                  pre_processor=ResizeWithGtBoxes(image_size))
    validation_dataset = setup_dataset(DummyObjectDetectio,
                                       subset="validation",
                                       validation_size=validation_size,
                                       batch_size=batch_size,
                                       pre_processor=ResizeWithGtBoxes(image_size))

    num_max_boxes = train_dataset.num_max_boxes
    assert train_dataset.num_max_boxes == DummyObjectDetectio.count_max_boxes()
    assert validation_dataset.num_max_boxes == DummyObjectDetectio.count_max_boxes()

    assert train_dataset.num_per_epoch == (1 - validation_size) * all_data_num
    assert validation_dataset.num_per_epoch == validation_size * all_data_num

    for _ in range(2):
        images, labels = train_dataset.feed()
        _show_images_with_boxes(images, labels)

        assert isinstance(images, np.ndarray)
        assert images.shape[0] == batch_size
        assert images.shape[1] == image_size[0]
        assert images.shape[2] == image_size[1]
        assert images.shape[3] == 3

        assert isinstance(labels, np.ndarray)
        assert labels.shape[0] == batch_size
        assert labels.shape[1] == num_max_boxes
        assert labels.shape[2] == 5
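# The tests call _show_images_with_boxes for visual debugging. A minimal
# sketch of such a helper, assuming each label row is (x, y, w, h, class_id)
# and padding rows carry a negative class id; both assumptions are not
# confirmed by this file.
import matplotlib.patches as patches
import matplotlib.pyplot as plt


def _show_images_with_boxes_sketch(images, labels):
    for image, boxes in zip(images, labels):
        _, ax = plt.subplots()
        ax.imshow(image.astype(np.uint8))
        for x, y, w, h, class_id in boxes:
            if class_id < 0:
                continue  # skip padding rows
            ax.add_patch(patches.Rectangle(
                (x, y), w, h, fill=False, edgecolor="red"))
        plt.show()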
def test_flip_top_bottom():
    batch_size = 3
    image_size = [256, 512]
    dataset = LmThingsOnATable(
        batch_size=batch_size,
        pre_processor=ResizeWithGtBoxes(image_size),
        augmentor=FlipTopBottom(),
    )

    for _ in range(5):
        images, labels = dataset.feed()
        _show_images_with_boxes(images, labels)
def test_flip_left_right():
    batch_size = 3
    image_size = [256, 512]
    dataset = LmThingsOnATable(
        batch_size=batch_size,
        pre_processor=ResizeWithGtBoxes(image_size),
        augmentor=FlipLeftRight(is_bounding_box=True),
    )

    for _ in range(5):
        images, labels = dataset.feed()
        _show_images_with_boxes(images, labels)
def test_contrast():
    batch_size = 3
    image_size = [256, 512]
    dataset = LmThingsOnATable(
        batch_size=batch_size,
        pre_processor=ResizeWithGtBoxes(image_size),
        augmentor=Contrast((0.0, 2.0)),
    )
    dataset = DatasetIterator(dataset)

    for _ in range(5):
        images, labels = dataset.feed()
        _show_images_with_boxes(images, labels)
def test_hue():
    batch_size = 3
    image_size = [256, 512]
    dataset = Pascalvoc2007(
        batch_size=batch_size,
        pre_processor=ResizeWithGtBoxes(image_size),
        augmentor=Hue((-10, 10)),
    )
    dataset = DatasetIterator(dataset)

    for _ in range(5):
        images, labels = dataset.feed()
        _show_images_with_boxes(images, labels)
def test_hue():
    batch_size = 3
    image_size = [256, 512]
    dataset = LmThingsOnATable(
        batch_size=batch_size,
        pre_processor=ResizeWithGtBoxes(image_size),
        augmentor=Hue((-10, 10)),
    )

    for _ in range(5):
        images, labels = dataset.feed()
        _show_images_with_boxes(images, labels)
def test_flip_top_bottom():
    batch_size = 3
    image_size = [256, 512]
    dataset = Pascalvoc2007(
        batch_size=batch_size,
        pre_processor=ResizeWithGtBoxes(image_size),
        augmentor=FlipTopBottom(),
    )
    dataset = DatasetIterator(dataset)

    for _ in range(5):
        images, labels = dataset.feed()
        _show_images_with_boxes(images, labels)
def test_ssd_random_crop():
    batch_size = 3
    image_size = [256, 512]
    dataset = LmThingsOnATable(
        batch_size=batch_size,
        pre_processor=ResizeWithGtBoxes(image_size),
        augmentor=SSDRandomCrop(),
    )

    for _ in range(5):
        images, labels = dataset.feed()
        _show_images_with_boxes(images, labels)
        # cropped boxes must stay inside the resized image:
        # width <= 512 and height <= 256
        assert np.all(labels[:, :, 2] <= 512)
        assert np.all(labels[:, :, 3] <= 256)
def test_ssd_random_crop():
    batch_size = 3
    image_size = [256, 512]
    dataset = Pascalvoc2007(
        batch_size=batch_size,
        pre_processor=ResizeWithGtBoxes(image_size),
        augmentor=SSDRandomCrop(),
    )
    dataset = DatasetIterator(dataset)

    for _ in range(5):
        images, labels = dataset.feed()
        _show_images_with_boxes(images, labels)
        # cropped boxes must stay inside the resized image:
        # width <= 512 and height <= 256
        assert np.all(labels[:, :, 2] <= 512)
        assert np.all(labels[:, :, 3] <= 256)
def test_sequence():
    batch_size = 3
    image_size = [256, 512]
    augmentor = Sequence([
        FlipLeftRight(),
        FlipTopBottom(),
        SSDRandomCrop(),
    ])

    dataset = Pascalvoc2007(
        batch_size=batch_size,
        pre_processor=ResizeWithGtBoxes(image_size),
        augmentor=augmentor,
    )
    dataset = DatasetIterator(dataset)

    for _ in range(5):
        images, labels = dataset.feed()
        _show_images_with_boxes(images, labels)
def test_sequence():
    batch_size = 3
    image_size = [256, 512]
    augmentor = Sequence([
        FlipLeftRight(is_bounding_box=True),
        FlipTopBottom(is_bounding_box=True),
        SSDRandomCrop(),
    ])

    dataset = LmThingsOnATable(
        batch_size=batch_size,
        pre_processor=ResizeWithGtBoxes(image_size),
        augmentor=augmentor,
    )

    for _ in range(5):
        images, labels = dataset.feed()
        _show_images_with_boxes(images, labels)
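# Sequence chains augmentors (and, in the configs below, pre/post-processors)
# and applies them in order. A minimal sketch of how such a composer can
# work, threading the sample through each callable; this is an assumed
# interface, not the project's actual implementation.
class _SequenceSketch:
    def __init__(self, processors):
        self.processors = processors

    def __call__(self, **sample):
        # each processor receives the sample dict and returns the updated one
        for processor in self.processors:
            sample = processor(**sample)
        return sample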
def test_training():
    """Test that no error is raised."""
    config = EasyDict()
    config.NETWORK_CLASS = YoloV2Quantize
    config.DATASET_CLASS = Pascalvoc2007

    config.IS_DEBUG = False
    config.IMAGE_SIZE = [128, 160]
    config.BATCH_SIZE = 2
    config.TEST_STEPS = 1
    config.MAX_STEPS = 2
    config.SAVE_CHECKPOINT_STEPS = 1
    config.KEEP_CHECKPOINT_MAX = 5
    config.SUMMARISE_STEPS = 1
    config.IS_PRETRAIN = False
    config.TASK = Tasks.OBJECT_DETECTION

    # network model config
    config.NETWORK = EasyDict()
    config.NETWORK.OPTIMIZER_CLASS = tf.train.AdamOptimizer
    config.NETWORK.OPTIMIZER_KWARGS = {"learning_rate": 0.001}
    config.NETWORK.IMAGE_SIZE = config.IMAGE_SIZE
    config.NETWORK.BATCH_SIZE = config.BATCH_SIZE
    config.NETWORK.ACTIVATION_QUANTIZER = linear_mid_tread_half_quantizer
    config.NETWORK.ACTIVATION_QUANTIZER_KWARGS = {
        'bit': 2,
        'max_value': 2.0
    }
    config.NETWORK.WEIGHT_QUANTIZER = binary_channel_wise_mean_scaling_quantizer
    config.NETWORK.WEIGHT_QUANTIZER_KWARGS = {}

    # dataset config
    config.DATASET = EasyDict()
    config.DATASET.PRE_PROCESSOR = ResizeWithGtBoxes(config.IMAGE_SIZE)
    config.DATASET.BATCH_SIZE = config.BATCH_SIZE

    environment.init("test_yolov_2_quantize")
    prepare_dirs(recreate=True)
    start_training(config)
def test_lm_things_on_a_table():
    batch_size = 3
    image_size = [256, 512]
    dataset = LmThingsOnATable(batch_size=batch_size,
                               pre_processor=ResizeWithGtBoxes(image_size))

    num_max_boxes = LmThingsOnATable.count_max_boxes()

    for _ in range(STEP_SIZE):
        images, labels = dataset.feed()

        assert isinstance(images, np.ndarray)
        assert images.shape[0] == batch_size
        assert images.shape[1] == image_size[0]
        assert images.shape[2] == image_size[1]
        assert images.shape[3] == 3

        assert isinstance(labels, np.ndarray)
        assert labels.shape[0] == batch_size
        assert labels.shape[1] == num_max_boxes
        assert labels.shape[2] == 5
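# Hedged illustration of the padding that makes labels.shape[1] equal to
# num_max_boxes above: images with fewer ground-truth boxes are filled with
# dummy rows. The dummy value (class id -1) is an assumption, not taken from
# the dataset code.
def _pad_boxes_sketch(gt_boxes, num_max_boxes):
    padded = np.full((num_max_boxes, 5), [0, 0, 0, 0, -1], dtype=np.float32)
    padded[:len(gt_boxes)] = gt_boxes  # assumes len(gt_boxes) <= num_max_boxes
    return padded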
def test_pascalvoc_2007_with_target_classes():
    batch_size = 3
    image_size = [256, 512]
    num_max_boxes = 12
    num_train = 240
    num_validation = 254
    num_test = 433

    assert TargetClassesPascalvoc2007.count_max_boxes() == num_max_boxes

    dataset = TargetClassesPascalvoc2007(
        batch_size=batch_size,
        pre_processor=ResizeWithGtBoxes(image_size),
    )
    dataset = DatasetIterator(dataset)
    assert dataset.num_per_epoch == num_train

    val_dataset = TargetClassesPascalvoc2007(
        subset="validation",
        batch_size=batch_size,
        pre_processor=ResizeWithGtBoxes(image_size))
    val_dataset = DatasetIterator(val_dataset)
    assert val_dataset.num_per_epoch == num_validation

    test_dataset = TargetClassesPascalvoc2007(
        subset="test",
        batch_size=batch_size,
        pre_processor=ResizeWithGtBoxes(image_size))
    test_dataset = DatasetIterator(test_dataset)
    assert test_dataset.num_per_epoch == num_test

    for _ in range(STEP_SIZE):
        images, labels = dataset.feed()
        # _show_images_with_boxes(images, labels)

        assert isinstance(images, np.ndarray)
        assert images.shape[0] == batch_size
        assert images.shape[1] == image_size[0]
        assert images.shape[2] == image_size[1]
        assert images.shape[3] == 3

        assert isinstance(labels, np.ndarray)
        assert labels.shape[0] == batch_size
        assert labels.shape[1] == num_max_boxes
        assert labels.shape[2] == 5

    for _ in range(STEP_SIZE):
        images, labels = val_dataset.feed()
        # _show_images_with_boxes(images, labels)

        assert isinstance(images, np.ndarray)
        assert images.shape[0] == batch_size
        assert images.shape[1] == image_size[0]
        assert images.shape[2] == image_size[1]
        assert images.shape[3] == 3

        assert isinstance(labels, np.ndarray)
        assert labels.shape[0] == batch_size
        assert labels.shape[1] == num_max_boxes
        assert labels.shape[2] == 5

    for _ in range(STEP_SIZE):
        images, labels = test_dataset.feed()
        # _show_images_with_boxes(images, labels)

        assert isinstance(images, np.ndarray)
        assert images.shape[0] == batch_size
        assert images.shape[1] == image_size[0]
        assert images.shape[2] == image_size[1]
        assert images.shape[3] == 3

        assert isinstance(labels, np.ndarray)
        assert labels.shape[0] == batch_size
        assert labels.shape[1] == num_max_boxes
        assert labels.shape[2] == 5
def test_pascalvoc_2007_not_skip_difficult():
    batch_size = 3
    image_size = [256, 512]
    num_max_boxes = 42
    num_train = 2501
    num_validation = 2510
    num_test = 4952

    assert Pascalvoc2007.count_max_boxes(skip_difficult=False) == num_max_boxes

    dataset = Pascalvoc2007(batch_size=batch_size,
                            pre_processor=ResizeWithGtBoxes(image_size),
                            skip_difficult=False)
    dataset = DatasetIterator(dataset)
    assert dataset.num_per_epoch == num_train

    val_dataset = Pascalvoc2007(subset="validation",
                                batch_size=batch_size,
                                pre_processor=ResizeWithGtBoxes(image_size),
                                skip_difficult=False)
    val_dataset = DatasetIterator(val_dataset)
    assert val_dataset.num_per_epoch == num_validation

    test_dataset = Pascalvoc2007(subset="test",
                                 batch_size=batch_size,
                                 pre_processor=ResizeWithGtBoxes(image_size),
                                 skip_difficult=False)
    test_dataset = DatasetIterator(test_dataset)
    assert test_dataset.num_per_epoch == num_test

    for _ in range(STEP_SIZE):
        images, labels = dataset.feed()

        assert isinstance(images, np.ndarray)
        assert images.shape[0] == batch_size
        assert images.shape[1] == image_size[0]
        assert images.shape[2] == image_size[1]
        assert images.shape[3] == 3

        assert isinstance(labels, np.ndarray)
        assert labels.shape[0] == batch_size
        assert labels.shape[1] == num_max_boxes
        assert labels.shape[2] == 5

    for _ in range(STEP_SIZE):
        images, labels = val_dataset.feed()

        assert isinstance(images, np.ndarray)
        assert images.shape[0] == batch_size
        assert images.shape[1] == image_size[0]
        assert images.shape[2] == image_size[1]
        assert images.shape[3] == 3

        assert isinstance(labels, np.ndarray)
        assert labels.shape[0] == batch_size
        assert labels.shape[1] == num_max_boxes
        assert labels.shape[2] == 5

    for _ in range(STEP_SIZE):
        images, labels = test_dataset.feed()

        assert isinstance(images, np.ndarray)
        assert images.shape[0] == batch_size
        assert images.shape[1] == image_size[0]
        assert images.shape[2] == image_size[1]
        assert images.shape[3] == 3

        assert isinstance(labels, np.ndarray)
        assert labels.shape[0] == batch_size
        assert labels.shape[1] == num_max_boxes
        assert labels.shape[2] == 5
SUMMARISE_STEPS = 100

# for debug
# IS_DEBUG = True
# SUMMARISE_STEPS = 1
# SUMMARISE_STEPS = 100
# TEST_STEPS = 10000
# SUMMARISE_STEPS = 100

# pretrain
IS_PRETRAIN = False
PRETRAIN_VARS = []
PRETRAIN_DIR = ""
PRETRAIN_FILE = ""

PRE_PROCESSOR = Sequence([
    ResizeWithGtBoxes(size=IMAGE_SIZE),
    DivideBy255(),
])

anchors = [
    (1.3221, 1.73145), (3.19275, 4.00944), (5.05587, 8.09892),
    (9.47112, 4.84053), (11.2364, 10.0071),
]
score_threshold = 0.05
nms_iou_threshold = 0.5
nms_max_output_size = 100
POST_PROCESSOR = Sequence([
    FormatYoloV2(
        image_size=IMAGE_SIZE,
        classes=CLASSES,
        anchors=anchors,
        data_format=DATA_FORMAT,
    ),
    ExcludeLowScoreBox(threshold=score_threshold),
    NMS(
        iou_threshold=nms_iou_threshold,
# for debug
# IS_DEBUG = True
# SUMMARISE_STEPS = 1
# SUMMARISE_STEPS = 100
# TEST_STEPS = 10000
# SUMMARISE_STEPS = 100

# pretrain
IS_PRETRAIN = False
PRETRAIN_VARS = []
PRETRAIN_DIR = ""
PRETRAIN_FILE = ""

PRE_PROCESSOR = Sequence([
    ResizeWithGtBoxes(size=IMAGE_SIZE),
    DivideBy255(),
])

anchors = [
    (1.3221, 1.73145), (3.19275, 4.00944), (5.05587, 8.09892),
    (9.47112, 4.84053), (11.2364, 10.0071),
]
score_threshold = 0.05
nms_iou_threshold = 0.5
nms_max_output_size = 100
POST_PROCESSOR = Sequence([
    FormatYoloV2(
        image_size=IMAGE_SIZE,
        classes=CLASSES,
        anchors=anchors,
        data_format=DATA_FORMAT,
    ),
SAVE_CHECKPOINT_STEPS = 1
KEEP_CHECKPOINT_MAX = 5
TEST_STEPS = 100
SUMMARISE_STEPS = 100

# distributed training
IS_DISTRIBUTION = False

# pretrain
IS_PRETRAIN = False
PRETRAIN_VARS = []
PRETRAIN_DIR = ""
PRETRAIN_FILE = ""

PRE_PROCESSOR = Sequence([
    ResizeWithGtBoxes(IMAGE_SIZE),
    PerImageStandardization(),
])

anchors = [
    (0.5, 0.25),
    (1.0, 0.75),
]
POST_PROCESSOR = Sequence([
    FormatYoloV2(
        image_size=IMAGE_SIZE,
        classes=CLASSES,
        anchors=anchors,
        data_format=DATA_FORMAT,
    ),
    ExcludeLowScoreBox(threshold=0.05),
    NMS(