Example #1
def test_custom_has_validation_open_images_v4_object_detection():
    batch_size = 8
    image_size = [196, 128]
    train_dataset = DummyHasValidation(
        subset="train",
        batch_size=batch_size,
        pre_processor=ResizeWithGtBoxes(image_size))
    train_dataset = DatasetIterator(train_dataset)
    validation_dataset = DummyHasValidation(
        subset="validation",
        batch_size=batch_size,
        pre_processor=ResizeWithGtBoxes(image_size))
    validation_dataset = DatasetIterator(validation_dataset)

    num_max_boxes = validation_dataset.num_max_boxes
    assert validation_dataset.num_max_boxes == DummyHasValidation.count_max_boxes()

    assert train_dataset.num_per_epoch == 10
    assert validation_dataset.num_per_epoch == 16
    assert len(train_dataset.classes) == 44
    assert len(validation_dataset.classes) == 44

    for _ in range(3):
        images, labels = train_dataset.feed()

        # _show_images_with_boxes(images, labels)

        assert isinstance(images, np.ndarray)
        assert images.shape[0] == batch_size
        assert images.shape[1] == image_size[0]
        assert images.shape[2] == image_size[1]
        assert images.shape[3] == 3

        assert isinstance(labels, np.ndarray)
        assert labels.shape[0] == batch_size
        assert labels.shape[1] == num_max_boxes
        assert labels.shape[2] == 5

    for _ in range(3):
        images, labels = validation_dataset.feed()

        # _show_images_with_boxes(images, labels)

        assert isinstance(images, np.ndarray)
        assert images.shape[0] == batch_size
        assert images.shape[1] == image_size[0]
        assert images.shape[2] == image_size[1]
        assert images.shape[3] == 3

        assert isinstance(labels, np.ndarray)
        assert labels.shape[0] == batch_size
        assert labels.shape[1] == num_max_boxes
        assert labels.shape[2] == 5
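
The label assertions throughout these examples expect a fixed [batch_size, num_max_boxes, 5] tensor. A minimal sketch of the padding that presumably produces the fixed box dimension, assuming each row is (x, y, w, h, class_id) and unused slots carry a dummy class id of -1 (both assumptions, not confirmed by the snippets):

import numpy as np

def pad_gt_boxes(gt_boxes, num_max_boxes):
    """Pad [num_boxes, 5] rows of (x, y, w, h, class_id) up to num_max_boxes."""
    padded = np.zeros((num_max_boxes, 5), dtype=np.float32)
    padded[:, 4] = -1  # dummy class id for empty slots (assumption)
    num_boxes = min(len(gt_boxes), num_max_boxes)
    padded[:num_boxes] = gt_boxes[:num_boxes]
    return padded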
Example #2
def test_delta_mark_object_detection_has_validation_path():
    batch_size = 4
    image_size = [256, 128]
    train_data_num = 3
    validation_data_num = 2

    train_dataset = setup_dataset(DummyObjectDetectionHasValidationPath,
                                  subset="train",
                                  batch_size=batch_size,
                                  pre_processor=ResizeWithGtBoxes(image_size))

    validation_dataset = setup_dataset(DummyObjectDetectionHasValidationPath,
                                       subset="validation",
                                       batch_size=batch_size,
                                       pre_processor=ResizeWithGtBoxes(image_size))

    num_max_boxes = train_dataset.num_max_boxes
    assert train_dataset.num_max_boxes == DummyObjectDetectionHasValidationPath.count_max_boxes()
    assert validation_dataset.num_max_boxes == DummyObjectDetectionHasValidationPath.count_max_boxes()

    assert train_dataset.num_per_epoch == train_data_num
    assert validation_dataset.num_per_epoch == validation_data_num

    for _ in range(2):
        images, labels = train_dataset.feed()
        # _show_images_with_boxes(images, labels)

        assert isinstance(images, np.ndarray)
        assert images.shape[0] == batch_size
        assert images.shape[1] == image_size[0]
        assert images.shape[2] == image_size[1]
        assert images.shape[3] == 3

        assert isinstance(labels, np.ndarray)
        assert labels.shape[0] == batch_size
        assert labels.shape[1] == num_max_boxes
        assert labels.shape[2] == 5

    for _ in range(2):
        images, labels = validation_dataset.feed()
        # _show_images_with_boxes(images, labels)

        assert isinstance(images, np.ndarray)
        assert images.shape[0] == batch_size
        assert images.shape[1] == image_size[0]
        assert images.shape[2] == image_size[1]
        assert images.shape[3] == 3

        assert isinstance(labels, np.ndarray)
        assert labels.shape[0] == batch_size
        assert labels.shape[1] == num_max_boxes
        assert labels.shape[2] == 5
Example #3
def test_training():
    """Test only that no error raised."""
    config = EasyDict()

    config.NETWORK_CLASS = YoloV2
    config.DATASET_CLASS = Pascalvoc2007

    config.IS_DEBUG = False
    config.IMAGE_SIZE = [128, 160]
    config.BATCH_SIZE = 2
    config.TEST_STEPS = 1
    config.MAX_STEPS = 2
    config.SAVE_CHECKPOINT_STEPS = 1
    config.KEEP_CHECKPOINT_MAX = 5
    config.SUMMARISE_STEPS = 1
    config.IS_PRETRAIN = False
    config.TASK = Tasks.OBJECT_DETECTION

    # network model config
    config.NETWORK = EasyDict()
    config.NETWORK.OPTIMIZER_CLASS = tf.train.AdamOptimizer
    config.NETWORK.OPTIMIZER_KWARGS = {"learning_rate": 0.001}
    config.NETWORK.IMAGE_SIZE = config.IMAGE_SIZE
    config.NETWORK.BATCH_SIZE = config.BATCH_SIZE
    config.NETWORK.DATA_FORMAT = "NHWC"

    # dataset config
    config.DATASET = EasyDict()
    config.DATASET.PRE_PROCESSOR = ResizeWithGtBoxes(config.IMAGE_SIZE)
    config.DATASET.BATCH_SIZE = config.BATCH_SIZE
    config.DATASET.DATA_FORMAT = "NHWC"

    environment.init("test_yolo_v2")
    prepare_dirs(recreate=True)
    start_training(config, profile_step=1)
Example #4
def test_training():
    """Test only that no error raised."""
    config = EasyDict()

    config.NETWORK_CLASS = YoloV1
    config.DATASET_CLASS = Pascalvoc2007

    config.IS_DEBUG = False
    config.IMAGE_SIZE = [70, 70]
    config.BATCH_SIZE = 4
    config.TEST_STEPS = 1
    config.MAX_STEPS = 2
    config.SAVE_CHECKPOINT_STEPS = 1
    config.KEEP_CHECKPOINT_MAX = 5
    config.SUMMARISE_STEPS = 1
    config.IS_PRETRAIN = False
    config.TASK = Tasks.OBJECT_DETECTION

    # network model config
    config.NETWORK = EasyDict()
    config.NETWORK.IMAGE_SIZE = config.IMAGE_SIZE
    config.NETWORK.BATCH_SIZE = config.BATCH_SIZE

    # dataset config
    config.DATASET = EasyDict()
    config.DATASET.PRE_PROCESSOR = ResizeWithGtBoxes(config.IMAGE_SIZE)
    config.DATASET.BATCH_SIZE = config.BATCH_SIZE

    environment.init("test_yolov_1")
    prepare_dirs(recreate=True)
    start_training(config)
Example #5
def test_open_images_v4_object_detection():
    batch_size = 1
    image_size = [256, 256]
    dataset = OpenImagesV4BoundingBox(
        batch_size=batch_size, pre_processor=ResizeWithGtBoxes(image_size))
    dataset = DatasetIterator(dataset)

    num_max_boxes = dataset.num_max_boxes
    assert dataset.num_max_boxes == OpenImagesV4BoundingBox.count_max_boxes()

    for _ in range(5):
        images, labels = dataset.feed()

        # _show_images_with_boxes(images, labels)

        assert isinstance(images, np.ndarray)
        assert images.shape[0] == batch_size
        assert images.shape[1] == image_size[0]
        assert images.shape[2] == image_size[1]
        assert images.shape[3] == 3

        assert isinstance(labels, np.ndarray)
        assert labels.shape[0] == batch_size
        assert labels.shape[1] == num_max_boxes
        assert labels.shape[2] == 5
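
Every example passes image_size to ResizeWithGtBoxes and then asserts images.shape[1] == image_size[0] and images.shape[2] == image_size[1], i.e. image_size is [height, width]. A rough, self-contained sketch of what such a pre-processor is assumed to do (nearest-neighbour resize chosen purely for illustration; the real implementation may differ):

import numpy as np

def resize_with_gt_boxes(image, gt_boxes, size):
    """Resize an HWC image to [height, width] and rescale (x, y, w, h, class_id) boxes."""
    height, width = size
    scale_y = height / image.shape[0]
    scale_x = width / image.shape[1]
    rows = (np.arange(height) / scale_y).astype(int)
    cols = (np.arange(width) / scale_x).astype(int)
    resized = image[rows][:, cols]            # nearest-neighbour sampling
    boxes = np.asarray(gt_boxes, dtype=np.float32).copy()
    boxes[:, [0, 2]] *= scale_x               # x and w scale with width
    boxes[:, [1, 3]] *= scale_y               # y and h scale with height
    return resized, boxes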
Example #6
def test_hue():
    batch_size = 3
    image_size = [256, 512]
    dataset = Pascalvoc2007(
        batch_size=batch_size, pre_processor=ResizeWithGtBoxes(image_size),
        augmentor=Hue((-10, 10)),
    )
    dataset = DatasetIterator(dataset)

    for _ in range(5):
        images, labels = dataset.feed()
        _show_images_with_boxes(images, labels)
Example #7
def test_flip_top_bottom():
    batch_size = 3
    image_size = [256, 512]
    dataset = Pascalvoc2007(
        batch_size=batch_size, pre_processor=ResizeWithGtBoxes(image_size),
        augmentor=FlipTopBottom(),
    )
    dataset = DatasetIterator(dataset)

    for _ in range(5):
        images, labels = dataset.feed()
        _show_images_with_boxes(images, labels)
Example #8
def test_delta_mark_object_detection():
    validation_size = 1/3
    batch_size = 3
    image_size = [256, 128]
    all_data_num = 3

    train_dataset = setup_dataset(DummyObjectDetection,
                                  subset="train",
                                  validation_size=validation_size,
                                  batch_size=batch_size,
                                  pre_processor=ResizeWithGtBoxes(image_size))

    validation_dataset = setup_dataset(DummyObjectDetection,
                                       subset="validation",
                                       validation_size=validation_size,
                                       batch_size=batch_size,
                                       pre_processor=ResizeWithGtBoxes(image_size))

    num_max_boxes = train_dataset.num_max_boxes
    assert train_dataset.num_max_boxes == DummyObjectDetection.count_max_boxes()
    assert validation_dataset.num_max_boxes == DummyObjectDetection.count_max_boxes()

    assert train_dataset.num_per_epoch == (1 - validation_size) * all_data_num
    assert validation_dataset.num_per_epoch == validation_size * all_data_num

    for _ in range(2):
        images, labels = train_dataset.feed()
        _show_images_with_boxes(images, labels)

        assert isinstance(images, np.ndarray)
        assert images.shape[0] == batch_size
        assert images.shape[1] == image_size[0]
        assert images.shape[2] == image_size[1]
        assert images.shape[3] == 3

        assert isinstance(labels, np.ndarray)
        assert labels.shape[0] == batch_size
        assert labels.shape[1] == num_max_boxes
        assert labels.shape[2] == 5
Example #9
def test_ssd_random_crop():
    batch_size = 3
    image_size = [256, 512]
    dataset = Pascalvoc2007(
        batch_size=batch_size, pre_processor=ResizeWithGtBoxes(image_size),
        augmentor=SSDRandomCrop(),
    )
    dataset = DatasetIterator(dataset)

    for _ in range(5):
        images, labels = dataset.feed()
        _show_images_with_boxes(images, labels)
        assert np.all(labels[:, :, 2] <= 512)
        assert np.all(labels[:, :, 3] <= 256)
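
With image_size = [256, 512] read as [height, width], the two assertions above check that cropped-and-resized boxes never exceed the image: column 2 (width) against 512 and column 3 (height) against 256, assuming (x, y, w, h, class_id) rows. A hypothetical helper packaging the same check:

import numpy as np

def boxes_fit_image(labels, image_size):
    """True when every box's w/h fits inside a [height, width] image (assumed layout)."""
    height, width = image_size
    return bool(np.all(labels[:, :, 2] <= width) and np.all(labels[:, :, 3] <= height))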
Example #10
def test_custom_open_images_v4_object_detection():
    validation_size = 0.2
    batch_size = 1
    image_size = [256, 128]
    train_dataset = Dummy(batch_size=batch_size,
                          validation_size=validation_size,
                          pre_processor=ResizeWithGtBoxes(image_size))
    train_dataset = DatasetIterator(train_dataset)

    validation_dataset = Dummy(batch_size=batch_size,
                               subset="validation",
                               validation_size=validation_size,
                               pre_processor=ResizeWithGtBoxes(image_size))
    validation_dataset = DatasetIterator(validation_dataset)

    num_max_boxes = train_dataset.num_max_boxes
    assert train_dataset.num_max_boxes == Dummy.count_max_boxes()

    assert train_dataset.num_per_epoch == 10 * (1 - validation_size)
    assert validation_dataset.num_per_epoch == 10 * validation_size

    for _ in range(13):
        images, labels = train_dataset.feed()

        # _show_images_with_boxes(images, labels)

        assert isinstance(images, np.ndarray)
        assert images.shape[0] == batch_size
        assert images.shape[1] == image_size[0]
        assert images.shape[2] == image_size[1]
        assert images.shape[3] == 3

        assert isinstance(labels, np.ndarray)
        assert labels.shape[0] == batch_size
        assert labels.shape[1] == num_max_boxes
        assert labels.shape[2] == 5
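
The num_per_epoch assertions in Examples #8 and #10 follow from the validation_size split: 10 samples with validation_size = 0.2 leave 8 for training and 2 for validation. A hypothetical helper showing the assumed split rule (names and rounding are guesses, not the library's API):

def split_files(files, subset, validation_size):
    """Assumed rule: the first (1 - validation_size) fraction trains, the rest validates."""
    split_index = int(round(len(files) * (1 - validation_size)))
    return files[:split_index] if subset == "train" else files[split_index:]

assert len(split_files(list(range(10)), "train", 0.2)) == 8
assert len(split_files(list(range(10)), "validation", 0.2)) == 2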
Example #11
def test_sequence():
    batch_size = 3
    image_size = [256, 512]
    augmentor = Sequence([
        FlipLeftRight(),
        FlipTopBottom(),
        SSDRandomCrop(),
    ])

    dataset = Pascalvoc2007(
        batch_size=batch_size, pre_processor=ResizeWithGtBoxes(image_size),
        augmentor=augmentor,
    )
    dataset = DatasetIterator(dataset)

    for _ in range(5):
        images, labels = dataset.feed()
        _show_images_with_boxes(images, labels)
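
Example #11 chains several augmentors with Sequence. A minimal sketch of the assumed composition behavior, using a hypothetical stand-in class (each processor is assumed to take and return the sample's keyword fields):

class SequenceSketch:
    """Hypothetical stand-in: apply processors in order, piping each output onward."""

    def __init__(self, processors):
        self.processors = processors

    def __call__(self, **sample):
        for processor in self.processors:
            sample = processor(**sample)  # each processor returns an updated dict
        return sample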
Example #12
def test_training():
    """Test only no error raised."""

    config = SmartDict()

    config.NETWORK_CLASS = YoloV2Quantize
    config.DATASET_CLASS = Pascalvoc2007

    config.IS_DEBUG = False
    config.IMAGE_SIZE = [128, 160]
    config.BATCH_SIZE = 2
    config.TEST_STEPS = 1
    config.MAX_STEPS = 2
    config.SAVE_CHECKPOINT_STEPS = 1
    config.KEEP_CHECKPOINT_MAX = 5
    config.SUMMARISE_STEPS = 1
    config.IS_PRETRAIN = False
    config.TASK = Tasks.OBJECT_DETECTION

    # network model config
    config.NETWORK = SmartDict()
    config.NETWORK.OPTIMIZER_CLASS = tf.train.AdamOptimizer
    config.NETWORK.OPTIMIZER_KWARGS = {"learning_rate": 0.001}
    config.NETWORK.IMAGE_SIZE = config.IMAGE_SIZE
    config.NETWORK.BATCH_SIZE = config.BATCH_SIZE
    config.NETWORK.ACTIVATION_QUANTIZER = linear_mid_tread_half_quantizer
    config.NETWORK.ACTIVATION_QUANTIZER_KWARGS = {'bit': 2, 'max_value': 2.0}
    config.NETWORK.WEIGHT_QUANTIZER = binary_channel_wise_mean_scaling_quantizer
    config.NETWORK.WEIGHT_QUANTIZER_KWARGS = {}

    # dataset config
    config.DATASET = SmartDict()
    config.DATASET.PRE_PROCESSOR = ResizeWithGtBoxes(config.IMAGE_SIZE)
    config.DATASET.BATCH_SIZE = config.BATCH_SIZE

    environment.init("test_yolov_2_quantize")
    prepare_dirs(recreate=True)
    start_training(config, profile_step=1)
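
Example #12 swaps in YoloV2Quantize with a 2-bit activation quantizer capped at max_value 2.0. As a hedged illustration of what a linear mid-tread half quantizer with those kwargs is commonly understood to compute (clip to [0, max_value], then round onto 2**bit - 1 uniform levels); this sketch is not the library's code:

import numpy as np

def linear_mid_tread_half_quantize(x, bit=2, max_value=2.0):
    """Illustrative forward pass only; gradient handling is out of scope here."""
    levels = 2 ** bit - 1                  # 3 quantization steps for bit=2
    x = np.clip(x, 0.0, max_value)         # "half": negative inputs clamp to zero
    return np.round(x / max_value * levels) / levels * max_value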
Example #13
TASK = Tasks.OBJECT_DETECTION
CLASSES = DATASET_CLASS.classes

KEEP_CHECKPOINT_MAX = 5
MAX_EPOCHS = 100
SAVE_CHECKPOINT_STEPS = 100
TEST_STEPS = 100
SUMMARISE_STEPS = 10

# pretrain
IS_PRETRAIN = False
PRETRAIN_VARS = []
PRETRAIN_DIR = ""
PRETRAIN_FILE = ""

PRE_PROCESSOR = Sequence([ResizeWithGtBoxes(size=IMAGE_SIZE), DivideBy255()])
anchors = [(1.3221, 1.73145), (3.19275, 4.00944), (5.05587, 8.09892),
           (9.47112, 4.84053), (11.2364, 10.0071)]
score_threshold = 0.05
nms_iou_threshold = 0.5
nms_max_output_size = 100
POST_PROCESSOR = Sequence([
    FormatYoloV2(
        image_size=IMAGE_SIZE,
        classes=CLASSES,
        anchors=anchors,
        data_format=DATA_FORMAT,
    ),
    ExcludeLowScoreBox(threshold=score_threshold),
    NMS(
        iou_threshold=nms_iou_threshold,
Example #14
MAX_EPOCHS = 400
SAVE_CHECKPOINT_STEPS = 1000
KEEP_CHECKPOINT_MAX = 1
TEST_STEPS = 1000
SUMMARISE_STEPS = 10000


# pretrain
IS_PRETRAIN = False
PRETRAIN_VARS = []
PRETRAIN_DIR = ""
PRETRAIN_FILE = ""

PRE_PROCESSOR = Sequence([
    ResizeWithGtBoxes(size=IMAGE_SIZE),
    PerImageStandardization()
])
anchors = [
    (1.3221, 1.73145), (3.19275, 4.00944), (5.05587, 8.09892), (9.47112, 4.84053), (11.2364, 10.0071)
]
score_threshold = 0.05
nms_iou_threshold = 0.5
nms_max_output_size = 100
POST_PROCESSOR = Sequence([
    FormatYoloV2(
        image_size=IMAGE_SIZE,
        classes=CLASSES,
        anchors=anchors,
        data_format=DATA_FORMAT,
    ),
Example #15
CLASSES = DATASET_CLASS.classes

MAX_STEPS = 2
SAVE_CHECKPOINT_STEPS = 1
KEEP_CHECKPOINT_MAX = 5
TEST_STEPS = 100
SUMMARISE_STEPS = 100

# pretrain
IS_PRETRAIN = False
PRETRAIN_VARS = []
PRETRAIN_DIR = ""
PRETRAIN_FILE = ""

PRE_PROCESSOR = Sequence([
    ResizeWithGtBoxes(IMAGE_SIZE),
    PerImageStandardization(),
])

anchors = [
    (0.5, 0.25),
    (1.0, 0.75),
]
POST_PROCESSOR = Sequence([
    FormatYoloV2(
        image_size=IMAGE_SIZE,
        classes=CLASSES,
        anchors=anchors,
        data_format=DATA_FORMAT,
    ),
    ExcludeLowScoreBox(threshold=0.05),
    NMS(