Example #1
def test_delta_mark_classification():
    validation_size = 1 / 3
    batch_size = 3
    image_size = [256, 128]
    all_data_num = 3

    train_dataset = Dummy(subset="train",
                          validation_size=validation_size,
                          batch_size=batch_size,
                          pre_processor=Resize(image_size))

    validation_dataset = Dummy(subset="validation",
                               validation_size=validation_size,
                               batch_size=batch_size,
                               pre_processor=Resize(image_size))
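    # with validation_size = 1/3 of the 3 dummy samples, 2 go to train and 1 to validation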

    assert train_dataset.num_per_epoch == (1 - validation_size) * all_data_num
    assert validation_dataset.num_per_epoch == validation_size * all_data_num

    for _ in range(5):
        images, labels = train_dataset.feed()
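        # each batch is NHWC: (batch_size, height, width, 3); labels have shape (batch_size, num_classes)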

        assert isinstance(images, np.ndarray)
        assert images.shape[0] == batch_size
        assert images.shape[1] == image_size[0]
        assert images.shape[2] == image_size[1]
        assert images.shape[3] == 3

        assert isinstance(labels, np.ndarray)
        assert labels.shape[0] == batch_size
        assert labels.shape[1] == train_dataset.num_classes
Example #2
def test_cifar10():
    batch_size = 3
    image_size = [256, 512]
    dataset = Cifar10(batch_size=batch_size, pre_processor=Resize(image_size))
    dataset = DatasetIterator(dataset)
    val_dataset = Cifar10(subset="validation",
                          batch_size=batch_size,
                          pre_processor=Resize(image_size))
    val_dataset = DatasetIterator(val_dataset)
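    # standard CIFAR-10 split: 50,000 training images, 10,000 validation images, 10 classes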

    assert dataset.num_classes == 10
    assert dataset.num_per_epoch == 50000
    assert val_dataset.num_per_epoch == 10000

    for _ in range(STEP_SIZE):
        images, labels = dataset.feed()
        assert isinstance(images, np.ndarray)
        assert images.shape[0] == batch_size
        assert images.shape[1] == image_size[0]
        assert images.shape[2] == image_size[1]
        assert images.shape[3] == 3

        assert isinstance(labels, np.ndarray)
        assert labels.shape[0] == batch_size
        assert labels.shape[1] == dataset.num_classes
Example #3
def test_training():
    """Verify only that no error raised."""
    config = EasyDict()

    config.NETWORK_CLASS = LMBiSeNet
    config.DATASET_CLASS = DummyCamvid

    config.IS_DEBUG = False
    config.IMAGE_SIZE = [128, 160]
    config.BATCH_SIZE = 2
    config.TEST_STEPS = 1
    config.MAX_STEPS = 2
    config.SAVE_STEPS = 1
    config.SUMMARISE_STEPS = 1
    config.IS_PRETRAIN = False
    config.IS_DISTRIBUTION = False
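    # two training steps with test/save/summary every step keep this a fast smoke test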

    # network model config
    config.NETWORK = EasyDict()
    config.NETWORK.OPTIMIZER_CLASS = tf.train.AdamOptimizer
    config.NETWORK.OPTIMIZER_KWARGS = {"learning_rate": 0.001}
    config.NETWORK.IMAGE_SIZE = config.IMAGE_SIZE
    config.NETWORK.BATCH_SIZE = config.BATCH_SIZE
    config.NETWORK.DATA_FORMAT = "NHWC"

    # dataset config
    config.DATASET = EasyDict()
    config.DATASET.PRE_PROCESSOR = Resize(config.IMAGE_SIZE)
    config.DATASET.BATCH_SIZE = config.BATCH_SIZE
    config.DATASET.DATA_FORMAT = "NHWC"

    environment.init("test_lm_bisenet")
    prepare_dirs(recreate=True)
    start_training(config)
Example #4
def test_training():
    """Test only no error raised."""
    config = EasyDict()

    config.NETWORK_CLASS = Darknet
    config.DATASET_CLASS = Dummy

    config.IS_DEBUG = False
    config.IMAGE_SIZE = [28, 14]
    config.BATCH_SIZE = 2
    config.TEST_STEPS = 1
    config.MAX_STEPS = 2
    config.SAVE_CHECKPOINT_STEPS = 1
    config.KEEP_CHECKPOINT_MAX = 5
    config.SUMMARISE_STEPS = 1
    config.IS_PRETRAIN = False
    config.TASK = Tasks.CLASSIFICATION

    # network model config
    config.NETWORK = EasyDict()
    config.NETWORK.OPTIMIZER_CLASS = tf.train.AdamOptimizer
    config.NETWORK.OPTIMIZER_KWARGS = {"learning_rate": 0.001}
    config.NETWORK.IMAGE_SIZE = config.IMAGE_SIZE
    config.NETWORK.BATCH_SIZE = config.BATCH_SIZE

    # dataset config
    config.DATASET = EasyDict()
    config.DATASET.PRE_PROCESSOR = Resize(config.IMAGE_SIZE)
    config.DATASET.BATCH_SIZE = config.BATCH_SIZE

    environment.init("test_darknet")
    prepare_dirs(recreate=True)
    start_training(config)
Example #5
def test_delta_mark_classification_has_validation_path():
    batch_size = 3
    image_size = [256, 128]
    train_data_num = 3
    validation_data_num = 2

    train_dataset = setup_dataset(DummyHasValidation,
                                  subset="train",
                                  batch_size=batch_size,
                                  pre_processor=Resize(image_size))

    validation_dataset = setup_dataset(DummyHasValidation,
                                       subset="validation",
                                       batch_size=batch_size,
                                       pre_processor=Resize(image_size))

    assert train_dataset.num_per_epoch == train_data_num
    assert validation_dataset.num_per_epoch == validation_data_num

    for _ in range(5):
        images, labels = train_dataset.feed()

        assert isinstance(images, np.ndarray)
        assert images.shape[0] == batch_size
        assert images.shape[1] == image_size[0]
        assert images.shape[2] == image_size[1]
        assert images.shape[3] == 3

        assert isinstance(labels, np.ndarray)
        assert labels.shape[0] == batch_size
        assert labels.shape[1] == train_dataset.num_classes

    for _ in range(5):
        images, labels = validation_dataset.feed()

        assert isinstance(images, np.ndarray)
        assert images.shape[0] == batch_size
        assert images.shape[1] == image_size[0]
        assert images.shape[2] == image_size[1]
        assert images.shape[3] == 3

        assert isinstance(labels, np.ndarray)
        assert labels.shape[0] == batch_size
        assert labels.shape[1] == validation_dataset.num_classes
Example #6
def test_image_folder():
    validation_size = 0.2
    batch_size = 3
    image_size = [256, 256]
    train_dataset = setup_dataset(Dummy,
                                  subset="train",
                                  validation_size=validation_size,
                                  batch_size=batch_size,
                                  pre_processor=Resize(image_size))

    validation_dataset = setup_dataset(Dummy,
                                       subset="validation",
                                       validation_size=validation_size,
                                       batch_size=batch_size,
                                       pre_processor=Resize(image_size))

    expected_image_dir = os.path.join(environment.DATA_DIR, Dummy.extend_dir)
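    # count the JPEG/PNG files under the dataset directory to know how many samples to expect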
    expected_paths = [
        image_path
        for image_path in glob(os.path.join(expected_image_dir, "**/*"))
        if os.path.isfile(image_path)
        and imghdr.what(image_path) in ["jpeg", "png"]
    ]

    assert train_dataset.num_per_epoch == len(expected_paths) * (1 - validation_size)
    assert validation_dataset.num_per_epoch == len(expected_paths) * validation_size

    for _ in range(5):
        images, labels = train_dataset.feed()

        assert isinstance(images, np.ndarray)
        assert images.shape[0] == batch_size
        assert images.shape[1] == image_size[0]
        assert images.shape[2] == image_size[1]
        assert images.shape[3] == 3

        assert isinstance(labels, np.ndarray)
        assert labels.shape[0] == batch_size
        assert labels.shape[1] == train_dataset.num_classes
Example #7
def test_ilsvrc_2012():
    batch_size = 3
    image_size = [256, 512]
    dataset = Ilsvrc2012(batch_size=batch_size,
                         pre_processor=Resize(image_size))
    dataset = DatasetIterator(dataset)
    val_dataset = Ilsvrc2012(subset="validation",
                             batch_size=batch_size,
                             pre_processor=Resize(image_size))
    val_dataset = DatasetIterator(val_dataset)

    num_train = 1281167
    num_validation = 50000
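    # the standard ILSVRC-2012 (ImageNet) split sizes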

    assert dataset.num_per_epoch == num_train
    assert val_dataset.num_per_epoch == num_validation

    for _ in range(STEP_SIZE):
        images, labels = dataset.feed()
        assert isinstance(images, np.ndarray)
        assert images.shape[0] == batch_size
        assert images.shape[1] == image_size[0]
        assert images.shape[2] == image_size[1]
        assert images.shape[3] == 3

        assert isinstance(labels, np.ndarray)
        assert labels.shape[0] == batch_size
        assert labels.shape[1] == dataset.num_classes

    for _ in range(STEP_SIZE):
        images, labels = val_dataset.feed()
        assert isinstance(images, np.ndarray)
        assert images.shape[0] == batch_size
        assert images.shape[1] == image_size[0]
        assert images.shape[2] == image_size[1]
        assert images.shape[3] == 3

        assert isinstance(labels, np.ndarray)
        assert labels.shape[0] == batch_size
        assert labels.shape[1] == dataset.num_classes
Example #8
def test_resize():
    IMAGE_SIZE = [32, 32]
    orig_image = np.zeros(shape=(1024, 512, 3), dtype=np.uint8)
    orig_mask = np.zeros(shape=(1024, 512, 3), dtype=np.uint8)

    pre_processor = Resize(IMAGE_SIZE)
    resized = pre_processor(image=orig_image, mask=orig_mask)
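    # Resize returns a dict holding the resized "image" and "mask"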
    resized_image = resized["image"]
    resized_mask = resized["mask"]

    assert isinstance(resized_image, np.ndarray)
    assert isinstance(resized_mask, np.ndarray)
    assert resized_image.shape[:2] == (32, 32)
    assert resized_image.shape[2] == 3

    assert resized_mask.shape[:2] == (32, 32)
    assert resized_mask.shape[2] == 3
Example #9
def test_open_images_v4_classification():
    batch_size = 1
    image_size = [256, 256]
    dataset = OpenImagesV4Classification(batch_size=batch_size,
                                         pre_processor=Resize(image_size))

    for _ in range(5):
        images, labels = dataset.feed()

        assert isinstance(images, np.ndarray)
        assert images.shape[0] == batch_size
        assert images.shape[1] == image_size[0]
        assert images.shape[2] == image_size[1]
        assert images.shape[3] == 3

        assert isinstance(labels, np.ndarray)
        assert labels.shape[0] == batch_size
        assert labels.shape[1] == dataset.num_classes
Example #10
def test_caltech101():
    batch_size = 3
    image_size = [256, 512]
    dataset = Caltech101(batch_size=batch_size,
                         pre_processor=Resize(image_size))

    assert dataset.num_classes == 101

    for _ in range(STEP_SIZE):
        images, labels = dataset.feed()
        assert isinstance(images, np.ndarray)
        assert images.shape[0] == batch_size
        assert images.shape[1] == image_size[0]
        assert images.shape[2] == image_size[1]
        assert images.shape[3] == 3

        assert isinstance(labels, np.ndarray)
        assert labels.shape[0] == batch_size
        assert labels.shape[1] == dataset.num_classes
Example #11
def test_training():
    """Test only that no error raised."""
    config = EasyDict()

    config.NETWORK_CLASS = LmResnetQuantize
    config.DATASET_CLASS = Dummy

    config.IS_DEBUG = False
    config.IMAGE_SIZE = [32, 32]
    config.BATCH_SIZE = 2
    config.TEST_STEPS = 1
    config.MAX_STEPS = 2
    config.SAVE_CHECKPOINT_STEPS = 1
    config.KEEP_CHECKPOINT_MAX = 5
    config.SUMMARISE_STEPS = 1
    config.IS_PRETRAIN = False
    config.TASK = Tasks.CLASSIFICATION

    # network model config
    config.NETWORK = EasyDict()
    config.NETWORK.OPTIMIZER_CLASS = tf.train.AdamOptimizer
    config.NETWORK.OPTIMIZER_KWARGS = {"learning_rate": 0.001}
    config.NETWORK.IMAGE_SIZE = config.IMAGE_SIZE
    config.NETWORK.BATCH_SIZE = config.BATCH_SIZE
    config.NETWORK.ACTIVATION_QUANTIZER = linear_mid_tread_half_quantizer
    config.NETWORK.ACTIVATION_QUANTIZER_KWARGS = {'bit': 2, 'max_value': 2}
    config.NETWORK.WEIGHT_QUANTIZER = binary_mean_scaling_quantizer
    config.NETWORK.WEIGHT_QUANTIZER_KWARGS = {}
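    # activations use a 2-bit linear quantizer clipped at 2.0; weights use the binary mean-scaling quantizer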

    # dataset config
    config.DATASET = EasyDict()
    config.DATASET.PRE_PROCESSOR = Resize(config.IMAGE_SIZE)
    config.DATASET.BATCH_SIZE = config.BATCH_SIZE

    environment.init("test_lm_resnet_quantize")
    prepare_dirs(recreate=True)
    start_training(config)
Example #12
# pretrain
IS_PRETRAIN = False
PRETRAIN_VARS = []
PRETRAIN_DIR = ""
PRETRAIN_FILE = ""

# for debug
# MAX_STEPS = 100
# BATCH_SIZE = 31
# SAVE_CHECKPOINT_STEPS = 10
# KEEP_CHECKPOINT_MAX = 5
# TEST_STEPS = 10
# SUMMARISE_STEPS = 2
# IS_DEBUG = True

PRE_PROCESSOR = Sequence([Resize(size=IMAGE_SIZE), PerImageStandardization()])
POST_PROCESSOR = None

NETWORK = EasyDict()
NETWORK.OPTIMIZER_CLASS = tf.train.MomentumOptimizer
NETWORK.OPTIMIZER_KWARGS = {"momentum": 0.9}
NETWORK.LEARNING_RATE_FUNC = tf.train.piecewise_constant
NETWORK.LEARNING_RATE_KWARGS = {
    "values": [0.1, 0.01, 0.001, 0.0001],
    "boundaries": [40000, 60000, 80000],
}
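# piecewise_constant takes one more value than boundaries:
# lr is 0.1 until step 40000, 0.01 until 60000, 0.001 until 80000, then 0.0001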
NETWORK.IMAGE_SIZE = IMAGE_SIZE
NETWORK.BATCH_SIZE = BATCH_SIZE
NETWORK.DATA_FORMAT = DATA_FORMAT
NETWORK.WEIGHT_DECAY_RATE = 0.0001
NETWORK.ACTIVATION_QUANTIZER = linear_mid_tread_half_quantizer
Example #13
PRETRAIN_DIR = ""
PRETRAIN_FILE = ""

# distributed training
IS_DISTRIBUTION = False

# for debug
# MAX_STEPS = 10
# BATCH_SIZE = 31
# SAVE_STEPS = 2
# TEST_STEPS = 10
# SUMMARISE_STEPS = 2
# IS_DEBUG = True

PRE_PROCESSOR = Sequence([
    Resize(size=IMAGE_SIZE),
    PerImageStandardization(),
])
POST_PROCESSOR = None

NETWORK = EasyDict()
NETWORK.OPTIMIZER_CLASS = tf.train.MomentumOptimizer
NETWORK.OPTIMIZER_KWARGS = {"momentum": 0.9}
NETWORK.LEARNING_RATE_FUNC = tf.train.piecewise_constant
step_per_epoch = int(50000 / BATCH_SIZE)
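# one epoch = 50,000 training images / BATCH_SIZE steps;
# warm up at lr 0.01 for the first epoch, then 0.1 until epoch 50, 0.01 until 100, 0.001 until 198, 0.0001 afterwards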
NETWORK.LEARNING_RATE_KWARGS = {
    "values": [0.01, 0.1, 0.01, 0.001, 0.0001],
    "boundaries": [
        step_per_epoch, step_per_epoch * 50, step_per_epoch * 100,
        step_per_epoch * 198
    ],
}
Example #14
PRETRAIN_DIR = ""
PRETRAIN_FILE = ""

# distributed training
IS_DISTRIBUTION = True
num_worker = 4

# for debug
# MAX_STEPS = 10
# BATCH_SIZE = 31
# SAVE_STEPS = 2
# TEST_STEPS = 10
# SUMMARISE_STEPS = 2
# IS_DEBUG = True

PRE_PROCESSOR = Sequence([Resize(size=IMAGE_SIZE), DivideBy255()])
POST_PROCESSOR = None

NETWORK = EasyDict()
NETWORK.OPTIMIZER_CLASS = tf.train.MomentumOptimizer
NETWORK.OPTIMIZER_KWARGS = {"momentum": 0.9}
NETWORK.LEARNING_RATE_FUNC = tf.train.piecewise_constant
step_per_epoch = int(50000 / (BATCH_SIZE * num_worker))
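# the effective batch is BATCH_SIZE * num_worker, so steps per epoch shrink accordingly
# and the learning rates below are scaled linearly by the number of workers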
NETWORK.LEARNING_RATE_KWARGS = {
    "values": [
        0.01 * num_worker, 0.001 * num_worker,
        0.0001 * num_worker, 0.00001 * num_worker,
    ],
    "boundaries": [
        step_per_epoch * 200, step_per_epoch * 300, step_per_epoch * 350,
    ],
}