Code Example #1
File: test_delta_mark.py  Project: ytfksw/blueoil
def test_delta_mark_classification():
    validation_size = 1/3
    batch_size = 3
    image_size = [256, 128]
    all_data_num = 3

    train_dataset = setup_dataset(Dummy,
                                  subset="train",
                                  validation_size=validation_size,
                                  batch_size=batch_size,
                                  pre_processor=Resize(image_size))

    validation_dataset = setup_dataset(Dummy,
                                       subset="validation",
                                       validation_size=validation_size,
                                       batch_size=batch_size,
                                       pre_processor=Resize(image_size))

    assert train_dataset.num_per_epoch == (1 - validation_size) * all_data_num
    assert validation_dataset.num_per_epoch == validation_size * all_data_num

    for _ in range(5):
        images, labels = train_dataset.feed()

        assert isinstance(images, np.ndarray)
        assert images.shape[0] == batch_size
        assert images.shape[1] == image_size[0]
        assert images.shape[2] == image_size[1]
        assert images.shape[3] == 3

        assert isinstance(labels, np.ndarray)
        assert labels.shape[0] == batch_size
        assert labels.shape[1] == train_dataset.num_classes
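
For context, these snippets are shown without their import sections. A minimal sketch of what this first example appears to rely on; the module path for Resize is an assumption based on the blueoil/lmnet layout, and setup_dataset together with the Dummy dataset class are fixtures defined inside the test module itself rather than library imports:

import numpy as np

from lmnet.pre_processor import Resize  # assumed path, not shown in the snippet
# setup_dataset() and the Dummy dataset class are local helpers/fixtures
# defined alongside test_delta_mark.py, not library imports.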
Code Example #2
File: test_darknet.py  Project: ytfksw/blueoil
def test_training():
    """Test only no error raised."""
    config = EasyDict()

    config.NETWORK_CLASS = Darknet
    config.DATASET_CLASS = Dummy

    config.IS_DEBUG = False
    config.IMAGE_SIZE = [28, 14]
    config.BATCH_SIZE = 2
    config.TEST_STEPS = 1
    config.MAX_STEPS = 2
    config.SAVE_CHECKPOINT_STEPS = 1
    config.KEEP_CHECKPOINT_MAX = 5
    config.SUMMARISE_STEPS = 1
    config.IS_PRETRAIN = False
    config.TASK = Tasks.CLASSIFICATION

    # network model config
    config.NETWORK = EasyDict()
    config.NETWORK.OPTIMIZER_CLASS = tf.train.AdamOptimizer
    config.NETWORK.OPTIMIZER_KWARGS = {"learning_rate": 0.001}
    config.NETWORK.IMAGE_SIZE = config.IMAGE_SIZE
    config.NETWORK.BATCH_SIZE = config.BATCH_SIZE

    # dataset config
    config.DATASET = EasyDict()
    config.DATASET.PRE_PROCESSOR = Resize(config.IMAGE_SIZE)
    config.DATASET.BATCH_SIZE = config.BATCH_SIZE

    environment.init("test_darknet")
    prepare_dirs(recreate=True)
    start_training(config)
Code Example #3
File: test_lm_bisenet.py  Project: kainoj/blueoil
def test_training():
    """Verify only that no error raised."""
    config = SmartDict()

    config.NETWORK_CLASS = LMBiSeNet
    config.DATASET_CLASS = DummyCamvid

    config.IS_DEBUG = False
    config.IMAGE_SIZE = [128, 160]
    config.BATCH_SIZE = 2
    config.TEST_STEPS = 1
    config.MAX_STEPS = 2
    config.SAVE_CHECKPOINT_STEPS = 1
    config.KEEP_CHECKPOINT_MAX = 5
    config.SUMMARISE_STEPS = 1
    config.IS_PRETRAIN = False
    config.TASK = Tasks.SEMANTIC_SEGMENTATION

    # network model config
    config.NETWORK = SmartDict()
    config.NETWORK.OPTIMIZER_CLASS = tf.train.AdamOptimizer
    config.NETWORK.OPTIMIZER_KWARGS = {"learning_rate": 0.001}
    config.NETWORK.IMAGE_SIZE = config.IMAGE_SIZE
    config.NETWORK.BATCH_SIZE = config.BATCH_SIZE
    config.NETWORK.DATA_FORMAT = "NHWC"

    # dataset config
    config.DATASET = SmartDict()
    config.DATASET.PRE_PROCESSOR = Resize(config.IMAGE_SIZE)
    config.DATASET.BATCH_SIZE = config.BATCH_SIZE
    config.DATASET.DATA_FORMAT = "NHWC"

    environment.init("test_lm_bisenet")
    prepare_dirs(recreate=True)
    start_training(config, profile_step=1)
Code Example #4
File: test_delta_mark.py  Project: ytfksw/blueoil
def test_delta_mark_classification_has_validation_path():
    batch_size = 3
    image_size = [256, 128]
    train_data_num = 3
    validation_data_num = 2

    train_dataset = setup_dataset(DummyHasValidation,
                                  subset="train",
                                  batch_size=batch_size,
                                  pre_processor=Resize(image_size))

    validation_dataset = setup_dataset(DummyHasValidation,
                                       subset="validation",
                                       batch_size=batch_size,
                                       pre_processor=Resize(image_size))

    assert train_dataset.num_per_epoch == train_data_num
    assert validation_dataset.num_per_epoch == validation_data_num

    for _ in range(5):
        images, labels = train_dataset.feed()

        assert isinstance(images, np.ndarray)
        assert images.shape[0] == batch_size
        assert images.shape[1] == image_size[0]
        assert images.shape[2] == image_size[1]
        assert images.shape[3] == 3

        assert isinstance(labels, np.ndarray)
        assert labels.shape[0] == batch_size
        assert labels.shape[1] == train_dataset.num_classes

    for _ in range(5):
        images, labels = validation_dataset.feed()

        assert isinstance(images, np.ndarray)
        assert images.shape[0] == batch_size
        assert images.shape[1] == image_size[0]
        assert images.shape[2] == image_size[1]
        assert images.shape[3] == 3

        assert isinstance(labels, np.ndarray)
        assert labels.shape[0] == batch_size
        assert labels.shape[1] == validation_dataset.num_classes
Code Example #5
File: test_image_folder.py  Project: ytfksw/blueoil
def test_image_folder():
    validation_size = 0.2
    batch_size = 3
    image_size = [256, 256]
    train_dataset = setup_dataset(Dummy,
                                  subset="train",
                                  validation_size=validation_size,
                                  batch_size=batch_size,
                                  pre_processor=Resize(image_size))

    validation_dataset = setup_dataset(Dummy,
                                       subset="validation",
                                       validation_size=validation_size,
                                       batch_size=batch_size,
                                       pre_processor=Resize(image_size))

    expected_image_dir = os.path.join(environment.DATA_DIR, Dummy.extend_dir)
    expected_paths = [
        image_path
        for image_path in glob(os.path.join(expected_image_dir, "**/*"))
        if os.path.isfile(image_path)
        and imghdr.what(image_path) in {"jpeg", "png"}
    ]

    assert len(expected_paths) * (
        1 - validation_size) == train_dataset.num_per_epoch
    assert len(expected_paths) * (
        validation_size) == validation_dataset.num_per_epoch

    for _ in range(5):
        images, labels = train_dataset.feed()

        assert isinstance(images, np.ndarray)
        assert images.shape[0] == batch_size
        assert images.shape[1] == image_size[0]
        assert images.shape[2] == image_size[1]
        assert images.shape[3] == 3

        assert isinstance(labels, np.ndarray)
        assert labels.shape[0] == batch_size
        assert labels.shape[1] == train_dataset.num_classes
Code Example #6
File: test_pre_processor.py  Project: ytfksw/blueoil
def test_resize():
    IMAGE_SIZE = [32, 32]
    orig_image = np.zeros(shape=(1024, 512, 3), dtype=np.uint8)
    orig_mask = np.zeros(shape=(1024, 512, 3), dtype=np.uint8)

    pre_processor = Resize(IMAGE_SIZE)
    resized = pre_processor(image=orig_image, mask=orig_mask)
    resized_image = resized["image"]
    resized_mask = resized["mask"]

    assert isinstance(resized_image, np.ndarray)
    assert isinstance(resized_mask, np.ndarray)
    assert resized_image.shape[:2] == (32, 32)
    assert resized_image.shape[2] == 3

    assert resized_mask.shape[:2] == (32, 32)
    assert resized_mask.shape[2] == 3
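
Judging from the call pattern in this test, the same pre_processor can also be applied to an image alone (the classification tests above pass Resize(image_size) without a mask); a minimal usage sketch under that assumption:

import numpy as np

pre_processor = Resize([32, 32])
result = pre_processor(image=np.zeros((1024, 512, 3), dtype=np.uint8))
resized_image = result["image"]  # expected shape: (32, 32, 3)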
Code Example #7
def test_open_images_v4_classification():
    batch_size = 1
    image_size = [256, 256]
    dataset = OpenImagesV4Classification(batch_size=batch_size,
                                         pre_processor=Resize(image_size))
    dataset = DatasetIterator(dataset)

    for _ in range(5):
        images, labels = dataset.feed()

        assert isinstance(images, np.ndarray)
        assert images.shape[0] == batch_size
        assert images.shape[1] == image_size[0]
        assert images.shape[2] == image_size[1]
        assert images.shape[3] == 3

        assert isinstance(labels, np.ndarray)
        assert labels.shape[0] == batch_size
        assert labels.shape[1] == dataset.num_classes
Code Example #8
def test_training():
    """Test only that no error raised."""
    config = EasyDict()

    config.NETWORK_CLASS = LmResnetQuantize
    config.DATASET_CLASS = Dummy

    config.IS_DEBUG = False
    config.IMAGE_SIZE = [32, 32]
    config.BATCH_SIZE = 2
    config.TEST_STEPS = 1
    config.MAX_STEPS = 2
    config.SAVE_CHECKPOINT_STEPS = 1
    config.KEEP_CHECKPOINT_MAX = 5
    config.SUMMARISE_STEPS = 1
    config.IS_PRETRAIN = False
    config.TASK = Tasks.CLASSIFICATION

    # network model config
    config.NETWORK = EasyDict()
    config.NETWORK.OPTIMIZER_CLASS = tf.train.AdamOptimizer
    config.NETWORK.OPTIMIZER_KWARGS = {"learning_rate": 0.001}
    config.NETWORK.IMAGE_SIZE = config.IMAGE_SIZE
    config.NETWORK.BATCH_SIZE = config.BATCH_SIZE
    config.NETWORK.ACTIVATION_QUANTIZER = linear_mid_tread_half_quantizer
    config.NETWORK.ACTIVATION_QUANTIZER_KWARGS = {
        'bit': 2,
        'max_value': 2
    }
    config.NETWORK.WEIGHT_QUANTIZER = binary_mean_scaling_quantizer
    config.NETWORK.WEIGHT_QUANTIZER_KWARGS = {}

    # dataset config
    config.DATASET = EasyDict()
    config.DATASET.PRE_PROCESSOR = Resize(config.IMAGE_SIZE)
    config.DATASET.BATCH_SIZE = config.BATCH_SIZE

    environment.init("test_lm_resnet_quantize")
    prepare_dirs(recreate=True)
    start_training(config)
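
The quantizer entries in this config pair a factory with its keyword arguments; presumably the network builds the actual quantize functions roughly as sketched below (an assumption about how blueoil consumes these fields, not something shown in the snippet):

# hypothetical illustration of how the config entries are likely combined
activation_quantizer = config.NETWORK.ACTIVATION_QUANTIZER(
    **config.NETWORK.ACTIVATION_QUANTIZER_KWARGS)  # bit=2, max_value=2
weight_quantizer = config.NETWORK.WEIGHT_QUANTIZER(
    **config.NETWORK.WEIGHT_QUANTIZER_KWARGS)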
Code Example #9
CLASSES = DATASET_CLASS.classes

MAX_STEPS = 100000
SAVE_CHECKPOINT_STEPS = 5000
KEEP_CHECKPOINT_MAX = 5
TEST_STEPS = 1000
SUMMARISE_STEPS = 100

# pretrain
IS_PRETRAIN = False
PRETRAIN_VARS = []
PRETRAIN_DIR = ""
PRETRAIN_FILE = ""

PRE_PROCESSOR = Sequence([
    Resize(size=IMAGE_SIZE),
    DivideBy255(),
])
POST_PROCESSOR = None

STEP_PER_EPOCH = 50000 // BATCH_SIZE

TUNE_SPEC = {
    'run': 'tunable',
    'resources_per_trial': {
        "cpu": 2,
        "gpu": 0.5
    },
    'stop': {
        'mean_accuracy': 0.87,
        'training_iteration': 200,
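
The TUNE_SPEC above (truncated here) has the shape of a Ray Tune trial specification; if that is what it is, it would be consumed along roughly these lines (a hypothetical sketch for illustration, not code from the blueoil repository):

from ray import tune

tune.run(
    TUNE_SPEC['run'],                        # name of a registered trainable, here 'tunable'
    stop=TUNE_SPEC['stop'],                  # stop at mean_accuracy 0.87 or 200 iterations
    resources_per_trial=TUNE_SPEC['resources_per_trial'],  # 2 CPUs, 0.5 GPU per trial
)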
Code Example #10
DATA_FORMAT = "NHWC"
TASK = Tasks.CLASSIFICATION
CLASSES = DATASET_CLASS.classes

MAX_STEPS = 100000
SAVE_CHECKPOINT_STEPS = 1000
KEEP_CHECKPOINT_MAX = 5
TEST_STEPS = 1000
SUMMARISE_STEPS = 100
# pretrain
IS_PRETRAIN = False
PRETRAIN_VARS = []
PRETRAIN_DIR = ""
PRETRAIN_FILE = ""

PRE_PROCESSOR = Sequence([Resize(size=IMAGE_SIZE), DivideBy255()])
POST_PROCESSOR = None

NETWORK = SmartDict()
NETWORK.OPTIMIZER_CLASS = tf.compat.v1.train.MomentumOptimizer
NETWORK.OPTIMIZER_KWARGS = {"momentum": 0.9}
NETWORK.LEARNING_RATE_FUNC = tf.compat.v1.train.cosine_decay
# Train data num is 28709
step_per_epoch = 28709 // BATCH_SIZE
NETWORK.LEARNING_RATE_KWARGS = {'learning_rate': 0.1, 'decay_steps': 100000}
NETWORK.IMAGE_SIZE = IMAGE_SIZE
NETWORK.BATCH_SIZE = BATCH_SIZE
NETWORK.DATA_FORMAT = DATA_FORMAT
NETWORK.WEIGHT_DECAY_RATE = 0.0005
NETWORK.ACTIVATION_QUANTIZER = linear_mid_tread_half_quantizer
NETWORK.ACTIVATION_QUANTIZER_KWARGS = {'bit': 2, 'max_value': 2}
Code Example #11
TASK = Tasks.CLASSIFICATION
CLASSES = DATASET_CLASS(subset="train", batch_size=1).classes

MAX_STEPS = 2
SAVE_CHECKPOINT_STEPS = 1
KEEP_CHECKPOINT_MAX = 5
TEST_STEPS = 100
SUMMARISE_STEPS = 100

# pretrain
IS_PRETRAIN = False
PRETRAIN_VARS = []
PRETRAIN_DIR = ""
PRETRAIN_FILE = ""

PRE_PROCESSOR = Sequence([Resize(size=IMAGE_SIZE), PerImageStandardization()])
POST_PROCESSOR = None

NETWORK = SmartDict()
NETWORK.OPTIMIZER_CLASS = tf.train.AdamOptimizer
NETWORK.OPTIMIZER_KWARGS = {"learning_rate": 0.001}
NETWORK.IMAGE_SIZE = IMAGE_SIZE
NETWORK.BATCH_SIZE = BATCH_SIZE
NETWORK.DATA_FORMAT = DATA_FORMAT
NETWORK.WEIGHT_DECAY_RATE = 0.0005
NETWORK.ACTIVATION_QUANTIZER = linear_mid_tread_half_quantizer
NETWORK.ACTIVATION_QUANTIZER_KWARGS = {'bit': 2, 'max_value': 2}
NETWORK.WEIGHT_QUANTIZER = binary_mean_scaling_quantizer
NETWORK.WEIGHT_QUANTIZER_KWARGS = {}

# dataset