Code example #1
def test_flip_left_right():
    batch_size = 3
    image_size = [256, 512]
    dataset = Pascalvoc2007(
        batch_size=batch_size, pre_processor=ResizeWithGtBoxes(image_size),
        augmentor=FlipLeftRight(),
    )
    dataset = DatasetIterator(dataset)

    for _ in range(5):
        images, labels = dataset.feed()
        _show_images_with_boxes(images, labels)
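
For orientation, what a FlipLeftRight-style augmentor has to do for detection data: flip the pixel array along its width axis and mirror the x coordinates of the ground-truth boxes. A minimal standalone sketch, not blueoil's implementation; the [x, y, w, h] box layout and the prob argument are assumptions:

import numpy as np

def flip_left_right(image, gt_boxes=None, prob=0.5):
    """Horizontally flip an HWC image, and its [x, y, w, h] boxes, with probability prob."""
    if np.random.rand() >= prob:
        return image, gt_boxes
    flipped = image[:, ::-1, :]  # reverse the width axis
    if gt_boxes is not None:
        width = image.shape[1]
        gt_boxes = gt_boxes.copy()
        # The box keeps its size; only its left edge mirrors: new_x = width - (x + w).
        gt_boxes[:, 0] = width - gt_boxes[:, 0] - gt_boxes[:, 2]
    return flipped, gt_boxes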
Code example #2
File: test_data_augmentor.py Project: tkng/blueoil
def test_flip_left_right():
    batch_size = 3
    image_size = [256, 512]
    dataset = LmThingsOnATable(
        batch_size=batch_size,
        pre_processor=ResizeWithGtBoxes(image_size),
        augmentor=FlipLeftRight(),
    )

    for _ in range(5):
        images, labels = dataset.feed()
        _show_images_with_boxes(images, labels)
Code example #3
def test_sequence():
    batch_size = 3
    image_size = [256, 512]
    augmentor = Sequence([
        FlipLeftRight(),
        FlipTopBottom(),
        SSDRandomCrop(),
    ])

    dataset = Pascalvoc2007(
        batch_size=batch_size, pre_processor=ResizeWithGtBoxes(image_size),
        augmentor=augmentor,
    )
    dataset = DatasetIterator(dataset)

    for _ in range(5):
        images, labels = dataset.feed()
        _show_images_with_boxes(images, labels)
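
Sequence is ordered composition: each augmentor's output becomes the next one's input, so here FlipLeftRight runs before FlipTopBottom, which runs before SSDRandomCrop. A minimal sketch of that pattern, assuming each processor is a callable over keyword arguments (the real class lives in lmnet.data_processor and differs in detail):

class Sequence:
    """Apply a list of processors in order, feeding each one's output to the next."""
    def __init__(self, processors):
        self.processors = processors

    def __call__(self, **kwargs):
        for processor in self.processors:
            kwargs = processor(**kwargs)
        return kwargs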
Code example #4
def test_sequence():
    batch_size = 3
    image_size = [256, 512]
    augmentor = Sequence([
        FlipLeftRight(is_bounding_box=True),
        FlipTopBottom(is_bounding_box=True),
        SSDRandomCrop(),
    ])

    dataset = LmThingsOnATable(
        batch_size=batch_size,
        pre_processor=ResizeWithGtBoxes(image_size),
        augmentor=augmentor,
    )

    for _ in range(5):
        images, labels = dataset.feed()
        _show_images_with_boxes(images, labels)
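
Note the is_bounding_box=True flag: in this revision of the augmentors it tells FlipLeftRight and FlipTopBottom that the labels carry box coordinates that must be mirrored together with the pixels. Examples #1 and #3 run the same flips on Pascalvoc2007 without the flag, which suggests later revisions handle box labels automatically.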
Code example #5
PRE_PROCESSOR = Sequence([Resize(size=IMAGE_SIZE), PerImageStandardization()])
POST_PROCESSOR = None

NETWORK = EasyDict()
NETWORK.OPTIMIZER_CLASS = tf.train.MomentumOptimizer
NETWORK.OPTIMIZER_KWARGS = {"momentum": 0.9}
NETWORK.LEARNING_RATE_FUNC = tf.train.piecewise_constant
NETWORK.LEARNING_RATE_KWARGS = {
    "values": [0.1, 0.01, 0.001, 0.0001],
    "boundaries": [40000, 60000, 80000],
}
NETWORK.IMAGE_SIZE = IMAGE_SIZE
NETWORK.BATCH_SIZE = BATCH_SIZE
NETWORK.DATA_FORMAT = DATA_FORMAT
NETWORK.WEIGHT_DECAY_RATE = 0.0001
NETWORK.ACTIVATION_QUANTIZER = linear_mid_tread_half_quantizer
NETWORK.ACTIVATION_QUANTIZER_KWARGS = {'bit': 2, 'max_value': 2}
NETWORK.WEIGHT_QUANTIZER = binary_mean_scaling_quantizer
NETWORK.WEIGHT_QUANTIZER_KWARGS = {}

# dataset
DATASET = EasyDict()
DATASET.BATCH_SIZE = BATCH_SIZE
DATASET.DATA_FORMAT = DATA_FORMAT
DATASET.PRE_PROCESSOR = PRE_PROCESSOR
DATASET.AUGMENTOR = Sequence([
    Pad(2),
    Crop(size=IMAGE_SIZE),
    FlipLeftRight(),
])
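
The LEARNING_RATE_FUNC / LEARNING_RATE_KWARGS pair above encodes a step schedule: with tf.train.piecewise_constant, the rate is 0.1 until step 40000, then 0.01, then 0.001, and finally 0.0001 after step 80000. A pure-Python equivalent, for illustration only:

def piecewise_constant(step, boundaries, values):
    """Return values[i] while step <= boundaries[i]; values[-1] past the last boundary."""
    for boundary, value in zip(boundaries, values):
        if step <= boundary:
            return value
    return values[-1]

# Matches the config above: 0.1 up to step 40000, 0.0001 after step 80000.
assert piecewise_constant(0, [40000, 60000, 80000], [0.1, 0.01, 0.001, 0.0001]) == 0.1
assert piecewise_constant(50000, [40000, 60000, 80000], [0.1, 0.01, 0.001, 0.0001]) == 0.01
assert piecewise_constant(90000, [40000, 60000, 80000], [0.1, 0.01, 0.001, 0.0001]) == 0.0001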
Code example #6
File: dataset_iterator.py Project: suttang/blueoil
        print("Shuffle {} train dataset with random state {}.".format(self.__class__.__name__, self.seed))
        print(random_indices[0:10])
        self.seed += 1
        return random_indices


if __name__ == '__main__':

    import time

    from lmnet.datasets.cifar10 import Cifar10
    from lmnet.data_processor import Sequence
    from lmnet.data_augmentor import FlipLeftRight, Hue, Blur

    cifar10 = Cifar10()

    augmentor = Sequence([
        FlipLeftRight(0.5),
        Hue((-10, 10)),
        Blur(),
    ])

    dataset_iterator = DatasetIterator(dataset=cifar10, enable_prefetch=True, augmentor=augmentor)
    time.sleep(2)  # let the prefetch worker warm its queue before timing
    t0 = time.time()
    data_batch = next(dataset_iterator)
    t1 = time.time()
    print("time of prefetch: {}".format(t1 - t0))

    dataset_iterator2 = DatasetIterator(dataset=cifar10, enable_prefetch=False, augmentor=augmentor)
    t0 = time.time()
    data_batch = next(dataset_iterator2)
    t1 = time.time()
    print("time without prefetch: {}".format(t1 - t0))
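
The timing comparison above is the point of enable_prefetch: batch preparation overlaps with whatever the consumer does between next() calls. A minimal sketch of the background-thread pattern behind it, with a hypothetical make_batch callable standing in for the dataset's feed(); this is illustrative, not blueoil's DatasetIterator:

import queue
import threading

class PrefetchIterator:
    """Fill a bounded queue from a worker thread so next() rarely blocks."""
    def __init__(self, make_batch, max_prefetch=8):
        self.queue = queue.Queue(maxsize=max_prefetch)
        self.make_batch = make_batch
        threading.Thread(target=self._fill, daemon=True).start()

    def _fill(self):
        while True:
            self.queue.put(self.make_batch())  # blocks when the queue is full

    def __next__(self):
        return self.queue.get()  # usually returns immediately once warmed up

    def __iter__(self):
        return self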
Code example #7
File: lm_fyolo_bdd100k.py Project: smilejx/blueoil
}
NETWORK.IMAGE_SIZE = IMAGE_SIZE
NETWORK.BATCH_SIZE = BATCH_SIZE
NETWORK.DATA_FORMAT = DATA_FORMAT
NETWORK.ANCHORS = anchors
NETWORK.OBJECT_SCALE = 5.0
NETWORK.NO_OBJECT_SCALE = 1.0
NETWORK.CLASS_SCALE = 1.0
NETWORK.COORDINATE_SCALE = 1.0
NETWORK.LOSS_IOU_THRESHOLD = 0.6
NETWORK.WEIGHT_DECAY_RATE = 0.0005
NETWORK.SCORE_THRESHOLD = score_threshold
NETWORK.NMS_IOU_THRESHOLD = nms_iou_threshold
NETWORK.NMS_MAX_OUTPUT_SIZE = nms_max_output_size
NETWORK.SEEN_THRESHOLD = 8000

# dataset
DATASET = EasyDict()
DATASET.BATCH_SIZE = BATCH_SIZE
DATASET.DATA_FORMAT = DATA_FORMAT
DATASET.PRE_PROCESSOR = PRE_PROCESSOR
DATASET.AUGMENTOR = Sequence([
    FlipLeftRight(is_bounding_box=True),
    Brightness((0.75, 1.25)),
    Color((0.75, 1.25)),
    Contrast((0.75, 1.25)),
    Hue((-10, 10)),
    SSDRandomCrop(min_crop_ratio=0.7),
])
DATASET.ENABLE_PREFETCH = True
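
SCORE_THRESHOLD, NMS_IOU_THRESHOLD, and NMS_MAX_OUTPUT_SIZE parameterize the detector's post-processing: discard low-confidence boxes, then greedily suppress overlapping ones. A minimal NumPy sketch of that filter, assuming [x1, y1, x2, y2] boxes; illustrative only, not blueoil's post-processor:

import numpy as np

def nms(boxes, scores, score_threshold, iou_threshold, max_output_size):
    """Greedy non-maximum suppression over [x1, y1, x2, y2] boxes."""
    keep = scores >= score_threshold           # drop low-confidence detections first
    boxes, scores = boxes[keep], scores[keep]
    order = np.argsort(scores)[::-1]           # highest score first
    selected = []
    while order.size > 0 and len(selected) < max_output_size:
        i = order[0]
        selected.append(i)
        # Intersection of the current box with every remaining candidate.
        xx1 = np.maximum(boxes[i, 0], boxes[order[1:], 0])
        yy1 = np.maximum(boxes[i, 1], boxes[order[1:], 1])
        xx2 = np.minimum(boxes[i, 2], boxes[order[1:], 2])
        yy2 = np.minimum(boxes[i, 3], boxes[order[1:], 3])
        inter = np.maximum(0.0, xx2 - xx1) * np.maximum(0.0, yy2 - yy1)
        area_i = (boxes[i, 2] - boxes[i, 0]) * (boxes[i, 3] - boxes[i, 1])
        areas = (boxes[order[1:], 2] - boxes[order[1:], 0]) * \
                (boxes[order[1:], 3] - boxes[order[1:], 1])
        iou = inter / (area_i + areas - inter)
        order = order[1:][iou <= iou_threshold]  # keep only boxes that overlap little
    return boxes[selected], scores[selected]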