Example #1
# distributed training
IS_DISTRIBUTION = False

# pretrain
IS_PRETRAIN = False
PRETRAIN_VARS = []
PRETRAIN_DIR = ""
PRETRAIN_FILE = ""

# for debug
# BATCH_SIZE = 2
# SUMMARISE_STEPS = 1
# IS_DEBUG = True

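# Preprocessing: resize inputs to IMAGE_SIZE, then scale pixel values
# from [0, 255] down to [0, 1].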
PRE_PROCESSOR = Sequence([
    Resize(size=IMAGE_SIZE),
    DivideBy255(),
])
POST_PROCESSOR = None

NETWORK = EasyDict()
NETWORK.OPTIMIZER_CLASS = tf.train.AdamOptimizer
NETWORK.OPTIMIZER_KWARGS = {"learning_rate": 0.001}
NETWORK.IMAGE_SIZE = IMAGE_SIZE
NETWORK.BATCH_SIZE = BATCH_SIZE
NETWORK.DATA_FORMAT = DATA_FORMAT
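# Quantization settings: activations are quantized to 2-bit values clipped
# to [0, 2]; weights are binarized with a per-layer mean scaling factor.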
NETWORK.ACTIVATION_QUANTIZER = linear_mid_tread_half_quantizer
NETWORK.ACTIVATION_QUANTIZER_KWARGS = {'bit': 2, 'max_value': 2}
NETWORK.WEIGHT_QUANTIZER = binary_mean_scaling_quantizer
NETWORK.WEIGHT_QUANTIZER_KWARGS = {}

DATASET = EasyDict()
Example #2
# SAVE_CHECKPOINT_STEPS = 2
# KEEP_CHECKPOINT_MAX = 5
# TEST_STEPS = 10
# SUMMARISE_STEPS = 2

# pretrain
IS_PRETRAIN = False
PRETRAIN_VARS = []
PRETRAIN_DIR = ""
PRETRAIN_FILE = ""

# distributed training
IS_DISTRIBUTION = False

PRE_PROCESSOR = Sequence([
    DivideBy255(),
])
POST_PROCESSOR = None

NETWORK = EasyDict()
NETWORK.OPTIMIZER_CLASS = tf.train.AdamOptimizer
NETWORK.OPTIMIZER_KWARGS = {"beta1": 0.9, "beta2": 0.999}
NETWORK.LEARNING_RATE_FUNC = tf.train.piecewise_constant
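# tf.train.piecewise_constant expects len(values) == len(boundaries) + 1:
# the first value applies before the first boundary, the last after the last.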
NETWORK.LEARNING_RATE_KWARGS = {
    "values": [0.0000125, 0.0001, 0.00005, 0.000025, 0.0000125, 0.00000625],
    "boundaries": [50000, 400000, 600000, 800000, 1000000],
}
NETWORK.WEIGHT_DECAY_RATE = 0.0004
NETWORK.IMAGE_SIZE = IMAGE_SIZE
NETWORK.BATCH_SIZE = BATCH_SIZE
NETWORK.DATA_FORMAT = DATA_FORMAT
Example #3
IS_DISTRIBUTION = False

# for debug
# IS_DEBUG = True
# SUMMARISE_STEPS = 1
# SUMMARISE_STEPS = 100
# TEST_STEPS = 10000

# pretrain
IS_PRETRAIN = False
PRETRAIN_VARS = []
PRETRAIN_DIR = ""
PRETRAIN_FILE = ""

PRE_PROCESSOR = Sequence([ResizeWithGtBoxes(size=IMAGE_SIZE), DivideBy255()])
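# Prior boxes as (width, height) in output-grid cells; these appear to be
# the five standard YOLOv2 anchors for VOC.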
anchors = [(1.3221, 1.73145), (3.19275, 4.00944), (5.05587, 8.09892),
           (9.47112, 4.84053), (11.2364, 10.0071)]
score_threshold = 0.05
nms_iou_threshold = 0.5
nms_max_output_size = 100
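# Post-processing: decode the raw YOLOv2 output into boxes, drop boxes
# scoring below score_threshold, then apply non-maximum suppression.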
POST_PROCESSOR = Sequence([
    FormatYoloV2(
        image_size=IMAGE_SIZE,
        classes=CLASSES,
        anchors=anchors,
        data_format=DATA_FORMAT,
    ),
    ExcludeLowScoreBox(threshold=score_threshold),
    NMS(
        iou_threshold=nms_iou_threshold,
Example #4
TEST_STEPS = 1000
SUMMARISE_STEPS = 1000

# pretrain
IS_PRETRAIN = False
PRETRAIN_VARS = []
PRETRAIN_DIR = ""
PRETRAIN_FILE = ""

# for debug
# BATCH_SIZE = 2
# SUMMARISE_STEPS = 1
# IS_DEBUG = True

PRE_PROCESSOR = Sequence([
    Resize(size=IMAGE_SIZE),
    PerImageStandardization(),
])
POST_PROCESSOR = Sequence([
    Bilinear(size=IMAGE_SIZE,
             data_format=DATA_FORMAT,
             compatible_tensorflow_v1=True),
    Softmax(),
])

NETWORK = EasyDict()
NETWORK.OPTIMIZER_CLASS = tf.train.AdamOptimizer
NETWORK.OPTIMIZER_KWARGS = {"learning_rate": 0.001}
NETWORK.IMAGE_SIZE = IMAGE_SIZE
NETWORK.BATCH_SIZE = BATCH_SIZE
NETWORK.DATA_FORMAT = DATA_FORMAT
NETWORK.WEIGHT_DECAY_RATE = 0.
Example #5
# distributed training
IS_DISTRIBUTION = False

# pretrain
IS_PRETRAIN = False
PRETRAIN_VARS = []
PRETRAIN_DIR = ""
PRETRAIN_FILE = ""

# for debug
# BATCH_SIZE = 2
# SUMMARISE_STEPS = 1
# IS_DEBUG = True

PRE_PROCESSOR = Sequence([Resize(size=IMAGE_SIZE), DivideBy255()])
POST_PROCESSOR = None

NETWORK = EasyDict()
NETWORK.OPTIMIZER_CLASS = tf.train.AdamOptimizer
NETWORK.OPTIMIZER_KWARGS = {"learning_rate": 0.001}
NETWORK.IMAGE_SIZE = IMAGE_SIZE
NETWORK.BATCH_SIZE = BATCH_SIZE
NETWORK.DATA_FORMAT = DATA_FORMAT
NETWORK.ACTIVATION_QUANTIZER = linear_mid_tread_half_quantizer
NETWORK.ACTIVATION_QUANTIZER_KWARGS = {'bit': 2, 'max_value': 2}
NETWORK.WEIGHT_QUANTIZER = binary_mean_scaling_quantizer
NETWORK.WEIGHT_QUANTIZER_KWARGS = {}

DATASET = EasyDict()
DATASET.BATCH_SIZE = BATCH_SIZE
Example #6
IS_PRETRAIN = False
PRETRAIN_VARS = []
PRETRAIN_DIR = ""
PRETRAIN_FILE = ""

# for debug
# MAX_STEPS = 10
# BATCH_SIZE = 31
# SAVE_CHECKPOINT_STEPS = 2
# KEEP_CHECKPOINT_MAX = 5
# TEST_STEPS = 10
# SUMMARISE_STEPS = 2
# IS_DEBUG = True

PRE_PROCESSOR = Sequence([
    Resize(size=IMAGE_SIZE),
    PerImageStandardization(),
])
POST_PROCESSOR = None

NETWORK = EasyDict()
NETWORK.OPTIMIZER_CLASS = tf.train.MomentumOptimizer
NETWORK.OPTIMIZER_KWARGS = {"momentum": 0.9}
NETWORK.LEARNING_RATE_FUNC = tf.train.piecewise_constant
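# step_per_epoch assumes a 50,000-image training set (e.g. CIFAR-10) and a
# batch size of 200; the boundaries fall at epochs 200, 300 and 350.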
step_per_epoch = int(50000 / 200)
NETWORK.LEARNING_RATE_KWARGS = {
    "values": [0.01, 0.001, 0.0001, 0.00001],
    "boundaries":
    [step_per_epoch * 200, step_per_epoch * 300, step_per_epoch * 350],
}
NETWORK.IMAGE_SIZE = IMAGE_SIZE
NETWORK.BATCH_SIZE = BATCH_SIZE
Example #7
        print(random_indices[0:10])
        self.seed += 1
        return random_indices


if __name__ == '__main__':

    from lmnet.datasets.cifar10 import Cifar10
    from lmnet.data_processor import Sequence
    from lmnet.data_augmentor import FlipLeftRight, Hue, Blur

    cifar10 = Cifar10()

    augmentor = Sequence([
        FlipLeftRight(0.5),
        Hue((-10, 10)),
        Blur(),
    ])

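    # With enable_prefetch=True, batches are prepared by a background
    # worker, so the first next() call should return almost immediately.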
    dataset_iterator = DatasetIterator(dataset=cifar10,
                                       enable_prefetch=True,
                                       augmentor=augmentor)
    import time
    time.sleep(2)
    t0 = time.time()
    data_batch = next(dataset_iterator)
    t1 = time.time()
    print("time of prefetch: {}".format(t1 - t0))

    dataset_iterator2 = DatasetIterator(dataset=cifar10,
                                        enable_prefetch=False,
Example #8
CLASSES = DATASET_CLASS.classes

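# MAX_STEPS = 2 with frequent checkpoints suggests a minimal smoke-test
# configuration rather than a full training recipe.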
MAX_STEPS = 2
SAVE_CHECKPOINT_STEPS = 1
KEEP_CHECKPOINT_MAX = 5
TEST_STEPS = 100
SUMMARISE_STEPS = 100

# pretrain
IS_PRETRAIN = False
PRETRAIN_VARS = []
PRETRAIN_DIR = ""
PRETRAIN_FILE = ""

PRE_PROCESSOR = Sequence(
    [ResizeWithGtBoxes(IMAGE_SIZE),
     PerImageStandardization()])

anchors = [
    (0.5, 0.25),
    (1.0, 0.75),
]
POST_PROCESSOR = Sequence([
    FormatYoloV2(
        image_size=IMAGE_SIZE,
        classes=CLASSES,
        anchors=anchors,
        data_format=DATA_FORMAT,
    ),
    ExcludeLowScoreBox(threshold=0.05),
    NMS(
Example #9
TEST_STEPS = 1000
SUMMARISE_STEPS = 1000

# pretrain
IS_PRETRAIN = False
PRETRAIN_VARS = []
PRETRAIN_DIR = ""
PRETRAIN_FILE = ""

# for debug
# BATCH_SIZE = 2
# SUMMARISE_STEPS = 1
# IS_DEBUG = True

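# Per-image standardization only; there is no resize step, so the dataset
# presumably already yields images at the expected input size.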
PRE_PROCESSOR = Sequence([
    PerImageStandardization(),
])
POST_PROCESSOR = None

NETWORK = EasyDict()
NETWORK.OPTIMIZER_CLASS = tf.train.AdamOptimizer
NETWORK.OPTIMIZER_KWARGS = {"learning_rate": 0.001}
NETWORK.IMAGE_SIZE = IMAGE_SIZE
NETWORK.BATCH_SIZE = BATCH_SIZE
NETWORK.DATA_FORMAT = DATA_FORMAT

DATASET = EasyDict()
DATASET.BATCH_SIZE = BATCH_SIZE
DATASET.DATA_FORMAT = DATA_FORMAT
DATASET.PRE_PROCESSOR = PRE_PROCESSOR
Example #10
IS_PRETRAIN = False
PRETRAIN_VARS = []
PRETRAIN_DIR = ""
PRETRAIN_FILE = ""

# for debug
# BATCH_SIZE = 2
# SUMMARISE_STEPS = 1
# IS_DEBUG = True

# Stride of the output heatmap: a smaller stride gives a finer heatmap
# but slower inference.
STRIDE = 8

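# Encode ground-truth joints as Gaussian heatmaps at 1/STRIDE of the
# input resolution; pixel values are scaled to [0, 1].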
PRE_PROCESSOR = Sequence([
    ResizeWithJoints(image_size=IMAGE_SIZE),
    JointsToGaussianHeatmap(image_size=IMAGE_SIZE, stride=STRIDE, sigma=2),
    DivideBy255()
])
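# Inverse of the encoding above: heatmap peaks above confidence_threshold
# are decoded back into 2-D joint coordinates.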
POST_PROCESSOR = Sequence([
    GaussianHeatmapToJoints(num_dimensions=2,
                            stride=STRIDE,
                            confidence_threshold=0.1)
])

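# 149813 is presumably the training-set size; it converts epoch counts
# into optimizer steps for the learning-rate schedule below.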
step_per_epoch = 149813 // BATCH_SIZE

NETWORK = EasyDict()
NETWORK.OPTIMIZER_CLASS = tf.train.AdamOptimizer
NETWORK.OPTIMIZER_KWARGS = {}
NETWORK.LEARNING_RATE_FUNC = tf.train.piecewise_constant
NETWORK.LEARNING_RATE_KWARGS = {
Example #11
IMAGE_SIZE = [448, 448]
BATCH_SIZE = 4
DATA_FORMAT = "NHWC"
TASK = Tasks.CLASSIFICATION
CLASSES = DATASET_CLASS.classes

# for debug
# MAX_STEPS = 100
# SAVE_STEPS = 100
# TEST_STEPS = 10
# SUMMARISE_STEPS = 100
# IS_PRETRAIN = False
# IS_DEBUG = True

PRE_PROCESSOR = Sequence([
    Resize(size=IMAGE_SIZE),
    DivideBy255()
])
POST_PROCESSOR = None

NETWORK = EasyDict()
NETWORK.IMAGE_SIZE = IMAGE_SIZE
NETWORK.BATCH_SIZE = BATCH_SIZE
NETWORK.DATA_FORMAT = DATA_FORMAT

# dataset
DATASET = EasyDict()
DATASET.BATCH_SIZE = BATCH_SIZE
DATASET.DATA_FORMAT = DATA_FORMAT
DATASET.PRE_PROCESSOR = PRE_PROCESSOR