Example #1
def _load_yaml(config_file):
    with file_io.File(config_file) as config_file_stream:
        config = yaml.load(config_file_stream, Loader=yaml.Loader)

    # Use only uppercase keys.
    keys = [key for key in config.keys() if key.isupper()]
    config_dict = {key: config[key] for key in keys}
    config = SmartDict(config_dict)
    return config
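A minimal standalone sketch of the same uppercase-key filtering, using PyYAML directly (for illustration, file_io.File and SmartDict from the example above are replaced with io.StringIO and a plain dict):

import io
import yaml

# Parse a small config and keep only the uppercase keys, as _load_yaml does.
stream = io.StringIO("BATCH_SIZE: 8\nlearning_rate: 0.1\n")
config = yaml.load(stream, Loader=yaml.Loader)
config = {key: value for key, value in config.items() if key.isupper()}
assert config == {"BATCH_SIZE": 8}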
Example #2
def test_classification():
    """Verify just image is changed."""
    input_image = PIL.Image.new("RGB", size=(100, 200))
    results = np.array([0.1, 0.3, 0.4, 0.2])
    config = SmartDict({"CLASSES": ["a", "b", "c", "d"]})

    result_image = visualize_classification(np.array(input_image), results,
                                            config)

    assert not np.all(np.array(input_image) == np.array(result_image))
Example #3
def _load_py(config_file):
    config = {}
    with file_io.File(config_file) as config_file_stream:
        source = config_file_stream.read()
        exec(source, globals(), config)

    # Use only uppercase keys.
    return SmartDict({
        key: value
        for key, value in config.items()
        if key.isupper()
    })
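The exec-based loader above can be sketched without file_io at all; a minimal illustration of the same mechanics with an in-memory source string (the names in it are hypothetical):

# Execute the config source into a scratch namespace, then keep only the
# uppercase names, mirroring _load_py above.
source = "IMAGE_SIZE = [224, 224]\n_helper = 42\n"
namespace = {}
exec(source, globals(), namespace)
config = {key: value for key, value in namespace.items() if key.isupper()}
assert config == {"IMAGE_SIZE": [224, 224]}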
Example #4
def test_semantic_segmentation():
    """Verify just image is changed."""
    input_image = PIL.Image.new("RGB", size=(100, 200))
    results = np.random.random_sample(size=(64, 64, 4))
    config = SmartDict({
        "IMAGE_SIZE": (64, 64),
        "CLASSES": ["a", "b", "c", "d"]
    })

    result_image = visualize_semantic_segmentation(np.array(input_image),
                                                   results, config)

    assert not np.all(np.array(input_image) == np.array(result_image))
Example #5
def test_object_detection():
    """Verify just image is changed."""
    input_image = PIL.Image.new("RGB", size=(100, 200))
    results = np.array([[32, 20, 10, 5, 2, 0.5], [2, 4, 2, 4, 1, 0.5]])
    config = SmartDict({
        "IMAGE_SIZE": (64, 64),
        "CLASSES": ["a", "b", "c", "d"]
    })

    result_image = visualize_object_detection(np.array(input_image), results,
                                              config)

    assert not np.all(np.array(input_image) == np.array(result_image))
Example #6
def merge(base_config, override_config):
    """merge config.

    Return: merged config (SmartDict object).
    """

    result = SmartDict(base_config)
    for k, v in override_config.items():
        if type(v) is SmartDict:
            v = merge(base_config[k], override_config[k])

        result[k] = v

    return result
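Because merge() recurses whenever the override value is itself a SmartDict, nested keys missing from the override survive from the base. A plain-dict sketch of the same behavior (assuming SmartDict behaves like a dict subclass, as its use here suggests):

def merge_plain(base, override):
    # Same algorithm as merge() above, with isinstance checks on plain dicts.
    result = dict(base)
    for key, value in override.items():
        if isinstance(value, dict):
            value = merge_plain(base[key], value)
        result[key] = value
    return result

merged = merge_plain({"a": 1, "nest": {"b": 2, "c": 3}}, {"nest": {"b": 20}})
assert merged == {"a": 1, "nest": {"b": 20, "c": 3}}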
Example #7
def test_merge():

    base_config = SmartDict({"a": "aa", "nest": SmartDict({"b": "bb", "c": "cc"}), "d": "dd"})
    override_config = SmartDict({"a": "_a", "nest": SmartDict({"b": "_b"})})

    expected = SmartDict({"a": "_a", "nest": SmartDict({"b": "_b", "c": "cc"}), "d": "dd"})

    config = config_util.merge(base_config, override_config)
    assert config == expected
Example #8
def load_yaml(config_file):
    """Load meta.yaml that is output when the model is converted.

    Args:
        config_file (str): Path of the configuration file.

    Returns:
        SmartDict: Dictionary object of loaded configuration file.

    Examples:
        >>> config = load_yaml("/path/of/meta.yaml")
    """
    with open(config_file) as config_file_stream:
        config = yaml.load(config_file_stream, Loader=yaml.Loader)
    # Use only uppercase keys.
    return SmartDict({k: v for k, v in config.items() if k.isupper()})
Example #9
IS_DEBUG = False

IMAGE_SIZE = [448, 448]
BATCH_SIZE = 4
DATA_FORMAT = "NHWC"
TASK = Tasks.CLASSIFICATION
CLASSES = DATASET_CLASS.classes

# for debug
# MAX_STEPS = 100
# SAVE_CHECKPOINT_STEPS = 100
# TEST_STEPS = 10
# SUMMARISE_STEPS = 100
# IS_PRETRAIN = False
# IS_DEBUG = True

PRE_PROCESSOR = Sequence([Resize(size=IMAGE_SIZE), DivideBy255()])
POST_PROCESSOR = None

NETWORK = SmartDict()
NETWORK.IMAGE_SIZE = IMAGE_SIZE
NETWORK.BATCH_SIZE = BATCH_SIZE
NETWORK.DATA_FORMAT = DATA_FORMAT

# dataset
DATASET = SmartDict()
DATASET.BATCH_SIZE = BATCH_SIZE
DATASET.DATA_FORMAT = DATA_FORMAT
DATASET.PRE_PROCESSOR = PRE_PROCESSOR
Example #10
MAX_STEPS = 100000
SAVE_CHECKPOINT_STEPS = 1000
KEEP_CHECKPOINT_MAX = 5
TEST_STEPS = 1000
SUMMARISE_STEPS = 100
# pretrain
IS_PRETRAIN = False
PRETRAIN_VARS = []
PRETRAIN_DIR = ""
PRETRAIN_FILE = ""

PRE_PROCESSOR = Sequence([Resize(size=IMAGE_SIZE), DivideBy255()])
POST_PROCESSOR = None

NETWORK = SmartDict()
NETWORK.OPTIMIZER_CLASS = tf.compat.v1.train.MomentumOptimizer
NETWORK.OPTIMIZER_KWARGS = {"momentum": 0.9}
NETWORK.LEARNING_RATE_FUNC = tf.compat.v1.train.cosine_decay
# The number of training samples is 28709.
step_per_epoch = 28709 // BATCH_SIZE
NETWORK.LEARNING_RATE_KWARGS = {'learning_rate': 0.1, 'decay_steps': 100000}
NETWORK.IMAGE_SIZE = IMAGE_SIZE
NETWORK.BATCH_SIZE = BATCH_SIZE
NETWORK.DATA_FORMAT = DATA_FORMAT
NETWORK.WEIGHT_DECAY_RATE = 0.0005
NETWORK.ACTIVATION_QUANTIZER = linear_mid_tread_half_quantizer
NETWORK.ACTIVATION_QUANTIZER_KWARGS = {'bit': 2, 'max_value': 2}
NETWORK.WEIGHT_QUANTIZER = binary_mean_scaling_quantizer
NETWORK.WEIGHT_QUANTIZER_KWARGS = {}
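For reference, a pure-Python sketch of the value tf.compat.v1.train.cosine_decay produces for the LEARNING_RATE_KWARGS above (alpha left at its default of 0; this mirrors the published formula, not the project's code):

import math

def cosine_decay(step, learning_rate=0.1, decay_steps=100000):
    # learning_rate * 0.5 * (1 + cos(pi * step / decay_steps)),
    # clamped at decay_steps.
    t = min(step, decay_steps) / decay_steps
    return learning_rate * 0.5 * (1 + math.cos(math.pi * t))

assert cosine_decay(0) == 0.1
assert cosine_decay(100000) == 0.0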
Example #11
PRETRAIN_DIR = ""
PRETRAIN_FILE = ""

PRE_PROCESSOR = Sequence([Resize(size=IMAGE_SIZE), PerImageStandardization()])
POST_PROCESSOR = None

# for debug
# MAX_STEPS = 10
# BATCH_SIZE = 31
# SAVE_CHECKPOINT_STEPS = 2
# KEEP_CHECKPOINT_MAX = 5
# TEST_STEPS = 10
# SUMMARISE_STEPS = 2
# IS_DEBUG = True

NETWORK = SmartDict()
NETWORK.OPTIMIZER_CLASS = tf.compat.v1.train.MomentumOptimizer
NETWORK.OPTIMIZER_KWARGS = {"momentum": 0.9}
NETWORK.LEARNING_RATE_FUNC = tf.compat.v1.train.piecewise_constant
step_per_epoch = 50000 // 200
NETWORK.LEARNING_RATE_KWARGS = {
    "values": [0.01, 0.001, 0.0001, 0.00001],
    "boundaries":
    [step_per_epoch * 200, step_per_epoch * 300, step_per_epoch * 350],
}
NETWORK.IMAGE_SIZE = IMAGE_SIZE
NETWORK.BATCH_SIZE = BATCH_SIZE
NETWORK.WEIGHT_DECAY_RATE = 0.0005

# dataset
DATASET = SmartDict()
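The piecewise_constant schedule above holds each value until its boundary is passed; with step_per_epoch = 50000 // 200 = 250, the boundaries fall at epochs 200, 300, and 350. A hypothetical pure-Python mirror of the boundary semantics:

def piecewise_lr(step, boundaries, values):
    # values[0] up to boundaries[0], values[i] between boundaries[i-1]
    # and boundaries[i], values[-1] after the last boundary.
    for boundary, value in zip(boundaries, values):
        if step <= boundary:
            return value
    return values[-1]

step_per_epoch = 50000 // 200
boundaries = [step_per_epoch * 200, step_per_epoch * 300, step_per_epoch * 350]
values = [0.01, 0.001, 0.0001, 0.00001]
assert piecewise_lr(0, boundaries, values) == 0.01
assert piecewise_lr(step_per_epoch * 250, boundaries, values) == 0.001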
Example #12
    "conv4/bias:",
    "conv5/kernel:",
    "conv5/bias:",
    "conv6/kernel:",
    "conv6/bias:",
]
PRETRAIN_DIR = "saved/lmnet_0.01_caltech101/checkpoints"
PRETRAIN_FILE = "save.ckpt-99001"

PRE_PROCESSOR = Sequence([
    Resize(size=IMAGE_SIZE),
    PerImageStandardization()
])
POST_PROCESSOR = None

NETWORK = SmartDict()
NETWORK.OPTIMIZER_CLASS = tf.compat.v1.train.AdamOptimizer
NETWORK.OPTIMIZER_KWARGS = {"learning_rate": 0.001}
NETWORK.IMAGE_SIZE = IMAGE_SIZE
NETWORK.BATCH_SIZE = BATCH_SIZE
NETWORK.DATA_FORMAT = DATA_FORMAT
NETWORK.WEIGHT_DECAY_RATE = 0.0005
NETWORK.ACTIVATION_QUANTIZER = linear_mid_tread_half_quantizer
NETWORK.ACTIVATION_QUANTIZER_KWARGS = {
    'bit': 2,
    'max_value': 2
}
NETWORK.WEIGHT_QUANTIZER = binary_mean_scaling_quantizer
NETWORK.WEIGHT_QUANTIZER_KWARGS = {}

# dataset
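linear_mid_tread_half_quantizer with {'bit': 2, 'max_value': 2} recurs throughout these configs. As a rough sketch of what a 2-bit mid-tread linear quantizer does in general (an assumption about the technique, not the project's exact implementation): clip the input to [0, max_value] and round it onto a grid of 2**bit representable values.

def linear_mid_tread_quantize(x, bit=2, max_value=2.0):
    # Clip to [0, max_value], then round onto a grid of 2**bit values
    # spaced max_value / (2**bit - 1) apart (mid-tread: zero is on the grid).
    n = 2 ** bit - 1
    x = min(max(x, 0.0), max_value)
    return round(x * n / max_value) * max_value / n

assert linear_mid_tread_quantize(-1.0) == 0.0
assert linear_mid_tread_quantize(0.9) == 2.0 / 3.0  # nearest of {0, 2/3, 4/3, 2}
assert linear_mid_tread_quantize(5.0) == 2.0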
Example #13
def test_training():
    """Test only no error raised."""
    config = SmartDict()

    config.NETWORK_CLASS = Darknet
    config.DATASET_CLASS = Dummy

    config.IS_DEBUG = False
    config.IMAGE_SIZE = [28, 14]
    config.BATCH_SIZE = 2
    config.TEST_STEPS = 1
    config.MAX_STEPS = 2
    config.SAVE_CHECKPOINT_STEPS = 1
    config.KEEP_CHECKPOINT_MAX = 5
    config.SUMMARISE_STEPS = 1
    config.IS_PRETRAIN = False
    config.TASK = Tasks.CLASSIFICATION

    # network model config
    config.NETWORK = SmartDict()
    config.NETWORK.OPTIMIZER_CLASS = tf.train.AdamOptimizer
    config.NETWORK.OPTIMIZER_KWARGS = {"learning_rate": 0.001}
    config.NETWORK.IMAGE_SIZE = config.IMAGE_SIZE
    config.NETWORK.BATCH_SIZE = config.BATCH_SIZE

    # dataset config
    config.DATASET = SmartDict()
    config.DATASET.PRE_PROCESSOR = Resize(config.IMAGE_SIZE)
    config.DATASET.BATCH_SIZE = config.BATCH_SIZE

    environment.init("test_darknet")
    prepare_dirs(recreate=True)
    start_training(config, profile_step=1)
Example #14
PRETRAIN_VARS = []
PRETRAIN_DIR = ""
PRETRAIN_FILE = ""

# for debug
# MAX_STEPS = 10
# BATCH_SIZE = 31
# SAVE_CHECKPOINT_STEPS = 2
# TEST_STEPS = 10
# SUMMARISE_STEPS = 2
# IS_DEBUG = True

PRE_PROCESSOR = Sequence([Resize(size=IMAGE_SIZE), DivideBy255()])
POST_PROCESSOR = None

NETWORK = SmartDict()
NETWORK.OPTIMIZER_CLASS = tf.compat.v1.train.MomentumOptimizer
NETWORK.OPTIMIZER_KWARGS = {"momentum": 0.9}
NETWORK.LEARNING_RATE_FUNC = tf.compat.v1.train.polynomial_decay
# TODO(wakiska): This is the same as in the original YOLOv2 paper (batch size = 128).
NETWORK.LEARNING_RATE_KWARGS = {
    "learning_rate": 1e-1,
    "decay_steps": 1600000,
    "power": 4.0,
    "end_learning_rate": 0.0
}
NETWORK.IMAGE_SIZE = IMAGE_SIZE
NETWORK.BATCH_SIZE = BATCH_SIZE
NETWORK.DATA_FORMAT = DATA_FORMAT
NETWORK.WEIGHT_DECAY_RATE = 0.0005
NETWORK.ACTIVATION_QUANTIZER = linear_mid_tread_half_quantizer
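The polynomial_decay kwargs above anneal the learning rate from 1e-1 to 0 over 1.6M steps with power 4.0. A hypothetical pure-Python mirror of tf.compat.v1.train.polynomial_decay's formula (cycle=False), for illustration:

def polynomial_decay(step, learning_rate=1e-1, decay_steps=1600000,
                     power=4.0, end_learning_rate=0.0):
    # (lr - end_lr) * (1 - step / decay_steps) ** power + end_lr
    step = min(step, decay_steps)
    frac = 1 - step / decay_steps
    return (learning_rate - end_learning_rate) * frac ** power + end_learning_rate

assert polynomial_decay(0) == 0.1
assert polynomial_decay(1600000) == 0.0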
Example #15
nms_max_output_size = 100
POST_PROCESSOR = Sequence([
    FormatYoloV2(
        image_size=IMAGE_SIZE,
        classes=CLASSES,
        anchors=anchors,
        data_format=DATA_FORMAT,
    ),
    ExcludeLowScoreBox(threshold=score_threshold),
    NMS(
        iou_threshold=nms_iou_threshold,
        max_output_size=nms_max_output_size,
        classes=CLASSES,
    ),
])

NETWORK = SmartDict()
NETWORK.IMAGE_SIZE = IMAGE_SIZE
NETWORK.BATCH_SIZE = BATCH_SIZE
NETWORK.DATA_FORMAT = DATA_FORMAT
NETWORK.ANCHORS = anchors
NETWORK.SCORE_THRESHOLD = score_threshold
NETWORK.NMS_IOU_THRESHOLD = nms_iou_threshold
NETWORK.NMS_MAX_OUTPUT_SIZE = nms_max_output_size

# dataset
DATASET = SmartDict()
DATASET.BATCH_SIZE = BATCH_SIZE
DATASET.DATA_FORMAT = DATA_FORMAT
DATASET.PRE_PROCESSOR = PRE_PROCESSOR
Example #16
# for debug
# BATCH_SIZE = 2
# SUMMARISE_STEPS = 1
# IS_DEBUG = True

PRE_PROCESSOR = Sequence([
    Resize(size=IMAGE_SIZE),
    PerImageStandardization(),
])
POST_PROCESSOR = Sequence([
    Bilinear(size=IMAGE_SIZE, data_format=DATA_FORMAT, compatible_tensorflow_v1=True),
    Softmax(),
])


NETWORK = SmartDict()
NETWORK.OPTIMIZER_CLASS = tf.compat.v1.train.AdamOptimizer
NETWORK.OPTIMIZER_KWARGS = {"learning_rate": 0.001}
NETWORK.IMAGE_SIZE = IMAGE_SIZE
NETWORK.BATCH_SIZE = BATCH_SIZE
NETWORK.DATA_FORMAT = DATA_FORMAT
NETWORK.WEIGHT_DECAY_RATE = 0.
NETWORK.AUXILIARY_LOSS_WEIGHT = 0.5
NETWORK.USE_FEATURE_FUSION = True
NETWORK.USE_ATTENTION_REFINEMENT = True
NETWORK.USE_TAIL_GAP = True
NETWORK.ACTIVATION_QUANTIZER = linear_mid_tread_half_quantizer
NETWORK.ACTIVATION_QUANTIZER_KWARGS = {
    'bit': 2,
    'max_value': 2
}
Example #17
    ]),
    'learning_rate':
    hp.uniform('learning_rate', 0, 0.01),
    'learning_rate_func':
    hp.choice('learning_rate_func', [
        {
            'scheduler': tf.compat.v1.train.piecewise_constant,
            'scheduler_factor': hp.uniform('scheduler_factor', 0.05, 0.5),
            'scheduler_steps': [25000, 50000, 75000],
        },
    ]),
    'weight_decay_rate':
    0.0001,
}

NETWORK = SmartDict()
NETWORK.OPTIMIZER_CLASS = None
NETWORK.OPTIMIZER_KWARGS = {}
NETWORK.LEARNING_RATE_FUNC = None
NETWORK.LEARNING_RATE_KWARGS = {}
NETWORK.WEIGHT_DECAY_RATE = None
NETWORK.IMAGE_SIZE = IMAGE_SIZE
NETWORK.BATCH_SIZE = BATCH_SIZE
NETWORK.DATA_FORMAT = DATA_FORMAT
NETWORK.ACTIVATION_QUANTIZER = linear_mid_tread_half_quantizer
NETWORK.ACTIVATION_QUANTIZER_KWARGS = {'bit': 2, 'max_value': 2}
NETWORK.WEIGHT_QUANTIZER = binary_mean_scaling_quantizer
NETWORK.WEIGHT_QUANTIZER_KWARGS = {}

# dataset
DATASET = SmartDict()
Example #18
IS_PRETRAIN = False
PRETRAIN_VARS = []
PRETRAIN_DIR = ""
PRETRAIN_FILE = ""

# for debug
# BATCH_SIZE = 2
# SUMMARISE_STEPS = 1
# IS_DEBUG = True

PRE_PROCESSOR = Sequence([
    PerImageStandardization(),
])
POST_PROCESSOR = None

NETWORK = SmartDict()
NETWORK.OPTIMIZER_CLASS = tf.compat.v1.train.AdamOptimizer
NETWORK.OPTIMIZER_KWARGS = {"learning_rate": 0.001}
NETWORK.IMAGE_SIZE = IMAGE_SIZE
NETWORK.BATCH_SIZE = BATCH_SIZE
NETWORK.DATA_FORMAT = DATA_FORMAT

DATASET = SmartDict()
DATASET.BATCH_SIZE = BATCH_SIZE
DATASET.DATA_FORMAT = DATA_FORMAT
DATASET.PRE_PROCESSOR = PRE_PROCESSOR

DATASET.AUGMENTOR = Sequence([
    Brightness((0.75, 1.25)),
    Color((0.75, 1.25)),
    Contrast((0.75, 1.25)),
Example #19
            'optimizer': tf.compat.v1.train.AdamOptimizer,
        },
    ]),
    'learning_rate':
    hp.uniform('learning_rate', 0, 0.01),
    'learning_rate_func':
    hp.choice('learning_rate_func', [
        {
            'scheduler': tf.compat.v1.train.piecewise_constant,
            'scheduler_factor': 1.0,
            'scheduler_steps': [25000, 50000, 75000],
        },
    ]),
}

NETWORK = SmartDict()
NETWORK.OPTIMIZER_CLASS = None
NETWORK.OPTIMIZER_KWARGS = {}
NETWORK.LEARNING_RATE_FUNC = None
NETWORK.LEARNING_RATE_KWARGS = {}
NETWORK.IMAGE_SIZE = IMAGE_SIZE
NETWORK.BATCH_SIZE = BATCH_SIZE
NETWORK.DATA_FORMAT = DATA_FORMAT
NETWORK.ACTIVATION_QUANTIZER = linear_mid_tread_half_quantizer
NETWORK.ACTIVATION_QUANTIZER_KWARGS = {'bit': 2, 'max_value': 2}
NETWORK.WEIGHT_QUANTIZER = binary_mean_scaling_quantizer
NETWORK.WEIGHT_QUANTIZER_KWARGS = {}

DATASET = SmartDict()
DATASET.BATCH_SIZE = BATCH_SIZE
DATASET.DATA_FORMAT = DATA_FORMAT
Example #20
]
POST_PROCESSOR = Sequence([
    FormatYoloV2(
        image_size=IMAGE_SIZE,
        classes=CLASSES,
        anchors=anchors,
        data_format=DATA_FORMAT,
    ),
    ExcludeLowScoreBox(threshold=0.05),
    NMS(
        iou_threshold=0.5,
        classes=CLASSES,
    ),
])

NETWORK = SmartDict()
NETWORK.OPTIMIZER_CLASS = tf.train.AdamOptimizer
NETWORK.OPTIMIZER_KWARGS = {"learning_rate": 0.001}
NETWORK.IMAGE_SIZE = IMAGE_SIZE
NETWORK.BATCH_SIZE = BATCH_SIZE
NETWORK.DATA_FORMAT = DATA_FORMAT
NETWORK.ANCHORS = anchors
NETWORK.WEIGHT_DECAY_RATE = 0.0005
NETWORK.ACTIVATION_QUANTIZER = linear_mid_tread_half_quantizer
NETWORK.ACTIVATION_QUANTIZER_KWARGS = {'bit': 2, 'max_value': 2}
NETWORK.WEIGHT_QUANTIZER = binary_mean_scaling_quantizer
NETWORK.WEIGHT_QUANTIZER_KWARGS = {}

# dataset
DATASET = SmartDict()
DATASET.BATCH_SIZE = BATCH_SIZE
Example #21
def test_training():
    """Verify only that no error raised."""
    config = SmartDict()

    config.NETWORK_CLASS = LMBiSeNet
    config.DATASET_CLASS = DummyCamvid

    config.IS_DEBUG = False
    config.IMAGE_SIZE = [128, 160]
    config.BATCH_SIZE = 2
    config.TEST_STEPS = 1
    config.MAX_STEPS = 2
    config.SAVE_CHECKPOINT_STEPS = 1
    config.KEEP_CHECKPOINT_MAX = 5
    config.SUMMARISE_STEPS = 1
    config.IS_PRETRAIN = False
    config.TASK = Tasks.SEMANTIC_SEGMENTATION

    # network model config
    config.NETWORK = SmartDict()
    config.NETWORK.OPTIMIZER_CLASS = tf.train.AdamOptimizer
    config.NETWORK.OPTIMIZER_KWARGS = {"learning_rate": 0.001}
    config.NETWORK.IMAGE_SIZE = config.IMAGE_SIZE
    config.NETWORK.BATCH_SIZE = config.BATCH_SIZE
    config.NETWORK.DATA_FORMAT = "NHWC"

    # dataset config
    config.DATASET = SmartDict()
    config.DATASET.PRE_PROCESSOR = Resize(config.IMAGE_SIZE)
    config.DATASET.BATCH_SIZE = config.BATCH_SIZE
    config.DATASET.DATA_FORMAT = "NHWC"

    environment.init("test_lm_bisenet")
    prepare_dirs(recreate=True)
    start_training(config, profile_step=1)
Example #22
score_threshold = 0.05
nms_iou_threshold = 0.5
nms_max_output_size = 100
POST_PROCESSOR = Sequence([
    FormatYoloV2(
        image_size=IMAGE_SIZE,
        classes=CLASSES,
        anchors=anchors,
        data_format=DATA_FORMAT,
    ),
    ExcludeLowScoreBox(threshold=score_threshold),
    NMS(iou_threshold=nms_iou_threshold,
        max_output_size=nms_max_output_size, classes=CLASSES,),
])

NETWORK = SmartDict()
NETWORK.OPTIMIZER_CLASS = tf.compat.v1.train.MomentumOptimizer
NETWORK.OPTIMIZER_KWARGS = {"momentum": 0.9}
NETWORK.LEARNING_RATE_FUNC = tf.compat.v1.train.piecewise_constant
# In the original YOLOv2 paper, the starting learning rate is 1e-3, divided by 10 at 60 and 90 epochs.
# The number of training samples per epoch is 69863.
step_per_epoch = 69863 // BATCH_SIZE

NETWORK.LEARNING_RATE_KWARGS = {
    "values": [1e-4, 2e-3, 5e-4, 5e-5, 5e-6],
    "boundaries": [
        step_per_epoch,
        step_per_epoch * 60,
        step_per_epoch * 90,
        step_per_epoch * 120,
    ],
Example #23
def test_training():
    """Test only no error raised."""

    config = SmartDict()

    config.NETWORK_CLASS = YoloV2Quantize
    config.DATASET_CLASS = Pascalvoc2007

    config.IS_DEBUG = False
    config.IMAGE_SIZE = [128, 160]
    config.BATCH_SIZE = 2
    config.TEST_STEPS = 1
    config.MAX_STEPS = 2
    config.SAVE_CHECKPOINT_STEPS = 1
    config.KEEP_CHECKPOINT_MAX = 5
    config.SUMMARISE_STEPS = 1
    config.IS_PRETRAIN = False
    config.TASK = Tasks.OBJECT_DETECTION

    # network model config
    config.NETWORK = SmartDict()
    config.NETWORK.OPTIMIZER_CLASS = tf.train.AdamOptimizer
    config.NETWORK.OPTIMIZER_KWARGS = {"learning_rate": 0.001}
    config.NETWORK.IMAGE_SIZE = config.IMAGE_SIZE
    config.NETWORK.BATCH_SIZE = config.BATCH_SIZE
    config.NETWORK.ACTIVATION_QUANTIZER = linear_mid_tread_half_quantizer
    config.NETWORK.ACTIVATION_QUANTIZER_KWARGS = {'bit': 2, 'max_value': 2.0}
    config.NETWORK.WEIGHT_QUANTIZER = binary_channel_wise_mean_scaling_quantizer
    config.NETWORK.WEIGHT_QUANTIZER_KWARGS = {}

    # dataset config
    config.DATASET = SmartDict()
    config.DATASET.PRE_PROCESSOR = ResizeWithGtBoxes(config.IMAGE_SIZE)
    config.DATASET.BATCH_SIZE = config.BATCH_SIZE

    environment.init("test_yolov_2_quantize")
    prepare_dirs(recreate=True)
    start_training(config, profile_step=1)
Example #24
def test_training():
    """Test only no error raised."""

    config = SmartDict()

    config.NETWORK_CLASS = LmSinglePoseV1Quantize
    config.DATASET_CLASS = MscocoSinglePersonKeypoints

    config.IS_DEBUG = False
    config.IMAGE_SIZE = [160, 160]
    config.BATCH_SIZE = 2
    config.TEST_STEPS = 1
    config.MAX_STEPS = 2
    config.SAVE_CHECKPOINT_STEPS = 1
    config.KEEP_CHECKPOINT_MAX = 5
    config.SUMMARISE_STEPS = 1
    config.IS_PRETRAIN = False
    config.IS_DISTRIBUTION = False
    config.TASK = Tasks.KEYPOINT_DETECTION

    # network model config
    config.NETWORK = SmartDict()
    config.NETWORK.OPTIMIZER_CLASS = tf.train.AdamOptimizer
    config.NETWORK.OPTIMIZER_KWARGS = {"learning_rate": 0.001}
    config.NETWORK.IMAGE_SIZE = config.IMAGE_SIZE
    config.NETWORK.BATCH_SIZE = config.BATCH_SIZE
    config.NETWORK.ACTIVATION_QUANTIZER = linear_mid_tread_half_quantizer
    config.NETWORK.ACTIVATION_QUANTIZER_KWARGS = {'bit': 2, 'max_value': 2.0}
    config.NETWORK.WEIGHT_QUANTIZER = binary_channel_wise_mean_scaling_quantizer
    config.NETWORK.WEIGHT_QUANTIZER_KWARGS = {}

    # dataset config
    config.DATASET = SmartDict()
    config.DATASET.PRE_PROCESSOR = Sequence([
        ResizeWithJoints(image_size=config.IMAGE_SIZE),
        JointsToGaussianHeatmap(image_size=config.IMAGE_SIZE, stride=2),
        DivideBy255()
    ])
    config.DATASET.BATCH_SIZE = config.BATCH_SIZE

    environment.init("test_lm_single_pose_v1")
    prepare_dirs(recreate=True)
    start_training(config, profile_step=1)
Example #25
PRETRAIN_VARS = []
PRETRAIN_DIR = ""
PRETRAIN_FILE = ""

# for debug
# BATCH_SIZE = 2
# SUMMARISE_STEPS = 1
# IS_DEBUG = True

PRE_PROCESSOR = Sequence([
    Resize(size=IMAGE_SIZE),
    DivideBy255(),
])
POST_PROCESSOR = None

NETWORK = SmartDict()
NETWORK.OPTIMIZER_CLASS = tf.compat.v1.train.AdamOptimizer
NETWORK.OPTIMIZER_KWARGS = {"learning_rate": 0.001}
NETWORK.IMAGE_SIZE = IMAGE_SIZE
NETWORK.BATCH_SIZE = BATCH_SIZE
NETWORK.DATA_FORMAT = DATA_FORMAT
NETWORK.ACTIVATION_QUANTIZER = linear_mid_tread_half_quantizer
NETWORK.ACTIVATION_QUANTIZER_KWARGS = {'bit': 2, 'max_value': 2}
NETWORK.WEIGHT_QUANTIZER = binary_mean_scaling_quantizer
NETWORK.WEIGHT_QUANTIZER_KWARGS = {}

DATASET = SmartDict()
DATASET.BATCH_SIZE = BATCH_SIZE
DATASET.DATA_FORMAT = DATA_FORMAT
DATASET.PRE_PROCESSOR = PRE_PROCESSOR
DATASET.AUGMENTOR = Sequence([
Example #26
def test_training():
    """Test only that no error raised."""
    config = SmartDict()

    config.NETWORK_CLASS = SampleNetworkQuantize
    config.DATASET_CLASS = Dummy

    config.IS_DEBUG = False
    config.IMAGE_SIZE = [32, 32]
    config.BATCH_SIZE = 2
    config.TEST_STEPS = 1
    config.MAX_STEPS = 2
    config.SAVE_CHECKPOINT_STEPS = 1
    config.KEEP_CHECKPOINT_MAX = 5
    config.SUMMARISE_STEPS = 1
    config.IS_PRETRAIN = False
    config.TASK = Tasks.CLASSIFICATION

    # network model config
    config.NETWORK = SmartDict()
    config.NETWORK.OPTIMIZER_CLASS = tf.train.AdamOptimizer
    config.NETWORK.OPTIMIZER_KWARGS = {"learning_rate": 0.001}
    config.NETWORK.IMAGE_SIZE = config.IMAGE_SIZE
    config.NETWORK.BATCH_SIZE = config.BATCH_SIZE
    config.NETWORK.ACTIVATION_QUANTIZER = linear_mid_tread_half_quantizer
    config.NETWORK.ACTIVATION_QUANTIZER_KWARGS = {'bit': 2, 'max_value': 2}
    config.NETWORK.WEIGHT_QUANTIZER = binary_mean_scaling_quantizer
    config.NETWORK.WEIGHT_QUANTIZER_KWARGS = {}
    config.NETWORK.DATA_FORMAT = "NHWC"

    # dataset config
    config.DATASET = SmartDict()
    config.DATASET.PRE_PROCESSOR = Resize(config.IMAGE_SIZE)
    config.DATASET.BATCH_SIZE = config.BATCH_SIZE
    config.DATASET.DATA_FORMAT = "NHWC"

    environment.init("test_example_quantize")
    prepare_dirs(recreate=True)
    start_training(config, profile_step=1)
Example #27
# for debug
# MAX_STEPS = 100
# # BATCH_SIZE = 31
# SAVE_CHECKPOINT_STEPS = 10
# KEEP_CHECKPOINT_MAX = 5
# TEST_STEPS = 10
# SUMMARISE_STEPS = 2
# IS_DEBUG = True

PRE_PROCESSOR = Sequence([
    Resize(size=IMAGE_SIZE),
    PerImageStandardization()
])
POST_PROCESSOR = None

NETWORK = SmartDict()
NETWORK.OPTIMIZER_CLASS = tf.compat.v1.train.MomentumOptimizer
NETWORK.OPTIMIZER_KWARGS = {"momentum": 0.9}
NETWORK.LEARNING_RATE_FUNC = tf.compat.v1.train.piecewise_constant
NETWORK.LEARNING_RATE_KWARGS = {
    "values": [0.1, 0.01, 0.001, 0.0001],
    "boundaries": [40000, 60000, 80000],
}
NETWORK.IMAGE_SIZE = IMAGE_SIZE
NETWORK.BATCH_SIZE = BATCH_SIZE
NETWORK.DATA_FORMAT = DATA_FORMAT
NETWORK.WEIGHT_DECAY_RATE = 0.0001
NETWORK.ACTIVATION_QUANTIZER = linear_mid_tread_half_quantizer
NETWORK.ACTIVATION_QUANTIZER_KWARGS = {
    'bit': 2,
    'max_value': 2
Example #28
# for debug
# MAX_STEPS = 100
# # BATCH_SIZE = 31
# SAVE_CHECKPOINT_STEPS = 10
# TEST_STEPS = 10
# SUMMARISE_STEPS = 2
# IS_DEBUG = True

PRE_PROCESSOR = Sequence([
    Resize(size=IMAGE_SIZE),
    PerImageStandardization()
])
POST_PROCESSOR = None

NETWORK = SmartDict()
NETWORK.OPTIMIZER_CLASS = tf.compat.v1.train.MomentumOptimizer
NETWORK.OPTIMIZER_KWARGS = {"momentum": 0.9}
NETWORK.LEARNING_RATE_FUNC = tf.compat.v1.train.piecewise_constant
NETWORK.LEARNING_RATE_KWARGS = {
    "values": [0.1, 0.01, 0.001, 0.0001],
    "boundaries": [40000, 60000, 80000],
}
NETWORK.IMAGE_SIZE = IMAGE_SIZE
NETWORK.BATCH_SIZE = BATCH_SIZE
NETWORK.DATA_FORMAT = DATA_FORMAT

# dataset
DATASET = SmartDict()
DATASET.BATCH_SIZE = BATCH_SIZE
DATASET.DATA_FORMAT = DATA_FORMAT
Example #29
POST_PROCESSOR = Sequence([
    FormatYoloV2(
        image_size=IMAGE_SIZE,
        classes=CLASSES,
        anchors=anchors,
        data_format=DATA_FORMAT,
    ),
    ExcludeLowScoreBox(threshold=score_threshold),
    NMS(
        iou_threshold=nms_iou_threshold,
        max_output_size=nms_max_output_size,
        classes=CLASSES,
    ),
])

NETWORK = SmartDict()
NETWORK.OPTIMIZER_CLASS = tf.compat.v1.train.MomentumOptimizer
NETWORK.OPTIMIZER_KWARGS = {"momentum": 0.9}
NETWORK.LEARNING_RATE_FUNC = tf.compat.v1.train.piecewise_constant
_epoch_steps = 16551 // BATCH_SIZE
NETWORK.LEARNING_RATE_KWARGS = {
    "values": [1e-6, 1e-4, 1e-5, 1e-6, 1e-7],
    "boundaries":
    [_epoch_steps, _epoch_steps * 10, _epoch_steps * 60, _epoch_steps * 90],
}
NETWORK.IMAGE_SIZE = IMAGE_SIZE
NETWORK.BATCH_SIZE = BATCH_SIZE
NETWORK.DATA_FORMAT = DATA_FORMAT
NETWORK.ANCHORS = anchors
NETWORK.OBJECT_SCALE = 5.0
NETWORK.NO_OBJECT_SCALE = 1.0
Example #30
# distributed training
IS_DISTRIBUTION = False

# pretrain
IS_PRETRAIN = False
PRETRAIN_VARS = []
PRETRAIN_DIR = ""
PRETRAIN_FILE = ""

PRE_PROCESSOR = Sequence([
    Resize(size=IMAGE_SIZE),
    DivideBy255(),
])
POST_PROCESSOR = None

NETWORK = SmartDict()
NETWORK.OPTIMIZER_CLASS = tf.compat.v1.train.AdamOptimizer
NETWORK.OPTIMIZER_KWARGS = {"learning_rate": 0.001}
NETWORK.IMAGE_SIZE = IMAGE_SIZE
NETWORK.BATCH_SIZE = BATCH_SIZE
NETWORK.DATA_FORMAT = DATA_FORMAT
NETWORK.ACTIVATION_QUANTIZER = linear_mid_tread_half_quantizer
NETWORK.ACTIVATION_QUANTIZER_KWARGS = {'bit': 2, 'max_value': 2}
NETWORK.WEIGHT_QUANTIZER = binary_mean_scaling_quantizer
NETWORK.WEIGHT_QUANTIZER_KWARGS = {}

DATASET = SmartDict()
DATASET.BATCH_SIZE = BATCH_SIZE
DATASET.DATA_FORMAT = DATA_FORMAT
DATASET.PRE_PROCESSOR = PRE_PROCESSOR
DATASET.AUGMENTOR = Sequence([