Code example #1
def test_training():
    """Test only no error raised."""

    config = EasyDict()

    config.NETWORK_CLASS = LmSinglePoseV1Quantize
    config.DATASET_CLASS = MscocoSinglePersonKeypoints

    config.IS_DEBUG = False
    config.IMAGE_SIZE = [160, 160]
    config.BATCH_SIZE = 2
    config.TEST_STEPS = 1
    config.MAX_STEPS = 2
    config.SAVE_CHECKPOINT_STEPS = 1
    config.KEEP_CHECKPOINT_MAX = 5
    config.SUMMARISE_STEPS = 1
    config.IS_PRETRAIN = False
    config.IS_DISTRIBUTION = False
    config.TASK = Tasks.KEYPOINT_DETECTION

    # network model config
    config.NETWORK = EasyDict()
    config.NETWORK.OPTIMIZER_CLASS = tf.train.AdamOptimizer
    config.NETWORK.OPTIMIZER_KWARGS = {"learning_rate": 0.001}
    config.NETWORK.IMAGE_SIZE = config.IMAGE_SIZE
    config.NETWORK.BATCH_SIZE = config.BATCH_SIZE
    config.NETWORK.ACTIVATION_QUANTIZER = linear_mid_tread_half_quantizer
    config.NETWORK.ACTIVATION_QUANTIZER_KWARGS = {'bit': 2, 'max_value': 2.0}
    config.NETWORK.WEIGHT_QUANTIZER = binary_channel_wise_mean_scaling_quantizer
    config.NETWORK.WEIGHT_QUANTIZER_KWARGS = {}

    # dataset config
    config.DATASET = EasyDict()
    config.DATASET.PRE_PROCESSOR = Sequence([
        ResizeWithJoints(image_size=config.IMAGE_SIZE),
        JointsToGaussianHeatmap(image_size=config.IMAGE_SIZE, stride=2),
        DivideBy255()
    ])
    config.DATASET.BATCH_SIZE = config.BATCH_SIZE

    environment.init("test_lm_single_pose_v1")
    prepare_dirs(recreate=True)
    start_training(config)
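
For reference, a minimal sketch of the imports this test would need. The module paths are assumptions inferred from the identifiers used (the quantizers, environment, prepare_dirs, and start_training come from the lmnet/Blueoil code base) and should be adjusted to the actual package layout:

# NOTE: import paths below are assumptions, not verified against the repository.
import tensorflow as tf
from easydict import EasyDict

from lmnet.common import Tasks
from lmnet.data_processor import Sequence
from lmnet.pre_processor import DivideBy255, JointsToGaussianHeatmap, ResizeWithJoints
from lmnet.quantizations import (
    binary_channel_wise_mean_scaling_quantizer,
    linear_mid_tread_half_quantizer,
)
from lmnet.networks.keypoint_detection.lm_single_pose_v1 import LmSinglePoseV1Quantize
from lmnet.datasets.mscoco_single_person_keypoints import MscocoSinglePersonKeypoints
from lmnet import environment
from lmnet.utils.executor import prepare_dirs
from executor.train import start_training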
Code example #2
SUMMARISE_STEPS = 100

# for debug
# IS_DEBUG = True
# SUMMARISE_STEPS = 1
# SUMMARISE_STEPS = 100
# TEST_STEPS = 10000

# pretrain
IS_PRETRAIN = False
PRETRAIN_VARS = []
PRETRAIN_DIR = ""
PRETRAIN_FILE = ""

PRE_PROCESSOR = Sequence([ResizeWithGtBoxes(size=IMAGE_SIZE), DivideBy255()])
anchors = [(1.3221, 1.73145), (3.19275, 4.00944), (5.05587, 8.09892),
           (9.47112, 4.84053), (11.2364, 10.0071)]
score_threshold = 0.05
nms_iou_threshold = 0.5
nms_max_output_size = 100
POST_PROCESSOR = Sequence([
    FormatYoloV2(
        image_size=IMAGE_SIZE,
        classes=CLASSES,
        anchors=anchors,
        data_format=DATA_FORMAT,
    ),
    ExcludeLowScoreBox(threshold=score_threshold),
    NMS(
        iou_threshold=nms_iou_threshold,
        max_output_size=nms_max_output_size,
        classes=CLASSES,
    ),
])
Code example #3
PRETRAIN_DIR = ""
PRETRAIN_FILE = ""

# distributed training
IS_DISTRIBUTION = True
num_worker = 4

# for debug
# MAX_STEPS = 10
# BATCH_SIZE = 31
# SAVE_STEPS = 2
# TEST_STEPS = 10
# SUMMARISE_STEPS = 2
# IS_DEBUG = True

PRE_PROCESSOR = Sequence([Resize(size=IMAGE_SIZE), DivideBy255()])
POST_PROCESSOR = None

NETWORK = EasyDict()
NETWORK.OPTIMIZER_CLASS = tf.train.MomentumOptimizer
NETWORK.OPTIMIZER_KWARGS = {"momentum": 0.9}
NETWORK.LEARNING_RATE_FUNC = tf.train.piecewise_constant
step_per_epoch = int(50000 / (BATCH_SIZE * num_worker))
NETWORK.LEARNING_RATE_KWARGS = {
    "values": ([
        0.01 * num_worker, 0.001 * num_worker, 0.0001 * num_worker,
        0.00001 * num_worker
    ]),
    "boundaries":
    [step_per_epoch * 200, step_per_epoch * 300, step_per_epoch * 350],
}
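
For illustration, a small sketch of how this schedule resolves to step boundaries. BATCH_SIZE = 16 is an assumed value here (it is defined elsewhere in the config), so the concrete step counts are only examples:

import tensorflow as tf

# Assumed value for illustration only; BATCH_SIZE is defined elsewhere in the config.
BATCH_SIZE = 16
num_worker = 4
step_per_epoch = int(50000 / (BATCH_SIZE * num_worker))  # 781 steps per 50k-image epoch

global_step = tf.train.get_or_create_global_step()
# piecewise_constant keeps the first value until the first boundary, then switches:
# 0.04 until step 781*200, 0.004 until 781*300, 0.0004 until 781*350, 0.00004 afterwards.
learning_rate = tf.train.piecewise_constant(
    global_step,
    boundaries=[step_per_epoch * 200, step_per_epoch * 300, step_per_epoch * 350],
    values=[0.01 * num_worker, 0.001 * num_worker, 0.0001 * num_worker, 0.00001 * num_worker],
)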
Code example #4
MAX_STEPS = 150000
SAVE_CHECKPOINT_STEPS = 3000
KEEP_CHECKPOINT_MAX = 5
TEST_STEPS = 1000
SUMMARISE_STEPS = 1000

# pretrain
IS_PRETRAIN = False
PRETRAIN_VARS = []
PRETRAIN_DIR = ""
PRETRAIN_FILE = ""

PRE_PROCESSOR = Sequence([
    Resize(size=IMAGE_SIZE),
    DivideBy255(),
])
POST_PROCESSOR = None

TUNE_SPEC = {
    'run': 'tunable',
    'resources_per_trial': {
        "cpu": 2,
        "gpu": 1
    },
    'stop': {
        'mean_accuracy': 1.0,
        'training_iteration': 200,
    },
    'config': {
        'lm_config': {},
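
TUNE_SPEC has the shape of a Ray Tune experiment spec ('run', 'resources_per_trial', 'stop', 'config'). A hedged sketch of how such a spec is typically consumed, assuming an older ray.tune API and a hypothetical experiment name:

import ray
from ray import tune

# Hypothetical usage; "lm_tuning" is an invented experiment name, and the spec above
# is truncated, so this only illustrates the general pattern.
ray.init()
tune.run_experiments({"lm_tuning": TUNE_SPEC})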
Code example #5

# pretrain
IS_PRETRAIN = False
PRETRAIN_VARS = []
PRETRAIN_DIR = ""
PRETRAIN_FILE = ""

# for debug
# BATCH_SIZE = 2
# SUMMARISE_STEPS = 1
# IS_DEBUG = True

PRE_PROCESSOR = Sequence([
    Resize(size=IMAGE_SIZE),
    {% if quantize_first_convolution %}DivideBy255(){% else %}PerImageStandardization(){% endif %}
])
POST_PROCESSOR = None

NETWORK = EasyDict()
NETWORK.OPTIMIZER_CLASS = {{optimizer_class}}
NETWORK.OPTIMIZER_KWARGS = {{optimizer_kwargs}}
NETWORK.LEARNING_RATE_FUNC = {{learning_rate_func}}
NETWORK.LEARNING_RATE_KWARGS = {{learning_rate_kwargs}}

NETWORK.IMAGE_SIZE = IMAGE_SIZE
NETWORK.BATCH_SIZE = BATCH_SIZE
NETWORK.DATA_FORMAT = DATA_FORMAT
NETWORK.ACTIVATION_QUANTIZER = linear_mid_tread_half_quantizer
NETWORK.ACTIVATION_QUANTIZER_KWARGS = {
    'bit': 2,
    'max_value': 2.0
}
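
This last example is a config template rather than plain Python: the {{ ... }} and {% if ... %} markers are Jinja2 placeholders that are filled in before the config is used. A minimal sketch of rendering such a template, assuming Jinja2 and using hypothetical parameter values and file names:

from jinja2 import Template

# All parameter values and file names below are assumptions for illustration.
params = {
    "optimizer_class": "tf.train.AdamOptimizer",
    "optimizer_kwargs": "{'learning_rate': 0.001}",
    "learning_rate_func": "None",
    "learning_rate_kwargs": "{}",
    "quantize_first_convolution": True,
}

with open("config.py.tpl") as f:       # hypothetical template file name
    rendered = Template(f.read()).render(**params)

with open("config.py", "w") as f:      # write the rendered, plain-Python config
    f.write(rendered)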