Example #1
import numpy as np

def test_divide_255():
    # DivideBy255 comes from the project's pre-processor module.
    orig_image = np.ones((1, 1, 3))
    expect = np.array([[[0.00392157, 0.00392157, 0.00392157]]])

    pre_processor = DivideBy255()
    processed = pre_processor(image=orig_image)
    processed_image = processed["image"]

    assert isinstance(processed_image, np.ndarray)
    assert processed_image.shape[:2] == orig_image.shape[:2]
    assert np.allclose(expect, processed_image)
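
The test above pins down the processor's contract: it is called with keyword arguments, returns a dict keyed by "image", and scales pixel values into [0, 1] (1/255 ≈ 0.00392157). A minimal sketch satisfying that contract might look like the following; this is an illustration, not the project's actual implementation.

class DivideBy255:
    """Scale uint8-range pixel values into [0, 1]."""

    def __call__(self, image=None, **kwargs):
        # Divide every channel by 255 and pass any other sample
        # fields (labels, joints, ...) through untouched.
        kwargs["image"] = image / 255.0
        return kwargs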
Example #2
def test_training():
    """Test only no error raised."""

    config = EasyDict()

    config.NETWORK_CLASS = LmSinglePoseV1Quantize
    config.DATASET_CLASS = MscocoSinglePersonKeypoints

    config.IS_DEBUG = False
    config.IMAGE_SIZE = [160, 160]
    config.BATCH_SIZE = 2
    config.TEST_STEPS = 1
    config.MAX_STEPS = 2
    config.SAVE_CHECKPOINT_STEPS = 1
    config.KEEP_CHECKPOINT_MAX = 5
    config.SUMMARISE_STEPS = 1
    config.IS_PRETRAIN = False
    config.IS_DISTRIBUTION = False
    config.TASK = Tasks.KEYPOINT_DETECTION

    # network model config
    config.NETWORK = EasyDict()
    config.NETWORK.OPTIMIZER_CLASS = tf.train.AdamOptimizer
    config.NETWORK.OPTIMIZER_KWARGS = {"learning_rate": 0.001}
    config.NETWORK.IMAGE_SIZE = config.IMAGE_SIZE
    config.NETWORK.BATCH_SIZE = config.BATCH_SIZE
    config.NETWORK.ACTIVATION_QUANTIZER = linear_mid_tread_half_quantizer
    config.NETWORK.ACTIVATION_QUANTIZER_KWARGS = {
        'bit': 2,
        'max_value': 2.0
    }
    config.NETWORK.WEIGHT_QUANTIZER = binary_channel_wise_mean_scaling_quantizer
    config.NETWORK.WEIGHT_QUANTIZER_KWARGS = {}

    # dataset config
    config.DATASET = EasyDict()
    config.DATASET.PRE_PROCESSOR = Sequence([
        ResizeWithJoints(image_size=config.IMAGE_SIZE),
        JointsToGaussianHeatmap(image_size=config.IMAGE_SIZE, stride=2),
        DivideBy255()])
    config.DATASET.BATCH_SIZE = config.BATCH_SIZE

    environment.init("test_lm_single_pose_v1")
    prepare_dirs(recreate=True)
    start_training(config, profile_step=1)
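
Example #2 composes several processors with Sequence. Judging from the dict-in/dict-out convention established in Example #1, a Sequence can be sketched as a simple fold over its processors; again an assumption for illustration, not the framework's actual code.

class Sequence:
    """Apply processors in order, threading the sample dict through."""

    def __init__(self, processors):
        self.processors = processors

    def __call__(self, **kwargs):
        # Each processor consumes the sample dict and returns an
        # updated one for the next stage.
        for processor in self.processors:
            kwargs = processor(**kwargs)
        return kwargs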
Example #3
TASK = Tasks.OBJECT_DETECTION
CLASSES = DATASET_CLASS.classes

KEEP_CHECKPOINT_MAX = 5
MAX_EPOCHS = 100
SAVE_CHECKPOINT_STEPS = 100
TEST_STEPS = 100
SUMMARISE_STEPS = 10

# pretrain
IS_PRETRAIN = False
PRETRAIN_VARS = []
PRETRAIN_DIR = ""
PRETRAIN_FILE = ""

PRE_PROCESSOR = Sequence([ResizeWithGtBoxes(size=IMAGE_SIZE), DivideBy255()])
anchors = [(1.3221, 1.73145), (3.19275, 4.00944), (5.05587, 8.09892),
           (9.47112, 4.84053), (11.2364, 10.0071)]
score_threshold = 0.05
nms_iou_threshold = 0.5
nms_max_output_size = 100
POST_PROCESSOR = Sequence([
    FormatYoloV2(
        image_size=IMAGE_SIZE,
        classes=CLASSES,
        anchors=anchors,
        data_format=DATA_FORMAT,
    ),
    ExcludeLowScoreBox(threshold=score_threshold),
    NMS(
        iou_threshold=nms_iou_threshold,
        max_output_size=nms_max_output_size,
        classes=CLASSES,
    ),
])
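
The two post-processing knobs act in sequence: boxes scoring below score_threshold are dropped first, then NMS suppresses overlapping survivors whose IoU with a higher-scoring box exceeds nms_iou_threshold. Conceptually, the first stage amounts to (a hand-rolled sketch, not the library's implementation):

import numpy as np

def exclude_low_score_boxes(boxes, scores, threshold=0.05):
    # Keep only boxes whose confidence reaches the threshold;
    # NMS then runs on this reduced set, capped at
    # nms_max_output_size surviving boxes.
    keep = scores >= threshold
    return boxes[keep], scores[keep]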
Example #4
MAX_STEPS = 100000
SAVE_CHECKPOINT_STEPS = 5000
KEEP_CHECKPOINT_MAX = 5
TEST_STEPS = 1000
SUMMARISE_STEPS = 100

# pretrain
IS_PRETRAIN = False
PRETRAIN_VARS = []
PRETRAIN_DIR = ""
PRETRAIN_FILE = ""

PRE_PROCESSOR = Sequence([
    Resize(size=IMAGE_SIZE),
    DivideBy255(),
])
POST_PROCESSOR = None

STEP_PER_EPOCH = 50000 // BATCH_SIZE

TUNE_SPEC = {
    'run': 'tunable',
    'resources_per_trial': {
        "cpu": 2,
        "gpu": 0.5
    },
    'stop': {
        'mean_accuracy': 0.87,
        'training_iteration': 200,
    },
}
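
For reference, STEP_PER_EPOCH above uses floor division, so any partial final batch is dropped. With a hypothetical BATCH_SIZE of 64 (the actual value is defined outside this snippet):

BATCH_SIZE = 64                       # hypothetical; defined elsewhere in the config
STEP_PER_EPOCH = 50000 // BATCH_SIZE  # floor division: 781, not 781.25
assert STEP_PER_EPOCH == 781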
Example #5
DATA_FORMAT = "NHWC"
TASK = Tasks.CLASSIFICATION
CLASSES = DATASET_CLASS.classes

MAX_STEPS = 100000
SAVE_CHECKPOINT_STEPS = 1000
KEEP_CHECKPOINT_MAX = 5
TEST_STEPS = 1000
SUMMARISE_STEPS = 100
# pretrain
IS_PRETRAIN = False
PRETRAIN_VARS = []
PRETRAIN_DIR = ""
PRETRAIN_FILE = ""

PRE_PROCESSOR = Sequence([Resize(size=IMAGE_SIZE), DivideBy255()])
POST_PROCESSOR = None

NETWORK = SmartDict()
NETWORK.OPTIMIZER_CLASS = tf.compat.v1.train.MomentumOptimizer
NETWORK.OPTIMIZER_KWARGS = {"momentum": 0.9}
NETWORK.LEARNING_RATE_FUNC = tf.compat.v1.train.cosine_decay
# The training set contains 28709 samples
step_per_epoch = 28709 // BATCH_SIZE
NETWORK.LEARNING_RATE_KWARGS = {'learning_rate': 0.1, 'decay_steps': 100000}
NETWORK.IMAGE_SIZE = IMAGE_SIZE
NETWORK.BATCH_SIZE = BATCH_SIZE
NETWORK.DATA_FORMAT = DATA_FORMAT
NETWORK.WEIGHT_DECAY_RATE = 0.0005
NETWORK.ACTIVATION_QUANTIZER = linear_mid_tread_half_quantizer
NETWORK.ACTIVATION_QUANTIZER_KWARGS = {'bit': 2, 'max_value': 2}
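
The schedule above pairs tf.compat.v1.train.cosine_decay with decay_steps equal to MAX_STEPS, so the learning rate follows half a cosine from 0.1 down to 0 over the whole run. A NumPy re-derivation of that curve, matching TensorFlow's documented formula (treat it as a sketch):

import numpy as np

def cosine_decay(learning_rate, global_step, decay_steps, alpha=0.0):
    # Mirrors tf.compat.v1.train.cosine_decay: half a cosine from
    # learning_rate down to alpha * learning_rate over decay_steps.
    step = min(global_step, decay_steps)
    cosine = 0.5 * (1.0 + np.cos(np.pi * step / decay_steps))
    return learning_rate * ((1.0 - alpha) * cosine + alpha)

print(cosine_decay(0.1, 0, 100000))       # 0.1 at the start
print(cosine_decay(0.1, 50000, 100000))   # 0.05 halfway
print(cosine_decay(0.1, 100000, 100000))  # 0.0 at MAX_STEPS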
Example #6

# pretrain
IS_PRETRAIN = False
PRETRAIN_VARS = []
PRETRAIN_DIR = ""
PRETRAIN_FILE = ""

# for debug
# BATCH_SIZE = 2
# SUMMARISE_STEPS = 1
# IS_DEBUG = True

PRE_PROCESSOR = Sequence([
    Resize(size=IMAGE_SIZE),
    {% if quantize_first_convolution %}DivideBy255(){% else %}PerImageStandardization(){% endif %}
])
POST_PROCESSOR = None

NETWORK = SmartDict()
NETWORK.OPTIMIZER_CLASS = {{optimizer_class}}
NETWORK.OPTIMIZER_KWARGS = {{optimizer_kwargs}}
NETWORK.LEARNING_RATE_FUNC = {{learning_rate_func}}
NETWORK.LEARNING_RATE_KWARGS = {{learning_rate_kwargs}}

NETWORK.IMAGE_SIZE = IMAGE_SIZE
NETWORK.BATCH_SIZE = BATCH_SIZE
NETWORK.DATA_FORMAT = DATA_FORMAT
NETWORK.ACTIVATION_QUANTIZER = linear_mid_tread_half_quantizer
NETWORK.ACTIVATION_QUANTIZER_KWARGS = {
    'bit': 2,
    'max_value': 2.0
}
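
Example #6 is not plain Python but a Jinja2 template: the {{...}} placeholders and the {% if %} block are filled in by a generator before the config is used. A hedged sketch of how such a template could be rendered (the file name and the values passed to render() are hypothetical; since the placeholders are substituted into Python source text, they are supplied as code strings):

from jinja2 import Template

with open("config.py.tpl") as f:  # hypothetical template file name
    template = Template(f.read())

rendered = template.render(
    quantize_first_convolution=True,  # selects DivideBy255 over PerImageStandardization
    optimizer_class="tf.compat.v1.train.MomentumOptimizer",
    optimizer_kwargs='{"momentum": 0.9}',
    learning_rate_func="tf.compat.v1.train.cosine_decay",
    learning_rate_kwargs='{"learning_rate": 0.1, "decay_steps": 100000}',
)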