def test_quantize_training():
    """Smoke test: only checks that training runs without raising an error."""
    config = EasyDict()

    config.NETWORK_CLASS = FlowNetSV1Quantized
    config.DATASET_CLASS = FlyingChairs

    config.IS_DEBUG = False
    config.IMAGE_SIZE = [384, 512]
    config.BATCH_SIZE = 8
    config.TEST_STEPS = 1
    config.MAX_STEPS = 2
    config.SAVE_CHECKPOINT_STEPS = 1
    config.KEEP_CHECKPOINT_MAX = 5
    config.SUMMARISE_STEPS = 1
    config.IS_PRETRAIN = False
    config.IS_DISTRIBUTION = False

    # network model config
    config.NETWORK = EasyDict()
    config.NETWORK.OPTIMIZER_CLASS = tf.train.AdamOptimizer
    config.NETWORK.OPTIMIZER_KWARGS = {"learning_rate": 0.001}
    config.NETWORK.IMAGE_SIZE = config.IMAGE_SIZE
    config.NETWORK.BATCH_SIZE = config.BATCH_SIZE
    config.NETWORK.DATA_FORMAT = "NHWC"
    config.NETWORK.ACTIVATION_QUANTIZER = linear_mid_tread_half_quantizer
    config.NETWORK.ACTIVATION_QUANTIZER_KWARGS = {'bit': 2, 'max_value': 2.0}
    config.NETWORK.WEIGHT_QUANTIZER = binary_channel_wise_mean_scaling_quantizer
    config.NETWORK.WEIGHT_QUANTIZER_KWARGS = {}

    # dataset config
    config.DATASET = EasyDict()
    config.DATASET.PRE_PROCESSOR = None
    config.DATASET.BATCH_SIZE = config.BATCH_SIZE
    config.DATASET.DATA_FORMAT = "NHWC"
    config.DATASET.VALIDATION_RATE = 0.2
    config.DATASET.VALIDATION_SEED = 2019
    config.DATASET.AUGMENTOR = Sequence([
        # Geometric transformation
        FlipLeftRight(0.5),
        FlipTopBottom(0.5),
        Translate(-0.2, 0.2),
        Rotate(-17, +17),
        Scale(1.0, 2.0),
        # Pixel-wise augmentation
        Brightness(0.8, 1.2),
        Contrast(0.2, 1.4),
        Color(0.5, 2.0),
        Gamma(0.7, 1.5),
        # Hue(-128.0, 128.0),
        GaussianNoise(0.0, 10.0)
    ])
    config.DATASET.PRE_PROCESSOR = Sequence([
        DevideBy255(),
    ])

    environment.init("test_flownet_s_v1_quantize")
    prepare_dirs(recreate=True)
    start_training(config)
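# Illustrative sketch, not the library's implementation: the activation quantizer
# configured above (linear_mid_tread_half_quantizer with bit=2, max_value=2.0) is
# assumed here to clip activations to [0, max_value] and round them onto
# 2**bit - 1 uniform levels. The numpy helper below only demonstrates that
# assumed forward formula; the real quantizer also defines a straight-through
# gradient, which is omitted.
import numpy as np

def _mid_tread_half_quantize_sketch(x, bit=2, max_value=2.0):
    """Assumed forward pass of a linear mid-tread half quantizer."""
    n = 2 ** bit - 1                       # number of quantization steps
    clipped = np.clip(x, 0.0, max_value)   # keep only the non-negative range
    return np.round(clipped / max_value * n) / n * max_value

# Example: with bit=2 and max_value=2.0, values map onto {0.0, 2/3, 4/3, 2.0}.
print(_mid_tread_half_quantize_sketch(np.array([-0.5, 0.4, 1.1, 3.0])))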
# SAVE_CHECKPOINT_STEPS = 2
# KEEP_CHECKPOINT_MAX = 5
# TEST_STEPS = 10
# SUMMARISE_STEPS = 2

# pretrain
IS_PRETRAIN = False
PRETRAIN_VARS = []
PRETRAIN_DIR = ""
PRETRAIN_FILE = ""

# distributed training
IS_DISTRIBUTION = False

PRE_PROCESSOR = Sequence([
    DevideBy255(),
])
POST_PROCESSOR = None

NETWORK = EasyDict()
NETWORK.OPTIMIZER_CLASS = tf.train.AdamOptimizer
NETWORK.OPTIMIZER_KWARGS = {"beta1": 0.9, "beta2": 0.999}
NETWORK.LEARNING_RATE_FUNC = tf.train.piecewise_constant
NETWORK.LEARNING_RATE_KWARGS = {
    "values": [0.0001, 0.00005, 0.000025, 0.0000125, 0.00000625],
    "boundaries": [400000, 600000, 800000, 1000000],
}
NETWORK.WEIGHT_DECAY_RATE = 0.0004
NETWORK.IMAGE_SIZE = IMAGE_SIZE
NETWORK.BATCH_SIZE = BATCH_SIZE
NETWORK.DATA_FORMAT = DATA_FORMAT
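# Hedged sketch of how the schedule above behaves: tf.train.piecewise_constant
# keeps values[0] until the global step passes boundaries[0], then switches to
# values[1], and so on. The helper below is a plain-Python stand-in used only to
# show the step-to-learning-rate mapping for these boundaries/values; the exact
# inclusive/exclusive handling at a boundary step is TensorFlow's, not verified here.
def _piecewise_constant_sketch(step, boundaries, values):
    """Return the learning rate active at `step` for a piecewise-constant schedule."""
    for boundary, value in zip(boundaries, values):
        if step <= boundary:
            return value
    return values[-1]

_BOUNDARIES = [400000, 600000, 800000, 1000000]
_VALUES = [0.0001, 0.00005, 0.000025, 0.0000125, 0.00000625]
print(_piecewise_constant_sketch(300000, _BOUNDARIES, _VALUES))   # 0.0001
print(_piecewise_constant_sketch(700000, _BOUNDARIES, _VALUES))   # 0.000025
print(_piecewise_constant_sketch(1200000, _BOUNDARIES, _VALUES))  # 0.00000625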
# SAVE_CHECKPOINT_STEPS = 2
# KEEP_CHECKPOINT_MAX = 5
# TEST_STEPS = 10
# SUMMARISE_STEPS = 2

# pretrain
IS_PRETRAIN = False
PRETRAIN_VARS = []
PRETRAIN_DIR = ""
PRETRAIN_FILE = ""

# distributed training
IS_DISTRIBUTION = False

PRE_PROCESSOR = Sequence(
    [DevideBy255(), DiscretizeFlow(THRESHOLD_RADIUS, SPLIT_NUM)])
POST_PROCESSOR = None

NETWORK = EasyDict()
NETWORK.OPTIMIZER_CLASS = tf.train.AdamOptimizer
NETWORK.OPTIMIZER_KWARGS = {"beta1": 0.9, "beta2": 0.999}
NETWORK.LEARNING_RATE_FUNC = tf.train.piecewise_constant
NETWORK.LEARNING_RATE_KWARGS = {
    # "values": [0.0001, 0.00005, 0.000025, 0.0000125, 0.00000625],
    # "boundaries": [400000, 600000, 800000, 1000000],
    "values": [0.001, 0.0005, 0.00025, 0.000125, 0.0000625],
    "boundaries": [400000, 600000, 800000, 1000000],
}
NETWORK.WEIGHT_DECAY_RATE = 0.0004
NETWORK.IMAGE_SIZE = IMAGE_SIZE
NETWORK.BATCH_SIZE = BATCH_SIZE
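# Hedged sketch (assumed convention, not this repo's exact loss code): a
# WEIGHT_DECAY_RATE of 0.0004 is commonly applied as an L2 penalty added to the
# task loss, i.e. loss_total = loss_task + rate * sum(||W||^2 / 2) over the
# trainable kernels. The numpy helper below only illustrates that formula with
# dummy weight tensors.
import numpy as np

def _l2_weight_decay_sketch(task_loss, weights, decay_rate=0.0004):
    """Add an L2 penalty over all weight tensors to the task loss."""
    penalty = sum(np.sum(np.square(w)) / 2.0 for w in weights)
    return task_loss + decay_rate * penalty

dummy_weights = [np.ones((3, 3, 2, 4)), np.ones((8,))]  # hypothetical kernels for illustration
print(_l2_weight_decay_sketch(task_loss=1.25, weights=dummy_weights))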