Пример #1
0
def test_training():
    """Smoke test: run a tiny YoloV1 training job and verify no error is raised."""
    config = EasyDict()

    config.NETWORK_CLASS = YoloV1
    config.DATASET_CLASS = LmThingsOnATable

    # Minimal run: two steps on tiny images, checkpoint/summary every step.
    config.IS_DEBUG = False
    config.IMAGE_SIZE = [70, 70]
    config.BATCH_SIZE = 4
    config.TEST_STEPS = 1
    config.MAX_STEPS = 2
    config.SAVE_CHECKPOINT_STEPS = 1
    config.KEEP_CHECKPOINT_MAX = 5
    config.SUMMARISE_STEPS = 1
    config.IS_PRETRAIN = False
    config.TASK = Tasks.OBJECT_DETECTION

    # Network-level settings mirror the top-level image/batch settings.
    network = EasyDict()
    network.IMAGE_SIZE = config.IMAGE_SIZE
    network.BATCH_SIZE = config.BATCH_SIZE
    config.NETWORK = network

    # Dataset-level settings.
    dataset = EasyDict()
    dataset.PRE_PROCESSOR = ResizeWithGtBoxes(config.IMAGE_SIZE)
    dataset.BATCH_SIZE = config.BATCH_SIZE
    config.DATASET = dataset

    environment.init("test_yolov_1")
    prepare_dirs(recreate=True)
    start_training(config)
Пример #2
0
def test_training():
    """Smoke test: run a tiny YoloV2 training job and verify no error is raised."""
    config = EasyDict()

    config.NETWORK_CLASS = YoloV2
    config.DATASET_CLASS = LmThingsOnATable

    # Minimal run: two steps on small images, save/summarise every step.
    config.IS_DEBUG = False
    config.IMAGE_SIZE = [128, 160]
    config.BATCH_SIZE = 2
    config.TEST_STEPS = 1
    config.MAX_STEPS = 2
    config.SAVE_STEPS = 1
    config.SUMMARISE_STEPS = 1
    config.IS_PRETRAIN = False
    config.IS_DISTRIBUTION = False

    # Network-level settings (NCHW layout).
    network = EasyDict()
    network.OPTIMIZER_CLASS = tf.train.AdamOptimizer
    network.OPTIMIZER_KWARGS = {"learning_rate": 0.001}
    network.IMAGE_SIZE = config.IMAGE_SIZE
    network.BATCH_SIZE = config.BATCH_SIZE
    network.DATA_FORMAT = "NCHW"
    config.NETWORK = network

    # Dataset-level settings.
    dataset = EasyDict()
    dataset.PRE_PROCESSOR = ResizeWithGtBoxes(config.IMAGE_SIZE)
    dataset.BATCH_SIZE = config.BATCH_SIZE
    dataset.DATA_FORMAT = "NCHW"
    config.DATASET = dataset

    environment.init("test_yolo_v2")
    prepare_dirs(recreate=True)
    start_training(config)
Пример #3
0
def test_training():
    """Smoke test: run a tiny Darknet classification job and verify no error is raised."""
    config = EasyDict()

    config.NETWORK_CLASS = Darknet
    config.DATASET_CLASS = Dummy

    # Minimal run: two steps on tiny images, checkpoint/summary every step.
    config.IS_DEBUG = False
    config.IMAGE_SIZE = [28, 14]
    config.BATCH_SIZE = 2
    config.TEST_STEPS = 1
    config.MAX_STEPS = 2
    config.SAVE_CHECKPOINT_STEPS = 1
    config.KEEP_CHECKPOINT_MAX = 5
    config.SUMMARISE_STEPS = 1
    config.IS_PRETRAIN = False
    config.TASK = Tasks.CLASSIFICATION

    # Network-level settings.
    network = EasyDict()
    network.OPTIMIZER_CLASS = tf.train.AdamOptimizer
    network.OPTIMIZER_KWARGS = {"learning_rate": 0.001}
    network.IMAGE_SIZE = config.IMAGE_SIZE
    network.BATCH_SIZE = config.BATCH_SIZE
    config.NETWORK = network

    # Dataset-level settings.
    dataset = EasyDict()
    dataset.PRE_PROCESSOR = Resize(config.IMAGE_SIZE)
    dataset.BATCH_SIZE = config.BATCH_SIZE
    config.DATASET = dataset

    environment.init("test_darknet")
    prepare_dirs(recreate=True)
    start_training(config)
Пример #4
0
def test_training():
    """Smoke test: run a tiny LMBiSeNet training job and verify no error is raised."""
    config = EasyDict()

    config.NETWORK_CLASS = LMBiSeNet
    config.DATASET_CLASS = DummyCamvid

    # Minimal run: two steps on small images, save/summarise every step.
    config.IS_DEBUG = False
    config.IMAGE_SIZE = [128, 160]
    config.BATCH_SIZE = 2
    config.TEST_STEPS = 1
    config.MAX_STEPS = 2
    config.SAVE_STEPS = 1
    config.SUMMARISE_STEPS = 1
    config.IS_PRETRAIN = False
    config.IS_DISTRIBUTION = False

    # Network-level settings (NHWC layout).
    network = EasyDict()
    network.OPTIMIZER_CLASS = tf.train.AdamOptimizer
    network.OPTIMIZER_KWARGS = {"learning_rate": 0.001}
    network.IMAGE_SIZE = config.IMAGE_SIZE
    network.BATCH_SIZE = config.BATCH_SIZE
    network.DATA_FORMAT = "NHWC"
    config.NETWORK = network

    # Dataset-level settings.
    dataset = EasyDict()
    dataset.PRE_PROCESSOR = Resize(config.IMAGE_SIZE)
    dataset.BATCH_SIZE = config.BATCH_SIZE
    dataset.DATA_FORMAT = "NHWC"
    config.DATASET = dataset

    environment.init("test_lm_bisenet")
    prepare_dirs(recreate=True)
    start_training(config)
Пример #5
0
def test_training():
    """Smoke test: train YoloV2 on Pascalvoc2007 briefly; verify no error is raised."""
    config = EasyDict()

    config.NETWORK_CLASS = YoloV2
    config.DATASET_CLASS = Pascalvoc2007

    # Minimal run: two steps on small images, checkpoint/summary every step.
    config.IS_DEBUG = False
    config.IMAGE_SIZE = [128, 160]
    config.BATCH_SIZE = 2
    config.TEST_STEPS = 1
    config.MAX_STEPS = 2
    config.SAVE_CHECKPOINT_STEPS = 1
    config.KEEP_CHECKPOINT_MAX = 5
    config.SUMMARISE_STEPS = 1
    config.IS_PRETRAIN = False
    config.TASK = Tasks.OBJECT_DETECTION

    # Network-level settings (NHWC layout).
    network = EasyDict()
    network.OPTIMIZER_CLASS = tf.train.AdamOptimizer
    network.OPTIMIZER_KWARGS = {"learning_rate": 0.001}
    network.IMAGE_SIZE = config.IMAGE_SIZE
    network.BATCH_SIZE = config.BATCH_SIZE
    network.DATA_FORMAT = "NHWC"
    config.NETWORK = network

    # Dataset-level settings.
    dataset = EasyDict()
    dataset.PRE_PROCESSOR = ResizeWithGtBoxes(config.IMAGE_SIZE)
    dataset.BATCH_SIZE = config.BATCH_SIZE
    dataset.DATA_FORMAT = "NHWC"
    config.DATASET = dataset

    environment.init("test_yolo_v2")
    prepare_dirs(recreate=True)
    start_training(config)
Пример #6
0
def test_quantize_training():
    """Smoke test: train FlowNetSV1Quantized for two steps.

    Only verifies that no exception is raised; no accuracy check.
    """
    config = EasyDict()

    config.NETWORK_CLASS = FlowNetSV1Quantized
    config.DATASET_CLASS = FlyingChairs

    # Minimal run: two steps, checkpoint/summary every step.
    config.IS_DEBUG = False
    config.IMAGE_SIZE = [384, 512]
    config.BATCH_SIZE = 8
    config.TEST_STEPS = 1
    config.MAX_STEPS = 2
    config.SAVE_CHECKPOINT_STEPS = 1
    config.KEEP_CHECKPOINT_MAX = 5
    config.SUMMARISE_STEPS = 1
    config.IS_PRETRAIN = False
    config.IS_DISTRIBUTION = False

    # network model config: 2-bit activations, binary channel-wise weights.
    config.NETWORK = EasyDict()
    config.NETWORK.OPTIMIZER_CLASS = tf.train.AdamOptimizer
    config.NETWORK.OPTIMIZER_KWARGS = {"learning_rate": 0.001}
    config.NETWORK.IMAGE_SIZE = config.IMAGE_SIZE
    config.NETWORK.BATCH_SIZE = config.BATCH_SIZE
    config.NETWORK.DATA_FORMAT = "NHWC"
    config.NETWORK.ACTIVATION_QUANTIZER = linear_mid_tread_half_quantizer
    config.NETWORK.ACTIVATION_QUANTIZER_KWARGS = {'bit': 2, 'max_value': 2.0}
    config.NETWORK.WEIGHT_QUANTIZER = binary_channel_wise_mean_scaling_quantizer
    config.NETWORK.WEIGHT_QUANTIZER_KWARGS = {}

    # dataset config
    config.DATASET = EasyDict()
    config.DATASET.BATCH_SIZE = config.BATCH_SIZE
    config.DATASET.DATA_FORMAT = "NHWC"
    config.DATASET.VALIDATION_RATE = 0.2
    config.DATASET.VALIDATION_SEED = 2019
    config.DATASET.AUGMENTOR = Sequence([
        # Geometric transformation
        FlipLeftRight(0.5),
        FlipTopBottom(0.5),
        Translate(-0.2, 0.2),
        Rotate(-17, +17),
        Scale(1.0, 2.0),
        # Pixel-wise augmentation
        Brightness(0.8, 1.2),
        Contrast(0.2, 1.4),
        Color(0.5, 2.0),
        Gamma(0.7, 1.5),
        # Hue(-128.0, 128.0),
        GaussianNoise(0.0, 10.0)
    ])
    # Scale pixel values by 1/255. The original also assigned
    # PRE_PROCESSOR = None earlier and then unconditionally overwrote it
    # here; that dead first assignment has been removed.
    # NOTE: `DevideBy255` is the project's own (misspelled) class name.
    config.DATASET.PRE_PROCESSOR = Sequence([
        DevideBy255(),
    ])
    environment.init("test_flownet_s_v1_quantize")
    prepare_dirs(recreate=True)
    start_training(config)
def SqueezeSegV2Config():
    """Build and return the SqueezeSegV2 hyperparameter configuration.

    Returns:
        EasyDict: configuration covering classes, input shape, loss,
        optimizer schedule, network regularization, and dataset options.
    """
    mc = EasyDict()

    # Semantic class names; order defines the class ids below.
    mc.CLASSES = [
        'Road', 'Sidewalk', 'Building', 'Pole', 'Vegetation', 'Person',
        'TwoWheeler', 'Car', 'Truck', 'Bus', "None"
    ]
    mc.NUM_CLASS = len(mc.CLASSES)
    mc.CLS_2_ID = dict(zip(mc.CLASSES, range(len(mc.CLASSES))))
    # Uniform per-class loss weights, derived from the class count instead of
    # a hard-coded list of eleven 1.0s so it stays in sync if CLASSES changes.
    mc.CLS_LOSS_WEIGHT = np.ones(mc.NUM_CLASS)
    # One RGB color per class (same order as CLASSES), normalized to [0, 1].
    mc.CLS_COLOR_MAP = np.array([
        [128, 64, 128],  # Road
        [244, 35, 232],  # Sidewalk
        [70, 70, 70],  # Building
        [153, 153, 153],  # Pole
        [107, 142, 35],  # Vegetation
        [220, 20, 60],  # Person
        [255, 0, 0],  # Two Wheeler
        [0, 0, 142],  # Car
        [0, 0, 70],  # Truck
        [0, 60, 100],  # Bus
        [0, 0, 0]  # None
    ]) / 255.0

    # Input shape
    mc.BATCH_SIZE = 8
    mc.AZIMUTH_LEVEL = 240
    mc.ZENITH_LEVEL = 32
    mc.NUM_FEATURES = 6

    # Loss
    mc.FOCAL_GAMMA = 2.0
    mc.CLS_LOSS_COEF = 15.0
    mc.DENOM_EPSILON = 1e-12  # small value used in denominator to prevent division by 0

    # Gradient descent schedule
    mc.LEARNING_RATE = 0.05
    mc.LR_DECAY_STEPS = 500
    mc.LR_DECAY_FACTOR = 0.9
    mc.MAX_GRAD_NORM = 100.0

    # Network regularization
    mc.L2_WEIGHT_DECAY = 0.05
    mc.DROP_RATE = 0.1
    mc.BN_MOMENTUM = 0.9
    mc.REDUCTION = 16

    # Dataset augmentation switches
    mc.DATA_AUGMENTATION = True
    mc.RANDOM_FLIPPING = True
    mc.RANDOM_SHIFT = True

    # Per-channel normalization stats: x, y, z, intensity, distance
    mc.INPUT_MEAN = np.array([[[24.810, 0.819, 0.000, 16.303, 25.436]]])
    mc.INPUT_STD = np.array([[[30.335, 7.807, 2.058, 25.208, 30.897]]])

    return mc
Пример #8
0
def test_training():
    """Smoke test: train LmSinglePoseV1Quantize briefly; verify no error is raised."""
    config = EasyDict()

    config.NETWORK_CLASS = LmSinglePoseV1Quantize
    config.DATASET_CLASS = MscocoSinglePersonKeypoints

    # Minimal run: two steps, checkpoint/summary every step.
    config.IS_DEBUG = False
    config.IMAGE_SIZE = [160, 160]
    config.BATCH_SIZE = 2
    config.TEST_STEPS = 1
    config.MAX_STEPS = 2
    config.SAVE_CHECKPOINT_STEPS = 1
    config.KEEP_CHECKPOINT_MAX = 5
    config.SUMMARISE_STEPS = 1
    config.IS_PRETRAIN = False
    config.IS_DISTRIBUTION = False
    config.TASK = Tasks.KEYPOINT_DETECTION

    # Network-level settings: 2-bit activations, binary channel-wise weights.
    network = EasyDict()
    network.OPTIMIZER_CLASS = tf.train.AdamOptimizer
    network.OPTIMIZER_KWARGS = {"learning_rate": 0.001}
    network.IMAGE_SIZE = config.IMAGE_SIZE
    network.BATCH_SIZE = config.BATCH_SIZE
    network.ACTIVATION_QUANTIZER = linear_mid_tread_half_quantizer
    network.ACTIVATION_QUANTIZER_KWARGS = {'bit': 2, 'max_value': 2.0}
    network.WEIGHT_QUANTIZER = binary_channel_wise_mean_scaling_quantizer
    network.WEIGHT_QUANTIZER_KWARGS = {}
    config.NETWORK = network

    # Dataset-level settings: resize, build heatmaps, scale pixels to [0, 1].
    dataset = EasyDict()
    dataset.PRE_PROCESSOR = Sequence([
        ResizeWithJoints(image_size=config.IMAGE_SIZE),
        JointsToGaussianHeatmap(image_size=config.IMAGE_SIZE, stride=2),
        DivideBy255()])
    dataset.BATCH_SIZE = config.BATCH_SIZE
    config.DATASET = dataset

    environment.init("test_lm_single_pose_v1")
    prepare_dirs(recreate=True)
    start_training(config, profile_step=1)
Пример #9
0
def test_training():
    """Smoke test: train YoloV2Quantize on Pascalvoc2007 briefly; verify no error is raised."""
    config = EasyDict()

    config.NETWORK_CLASS = YoloV2Quantize
    config.DATASET_CLASS = Pascalvoc2007

    # Minimal run: two steps on small images, checkpoint/summary every step.
    config.IS_DEBUG = False
    config.IMAGE_SIZE = [128, 160]
    config.BATCH_SIZE = 2
    config.TEST_STEPS = 1
    config.MAX_STEPS = 2
    config.SAVE_CHECKPOINT_STEPS = 1
    config.KEEP_CHECKPOINT_MAX = 5
    config.SUMMARISE_STEPS = 1
    config.IS_PRETRAIN = False
    config.TASK = Tasks.OBJECT_DETECTION

    # Network-level settings: 2-bit activations, binary channel-wise weights.
    network = EasyDict()
    network.OPTIMIZER_CLASS = tf.train.AdamOptimizer
    network.OPTIMIZER_KWARGS = {"learning_rate": 0.001}
    network.IMAGE_SIZE = config.IMAGE_SIZE
    network.BATCH_SIZE = config.BATCH_SIZE
    network.ACTIVATION_QUANTIZER = linear_mid_tread_half_quantizer
    network.ACTIVATION_QUANTIZER_KWARGS = {'bit': 2, 'max_value': 2.0}
    network.WEIGHT_QUANTIZER = binary_channel_wise_mean_scaling_quantizer
    network.WEIGHT_QUANTIZER_KWARGS = {}
    config.NETWORK = network

    # Dataset-level settings.
    dataset = EasyDict()
    dataset.PRE_PROCESSOR = ResizeWithGtBoxes(config.IMAGE_SIZE)
    dataset.BATCH_SIZE = config.BATCH_SIZE
    config.DATASET = dataset

    environment.init("test_yolov_2_quantize")
    prepare_dirs(recreate=True)
    start_training(config)
Пример #10
0
def test_training():
    """Smoke test: train LmResnetQuantize briefly; verify no error is raised."""
    config = EasyDict()

    config.NETWORK_CLASS = LmResnetQuantize
    config.DATASET_CLASS = Dummy

    # Minimal run: two steps on 32x32 images, checkpoint/summary every step.
    config.IS_DEBUG = False
    config.IMAGE_SIZE = [32, 32]
    config.BATCH_SIZE = 2
    config.TEST_STEPS = 1
    config.MAX_STEPS = 2
    config.SAVE_CHECKPOINT_STEPS = 1
    config.KEEP_CHECKPOINT_MAX = 5
    config.SUMMARISE_STEPS = 1
    config.IS_PRETRAIN = False
    config.TASK = Tasks.CLASSIFICATION

    # Network-level settings: 2-bit activations, binary mean-scaling weights.
    network = EasyDict()
    network.OPTIMIZER_CLASS = tf.train.AdamOptimizer
    network.OPTIMIZER_KWARGS = {"learning_rate": 0.001}
    network.IMAGE_SIZE = config.IMAGE_SIZE
    network.BATCH_SIZE = config.BATCH_SIZE
    network.ACTIVATION_QUANTIZER = linear_mid_tread_half_quantizer
    network.ACTIVATION_QUANTIZER_KWARGS = {'bit': 2, 'max_value': 2}
    network.WEIGHT_QUANTIZER = binary_mean_scaling_quantizer
    network.WEIGHT_QUANTIZER_KWARGS = {}
    config.NETWORK = network

    # Dataset-level settings.
    dataset = EasyDict()
    dataset.PRE_PROCESSOR = Resize(config.IMAGE_SIZE)
    dataset.BATCH_SIZE = config.BATCH_SIZE
    config.DATASET = dataset

    environment.init("test_lm_resnet_quantize")
    prepare_dirs(recreate=True)
    start_training(config)
# NOTE(review): `__C` is the module-level config object created earlier in
# this file (outside this excerpt).
# Dataset selection:
#   'mnist': MNIST
#   'cifar10': CIFAR-10
__C.DATABASE_NAME = 'cifar10'

# Training version label.
# Set None to auto generate version
__C.VERSION = None

# Optimizer learning rate
__C.LEARNING_RATE = 0.001

# Number of training epochs
__C.EPOCHS = 50

# Mini-batch size
__C.BATCH_SIZE = 256

# Use the test set as the validation set when preprocessing data.
__C.DPP_TEST_AS_VALID = False

# ===========================================
# #            Model Architecture           #
# ===========================================

# -------------------------------------------
# Classification

# Parameters of the margin loss.
# default: {'m_plus': 0.9, 'm_minus': 0.1, 'lambda_': 0.5}
__C.MARGIN_LOSS_PARAMS = {'m_plus': 0.9, 'm_minus': 0.1, 'lambda_': 0.5}
Пример #12
0
# SUMMARISE_STEPS = 2
# IS_DEBUG = True

# Input pipeline: resize to IMAGE_SIZE, then per-image standardization.
# NOTE(review): IMAGE_SIZE / BATCH_SIZE / DATA_FORMAT are defined earlier in
# this file (outside this excerpt).
PRE_PROCESSOR = Sequence([Resize(size=IMAGE_SIZE), PerImageStandardization()])
POST_PROCESSOR = None

# Network settings.
NETWORK = EasyDict()
NETWORK.OPTIMIZER_CLASS = tf.train.MomentumOptimizer
NETWORK.OPTIMIZER_KWARGS = {"momentum": 0.9}
# Step-wise schedule: lr 0.1 until step 40k, then 0.01, 0.001, 0.0001.
NETWORK.LEARNING_RATE_FUNC = tf.train.piecewise_constant
NETWORK.LEARNING_RATE_KWARGS = {
    "values": [0.1, 0.01, 0.001, 0.0001],
    "boundaries": [40000, 60000, 80000],
}
NETWORK.IMAGE_SIZE = IMAGE_SIZE
NETWORK.BATCH_SIZE = BATCH_SIZE
NETWORK.DATA_FORMAT = DATA_FORMAT
NETWORK.WEIGHT_DECAY_RATE = 0.0001
# Quantization: 2-bit activations, binary mean-scaling weights.
NETWORK.ACTIVATION_QUANTIZER = linear_mid_tread_half_quantizer
NETWORK.ACTIVATION_QUANTIZER_KWARGS = {'bit': 2, 'max_value': 2}
NETWORK.WEIGHT_QUANTIZER = binary_mean_scaling_quantizer
NETWORK.WEIGHT_QUANTIZER_KWARGS = {}

# dataset
DATASET = EasyDict()
DATASET.BATCH_SIZE = BATCH_SIZE
DATASET.DATA_FORMAT = DATA_FORMAT
DATASET.PRE_PROCESSOR = PRE_PROCESSOR
DATASET.AUGMENTOR = Sequence([
    Pad(2),
    Crop(size=IMAGE_SIZE),
Пример #13
0
# NOTE(review): `__C` is the module-level config object created earlier in
# this file (outside this excerpt).
# Optimizer learning rate
__C.LEARNING_RATE = 0.001

# Learning rate with exponential decay
# Use learning rate decay
__C.LR_DECAY = True
# Decay steps
__C.LR_DECAY_STEPS = 2000
# Exponential decay rate
__C.LR_DECAY_RATE = 0.96

# Number of training epochs
__C.EPOCHS = 20

# Mini-batch size
__C.BATCH_SIZE = 512

# Data format
# 'NCHW': (batch, channel, height, width)
# 'NHWC': (batch, height, width, channel)
__C.DATA_FORMAT = 'NCHW'

# ===========================================
# #               Preprocessing             #
# ===========================================

# Use the test set as the validation set when preprocessing data.
__C.DPP_TEST_AS_VALID = True

# Fraction of data held out for the test split.
__C.TEST_SIZE = 0.2
Пример #14
0
# coding:utf-8

from easydict import EasyDict

# Training configuration object (attribute-style dict).
config = EasyDict()

# Mini-batch size.
config.BATCH_SIZE = 384
# OHEM (presumably online hard example mining) switches and keep ratios for
# the classification and bounding-box losses — TODO confirm against trainer.
config.CLS_OHEM = True
config.CLS_OHEM_RATIO = 0.7
config.BBOX_OHEM = False
config.BBOX_OHEM_RATIO = 0.7
# GPU device id strings.
config.GPU = '2,3'
config.VISIBLE_GPU = '2,3'
# Small epsilon to avoid division by zero / log(0).
config.EPS = 1e-14
# Epochs at which the learning rate changes, presumably — verify in trainer.
config.LR_EPOCH = [6, 14, 20]  # [6, 14, 20]
Пример #15
0
# NOTE(review): `YOLO_CONFIG` is created earlier in this file (outside this
# excerpt); `math` must also be imported there.
YOLO_CONFIG.USE_FOCAL_LOSS = False
YOLO_CONFIG.USE_LABEL_SMOOTH = False


# Absolute paths to the train/val metadata lists (machine-specific).
YOLO_CONFIG.TRAIN_PATH = '/home/huydao/Source/LogoDetection/data/metadata/train.txt'
YOLO_CONFIG.VAL_PATH = '/home/huydao/Source/LogoDetection/data/metadata/val.txt'

# Detection head settings.
YOLO_CONFIG.N_CLASSES = 1
YOLO_CONFIG.NMS_TOPK = 150
YOLO_CONFIG.NMS_THRESHOLD = 0.45
YOLO_CONFIG.SCORE_THRESHOLD = 0.01

# Checkpoint restore / save locations.
YOLO_CONFIG.RESTORE_PATH = '/home/huydao/Source/LogoDetection/data/weights/yolov3.ckpt'
YOLO_CONFIG.SAVE_DIR = '/home/huydao/Source/LogoDetection/checkpoints'

# Training schedule.
YOLO_CONFIG.BATCH_SIZE = 8
YOLO_CONFIG.TOTAL_EPOCHS = 10
YOLO_CONFIG.TRAIN_EVALUATION_STEP = 10
YOLO_CONFIG.VAL_EVALUATION_STEP = 2
YOLO_CONFIG.SAVE_EPOCH = 10

YOLO_CONFIG.BATCH_NORM_DECAY = 0.99
# NOTE(review): missing the `YOLO_CONFIG.` prefix? As written this is a bare
# module-level constant that the config object never sees — confirm intent.
WEIGHT_DECAY = 5e-4

YOLO_CONFIG.CLASSES_PATH = ''
YOLO_CONFIG.ANCHORS_PATH = ''

# NOTE(review): these read the metadata files at import time; they also leak
# the file handles (no close) and raise if the paths above don't exist.
YOLO_CONFIG.TRAIN_EXAMPLES = len(open(YOLO_CONFIG.TRAIN_PATH,'r').readlines())
YOLO_CONFIG.VAL_EXAMPLES = len(open(YOLO_CONFIG.VAL_PATH,'r').readlines())

# Number of batches per training epoch (ceiling division).
YOLO_CONFIG.TRAIN_BATCH_NUM = int(math.ceil(float(YOLO_CONFIG.TRAIN_EXAMPLES)/YOLO_CONFIG.BATCH_SIZE))
Пример #16
0
# Build the inverse mapping (id -> name) from NAME_TO_ID.
# NOTE(review): NAME_TO_ID / ID_TO_NAME / cfg / conf / os / torch are defined
# or imported earlier in this file (outside this excerpt).
for item in NAME_TO_ID.items():
    ID_TO_NAME[item[1]] = item[0]
"""
Logging settings
"""

cfg.LOG_DIR = os.path.join(cfg.ROOT_DIR, 'runs', cfg.RUN_NAME)
cfg.CLEAR_HISTORY = conf['clear_history']

cfg.PRINT_EVERY = conf['print_every']
"""
Training settings
"""
cfg.NUM_EPOCHS = conf['num_epochs']
cfg.BATCH_SIZE = conf['batch_size']
# Hard-coded to the second GPU when CUDA is available.
cfg.DEVICE = torch.device('cuda:1' if torch.cuda.is_available() else 'cpu')
cfg.PATIENCE = conf['patience']
cfg.RESUME = conf['resume']
cfg.NUM_WORKERS = conf['num_workers']
cfg.LEARNING_RATE = conf['learning_rate']
cfg.WEIGHT_DECAY = conf['weight_decay']
"""
Model saving
"""
cfg.CHECKPOINT_DIR = os.path.join(cfg.ROOT_DIR, 'checkpoint')
cfg.BEST_MODEL_PATH = os.path.join(cfg.CHECKPOINT_DIR, 'best_model.pth')
cfg.BEST_ACC_PATH = os.path.join(cfg.CHECKPOINT_DIR, 'best_acc.txt')
cfg.CHECKPOINT_PATH = os.path.join(cfg.CHECKPOINT_DIR, cfg.RUN_NAME + '.tar')
"""
Clear history
Пример #17
0
# NOTE(review): `conf` is created earlier in this file (outside this excerpt).
# Training params for the background (BG) model.
conf.LR_BG = 1e-3
conf.EPOCH_SIZE_BG = 8000
conf.BATCH_SIZE_BG = 15
conf.LD_DECAY_BG = 2000
conf.BG_CUE_FILE = "bg_cue_initsec.p"
conf.SALIENCY_TH_BG = 0.1

# Training params for SEC models.
conf.CUE_FILE_INITSEC = "sec_cue.p"
conf.INPUT_SIZE_SEC = 320
conf.DOWN_SAMPLE_SEC = 8  # network resolution (output stride)
conf.Q_FG = 0.996
conf.Q_BG = 0.999

# Optimizer schedule.
conf.LR = 1e-3
conf.LR_DECAY = 2000
conf.MAX_EPOCH = 8
conf.EPOCH_SIZE = 1000
conf.BATCH_SIZE = 15

# Weight decay, momentum, and data-loading resources.
conf.WD = 5e-4
conf.MOMENTUM = 0.9
conf.WORKSPACE = 512
conf.CPU_WORKER_NUM = 8

# CRF post-processing parameters (positional and bilateral kernels).
conf.CRF_POS_XY_STD = 3
conf.CRF_POS_W = 3
conf.CRF_BI_RGB_STD = 10
conf.CRF_BI_XY_STD = 80
conf.CRF_BI_W = 10
Пример #18
0
from utils import ShapeSpec

# NOTE(review): `ED` is presumably an EasyDict alias imported earlier in this
# file (outside this excerpt) — confirm.
cfg = ED()
# TODO: Most hyperparameters be hard code(so I can test it easy), it all will be added to this file later(maybe).

# trainer
# epoch, batch_size, so on.

# base info
cfg.NUM_CLASSES = 1
cfg.EPOCH = 100
cfg.lr = 1e-4

# data
cfg.ROOT = 'datasets/wgisd'
cfg.BATCH_SIZE = 3
cfg.NUM_WORKERS = 8
cfg.RESIZE = (800, 1280)

# backbone
cfg.BACKBONE_DEPTH = 101
# Output feature shapes per backbone stage: channels and stride; height/width
# left unset (None).
cfg.BACKBONE_OUTPUT_SHAPE = {
    'res2': ShapeSpec(channels=256, height=None, width=None, stride=4),
    'res3': ShapeSpec(channels=512, height=None, width=None, stride=8),
    'res4': ShapeSpec(channels=1024, height=None, width=None, stride=16),
    'res5': ShapeSpec(channels=2048, height=None, width=None, stride=32)
}

# MASK
cfg.CLS_AGNOSTIC_MASK = False
Пример #19
0
# NOTE(review): `__C` is the module-level config object created earlier in
# this file (outside this excerpt).
# Optimizer learning rate
__C.LEARNING_RATE = 0.001

# Learning rate with exponential decay
# Use learning rate decay
__C.LR_DECAY = True
# Decay steps
__C.LR_DECAY_STEPS = 2000
# Exponential decay rate
__C.LR_DECAY_RATE = 0.96

# Number of training epochs
__C.EPOCHS = 50

# Mini-batch size
__C.BATCH_SIZE = 2048

# ===========================================
# #               Preprocessing             #
# ===========================================

# Use the test set as the validation set when preprocessing data.
__C.DPP_TEST_AS_VALID = True

# Fraction of data held out for the test split.
__C.TEST_SIZE = 0.2

# Fraction of the training data held out for validation.
__C.VALID_SIZE = 0.1

# Resize images