Example #1
from fvcore.common.config import CfgNode
from Archs_3D import Register

CONFIGS = CfgNode()
CONFIGS.INTENSOR_SHAPE = (224, 224)
CONFIGS.BATCH_SIZE = 4
CONFIGS.DEVICE = 'cuda'

CONFIGS.TRAINING = CfgNode()
CONFIGS.TRAINING.LOGDIR = './logdirs/mobiv2_retinanet'
CONFIGS.TRAINING.EPOCHS = 700
CONFIGS.TRAINING.CHECKPOINT_MODE = 'PRETRAINED'  # ['PRETRAINED', 'RESUME', 'START']
CONFIGS.TRAINING.CHECKPOINT_FILE = './pretrained/mobilenetv2_pretrained.pkl'

CONFIGS.DATASET = CfgNode()
CONFIGS.DATASET.PATH = './datasets/data/kitti'
CONFIGS.DATASET.MEAN = [95.87739305, 98.76049672, 93.83309082]
CONFIGS.DATASET.DIM_MEAN = [[1.52607842, 1.62858147, 3.88396124],
                            [2.20649159, 1.90197734, 5.07812564],
                            [3.25207685, 2.58505032, 10.10703568],
                            [1.76067766, 0.6602296, 0.84220464],
                            [1.27538462, 0.59506787, 0.80180995],
                            [1.73712792, 0.59677122, 1.76338868],
                            [3.52905882, 2.54368627, 16.09707843],
                            [1.9074177, 1.51386831, 3.57683128]]

CONFIGS.DATALOADER = CfgNode()
CONFIGS.DATALOADER.SAMPLER_TRAIN = 'TrainingSampler'
# ---------------------------------------------------------------------------- #
# Solver
# ---------------------------------------------------------------------------- #
CONFIGS.SOLVER = CfgNode()
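
A module-level node like CONFIGS above is usually treated as a set of defaults: callers take a clone and override fields on the copy rather than mutating the shared instance. A minimal sketch of that pattern (the helper name and the override values are illustrative, not part of the original example):

def get_default_configs():
    # Return a private copy so callers never mutate the shared defaults.
    return CONFIGS.clone()

cfg = get_default_configs()
cfg.BATCH_SIZE = 8            # override a default in code
cfg.TRAINING.EPOCHS = 100
cfg.freeze()                  # make the config read-only for the run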
Example #2
from fvcore.common.config import CfgNode as CN

# root of the configuration tree
config = CN()
config.SYSTEM = CN()

# cudnn backend settings
config.SYSTEM.cudnn = CN()
config.SYSTEM.cudnn.benchmark = True
config.SYSTEM.cudnn.deterministic = False
config.SYSTEM.cudnn.enable = True

# distributed training settings
config.SYSTEM.distributed = CN()
config.SYSTEM.distributed.use = False
# logging configuration
config.LOG = CN()
config.LOG.log_output_dir = './output/log' # log output dir
config.LOG.tb_output_dir = './output/tensorboard' # tensorboard log output dir
config.LOG.vis_dir = './output/vis'

# dataset configuration
config.DATASET = CN()
config.DATASET.factory = 'VideoAnomalyDatasetFactory'
config.DATASET.num_workers = 16
config.DATASET.name = ''
config.DATASET.seed = 2020
config.DATASET.read_format = 'opencv'
config.DATASET.image_format = 'jpg'
config.DATASET.channel_num = 3 # 1: grayscale image | 2: optical flow | 3: RGB or other 3 channel image
config.DATASET.channel_name = 'rgb' # 'gray' | 'uv' | 'rgb' | ....
config.DATASET.optical_format = 'Y' # the format of the optical flow
config.DATASET.optical_size = [384, 512] # the size of image before estimating the optical flow, H*W
config.DATASET.train = CN()
config.DATASET.train.data_path = ''
config.DATASET.train.clip_length = 5 # total clip length, including frames that are not sampled
config.DATASET.train.sampled_clip_length = 5 # number of frames actually used; usually equals clip_length
config.DATASET.train.frame_step = 1  # frame sampling step
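
Defaults like the ones above are normally specialized per experiment by merging a YAML file on top of them; only keys that already exist in the defaults can be overridden unless the node was created with new_allowed=True. A minimal sketch, assuming a hypothetical override file ./configs/avenue.yaml:

# ./configs/avenue.yaml (hypothetical):
#   DATASET:
#     name: 'avenue'
#     num_workers: 8
#     train:
#       data_path: './data/avenue/training/frames'

config.merge_from_file('./configs/avenue.yaml')  # experiment file overrides the defaults
config.freeze()                                  # lock the config before training starts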
Example #3
from fvcore.common.config import CfgNode as CN


def get_cfg():
    cfg = CN()
    cfg.TEST = CN()
    cfg.TEST.ANNOTATIONS = ""
    cfg.TEST.RESULTS = ""
    cfg.TEST.N_BINS = 15
    cfg.TEST.IOU = 0.75
    cfg.TEST.IOU_TYPE = "segm"
    cfg.TEST.MAX_DETS_EVAL = -1
    cfg.TEST.MAX_DETS_PER_CLASS_EVAL = -1

    # If enabled, report results on k-fold cross validation splits.
    cfg.CROSS_VAL = CN({"ENABLED": False})
    cfg.CROSS_VAL.FOLDS = 5

    cfg.TRAIN = CN()
    # What data to calibrate on. Options:
    # - none: Don't train calibration.
    # - cv-train: Train on cross-validation train split.
    # - cv-test: Train on cross-validation test split.
    # - test: Train on full test split.
    # - custom: Train on annotations and results specified in TRAIN.ANNOTATIONS,
    #       TRAIN.RESULTS.
    cfg.TRAIN.TRAIN_SET = "none"
    # Specify training annotations and results.
    cfg.TRAIN.ANNOTATIONS = ""
    cfg.TRAIN.RESULTS = ""

    cfg.METHOD = CN()
    # One of: HistogramBinning, IsotonicRegression, BBQ, ENIR, LogisticCalibration,
    # BetaCalibration, TemperatureScaling
    cfg.METHOD.NAME = "HistogramBinning"
    # One of "overall" (calibrate across all categories), "frequency" (calibrate per
    # frequency bin), or "per-class" (calibrate per class).
    # Note that "per-class" will likely error when using cross-validation, since some
    # splits will have classes in the validation split which are missing from the train
    # split.
    cfg.METHOD.GROUPING = "overall"
    # How to calibrate classes with no usable training predictions.
    cfg.METHOD.PER_CLASS = CN()
    # When using METHOD.GROUPING = "per-class", some classes may have no predictions in
    # the training set, or may have only false-positive predictions.
    # MISSING_SCORE, NO_TP_SCORE specify strategies for dealing with classes with no
    # predictions, or only false-positive predictions, respectively.
    # Options:
    #   - zero: Set score to zero
    #   - keep: Keep score at original value
    #   - by-frequency: Fit calibration for classes with the same frequency, and
    #       use the per-frequency calibration for this class.
    cfg.METHOD.PER_CLASS.MISSING_SCORE = "zero"
    # How to assign scores for classes with only false-positive predictions in the training set.
    cfg.METHOD.PER_CLASS.NO_TP_SCORE = "zero"
    cfg.METHOD.HIST = CN()
    # Number of bins for histogram binning. If -1, set to cfg.TEST.N_BINS.
    cfg.METHOD.HIST.N_BINS = -1
    cfg.METHOD.BBQ = CN()
    cfg.METHOD.BBQ.SCORE_FN = "BIC"  # "BIC" or "AIC"
    cfg.METHOD.ENIR = CN()
    cfg.METHOD.ENIR.SCORE_FN = "BIC"  # "BIC" or "AIC"
    # Some methods, like histogram binning, assign the same scores to many detections,
    # resulting in an undefined ranking. Setting MAINTAIN_RANK to True modifies these
    # scores so that the original ranking is respected, while also ensuring the average
    # score within a score bin stays the same.
    cfg.METHOD.MAINTAIN_RANK = True

    cfg.FEATURES = CN()
    cfg.FEATURES.ENABLED = False
    cfg.FEATURES.INSTANCE_COUNT = True
    cfg.FEATURES.POSITION = True
    cfg.FEATURES.SIZE = True

    cfg.DATASET = "lvis"
    cfg.SEED = 0
    cfg.NAME = ""
    cfg.VISUALIZE = False
    # Whether to visualize reliability matrices for each class.
    # If VISUALIZE is False, this is ignored.
    cfg.VISUALIZE_PER_CLASS = False
    # Whether to save results json. Disabled by default to save disk space.
    cfg.SAVE_RESULTS = False

    return cfg
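
A typical caller takes the defaults from get_cfg() and applies overrides from a flat KEY VALUE list (for example, assembled from the command line) before freezing. A minimal usage sketch; the paths and method name below are illustrative only:

cfg = get_cfg()
cfg.merge_from_list([
    "TEST.ANNOTATIONS", "data/lvis_val.json",      # hypothetical annotation file
    "TEST.RESULTS", "outputs/results.segm.json",   # hypothetical detector output
    "METHOD.NAME", "IsotonicRegression",           # one of the methods listed above
])
cfg.freeze()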
Example #4
from fvcore.common.config import CfgNode

_C = CfgNode()

_C.GPU_IDS = [0, 1, 2, 3]
_C.MODE = 'training'
_C.EVAL_TYPE = 'proposal'
_C.DATASET = 'anet'
_C.USE_ENV = True
_C.USE_AGENT = True
_C.EVAL_SCORE = 'AUC'

_C.TRAIN = CfgNode()
_C.TRAIN.SPLIT = 'training'
_C.TRAIN.NUM_EPOCHS = 10
_C.TRAIN.BATCH_SIZE = 16
_C.TRAIN.STEP_PERIOD = 1
_C.TRAIN.ATTENTION_STEPS = 1
_C.TRAIN.LR = 0.001
_C.TRAIN.WEIGHT_DECAY = 0.0001
_C.TRAIN.CHECKPOINT_FILE_PATH = ''
_C.TRAIN.LOG_DIR = 'runs/c3d_runs/'

_C.VAL = CfgNode()
_C.VAL.SPLIT = 'validation'
_C.VAL.BATCH_SIZE = 32

_C.TEST = CfgNode()
_C.TEST.SPLIT = 'testing'
_C.TEST.BATCH_SIZE = 32
_C.TEST.CHECKPOINT_PATH = 'checkpoints/c3d_checkpoints/checkpoint_6/best_auc.pth'
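
A default tree like _C is usually exposed to scripts through a clone plus command-line overrides, in the style sketched below. The argument names and the merge order are assumptions, not part of the original example:

import argparse

def parse_args_and_build_cfg():
    parser = argparse.ArgumentParser()
    parser.add_argument('--cfg', default='', help='optional YAML file with overrides')
    parser.add_argument('opts', nargs=argparse.REMAINDER,
                        help='KEY VALUE pairs, e.g. TRAIN.LR 0.0005')
    args = parser.parse_args()

    cfg = _C.clone()                   # never mutate the module-level defaults
    if args.cfg:
        cfg.merge_from_file(args.cfg)  # file overrides first
    if args.opts:
        cfg.merge_from_list(args.opts) # command-line overrides win
    cfg.freeze()
    return cfg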
Example #5
from fvcore.common.config import CfgNode

# root of the configuration tree
CONFIGS = CfgNode()

CONFIGS.IS_MOSAIC = False
CONFIGS.BATCH_SIZE = 32
CONFIGS.DEVICE = 'cuda'

CONFIGS.TRAINING = CfgNode(new_allowed=True)
CONFIGS.TRAINING.LOGDIR = './logdirs/'
CONFIGS.TRAINING.WEIGHTS = './weights/'
CONFIGS.TRAINING.CHECKPOINT_MODE = 'RESUME'  # ['PRETRAINED', 'RESUME', 'START']
CONFIGS.TRAINING.CHECKPOINT_FILE = './pretrained/mobilenetv2_pretrained.pkl'
CONFIGS.TRAINING.W_MKF = 1.
CONFIGS.TRAINING.W_VKF = 1.
CONFIGS.TRAINING.W_VFM = 1.
CONFIGS.TRAINING.W_M_OFF = 0.5
CONFIGS.TRAINING.W_V_OFF = 0.5

CONFIGS.DATASET = CfgNode(new_allowed=True)
CONFIGS.DATASET.PATH = './datasets/data/kitti'
CONFIGS.DATASET.OBJs = ['Car', 'Pedestrian', 'Cyclist']
CONFIGS.DATASET.RELATE_OBJs = [['Van', 'Truck'], ['Person_sitting'],
                               ['Person_sitting']]
CONFIGS.DATASET.MEAN = [0.485, 0.456, 0.406]
CONFIGS.DATASET.STD = [0.229, 0.224, 0.225]
CONFIGS.DATASET.BBOX_AREA_MAX = 0.2598311523503046
CONFIGS.DATASET.BBOX_AREA_MIN = 0.0002022788461538487
CONFIGS.DATASET.GAUSSIAN_SIGMA_MAX = 19
CONFIGS.DATASET.GAUSSIAN_SIGMA_MIN = 3
CONFIGS.DATASET.VERTEX_OFFSET_INFER = [0.75, 0.57]
CONFIGS.DATASET.GAUSSIAN_GEN_TYPE = 'dynamic_radius'  # 'dynamic_radius', 'dynamic_sigma'
CONFIGS.DATASET.aug_hsv_h = 0.014  # image HSV-Hue augmentation (fraction)
CONFIGS.DATASET.aug_hsv_s = 0.68  # image HSV-Saturation augmentation (fraction)
CONFIGS.DATASET.aug_hsv_v = 0.36  # image HSV-Value augmentation (fraction)
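
Because TRAINING and DATASET are created with new_allowed=True, a merged YAML file may introduce keys that these defaults do not declare, so it is common to snapshot the fully resolved config for reproducibility. A minimal sketch with hypothetical output paths:

import os

os.makedirs(CONFIGS.TRAINING.LOGDIR, exist_ok=True)
snapshot_path = os.path.join(CONFIGS.TRAINING.LOGDIR, 'config_snapshot.yaml')
with open(snapshot_path, 'w') as f:
    f.write(CONFIGS.dump())  # CfgNode.dump() serializes the node to a YAML string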