import copy
import yaml
import numpy as np
from ast import literal_eval

from utils.collections import AttrDict

# Root configuration node. ``cfg`` is the public alias imported by the
# rest of the project; both names refer to the same AttrDict instance.
__C = AttrDict()
cfg = __C

# ---------------------------------------------------------------------------- #
# MISC options
# ---------------------------------------------------------------------------- #

# Compute device used for training and testing: 'cuda' (GPU) or 'cpu'.
__C.DEVICE = 'cuda'

# Number of GPUs to use (applies to both training and testing).
__C.NUM_GPUS = 1

# Per-channel pixel means in BGR order; the (1, 1, 3) literal shape lets the
# array be combined with HxWx3 image tensors.
__C.PIXEL_MEANS = np.array([[[102.9801, 115.9465, 122.7717]]])

# Per-channel pixel standard deviations in BGR order, same (1, 1, 3) shape.
__C.PIXEL_STDS = np.array([[[1.0, 1.0, 1.0]]])

# Whether files generated during model testing are cleaned up afterwards.
__C.CLEAN_UP = True

# Directory where checkpoints and logger output are saved.
__C.CKPT = 'ckpts/rcnn/mscoco/e2e_faster_rcnn_R-50-FPN_1x'
# ---------------------------------------------------------------------------- #
# Checkpoint options
# ---------------------------------------------------------------------------- #
__C.CHECKPOINT = AttrDict()

# Save model checkpoints during training.
__C.CHECKPOINT.CHECKPOINT_MODEL = True

# Period (in iterations, presumably — TODO confirm against the trainer) between
# periodic checkpoints; -1 disables periodic checkpointing.
__C.CHECKPOINT.CHECKPOINT_PERIOD = -1

# Resume from an existing checkpoint when one is found.
__C.CHECKPOINT.RESUME = True

# Directory where checkpoints are written.
# NOTE(review): bytes literal — legacy Python-2/yaml artifact kept for
# compatibility with existing consumers; do not silently convert to str.
__C.CHECKPOINT.DIR = b'.'

# ---------------------------------------------------------------------------- #
# Metrics options
# ---------------------------------------------------------------------------- #
__C.METRICS = AttrDict()

# Evaluate all classes (disabled by default).
__C.METRICS.EVALUATE_ALL_CLASSES = False

# Restrict evaluation to the first FIRST_N_WAYS classes.
__C.METRICS.EVALUATE_FIRST_N_WAYS = False
__C.METRICS.FIRST_N_WAYS = 1000

# Number of epochs used when computing median metrics.
__C.METRICS.NUM_MEDIAN_EPOCHS = 5

# Device selector for this (Caffe2-style) group of options: b'GPU' or b'CPU'.
# NOTE(review): this clobbers the earlier ``__C.DEVICE = 'cuda'`` assignment —
# confirm which convention ('cuda'/'cpu' vs b'GPU'/b'CPU') consumers expect.
__C.DEVICE = b'GPU'

# Number of devices to use, for example 8 GPUs.
__C.NUM_DEVICES = 8

# Dataset location and name; empty here, expected to be set by the experiment
# configuration.
__C.DATADIR = b''
__C.DATASET = b''

# Data shuffling parameter K — semantics defined by the input pipeline;
# TODO confirm against the data loader.
__C.DATA_SHUFFLE_K = 1

# The sources for the imagenet dataset are: gfsai | laser
__C.DATA_SOURCE = b'gfsai'

# ID of the root (primary) device.
__C.ROOT_DEVICE_ID = 0

# cuDNN workspace limit (presumably MB — TODO confirm units at the call site).
__C.CUDNN_WORKSPACE_LIMIT = 256

# Seed for random number generators (reproducibility).
__C.RNG_SEED = 2

# Periodically compute and log per-layer statistics for the layers below.
__C.COMPUTE_LAYER_STATS = False
__C.LAYER_STATS_FREQ = 10
__C.STATS_LAYERS = [b'conv1', b'pred', b'res5_2_branch2', b'res2_0_branch2']

# use the following option to save the model proto for mobile predictions