Example #1
def build_num_workers_cfg(node, key='num_workers'):
    if key not in node:
        node[key] = CN()
    node[key].train = 8
    node[key].val = 2
    node[key].test = 2
    return node[key]
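
These builder helpers attach a child node to a parent CfgNode in place and return it. A minimal usage sketch, assuming fvcore's CfgNode is imported as CN:

from fvcore.common.config import CfgNode as CN

_C = CN()
workers = build_num_workers_cfg(_C)  # creates _C.num_workers with defaults
_C.num_workers.train = 16            # defaults can still be overridden
assert workers is _C.num_workers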
Example #2
def add_pointrend_config(cfg):
    cfg.MODEL.ROI_MASK_HEAD.IN_FEATURES = ("p2", )
    cfg.MODEL.ROI_MASK_HEAD.FC_DIM = 1024
    cfg.MODEL.ROI_MASK_HEAD.NUM_FC = 2
    cfg.MODEL.ROI_MASK_HEAD.OUTPUT_SIDE_RESOLUTION = 7
    cfg.MODEL.ROI_MASK_HEAD.POINT_HEAD_ON = False

    cfg.MODEL.POINT_HEAD = CN()
    cfg.MODEL.POINT_HEAD.NAME = "StandardPointHead"
    cfg.MODEL.POINT_HEAD.IN_FEATURES = ("p2", )
    cfg.MODEL.POINT_HEAD.TRAIN_NUM_POINTS = 14 * 14
    cfg.MODEL.POINT_HEAD.OVERSAMPLE_RATIO = 3
    cfg.MODEL.POINT_HEAD.IMPORTANCE_SAMPLE_RATIO = 0.75
    cfg.MODEL.POINT_HEAD.SUBDIVISION_STEPS = 5
    cfg.MODEL.POINT_HEAD.SUBDIVISION_NUM_POINTS = 28 * 28
    cfg.MODEL.POINT_HEAD.FC_DIM = 256
    cfg.MODEL.POINT_HEAD.NUM_FC = 3
    cfg.MODEL.POINT_HEAD.CLS_AGNOSTIC_MASK = False
    cfg.MODEL.POINT_HEAD.COARSE_PRED_EACH_LAYER = True
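
add_pointrend_config mutates an existing config in place; a minimal usage sketch, assuming a detectron2-style base config in which MODEL.ROI_MASK_HEAD already exists:

from detectron2.config import get_cfg

cfg = get_cfg()            # base config already defines MODEL.ROI_MASK_HEAD
add_pointrend_config(cfg)
cfg.merge_from_file("configs/pointrend.yaml")  # hypothetical override file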
Example #3
def build_transform_cfg(node, key='transforms', flip_prob=0.0,
                        downsample_factor_min=1.0,
                        downsample_factor_max=1.0,
                        center_jitter_factor=0.0,
                        downsample_dist='categorical',
                        ):
    if key not in node:
        node[key] = CN()
    node[key].flip_prob = flip_prob
    node[key].downsample_dist = downsample_dist
    node[key].downsample_factor_min = downsample_factor_min
    node[key].downsample_factor_max = downsample_factor_max
    node[key].downsample_cat_factors = (1.0,)
    node[key].center_jitter_factor = center_jitter_factor
    node[key].center_jitter_dist = 'normal'
    node[key].crop_size = 256
    node[key].scale_factor_min = 1.0
    node[key].scale_factor_max = 1.0
    node[key].scale_factor = 0.0
    node[key].scale_dist = 'uniform'
    node[key].noise_scale = 0.0
    node[key].rotation_factor = 0.0
    node[key].mean = [0.485, 0.456, 0.406]
    node[key].std = [0.229, 0.224, 0.225]
    node[key].brightness = 0.0
    node[key].saturation = 0.0
    node[key].hue = 0.0
    node[key].contrast = 0.0

    return node[key]
Example #4
def create_camera_config(node):
    node.camera = CN()
    node.camera.type = 'weak-persp'
    node.camera.pos_func = 'softplus'

    node.camera.weak_persp = CN()
    node.camera.weak_persp.regress_translation = True
    node.camera.weak_persp.regress_scale = True
    node.camera.weak_persp.mean_scale = 0.9

    node.camera.perspective = CN()
    node.camera.perspective.regress_translation = False
    node.camera.perspective.regress_rotation = False
    node.camera.perspective.regress_focal_length = False
    node.camera.perspective.focal_length = 5000
    return node.camera
Example #5
def create_conv_layers(node, key='layer'):
    if key not in node:
        node[key] = CN()

    node[key].num_layers = 5
    node[key].num_filters = 2048
    node[key].stride = 1
    return node[key]
Example #6
def create_mlp_config(node, key='mlp'):
    if key not in node:
        node[key] = CN()

    node[key].layers = (1024, 1024)
    node[key].activ_type = 'relu'
    node[key].lrelu_slope = 0.2
    node[key].norm_type = 'none'
    node[key].num_groups = 32
    node[key].dropout = 0.0
    node[key].init_type = 'xavier'
    node[key].gain = 0.01
    node[key].bias_init = 0.0

    return node[key]
Example #7
def create_subsample_layer(node,
                           num_layers=3,
                           key='layer',
                           kernel_size=3,
                           stride=2):
    if key not in node:
        node[key] = CN()

    node[key].num_filters = (512, ) * num_layers
    node[key].norm_type = 'bn'
    node[key].activ_type = 'relu'
    node[key].dim = 2
    node[key].kernel_sizes = [kernel_size] * len(node[key].num_filters)
    node[key].strides = [stride] * len(node[key].num_filters)
    node[key].padding = 1
    return node[key]
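
With the defaults (num_layers=3, kernel_size=3, stride=2), this yields num_filters=(512, 512, 512), kernel_sizes=[3, 3, 3], and strides=[2, 2, 2]; callers such as the HRNet stages in the later examples then override these lists wholesale.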
Example #8
    node.backbone.hrnet.stage2.subsample.strides = [2]

    node.backbone.hrnet.stage3.subsample = create_subsample_layer(
        node.backbone.hrnet.stage3, key='subsample', num_layers=1)
    node.backbone.hrnet.stage3.subsample.num_filters = [192, 384]
    node.backbone.hrnet.stage3.subsample.kernel_sizes = [3, 3]
    node.backbone.hrnet.stage3.subsample.strides = [2, 2]

    node.backbone.hrnet.final_conv = create_conv_layers(node.backbone.hrnet,
                                                        key='final_conv')
    node.backbone.hrnet.final_conv.num_filters = 2048

    return node.backbone


_C = CN()

# General

_C.num_gpus = 1
_C.local_rank = 0
_C.use_cuda = True
_C.log_file = '/tmp/logs/'

_C.output_folder = 'output'
_C.summary_folder = 'summaries'
_C.results_folder = 'results'
_C.code_folder = 'code'
_C.summary_steps = 100
_C.img_summary_steps = 100
_C.hd_img_summary_steps = 1000
Example #9
import os
from fvcore.common.config import CfgNode as CN

_C = CN()

_C.SEED = 1

## data related
_C.DATA = CN()
_C.DATA.DATASET = 'ImageNet'
# assuming you've set up the dataset using provided script
_C.DATA.ROOT = os.path.expanduser('~/.encoding/data/ILSVRC2012')
_C.DATA.BASE_SIZE = None
_C.DATA.CROP_SIZE = 224
_C.DATA.LABEL_SMOOTHING = 0.0
_C.DATA.MIXUP = 0.0
_C.DATA.RAND_AUG = False

## model related
_C.MODEL = CN()
_C.MODEL.NAME = 'resnet50'
_C.MODEL.FINAL_DROP = False

## training params 
_C.TRAINING = CN()
# (per-gpu batch size)
_C.TRAINING.BATCH_SIZE = 64
_C.TRAINING.TEST_BATCH_SIZE = 256
_C.TRAINING.LAST_GAMMA = False
_C.TRAINING.EPOCHS = 120
_C.TRAINING.START_EPOCHS = 0
Example #10
from fvcore.common.config import CfgNode as CN
_C = CN()
_C.MODEL = CN()
_C.MODEL.RESNETS = CN()

_C.MODEL.RESNETS.DEPTH = 50
_C.MODEL.RESNETS.OUT_FEATURES = [
    "res4"
]  # res4 for C4 backbone, res2..5 for FPN backbone

# Number of groups to use; 1 ==> ResNet; > 1 ==> ResNeXt
_C.MODEL.RESNETS.NUM_GROUPS = 1

# Options: "FrozenBN", "GN", "SyncBN", "BN"
_C.MODEL.RESNETS.NORM = "BN"

# Baseline width of each group.
# Scaling this parameter will scale the width of all bottleneck layers.
_C.MODEL.RESNETS.WIDTH_PER_GROUP = 64
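# (For example, the ResNeXt-101 32x8d variant uses NUM_GROUPS = 32 and
# WIDTH_PER_GROUP = 8.)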

# Place the stride 2 conv on the 1x1 filter
# Use True only for the original MSRA ResNet; use False for C2 and Torch models
_C.MODEL.RESNETS.STRIDE_IN_1X1 = True

# Apply dilation in stage "res5"
_C.MODEL.RESNETS.RES5_DILATION = 1

# Output width of res2. Scaling this parameter will scale the width of all 1x1 convs in ResNet
# For R18 and R34, this needs to be set to 64
_C.MODEL.RESNETS.RES2_OUT_CHANNELS = 256
_C.MODEL.RESNETS.STEM_OUT_CHANNELS = 64
Example #11
def create_backbone_cfg(node, backbone_type='resnet50'):
    if 'backbone' not in node:
        node.backbone = CN()
    node.backbone.type = backbone_type
    node.backbone.pretrained = True

    node.backbone.resnet = CN()
    node.backbone.resnet.replace_stride_with_dilation = (False, False, False)

    node.backbone.fpn = CN()
    node.backbone.fpn.pooling_type = 'concat'
    node.backbone.fpn.concat = CN()
    node.backbone.fpn.concat.use_max = True
    node.backbone.fpn.concat.use_avg = True

    node.backbone.hrnet = CN()
    node.backbone.hrnet.pretrained_layers = ['*']
    node.backbone.hrnet.pretrained_path = ('data/'
                                           'network_weights/hrnet/'
                                           'imagenet/hrnet_w48-8ef0771d.pth')

    node.backbone.hrnet.stage1 = CN()
    node.backbone.hrnet.stage1.num_modules = 1
    node.backbone.hrnet.stage1.num_branches = 1
    node.backbone.hrnet.stage1.num_blocks = [4]
    node.backbone.hrnet.stage1.num_channels = [64]
    node.backbone.hrnet.stage1.block = 'BOTTLENECK'
    node.backbone.hrnet.stage1.fuse_method = 'SUM'

    node.backbone.hrnet.stage2 = CN()
    node.backbone.hrnet.stage2.num_modules = 1
    node.backbone.hrnet.stage2.num_branches = 2
    node.backbone.hrnet.stage2.num_blocks = [4, 4]
    node.backbone.hrnet.stage2.num_channels = [48, 96]
    node.backbone.hrnet.stage2.block = 'BASIC'
    node.backbone.hrnet.stage2.fuse_method = 'SUM'

    node.backbone.hrnet.stage3 = CN()
    node.backbone.hrnet.stage3.num_modules = 4
    node.backbone.hrnet.stage3.num_branches = 3
    node.backbone.hrnet.stage3.num_blocks = [4, 4, 4]
    node.backbone.hrnet.stage3.num_channels = [48, 96, 192]
    node.backbone.hrnet.stage3.block = 'BASIC'
    node.backbone.hrnet.stage3.fuse_method = 'SUM'

    node.backbone.hrnet.stage4 = CN()
    node.backbone.hrnet.stage4.num_modules = 3
    node.backbone.hrnet.stage4.num_branches = 4
    node.backbone.hrnet.stage4.num_blocks = [4, 4, 4, 4]
    node.backbone.hrnet.stage4.num_channels = [48, 96, 192, 384]
    node.backbone.hrnet.stage4.block = 'BASIC'
    node.backbone.hrnet.stage4.fuse_method = 'SUM'

    node.backbone.hrnet.stage2.subsample = create_subsample_layer(
        node.backbone.hrnet.stage2, key='subsample', num_layers=2)
    node.backbone.hrnet.stage2.subsample.num_filters = [384]
    node.backbone.hrnet.stage2.subsample.kernel_sizes = [3]
    node.backbone.hrnet.stage2.subsample.strides = [2]

    node.backbone.hrnet.stage3.subsample = create_subsample_layer(
        node.backbone.hrnet.stage3, key='subsample', num_layers=1)
    node.backbone.hrnet.stage3.subsample.num_filters = [192, 384]
    node.backbone.hrnet.stage3.subsample.kernel_sizes = [3, 3]
    node.backbone.hrnet.stage3.subsample.strides = [2, 2]

    node.backbone.hrnet.final_conv = create_conv_layers(node.backbone.hrnet,
                                                        key='final_conv')
    node.backbone.hrnet.final_conv.num_filters = 2048

    return node.backbone
Example #12
    node[key].hue = 0.0
    node[key].contrast = 0.0

    return node[key]


def build_num_workers_cfg(node, key='num_workers'):
    if key not in node:
        node[key] = CN()
    node[key].train = 8
    node[key].val = 2
    node[key].test = 2
    return node[key]


_C = CN()

_C.use_equal_sampling = True
_C.use_face_contour = False
_C.use_packed = False

_C.body = CN()

_C.body.vertex_flip_correspondences = ''

_C.use_hands = True
_C.use_face = True
_C.body.batch_size = 1

_C.body.splits = CN()
_C.body.splits.train = ['spin', 'curated_fits']
Example #13
from fvcore.common.config import CfgNode as CN

_C = CN()

# -----------------------------------------------------------------------------
# Model options
# -----------------------------------------------------------------------------
_C.MODEL = CN()

_C.MODEL.META_ARCHITECTURE = ""

_C.MODEL.DEVICE = "cuda"

_C.MODEL.WEIGHTS = ""

_C.MODEL.MASK_ON = False

_C.MODEL.KEYPOINT_ON = False

_C.MODEL.NUM_CLASSES = -1

_C.MODEL.SCORE_THRESHOLD = 0.05

_C.MODEL.NMS_THRESHOLD = 0.45

_C.MODEL.LOAD_PROPOSALS = False

# -----------------------------------------------------------------------------
# Backbone options
# -----------------------------------------------------------------------------
_C.MODEL.BACKBONE = CN()
Example #14
def get_cfg():
    cfg = CN()
    cfg.TEST = CN()
    cfg.TEST.ANNOTATIONS = ""
    cfg.TEST.RESULTS = ""
    cfg.TEST.N_BINS = 15
    cfg.TEST.IOU = 0.75
    cfg.TEST.IOU_TYPE = "segm"
    cfg.TEST.MAX_DETS_EVAL = -1
    cfg.TEST.MAX_DETS_PER_CLASS_EVAL = -1

    # If enabled, report results on k-fold cross validation splits.
    cfg.CROSS_VAL = CN({"ENABLED": False})
    cfg.CROSS_VAL.FOLDS = 5

    cfg.TRAIN = CN()
    # What data to calibrate on. Options:
    # - none: Don't train calibration.
    # - cv-train: Train on cross-validation train split.
    # - cv-test: Train on cross-validation test split.
    # - test: Train on full test split.
    # - custom: Train on annotations and results specified in TRAIN.ANNOTATIONS,
    #       TRAIN.RESULTS.
    cfg.TRAIN.TRAIN_SET = "none"
    # Specify training annotations and results.
    cfg.TRAIN.ANNOTATIONS = ""
    cfg.TRAIN.RESULTS = ""

    cfg.METHOD = CN()
    # One of: HistogramBinning, IsotonicRegression, BBQ, ENIR, LogisticCalibration,
    # BetaCalibration, TemperatureScaling
    cfg.METHOD.NAME = "HistogramBinning"
    # One of "overall" (calibrate across all categories), "frequency" (calibrate per
    # frequency bin), or "per-class" (calibrate per class).
    # Note that "per-class" will likely error when using cross-validation, since some
    # splits will have classes in the validation split which are missing from the train
    # split.
    cfg.METHOD.GROUPING = "overall"
    # How to calibrate classes with no training data.
    cfg.METHOD.PER_CLASS = CN()
    # When using METHOD.GROUPING = "per-class", some classes may have no predictions in
    # the training set, or may have only false-positive predictions.
    # MISSING_SCORE, NO_TP_SCORE specify strategies for dealing with classes with no
    # predictions, or only false-positive predictions, respectively.
    # Options:
    #   - zero: Set score to zero
    #   - keep: Keep score at original value
    #   - by-frequency: Fit calibration for classes with the same frequency, and
    #       use the per-frequency calibration for this class.
    cfg.METHOD.PER_CLASS.MISSING_SCORE = "zero"
    # How to assign scores for classes with no true-positive predictions in the training set.
    cfg.METHOD.PER_CLASS.NO_TP_SCORE = "zero"
    cfg.METHOD.HIST = CN()
    # Number of bins for histogram binning. If -1, set to cfg.TEST.N_BINS.
    cfg.METHOD.HIST.N_BINS = -1
    cfg.METHOD.BBQ = CN()
    cfg.METHOD.BBQ.SCORE_FN = "BIC"  # "BIC" or "AIC"
    cfg.METHOD.ENIR = CN()
    cfg.METHOD.ENIR.SCORE_FN = "BIC"  # "BIC" or "AIC"
    # Some methods, like histogram binning, assign the same scores to many detections,
    # resulting in an undefined ranking. Setting MAINTAIN_RANK to True modifies these
    # scores so that the original ranking is respected, while also ensuring the average
    # score within a score bin stays the same.
    cfg.METHOD.MAINTAIN_RANK = True

    cfg.FEATURES = CN()
    cfg.FEATURES.ENABLED = False
    cfg.FEATURES.INSTANCE_COUNT = True
    cfg.FEATURES.POSITION = True
    cfg.FEATURES.SIZE = True

    cfg.DATASET = "lvis"
    cfg.SEED = 0
    cfg.NAME = ""
    cfg.VISUALIZE = False
    # Whether to visualize reliability matrices for each class.
    # If VISUALIZE is False, this is ignored.
    cfg.VISUALIZE_PER_CLASS = False
    # Whether to save results json. Disabled by default to save disk space.
    cfg.SAVE_RESULTS = False

    return cfg
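
A minimal consumption sketch (the override pairs below are illustrative, not defaults):

cfg = get_cfg()
cfg.merge_from_list(["METHOD.NAME", "TemperatureScaling", "TEST.IOU", 0.5])
cfg.freeze()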
Example #15
from fvcore.common.config import CfgNode as CN
#  from yacs.config import CfgNode as CN

_C = CN()

_C.body_model = CN()

_C.body_model.j14_regressor_path = ''
_C.body_model.mean_pose_path = ''
_C.body_model.shape_mean_path = 'data/shape_mean.npy'
_C.body_model.type = 'smplx'
_C.body_model.model_folder = 'models'
_C.body_model.use_compressed = True
_C.body_model.gender = 'neutral'
_C.body_model.num_betas = 10
_C.body_model.num_expression_coeffs = 10
_C.body_model.use_feet_keypoints = True
_C.body_model.use_face_keypoints = True
_C.body_model.use_face_contour = False

_C.body_model.global_orient = CN()
# The configuration for the parameterization of the global orientation
_C.body_model.global_orient.param_type = 'cont_rot_repr'

_C.body_model.body_pose = CN()
# The configuration for the parameterization of the body pose
_C.body_model.body_pose.param_type = 'cont_rot_repr'
_C.body_model.body_pose.finetune = False

_C.body_model.left_hand_pose = CN()
# The configuration for the parameterization of the left hand pose
Example #16
def get_shapenet_cfg():

    cfg = CN()
    cfg.MODEL = CN()
    cfg.MODEL.VOXEL_ON = False
    cfg.MODEL.MESH_ON = False
    # options: predicted_depth_only | rendered_depth_only | input_concat | input_diff | feature_concat | feature_diff
    cfg.MODEL.CONTRASTIVE_DEPTH_TYPE = "input_concat"
    cfg.MODEL.USE_GT_DEPTH = False
    # options: multihead_attention | simple_attention | stats
    cfg.MODEL.FEATURE_FUSION_METHOD = "multihead_attention"
    cfg.MODEL.MULTIHEAD_ATTENTION = CN()
    # -1 maintains the same feature dimensions as before attention
    cfg.MODEL.MULTIHEAD_ATTENTION.FEATURE_DIMS = 960
    cfg.MODEL.MULTIHEAD_ATTENTION.NUM_HEADS = 10

    # ------------------------------------------------------------------------ #
    # Checkpoint
    # ------------------------------------------------------------------------ #
    cfg.MODEL.CHECKPOINT = ""  # path to checkpoint

    # ------------------------------------------------------------------------ #
    # Voxel Head
    # ------------------------------------------------------------------------ #
    cfg.MODEL.VOXEL_HEAD = CN()
    # The number of convs in the voxel head and the number of channels
    cfg.MODEL.VOXEL_HEAD.NUM_CONV = 0
    cfg.MODEL.VOXEL_HEAD.CONV_DIM = 256
    # Normalization method for the convolution layers. Options: "" (no norm), "GN"
    cfg.MODEL.VOXEL_HEAD.NORM = ""
    # The number of depth channels for the predicted voxels
    cfg.MODEL.VOXEL_HEAD.VOXEL_SIZE = 28
    cfg.MODEL.VOXEL_HEAD.LOSS_WEIGHT = 1.0
    cfg.MODEL.VOXEL_HEAD.CUBIFY_THRESH = 0.0
    # voxel only iterations
    cfg.MODEL.VOXEL_HEAD.VOXEL_ONLY_ITERS = 100
    # Whether voxel weights are frozen
    cfg.MODEL.VOXEL_HEAD.FREEZE = False
    # Whether to use single view voxel prediction
    # without probabilistic merging
    cfg.MODEL.VOXEL_HEAD.SINGLE_VIEW = False
    cfg.MODEL.VOXEL_HEAD.RGB_FEATURES_INPUT = True
    cfg.MODEL.VOXEL_HEAD.DEPTH_FEATURES_INPUT = True
    cfg.MODEL.VOXEL_HEAD.RGB_BACKBONE = "resnet50"
    cfg.MODEL.VOXEL_HEAD.DEPTH_BACKBONE = "vgg"

    # ------------------------------------------------------------------------ #
    # Mesh Head
    # ------------------------------------------------------------------------ #
    cfg.MODEL.MESH_HEAD = CN()
    cfg.MODEL.MESH_HEAD.NAME = "VoxMeshHead"
    # Number of stages
    cfg.MODEL.MESH_HEAD.NUM_STAGES = 1
    cfg.MODEL.MESH_HEAD.NUM_GRAPH_CONVS = 1  # per stage
    cfg.MODEL.MESH_HEAD.GRAPH_CONV_DIM = 256
    cfg.MODEL.MESH_HEAD.GRAPH_CONV_INIT = "normal"
    # Mesh sampling
    cfg.MODEL.MESH_HEAD.GT_NUM_SAMPLES = 5000
    cfg.MODEL.MESH_HEAD.PRED_NUM_SAMPLES = 5000
    # whether to upsample mesh for training
    cfg.MODEL.MESH_HEAD.UPSAMPLE_PRED_MESH = True
    # loss weights
    cfg.MODEL.MESH_HEAD.CHAMFER_LOSS_WEIGHT = 1.0
    cfg.MODEL.MESH_HEAD.NORMALS_LOSS_WEIGHT = 1.0
    cfg.MODEL.MESH_HEAD.EDGE_LOSS_WEIGHT = 1.0
    # Init ico_sphere level (only for when voxel_on is false)
    cfg.MODEL.MESH_HEAD.ICO_SPHERE_LEVEL = -1

    cfg.MODEL.MESH_HEAD.RGB_FEATURES_INPUT = True
    cfg.MODEL.MESH_HEAD.DEPTH_FEATURES_INPUT = True
    cfg.MODEL.MESH_HEAD.RGB_BACKBONE = "resnet50"
    cfg.MODEL.MESH_HEAD.DEPTH_BACKBONE = "vgg"

    cfg.MODEL.MVSNET = CN()
    cfg.MODEL.MVSNET.FEATURES_LIST = [32, 64, 128, 256]
    cfg.MODEL.MVSNET.CHECKPOINT = ""
    cfg.MODEL.MVSNET.FREEZE = False

    # The depth values here differ from Pixel2Mesh and 3D-R2N2:
    # the depths are not scaled by the factor of 0.57
    cfg.MODEL.MVSNET.MIN_DEPTH = 0.175
    cfg.MODEL.MVSNET.DEPTH_INTERVAL = 0.044

    cfg.MODEL.MVSNET.NUM_DEPTHS = 48
    cfg.MODEL.MVSNET.INPUT_IMAGE_SIZE = (224, 224)
    cfg.MODEL.MVSNET.FOCAL_LENGTH = (248, 248)
    cfg.MODEL.MVSNET.PRINCIPAL_POINT = (111.5, 111.5)
    # loss weights
    cfg.MODEL.MVSNET.PRED_DEPTH_WEIGHT = 0.1
    cfg.MODEL.MVSNET.RENDERED_DEPTH_WEIGHT = 0.00
    cfg.MODEL.MVSNET.RENDERED_VS_GT_DEPTH_WEIGHT = 0.00

    # ------------------------------------------------------------------------ #
    # Solver
    # ------------------------------------------------------------------------ #
    cfg.SOLVER = CN()
    cfg.SOLVER.LR_SCHEDULER_NAME = "constant"  # {'constant', 'cosine'}
    cfg.SOLVER.BATCH_SIZE = 32
    cfg.SOLVER.BATCH_SIZE_EVAL = 8
    cfg.SOLVER.NUM_EPOCHS = 25
    cfg.SOLVER.BASE_LR = 0.0001
    cfg.SOLVER.OPTIMIZER = "adam"  # {'sgd', 'adam'}
    cfg.SOLVER.MOMENTUM = 0.9
    cfg.SOLVER.WARMUP_ITERS = 500
    cfg.SOLVER.WARMUP_FACTOR = 0.1
    cfg.SOLVER.CHECKPOINT_PERIOD = 24949  # in iters
    cfg.SOLVER.LOGGING_PERIOD = 50  # in iters
    # stable training
    cfg.SOLVER.SKIP_LOSS_THRESH = 50.0
    cfg.SOLVER.LOSS_SKIP_GAMMA = 0.9
    # for saving checkpoint
    cfg.SOLVER.EARLY_STOP_METRIC = "[email protected]"

    # ------------------------------------------------------------------------ #
    # Datasets
    # ------------------------------------------------------------------------ #
    cfg.DATASETS = CN()
    cfg.DATASETS.NAME = "shapenet"
    # ['depth', 'multi_view', 'single_view']
    cfg.DATASETS.TYPE = "single_view"
    cfg.DATASETS.SPLITS_FILE = "./datasets/shapenet/pix2mesh_splits_val05.json"

    cfg.DATASETS.INPUT_VIEWS = [0, 6, 7]

    # ------------------------------------------------------------------------ #
    # Misc options
    # ------------------------------------------------------------------------ #
    # Directory where output files are written
    cfg.OUTPUT_DIR = "./output"

    return cfg
Example #17
"""
@author:  Yuhao Cheng
@contact: yuhao.cheng[at]outlook.com
"""
from fvcore.common.config import CfgNode as CN

__all__ = ['update_config']
"""
This is the default configuration of the whole project.
"""
config = CN()
config.DESCROPTION = 'This is the description of the configuration defaults. If you have information related to this configuration file, please fill in this item.'
# Configure system-related matters, such as GPUs, cuDNN, and so on
config.SYSTEM = CN()
# config.SYSTEM.multigpus = False  # whether to use multiple GPUs for training or testing (data parallel); will be deprecated in the future
# config.SYSTEM.num_gpus = 1  # number of GPUs; will be deprecated in the future
# Configure the GPU ids, and whether to use parallel training
config.SYSTEM.gpus = [0]

config.SYSTEM.cudnn = CN()
config.SYSTEM.cudnn.benchmark = True
config.SYSTEM.cudnn.deterministic = False
config.SYSTEM.cudnn.enable = True

# Whether to use distributed training
config.SYSTEM.distributed = CN()
config.SYSTEM.distributed.use = False
# Configure logging
config.LOG = CN()
config.LOG.log_output_dir = './output/log'  # log output dir
config.LOG.tb_output_dir = './output/tensorboard'  # tensorboard log output dir
Example #18
    def __init__(self,
                 config_file: Optional[str] = None,
                 override_list: List[Any] = []):
        _C = CN()

        # Random seed for NumPy and PyTorch, important for reproducibility.
        _C.RANDOM_SEED = 0
        # Train with Automatic Mixed Precision (native PyTorch).
        _C.AMP = True
        # Set CUDNN deterministic flag (torch.backends.cudnn.deterministic).
        # Setting this will ensure exact results on every run at the cost of
        # little slowdown. Good for debugging.
        _C.CUDNN_DETERMINISTIC = False
        # Set CUDNN benchmark flag (torch.backends.cudnn.benchmark). Enables
        # CUDNN to select fastest implementation for operations based on GPU.
        # May change results (in decimals) on different hardware, but faster
        # to train. Turn off while debugging.
        _C.CUDNN_BENCHMARK = True
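        # These flags are typically applied once at startup; an illustrative
        # sketch (not part of this config class):
        #   torch.manual_seed(_C.RANDOM_SEED)
        #   torch.backends.cudnn.deterministic = _C.CUDNN_DETERMINISTIC
        #   torch.backends.cudnn.benchmark = _C.CUDNN_BENCHMARK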

        # ---------------------------------------------------------------------
        #   Data paths and parameters related to dataloading.
        # ---------------------------------------------------------------------
        _C.DATA = CN()

        # Path to the dataset root, structured as per the README. The path is
        # assumed to be relative to the project root.
        _C.DATA.ROOT = "datasets/coco"
        # Path to .model file generated by ``sentencepiece``.
        _C.DATA.TOKENIZER_MODEL = "datasets/vocab/coco_10k.model"

        # Handy config params for vocab size and indices of special tokens.
        # While these can be picked up from the tokenizer, having these in
        # the config makes it easy to create a model without instantiating too
        # many tokenizer instances (especially when not needed, e.g. model zoo).
        # These must match according to what's present in ``TOKENIZER_VOCAB``
        # and ``TOKENIZER_MODEL`` above.
        _C.DATA.VOCAB_SIZE = 10000
        # Index of out-of-vocabulary (and padding) token.
        _C.DATA.UNK_INDEX = 0
        # Index of the start-of-sentence [SOS] token.
        _C.DATA.SOS_INDEX = 1
        # Index of the end-of-sentence [EOS] token.
        _C.DATA.EOS_INDEX = 2
        # Index of the word masking token. While not used for captioning, having
        # this extra token makes it possible to train an MLM model without
        # re-creating a new vocab mapping.
        _C.DATA.MASK_INDEX = 3

        # Size of the image (square) to crop from original input image.
        _C.DATA.IMAGE_CROP_SIZE = 224
        # Maximum length of input caption (number of tokens).
        # Longer captions will be truncated up to this length.
        _C.DATA.MAX_CAPTION_LENGTH = 30

        # COCO Captions has five captions per image. If ``True``, training will
        # use one random caption per image (data efficiency ablations).
        _C.DATA.USE_SINGLE_CAPTION = False
        # Percentage of dataset to use for training (data efficiency ablations).
        _C.DATA.USE_PERCENTAGE = 100.0

        # List of image transforms (pre-processing and data augmentation) to be
        # applied sequentially (always or randomly) during training and
        # validation. Refer ``virtex/factories.py`` for all possible transforms.
        _C.DATA.IMAGE_TRANSFORM_TRAIN = [
            "random_resized_crop",
            "horizontal_flip",
            "color_jitter",
            "normalize",
        ]
        _C.DATA.IMAGE_TRANSFORM_VAL = [
            "smallest_resize",
            "center_crop",
            "normalize",
        ]

        # Hyper-parameters for masked LM pretraining task. These are only used
        # when ``MODEL.NAME`` is "masked_lm".
        _C.DATA.MASKED_LM = CN()
        # Fraction of tokens to choose for masking; this must be less than 1.
        _C.DATA.MASKED_LM.MASK_PROPORTION = 0.15
        # Probability to replace chosen tokens with [MASK] token.
        _C.DATA.MASKED_LM.MASK_PROBABILITY = 0.85
        # Probability to replace chosen tokens with a random token.
        _C.DATA.MASKED_LM.REPLACE_PROBABILITY = 0.10
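        # (With the values above, the remaining 0.05 probability keeps the
        # chosen token unchanged, as in BERT-style masking.)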

        # ---------------------------------------------------------------------
        #   Model architecture: visual backbone and textual head.
        # ---------------------------------------------------------------------
        _C.MODEL = CN()

        # Name of model, based on pretraining task.
        # Possible choices: {"token_classification", "multilabel_classification",
        # "captioning", "bicaptioning", "masked_lm", "virtex"}
        _C.MODEL.NAME = "virtex"

        _C.MODEL.VISUAL = CN()
        # Name of visual backbone. Possible choices: {"blind", "torchvision"}
        # Models from torchvision can be specified as shown below.
        _C.MODEL.VISUAL.NAME = "torchvision::resnet50"
        # Number of channels in pooled spatial features of visual backbone.
        _C.MODEL.VISUAL.FEATURE_SIZE = 2048
        # Whether to load ImageNet pretrained weights into visual backbone.
        _C.MODEL.VISUAL.PRETRAINED = False
        # Whether to keep visual backbone frozen and train only textual head.
        _C.MODEL.VISUAL.FROZEN = False

        _C.MODEL.TEXTUAL = CN()
        # Name of textual head. Set to "none" for MODEL.NAME = "*_classification".
        # Possible choices: {"transformer_postnorm", "transformer_prenorm"}.
        # Architectural hyper-parameters are encoded in the name, as explained below.
        _C.MODEL.TEXTUAL.NAME = "transformer_postnorm::L1_H2048_A32_F8192"
        # L = Number of layers in the transformer.
        # H = Hidden size of the transformer (embeddings, attention features).
        # A = Number of attention heads in the transformer.
        # F = Size of feedforward layers in the transformer.
        # Typically, we have (A = H / 64) and (F = 4 * H).
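        # For instance, "L1_H2048_A32_F8192" decodes to 1 layer, hidden size
        # 2048, 32 attention heads, and feedforward size 8192.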

        # Dropout probability for embedding, hidden features in textual head.
        _C.MODEL.TEXTUAL.DROPOUT = 0.1

        # ---------------------------------------------------------------------
        #   Optimization hyper-parameters, default values are for pretraining
        #   our best model on bicaptioning task (COCO Captions).
        # ---------------------------------------------------------------------
        _C.OPTIM = CN()

        # Name of optimizer to use. Supported values: {"sgd", "adamw"}.
        # AdamW uses default (beta1, beta2) values from PyTorch.
        _C.OPTIM.OPTIMIZER_NAME = "sgd"
        # Momentum co-efficient for SGD. Ignored for AdamW.
        _C.OPTIM.SGD_MOMENTUM = 0.9
        # Weight decay co-efficient for the optimizer.
        _C.OPTIM.WEIGHT_DECAY = 0.0001
        # Regex pattern of params for which there will be no weight decay.
        _C.OPTIM.NO_DECAY = ".*textual.(embedding|transformer).*(norm.*|bias)"
        # Max gradient norm for clipping to avoid exploding gradients.
        _C.OPTIM.CLIP_GRAD_NORM = 10

        # Wrap our optimizer with Lookahead (https://arxiv.org/abs/1907.08610).
        _C.OPTIM.USE_LOOKAHEAD = False
        _C.OPTIM.LOOKAHEAD_ALPHA = 0.5
        _C.OPTIM.LOOKAHEAD_STEPS = 5

        # We set different learning rates for CNN (visual backbone) and rest of
        # the model. CNN LR is typically much higher for training from scratch.
        # Both LRs undergo same warmup-decay schedules.

        # Total batch size (will be distributed evenly across GPUs).
        _C.OPTIM.BATCH_SIZE = 256
        # Max learning rate for CNN (visual backbone).
        _C.OPTIM.CNN_LR = 0.2
        # Max learning rate for rest of the model.
        _C.OPTIM.LR = 0.001
        # Number of iterations to train for, batches are randomly sampled.
        _C.OPTIM.NUM_ITERATIONS = 500000

        # Number of steps at the start of training for linear LR warmup.
        _C.OPTIM.WARMUP_STEPS = 10000
        # Learning rate annealing schedule for decay after warmup.
        # Possible choices: {"none", "linear", "cosine", "multistep"}.
        _C.OPTIM.LR_DECAY_NAME = "cosine"
        # Steps to decay LR for "multistep" schedule.
        _C.OPTIM.LR_STEPS = []
        # Factor to multiply with LR for "multistep" schedule.
        _C.OPTIM.LR_GAMMA = 0.1

        # Override parameter values from YAML file first, then from override
        # list, then add derived params.
        self._C = _C
        if config_file is not None:
            self._C.merge_from_file(config_file)
        self._C.merge_from_list(override_list)

        self.add_derived_params()

        # Make an instantiated object of this class immutable.
        self._C.freeze()
Example #19
from fvcore.common.config import CfgNode as CN

_C = CN()

_C.EVAL = CN(new_allowed=True)
_C.EVAL.MAX_CLICKS = 100
_C.EVAL.OUTPUT_PROBABILITY_THRESH = 0.5


def get_cfg_defaults():
  """Get a yacs CfgNode object with default values for my_project."""
  # Return a clone so that the defaults will not be altered
  # This is for the "local variable" use pattern
  return _C.clone()
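
A typical consumption pattern, as a minimal sketch (the YAML path is hypothetical):

cfg = get_cfg_defaults()
cfg.merge_from_file("configs/eval.yaml")  # hypothetical experiment overrides
cfg.freeze()
print(cfg.EVAL.MAX_CLICKS)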

Example #20
def get_shapenet_cfg():

    cfg = CN()
    cfg.MODEL = CN()
    cfg.MODEL.BACKBONE = "resnet50"
    cfg.MODEL.VOXEL_ON = False
    cfg.MODEL.MESH_ON = False

    # ------------------------------------------------------------------------ #
    # Checkpoint
    # ------------------------------------------------------------------------ #
    cfg.MODEL.CHECKPOINT = ""  # path to checkpoint

    # ------------------------------------------------------------------------ #
    # Voxel Head
    # ------------------------------------------------------------------------ #
    cfg.MODEL.VOXEL_HEAD = CN()
    # The number of convs in the voxel head and the number of channels
    cfg.MODEL.VOXEL_HEAD.NUM_CONV = 0
    cfg.MODEL.VOXEL_HEAD.CONV_DIM = 256
    # Normalization method for the convolution layers. Options: "" (no norm), "GN"
    cfg.MODEL.VOXEL_HEAD.NORM = ""
    # The number of depth channels for the predicted voxels
    cfg.MODEL.VOXEL_HEAD.VOXEL_SIZE = 28
    cfg.MODEL.VOXEL_HEAD.LOSS_WEIGHT = 1.0
    cfg.MODEL.VOXEL_HEAD.CUBIFY_THRESH = 0.0
    # voxel only iterations
    cfg.MODEL.VOXEL_HEAD.VOXEL_ONLY_ITERS = 100
    cfg.MODEL.VOXEL_HEAD.TCONV_USE_BIAS = False
    cfg.MODEL.VOXEL_HEAD.LEAKY_VALUE = 0.2
    # ------------------------------------------------------------------------ #
    # Mesh Head
    # ------------------------------------------------------------------------ #
    cfg.MODEL.MESH_HEAD = CN()
    cfg.MODEL.MESH_HEAD.NAME = "VoxMeshHead"
    # Number of stages
    cfg.MODEL.MESH_HEAD.NUM_STAGES = 1
    cfg.MODEL.MESH_HEAD.NUM_GRAPH_CONVS = 1  # per stage
    cfg.MODEL.MESH_HEAD.GRAPH_CONV_DIM = 256
    cfg.MODEL.MESH_HEAD.GRAPH_CONV_INIT = "normal"
    # Mesh sampling
    cfg.MODEL.MESH_HEAD.GT_NUM_SAMPLES = 5000
    cfg.MODEL.MESH_HEAD.PRED_NUM_SAMPLES = 5000
    # loss weights
    cfg.MODEL.MESH_HEAD.CHAMFER_LOSS_WEIGHT = 1.0
    cfg.MODEL.MESH_HEAD.NORMALS_LOSS_WEIGHT = 1.0
    cfg.MODEL.MESH_HEAD.EDGE_LOSS_WEIGHT = 1.0
    # Init ico_sphere level (only for when voxel_on is false)
    cfg.MODEL.MESH_HEAD.ICO_SPHERE_LEVEL = -1

    # ------------------------------------------------------------------------ #
    # Solver
    # ------------------------------------------------------------------------ #
    cfg.SOLVER = CN()
    cfg.SOLVER.LR_SCHEDULER_NAME = "constant"  # {'constant', 'cosine'}
    cfg.SOLVER.BATCH_SIZE = 32
    cfg.SOLVER.BATCH_SIZE_EVAL = 8
    cfg.SOLVER.NUM_EPOCHS = 25
    cfg.SOLVER.BASE_LR = 0.0001
    cfg.SOLVER.OPTIMIZER = "adam"  # {'sgd', 'adam'}
    cfg.SOLVER.MOMENTUM = 0.9
    cfg.SOLVER.WARMUP_ITERS = 500
    cfg.SOLVER.WARMUP_FACTOR = 0.1
    cfg.SOLVER.CHECKPOINT_PERIOD = 24949  # in iters
    cfg.SOLVER.LOGGING_PERIOD = 50  # in iters
    # stable training
    cfg.SOLVER.SKIP_LOSS_THRESH = 50.0
    cfg.SOLVER.LOSS_SKIP_GAMMA = 0.9

    # ------------------------------------------------------------------------ #
    # Datasets
    # ------------------------------------------------------------------------ #
    cfg.DATASETS = CN()
    cfg.DATASETS.NAME = "shapenet"

    # ------------------------------------------------------------------------ #
    # Misc options
    # ------------------------------------------------------------------------ #
    # Directory where output files are written
    cfg.OUTPUT_DIR = "./output"

    return cfg
Example #21
from copy import deepcopy
#  from yacs.config import CfgNode as CN
from fvcore.common.config import CfgNode as CN

_C = CN()

_C.stages_to_penalize = [-1]
_C.stages_to_regularize = [-1]

_C.body_joints_2d = CN()
_C.body_joints_2d.type = 'keypoints'
_C.body_joints_2d.robustifier = 'none'
_C.body_joints_2d.norm_type = 'l1'
_C.body_joints_2d.rho = 100.0
_C.body_joints_2d.beta = 5.0 / 100 * 2
_C.body_joints_2d.size_average = True
_C.body_joints_2d.weight = 1.0
_C.body_joints_2d.enable = 0

_C.hand_joints_2d = CN()
_C.hand_joints_2d.type = 'keypoints'
_C.hand_joints_2d.norm_type = 'l1'
_C.hand_joints_2d.robustifier = 'none'
_C.hand_joints_2d.rho = 100.0
_C.hand_joints_2d.beta = 5.0 / 100 * 2
_C.hand_joints_2d.size_average = True
_C.hand_joints_2d.weight = 1.0
_C.hand_joints_2d.enable = 0

_C.face_joints_2d = CN()
_C.face_joints_2d.type = 'keypoints'
Example #22
    def __init__(self,
                 config_file: Optional[str] = None,
                 override_list: List[Any] = []):
        _C = CN()
        _C.VALID_IMAGES = [
            'CXR1576_IM-0375-2001.png', 'CXR1581_IM-0378-2001.png',
            'CXR3177_IM-1497-2001.png', 'CXR2585_IM-1082-1001.png',
            'CXR1125_IM-0082-1001.png', 'CXR3_IM-1384-2001.png',
            'CXR1565_IM-0368-1001.png', 'CXR1105_IM-0072-1001-0001.png',
            'CXR2874_IM-1280-1001.png', 'CXR1886_IM-0574-1001.png'
        ]

        _C.MODELS = [{
            'resnet18': (pretrainedmodels.resnet18(pretrained=None), 512, 224),
            'resnet50': (pretrainedmodels.resnet50(pretrained=None), 2048, 224),
            'resnet101': (pretrainedmodels.resnet101(pretrained=None), 2048, 224),
            'resnet152': (pretrainedmodels.resnet152(pretrained=None), 2048, 224),
            'inception_resnet_v2': (pretrainedmodels.inceptionresnetv2(pretrained=None), 1536, 299),
        }]

        # _C.MODELS_FEATURE_SIZE = {'resnet18':512, 'resnet50':2048, 'resnet101':2048, 'resnet152':2048,
        #                           'inception_v3':2048, 'inception_resnet_v2':1536}

        # Random seed for NumPy and PyTorch, important for reproducibility.
        _C.RANDOM_SEED = 42
        # Opt level for mixed precision training using NVIDIA Apex. This can be
        # one of {0, 1, 2}. Refer NVIDIA Apex docs for their meaning.
        _C.FP16_OPT = 2

        # Path to the dataset root, structured as per the README. The path is
        # assumed to be relative to the project root.
        _C.IMAGE_PATH = '/netscratch/gsingh/MIMIC_CXR/DataSet/Indiana_Chest_XRay/Images_2'
        _C.TRAIN_JSON_PATH = '/netscratch/gsingh/MIMIC_CXR/DataSet/Indiana_Chest_XRay/iu_xray_train_2.json'
        _C.VAL_JSON_PATH = '/netscratch/gsingh/MIMIC_CXR/DataSet/Indiana_Chest_XRay/iu_xray_val_2.json'
        _C.TEST_JSON_PATH = '/netscratch/gsingh/MIMIC_CXR/DataSet/Indiana_Chest_XRay/iu_xray_test_2.json'
        _C.PRETRAINED_EMDEDDING = False
        # Path to .vocab file generated by ``sentencepiece``.
        _C.VOCAB_FILE_PATH = "/netscratch/gsingh/MIMIC_CXR/DataSet/Indiana_Chest_XRay/Vocab/indiana.vocab"
        # Path to .model file generated by ``sentencepiece``.
        _C.VOCAB_MODEL_PATH = "/netscratch/gsingh/MIMIC_CXR/DataSet/Indiana_Chest_XRay/Vocab/indiana.model"
        _C.VOCAB_SIZE = 3000
        _C.EPOCHS = 1024
        _C.BATCH_SIZE = 10
        _C.TEST_BATCH_SIZE = 100
        _C.ITERATIONS_PER_EPOCHS = 1
        _C.WEIGHT_DECAY = 1e-5
        _C.NUM_LABELS = 41
        _C.IMAGE_SIZE = 299
        _C.MAX_SEQUENCE_LENGTH = 130
        _C.DROPOUT_RATE = 0.1
        _C.D_HEAD = 64

        _C.TRAIN_DATASET_LENGTH = 25000
        _C.INFERENCE_TIME = False
        _C.COMBINED_N_LAYERS = 1
        _C.BEAM_SIZE = 50
        _C.PADDING_INDEX = 0
        _C.EOS_INDEX = 3
        _C.SOS_INDEX = 2
        _C.USE_BEAM_SEARCH = True
        _C.EXTRACTED_FEATURES = False
        _C.IMAGE_MODEL_PATH = '/netscratch/gsingh/MIMIC_CXR/Results/Image_Feature_Extraction/MIMIC_CXR_No_ES/model.pth'

        _C.EMBEDDING_DIM = 8192
        _C.CONTEXT_SIZE = 1024
        _C.LR_COMBINED = 1e-4
        _C.MAX_LR = 1e-1
        _C.SAVED_DATASET = False
        _C.MODEL_NAME = 'inception_resnet_v2'
        INIT_PATH = '/netscratch/gsingh/MIMIC_CXR/Results/Modified_Transformer/Indiana_15_10_2020_2/'
        _C.SAVED_DATASET_PATH_TRAIN = INIT_PATH + 'DataSet/train_dataloader.pth'
        _C.SAVED_DATASET_PATH_VAL = INIT_PATH + 'DataSet/val_dataloader.pth'
        _C.SAVED_DATASET_PATH_TEST = INIT_PATH + 'DataSet/test_dataloader.pth'

        _C.CHECKPOINT_PATH = INIT_PATH + 'CheckPoints'
        _C.MODEL_PATH = INIT_PATH + 'combined_model.pth'
        _C.MODEL_STATE_DIC = INIT_PATH + 'combined_model_state_dic.pth'
        _C.FIGURE_PATH = INIT_PATH + 'Graphs'
        _C.CSV_PATH = INIT_PATH
        _C.TEST_CSV_PATH = INIT_PATH + 'test_output_image_feature_input.json'
        self._C = _C
        if config_file is not None:
            self._C.merge_from_file(config_file)
        self._C.merge_from_list(override_list)

        self.add_derived_params()

        # Make an instantiated object of this class immutable.
        self._C.freeze()
Example #23
from fvcore.common.config import CfgNode as CN

_C = CN()

_C.TASK = 'classification'
_C.TRAIN_DIR = ('data/train', )
_C.VAL_DIR = ('data/val', )
_C.OUTPUT_DIR = 'baseline'
_C.EPOCH = 60
_C.BATCH_SIZE = 64
_C.MIXED_PRECISION = False
_C.QUANTIZATION_TRAINING = False
_C.TENSORBOARD = True
_C.MULTI_GPU = True

_C.MODEL = CN()
_C.MODEL.NAME = 'resnet50'
_C.MODEL.NUM_CLASSES = 3
_C.MODEL.CLASSES = (None, )

_C.MODEL.TEMPERATURE_SCALING = 1

_C.MODEL.AUTOML = False
_C.MODEL.AUTOML_TRIALS = 1000

_C.SOLVER = CN()
_C.SOLVER.NAME = 'sgd'
_C.SOLVER.LR = 0.0002
_C.SOLVER.WEIGHT_DECAY = 0.00001

_C.SOLVER.SCHEDULER = CN()
Example #24
    def __init__(self,
                 config_file: Optional[str] = None,
                 override_list: List[Any] = []):
        _C = CN()
        _C.VALID_IMAGES = [
            '8a4e1705-f30d7e1d-dd1ef999-a8521d7e-e64ad0c9.jpg.npy',
            '6660e8d2-6381a94a-843d96da-11713488-59a660eb.jpg.npy',
            '8a4e1705-f30d7e1d-dd1ef999-a8521d7e-e64ad0c9.jpg.npy',
            '865486e4-6d43765f-e1cebccc-d80670c5-b9aeea25.jpg.npy',
            '35ab1e49-b049f284-ba901484-a52ba49e-053d2c10.jpg.npy',
            'f3d88efb-8d1f70db-a2131320-90053712-cfd9a1bd.jpg.npy',
            'c5937742-fb73ee63-48b37017-9cc947e5-fa8342d4.jpg.npy',
            'b3ce45dc-111ceca0-bab01f71-9b22033a-ae9705dd.jpg.npy',
            '6660e8d2-6381a94a-843d96da-11713488-59a660eb.jpg.npy',
            '4fc9abbd-f405ecdb-ca896442-413d67c8-928fe3c4.jpg.npy'
        ]

        # Random seed for NumPy and PyTorch, important for reproducibility.
        _C.RANDOM_SEED = 42
        # Opt level for mixed precision training using NVIDIA Apex. This can be
        # one of {0, 1, 2}. Refer NVIDIA Apex docs for their meaning.
        _C.FP16_OPT = 2

        # Path to the dataset root, structured as per the README. The path is
        # assumed to be relative to the project root.
        _C.TRAIN_IMAGE_PATH = "/netscratch/gsingh/MIMIC_CXR/DataSet/JPG_DataSet_Split/Without_Preprocessing_Reports/Train_Features_Extracted"
        _C.TRAIN_JSON_PATH = "/netscratch/gsingh/MIMIC_CXR/DataSet/MIMIC_CXR_Reports/Report_CSV_Files/no_missing_train.json"
        _C.VALID_IMAGE_PATH = "/netscratch/gsingh/MIMIC_CXR/DataSet/JPG_DataSet_Split/Without_Preprocessing_Reports/Valid_Features_Extracted"
        _C.VALID_JSON_PATH = "/netscratch/gsingh/MIMIC_CXR/DataSet/MIMIC_CXR_Reports/Report_CSV_Files/no_missing_valid.json"
        _C.TEST_IMAGE_PATH = "/netscratch/gsingh/MIMIC_CXR/DataSet/JPG_DataSet_Split/Without_Preprocessing_Reports/Test_Images"
        _C.TEST_JSON_PATH = "/netscratch/gsingh/MIMIC_CXR/DataSet/MIMIC_CXR_Reports/Report_CSV_Files/no_missing_test.json"
        _C.PRETRAINED_EMDEDDING = False
        # Path to .vocab file generated by ``sentencepiece``.
        _C.VOCAB_FILE_PATH = "/netscratch/gsingh/MIMIC_CXR/DataSet/JPG_DataSet_Split/Vocab/Vocab.vocab"
        # Path to .model file generated by ``sentencepiece``.
        _C.VOCAB_MODEL_PATH = "/netscratch/gsingh/MIMIC_CXR/DataSet/JPG_DataSet_Split/Vocab/Vocab.model"
        _C.VOCAB_SIZE = 10000
        _C.EPOCHS = 1024
        _C.BATCH_SIZE = 650
        _C.TEST_BATCH_SIZE = 100
        _C.ITERATIONS_PER_EPOCHS = 1
        _C.WEIGHT_DECAY = 1e-5
        _C.NUM_LABELS = 41
        _C.IMAGE_SIZE = 299
        _C.MAX_SEQUENCE_LENGTH = 150
        _C.DROPOUT_RATE = 0.1
        _C.D_HEAD = 64
        _C.N_HEAD = 12
        _C.TRAIN_DATASET_LENGTH = 25000
        _C.INFERENCE_TIME = False
        _C.COMBINED_N_LAYERS = 1
        _C.BEAM_SIZE = 3
        _C.PADDING_INDEX = 0
        _C.EOS_INDEX = 0
        _C.SOS_INDEX = 0
        _C.EXTRACTED_FEATURES = True
        _C.IMAGE_MODEL_PATH = '/netscratch/gsingh/MIMIC_CXR/Results/Image_Feature_Extraction/MIMIC_CXR_No_ES/model.pth'

        _C.EMBEDDING_DIM = 768
        _C.CONTEXT_SIZE = 768
        _C.LR_COMBINED = 1e-3
        _C.MAX_LR = 1e-1
        _C.SAVED_DATASET = False
        INIT_PATH = '/netscratch/gsingh/MIMIC_CXR/Results/Modified_Transformer/Complete_mimic_dataset/'
        _C.SAVED_DATASET_PATH_TRAIN = INIT_PATH + 'DataSet/train_dataloader.pth'
        _C.SAVED_DATASET_PATH_VAL = INIT_PATH + 'DataSet/val_dataloader.pth'
        _C.SAVED_DATASET_PATH_TEST = INIT_PATH + 'DataSet/test_dataloader.pth'

        _C.CHECKPOINT_PATH = INIT_PATH + 'CheckPoints'
        _C.MODEL_PATH = INIT_PATH + 'combined_model.pth'
        _C.MODEL_STATE_DIC = INIT_PATH + 'combined_model_state_dic.pth'
        _C.FIGURE_PATH = INIT_PATH + 'Graphs'
        _C.CSV_PATH = INIT_PATH
        _C.TEST_CSV_PATH = INIT_PATH + 'test_output_image_feature_input.csv'
        self._C = _C
        if config_file is not None:
            self._C.merge_from_file(config_file)
        self._C.merge_from_list(override_list)

        self.add_derived_params()

        # Make an instantiated object of this class immutable.
        self._C.freeze()