import os
from dataclasses import dataclass, field
from typing import Tuple, Union

# Sub-config classes such as CenterNet, TrainCfg, and ValidCfg are defined
# alongside these classes elsewhere in the source.


@dataclass
class CenterNetCfg:
    center_net: CenterNet = field(default_factory=CenterNet)
    train: TrainCfg = field(default_factory=TrainCfg)
    valid: ValidCfg = field(default_factory=ValidCfg)
    # Individual GPU ids, not necessarily consecutive.
    gpus: Union[Tuple, list] = (0, 1, 2, 3)
@dataclass
class FasterRCNNCfg:
    faster_rcnn: FasterRCNN = field(default_factory=FasterRCNN)
    train: TrainCfg = field(default_factory=TrainCfg)
    valid: ValidCfg = field(default_factory=ValidCfg)
    # Dataset name, e.g. 'coco', 'voc', 'voc_tiny'.
    dataset: str = 'voc_tiny'
    # Path of the directory where the dataset is located.
    dataset_root: str = '~/.mxnet/datasets/'
    # Individual GPU ids used for training; you can specify (1, 3), for example.
    gpus: Union[Tuple, list] = (0, 1, 2, 3)
    # Resume from previously saved parameters if not empty,
    # e.g. './faster_rcnn_xxx_0123.params'.
    resume: str = ''
    # Prefix for saved parameter files.
    save_prefix: str = ''
    # Epoch interval for saving parameters; the best model is always saved.
    save_interval: int = 1
    # Use MXNet Horovod for distributed training. Must be run with OpenMPI.
    horovod: bool = False
    # Number of data workers; use a larger number to accelerate data loading
    # if your CPU and GPUs are powerful.
    num_workers: int = 16
    # KV store option: 'local', 'device', 'nccl', 'dist_sync',
    # 'dist_device_sync', and 'dist_async' are available.
    kv_store: str = 'nccl'
    # Whether to disable hybridizing the model; disabling hybridization
    # degrades speed and memory efficiency.
    disable_hybridization: bool = False
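
# --- Usage sketch (illustrative, not part of the original source) ---
# Dataclass configs like FasterRCNNCfg accept field overrides as constructor
# kwargs, and dataclasses.replace() returns a modified copy without mutating
# the original. Assumes the sub-config classes referenced above are importable.
from dataclasses import replace

cfg = FasterRCNNCfg(dataset='coco', gpus=(0, 1), num_workers=8)
single_gpu_cfg = replace(cfg, gpus=(0,), kv_store='device')
assert cfg.gpus == (0, 1)  # the original config is unchanged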
@dataclass
class MaskRCNNCfg:
    mask_rcnn: MaskRCNN = field(default_factory=MaskRCNN)
    train: TrainCfg = field(default_factory=TrainCfg)
    valid: ValidCfg = field(default_factory=ValidCfg)
    # Dataset name, e.g. 'coco', 'voc'.
    dataset: str = 'coco'
    # Individual GPU ids used for training; you can specify (1, 3), for example.
    gpus: Union[Tuple, list] = (0,)
    # Resume from previously saved parameters if not empty,
    # e.g. './mask_rcnn_xxx_0123.params'.
    resume: str = ''
    # Prefix for saved parameter files.
    save_prefix: str = ''
    # Epoch interval for saving parameters; the best model is always saved.
    save_interval: int = 1
    # Use MXNet Horovod for distributed training. Must be run with OpenMPI.
    horovod: bool = False
    # Number of data workers; use a larger number to accelerate data loading
    # if your CPU and GPUs are powerful.
    num_workers: int = 16
    # KV store option: 'local', 'device', 'nccl', 'dist_sync',
    # 'dist_device_sync', and 'dist_async' are available.
    kv_store: str = 'nccl'
    # Whether to disable hybridizing the model; disabling hybridization
    # degrades speed and memory efficiency.
    disable_hybridization: bool = False
    # Use the NVIDIA MSCOCO API. Make sure it is installed first.
    use_ext: bool = False
@dataclass
class ImageClassificationCfg:
    img_cls: ImageClassification = field(default_factory=ImageClassification)
    train: TrainCfg = field(default_factory=TrainCfg)
    valid: ValidCfg = field(default_factory=ValidCfg)
    # Individual GPU ids, not necessarily consecutive.
    gpus: Union[Tuple, list] = (0,)
@dataclass
class TorchImageClassificationCfg:
    img_cls: ImageClassification = field(default_factory=ImageClassification)
    data: DataCfg = field(default_factory=DataCfg)
    optimizer: OptimizerCfg = field(default_factory=OptimizerCfg)
    train: TrainCfg = field(default_factory=TrainCfg)
    augmentation: AugmentationCfg = field(default_factory=AugmentationCfg)
    model_ema: ModelEMACfg = field(default_factory=ModelEMACfg)
    misc: MiscCfg = field(default_factory=MiscCfg)
    # Individual GPU ids, not necessarily consecutive.
    gpus: Union[Tuple, list] = (0,)
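
# --- Serialization sketch (illustrative, not part of the original source) ---
# Because these configs are trees of nested dataclasses, dataclasses.asdict()
# recursively converts an instance into plain dicts, which is handy for logging
# or dumping to JSON/YAML. Assumes the sub-configs are dataclasses as well.
import json
from dataclasses import asdict

torch_cfg = TorchImageClassificationCfg(gpus=(0,))
print(json.dumps(asdict(torch_cfg), indent=2, default=str))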
@dataclass
class SSDCfg:
    ssd: SSD = field(default_factory=SSD)
    train: TrainCfg = field(default_factory=TrainCfg)
    valid: ValidCfg = field(default_factory=ValidCfg)
    # Dataset name, e.g. 'coco', 'voc', 'voc_tiny'.
    dataset: str = 'voc_tiny'
    # Path of the directory where the dataset is located.
    dataset_root: str = '~/.mxnet/datasets/'
    # Individual GPU ids used for training; you can specify (1, 3), for example.
    gpus: Union[Tuple, list] = (0, 1, 2, 3)
    # Resume from previously saved parameters if not empty,
    # e.g. './ssd_xxx_0123.params'.
    resume: str = ''
    # Epoch interval for saving parameters; the best model is always saved.
    save_interval: int = 1
    # Use MXNet Horovod for distributed training. Must be run with OpenMPI.
    horovod: bool = False
    # Number of data workers; use a larger number to accelerate data loading
    # if your CPU and GPUs are powerful.
    num_workers: int = 4
@dataclass
class CenterNet:
    # Base feature network.
    base_network: str = 'dla34_deconv'
    heads: CenterNetHead = field(default_factory=CenterNetHead)
    # Output vs. input scaling ratio, e.g. input_h // feature_h.
    scale: float = 4.0
    # The top-k detection results are kept after inference.
    topk: int = 100
    # Model zoo root directory.
    root: str = os.path.expanduser(os.path.join('~', '.mxnet', 'models'))
    # Loss weight for width/height regression.
    wh_weight: float = 0.1
    # Loss weight for center regression.
    center_reg_weight: float = 1.0
    data_shape: Tuple[int, int] = (512, 512)
    # Use a pre-trained detector for transfer learning
    # (use the preset; ignore other network settings).
    transfer: str = 'center_net_resnet50_v1b_coco'
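
# --- Scale illustration (illustrative, not part of the original source) ---
# With the defaults above, `scale` maps the input resolution to the output
# heatmap resolution: a 512x512 input yields a 128x128 feature map (512 // 4).
# Assumes CenterNetHead is importable so CenterNet() can be instantiated.
net_cfg = CenterNet()
feature_h = int(net_cfg.data_shape[0] // net_cfg.scale)
feature_w = int(net_cfg.data_shape[1] // net_cfg.scale)
assert (feature_h, feature_w) == (128, 128)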