class CLIConfig(scfg.Config): """ Create COCO toydata """ default = { 'key': scfg.Value('shapes8', help='special demodata code', position=1), 'dst': scfg.Value(None, help=ub.paragraph(''' Output path for the final kwcoco json file. Note, that even when given, a data.kwcoco.json file will also be generated in a bundle_dpath. ''')), 'bundle_dpath': scfg.Value(None, help=ub.paragraph(''' Creates a bundled dataset in the specified location. If unspecified, a bundle name is generated based on the toydata config. ''')), 'use_cache': scfg.Value(True) } epilog = """
# Demonstrates the equivalent ways a list-valued option can be declared.
class ExampleConfig(scfg.Config):
    default = {
        'item1': [],                         # bare default, type inferred
        'item2': scfg.Value([], type=list),  # explicit list type
        'item3': scfg.Value([]),             # Value wrapper, inferred type
        'item4': scfg.Value([], nargs='*'),  # variadic command-line args
    }
class ListDeployedConfig(scfg.Config):
    """
    Given a netharn work directory list all deployed models
    """
    default = {
        # directory that training runs write into
        'workdir': scfg.Value(None, help='work directory'),
        # the human-readable "nice" name assigned to the run
        'name': scfg.Value(None, help='"nice" name of the run'),
    }
# Options for converting a viame-style CSV into a kwcoco json file.
class ConvertConfig(scfg.Config):
    default = {
        'src': scfg.PathList('in.viame.csv'),
        'dst': scfg.Value('out.kwcoco.json'),
        'new_root': None,
        'old_root': None,
        'images': scfg.Value(None, help=(
            'image list file or path to image directory if the CSV '
            'does not specify image names')),
    }
# Example config mixing Value, Path, choices, and plain defaults.
class MyConfig(scfg.Config):
    default = {
        'option1': scfg.Value('bar', help='an option'),
        'option2': scfg.Value((1, 2, 3), tuple, help='another option'),
        'option3': None,
        'option4': 'foo',
        'discrete': scfg.Value(None, choices=['a', 'b', 'c']),
        'apath': scfg.Path(help='a path'),
    }
class CLIConfig(scfg.Config): """ Combine multiple COCO datasets into a single merged dataset. """ default = { 'src': scfg.Value([], nargs='+', help='path to multiple input datasets', position=1), 'dst': scfg.Value('combo.mscoco.json', help='path to output dataset'), 'absolute': scfg.Value(False, help='if True, converts paths to absolute paths before doing union') } epilog = """
# CLI-facing prediction options layered on top of the core
# DetectPredictConfig defaults.
class DetectPredictCLIConfig(scfg.Config):
    default = ub.dict_union(
        {
            'dataset': scfg.Value(
                None, help='coco dataset, path to images or folder of images'),
            'out_dpath': scfg.Value('./out', help='output directory'),
            'draw': scfg.Value(False),
            'workdir': scfg.Value(
                '~/work/bioharn', help='work directory for sampler if needed'),
        },
        DetectPredictConfig.default
    )
class CLIConfig(scfg.Config): """ Compute summary statistics about a COCO dataset """ default = { 'src': scfg.Value(['special:shapes8'], nargs='+', help='path to dataset', position=1), 'basic': scfg.Value(True, help='show basic stats'), 'extended': scfg.Value(True, help='show extended stats'), 'catfreq': scfg.Value(True, help='show category frequency stats'), 'boxes': scfg.Value(False, help=ub.paragraph(''' show bounding box stats in width-height format. ''')), 'annot_attrs': scfg.Value(False, help='show annotation attribute information'), 'image_attrs': scfg.Value(False, help='show image attribute information'), 'video_attrs': scfg.Value(False, help='show video attribute information'), 'embed': scfg.Value(False, help='embed into interactive shell'), } epilog = """
class CLIConfig(scfg.Config): """ Split a single COCO dataset into two sub-datasets. """ default = { 'src': scfg.Value(None, help='input dataset to split', position=1), 'dst1': scfg.Value('split1.mscoco.json', help='output path1'), 'dst2': scfg.Value('split2.mscoco.json', help='output path2'), 'factor': scfg.Value(3, help='ratio of items put in dset1 vs dset2'), 'rng': scfg.Value(None, help='random seed'), } epilog = """
class CLIConfig(scfg.Config): """ Create a COCO file from bitmasks """ default = { 'src': scfg.PathList( help='a file, globstr, or comma-separated list of files'), 'dst': scfg.Value('masks.mscoco.json', help='output path'), 'serialization': scfg.Value('vector', help='can be raster or vector'), } epilog = r"""
class FileHashConfig(scfg.Config):
    """
    The docstring will be the description in the CLI help
    """
    default = {
        'fpath': scfg.Value(None, position=1, help=ub.paragraph(
            '''
            a path to a file to hash
            ''')),
        # FIX: removed a stray trailing apostrophe ("hasher'") from the
        # user-facing help text.
        'hasher': scfg.Value('sha1', choices=['sha1', 'sha512'],
                             help=ub.paragraph(
            '''
            a name of a hashlib hasher
            ''')),
    }
# Small demo config mixing wrapped and unwrapped defaults.
class DemoConfig(scfg.Config):
    default = {
        'num': 1,
        'mode': 'bar',
        'mode2': scfg.Value('bar', str),
        'ignore': ['baz', 'biz'],
    }
class DocstrStubgenCLI(scfg.Config):
    # CLI name / description shown in the generated help output.
    name = 'doctypes'
    description = 'Generate Typed Stubs from Docstrings'
    default = {
        'module': scfg.Value(None, position=1, help=ub.paragraph('''
            The name of a module in the PYTHONPATH or an
            explicit path to that module.
            ''')),
    }

    @classmethod
    def main(cls, cmdline=False, **kwargs):
        """
        Generate typed stub files for a module from its docstrings and
        write them to disk next to the source.

        Args:
            cmdline (bool): if True, also parse options from sys.argv
            **kwargs: config overrides; must supply 'module'

        Raises:
            ValueError: if no module was specified
        """
        from xdev.cli import docstr_stubgen
        import ubelt as ub
        config = cls(cmdline=cmdline, data=kwargs)
        print(f'config={config}')
        modname_or_path = config['module']
        print(f'modname_or_path={modname_or_path}')
        if modname_or_path is None:
            raise ValueError('Must specify the module')
        # Accepts either an importable module name or a filesystem path.
        modpath = docstr_stubgen.modpath_coerce(modname_or_path)
        modpath = ub.Path(modpath)
        generated = docstr_stubgen.generate_typed_stubs(modpath)
        # Write each generated stub file to its target path.
        for fpath, text in generated.items():
            fpath = ub.Path(fpath)
            print(f'Write fpath={fpath}')
            fpath.write_text(text)

        # Generate a py.typed file to mark the package as typed
        if modpath.is_dir():
            pytyped_fpath = (modpath / 'py.typed')
            print(f'touch pytyped_fpath={pytyped_fpath}')
            pytyped_fpath.touch()
class SedCLI(scfg.Config):
    # CLI name / description shown in the generated help output.
    name = 'sed'
    description = 'Search and replace text in files'
    default = {
        'regexpr': scfg.Value('', position=1, help=ub.paragraph('''
            The pattern to search for.
            ''')),
        'repl': scfg.Value('', position=2, help=ub.paragraph('''
            The pattern to replace with.
            ''')),
        'dpath': scfg.Value(None, position=3, help=ub.paragraph('''
            The directory to recursively search or a file pattern to
            match.
            ''')),
        'dry': scfg.Value('ask', position=4, help=ub.paragraph('''
            if 1, show what would be done. if 0, execute the change,
            if "ask", then show the dry run and then ask for
            confirmation.
            ''')),
        'include': scfg.Value(None),
        'exclude': scfg.Value(None),
        'recursive': scfg.Value(True),
        'verbose': scfg.Value(1),
    }

    @classmethod
    def main(cls, cmdline=False, **kwargs):
        """
        Run a sed-like search/replace over files.

        In 'ask' (or 'auto') mode, a dry run is printed first and the
        user is prompted before the real replacement is executed.
        """
        from xdev import search_replace
        config = cls(cmdline=cmdline, data=kwargs)
        if config['dry'] in {'ask', 'auto'}:
            from rich.prompt import Confirm
            # First pass is forced dry so the user can inspect changes.
            config['dry'] = True
            search_replace.sed(**config)
            flag = Confirm.ask('Do you want to execute this sed?')
            if flag:
                # Confirmed: re-run for real.
                config['dry'] = False
                search_replace.sed(**config)
        else:
            search_replace.sed(**config)
class CodeblockCLI(scfg.Config):
    # CLI that strips common leading indentation from its input text.
    name = 'codeblock'
    description = 'Remove indentation from text'
    default = {
        'text': scfg.Value('', position=1, help='text to dedent'),
    }

    @classmethod
    def main(cls, cmdline=False, **kwargs):
        # Dedent the configured text and print the result.
        config = cls(cmdline=cmdline, data=kwargs)
        dedented = ub.codeblock(config['text'])
        print(dedented)
# Core options controlling detection prediction with a sliding window.
class DetectPredictConfig(scfg.Config):
    default = {
        'deployed': None,
        'batch_size': 4,
        'xpu': 'auto',

        'window_dims': scfg.Value('full', help='size of a sliding window'),  # (512, 512),
        'input_dims': scfg.Value((512, 512), help='The size of the inputs to the network'),
        'workers': 0,
        'overlap': scfg.Value(0.0, help='overlap of the sliding window'),

        # Note: these dont work exactly correct due to mmdetection model
        # differences
        'nms_thresh': 0.4,
        'conf_thresh': 0.1,

        'verbose': 3,
    }
class CLIConfig(scfg.Config):
    """
    Reroot image paths onto a new image root.
    """
    epilog = """
    Example Usage:
        kwcoco reroot --help
        kwcoco reroot --src=special:shapes8 --dst rerooted.json
        kwcoco reroot --src=special:shapes8 --new_prefix=foo --check=True --dst rerooted.json
    """
    default = {
        'src': scfg.Value(None, position=1, help=(
            'Path to the coco dataset')),
        'new_prefix': scfg.Value(None, help=(
            'Path to the new image root.')),
        'old_prefix': scfg.Value(None, help=(
            'Previous root to remove.')),
        'absolute': scfg.Value(True, help=(
            'If False, the output file uses relative paths')),
        'check': scfg.Value(True, help=(
            'If True, checks that all data exists')),
        'dst': scfg.Value(None, help=(
            'Save the re-rooted dataset to a new file')),
    }
class CLIConfig(scfg.Config):
    """
    Make the COCO file conform to the spec.

    Populates inferable information such as image size, annotation area, etc.
    """
    epilog = """
    Example Usage:
        kwcoco conform --help
        kwcoco conform --src=special:shapes8 --dst conformed.json
    """
    default = {
        'src': scfg.Value(None, position=1, help='Path to the coco dataset'),
        'ensure_imgsize': scfg.Value(True, help=ub.paragraph('''
            ensure each image has height and width attributes
            ''')),
        'pycocotools_info': scfg.Value(True, help=ub.paragraph('''
            ensure information needed for pycocotools
            ''')),
        'legacy': scfg.Value(False, help=(
            'if True tries to convert to the original ms-coco format')),
        'workers': scfg.Value(8, help=(
            'number of background workers for bigger checks')),
        'dst': scfg.Value(None, help=(
            'Save the modified dataset to a new file')),
    }
class FindCLI(scfg.Config):
    # CLI name / description shown in the generated help output.
    name = 'find'
    description = 'Find files based on names'
    default = {
        'pattern': scfg.Value('', position=1),
        'dpath': scfg.Value(None, position=2),
        'include': scfg.Value(None),
        'exclude': scfg.Value(None),
        'type': scfg.Value('f', help="can be f and/or d"),
        'recursive': scfg.Value(True),
        'followlinks': scfg.Value(False),
    }

    @classmethod
    def main(cls, cmdline=False, **kwargs):
        # Print each path matched by the configured search.
        from xdev import search_replace
        config = cls(cmdline=cmdline, data=kwargs)
        matches = search_replace.find(**config)
        for match in matches:
            print(match)
class CLIConfig(scfg.Config):
    """
    Visualize a COCO image using matplotlib or opencv, optionally writing
    it to disk
    """
    epilog = """
    Example Usage:
        kwcoco show --help
        kwcoco show --src=special:shapes8 --gid=1
        kwcoco show --src=special:shapes8 --gid=1 --dst out.png
    """
    default = {
        'src': scfg.Value(None, help=('Path to the coco dataset'), position=1),
        'gid': scfg.Value(None, help=(
            'Image id to show, if unspecified the first image is shown')),
        'aid': scfg.Value(None, help=(
            'Annotation id to show, mutually exclusive with gid')),
        'dst': scfg.Value(None, help=(
            'Save the image to the specified file. '
            'If unspecified, the image is shown with pyplot')),
        'mode': scfg.Value('matplotlib', choices=['matplotlib', 'opencv'],
                           help='method used to draw the image'),
        'channels': scfg.Value(None, type=str, help=ub.paragraph('''
            By default uses the default channels (usually this is rgb),
            otherwise specify the name of an auxiliary channels
            ''')),
        # FIX: corrected "dispaly" -> "display" in the user-facing help text.
        'show_annots': scfg.Value(True, help=('Overlay annotations on display')),
    }
class CLIConfig(scfg.Config):
    """
    Rename or remove categories
    """
    epilog = """
    Example Usage:
        kwcoco modify_categories --help
        kwcoco modify_categories --src=special:shapes8 --dst modcats.json
        kwcoco modify_categories --src=special:shapes8 --dst modcats.json --rename eff:F,star:sun
        kwcoco modify_categories --src=special:shapes8 --dst modcats.json --remove eff,star
        kwcoco modify_categories --src=special:shapes8 --dst modcats.json --keep eff,
        kwcoco modify_categories --src=special:shapes8 --dst modcats.json --keep=[] --keep_annots=True
    """
    default = {
        'src': scfg.Value(None, position=1, help='Path to the coco dataset'),
        'dst': scfg.Value(None, help='Save the rebased dataset to a new file'),
        'keep_annots': scfg.Value(False, help=(
            'if False, removes annotations when categories are removed, '
            'otherwise the annotations category is simply unset')),
        'remove': scfg.Value(None, help=(
            'Category names to remove. Mutex with keep.')),
        'keep': scfg.Value(None, help=(
            'If specified, remove all other categories. Mutex with remove.')),
        'rename': scfg.Value(None, type=str, help=(
            'category mapping in the format. "old1:new1,old2:new2"')),
    }
class CLIConfig(scfg.Config): """ Validate that a coco file conforms to the json schema, that assets exist, and potentially fix corrupted assets by removing them. """ default = { 'src': scfg.Value(['special:shapes8'], nargs='+', help='path to datasets', position=1), 'schema': scfg.Value(True, help='If True check the json schema'), 'missing': scfg.Value(True, help='If True check if all assets (e.g. images) exist'), 'corrupted': scfg.Value(False, help='If True check the assets can be read'), 'fix': scfg.Value(None, help=ub.paragraph(''' Code indicating strategy to attempt to fix the dataset. If None, do nothing. If remove, removes missing / corrupted images. Other strategies may be added in the future. This is a hueristic and does not always work. dst must be specified. And only one src dataset can be given. ''')), 'dst': scfg.Value(None, help=ub.paragraph(''' Location to write a "fixed" coco file if a fix strategy is given. ''')) } epilog = """
# Minimal config exposing one string-typed option.
class TestConfig(scfg.Config):
    default = {
        'key': scfg.Value(None, type=str),
    }
class TemplateConfig(scfg.Config):
    default = {
        'repodir': scfg.Value(None, help='path to the new or existing repo', required=True),
        'repo_name': scfg.Value(None, help='repo name'),
        'setup_secrets': scfg.Value(False),
        'tags': scfg.Value([], nargs='*', help=ub.paragraph('''
            Tags modify what parts of the template are used.
            Valid tags are:
                "binpy" - do we build binpy wheels?
                "graphics" - do we need opencv / opencv-headless?
                "erotemic" - this is an erotemic repo
                "kitware" - this is an kitware repo
                "pyutils" - this is an pyutils repo
                "purepy" - this is a pure python repo
            ''')),
    }

    def normalize(self):
        """
        Coerce 'tags' into a flat list of strings, splitting any
        comma-separated entries (e.g. "binpy,erotemic").
        """
        if self['tags']:
            if isinstance(self['tags'], str):
                self['tags'] = [self['tags']]
            new = []
            for t in self['tags']:
                new.extend([p.strip() for p in t.split(',')])
            self['tags'] = new

    @classmethod
    def main(cls, cmdline=0, **kwargs):
        """
        Apply the repo template to the configured repodir and optionally
        set up CI secrets based on the org tag.

        Ignore:
            repodir = ub.Path('~/code/pyflann_ibeis').expand()
            kwargs = {
                'repodir': repodir,
                'tags': ['binpy', 'erotemic', 'github'],
            }
            cmdline = 0

        Example:
            repodir = ub.Path.appdir('pypkg/demo/my_new_repo')
            import sys, ubelt
            sys.path.append(ubelt.expandpath('~/misc/templates/PYPKG'))
            from apply_template import *  # NOQA
            kwargs = {
                'repodir': repodir,
            }
            cmdline = 0
        """
        import ubelt as ub
        config = TemplateConfig(cmdline=cmdline, data=kwargs)

        repo_dpath = ub.Path(config['repodir'])
        repo_dpath.ensuredir()

        # NOTE(review): new-repo creation is not implemented; this flag is
        # hard-coded off and only prints instructions.
        IS_NEW_REPO = 0

        create_new_repo_info = ub.codeblock('''
            # TODO:
            # At least instructions on how to create a new repo, or maybe an
            # API call
            https://github.com/new
            git init
            ''')
        print(create_new_repo_info)

        if IS_NEW_REPO:
            # TODO: git init
            # TODO: github or gitlab register
            pass

        # NOTE(review): 'self' here is a TemplateApplier, not a
        # TemplateConfig instance (main is a classmethod); the name is
        # shadowed deliberately or accidentally — confirm.
        self = TemplateApplier(config)
        self.setup().gather_tasks()
        self.setup().apply()

        if config['setup_secrets']:
            setup_secrets_fpath = self.repo_dpath / 'dev/setup_secrets.sh'
            # Pick the environ-export function and upload command based on
            # which org tag is present.
            if 'erotemic' in self.config['tags']:
                environ_export = 'setup_package_environs_github_erotemic'
                upload_secret_cmd = 'upload_github_secrets'
            elif 'pyutils' in self.config['tags']:
                environ_export = 'setup_package_environs_github_pyutils'
                upload_secret_cmd = 'upload_github_secrets'
            elif 'kitware' in self.config['tags']:
                environ_export = 'setup_package_environs_gitlab_kitware'
                upload_secret_cmd = 'upload_gitlab_repo_secrets'
            else:
                # No recognized org tag: cannot choose a secrets workflow.
                raise Exception
            import cmd_queue
            script = cmd_queue.Queue.create()
            script.submit(
                ub.codeblock(f'''
                    cd {self.repo_dpath}
                    source {setup_secrets_fpath}
                    {environ_export}
                    load_secrets
                    export_encrypted_code_signing_keys
                    git commit -am "Updated secrets"
                    {upload_secret_cmd}
                    '''))
            script.rprint()
class ConddaConfig(scfg.Config):
    """
    Default configuration for CONDDA protocol.

    Example:
        >>> from sail_on_client.protocol.condda_config import ConddaConfig
        >>> config = ConddaConfig()
        >>> print('config = {!r}'.format(config))
    """
    default = {
        # Protocol-level options
        "domain": scfg.Value("image_classification"),
        "test_ids": ["CONDDA.2.1.293"],
        "novelty_detector_class": scfg.Value("CONDDA_5_14_A1"),
        "seed": scfg.Value("seed"),
        "dataset_root": "",
        "feature_extraction_only": scfg.Value(False, help="Quit after feature extraction"),
        # Feature caching
        "save_features": scfg.Value(False, help="Save features as pkl file"),
        "use_saved_features": scfg.Value(False, help="Use features saved the pkl file"),
        "save_dir": scfg.Value("", help="Directory where features are saved"),
        # Attribute caching
        "save_attributes": scfg.Value(False, help="Flag to attributes in save dir"),
        "use_saved_attributes": scfg.Value(False, help="Use attributes saved in save dir"),
        "saved_attributes": {},
        "skip_stage": [],
        "hints": [],
        # Nested configuration passed to the novelty detector.
        # NOTE(review): keys like "number_of_unknown_to_crate_evm" and
        # "number_of_unknown_to_strat_clustering" look like typos
        # ("create"/"start") but are runtime keys consumers read — do not
        # rename without checking the detector code.
        "detector_config": {
            "efficientnet_params": {
                "model_path": "",
                "known_classes": 413
            },
            "evm_params": {
                "model_path": "",
                "known_feature_path": "",
                "tailsize": 33998,
                "cover_threshold": 0.7,
                "distance_multiplier": 0.55,
                "number_of_unknown_to_crate_evm": 3,
            },
            "dataloader_params": {
                "batch_size": 128,
                "num_workers": 3
            },
            "characterization_param": {
                "clustering_type": "FINCH",
                "number_of_unknown_to_strat_clustering": 20,
            },
            "csv_folder": "",
            "cores": 4,
            "detection_threshold": 0.5,
        },
    }
class DetectFitConfig(scfg.Config):
    default = {
        # Personal Preference
        'nice': scfg.Value(
            'untitled', help=(
                'a human readable tag for your experiment (we also keep a '
                'failsafe computer readable tag in case you update hyperparams, '
                'but forget to update this flag)')),

        # System Options
        'workdir': scfg.Path('~/work/detect', help='path where this script can dump stuff'),
        'workers': scfg.Value(0, help='number of DataLoader processes'),
        'xpu': scfg.Value('argv', help='a CUDA device or a CPU'),

        # Data (the hardest part of machine learning)
        'datasets': scfg.Value('special:shapes1024', help='special dataset key'),
        'train_dataset': scfg.Value(None, help='override train with a custom coco dataset'),
        'vali_dataset': scfg.Value(None, help='override vali with a custom coco dataset'),
        'test_dataset': scfg.Value(None, help='override test with a custom coco dataset'),

        # Dataset options
        'multiscale': False,
        'visible_thresh': scfg.Value(
            0.5, help='percentage of a box that must be visible to be included in truth'),
        'input_dims': scfg.Value((256, 256), help='size to '),
        'normalize_inputs': scfg.Value(
            False, help='if True, precompute training mean and std for data whitening'),
        'augment': scfg.Value(
            'simple', help='key indicating augmentation strategy',
            choices=['complex', 'simple', None]),
        'ovthresh': 0.5,

        # High level options
        'arch': scfg.Value('yolo2', help='network toplogy', choices=['yolo2']),
        'optim': scfg.Value('adam', help='torch optimizer', choices=['sgd', 'adam', 'adamw']),
        'batch_size': scfg.Value(
            4, help='number of images that run through the network at a time'),
        'bstep': scfg.Value(8, help='num batches before stepping'),
        'lr': scfg.Value(1e-3, help='learning rate'),  # 1e-4,
        'decay': scfg.Value(1e-5, help='weight decay'),
        'schedule': scfg.Value('step90', help='learning rate / momentum scheduler'),
        'max_epoch': scfg.Value(140, help='Maximum number of epochs'),
        'patience': scfg.Value(
            140, help='Maximum number of bad epochs on validation before stopping'),

        # Initialization
        'init': scfg.Value('imagenet', help='initialization strategy'),
        'pretrained': scfg.Path(help='path to a netharn deploy file'),

        # Loss Terms
        'focus': scfg.Value(0.0, help='focus for Focal Loss'),
    }

    def normalize(self):
        """
        Post-process parsed values: resolve string "null"/"None" to None,
        expand the special VOC dataset key into concrete paths, and map
        'imagenet'/'lightnet' init keys onto actual pretrained weights.
        """
        if self['pretrained'] in ['null', 'None']:
            self['pretrained'] = None

        if self['datasets'] == 'special:voc':
            self['train_dataset'] = ub.expandpath(
                '~/data/VOC/voc-trainval.mscoco.json')
            self['vali_dataset'] = ub.expandpath(
                '~/data/VOC/voc-test-2007.mscoco.json')

        # An explicit pretrained path takes precedence over the init key.
        key = self.get('pretrained', None) or self.get('init', None)

        if key == 'imagenet':
            self['pretrained'] = yolo2.initial_imagenet_weights()
        elif key == 'lightnet':
            self['pretrained'] = yolo2.demo_voc_weights()

        if self['pretrained'] is not None:
            self['init'] = 'pretrained'
# Minimal config exposing one variadic 'paths' option.
class TestConfig(scfg.Config):
    default = {
        'paths': scfg.Value(None, nargs='+'),
    }
class ConddaConfig(scfg.Config):
    """
    Default configuration for CONDDA protocol.

    Example:
        >>> from sail_on_client.protocol.condda_config import ConddaConfig
        >>> config = ConddaConfig()
        >>> print('config = {!r}'.format(config))
    """
    default = {
        # Protocol-level options
        "domain": scfg.Value("image_classification"),
        "test_ids": ["CONDDA.2.1.293"],
        "seed": scfg.Value("seed"),
        "dataset_root": "",
        "feature_extraction_only": scfg.Value(False, help="Quit after feature extraction"),
        # Feature caching
        "save_features": scfg.Value(False, help="Save features as pkl file"),
        "use_saved_features": scfg.Value(False, help="Use features saved the pkl file"),
        "use_consolidated_features": scfg.Value(False, help="Use features consolidated over multiple tests"),
        "save_dir": scfg.Value("", help="Directory where features are saved"),
        # Attribute caching
        "save_attributes": scfg.Value(False, help="Flag to attributes in save dir"),
        "use_saved_attributes": scfg.Value(False, help="Use attributes saved in save dir"),
        "saved_attributes": {},
        "skip_stage": [],
        "hints": [],
        # Session resumption
        "resume_session": scfg.Value(False, help="Flag to resume session"),
        "resumed_session_ids": {},
        # Nested per-detector configuration. NOTE(review): key typos such
        # as "number_of_unknown_to_crate_evm" are runtime keys read by the
        # detectors — do not rename without checking consumers.
        "detectors": {
            "has_baseline": False,
            "has_reaction_baseline": False,
            "baseline_class": None,
            "csv_folder": "",
            "cores": 6,
            "detection_threshold": 0.1,
            "detector_configs": {
                "CONDDA_5_14_A1": {
                    "feature_extractor_params": {
                        "backbone_weight_path": "",
                        "name": "i3d",
                        "arch": "i3d-50",
                        "graph_weight_path": "",
                        "model_name": "i3d",
                        "n_classes": 400,
                        "no_cuda": "False",
                        "hidden_dims": [512, 128],
                        "hidden": "True",
                        "in_dim": 1024,
                        "num_heads": [4, 1],
                        "sample_duration": 64,
                        "graph_classes": 88,
                        "mode": "feature",
                        "feature_type": "graph",
                    },
                    "evm_params": {
                        "weight_path": "",
                        "number_of_unknown_to_crate_evm": 7,
                    },
                    "characterization_params": {
                        "clustering_type": "FINCH",
                        "number_of_unknown_to_strat_clustering": 50,
                    },
                    "dataloader_params": {
                        "sample_size": 224,
                        "mean": [114.7748, 107.7354, 99.4750],
                        "sample_duration": 64,
                        "batch_size": 1,
                        "n_threads": 6,
                        "n_classes": 88,
                    },
                },
            },
        },
        # Harness / evaluation-server connection settings
        "harness_config": {
            "url": "http://3.32.8.161:5001/",
            "data_location": "",
            "data_dir": "",
            "gt_dir": "",
            "gt_config": "",
        },
    }
class OndConfig(scfg.Config):
    """
    Default configuration for Ond protocol.

    Example:
        >>> from learn.protocol.learn_config import LearnConfig
        >>> config = LearnConfig()
        >>> print('config = {!r}'.format(config))
    """
    default = {
        # Protocol-level options
        "domain": scfg.Value("image_classification"),
        "test_ids": ["OND.1.1.293"],
        "seed": scfg.Value("seed"),
        # NOTE(review): hard-coded user-specific default path — confirm
        # whether this should be "" like the other configs.
        "dataset_root": "/home/eric/sail-on/images",
        "feature_extraction_only": scfg.Value(False, help="Quit after feature extraction"),
        # Feedback options
        "use_feedback": scfg.Value(False, help="Use feedback for the run"),
        "feedback_type": scfg.Value("classification", help="Type of feedback"),
        # Feature caching
        "save_features": scfg.Value(False, help="Save features as pkl file"),
        "use_saved_features": scfg.Value(False, help="Use features saved the pkl file"),
        "use_consolidated_features": scfg.Value(False, help="Use features consolidated over multiple tests"),
        "save_dir": scfg.Value("", help="Directory where features are saved"),
        # Attribute caching
        "save_attributes": scfg.Value(False, help="Flag to attributes in save dir"),
        "use_saved_attributes": scfg.Value(False, help="Use attributes saved in save dir"),
        "save_elementwise": scfg.Value(False, help="Save attributes elementwise"),
        # Evaluation flags
        "is_eval_enabled": scfg.Value(False, help="Flag to enable evaluate"),
        "is_eval_roundwise_enabled": scfg.Value(False, help="Flag to enable roundwise evaluate"),
        "saved_attributes": {},
        "skip_stage": [],
        "hints": [],
        # Session resumption
        "resume_session": scfg.Value(False, help="Flag to resume session"),
        "resumed_session_ids": {},
        # Nested per-detector configuration
        "detectors": {
            "has_baseline": False,
            "has_reaction_baseline": False,
            "baseline_class": None,
            "csv_folder": "",
            "cores": 6,
            "detection_threshold": 0.1,
            "detector_configs": {
                "gae_kl_nd": {
                    "feature_extractor_params": {
                        "backbone_weight_path": "",
                        "name": "i3d",
                        "arch": "i3d-50",
                        "graph_weight_path": "",
                        "model_name": "i3d",
                        "n_classes": 400,
                        "no_cuda": "False",
                        "hidden_dims": [512, 128],
                        "hidden": "True",
                        "in_dim": 1024,
                        "num_heads": [4, 1],
                        "sample_duration": 64,
                        "graph_classes": 88,
                        "mode": "feature",
                        "feature_type": "graph",
                    },
                    "kl_params": {
                        "window_size": 100,
                        "mu_train": 1.0,
                        "sigma_train": 0.0057400320777888664,
                        "KL_threshold": 5.830880886275709,
                    },
                    "evm_params": {
                        "weight_path": "",
                        "number_of_unknown_to_crate_evm": 7,
                    },
                    "characterization_params": {
                        "clustering_type": "FINCH",
                        "number_of_unknown_to_strat_clustering": 50,
                    },
                    "dataloader_params": {
                        "sample_size": 224,
                        "mean": [114.7748, 107.7354, 99.4750],
                        "sample_duration": 64,
                        "batch_size": 1,
                        "n_threads": 6,
                        "n_classes": 88,
                    },
                },
                # Baseline detector shares only the feature extractor.
                "baseline_i3d": {
                    "feature_extractor_params": {
                        "backbone_weight_path": "",
                        "name": "i3d",
                        "arch": "i3d-50",
                        "graph_weight_path": "",
                        "model_name": "i3d",
                        "n_classes": 400,
                        "no_cuda": "False",
                        "hidden_dims": [512, 128],
                        "hidden": "True",
                        "in_dim": 1024,
                        "num_heads": [4, 1],
                        "sample_duration": 64,
                        "graph_classes": 88,
                        "mode": "feature",
                        "feature_type": "graph",
                    },
                },
            },
        },
        # Harness / evaluation-server connection settings
        "harness_config": {
            "url": "http://3.32.8.161:5001/",
            "data_location": "",
            "data_dir": "",
            "gt_dir": "",
            "gt_config": "",
        },
    }
class SegmentationConfig(scfg.Config):
    """
    Default configuration for setting up a training session
    """
    default = {
        # Bookkeeping
        'nice': scfg.Path('untitled', help='A human readable tag that is "nice" for humans'),
        'workdir': scfg.Path('~/work/camvid', help='Dump all results in your workdir'),

        # System options
        'workers': scfg.Value(0, help='number of parallel dataloading jobs'),
        'xpu': scfg.Value('argv', help='See netharn.XPU for details. can be cpu/gpu/cuda0/0,1,2,3)'),

        # Data and augmentation
        'augment': scfg.Value('simple', help='type of training dataset augmentation'),
        'class_weights': scfg.Value('log-median-idf', help='how to weight inbalanced classes'),
        # 'class_weights': scfg.Value(None, help='how to weight inbalanced classes'),
        'datasets': scfg.Value('special:camvid', help='Eventually you may be able to sepcify a coco file'),
        'train_dataset': scfg.Value(None),
        'vali_dataset': scfg.Value(None),

        # Model and optimization
        'arch': scfg.Value('psp', help='Network architecture code'),
        'optim': scfg.Value('adamw', help='Weight optimizer. Can be SGD, ADAM, ADAMW, etc..'),
        'input_dims': scfg.Value((128, 128), help='Window size to input to the network'),
        'input_overlap': scfg.Value(0.25, help='amount of overlap when creating a sliding window dataset'),
        'batch_size': scfg.Value(4, help='number of items per batch'),
        'bstep': scfg.Value(1, help='number of batches before a gradient descent step'),
        'max_epoch': scfg.Value(140, help='Maximum number of epochs'),
        'patience': scfg.Value(140, help='Maximum "bad" validation epochs before early stopping'),
        'lr': scfg.Value(1e-3, help='Base learning rate'),
        'decay': scfg.Value(1e-5, help='Base weight decay'),
        'focus': scfg.Value(2.0, help='focus for focal loss'),
        'schedule': scfg.Value('step90', help=('Special coercable netharn code. Eg: onecycle50, step50, gamma')),
        'init': scfg.Value('kaiming_normal', help='How to initialized weights. (can be a path to a pretrained model)'),
        'pretrained': scfg.Path(help=('alternative way to specify a path to a pretrained model')),
    }

    def normalize(self):
        # Treat the strings "null"/"None" as a real None, and force the
        # init strategy to 'pretrained' whenever a pretrained path is set.
        if self['pretrained'] in ['null', 'None']:
            self['pretrained'] = None

        if self['pretrained'] is not None:
            self['init'] = 'pretrained'