def add_argument(parser: argparse.ArgumentParser, model_name: str = None,
                 model: Union[str, Model] = None, config: Config = config,
                 class_dict: dict[str, type[Model]] = {}):
    """Register model-specific CLI options on *parser*.

    Resolution order for names: explicit argument > module object >
    command line (``get_name``) > config default. The resolved model
    class then contributes its own arguments to a new argument group.

    :param parser: parser to extend.
    :param model_name: optional explicit model name.
    :param model: optional model instance/name to derive the name from.
    :param config: configuration provider (project ``Config``).
    :param class_dict: registry mapping class names to Model subclasses.
    :returns: whatever ``ModelType.add_argument(group)`` returns.
    """
    dataset_name = get_name(arg_list=['-d', '--dataset'])
    if dataset_name is None:
        dataset_name = config.get_full_config()['dataset']['default_dataset']
    model_name = get_name(name=model_name, module=model,
                          arg_list=['-m', '--model'])
    if model_name is None:
        model_name = config.get_config(
            dataset_name=dataset_name)['model']['default_model']
    # Fix: the group description must show the *model* name. The original
    # overwrote model_name with get_model_class(...) before building the
    # group (so the description showed the class name) and then called
    # get_model_class a second time on that already-resolved class name.
    group = parser.add_argument_group('{yellow}model{reset}'.format(**ansi),
                                      description=model_name)
    model_class_name = get_model_class(model_name, class_dict=class_dict)
    try:
        ModelType = class_dict[model_class_name]
    except KeyError as e:
        # Print the available keys so a misspelled name is easy to diagnose.
        print(f'{model_class_name} not in \n{list(class_dict.keys())}')
        raise e
    return ModelType.add_argument(group)
def create(model_name: str = None, model: Union[str, Model] = None,
           folder_path: str = None,
           dataset_name: str = None, dataset: Union[str, Dataset] = None,
           config: Config = config,
           class_dict: dict[str, type[Model]] = {}, **kwargs) -> Model:
    """Instantiate the model selected by name/module/CLI/config default.

    :param model_name: explicit model name (falls back to module, CLI,
        then the dataset config's ``default_model``).
    :param model: optional model instance/name used for name resolution.
    :param folder_path: model directory; derived from ``model_dir`` and
        the dataset when omitted and *dataset* is a ``Dataset``.
    :param dataset_name: explicit dataset name (falls back like above).
    :param dataset: dataset instance/name.
    :param config: configuration provider.
    :param class_dict: registry mapping model names to Model subclasses.
    :param kwargs: overrides merged into the model config section.
    :returns: constructed ``Model``.
    :raises KeyError: when the resolved name is not in *class_dict*.
    """
    dataset_name = get_name(name=dataset_name, module=dataset,
                            arg_list=['-d', '--dataset'])
    model_name = get_name(name=model_name, module=model,
                          arg_list=['-m', '--model'])
    if dataset_name is None:
        dataset_name = config.get_full_config()['dataset']['default_dataset']
    result = config.get_config(
        dataset_name=dataset_name)['model']._update(kwargs)
    if model_name is None:
        model_name = result['default_model']
    try:
        ModelType: type[Model] = class_dict[model_name]
    except KeyError as e:
        # Consistency fix: like the sibling factories, print the available
        # keys before re-raising so a bad name is easy to diagnose.
        print(f'{model_name} not in \n{list(class_dict.keys())}')
        raise e
    if folder_path is None and isinstance(dataset, Dataset):
        folder_path = os.path.join(result['model_dir'],
                                   dataset.data_type, dataset.name)
    return ModelType(name=model_name, dataset=dataset,
                     folder_path=folder_path, **result)
def add_argument(parser: argparse.ArgumentParser, dataset_name: str = None,
                 dataset: Union[str, Dataset] = None, config: Config = config,
                 class_dict: dict[str, type[Dataset]] = {}
                 ) -> argparse._ArgumentGroup:
    """Register dataset-specific CLI options on *parser*.

    Resolves the dataset name (explicit argument > module object >
    command line > config default) and lets the matching Dataset class
    add its own arguments to a new group.

    :returns: result of ``DatasetType.add_argument(group)``.
    :raises KeyError: when the resolved name is not in *class_dict*.
    """
    dataset_name = get_name(name=dataset_name, module=dataset,
                            arg_list=['-d', '--dataset'])
    if dataset_name is None:
        dataset_name = config.get_full_config()['dataset']['default_dataset']
    group = parser.add_argument_group('{yellow}dataset{reset}'.format(**ansi),
                                      description=dataset_name)
    try:
        DatasetType = class_dict[dataset_name]
    except KeyError as e:
        # Consistency fix: match the sibling add_argument helpers, which
        # print the known keys before re-raising.
        print(f'{dataset_name} not in \n{list(class_dict.keys())}')
        raise e
    return DatasetType.add_argument(group)
def create(dataset_name: str = None, dataset: str = None,
           folder_path: str = None, config: Config = config,
           class_dict: dict[str, type[Dataset]] = {}, **kwargs) -> Dataset:
    """Instantiate the dataset selected by name/module/CLI/config default.

    :param dataset_name: explicit dataset name (falls back to module,
        CLI, then the config's ``default_dataset``).
    :param dataset: optional dataset name used for name resolution.
    :param folder_path: data directory; derived from ``data_dir`` plus
        the class's ``data_type``/``name`` when omitted.
    :param config: configuration provider.
    :param class_dict: registry mapping dataset names to Dataset classes.
    :param kwargs: overrides merged into the dataset config section.
    :returns: constructed ``Dataset``.
    :raises KeyError: when the resolved name is not in *class_dict*.
    """
    dataset_name = get_name(name=dataset_name, module=dataset,
                            arg_list=['-d', '--dataset'])
    if dataset_name is None:
        dataset_name = config.get_full_config()['dataset']['default_dataset']
    result = config.get_config(
        dataset_name=dataset_name)['dataset']._update(kwargs)
    try:
        DatasetType = class_dict[dataset_name]
    except KeyError as e:
        # Consistency fix: like the other dataset factory, print the known
        # keys before re-raising so a misspelled name is easy to diagnose.
        print(f'{dataset_name} not in \n{list(class_dict.keys())}')
        raise e
    if folder_path is None:
        folder_path = os.path.join(result['data_dir'],
                                   DatasetType.data_type, DatasetType.name)
    return DatasetType(folder_path=folder_path, **result)
def create(config_path: str = None, dataset_name: str = None,
           dataset: str = None, seed: int = None, benchmark: bool = None,
           config: Config = config, cache_threshold: float = None,
           verbose: int = None, color: bool = None, tqdm: bool = None,
           **kwargs) -> Env:
    """Initialize the global runtime environment.

    Loads the env section of the config, seeds every RNG (stdlib,
    NumPy, PyTorch CPU and CUDA), resolves the compute device, and
    writes the results into the module-level ``env`` mapping, which is
    also returned.

    NOTE(review): mutates global state (``env``, RNG seeds,
    ``torch.backends.cudnn.benchmark``) — not safe to call concurrently.
    ``**kwargs`` is accepted but never read here; presumably kept for
    signature compatibility with the other ``create`` factories —
    TODO confirm.
    """
    # Explicit keyword overrides for the env config section; None values
    # are passed through and resolved by ``_update`` / config defaults.
    other_kwargs = {
        'cache_threshold': cache_threshold,
        'verbose': verbose,
        'color': color,
        'tqdm': tqdm
    }
    config.update_cmd(config_path)
    # Name resolution: explicit argument > module object > command line
    # > config default.
    dataset_name = get_name(name=dataset_name, module=dataset,
                            arg_list=['-d', '--dataset'])
    dataset_name = dataset_name if dataset_name is not None \
        else config.get_full_config()['dataset']['default_dataset']
    result = config.get_config(
        dataset_name=dataset_name)['env']._update(other_kwargs)
    env.update(config_path=config_path, **result)
    # Fall back to a previously stored seed; then seed every RNG family
    # so runs are reproducible across stdlib/NumPy/PyTorch.
    if seed is None and 'seed' in env.keys():
        seed = env['seed']
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    num_gpus: int = torch.cuda.device_count()
    device = result['device']
    if device == 'none':
        # Sentinel string meaning "no device selection at all".
        device = None
    else:
        # 'auto' (or unset) prefers CUDA when any GPU is visible.
        if device is None or device == 'auto':
            device = 'cuda' if num_gpus else 'cpu'
        if isinstance(device, str):
            device = torch.device(device)
        if device.type == 'cpu':
            num_gpus = 0
        # An explicit device index (e.g. 'cuda:1') pins the run to a
        # single GPU.
        if device.index is not None and torch.cuda.is_available():
            num_gpus = 1
    # benchmark falls back to a stored value; cuDNN autotuning is only
    # touched when it ends up truthy.
    if benchmark is None and 'benchmark' in env.keys():
        benchmark = env['benchmark']
    if benchmark:
        torch.backends.cudnn.benchmark = benchmark
    env.update(seed=seed, device=device,
               benchmark=benchmark, num_gpus=num_gpus)
    return env
def add_argument(parser: argparse.ArgumentParser, model_name: str = None,
                 model: Union[str, Model] = None, config: Config = config,
                 class_dict: dict[str, type[Model]] = None
                 ) -> argparse._ArgumentGroup:
    """Register model-specific CLI options on *parser*.

    Resolves dataset and model names (explicit argument > module object >
    command line > config default) and lets the matching Model class add
    its own arguments to a new group.

    :param class_dict: registry mapping model names to Model subclasses;
        effectively required (the default ``None`` cannot satisfy the
        lookup below).
    :raises KeyError: when the resolved name is not in *class_dict*.
    """
    dataset_name = get_name(arg_list=['-d', '--dataset'])
    if dataset_name is None:
        dataset_name = config.get_full_config()['dataset']['default_dataset']
    model_name = get_name(name=model_name, module=model,
                          arg_list=['-m', '--model'])
    if model_name is None:
        model_name = config.get_config(
            dataset_name=dataset_name)['model']['default_model']
    group = parser.add_argument_group('{yellow}model{reset}'.format(**ansi),
                                      description=model_name)
    try:
        ModelType = class_dict[model_name]
    except KeyError as e:
        # Consistency fix: match the sibling helpers, which print the
        # known keys before re-raising.
        print(f'{model_name} not in \n{list(class_dict.keys())}')
        raise e
    return ModelType.add_argument(group)
def add_argument(parser: argparse.ArgumentParser, dataset_name: str = None,
                 dataset: Union[str, Dataset] = None, config: Config = config,
                 class_dict: dict[str, type[Dataset]] = {}):
    """Attach the selected dataset's CLI options to *parser*.

    The dataset name comes from (in order) the explicit argument, the
    module object, the command line, or the config's default. The
    matching Dataset class then populates a dedicated argument group.
    """
    dataset_name = get_name(name=dataset_name, module=dataset,
                            arg_list=['-d', '--dataset'])
    if dataset_name is None:
        dataset_name = config.get_full_config()['dataset']['default_dataset']
    title = '{yellow}dataset{reset}'.format(**ansi)
    group = parser.add_argument_group(title, description=dataset_name)
    try:
        DatasetType = class_dict[dataset_name]
    except KeyError as err:
        # Surface the valid choices before propagating the failure.
        print(f'{dataset_name} not in \n{list(class_dict.keys())}')
        raise err
    return DatasetType.add_argument(group)
def create(model_name: str = None, model: Union[str, Model] = None,
           dataset_name: str = None, dataset: Union[str, Dataset] = None,
           folder_path: str = None, config: Config = config,
           class_dict: dict[str, type[Model]] = {}, **kwargs) -> Model:
    """Instantiate the model selected by name/module/CLI/config default.

    Validates the resolved name against ``get_available_models`` before
    looking up the implementing class via ``get_model_class``.

    :param folder_path: model directory; derived from ``model_dir`` and
        the dataset when omitted and *dataset* is a ``Dataset``.
    :param class_dict: registry mapping class names to Model subclasses.
    :param kwargs: overrides merged into the model config section.
    :raises AssertionError: when the name is not an available model.
    :raises KeyError: when the class name is not in *class_dict*.
    """
    dataset_name = get_name(name=dataset_name, module=dataset,
                            arg_list=['-d', '--dataset'])
    model_name = get_name(name=model_name, module=model,
                          arg_list=['-m', '--model'])
    if dataset_name is None:
        dataset_name = config.get_full_config()['dataset']['default_dataset']
    if model_name is None:
        model_name = config.get_config(
            dataset_name=dataset_name)['model']['default_model']
    result = config.get_config(
        dataset_name=dataset_name)['model'].update(kwargs)
    # Fix: removed the dead `model_name = model_name if ... else
    # result['default_model']` reassignment — model_name is already
    # guaranteed non-None by the branch above.
    name_list = sorted(
        name
        for sub_list in get_available_models(class_dict=class_dict).values()
        for name in sub_list)
    assert model_name in name_list, f'{model_name} not in \n{name_list}'
    model_class_name = get_model_class(model_name, class_dict=class_dict)
    try:
        ModelType = class_dict[model_class_name]
    except KeyError as e:
        # Print the known keys so a registry mismatch is easy to diagnose.
        print(f'{model_class_name} not in \n{list(class_dict.keys())}')
        raise e
    if folder_path is None and isinstance(dataset, Dataset):
        folder_path = os.path.join(result['model_dir'],
                                   dataset.data_type, dataset.name)
    return ModelType(name=model_name, dataset=dataset,
                     folder_path=folder_path, **result)
def create(attack_name: str = None, attack: Union[str, Attack] = None,
           folder_path: str = None,
           dataset_name: str = None, dataset: Union[str, Dataset] = None,
           model_name: str = None, model: Union[str, Model] = None,
           config: Config = config,
           class_dict: dict[str, type[Attack]] = {}, **kwargs):
    """Build the attack selected by name/module/CLI arguments.

    Configuration is layered: attack-wide defaults, then the
    attack-specific section, then caller-supplied ``kwargs``. When no
    *folder_path* is given, one is assembled under ``attack_dir`` from
    the dataset, model, and attack names.
    """
    # Resolve each name: explicit argument > module object > command line.
    dataset_name = get_name(name=dataset_name, module=dataset,
                            arg_list=['-d', '--dataset'])
    model_name = get_name(name=model_name, module=model,
                          arg_list=['-m', '--model'])
    attack_name = get_name(name=attack_name, module=attack,
                           arg_list=['--attack'])
    if dataset_name is None:
        dataset_name = config.get_full_config()['dataset']['default_dataset']
    dataset_config = config.get_config(dataset_name=dataset_name)
    general_config = dataset_config['attack']
    specific_config = dataset_config[attack_name]
    result = general_config.update(specific_config).update(kwargs)
    try:
        AttackType = class_dict[attack_name]
    except KeyError as err:
        # Surface the valid choices before propagating the failure.
        print(f'{attack_name} not in \n{list(class_dict.keys())}')
        raise err
    if folder_path is None:
        # Default location: attack_dir[/data_type/dataset][/model]/attack.
        folder_path = result['attack_dir']
        if isinstance(dataset, Dataset):
            folder_path = os.path.join(folder_path,
                                       dataset.data_type, dataset.name)
        if model_name is not None:
            folder_path = os.path.join(folder_path, model_name)
        folder_path = os.path.join(folder_path, AttackType.name)
    return AttackType(name=attack_name, dataset=dataset, model=model,
                      folder_path=folder_path, **result)
def create(defense_name: str = None, defense: Union[str, Defense] = None,
           folder_path: str = None,
           dataset_name: str = None, dataset: Union[str, Dataset] = None,
           model_name: str = None, model: Union[str, Model] = None,
           config: Config = config,
           class_dict: dict[str, type[Defense]] = {}, **kwargs) -> Defense:
    """Build the defense selected by name/module/CLI arguments.

    Configuration is layered: defense-wide defaults, then the
    defense-specific section, then caller-supplied ``kwargs``. When no
    *folder_path* is given, one is assembled under ``defense_dir`` from
    the dataset, model, and defense names.
    """
    # Resolve each name: explicit argument > module object > command line.
    dataset_name = get_name(name=dataset_name, module=dataset,
                            arg_list=['-d', '--dataset'])
    model_name = get_name(name=model_name, module=model,
                          arg_list=['-m', '--model'])
    defense_name = get_name(name=defense_name, module=defense,
                            arg_list=['--defense'])
    if dataset_name is None:
        dataset_name = config.get_full_config()['dataset']['default_dataset']
    dataset_config = config.get_config(dataset_name=dataset_name)
    general_config = dataset_config['defense']
    specific_config = dataset_config[defense_name]
    result = general_config._update(specific_config)._update(
        kwargs)  # TODO: linting issues
    DefenseType: type[Defense] = class_dict[defense_name]
    if folder_path is None:
        # Default location: defense_dir[/data_type/dataset][/model]/defense.
        folder_path = result['defense_dir']
        if isinstance(dataset, Dataset):
            folder_path = os.path.join(folder_path,
                                       dataset.data_type, dataset.name)
        if model_name is not None:
            folder_path = os.path.join(folder_path, model_name)
        folder_path = os.path.join(folder_path, DefenseType.name)
    return DefenseType(name=defense_name, dataset=dataset, model=model,
                       folder_path=folder_path, **result)
def create(dataset_name: str = None, dataset: str = None,
           folder_path: str = None, config: Config = config,
           class_dict: dict[str, type[Dataset]] = {}, **kwargs):
    """Build the dataset selected by name/module/CLI/config default.

    The dataset config section is merged with caller ``kwargs``; when no
    *folder_path* is given, one is derived from ``data_dir`` plus the
    class's ``data_type`` and ``name`` attributes.
    """
    # Resolve the name: explicit argument > module object > command line.
    dataset_name = get_name(name=dataset_name, module=dataset,
                            arg_list=['-d', '--dataset'])
    if dataset_name is None:
        dataset_name = config.get_full_config()['dataset']['default_dataset']
    result = config.get_config(
        dataset_name=dataset_name)['dataset'].update(kwargs)
    try:
        DatasetType = class_dict[dataset_name]
    except KeyError as err:
        # Surface the valid choices before propagating the failure.
        print(f'{dataset_name} not in \n{list(class_dict.keys())}')
        raise err
    if folder_path is None:
        folder_path = os.path.join(result['data_dir'],
                                   DatasetType.data_type, DatasetType.name)
    return DatasetType(folder_path=folder_path, **result)