def create(model_name: str = None, model: Union[str, ImageModel] = None,
           layer: int = None, dataset_name: str = None,
           dataset: Union[str, ImageSet] = None, config: Config = config,
           class_dict: dict[str, type[ImageModel]] = class_dict,
           **kwargs) -> ImageModel:
    """Create an image model.

    Resolves the dataset and model names (explicit argument > CLI flag >
    config default), splits a trailing layer count out of the model name,
    and delegates construction to ``trojanzoo.models.create``.
    """
    # Dataset name: explicit arg / module, else CLI, else config default.
    dataset_name = get_name(name=dataset_name, module=dataset,
                            arg_list=['-d', '--dataset'])
    dataset_name = (dataset_name if dataset_name is not None
                    else config.get_full_config()['dataset']['default_dataset'])
    # Model name, resolved against the chosen dataset's config.
    model_name = get_name(name=model_name, module=model,
                          arg_list=['-m', '--model'])
    model_name = (model_name if model_name is not None
                  else config.get_config(
                      dataset_name=dataset_name)['model']['default_model'])
    # Names like 'resnet18' may carry the layer number; split it out.
    model_name, layer = split_name(model_name, layer=layer)
    return trojanzoo.models.create(model_name=model_name, model=model,
                                   dataset_name=dataset_name, dataset=dataset,
                                   config=config, class_dict=class_dict,
                                   layer=layer, **kwargs)
def add_argument(parser: argparse.ArgumentParser, model_name: str = None,
                 model: Union[str, Model] = None, config: Config = config,
                 class_dict: dict[str, type[Model]] = {}):
    """Add model-specific arguments to ``parser``.

    Resolves the model name from args/CLI/config default, creates an
    argument group titled with the model name, and lets the resolved
    model class populate it.

    Fix: the original overwrote ``model_name`` with its class name
    (via ``get_model_class``) *before* using it as the group description,
    and then applied ``get_model_class`` a second time to the
    already-mapped name. The description now shows the actual model name
    and the mapping happens exactly once.
    """
    dataset_name = get_name(arg_list=['-d', '--dataset'])
    if dataset_name is None:
        dataset_name = config.get_full_config()['dataset']['default_dataset']
    model_name = get_name(name=model_name, module=model,
                          arg_list=['-m', '--model'])
    if model_name is None:
        model_name = config.get_config(
            dataset_name=dataset_name)['model']['default_model']
    group = parser.add_argument_group('{yellow}model{reset}'.format(**ansi),
                                      description=model_name)
    # Map e.g. a versioned model name onto its registered class key.
    model_class_name = get_model_class(model_name, class_dict=class_dict)
    try:
        ModelType = class_dict[model_class_name]
    except KeyError as e:
        # Show the available keys so a typo is easy to spot.
        print(f'{model_class_name} not in \n{list(class_dict.keys())}')
        raise e
    return ModelType.add_argument(group)
def create(model_name: str = None, model: Union[str, ImageModel] = None,
           folder_path: str = None, dataset_name: str = None,
           dataset: Union[str, ImageSet] = None, config: Config = config,
           class_dict: dict[str, type[ImageModel]] = class_dict,
           **kwargs) -> ImageModel:
    """Instantiate an :class:`ImageModel` from names, config and kwargs.

    Resolves dataset/model names (arg > CLI > config default), merges the
    model config section with ``kwargs``, derives a default ``folder_path``
    from the dataset when one is not given, and constructs the class.
    """
    dataset_name = get_name(name=dataset_name, module=dataset,
                            arg_list=['-d', '--dataset'])
    model_name = get_name(name=model_name, module=model,
                          arg_list=['-m', '--model'])
    if dataset_name is None:
        dataset_name = config.get_full_config()['dataset']['default_dataset']
    if model_name is None:
        model_name = config.get_config(
            dataset_name=dataset_name)['model']['default_model']
    result = config.get_config(
        dataset_name=dataset_name)['model']._update(kwargs)
    # NOTE(review): this fallback only fires if the config default above was
    # itself None; looks redundant otherwise — confirm before removing.
    if model_name is None:
        model_name = result['default_model']
    ModelType: type[ImageModel] = class_dict[get_model_class(model_name)]
    if folder_path is None and isinstance(dataset, ImageSet):
        folder_path = os.path.join(result['model_dir'],
                                   dataset.data_type, dataset.name)
    return ModelType(name=model_name, dataset=dataset,
                     folder_path=folder_path, **result)
def add_argument(parser: argparse.ArgumentParser, model_name: str = None,
                 model: Union[str, Model] = None, config: Config = config,
                 class_dict: dict[str, type[Model]] = None
                 ) -> argparse._ArgumentGroup:
    """Add model-specific arguments to ``parser``.

    Resolves the model name from args/CLI/config default and delegates
    argument registration to the resolved model class.

    Fix: a bare ``class_dict[model_name]`` raised an uninformative
    ``KeyError``; now, consistent with the other factories in this file,
    the available keys are printed before re-raising.
    """
    dataset_name = get_name(arg_list=['-d', '--dataset'])
    if dataset_name is None:
        dataset_name = config.get_full_config()['dataset']['default_dataset']
    model_name = get_name(name=model_name, module=model,
                          arg_list=['-m', '--model'])
    if model_name is None:
        model_name = config.get_config(
            dataset_name=dataset_name)['model']['default_model']
    group = parser.add_argument_group('{yellow}model{reset}'.format(**ansi),
                                      description=model_name)
    try:
        ModelType = class_dict[model_name]
    except KeyError as e:
        # Show the available keys so a typo is easy to spot.
        print(f'{model_name} not in \n{list(class_dict.keys())}')
        raise e
    return ModelType.add_argument(group)
def create(dataset_name: str = None, dataset: Dataset = None,
           model: Model = None, tensorboard: bool = None,
           config: Config = config, **kwargs
           ) -> tuple[Optimizer, _LRScheduler, dict]:
    """Build a :class:`Trainer` from the trainer config section.

    Each merged config entry is routed to whichever signature accepts it:
    ``model.define_optimizer``, ``model._train``, or
    ``SummaryWriter.__init__`` — first match wins, unmatched keys are
    dropped. A writer is only created when ``tensorboard`` is truthy.
    """
    assert isinstance(model, Model)
    dataset_name = get_name(name=dataset_name, module=dataset,
                            arg_list=['-d', '--dataset'])
    result = config.get_config(
        dataset_name=dataset_name)['trainer']._update(kwargs)
    # (bucket, accepted-parameter-names) pairs, checked in priority order.
    optim_args: dict = {}
    train_args: dict = {}
    writer_args: dict = {}
    routing = (
        (optim_args, model.define_optimizer.__code__.co_varnames),
        (train_args, model._train.__code__.co_varnames),
        # SummaryWriter accepts log_dir, flush_secs, ...
        (writer_args, SummaryWriter.__init__.__code__.co_varnames),
    )
    for key, value in result.items():
        for bucket, accepted in routing:
            if key in accepted:
                bucket[key] = value
                break
    optimizer, lr_scheduler = model.define_optimizer(**optim_args)
    writer = SummaryWriter(**writer_args) if tensorboard else None
    return Trainer(optim_args=optim_args, train_args=train_args,
                   optimizer=optimizer, lr_scheduler=lr_scheduler,
                   writer=writer)
def add_argument(parser: argparse.ArgumentParser, dataset_name: str = None,
                 dataset: Union[str, Dataset] = None, config: Config = config,
                 class_dict: dict[str, type[Dataset]] = {}
                 ) -> argparse._ArgumentGroup:
    """Add dataset-specific arguments to ``parser``.

    Resolves the dataset name (arg > CLI > config default), creates an
    argument group titled with it, and lets the dataset class fill it in.
    """
    dataset_name = get_name(name=dataset_name, module=dataset,
                            arg_list=['-d', '--dataset'])
    if dataset_name is None:
        dataset_name = config.get_full_config()['dataset']['default_dataset']
    group = parser.add_argument_group(
        '{yellow}dataset{reset}'.format(**ansi), description=dataset_name)
    return class_dict[dataset_name].add_argument(group)  # TODO: Linting problem
def create(dataset_name: str = None, dataset: Dataset = None,
           model: Model = None, config: Config = config,
           **kwargs) -> tuple[Optimizer, _LRScheduler, dict]:
    """Build a :class:`Trainer` from the trainer config section.

    Merged config entries are routed to whichever signature accepts
    them — ``model.define_optimizer`` first, then ``model._train``;
    unmatched keys are silently skipped.
    """
    assert isinstance(model, Model)
    dataset_name = get_name(name=dataset_name, module=dataset,
                            arg_list=['-d', '--dataset'])
    result = config.get_config(
        dataset_name=dataset_name)['trainer']._update(kwargs)
    optim_args: dict = {}
    train_args: dict = {}
    routing = ((model.define_optimizer.__code__.co_varnames, optim_args),
               (model._train.__code__.co_varnames, train_args))
    for key, value in result.items():
        for accepted, bucket in routing:
            if key in accepted:
                bucket[key] = value
                break
        # unmatched keys are intentionally ignored (no KeyError)
    optimizer, lr_scheduler = model.define_optimizer(**optim_args)
    return Trainer(optim_args=optim_args, train_args=train_args,
                   optimizer=optimizer, lr_scheduler=lr_scheduler)
def create(model_name: str = None, model: Union[str, Model] = None,
           dataset_name: str = None, dataset: Union[str, Dataset] = None,
           folder_path: str = None, config: Config = config,
           class_dict: dict[str, type[Model]] = {}, **kwargs) -> Model:
    """Instantiate a :class:`Model` from names, config and kwargs.

    Resolves dataset/model names (arg > CLI > config default), validates
    the model name against the registered models, merges the model config
    section with ``kwargs``, derives a default ``folder_path`` from the
    dataset when one is not given, and constructs the class.
    """
    dataset_name = get_name(name=dataset_name, module=dataset,
                            arg_list=['-d', '--dataset'])
    model_name = get_name(name=model_name, module=model,
                          arg_list=['-m', '--model'])
    if dataset_name is None:
        dataset_name = config.get_full_config()['dataset']['default_dataset']
    if model_name is None:
        model_name = config.get_config(
            dataset_name=dataset_name)['model']['default_model']
    result = config.get_config(
        dataset_name=dataset_name)['model'].update(kwargs)
    if model_name is None:
        model_name = result['default_model']
    # Flatten the {class: [model names]} registry and validate the name.
    name_list = sorted(
        name
        for sub_list in get_available_models(class_dict=class_dict).values()
        for name in sub_list)
    assert model_name in name_list, f'{model_name} not in \n{name_list}'
    model_class_name = get_model_class(model_name, class_dict=class_dict)
    try:
        ModelType = class_dict[model_class_name]
    except KeyError as e:
        print(f'{model_class_name} not in \n{list(class_dict.keys())}')
        raise e
    if folder_path is None and isinstance(dataset, Dataset):
        folder_path = os.path.join(result['model_dir'],
                                   dataset.data_type, dataset.name)
    return ModelType(name=model_name, dataset=dataset,
                     folder_path=folder_path, **result)
def create(dataset_name: str = None, dataset: str = None,
           folder_path: str = None, config: Config = config,
           class_dict: dict[str, type[Dataset]] = {}, **kwargs) -> Dataset:
    """Instantiate a :class:`Dataset` from names, config and kwargs.

    Resolves the dataset name (arg > CLI > config default), merges the
    dataset config section with ``kwargs``, derives a default
    ``folder_path`` from the class's ``data_type``/``name`` attributes,
    and constructs the class.
    """
    dataset_name = get_name(name=dataset_name, module=dataset,
                            arg_list=['-d', '--dataset'])
    if dataset_name is None:
        dataset_name = config.get_full_config()['dataset']['default_dataset']
    result = config.get_config(
        dataset_name=dataset_name)['dataset']._update(kwargs)
    DatasetType = class_dict[dataset_name]
    if folder_path is None:  # TODO: Linting problem
        folder_path = os.path.join(result['data_dir'],
                                   DatasetType.data_type, DatasetType.name)
    return DatasetType(folder_path=folder_path, **result)
def create(attack_name: str = None, attack: Union[str, Attack] = None,
           folder_path: str = None, dataset_name: str = None,
           dataset: Union[str, Dataset] = None, model_name: str = None,
           model: Union[str, Model] = None, config: Config = config,
           class_dict: dict[str, type[Attack]] = {}, **kwargs):
    """Instantiate an :class:`Attack`.

    Merges the general ``attack`` config section with the attack-specific
    section and ``kwargs`` (later layers win), then derives a default
    ``folder_path`` of the form
    ``attack_dir[/data_type/dataset][/model]/attack``.
    """
    dataset_name = get_name(name=dataset_name, module=dataset,
                            arg_list=['-d', '--dataset'])
    model_name = get_name(name=model_name, module=model,
                          arg_list=['-m', '--model'])
    attack_name = get_name(name=attack_name, module=attack,
                           arg_list=['--attack'])
    if dataset_name is None:
        dataset_name = config.get_full_config()['dataset']['default_dataset']
    general_config = config.get_config(dataset_name=dataset_name)['attack']
    specific_config = config.get_config(dataset_name=dataset_name)[attack_name]
    result = general_config.update(specific_config).update(kwargs)
    try:
        AttackType = class_dict[attack_name]
    except KeyError:
        # Show the available keys so a typo is easy to spot.
        print(f'{attack_name} not in \n{list(class_dict.keys())}')
        raise
    if folder_path is None:
        segments = [result['attack_dir']]
        if isinstance(dataset, Dataset):
            segments += [dataset.data_type, dataset.name]
        if model_name is not None:
            segments.append(model_name)
        segments.append(AttackType.name)
        folder_path = os.path.join(*segments)
    return AttackType(name=attack_name, dataset=dataset, model=model,
                      folder_path=folder_path, **result)
def create(defense_name: str = None, defense: Union[str, Defense] = None,
           folder_path: str = None, dataset_name: str = None,
           dataset: Union[str, Dataset] = None, model_name: str = None,
           model: Union[str, Model] = None, config: Config = config,
           class_dict: dict[str, type[Defense]] = {}, **kwargs) -> Defense:
    """Instantiate a :class:`Defense`.

    Merges the general ``defense`` config section with the
    defense-specific section and ``kwargs`` (later layers win), then
    derives a default ``folder_path`` of the form
    ``defense_dir[/data_type/dataset][/model]/defense``.
    """
    dataset_name = get_name(name=dataset_name, module=dataset,
                            arg_list=['-d', '--dataset'])
    model_name = get_name(name=model_name, module=model,
                          arg_list=['-m', '--model'])
    defense_name = get_name(name=defense_name, module=defense,
                            arg_list=['--defense'])
    if dataset_name is None:
        dataset_name = config.get_full_config()['dataset']['default_dataset']
    general_config = config.get_config(dataset_name=dataset_name)['defense']
    specific_config = config.get_config(
        dataset_name=dataset_name)[defense_name]
    result = general_config._update(specific_config)._update(
        kwargs)  # TODO: linting issues
    DefenseType: type[Defense] = class_dict[defense_name]
    if folder_path is None:
        segments = [result['defense_dir']]
        if isinstance(dataset, Dataset):
            segments += [dataset.data_type, dataset.name]
        if model_name is not None:
            segments.append(model_name)
        segments.append(DefenseType.name)
        folder_path = os.path.join(*segments)
    return DefenseType(name=defense_name, dataset=dataset, model=model,
                       folder_path=folder_path, **result)
def add_argument(parser: argparse.ArgumentParser, attack_name: str = None,
                 attack: Union[str, Attack] = None,
                 class_dict: dict[str, type[Attack]] = None
                 ) -> argparse._ArgumentGroup:
    """Add attack-specific arguments to ``parser``.

    Resolves the attack name (arg > module > ``--attack`` CLI flag) and
    delegates argument registration to the resolved attack class.

    Fix: a bare ``class_dict[attack_name]`` raised ``KeyError: None``
    when ``--attack`` was never supplied; now the user gets an explicit
    hint and the list of registered attacks before the re-raise.
    """
    attack_name = get_name(name=attack_name, module=attack,
                           arg_list=['--attack'])
    group = parser.add_argument_group('{yellow}attack{reset}'.format(**ansi),
                                      description=attack_name)
    try:
        AttackType = class_dict[attack_name]
    except KeyError as e:
        if attack_name is None:
            print('{red}you need to first claim the attack name '
                  'using "--attack".{reset}'.format(**ansi))
        print(f'{attack_name} not in \n{list(class_dict.keys())}')
        raise e
    return AttackType.add_argument(group)  # TODO: Linting problem
def add_argument(parser: argparse.ArgumentParser, defense_name: str = None,
                 defense: Union[str, Defense] = None,
                 class_dict: dict[str, type[Defense]] = None
                 ) -> argparse._ArgumentGroup:
    """Add defense-specific arguments to ``parser``.

    Resolves the defense name (arg > module > ``--defense`` CLI flag) and
    delegates argument registration to the resolved defense class.

    Fix: a bare ``class_dict[defense_name]`` raised ``KeyError: None``
    when ``--defense`` was never supplied; now the user gets an explicit
    hint and the list of registered defenses before the re-raise.
    """
    defense_name = get_name(name=defense_name, module=defense,
                            arg_list=['--defense'])
    group = parser.add_argument_group('{yellow}defense{reset}'.format(**ansi),
                                      description=defense_name)
    try:
        DefenseType = class_dict[defense_name]
    except KeyError as e:
        if defense_name is None:
            print('{red}you need to first claim the defense name '
                  'using "--defense".{reset}'.format(**ansi))
        print(f'{defense_name} not in \n{list(class_dict.keys())}')
        raise e
    return DefenseType.add_argument(group)  # TODO: Linting problem
def add_argument(parser: argparse.ArgumentParser, model_name: str = None,
                 model: Union[str, ImageModel] = None,
                 config: Config = config,
                 class_dict: dict[str, type[ImageModel]] = class_dict
                 ) -> argparse._ArgumentGroup:
    """Add image-model arguments to ``parser``.

    Resolves names (arg > CLI > config default), maps the model name to
    its class key, and delegates to ``trojanzoo.models.add_argument``.
    """
    dataset_name = get_name(arg_list=['-d', '--dataset'])
    dataset_name = (dataset_name if dataset_name is not None
                    else config.get_full_config()['dataset']['default_dataset'])
    model_name = get_name(name=model_name, module=model,
                          arg_list=['-m', '--model'])
    model_name = (model_name if model_name is not None
                  else config.get_config(
                      dataset_name=dataset_name)['model']['default_model'])
    model_name = get_model_class(model_name)
    return trojanzoo.models.add_argument(parser=parser, model_name=model_name,
                                         model=model, config=config,
                                         class_dict=class_dict)
def add_argument(parser: argparse.ArgumentParser, dataset_name: str = None,
                 dataset: Union[str, Dataset] = None, config: Config = config,
                 class_dict: dict[str, type[Dataset]] = {}):
    """Add dataset-specific arguments to ``parser``.

    Resolves the dataset name (arg > CLI > config default), creates an
    argument group titled with it, and lets the dataset class fill it in.
    """
    dataset_name = get_name(name=dataset_name, module=dataset,
                            arg_list=['-d', '--dataset'])
    if dataset_name is None:
        dataset_name = config.get_full_config()['dataset']['default_dataset']
    group = parser.add_argument_group(
        '{yellow}dataset{reset}'.format(**ansi), description=dataset_name)
    try:
        DatasetType = class_dict[dataset_name]
    except KeyError:
        # Show the available keys so a typo is easy to spot.
        print(f'{dataset_name} not in \n{list(class_dict.keys())}')
        raise
    return DatasetType.add_argument(group)
def create(dataset_name: str = None, dataset: Dataset = None,
           model: Model = None, ClassType: type[Trainer] = Trainer,
           tensorboard: bool = None, config: Config = config, **kwargs):
    """Build a trainer of type ``ClassType`` from the trainer config.

    Merged config entries are routed to whichever signature accepts
    them — ``model.define_optimizer`` first, then ``model._train``;
    unmatched keys are skipped. ``T_max`` is taken from the configured
    ``epoch``. A ``SummaryWriter`` is created only when ``tensorboard``
    is truthy (import deferred until then).
    """
    assert isinstance(model, Model)
    dataset_name = get_name(name=dataset_name, module=dataset,
                            arg_list=['-d', '--dataset'])
    result = config.get_config(
        dataset_name=dataset_name)['trainer'].update(kwargs)
    optim_args: dict[str, Any] = {}
    train_args: dict[str, Any] = {}
    routing = ((model.define_optimizer.__code__.co_varnames, optim_args),
               (model._train.__code__.co_varnames, train_args))
    for key, value in result.items():
        for accepted, bucket in routing:
            if key in accepted:
                bucket[key] = value
                break
    optimizer, lr_scheduler = model.define_optimizer(
        T_max=result['epoch'], **optim_args)
    writer = None
    writer_args: dict[str, Any] = {}
    if tensorboard:
        # Imported lazily so tensorboard is only required when used.
        from torch.utils.tensorboard import SummaryWriter
        # SummaryWriter accepts log_dir, flush_secs, ...
        writer_keys = SummaryWriter.__init__.__code__.co_varnames
        writer_args = {k: v for k, v in result.items() if k in writer_keys}
        writer = SummaryWriter(**writer_args)
    return ClassType(optim_args=optim_args, train_args=train_args,
                     writer_args=writer_args, optimizer=optimizer,
                     lr_scheduler=lr_scheduler, writer=writer)
def add_argument(parser: argparse.ArgumentParser, attack_name: str = None,
                 attack: Union[str, Attack] = None,
                 class_dict: dict[str, type[Attack]] = {}):
    """Add attack-specific arguments to ``parser``.

    Resolves the attack name (arg > module > ``--attack`` CLI flag);
    prints a hint plus the registered attack names if the lookup fails.
    """
    attack_name = get_name(name=attack_name, module=attack,
                           arg_list=['--attack'])
    group = parser.add_argument_group('{yellow}attack{reset}'.format(**ansi),
                                      description=attack_name)
    try:
        AttackType = class_dict[attack_name]
    except KeyError:
        if attack_name is None:
            print(
                '{red}you need to first claim the attack name using "--attack".{reset}'
                .format(**ansi))
        print(f'{attack_name} not in \n{list(class_dict.keys())}')
        raise
    return AttackType.add_argument(group)
def create(dataset_name: str = None, dataset: str = None,
           folder_path: str = None, config: Config = config,
           class_dict: dict[str, type[Dataset]] = {}, **kwargs):
    """Instantiate a :class:`Dataset` from names, config and kwargs.

    Resolves the dataset name (arg > CLI > config default), merges the
    dataset config section with ``kwargs``, derives a default
    ``folder_path`` from the class's ``data_type``/``name`` attributes,
    and constructs the class.
    """
    dataset_name = get_name(name=dataset_name, module=dataset,
                            arg_list=['-d', '--dataset'])
    if dataset_name is None:
        dataset_name = config.get_full_config()['dataset']['default_dataset']
    result = config.get_config(
        dataset_name=dataset_name)['dataset'].update(kwargs)
    try:
        DatasetType = class_dict[dataset_name]
    except KeyError:
        # Show the available keys so a typo is easy to spot.
        print(f'{dataset_name} not in \n{list(class_dict.keys())}')
        raise
    if folder_path is None:
        folder_path = os.path.join(result['data_dir'],
                                   DatasetType.data_type, DatasetType.name)
    return DatasetType(folder_path=folder_path, **result)