def add_auxiliary_task_arguments(group: _ArgumentGroup) -> _ArgumentGroup:
    """Add arguments for auxiliary task.

    Each auxiliary loss gets an on/off switch plus its weight and
    regularization hyper-parameters.
    """
    add = group.add_argument
    # Auxiliary CTC loss.
    add("--use-ctc-loss", type=strtobool, nargs="?", default=False,
        help="Whether to compute auxiliary CTC loss.")
    add("--ctc-loss-weight", default=0.5, type=float,
        help="Weight of auxiliary CTC loss.")
    add("--ctc-loss-dropout-rate", default=0.0, type=float,
        help="Dropout rate for auxiliary CTC.")
    # Auxiliary LM loss (label smoothing).
    add("--use-lm-loss", type=strtobool, nargs="?", default=False,
        help="Whether to compute auxiliary LM loss (label smoothing).")
    add("--lm-loss-weight", default=0.5, type=float,
        help="Weight of auxiliary LM loss.")
    add("--lm-loss-smoothing-rate", default=0.0, type=float,
        help="Smoothing rate for LM loss. If > 0, label smoothing is enabled.")
    # Auxiliary Transducer loss computed on intermediate encoder layers.
    add("--use-aux-transducer-loss", type=strtobool, nargs="?", default=False,
        help="Whether to compute auxiliary Transducer loss.")
    add("--aux-transducer-loss-weight", default=0.2, type=float,
        help="Weight of auxiliary Transducer loss.")
    add("--aux-transducer-loss-enc-output-layers", default=None,
        type=ast.literal_eval,
        help="List of intermediate encoder layers for auxiliary "
             "transducer loss computation.")
    add("--aux-transducer-loss-mlp-dim", default=320, type=int,
        help="Multilayer perceptron hidden dimension for auxiliary Transducer loss.")
    add("--aux-transducer-loss-mlp-dropout-rate", default=0.0, type=float,
        help="Multilayer perceptron dropout rate for auxiliary Transducer loss.")
    # Symmetric KL divergence loss.
    add("--use-symm-kl-div-loss", type=strtobool, nargs="?", default=False,
        help="Whether to compute symmetric KL divergence loss.")
    add("--symm-kl-div-loss-weight", default=0.2, type=float,
        help="Weight of symmetric KL divergence loss.")
    return group
def add_argument(cls, group: argparse._ArgumentGroup):
    """Add PGD hyper-parameter and defense-flag options to ``group``.

    Fix: now returns ``group`` for chaining, consistent with sibling
    ``add_argument`` implementations in this file.
    """
    super().add_argument(group)
    group.add_argument('--pgd_alpha', dest='pgd_alpha', type=float)
    group.add_argument('--pgd_epsilon', dest='pgd_epsilon', type=float)
    group.add_argument('--pgd_iteration', dest='pgd_iteration', type=int)
    group.add_argument('--stop_conf', dest='stop_conf', type=float)
    # Defense-related boolean switches.
    group.add_argument('--magnet', dest='magnet', action='store_true')
    group.add_argument('--randomized_smooth', dest='randomized_smooth',
                       action='store_true')
    group.add_argument('--curvature', dest='curvature', action='store_true')
    return group
def add_argument(cls, group: argparse._ArgumentGroup):
    """Add image-transform defense options to ``group``.

    Fix: now returns ``group`` for chaining, consistent with sibling
    ``add_argument`` implementations in this file.
    """
    super().add_argument(group)
    group.add_argument('--transform_mode', dest='transform_mode', type=str,
                       help='Image Transform Mode, defaults to "recompress".')
    group.add_argument('--resize_ratio', dest='resize_ratio', type=float,
                       help='Image Resize Ratio for Recompress, defaults to 0.95.')
    return group
def add_argument(cls, group: argparse._ArgumentGroup):
    """Register trigger-preprocessing and retraining options on ``group``."""
    super().add_argument(group)
    add = group.add_argument
    add('--term')
    add('--inner_iter', type=int)
    add('--inner_lr', type=float)
    add('--class_sample_num', type=int,
        help='the number of sampled images per class, defaults to 100')
    add('--mse_weight', type=float,
        help='the weight of mse loss during retraining, defaults to 100')
    add('--preprocess_layer',
        help="the chosen feature layer patched by trigger, defaults to 'features'")
    add('--preprocess_epoch', type=int, help='preprocess optimization epoch')
    add('--preprocess_lr', type=float, help='preprocess learning rate')
    return group
def add_argument(cls, group: argparse._ArgumentGroup):
    """Register GAN-based remasking options on ``group``."""
    super().add_argument(group)
    add = group.add_argument
    add('--sample_ratio', type=float,
        help='sample ratio from the full training data')
    add('--noise_dim', type=int, help='GAN noise dimension')
    # Remask optimization hyper-parameters.
    add('--remask_epoch', type=int, help='Remask optimizing epoch')
    add('--remask_lr', type=float, help='Remask optimizing learning rate')
    # Loss-balancing coefficients.
    add('--gamma_1', type=float, help='control effect of GAN loss')
    add('--gamma_2', type=float, help='control effect of perturbation loss')
    return group
def add_argument(cls, group: argparse._ArgumentGroup):
    """Register the target deployment platform option on ``group``."""
    super().add_argument(group)
    # Free-form string; no type/help constraints in the original.
    group.add_argument('--target_platform')
    return group
def add_argument(group: argparse._ArgumentGroup):
    """Add watermark (trigger mark) options to ``group``.

    Fix: the ``--mark_path`` help text was a copy-paste of the
    ``--edge_color`` help ("edge color in watermark image"); it now
    describes the image path it actually configures.
    """
    group.add_argument('--edge_color', dest='edge_color',
                       help='edge color in watermark image, defaults to \'auto\'.')
    group.add_argument('--mark_path', dest='mark_path',
                       help='path to watermark image, defaults to '
                            'trojanzoo/data/mark/apple_white.png.')
    group.add_argument('--mark_alpha', dest='mark_alpha', type=float,
                       help='mark transparency, defaults to 0.0.')
    group.add_argument('--mark_height', dest='mark_height', type=int,
                       help='mark height, defaults to 3.')
    group.add_argument('--mark_width', dest='mark_width', type=int,
                       help='mark width, defaults to 3.')
    group.add_argument('--height_offset', dest='height_offset', type=int,
                       help='height offset, defaults to 0')
    group.add_argument('--width_offset', dest='width_offset', type=int,
                       help='width offset, defaults to 0')
    group.add_argument('--random_pos', dest='random_pos', action='store_true',
                       help='Random offset Location for add_mark.')
    group.add_argument('--random_init', dest='random_init', action='store_true',
                       help='random values for mark pixel.')
    group.add_argument('--mark_distributed', dest='mark_distributed',
                       action='store_true', help='Distributed Mark.')
    return group
def add_argument(cls, group: argparse._ArgumentGroup):
    """Add the ``--select_point`` option to ``group``.

    Fix: now returns ``group`` for chaining, consistent with sibling
    ``add_argument`` implementations in this file.
    """
    super().add_argument(group)
    group.add_argument('--select_point', dest='select_point', type=int,
                       help='the number of select_point, defaults to 2')
    return group
def add_argument(
        cls, group: argparse._ArgumentGroup) -> argparse._ArgumentGroup:
    r"""Add image dataset arguments to argument parser group.

    View source to see specific arguments.

    Note:
        This is the implementation of adding arguments.
        The concrete dataset class may override this method
        to add more arguments.
        For users, please use :func:`add_argument()` instead,
        which is more user-friendly.

    See Also:
        :meth:`trojanzoo.datasets.Dataset.add_argument()`
    """
    super().add_argument(group)
    add = group.add_argument
    # Normalize inside the dataset transform rather than the model.
    add('--dataset_normalize', dest='normalize', action='store_true',
        help='use transforms.Normalize in dataset transform. '
             "(It's used in model as the first layer by default.)")
    add('--transform', choices=[None, 'none', 'bit', 'pytorch'])
    # Data-augmentation switches and their hyper-parameters.
    add('--auto_augment', action='store_true', help='use auto augment')
    add('--mixup', action='store_true', help='use mixup')
    add('--mixup_alpha', type=float, help='mixup alpha (default: 0.0)')
    add('--cutmix', action='store_true', help='use cutmix')
    add('--cutmix_alpha', type=float, help='cutmix alpha (default: 0.0)')
    add('--cutout', action='store_true', help='use cutout')
    add('--cutout_length', type=int, help='cutout length')
    return group
def add_argument(cls, group: argparse._ArgumentGroup):
    """Add activation-clustering defense options to ``group``.

    Fixes: ``--nb_clusters`` had an empty help string and
    ``--clustering_method`` carried the help that belonged to it
    ("the amount of clusters"); typos "reduceing"/"reduing" and a stray
    leading space in a help string are corrected.
    """
    super().add_argument(group)
    group.add_argument('--mix_image_num', type=int,
                       help='the number of sampled image')
    group.add_argument('--clean_image_ratio', type=float,
                       help='the ratio of clean image')
    group.add_argument('--retrain_epoch', type=int,
                       help='the epoch of retraining the model')
    group.add_argument('--nb_clusters', type=int,
                       help='the amount of clusters')
    group.add_argument('--clustering_method', type=str,
                       help='the method used for clustering')
    group.add_argument('--nb_dims', type=int,
                       help='the dimension set in the process of reducing '
                            'the dimensionality of data')
    group.add_argument('--reduce_method', type=str,
                       help='the method for reducing the dimensionality of data')
    group.add_argument('--cluster_analysis', type=str,
                       help='the method chosen to analyze whether cluster is the poison cluster, '
                            'including size, distance, relative-size, silhouette-scores')
    return group
def add_argument(group: argparse._ArgumentGroup):
    """Add model-selection and randomized-smoothing options to ``group``."""
    add = group.add_argument
    add('-m', '--model', dest='model_name',
        help='model name, defaults to config[model][default_model]')
    add('--suffix', dest='suffix',
        help='model name suffix, e.g. _adv_train')
    # Weight-loading switches.
    add('--pretrain', dest='pretrain', action='store_true',
        help='load pretrained weights, defaults to False')
    add('--official', dest='official', action='store_true',
        help='load official weights, defaults to False')
    add('--model_dir', dest='model_dir',
        help='directory to contain pretrained models')
    # Randomized smoothing and its sampling hyper-parameters.
    add('--randomized_smooth', dest='randomized_smooth', action='store_true',
        help='whether to use randomized smoothing, defaults to False')
    add('--rs_sigma', dest='rs_sigma', type=float,
        help='randomized smoothing sampling std, defaults to 0.01')
    add('--rs_n', dest='rs_n', type=int,
        help='randomized smoothing sampling number, defaults to 100')
    return group
def add_argument(cls, group: argparse._ArgumentGroup):
    """Register the class-count option on ``group``."""
    super().add_argument(group)
    group.add_argument('--num_classes', type=int,
                       help='number of classes')
    return group
def add_argument(cls, group: argparse._ArgumentGroup):
    """Add PGD attack options (white- and black-box) to ``group``.

    Fixes: "most unpossible class" corrected to "least possible class"
    in the ``--target_idx`` help; now returns ``group`` for chaining,
    consistent with sibling implementations.
    """
    super().add_argument(group)
    group.add_argument('--alpha', dest='alpha', type=float,
                       help='PGD learning rate per step, defaults to 3.0/255')
    group.add_argument('--epsilon', dest='epsilon', type=float,
                       help='Projection norm constraint, defaults to 8.0/255')
    group.add_argument('--iteration', dest='iteration', type=int,
                       help='Attack Iteration, defaults to 20')
    group.add_argument('--stop_threshold', dest='stop_threshold', type=float,
                       help='early stop confidence, defaults to None')
    group.add_argument('--target_idx', dest='target_idx', type=int,
                       help='Target label order in original classification, defaults to 1 '
                            '(0 for untargeted attack, 1 for most possible class, '
                            '-1 for least possible class)')
    # Black-box gradient estimation options.
    group.add_argument('--grad_method', dest='grad_method',
                       help='gradient estimation method, defaults to \'white\'')
    group.add_argument('--query_num', dest='query_num', type=int,
                       help='query numbers for black box gradient estimation, defaults to 100.')
    group.add_argument('--sigma', dest='sigma', type=float,
                       help='gaussian sampling std for black box gradient estimation, '
                            'defaults to 1e-3')
    return group
def add_custom_encoder_arguments(group: _ArgumentGroup) -> _ArgumentGroup:
    """Define arguments for Custom encoder."""
    add = group.add_argument
    # Block architecture definition.
    # NOTE(review): ``type=eval`` executes arbitrary CLI text — presumably
    # trusted training configs only; confirm before exposing to untrusted input.
    add("--enc-block-arch", type=eval, action="append", default=None,
        help="Encoder architecture definition by blocks")
    add("--enc-block-repeat", default=1, type=int,
        help="Repeat N times the provided encoder blocks if N > 1")
    # Input layer configuration.
    add("--custom-enc-input-layer", type=str, default="conv2d",
        choices=["conv2d", "vgg2l", "linear", "embed"],
        help="Custom encoder input layer type")
    add("--custom-enc-input-dropout-rate", type=float, default=0.0,
        help="Dropout rate of custom encoder input layer")
    add("--custom-enc-input-pos-enc-dropout-rate", type=float, default=0.0,
        help="Dropout rate of positional encoding in custom encoder input layer")
    # Attention / activation configuration.
    add("--custom-enc-positional-encoding-type", type=str, default="abs_pos",
        choices=["abs_pos", "scaled_abs_pos", "rel_pos"],
        help="Custom encoder positional encoding layer type")
    add("--custom-enc-self-attn-type", type=str, default="self_attn",
        choices=["self_attn", "rel_self_attn"],
        help="Custom encoder self-attention type")
    add("--custom-enc-pw-activation-type", type=str, default="relu",
        choices=["relu", "hardtanh", "selu", "swish"],
        help="Custom encoder pointwise activation type")
    add("--custom-enc-conv-mod-activation-type", type=str, default="swish",
        choices=["relu", "hardtanh", "selu", "swish"],
        help="Custom encoder convolutional module activation type")
    return group
def add_argument(cls, group: argparse._ArgumentGroup):
    """Add the ``--data_format`` storage option to ``group``.

    Fix: now returns ``group`` for chaining, consistent with sibling
    ``add_argument`` implementations in this file.
    """
    super().add_argument(group)
    group.add_argument('--data_format', dest='data_format', type=str,
                       help='folder or zip. (zip is using ZIP_STORED)')
    return group
def add_argument(cls, group: argparse._ArgumentGroup):
    """Add adversarial-training and SGM-gradient options to ``group``.

    Fix: the ``--adv_train_valid_eps`` help was a copy-paste of the
    ``--adv_train_eps`` help; it now mentions validation.
    """
    super().add_argument(group)
    group.add_argument('--adv_train', action='store_true',
                       help='enable adversarial training.')
    group.add_argument('--adv_train_iter', type=int,
                       help='adversarial training PGD iteration, defaults to 7.')
    group.add_argument('--adv_train_alpha', type=float,
                       help='adversarial training PGD alpha, defaults to 2/255.')
    group.add_argument('--adv_train_eps', type=float,
                       help='adversarial training PGD eps, defaults to 8/255.')
    group.add_argument('--adv_train_valid_eps', type=float,
                       help='adversarial training PGD validation eps, defaults to 8/255.')
    group.add_argument('--sgm', action='store_true',
                       help='whether to use sgm gradient, defaults to False')
    group.add_argument('--sgm_gamma', type=float,
                       help='sgm gamma, defaults to 1.0')
    return group
def add_argument(cls, group: argparse._ArgumentGroup):
    r"""Add image model arguments to argument parser group.

    View source to see specific arguments.

    Note:
        This is the implementation of adding arguments.
        The concrete model class may override this method
        to add more arguments.
        For users, please use :func:`add_argument()` instead,
        which is more user-friendly.

    See Also:
        :meth:`trojanzoo.models.Model.add_argument()`
    """
    super().add_argument(group)
    add = group.add_argument
    # Adversarial training configuration.
    add('--adv_train', choices=[None, 'pgd', 'free', 'trades'],
        help='adversarial training (default: None)')
    add('--adv_train_random_init', action='store_true')
    add('--adv_train_iter', type=int,
        help='adversarial training PGD iteration (default: 7).')
    add('--adv_train_alpha', type=float,
        help='adversarial training PGD alpha (default: 2/255).')
    add('--adv_train_eps', type=float,
        help='adversarial training PGD eps (default: 8/255).')
    # Separate PGD settings used at evaluation time.
    add('--adv_train_eval_iter', type=int)
    add('--adv_train_eval_alpha', type=float)
    add('--adv_train_eval_eps', type=float)
    add('--adv_train_trades_beta', type=float,
        help='regularization, i.e., 1/lambda in TRADES '
             '(default: 6.0)')
    add('--norm_layer', choices=['bn', 'gn'], default='bn')
    # Skip Gradient Method options.
    add('--sgm', action='store_true',
        help='whether to use sgm gradient (default: False)')
    add('--sgm_gamma', type=float, help='sgm gamma (default: 1.0)')
    return group
def add_argument(cls, group: argparse._ArgumentGroup):
    """Add TrojanNet neuron-preprocessing options to ``group``.

    Fixes: typo "maxmized" corrected to "maximized" in the
    ``--preprocess_layer`` help; now returns ``group`` for chaining,
    consistent with sibling implementations.
    """
    super().add_argument(group)
    group.add_argument('--preprocess_layer', dest='preprocess_layer', type=str,
                       help='the chosen feature layer patched by trigger where rare '
                            'neuron activation is maximized, defaults to ``flatten``')
    group.add_argument('--threshold', dest='threshold', type=float,
                       help='Trojan Net Threshold, defaults to 5')
    group.add_argument('--target_value', dest='target_value', type=float,
                       help='Trojan Net Target_Value, defaults to 10')
    group.add_argument('--neuron_lr', dest='neuron_lr', type=float,
                       help='Trojan Net learning rate in neuron preprocessing, '
                            'defaults to 0.015')
    group.add_argument('--neuron_epoch', dest='neuron_epoch', type=int,
                       help='Trojan Net epoch in neuron preprocessing, defaults to 20')
    group.add_argument('--neuron_num', dest='neuron_num', type=int,
                       help='Trojan Net neuron numbers in neuron preprocessing, '
                            'defaults to 2')
    return group
def add_argument(cls, group: argparse._ArgumentGroup):
    """Add clean-label poisoning options (PGD / GAN generation) to ``group``.

    Fixes: stray leading spaces removed from the ``--generator_iters``
    and ``--critic_iter`` help strings; now returns ``group`` for
    chaining, consistent with sibling implementations.
    """
    super().add_argument(group)
    group.add_argument('--poison_generation_method',
                       dest='poison_generation_method', type=str,
                       help='the chosen method to generate poisoned sample, defaults to '
                            'config[clean_label][poison_generation_method]=pgd')
    group.add_argument('--tau', dest='tau', type=float,
                       help='the interpolation constant used to balance source imgs and '
                            'target imgs, defaults to config[clean_label][tau]=0.2')
    group.add_argument('--epsilon', dest='epsilon', type=float,
                       help='the perturbation bound in input space, defaults to '
                            'config[clean_label][epsilon]=0.1, 300/(3*32*32)')
    group.add_argument('--noise_dim', dest='noise_dim', type=int,
                       help='the dimension of the input in the generator, defaults to '
                            'config[clean_label][noise_dim]=100')
    group.add_argument('--train_gan', dest='train_gan', action='store_true',
                       help='whether train the GAN if it already exists, defaults to False')
    group.add_argument('--generator_iters', dest='generator_iters', type=int,
                       help='the epoch for training the generator, defaults to '
                            'config[clean_label][generator_iters]=1000')
    group.add_argument('--critic_iter', dest='critic_iter', type=int,
                       help='the critic iterations per generator training iteration, '
                            'defaults to config[clean_label][critic_iter]=5')
    return group
def add_argument(cls, group: argparse._ArgumentGroup):
    """Register clean-label poisoning options (PGD / GAN) on ``group``."""
    super().add_argument(group)
    add = group.add_argument
    add('--poison_generation_method', choices=['pgd', 'gan'],
        help='the chosen method to generate poisoned sample '
             '(default: "pgd")')
    # PGD generation hyper-parameters.
    add('--pgd_alpha', type=float)
    add('--pgd_eps', type=float)
    add('--pgd_iter', type=int)
    add('--tau', type=float,
        help='the interpolation constant used to balance source imgs and target imgs, '
             'defaults to 0.2')
    # GAN generation hyper-parameters.
    add('--noise_dim', type=int,
        help='the dimension of the input in the generator, '
             'defaults to config[clean_label][noise_dim]=100')
    add('--train_gan', action='store_true',
        help='whether train the GAN if it already exists, defaults to False')
    add('--generator_iters', type=int,
        help='epochs for training the generator, defaults to 1000')
    add('--critic_iter', type=int,
        help='critic iterations per generator training iteration '
             '(default: 5)')
    return group
def add_argument(cls, group: argparse._ArgumentGroup):
    """Register NAS model and architecture-search options on ``group``."""
    super().add_argument(group)
    add = group.add_argument
    # Network topology options.
    add('--supernet', action='store_true', help='whether to use supernet')
    add('--model_arch', help='genotype name (default: "darts")')
    add('--layers', type=int, help='total number of layers (default: 20)')
    add('--init_channels', type=int,
        help='out_channel of stem conv layer (default: 36)')
    add('--dropout_p', type=float, help='dropout probability (default: 0.2)')
    # Auxiliary classifier head.
    add('--auxiliary', action='store_true',
        help='whether to use auxiliary classifier')
    add('--auxiliary_weight', type=float,
        help='loss weight of auxiliary classifier (default: 0.4)')
    # Architecture-search (DARTS-style) options.
    add('--arch_search', action='store_true',
        help='whether to search supernet architecture weight parameters')
    add('--use_full_train_set', action='store_true',
        help='whether to use full training data during architecture search')
    add('--arch_lr', type=float,
        help='learning rate for architecture optimizer (default: 3e-4)')
    add('--arch_weight_decay', type=float,
        help='weight decay for architecture optimizer (default: 1e-3)')
    add('--arch_unrolled', action='store_true', default=False,
        help='whether to use one-step unrolled validation loss (darts-v2)')
    return group
def add_argument(cls, group: argparse._ArgumentGroup):
    """Add ABS-weight and STRIP-percentage options to ``group``.

    Fix: now returns ``group`` for chaining, consistent with sibling
    ``add_argument`` implementations in this file.
    """
    super().add_argument(group)
    group.add_argument('--abs_weight', dest='abs_weight', type=float)
    group.add_argument('--strip_percent', dest='strip_percent', type=float)
    return group
def add_argument(group: argparse._ArgumentGroup):
    """Add dataset-selection and data-loading options to ``group``."""
    add = group.add_argument
    add('-d', '--dataset', dest='dataset_name', type=str,
        help='dataset name (lowercase).')
    # Batch sizes for the three dataset splits.
    add('--batch_size', dest='batch_size', type=int,
        help='batch size (negative number means batch_size for each gpu).')
    add('--valid_batch_size', dest='valid_batch_size', type=int,
        help='valid batch size.')
    add('--test_batch_size', dest='test_batch_size', type=int,
        help='test batch size.')
    add('--num_workers', dest='num_workers', type=int,
        help='num_workers passed to torch.utils.data.DataLoader for training set, '
             'defaults to 4.')
    add('--download', dest='download', action='store_true',
        help='download dataset if not exist by calling dataset.initialize()')
    add('--data_dir', dest='data_dir',
        help='directory to contain datasets')
    return group
def add_args_to_group(cls, group: ArgumentGroup):
    """Register the hidden options on ``group``.

    ``--hidden-default-argument`` falls back to 3 when not supplied;
    ``--hidden-argument`` has no default (None when absent).
    """
    add = group.add_argument
    add("--hidden-default-argument", dest="hidden_default_argument", default=3)
    add("--hidden-argument", dest="hidden_argument")
def add_to_group(self, group: argparse._ArgumentGroup) -> None:
    """Forward the stored flags and keyword options to ``group.add_argument``."""
    flags, options = self.args, self.kwargs
    group.add_argument(*flags, **options)
def add_argument(cls, group: argparse._ArgumentGroup):
    """Add PGD attack options (white- and black-box) to ``group``.

    Fix: "most unpossible class" corrected to "least possible class"
    in the ``--target_idx`` help.
    """
    super().add_argument(group)
    group.add_argument('--pgd_alpha', type=float,
                       help='PGD learning rate per step, defaults to 2.0/255')
    group.add_argument('--pgd_eps', type=float,
                       help='Projection norm constraint, defaults to 8.0/255')
    group.add_argument('--iteration', type=int,
                       help='Attack Iteration, defaults to 7')
    group.add_argument('--stop_threshold', type=float,
                       help='early stop confidence, defaults to 0.99')
    group.add_argument('--target_idx', type=int,
                       help='Target label order in original classification, defaults to -1 '
                            '(0 for untargeted attack, 1 for most possible class, '
                            '-1 for least possible class)')
    group.add_argument('--test_num', type=int,
                       help='total number of test examples for PGD, defaults to 1000.')
    group.add_argument('--num_init', type=int,
                       help='number of random init for PGD, defaults to 0 '
                            '(without random initialization).')
    # Black-box gradient estimation options.
    group.add_argument('--grad_method',
                       help='gradient estimation method, defaults to \'white\'')
    group.add_argument('--query_num', type=int,
                       help='query numbers for black box gradient estimation, defaults to 100.')
    group.add_argument('--sigma', type=float,
                       help='gaussian sampling std for black box gradient estimation, '
                            'defaults to 1e-3')
    return group
def add_argument(cls, group: argparse._ArgumentGroup):
    """Add adaptive-attack and query-defense options to ``group``.

    Fix: now returns ``group`` for chaining, consistent with sibling
    ``add_argument`` implementations in this file.
    """
    super().add_argument(group)
    group.add_argument('--attack_adapt', dest='attack_adapt', action='store_true',
                       help='Adaptive attack to add fake queries.')
    group.add_argument('--fake_percent', dest='fake_percent', type=float,
                       help='fake query percentage.')
    group.add_argument('--dist', dest='dist', type=float,
                       help='fake query noise std.')
    group.add_argument('--defend_adapt', dest='defend_adapt', action='store_true',
                       help='Robust location M-estimator.')
    group.add_argument('--active', dest='active', action='store_true',
                       help='Proactive solicitation.')
    group.add_argument('--active_percent', dest='active_percent', type=float,
                       help='Active gradient weight.')
    return group
def add_argument(cls, group: argparse._ArgumentGroup):
    """Register bare PGD hyper-parameters on ``group``."""
    super().add_argument(group)
    add = group.add_argument
    add('--pgd_alpha', type=float)
    add('--pgd_eps', type=float)
    add('--pgd_iter', type=int)
    return group
def add_argument(cls, group: argparse._ArgumentGroup):
    """Add inner-loop optimization options to ``group``.

    Fix: now returns ``group`` for chaining, consistent with sibling
    ``add_argument`` implementations in this file.
    """
    super().add_argument(group)
    group.add_argument('--inner_iter', dest='inner_iter', type=int)
    group.add_argument('--inner_lr', dest='inner_lr', type=float)
    return group
def add_custom_training_arguments(group: _ArgumentGroup) -> _ArgumentGroup:
    """Define arguments for training with Custom architecture."""
    add = group.add_argument
    # Current option names.
    add("--optimizer-warmup-steps", default=25000, type=int,
        help="Optimizer warmup steps")
    add("--noam-lr", default=10.0, type=float,
        help="Initial value of learning rate")
    add("--noam-adim", default=0, type=int,
        help="Most dominant attention dimension for scheduler.")
    # Deprecated aliases; each writes to the dest of its replacement above.
    add("--transformer-warmup-steps", type=int,
        help="Optimizer warmup steps. The parameter is deprecated, "
             "please use --optimizer-warmup-steps instead.",
        dest="optimizer_warmup_steps")
    add("--transformer-lr", type=float,
        help="Initial value of learning rate. The parameter is deprecated, "
             "please use --noam-lr instead.",
        dest="noam_lr")
    add("--adim", type=int,
        help="Most dominant attention dimension for scheduler. "
             "The parameter is deprecated, please use --noam-adim instead.",
        dest="noam_adim")
    return group