예제 #1
0
            args.devices = list(f"cuda:{i}"
                                for i in range(torch.cuda.device_count()))
        else:
            args.devices = ["cpu"]
    else:
        args.devices = list(name.strip() for name in args.devices.split(","))

# ---------------------------------------------------------------------------- #
# Serial preloading of the dataset
tools.success("Pre-downloading datasets...")

# Fetch every dataset once up front, so the parallel runs launched later do not
# all try to download the same files concurrently.
dataset_names = ("cifar10",)
with tools.Context("dataset", "info"):
    for dataset_name in dataset_names:
        with tools.Context(dataset_name, "info"):
            experiments.make_datasets(dataset_name, 1, 1)

# ---------------------------------------------------------------------------- #
# Run (missing) experiments
tools.success("Running experiments...")

# GAR to use
# Gradient aggregation rules evaluated by the experiment grid below;
# each name must be a key known to the attack script's aggregator registry.
gars = ("krum", "median", "bulyan")


# Command maker helper
def make_command(params):
    """Build a tools.Command that launches attack.py with the given parameters.

    Args:
        params: mapping of command-line option names to values, expanded by
            tools.dict_to_cmdlist into "--key value" style arguments.
    Returns:
        A tools.Command wrapping the full argument list.
    """
    base_command = ["python3", "-OO", "attack.py"]
    return tools.Command(base_command + tools.dict_to_cmdlist(params))
예제 #2
0
 # Defense: look up the selected gradient aggregation rule; exit with a fatal
 # error (listing the available choices) if the name is unknown
 defense = aggregators.gars.get(args.gar)
 if defense is None:
   tools.fatal_unavailable(aggregators.gars, args.gar, what="aggregation rule")
 # Attack: same lookup-or-die pattern for the selected attack
 attack = attacks.attacks.get(args.attack)
 if attack is None:
   tools.fatal_unavailable(attacks.attacks, args.attack, what="attack")
 # Model under evaluation, built from the CLI-selected name plus extra kwargs
 model = experiments.Model(args.model, config, **args.model_args)
 # Datasets: with --no-transform, bypass the default preprocessing and only
 # convert images to tensors; otherwise None lets make_datasets pick defaults
 if args.no_transform:
   train_transforms = test_transforms = torchvision.transforms.ToTensor()
 else:
   train_transforms = test_transforms = None # None selects the default transforms
 trainset, testset = experiments.make_datasets(args.dataset, args.batch_size, args.batch_size_test, train_transforms=train_transforms, test_transforms=test_transforms, **args.dataset_args)
 # Register the datasets as the model's defaults for later training/evaluation
 model.default("trainset", trainset)
 model.default("testset", testset)
 # Loss and criterion: base loss plus optional L1/L2 regularization terms,
 # each weighted by its CLI coefficient when given
 loss = experiments.Loss(args.loss, **args.loss_args)
 if args.l1_regularize is not None:
   loss += args.l1_regularize * experiments.Loss("l1")
 if args.l2_regularize is not None:
   loss += args.l2_regularize * experiments.Loss("l2")
 criterion = experiments.Criterion(args.criterion, **args.criterion_args)
 model.default("loss", loss)
 model.default("criterion", criterion)
 # Optimizer: plain SGD configured entirely from the CLI hyper-parameters
 optimizer = experiments.Optimizer("sgd", model, lr=args.learning_rate, momentum=args.momentum, dampening=args.dampening, weight_decay=args.weight_decay)
 model.default("optimizer", optimizer)
 # Privacy noise distribution