Example #1
def main(args, store):
    '''Given arguments and a cox store, trains a model. Check out the
    argparse object in this file for argument options.
    '''
    ds, train_loader, validation_loader = get_dataset_and_loaders(args)

    if args.per_class_accuracy:
        assert args.dataset in ['pets', 'caltech101', 'caltech256', 'flowers', 'aircraft'], \
            f'Per-class accuracy not supported for the {args.dataset} dataset.'

        # VERY IMPORTANT
        # We report the per-class accuracy using the validation-set
        # distribution, so ignore the training accuracy (it can exceed 100 and
        # does not capture anything meaningful); only the validation accuracy
        # is informative here.
        args.custom_accuracy = get_per_class_accuracy(args, validation_loader)

    model, checkpoint = get_model(args, ds)

    if args.eval_only:
        return train.eval_model(args, model, validation_loader, store=store)

    update_params = freeze_model(model, freeze_level=args.freeze_level)

    print(f"Dataset: {args.dataset} | Model: {args.arch}")
    train.train_model(args,
                      model, (train_loader, validation_loader),
                      store=store,
                      checkpoint=checkpoint,
                      update_params=update_params)
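
# `get_per_class_accuracy` is not shown in this snippet. A minimal sketch of such a
# helper, factored out of the inline version in Example #2 below (the
# `get_class_weights` helper is likewise assumed, as in Example #2):
def get_per_class_accuracy(args, loader):
    '''Returns a custom_acc(logits, labels) function that weights top-1
    accuracy by the (validation) class distribution.'''
    class_weights = get_class_weights(args, loader)

    def custom_acc(logits, labels):
        batch_size = labels.size(0)
        maxk = min(5, logits.shape[-1])
        prec1, _ = helpers.accuracy(logits, labels, topk=(1, maxk), exact=True)
        normal_prec1 = prec1.sum(0, keepdim=True).mul_(100 / batch_size)
        weighted_prec1 = (prec1 * class_weights[labels.cpu()].cuda()) \
            .sum(0, keepdim=True).mul_(100 / batch_size)
        return weighted_prec1.item(), normal_prec1.item()

    return custom_acc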
Example #2
def main(args, store):

    ds, train_loader, validation_loader = get_dataset_and_loaders(args)

    if args.dataset in [
            'pets', 'caltech101', 'caltech256', 'flowers', 'aircraft'
    ]:
        class_weights = get_class_weights(args, validation_loader)

        def custom_acc(logits, labels):
            '''Returns the top-1 accuracy, weighted by the class distribution.
            This is important when evaluating an unbalanced dataset.
            '''
            batch_size = labels.size(0)
            maxk = min(5, logits.shape[-1])
            prec1, _ = helpers.accuracy(logits,
                                        labels,
                                        topk=(1, maxk),
                                        exact=True)

            normal_prec1 = prec1.sum(0, keepdim=True).mul_(100 / batch_size)
            weighted_prec1 = prec1 * class_weights[labels.cpu()].cuda()
            weighted_prec1 = weighted_prec1.sum(0, keepdim=True).mul_(
                100 / batch_size)

            return weighted_prec1.item(), normal_prec1.item()

        args.custom_accuracy = custom_acc  # meaningful only for the validation set; ignore the training-set precision.

    model, checkpoint = get_model(args, ds)

    if args.eval_only:
        return train.eval_model(args, model, validation_loader, store=store)

    update_params = freeze_model(model, freeze_level=args.freeze_level)

    # Sanity check that freezing works: uncomment the hook below and verify
    # that the frozen weights stay the same across iterations.
    # def check_freezed_features_hook(model, i, loop_type, inp, target):
    #     if i%100==0:
    #         for name, param in model.named_parameters():
    #             if name == 'module.model.layer4.1.conv2.weight':
    #                 print(name, param)

    # args.iteration_hook = check_freezed_features_hook

    print(f"Dataset: {args.dataset} | Model: {args.arch}")
    train.train_model(args,
                      model, (train_loader, validation_loader),
                      store=store,
                      checkpoint=checkpoint,
                      update_params=update_params)
Example #3
def main():
    args = Bunch(config)

    print("Translating model file")
    path_hash = hashlib.md5(args.model_path.encode("utf-8")).hexdigest()
    translated_model_path = f"/tmp/checkpoint{path_hash}"
    g = ch.load(args.model_path)
    sd = {}
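    # Strip the "1.module." prefix from each key and register the weights under
    # both the attacker's and the plain model's namespaces, matching the
    # module.model.* / module.attacker.model.* key layout used by robustness
    # checkpoints.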
    for k, v in g["state_dict"].items():
        kk = k[len("1.module."):]
        sd[f"module.attacker.model.{kk}"] = v
        sd[f"module.model.{kk}"] = v
    ch.save({"state_dict": sd, "epoch": g["epoch"]}, translated_model_path)
    args.__dict__["model_path"] = translated_model_path
    print("Done translating")

    # Create store and log the args
    store = StoreWrapper(os.path.join(output_dir, "cox"))
    if "metadata" not in store.keys:
        args_dict = args.__dict__
        schema = cox.store.schema_from_dict(args_dict)
        store.add_table("metadata", schema)
        store["metadata"].append_row(args_dict)
    else:
        print("[Found existing metadata in store. Skipping this part.]")

    ds, train_loader, validation_loader = get_dataset_and_loaders(args)

    if args.per_class_accuracy:
        assert args.dataset in [
            "pets",
            "caltech101",
            "caltech256",
            "flowers",
            "aircraft",
        ], f"Per-class accuracy not supported for the {args.dataset} dataset."

        # VERY IMPORTANT
        # We report the per-class accuracy using the validation-set
        # distribution, so ignore the training accuracy (it can exceed 100 and
        # does not capture anything meaningful); only the validation accuracy
        # is informative here.
        args.custom_accuracy = get_per_class_accuracy(args, validation_loader)

    model, checkpoint = get_model(args, ds)

    if args.eval_only:
        return train.eval_model(args, model, validation_loader, store=store)

    update_params = freeze_model(model, freeze_level=args.freeze_level)

    log_info({"state.progress": 0.0})
    print(f"Dataset: {args.dataset} | Model: {args.arch}")
    train.train_model(
        args,
        model,
        (train_loader, validation_loader),
        store=store,
        checkpoint=checkpoint,
        update_params=update_params,
    )
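
# `get_model` is not shown in these snippets; a plausible minimal version, built on
# robustness's make_and_restore_model (an assumption about the helper, not the
# repository's actual implementation, which may also handle pretrained weights and
# architecture-specific tweaks):
from robustness.model_utils import make_and_restore_model

def get_model(args, ds):
    model, checkpoint = make_and_restore_model(arch=args.arch, dataset=ds,
                                               resume_path=args.model_path)
    return model, checkpoint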
Example #4
out_store = cox.store.Store(args.out_dir)

# Hard-coded base parameters
train_kwargs = {
    'out_dir': args.out_dir,
    'adv_train': 0,
    'constraint': GaussianNoise,
    'eps': args.eps,
    'attack_lr': args.eps,
    'lr': args.lr,
    'attack_steps': 1,
    'step_lr': 2000,
    'random_start': 1,
    'use_best': False,
    'epochs': args.epochs,
    'save_ckpt_iters': -1,  # best and last
    'eps_fadein_epochs': args.fade_in
}
train_args = Parameters(train_kwargs)

# Fill in whatever parameters are missing with the ImageNet defaults
ds_class = datasets.DATASETS['imagenet']
train_args = defaults.check_and_fill_args(train_args,
                                          defaults.TRAINING_ARGS, ds_class)
train_args = defaults.check_and_fill_args(train_args,
                                          defaults.PGD_ARGS, ds_class)

# Train a model
train.train_model(train_args, model, (train_loader, val_loader), store=out_store)
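
# `GaussianNoise` above is not defined in this snippet. The robustness attacker accepts
# a custom step class in place of a string constraint, so a rough sketch of such a
# class (following robustness.attack_steps.AttackerStep; the noise logic here is an
# assumption, not necessarily what this example actually used) might look like:
import torch as ch
from robustness.attack_steps import AttackerStep

class GaussianNoise(AttackerStep):
    '''"Attack" step that ignores the gradient and adds i.i.d. Gaussian noise.
    (Setting self.use_grad = False in __init__ would skip the unneeded
    gradient computation.)'''
    def project(self, x):
        # Keep the perturbed input in the valid image range
        return ch.clamp(x, 0, 1)

    def step(self, x, g):
        # Ignore the gradient g and add fresh noise of scale eps
        return x + self.eps * ch.randn_like(x)

    def random_perturb(self, x):
        return ch.clamp(x + self.eps * ch.randn_like(x), 0, 1)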
Example #5
def main_trainer(args, store):
    ds, (train_loader, val_loader) = get_dataset_and_loaders(args)
    if args.single_class is not None:
        print(f"Boosting towards a single class {args.single_class}")
        # Transform everything to have the same label
        class_tx = lambda x, y: (x, ch.ones_like(y) * args.single_class)
        train_loader = loaders.LambdaLoader(train_loader, class_tx)
        val_loader = loaders.LambdaLoader(val_loader, class_tx)

    model = get_boosted_model(args, ds)

    # Resume training the boosted model from a checkpoint
    resume_path = os.path.join(args.out_dir, args.exp_name,
                               'checkpoint.pt.latest')
    checkpoint = None
    if args.resume and os.path.isfile(resume_path):
        print('[Resuming training BoostedModel from a checkpoint...]')
        checkpoint = ch.load(resume_path, pickle_module=dill)

        sd = checkpoint['model']
        sd = {k[len('module.'):]: v for k, v in sd.items()}
        model.load_state_dict(sd)
        print("=> loaded checkpoint of BoostedModel'{}' (epoch {})".format(
            resume_path, checkpoint['epoch']))

    print(f"Dataset: {args.dataset} | Model: {args.arch}")

    if args.eval_only:
        print('==>[Evaluating the model]')
        return train.eval_model(args, model, val_loader, store=store)

    parameters = [
        model.dummy
    ]  # avoids passing an empty parameter list to the optimizer when only the booster is optimized
    if args.training_mode in ['joint', 'model']:
        parameters = model.boosted_model.parameters()

    def iteration_hook(model, i, loop_type, inp, target):
        if loop_type == 'val' or model.module.booster is None:
            return
        if args.training_mode in ['booster', 'joint']:
            model.module.booster.step_booster(lr=args.patch_lr)
        if i % args.save_freq == 0:
            save_dir = Path(store.save_dir)
            #TODO: Move this part inside the 2D boosters. It is a bit tricky
            # because, if we do that, we can only save the boosted images, not
            # the "corrupted" boosted images as well.
            if args.boosting != '3d':
                inp, target = inp.cuda(), target.cuda()
                example_boosted = model.module.booster(inp, target)
                bs_path = save_dir / f'boosted_{i}.jpg'
                save_image(example_boosted[:4], bs_path)
                example_adversaried = model.module.boosted_model.apply(
                    example_boosted)
                inp_path = save_dir / f'inp_{i}.jpg'
                adv_path = save_dir / f'adv_{i}.jpg'
                save_image(inp[:4], inp_path)
                save_image(example_adversaried[:4], adv_path)
            else:
                if not args.save_only_last:
                    save_dir = save_dir / f'iteration_{i}'
                    os.makedirs(save_dir)
                with ch.no_grad():
                    model(inp, target, save_dir=save_dir)
            if i == 0:
                print(f'Saved in {store.save_dir}')

    args.iteration_hook = iteration_hook

    return train.train_model(args,
                             model, (train_loader, val_loader),
                             store=store,
                             checkpoint=checkpoint,
                             update_params=parameters)
Example #6
            loss.backward()
            adv = self.clip(adv + step_size * torch.sign(adv.grad.data), inp,
                            eps)  # gradient ASCENT
        return adv.clone().detach()
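
# Only the tail of the custom WhiteboxPGD attacker is shown above. A self-contained
# sketch of such a drop-in attacker (the constructor/forward signature mirrors how
# robustness calls model.attacker; the clip helper, normalization, and loop details
# are assumptions, not the original code):
import torch
import torch.nn as nn
from robustness.tools.helpers import InputNormalize

class WhiteboxPGD(nn.Module):
    def __init__(self, model, dataset):
        super().__init__()
        self.model = model
        self.normalize = InputNormalize(dataset.mean, dataset.std)

    def clip(self, adv, inp, eps):
        # Project back into the L-inf eps-ball around inp, then into [0, 1]
        return torch.clamp(torch.min(torch.max(adv, inp - eps), inp + eps), 0, 1)

    def forward(self, inp, target, eps=8 / 255, step_size=2 / 255,
                iterations=10, **kwargs):
        adv = inp.clone().detach()
        for _ in range(iterations):
            adv = adv.clone().detach().requires_grad_(True)
            loss = nn.CrossEntropyLoss()(self.model(self.normalize(adv)), target)
            loss.backward()
            adv = self.clip(adv + step_size * torch.sign(adv.grad.data), inp,
                            eps)  # gradient ASCENT
        return adv.clone().detach()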


ds = CIFAR('/scratch/raunakc/datasets/cifar10')
model, _ = make_and_restore_model(arch='resnet18', dataset=ds)
model.attacker = WhiteboxPGD(model.model, ds)

train_kwargs = {
    'dataset': 'cifar',
    'arch': 'resnet',
    'out_dir': "train_out",
    'adv_train': 1,
    'adv_eval': 1,
    'eps': 8 / 255,
    'attack_lr': 2 / 255,
    'attack_steps': 10,
    'constraint': 'inf'  # not used by this custom attacker, but the arg checker requires it
}

args = utils.Parameters(train_kwargs)
args = check_and_fill_args(args, defaults.TRAINING_ARGS, ds.__class__)
if args.adv_train or args.adv_eval:
    args = check_and_fill_args(args, defaults.PGD_ARGS, ds.__class__)
args = check_and_fill_args(args, defaults.MODEL_LOADER_ARGS, ds.__class__)

train_loader, val_loader = ds.make_loaders(batch_size=128, workers=8)
train.train_model(args, model, (train_loader, val_loader))
Example #7
    train_args = cox.utils.Parameters(train_kwargs)

    dx = utils.CIFAR10()
    dataset = dx.get_dataset()

    args = check_and_fill_args(train_args, defaults.TRAINING_ARGS, ds_class)
    args = check_and_fill_args(args, defaults.MODEL_LOADER_ARGS, ds_class)

    model, _ = make_and_restore_model(arch='vgg19', dataset=dataset)

    # Make the data loaders
    train_loader, val_loader = dataset.make_loaders(args.workers,
                                                    args.batch_size,
                                                    data_aug=bool(
                                                        args.data_aug))

    # Prefetches data to improve performance
    train_loader = helpers.DataPrefetcher(train_loader)
    val_loader = helpers.DataPrefetcher(val_loader)

    store = cox.store.Store(args.out_dir, args.exp_name)
    args_dict = args.as_dict() if isinstance(
        args, cox.utils.Parameters) else vars(args)
    schema = cox.store.schema_from_dict(args_dict)
    store.add_table('metadata', schema)
    store['metadata'].append_row(args_dict)

    model = train_model(args, model, (train_loader, val_loader), store=store)