Code example #1
def deepfool_choose(target_model: Module, blackbox: Blackbox, queryset,
                    testset, selection: set, transferset: List,
                    indices_list: List, device, penalty: ndarray, **params):
    model_dir = os.path.join(params['model_dir'],
                             'deepfool.{}'.format(params['deepfool_budget']))
    os.makedirs(model_dir, exist_ok=True)
    surrogate = target_model
    # Label the test set once through the blackbox to obtain a fixed evaluation set.
    evaluation_set = query(blackbox, [data[0] for data in testset],
                           len(testset),
                           device=device,
                           argmax=True)
    optimizer = get_optimizer(surrogate.parameters(),
                              params['optimizer_choice'], **params)
    reverse = True
    budget = params['deepfool_budget']
    ispenalty = params['ispenalty']
    batch_permutation = []
    total = set(range(len(queryset)))
    unselected = list(total - selection)
    num_classes = len(testset.classes)
    print('{} objects to score.'.format(len(unselected)))
    # Score every unselected sample by the norm of its minimal DeepFool perturbation.
    for index in tqdm(unselected):
        permutation, _, _, _, _ = deepfool(queryset[index][0], surrogate,
                                           penalty, ispenalty, num_classes)
        batch_permutation.append(np.linalg.norm(permutation))
    batch_permutation = np.array(batch_permutation)
    # Pick the `budget` largest norms when reverse is set, otherwise the smallest.
    order = batch_permutation.argsort()
    current_selection = order[-budget:] if reverse else order[:budget]
    assert len(current_selection) == budget
    training_batch = [queryset[unselected[i]][0] for i in current_selection]
    current_selection = [unselected[i] for i in current_selection]
    indices_list.extend(current_selection)
    selection.update(current_selection)
    transferset.extend(query(blackbox, training_batch, budget, device=device))
    model_utils.train_model(surrogate,
                            transferset,
                            model_dir,
                            testset=evaluation_set,
                            criterion_train=model_utils.soft_cross_entropy,
                            optimizer=optimizer,
                            checkpoint_suffix='.deepfool.{}'.format(
                                len(transferset)),
                            **params)
    save_selection_state(transferset, selection, indices_list, model_dir)
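
The selection step above is just an argsort over perturbation norms. Below is a minimal, self-contained sketch of that rule (NumPy only; select_by_norm, norms, budget, and reverse are illustrative stand-ins, not part of the original code):

import numpy as np

def select_by_norm(norms: np.ndarray, budget: int, reverse: bool = True) -> np.ndarray:
    """Return indices of the `budget` smallest norms, or the largest if reverse."""
    order = norms.argsort()
    return order[-budget:] if reverse else order[:budget]

norms = np.array([0.9, 0.1, 0.5, 0.7, 0.3])
print(select_by_norm(norms, budget=2, reverse=False))  # [1 4]: closest to the boundary
print(select_by_norm(norms, budget=2, reverse=True))   # [3 0]: farthest from the boundary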
Code example #2
    def get_transferset(self):
        """Grow the transfer set D by alternating substitute training and
        Jacobian-based data augmentation until the query budget is spent.

        :return: the transfer set D and the substitute model trained on it
        """
        model_adv = None  # guards the return below if the budget is already exhausted
        rho_current = 0
        while self.blackbox.call_count < self.budget:
            print('=> Beginning substitute epoch {} (|D| = {})'.format(rho_current, len(self.D)))
            # -------------------------- 0. Initialize Model
            model_adv = zoo.get_net(self.model_adv_name, self.modelfamily, self.model_adv_pretrained,
                                    num_classes=self.num_classes)
            model_adv = model_adv.to(self.device)

            # -------------------------- 1. Train model on D
            model_adv = model_utils.train_model(model_adv, self.D, self.out_dir, num_workers=10,
                                                checkpoint_suffix='.{}'.format(self.blackbox.call_count),
                                                device=self.device, epochs=self.train_epochs, log_interval=500, lr=0.1,
                                                momentum=0.9, batch_size=self.batch_size, lr_gamma=0.1,
                                                testset=self.testset, criterion_train=model_utils.soft_cross_entropy)

            # -------------------------- 2. Evaluate model
            # _, acc = model_utils.test_step(model_adv, self.testloader, nn.CrossEntropyLoss(reduction='mean'),
            #                                device=self.device, epoch=rho_current)
            # self.accuracies.append(acc)

            # -------------------------- 3. Jacobian-based data augmentation
            if self.aug_strategy in ['jbda', 'jbself']:
                self.D = self.jacobian_augmentation(model_adv, rho_current)
            elif self.aug_strategy == 'jbtop{}'.format(self.topk):
                self.D = self.jacobian_augmentation_topk(model_adv, rho_current)
            else:
                raise ValueError('Unrecognized augmentation strategy: "{}"'.format(self.aug_strategy))

            # -------------------------- 4. End if necessary
            rho_current += 1
            if (self.blackbox.call_count >= self.budget) or ((self.rho is not None) and (rho_current >= self.rho)):
                print('=> # BB Queries ({}) >= budget ({}). Ending attack.'.format(self.blackbox.call_count,
                                                                                   self.budget))
                model_adv = zoo.get_net(self.model_adv_name, self.modelfamily, self.model_adv_pretrained,
                                        num_classes=self.num_classes)
                model_adv = model_adv.to(self.device)
                model_adv = model_utils.train_model(model_adv, self.D, self.out_dir, num_workers=10,
                                                    checkpoint_suffix='.{}'.format(self.blackbox.call_count),
                                                    device=self.device, epochs=self.final_train_epochs,
                                                    log_interval=500, lr=0.01, momentum=0.9, batch_size=self.batch_size,
                                                    lr_gamma=0.1, testset=self.testset,
                                                    criterion_train=model_utils.soft_cross_entropy)
                break

            print()

        return self.D, model_adv
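
The loop stops on whichever limit is hit first: the blackbox query budget or the epoch cap rho. A toy sketch of that control flow (plain Python; run_epochs and queries_per_epoch are hypothetical stand-ins for the real query accounting):

def run_epochs(budget, rho, queries_per_epoch):
    call_count, rho_current = 0, 0
    while call_count < budget:
        call_count += queries_per_epoch  # stand-in for the augmentation queries
        rho_current += 1
        if call_count >= budget or (rho is not None and rho_current >= rho):
            break
    return rho_current, call_count

print(run_epochs(budget=100, rho=6, queries_per_epoch=30))   # (4, 120): budget hit first
print(run_epochs(budget=1000, rho=6, queries_per_epoch=30))  # (6, 180): rho hit first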
Code example #3
def deepfool_active():
    params = parser_dealer({
        'transfer': False,
        'active': False,
        'sampling': False,
        'synthetic': False,
        'black_box': True,
        'train': True
    })
    model_dir = params['model_dir']
    transferset, num_classes = load_transferset(
        os.path.join(model_dir, 'transferset.pickle'))
    surrogate = params['surrogate']
    blackbox = params['blackbox']
    device = params['device']
    testset = params['testset']
    # TODO: make the iteration batch size and number of rounds configurable parameters.
    iter_batch = 1000
    rounds = 20
    remnant = set(range(len(transferset)))
    selected = set()
    params['testset'] = query(blackbox, [data[0] for data in testset],
                              len(testset),
                              device=device,
                              argmax=True)
    optimizer = get_optimizer(surrogate.parameters(),
                              params['optimizer_choice'], **params)

    for i in range(rounds):
        batch_samples = []
        batch_permutation = []
        current_round = list(remnant)
        print('round {}: {} objects to score.'.format(i + 1, len(remnant)))
        for index in current_round:
            permutation, _, _, _, result = deepfool(transferset[index][0],
                                                    surrogate, num_classes)
            batch_samples.append(result.squeeze(0))
            batch_permutation.append(np.linalg.norm(permutation))
        batch_permutation = np.array(batch_permutation)
        # Select the iter_batch samples closest to the decision boundary (smallest norms).
        selection = batch_permutation.argsort()[:iter_batch]
        training_batch = [batch_samples[j] for j in selection]
        selection = [current_round[j] for j in selection]
        selected.update(selection)
        remnant.difference_update(selection)
        transferset.extend(
            query(blackbox, training_batch, iter_batch, device=device))
        model_utils.train_model(surrogate,
                                transferset,
                                model_dir,
                                criterion_train=model_utils.soft_cross_entropy,
                                optimizer=optimizer,
                                **params)
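
Each round moves the chosen indices from remnant to selected, which guarantees no sample is queried twice. A minimal sketch of that bookkeeping (plain Python; the toy scores stand in for DeepFool perturbation norms):

remnant = set(range(10))
selected = set()
iter_batch = 3

for rnd in range(2):
    pool = sorted(remnant)
    scores = [(i * 7) % 10 for i in pool]           # stand-in for perturbation norms
    order = sorted(range(len(pool)), key=scores.__getitem__)
    chosen = [pool[j] for j in order[:iter_batch]]  # smallest scores first
    selected.update(chosen)
    remnant.difference_update(chosen)

print(sorted(selected), sorted(remnant))
assert selected.isdisjoint(remnant)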
Code example #4
    def train(self):
        model_utils.train_model(self.surrogate,
                                self.selected,
                                self.path,
                                batch_size=self.batch_size,
                                testset=self.evaluation_set,
                                criterion_train=self.criterion,
                                checkpoint_suffix='.active.{}'.format(
                                    len(self.selected)),
                                device=self.device,
                                optimizer=self.optim,
                                **self.kwargs)
Code example #5
def train(model, transferset_samples, budget, round_id):
    # Hyperparameters (model_dir, testset, ...) are attached as attributes on this
    # function object before it is called; see the sketch after this example.
    transferset = samples_to_transferset(transferset_samples, budget=budget)
    print()
    print('=> Training at budget = {}'.format(len(transferset)))
    checkpoint_suffix = '.{}.{}'.format(budget, round_id)

    model_utils.train_model(model,
                            transferset,
                            train.model_dir,
                            testset=train.testset,
                            criterion_train=train.criterion_train,
                            checkpoint_suffix=checkpoint_suffix,
                            device=train.device,
                            optimizer=train.optimizer,
                            **train.params)
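
The train.model_dir, train.testset, and similar lookups only work because the caller attaches those values as attributes on the train function object before invoking it. A minimal sketch of that pattern (plain Python; greet and prefix are hypothetical):

def greet(name):
    return '{} {}!'.format(greet.prefix, name)

greet.prefix = 'Hello'  # configure the function object itself
print(greet('world'))   # Hello world!

A plain parameter or a small config object is usually clearer; the attribute trick mainly helps when the callable must keep a fixed signature, e.g. for a callback.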
Code example #6
    def train(self,
              trainset: Union[Dataset, Iterable[Tuple[Tensor, Tensor]],
                              Iterable[Tensor], Iterable[int]],
              to_query: bool = False):
        if self.blackbox is None and to_query:
            raise RuntimeError("Blackbox does not exist; cannot query")
        elif to_query:
            if isinstance(trainset[0], tuple):
                # trainset is a Dataset or Iterable[Tuple[Tensor, Tensor]]; unpack the inputs.
                data = self.query(unpack(trainset))
            elif isinstance(trainset[0], Tensor):
                # trainset already holds bare input tensors.
                data = self.query(trainset)
            elif isinstance(trainset[0], int):
                # trainset is a list of indices into the sample pool.
                assert self.sampleset is not None
                duplication = self.selection.intersection(trainset)
                if len(duplication) > 0:
                    print('{} samples duplicated.'.format(len(duplication)))
                difference = set(trainset) - duplication
                data = self.query([self.sampleset[i][0] for i in difference])
                self.selection.update(difference)
            else:
                raise TypeError('Unsupported trainset element type: {}'.format(
                    type(trainset[0])))
        else:
            # trainset already carries (input, label) pairs; no query needed.
            data = trainset
        self.transfer.extend(data)

        train_model(self.target_model,
                    self.transfer,
                    self.state_dir,
                    self.batch_size,
                    self.criterion,
                    testset=self.evaluation_set,
                    device=self.device,
                    num_workers=self.num_workers,
                    optimizer=self.optim,
                    **self.kwargs)
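
The branching in this method dispatches on the type of the first trainset element. A self-contained sketch of the same dispatch (PyTorch; label_for is a hypothetical stand-in for the blackbox query):

import torch

def label_for(x):  # hypothetical stand-in for a blackbox query
    return x.sum().item()

def to_pairs(trainset):
    first = trainset[0]
    if isinstance(first, tuple):            # (input, label) pairs: relabel the inputs
        return [(x, label_for(x)) for x, _ in trainset]
    elif isinstance(first, torch.Tensor):   # bare input tensors
        return [(x, label_for(x)) for x in trainset]
    raise TypeError('unsupported element type: {}'.format(type(first)))

xs = [torch.ones(3), torch.zeros(3)]
print(to_pairs(xs))                         # labels computed from bare tensors
print(to_pairs([(x, None) for x in xs]))    # labels recomputed from pairs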
Code example #7
def main():
    parser = argparse.ArgumentParser(description='Train a model')
    # Required arguments
    parser.add_argument('dataset',
                        metavar='DS_NAME',
                        type=str,
                        help='Dataset name')
    parser.add_argument('model_arch',
                        metavar='MODEL_ARCH',
                        type=str,
                        help='Model name')
    # Optional arguments
    parser.add_argument('-x',
                        '--complexity',
                        metavar='X',
                        type=int,
                        help='Complexity (channel width) of the conv layers.',
                        default=64)
    parser.add_argument('-o',
                        '--out_path',
                        metavar='PATH',
                        type=str,
                        help='Output path for model',
                        default=cfg.MODEL_DIR)
    parser.add_argument('-d',
                        '--device_id',
                        metavar='D',
                        type=int,
                        help='Device id. -1 for CPU.',
                        default=0)
    parser.add_argument('-b',
                        '--batch-size',
                        type=int,
                        default=64,
                        metavar='N',
                        help='input batch size for training (default: 64)')
    parser.add_argument('-e',
                        '--epochs',
                        type=int,
                        default=100,
                        metavar='N',
                        help='number of epochs to train (default: 100)')
    parser.add_argument('--lr',
                        type=float,
                        default=0.1,
                        metavar='LR',
                        help='learning rate (default: 0.1)')
    parser.add_argument('--momentum',
                        type=float,
                        default=0.5,
                        metavar='M',
                        help='SGD momentum (default: 0.5)')
    parser.add_argument(
        '--log-interval',
        type=int,
        default=100,
        metavar='N',
        help='how many batches to wait before logging training status')
    parser.add_argument('--resume',
                        default=None,
                        type=str,
                        metavar='PATH',
                        help='path to latest checkpoint (default: none)')
    parser.add_argument('--lr-step',
                        type=int,
                        default=30,
                        metavar='N',
                        help='Step sizes for LR')
    parser.add_argument('--lr-gamma',
                        type=float,
                        default=0.1,
                        metavar='N',
                        help='LR Decay Rate')
    parser.add_argument('-w',
                        '--num_workers',
                        metavar='N',
                        type=int,
                        help='# Worker threads to load data',
                        default=10)
    parser.add_argument('--train_subset',
                        type=int,
                        help='Use a subset of train set',
                        default=None)
    parser.add_argument('--pretrained',
                        type=str,
                        help='Use pretrained network',
                        default=None)
    parser.add_argument('--weighted-loss',
                        action='store_true',
                        help='Use a weighted loss',
                        default=None)
    parser.add_argument('--optimizer-choice',
                        type=str,
                        help='Optimizer',
                        default='sgdm',
                        choices=('sgd', 'sgdm', 'adam', 'adagrad'))
    args = parser.parse_args()
    params = vars(args)

    if params['device_id'] >= 0:
        os.environ["CUDA_VISIBLE_DEVICES"] = str(params['device_id'])
        device = torch.device('cuda')
    else:
        device = torch.device('cpu')

    # ----------- Set up dataset
    dataset_name = params['dataset']
    valid_datasets = datasets.__dict__.keys()
    if dataset_name not in valid_datasets:
        raise ValueError(
            'Dataset not found. Valid arguments = {}'.format(valid_datasets))
    dataset = datasets.__dict__[dataset_name]

    modelfamily = datasets.dataset_to_modelfamily[dataset_name]
    train_transform = datasets.modelfamily_to_transforms[modelfamily]['train']
    test_transform = datasets.modelfamily_to_transforms[modelfamily]['test']
    trainset = dataset(train=True, transform=train_transform)
    testset = dataset(train=False, transform=test_transform)
    num_classes = len(trainset.classes)
    sample = testset[0][0]
    if len(sample.shape) <= 2:
        # HxW grayscale image with no channel axis
        channel = 1
    else:
        channel = sample.shape[0]
    params['channel'] = channel
    params['num_classes'] = num_classes

    if params['train_subset'] is not None:
        idxs = np.arange(len(trainset))
        ntrainsubset = params['train_subset']
        idxs = np.random.choice(idxs, size=ntrainsubset, replace=False)
        trainset = Subset(trainset, idxs)

    # ----------- Set up model
    model_name = params['model_arch']
    pretrained = params['pretrained']
    complexity = params['complexity']
    model = zoo.get_net(model_name,
                        modelfamily,
                        pretrained,
                        num_classes=num_classes,
                        channel=channel,
                        complexity=complexity)
    model = model.to(device)
    optimizer = get_optimizer(model.parameters(), params['optimizer_choice'],
                              **params)

    # ----------- Train
    out_path = params['out_path']
    model_utils.train_model(model,
                            trainset,
                            testset=testset,
                            device=device,
                            optimizer=optimizer,
                            **params)

    # Store arguments
    params['created_on'] = str(datetime.now())
    params_out_path = osp.join(out_path, 'params.json')
    with open(params_out_path, 'w') as jf:
        json.dump(params, jf, indent=True)
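
The channel-inference step assumes the usual PyTorch layout: HxW for grayscale images and CxHxW for color. A quick check of that convention (PyTorch only; infer_channels is illustrative):

import torch

def infer_channels(sample: torch.Tensor) -> int:
    # An HxW grayscale image has no channel axis; CxHxW images carry it in dim 0.
    return 1 if sample.dim() <= 2 else sample.shape[0]

print(infer_channels(torch.zeros(28, 28)))     # 1 (MNIST-style)
print(infer_channels(torch.zeros(3, 32, 32)))  # 3 (CIFAR-style)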
Code example #8
def main():
    parser = argparse.ArgumentParser(description='Train a model')
    # Required arguments
    parser.add_argument('model_dir', metavar='DIR', type=str, help='Directory containing transferset.pickle')
    parser.add_argument('model_arch', metavar='MODEL_ARCH', type=str, help='Model name')
    parser.add_argument('testdataset', metavar='DS_NAME', type=str, help='Name of test dataset')
    parser.add_argument('--budgets', metavar='B', type=str,
                        help='Comma separated values of budgets. Knockoffs will be trained for each budget.')
    # Optional arguments
    parser.add_argument('-d', '--device_id', metavar='D', type=int, help='Device id. -1 for CPU.', default=0)
    parser.add_argument('-b', '--batch-size', type=int, default=64, metavar='N',
                        help='input batch size for training (default: 64)')
    parser.add_argument('-e', '--epochs', type=int, default=100, metavar='N',
                        help='number of epochs to train (default: 100)')
    parser.add_argument('--lr', type=float, default=0.01, metavar='LR',
                        help='learning rate (default: 0.01)')
    parser.add_argument('--momentum', type=float, default=0.5, metavar='M',
                        help='SGD momentum (default: 0.5)')
    parser.add_argument('--log-interval', type=int, default=50, metavar='N',
                        help='how many batches to wait before logging training status')
    parser.add_argument('--resume', default=None, type=str, metavar='PATH',
                        help='path to latest checkpoint (default: none)')
    parser.add_argument('--lr-step', type=int, default=60, metavar='N',
                        help='Step sizes for LR')
    parser.add_argument('--lr-gamma', type=float, default=0.1, metavar='N',
                        help='LR Decay Rate')
    parser.add_argument('-w', '--num_workers', metavar='N', type=int, help='# Worker threads to load data', default=10)
    parser.add_argument('--pretrained', type=str, help='Use pretrained network', default=None)
    parser.add_argument('--weighted-loss', action='store_true', help='Use a weighted loss', default=False)
    # Attacker's defense
    parser.add_argument('--argmaxed', action='store_true', help='Only consider argmax labels', default=False)
    parser.add_argument('--optimizer_choice', type=str, help='Optimizer', default='sgdm', choices=('sgd', 'sgdm', 'adam', 'adagrad'))
    args = parser.parse_args()
    params = vars(args)

    torch.manual_seed(cfg.DEFAULT_SEED)
    if params['device_id'] >= 0:
        os.environ["CUDA_VISIBLE_DEVICES"] = str(params['device_id'])
        device = torch.device('cuda')
    else:
        device = torch.device('cpu')
    model_dir = params['model_dir']

    # ----------- Set up transferset
    transferset_path = osp.join(model_dir, 'transferset.pickle')
    with open(transferset_path, 'rb') as rf:
        transferset_samples = pickle.load(rf)
    num_classes = transferset_samples[0][1].size(0)
    print('=> found transfer set with {} samples, {} classes'.format(len(transferset_samples), num_classes))

    # ----------- Clean up transfer (if necessary)
    if params['argmaxed']:
        new_transferset_samples = []
        print('=> Using argmax labels (instead of posterior probabilities)')
        for i in range(len(transferset_samples)):
            x_i, y_i = transferset_samples[i]
            argmax_k = y_i.argmax()
            y_i_1hot = torch.zeros_like(y_i)
            y_i_1hot[argmax_k] = 1.
            new_transferset_samples.append((x_i, y_i_1hot))
        transferset_samples = new_transferset_samples

    # ----------- Set up testset
    dataset_name = params['testdataset']
    valid_datasets = datasets.__dict__.keys()
    # Validate the name before the dict lookups so unknown datasets raise the intended error.
    if dataset_name not in valid_datasets:
        raise ValueError('Dataset not found. Valid arguments = {}'.format(valid_datasets))
    modelfamily = datasets.dataset_to_modelfamily[dataset_name]
    transform = datasets.modelfamily_to_transforms[modelfamily]['test']
    dataset = datasets.__dict__[dataset_name]
    testset = dataset(train=False, transform=transform)
    if len(testset.classes) != num_classes:
        raise ValueError('# Transfer classes ({}) != # Testset classes ({})'.format(num_classes, len(testset.classes)))

    # ----------- Set up model
    model_name = params['model_arch']
    pretrained = params['pretrained']
    model = zoo.get_net(model_name, modelfamily, pretrained, num_classes=num_classes)
    model = model.to(device)

    # ----------- Train
    budgets = [int(b) for b in params['budgets'].split(',')]

    for b in budgets:
        np.random.seed(cfg.DEFAULT_SEED)
        torch.manual_seed(cfg.DEFAULT_SEED)
        torch.cuda.manual_seed(cfg.DEFAULT_SEED)

        transferset = samples_to_transferset(transferset_samples, budget=b, transform=transform)
        print()
        print('=> Training at budget = {}'.format(len(transferset)))

        optimizer = get_optimizer(model.parameters(), params['optimizer_choice'], **params)
        print(params)

        checkpoint_suffix = '.{}'.format(b)
        criterion_train = model_utils.soft_cross_entropy
        model_utils.train_model(model, transferset, model_dir, testset=testset, criterion_train=criterion_train,
                                checkpoint_suffix=checkpoint_suffix, device=device, optimizer=optimizer, **params)

    # Store arguments
    params['created_on'] = str(datetime.now())
    params_out_path = osp.join(model_dir, 'params_train.json')
    with open(params_out_path, 'w') as jf:
        json.dump(params, jf, indent=True)
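
The --argmaxed cleanup converts each posterior vector to a one-hot label in a Python loop; the same conversion can be done in a single vectorized call. A sketch (PyTorch only; posteriors is illustrative data):

import torch

posteriors = torch.tensor([[0.1, 0.7, 0.2],
                           [0.6, 0.3, 0.1]])
one_hot = torch.zeros_like(posteriors)
one_hot.scatter_(1, posteriors.argmax(dim=1, keepdim=True), 1.0)
print(one_hot)  # tensor([[0., 1., 0.], [1., 0., 0.]])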
Code example #9
    def __init__(self, blackbox, budget, model_adv_name, model_adv_pretrained, modelfamily, seedset, testset, device,
                 out_dir, batch_size=cfg.DEFAULT_BATCH_SIZE, train_epochs=20, kappa=400, tau=None, rho=6, sigma=-1,
                 query_batch_size=1, aug_strategy='jbda', useprobs=True, final_train_epochs=100):
        self.blackbox = blackbox
        self.budget = budget
        self.model_adv_name = model_adv_name
        self.model_adv_pretrained = model_adv_pretrained
        self.model_adv = None
        self.modelfamily = modelfamily
        self.seedset = seedset
        self.testset = testset
        self.batch_size = batch_size
        self.query_batch_size = query_batch_size
        self.testloader = DataLoader(self.testset, batch_size=self.batch_size, pin_memory=True)
        self.train_epochs = train_epochs
        self.final_train_epochs = final_train_epochs
        self.kappa = kappa
        self.tau = tau
        self.rho = rho
        self.sigma = sigma
        self.device = device
        self.out_dir = out_dir
        self.num_classes = len(self.testset.classes)
        assert (aug_strategy in ['jbda', 'jbself']) or 'jbtop' in aug_strategy
        self.aug_strategy = aug_strategy
        self.topk = 0
        if 'jbtop' in aug_strategy:
            # extract k from "jbtop<k>"
            self.topk = int(aug_strategy.replace('jbtop', ''))

        self.accuracies = []  # Track test accuracies over time
        self.useprobs = useprobs

        # -------------------------- Initialize seed data
        print('=> Obtaining predictions over {} seed samples using strategy {}'.format(len(self.seedset),
                                                                                       self.aug_strategy))
        Dx = torch.cat([self.seedset[i][0].unsqueeze(0) for i in range(len(self.seedset))])
        Dy = []

        # Populate Dy
        with torch.no_grad():
            # A single-tensor TensorDataset yields 1-tuples, hence "for inputs,".
            for inputs, in DataLoader(TensorDataset(Dx), batch_size=self.query_batch_size):
                inputs = inputs.to(self.device)
                outputs = blackbox(inputs).cpu()
                if not self.useprobs:
                    # Collapse posteriors to hard one-hot labels when probabilities are unused.
                    labels = torch.argmax(outputs, dim=1)
                    labels_onehot = make_one_hot(labels, outputs.shape[1])
                    outputs = labels_onehot
                Dy.append(outputs)
        Dy = torch.cat(Dy)

        # TensorDataset D
        self.D = TensorDataset(Dx, Dy)

        # Warm-up training pass that reserves ("blocks") the GPU memory later training will need
        model_adv = zoo.get_net(self.model_adv_name, self.modelfamily, self.model_adv_pretrained,
                                num_classes=self.num_classes)
        model_adv = model_adv.to(self.device)
        model_adv = model_utils.train_model(model_adv, self.D, self.out_dir, num_workers=10,
                                            checkpoint_suffix='.{}'.format(self.blackbox.call_count),
                                            device=self.device, epochs=1,
                                            log_interval=500, lr=0.01, momentum=0.9, batch_size=self.batch_size,
                                            lr_gamma=0.1, testset=self.testset,
                                            criterion_train=model_utils.soft_cross_entropy)
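
The seed-labeling loop batches queries through a single-tensor TensorDataset. A self-contained sketch of that pattern, with a small linear model standing in for the blackbox (PyTorch only; all names are illustrative):

import torch
from torch.utils.data import DataLoader, TensorDataset

blackbox = torch.nn.Linear(4, 3)  # stand-in for the victim model
Dx = torch.randn(10, 4)

Dy = []
with torch.no_grad():
    # A single-tensor TensorDataset yields 1-tuples, hence "for inputs,".
    for inputs, in DataLoader(TensorDataset(Dx), batch_size=4):
        Dy.append(torch.softmax(blackbox(inputs), dim=1))
Dy = torch.cat(Dy)
print(Dy.shape)  # torch.Size([10, 3])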
Code example #10
File: train.py Project: rahulsmehta/knockoffnets
def main():
    parser = argparse.ArgumentParser(description='Train a model')
    # Required arguments
    parser.add_argument('dataset',
                        metavar='DS_NAME',
                        type=str,
                        help='Dataset name')
    parser.add_argument('model_arch',
                        metavar='MODEL_ARCH',
                        type=str,
                        help='Model name')
    # Optional arguments
    parser.add_argument('-o',
                        '--out_path',
                        metavar='PATH',
                        type=str,
                        help='Output path for model',
                        default=cfg.MODEL_DIR)
    parser.add_argument('-d',
                        '--device_id',
                        metavar='D',
                        type=int,
                        help='Device id. -1 for CPU.',
                        default=0)
    parser.add_argument('-b',
                        '--batch-size',
                        type=int,
                        default=64,
                        metavar='N',
                        help='input batch size for training (default: 64)')
    parser.add_argument('-e',
                        '--epochs',
                        type=int,
                        default=100,
                        metavar='N',
                        help='number of epochs to train (default: 100)')
    parser.add_argument('--lr',
                        type=float,
                        default=0.1,
                        metavar='LR',
                        help='learning rate (default: 0.1)')
    parser.add_argument('--momentum',
                        type=float,
                        default=0.5,
                        metavar='M',
                        help='SGD momentum (default: 0.5)')
    parser.add_argument(
        '--log-interval',
        type=int,
        default=100,
        metavar='N',
        help='how many batches to wait before logging training status')
    parser.add_argument('--resume',
                        default=None,
                        type=str,
                        metavar='PATH',
                        help='path to latest checkpoint (default: none)')
    parser.add_argument('--lr-step',
                        type=int,
                        default=30,
                        metavar='N',
                        help='Step sizes for LR')
    parser.add_argument('--lr-gamma',
                        type=float,
                        default=0.1,
                        metavar='N',
                        help='LR Decay Rate')
    parser.add_argument('-w',
                        '--num_workers',
                        metavar='N',
                        type=int,
                        help='# Worker threads to load data',
                        default=10)
    parser.add_argument('--pretrained',
                        type=str,
                        help='Use pretrained network',
                        default=None)
    parser.add_argument('--weighted-loss',
                        action='store_true',
                        help='Use a weighted loss',
                        default=None)
    args = parser.parse_args()
    params = vars(args)

    torch.manual_seed(cfg.DEFAULT_SEED)
    if params['device_id'] >= 0:
        os.environ["CUDA_VISIBLE_DEVICES"] = str(params['device_id'])
        device = torch.device('cuda')
    else:
        device = torch.device('cpu')

    # ----------- Set up dataset
    dataset_name = params['dataset']
    valid_datasets = datasets.__dict__.keys()
    if dataset_name not in valid_datasets:
        raise ValueError(
            'Dataset not found. Valid arguments = {}'.format(valid_datasets))
    dataset = datasets.__dict__[dataset_name]

    modelfamily = datasets.dataset_to_modelfamily[dataset_name]
    train_transform = datasets.modelfamily_to_transforms[modelfamily]['train']
    test_transform = datasets.modelfamily_to_transforms[modelfamily]['test']
    trainset = dataset(train=True, transform=train_transform)
    testset = dataset(train=False, transform=test_transform)
    num_classes = len(trainset.classes)
    params['num_classes'] = num_classes

    # ----------- Set up model
    model_name = params['model_arch']
    pretrained = params['pretrained']
    model = zoo.get_net(model_name,
                        modelfamily,
                        pretrained,
                        num_classes=num_classes)
    model = model.to(device)

    # ----------- Train
    out_path = params['out_path']
    model_utils.train_model(model,
                            trainset,
                            testset=testset,
                            device=device,
                            **params)

    # Store arguments
    params['created_on'] = str(datetime.now())
    params_out_path = osp.join(out_path, 'params.json')
    with open(params_out_path, 'w') as jf:
        json.dump(params, jf, indent=True)
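
Both training scripts finish by serializing the argparse namespace, plus a creation timestamp, to JSON so a run can be reproduced later. A minimal sketch of the pattern (standard library only; the argument and path are illustrative):

import argparse
import json
import os
from datetime import datetime

parser = argparse.ArgumentParser()
parser.add_argument('--lr', type=float, default=0.1)
params = vars(parser.parse_args([]))  # empty argv, for the sketch only

params['created_on'] = str(datetime.now())
with open(os.path.join('.', 'params.json'), 'w') as jf:
    json.dump(params, jf, indent=True)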
Code example #11
def main():
    parser = argparse.ArgumentParser(description='Train a model')
    # Required arguments
    parser.add_argument('model_dir',
                        metavar='DIR',
                        type=str,
                        help='Directory containing transferset.pickle')
    parser.add_argument('model_arch',
                        metavar='MODEL_ARCH',
                        type=str,
                        help='Model name')
    parser.add_argument('testdataset',
                        metavar='DS_NAME',
                        type=str,
                        help='Name of test dataset')
    parser.add_argument('--budgets',
                        metavar='B',
                        type=int,
                        help='Query budget; the knockoff will be trained with this many queries.')
    # Optional arguments
    parser.add_argument('-d',
                        '--device_id',
                        metavar='D',
                        type=int,
                        help='Device id. -1 for CPU.',
                        default=0)
    parser.add_argument('-b',
                        '--batch-size',
                        type=int,
                        default=64,
                        metavar='N',
                        help='input batch size for training (default: 64)')
    parser.add_argument('-e',
                        '--epochs',
                        type=int,
                        default=10,
                        metavar='N',
                        help='number of epochs to train (default: 10)')
    parser.add_argument('--lr',
                        type=float,
                        default=0.01,
                        metavar='LR',
                        help='learning rate (default: 0.01)')
    parser.add_argument('--momentum',
                        type=float,
                        default=0.5,
                        metavar='M',
                        help='SGD momentum (default: 0.5)')
    parser.add_argument(
        '--log-interval',
        type=int,
        default=50,
        metavar='N',
        help='how many batches to wait before logging training status')
    parser.add_argument('--resume',
                        default=None,
                        type=str,
                        metavar='PATH',
                        help='path to latest checkpoint (default: none)')
    parser.add_argument('--lr-step',
                        type=int,
                        default=60,
                        metavar='N',
                        help='Step sizes for LR')
    parser.add_argument('--lr-gamma',
                        type=float,
                        default=0.1,
                        metavar='N',
                        help='LR Decay Rate')
    parser.add_argument('-w',
                        '--num_workers',
                        metavar='N',
                        type=int,
                        help='# Worker threads to load data',
                        default=10)
    parser.add_argument('--pretrained',
                        type=str,
                        help='Use pretrained network',
                        default=None)
    parser.add_argument('--weighted-loss',
                        action='store_true',
                        help='Use a weighted loss',
                        default=False)

    # RL arguments
    parser.add_argument('--traj_length',
                        metavar='N',
                        type=int,
                        help='# Steps in one trajectory',
                        default=10)
    parser.add_argument('--num_each_class',
                        metavar='N',
                        type=int,
                        help='# Samples in each class',
                        default=1)
    parser.add_argument('--n_iter',
                        metavar='N',
                        type=int,
                        help='# iterations of RL training',
                        default=10)
    parser.add_argument('--n_traj_each_iter',
                        metavar='N',
                        type=int,
                        help='# Trajectories per iteration',
                        default=10)
    parser.add_argument('--queryset',
                        metavar='DS_NAME',
                        type=str,
                        help='Name of query dataset')
    parser.add_argument('--victim_model_dir',
                        default=None,
                        type=str,
                        metavar='PATH',
                        help='Path to the victim model directory')

    parser.add_argument('--n_layers',
                        metavar='N',
                        type=int,
                        help='# layers in policy',
                        default=4)
    parser.add_argument('--size',
                        metavar='N',
                        type=int,
                        help='size of layer in policy',
                        default=64)
    parser.add_argument('--policy_lr',
                        type=float,
                        default=1e-4,
                        metavar='N',
                        help='Policy learning rate')
    parser.add_argument('--num_agent_train_steps_per_iter',
                        metavar='N',
                        type=int,
                        help='num_agent_train_steps_per_iter',
                        default=10)
    parser.add_argument('--agent_train_batch_size',
                        metavar='N',
                        type=int,
                        help='Batch size for agent training',
                        default=990)
    parser.add_argument('--policy_gamma',
                        type=float,
                        default=0.9,
                        metavar='N',
                        help='reward discounting')
    parser.add_argument('--eps_random',
                        type=float,
                        default=-1,
                        metavar='N',
                        help='eps random exploration')
    parser.add_argument('--nn_baseline',
                        action='store_true',
                        help='Use nn baseline',
                        default=False)

    # Attacker's defense
    parser.add_argument('--argmaxed',
                        action='store_true',
                        help='Only consider argmax labels',
                        default=False)
    parser.add_argument('--optimizer_choice',
                        type=str,
                        help='Optimizer',
                        default='sgdm',
                        choices=('sgd', 'sgdm', 'adam', 'adagrad'))
    args = parser.parse_args()
    params = vars(args)

    torch.manual_seed(cfg.DEFAULT_SEED)
    if params['device_id'] >= 0:
        os.environ["CUDA_VISIBLE_DEVICES"] = str(params['device_id'])
        device = torch.device('cuda')
        ptu.init_gpu()
    else:
        device = torch.device('cpu')

    model_dir = params['model_dir']

    # ----------- Set up testset
    dataset_name = params['testdataset']
    valid_datasets = datasets.__dict__.keys()
    # Validate the name before the dict lookups so unknown datasets raise the intended error.
    if dataset_name not in valid_datasets:
        raise ValueError(
            'Dataset not found. Valid arguments = {}'.format(valid_datasets))
    modelfamily = datasets.dataset_to_modelfamily[dataset_name]
    transform = datasets.modelfamily_to_transforms[modelfamily]['test']
    dataset = datasets.__dict__[dataset_name]
    testset = dataset(train=False, transform=transform)

    # ----------- Set up queryset
    queryset_name = params['queryset']
    valid_datasets = datasets.__dict__.keys()
    if queryset_name not in valid_datasets:
        raise ValueError(
            'Dataset not found. Valid arguments = {}'.format(valid_datasets))
    modelfamily = datasets.dataset_to_modelfamily[queryset_name]
    transform = datasets.modelfamily_to_transforms[modelfamily]['train']
    try:
        # Most datasets here take train=True; fall back to split="train" for the rest.
        queryset = datasets.__dict__[queryset_name](train=True,
                                                    transform=transform)
    except TypeError:
        queryset = datasets.__dict__[queryset_name](split="train",
                                                    transform=transform)

    num_classes = len(queryset.classes)

    # ----------- Initialize blackbox
    blackbox_dir = params['victim_model_dir']
    blackbox = Blackbox.from_modeldir(blackbox_dir, device)

    # ----------- Set up adversary model
    model_name = params['model_arch']
    pretrained = params['pretrained']
    adv_model = zoo.get_net(model_name,
                            modelfamily,
                            pretrained,
                            num_classes=10)  # NOTE: output classes hard-coded to 10 here
    adv_model = adv_model.to(device)

    # ----------- Initialize adversary
    num_each_class = params['num_each_class']
    agent_params = {
        "ac_dim": num_classes,
        "ob_dim": len(testset.classes),
        "n_layers": params["n_layers"],
        "size": params["size"],
        "discrete": True,
        "learning_rate": params["policy_lr"],
        "num_agent_train_steps_per_iter":
        params["num_agent_train_steps_per_iter"],
        "agent_train_batch_size": params["agent_train_batch_size"],
        "gamma": params["policy_gamma"],
        "reward_to_go": True,
        "nn_baseline": params["nn_baseline"],
        "standardize_advantages": True,
        "eps_random": params["eps_random"]
    }
    adversary = PGAdversary(queryset, num_each_class, agent_params)

    # ----------- Set up transferset
    def collect_training_trajectories(length, n_traj=10):
        nonlocal avg_rewards, avg_components
        paths = []
        mean_rew = 0
        mean_cert = mean_L = mean_E = mean_div = 0
        X_paths, Y_paths = [], []
        for _ in range(n_traj):
            obs, acs, rewards, next_obs = [], [], [], []
            r_certs, r_Ls, r_Es, r_divs = [], [], [], []
            X_path, Y_path = [], []
            X, actions = adversary.init_sampling()
            X_path.append(X)
            ob = blackbox(X)
            Y_path.append(ob)
            ob = ob.numpy()

            for t in range(length - 1):
                with torch.no_grad():
                    # Observe and react
                    obs.append(ob)
                    X_new, actions = adversary.sample(ob)
                    X_path.append(X_new)
                    acs.append(actions)

                    # Env gives feedback, which is a new observation
                    X_new = X_new.to(device)
                    ob = blackbox(X_new)
                    Y_path.append(ob)
                    ob = ob.cpu().numpy()
                    next_obs.append(ob)
                    Y_adv = adv_model(X_new)
                    Y_adv = F.softmax(Y_adv, dim=1).cpu().numpy()
                reward, r_cert, r_L, r_E, r_div = adversary.agent.calculate_reward(
                    ob, np.concatenate(acs), Y_adv)
                rewards.append(reward)
                r_certs.append(r_cert)
                r_Ls.append(r_L)
                r_Es.append(r_E)
                r_divs.append(r_div)

            obs = np.concatenate(obs)
            acs = np.concatenate(acs)

            rewards = np.concatenate(rewards)
            mean_rew += np.mean(rewards)

            mean_cert += np.mean(np.concatenate(r_certs))
            mean_L += np.mean(np.concatenate(r_Ls))
            mean_E += np.mean(np.array(r_Es))
            mean_div += np.mean(np.array(r_divs))

            next_obs = np.concatenate(next_obs)
            path = {
                "observation": obs,
                "action": acs,
                "reward": rewards,
                "next_observation": next_obs
            }
            paths.append(path)
            X_paths.append(torch.cat(X_path))
            Y_paths.append(torch.cat(Y_path))

        print(f"==> Avg reward: {mean_rew / n_traj}")
        avg_rewards.append(mean_rew / n_traj)
        avg_components["avg_cert"].append(mean_cert / n_traj)
        avg_components["avg_L"].append(mean_L / n_traj)
        avg_components["avg_E"].append(mean_E / n_traj)
        avg_components["avg_div"].append(mean_div / n_traj)
        return torch.cat(X_paths), torch.cat(Y_paths), paths

    traj_length = params['traj_length']
    num_each_class = params['num_each_class']
    n_iter = params['n_iter']
    X, Y = None, None
    budgets = params['budgets']
    n_traj = params['n_traj_each_iter']
    criterion_train = model_utils.soft_cross_entropy
    if traj_length > 0:
        n_iter = budgets // (traj_length * n_traj)

    print(f"==> Budget = {n_iter} x {traj_length} x {n_traj}")
    best_test_acc = []
    best_acc = -1
    avg_rewards = []
    avg_components = collections.defaultdict(list)
    for it in range(1, n_iter + 1):
        # n_iter * traj_length * n_traj queries ~ total budget
        print(f"==> Iteration: {it}/{n_iter}")
        X_path, Y_path, paths = collect_training_trajectories(traj_length,
                                                              n_traj=n_traj)

        adversary.add_to_replay_buffer(paths)

        adversary.train_agent()

        if X is None:
            X, Y = X_path, Y_path
        else:
            X = torch.cat((X, X_path))
            Y = torch.cat((Y, Y_path))

        transferset = ImageTensorSet((X, Y))

        # ----------- Train
        optimizer = get_optimizer(adv_model.parameters(),
                                  params['optimizer_choice'], **params)
        print(f"Train on {len(transferset)} samples")
        checkpoint_suffix = '.extraction'
        best_acc = model_utils.train_model(adv_model,
                                           transferset,
                                           model_dir,
                                           testset=testset,
                                           criterion_train=criterion_train,
                                           checkpoint_suffix=checkpoint_suffix,
                                           device=device,
                                           optimizer=optimizer,
                                           benchmark=best_acc,
                                           **params)
        best_test_acc.append(best_acc)
        adversary.agent.actor.save(
            osp.join(model_dir, "checkpoint.agent.state_dict"))

        # ----------- Log
        torch.save(best_test_acc, osp.join(model_dir, "best_acc.pylist"))
        torch.save(avg_rewards, osp.join(model_dir, "avg_rewards.pylist"))
        torch.save(avg_components, osp.join(model_dir,
                                            "avg_components.pydict"))
        torch.save(adversary.idx_counter,
                   osp.join(model_dir, "idx_counter.pydict"))
        torch.save(transferset, osp.join(model_dir, "transferset.pt"))

    # Store arguments
    params['created_on'] = str(datetime.now())
    params_out_path = osp.join(model_dir, 'params_train.json')
    with open(params_out_path, 'w') as jf:
        json.dump(params, jf, indent=True)

    agent_params_out_path = osp.join(model_dir, 'agent_params_train.json')
    with open(agent_params_out_path, 'w') as jf:
        json.dump(agent_params, jf, indent=True)
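
When traj_length > 0, the script derives the iteration count from the query budget: each iteration spends n_traj trajectories of traj_length queries, so n_iter = budgets // (traj_length * n_traj). A quick check of that arithmetic (plain Python; the numbers are illustrative):

budget = 2000
traj_length = 10
n_traj = 10

n_iter = budget // (traj_length * n_traj)
queries_used = n_iter * traj_length * n_traj
print(n_iter, queries_used)  # 20 2000
assert queries_used <= budget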