Example #1
def train_eval_model(args):
    dataset_name = args[0]
    model_name = args[1]
    model = models.module_from_name(model_name)
    param_dict = args[2]
    current_gpu = args[3]

    dir_name = os.path.join(FLAGS.models_dir, dataset_name)

    # Training uses full batches and a single noise draw per example.
    param_dict['batch_size'] = 128
    param_dict['n_draws'] = 1
    hps = models.params.HParams(**param_dict)

    if model_name == 'madry':
        madry.Model.maybe_download_and_extract(FLAGS.models_dir)
    else:
        print("Running on GPU {}\n\t{}".format(current_gpu, hps))
        with tf.Graph().as_default():
            train(hps,
                  model,
                  dataset=dataset_name,
                  dir_name=dir_name,
                  dev='/gpu:{}'.format(current_gpu))

    # Robustness certification is skipped for the Madry baseline and when
    # noise_after_n_layers < 0; those cases evaluate in larger batches with a
    # single draw, while the remaining models use one image per batch and
    # 2000 noise draws.
    compute_robustness = True
    if model_name == 'madry':
        compute_robustness = False
        param_dict['batch_size'] = 2000
        param_dict['n_draws'] = 1
    elif param_dict['noise_after_n_layers'] < 0:
        compute_robustness = False
        param_dict['batch_size'] = 100
        param_dict['n_draws'] = 1
    else:
        param_dict['batch_size'] = 1
        param_dict['n_draws'] = 2000

    hps = models.params.HParams(**param_dict)
    with tf.Graph().as_default():
        evaluate(hps,
                 model,
                 dataset=dataset_name,
                 dir_name=dir_name,
                 compute_robustness=compute_robustness,
                 dev='/gpu:{}'.format(current_gpu))
    return hps, model_name
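
This first example is meant to be called once per hyperparameter setting, typically fanned out over GPUs. A minimal sketch of such a driver, assuming a multiprocessing.Pool and an illustrative two-point parameter grid (in practice each dict would need the full set of HParams fields):

# Hypothetical driver for train_eval_model; the grid values, GPU count, and
# partial param dicts below are illustrative assumptions only.
from multiprocessing import Pool

def run_grid():
    param_grids = [
        {'attack_norm_bound': 0.1, 'noise_after_n_layers': 1},
        {'attack_norm_bound': 0.3, 'noise_after_n_layers': 1},
    ]
    n_gpus = 2
    jobs = [('cifar10', 'pixeldp_resnet', params, i % n_gpus)
            for i, params in enumerate(param_grids)]
    with Pool(processes=n_gpus) as pool:
        results = pool.map(train_eval_model, jobs)
    return results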
Example #2
def train_eval_model(args):
    model_name = args[0]
    model = models.module_from_name(model_name)
    param_dict = args[1]
    current_gpu = args[2]

    dir_name = os.path.join(FLAGS.models_dir, 'imagenet')
    compute_robustness = False
    # attack_norm_bound == 0.0 is the no-noise baseline: evaluate it in large
    # batches with a single draw; otherwise use one image per batch, 100 draws.
    if param_dict['attack_norm_bound'] == 0.0:
        param_dict['batch_size'] = 100
        param_dict['n_draws']    = 1
    else:
        param_dict['batch_size'] = 1
        param_dict['n_draws']    = 100

    hps   = models.params.HParams(**param_dict)
    with tf.Graph().as_default():
        evaluate(hps, model, dataset='imagenet', dir_name=dir_name,
                compute_robustness=compute_robustness,
                dev='/gpu:{}'.format(current_gpu))

    return hps, model_name
Example #3
    def test_epoch(self, epoch, plot_hist=True):
        """ Test eEGBAD model."""

        with torch.no_grad():

            self.opt.phase = 'test'
            self.l_adv_t = torch.nn.BCELoss(reduction='none')

            # Create big error tensor for the test set.
            self.an_scores = torch.zeros(
                size=(self.opt.n_G * self.opt.n_D,
                      len(self.dataloader["gen"][0].valid.dataset)),
                dtype=torch.float32,
                device=self.device)
            self.gt_labels = torch.zeros(size=(len(
                self.dataloader["gen"][0].valid.dataset), ),
                                         dtype=torch.long,
                                         device=self.device)
            self.features = torch.zeros(size=(len(
                self.dataloader["gen"][0].valid.dataset), self.opt.nz),
                                        dtype=torch.float32,
                                        device=self.device)

            ensemble_iter = 0
            for g in range(self.opt.n_G):
                for d in range(self.opt.n_D):

                    self.total_steps = 0
                    epoch_iter = 0
                    for i, (x_real, label) in enumerate(
                            self.dataloader["gen"][0].valid, 0):
                        self.total_steps += self.opt.batchsize
                        epoch_iter += self.opt.batchsize

                        # Forward - Pass
                        self.input = x_real.to(self.device)

                        z_gen = self.net_Ens[g](self.input)
                        x_fake = self.net_Des[g](z_gen)

                        pred_encoder, feat_encoder = self.net_Ds[d](z_gen,
                                                                    self.input)
                        pred_decoder, feat_decoder = self.net_Ds[d](z_gen,
                                                                    x_fake)

                        label_real = torch.ones(self.input.shape[0]).to(
                            self.device)

                        fm = self.l_adv_t(pred_encoder, label_real)

                        error = fm

                        self.an_scores[ensemble_iter,
                                       i * self.opt.batchsize:i *
                                       self.opt.batchsize +
                                       error.size(0)] = error
                        self.gt_labels[i * self.opt.batchsize:i *
                                       self.opt.batchsize +
                                       error.size(0)] = label.reshape(
                                           error.size(0))

                    ensemble_iter = ensemble_iter + 1

            self.an_scores = torch.mean(self.an_scores, dim=0)

            # Scale the anomaly scores to [0, 1].
            self.an_scores = (self.an_scores - torch.min(self.an_scores)) / \
                             (torch.max(self.an_scores) - torch.min(self.an_scores))

            per_scores = self.an_scores.cpu()

        if plot_hist:
            self.gt_labels = self.gt_labels.cpu()
            auroc = roc(self.gt_labels, per_scores, epoch)
            precision = auprc(self.gt_labels, per_scores)

            recall = evaluate(self.gt_labels, per_scores, metric='auprc')

            print('auroc is {}'.format(auroc))

            print('recall is {}'.format(recall))
            print('precision is {}'.format(precision))

            self.rocs['auroc'].append(auroc)

            plt.ion()
            # Create data frame for scores and labels.
            scores = {}
            scores['scores'] = per_scores
            scores['labels'] = self.gt_labels

            hist = pd.DataFrame.from_dict(scores)
            hist.to_csv(
                os.path.join(
                    self.opt.outf, self.opt.name,
                    "{0}exp_{1}epoch_score_train.csv".format(
                        self.opt.name, epoch)))
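
The scoring pipeline in this example reduces to averaging the per-member error tensors, min-max scaling the result, and computing AUROC. Below is a minimal self-contained sketch of that post-processing, using sklearn's roc_auc_score in place of the repo's roc/auprc/evaluate helpers; the synthetic scores and labels are assumptions for illustration:

# Ensemble anomaly scoring: average member scores, min-max scale, compute AUROC.
import torch
from sklearn.metrics import roc_auc_score

def ensemble_auroc(an_scores, gt_labels):
    # an_scores: (n_members, n_samples) raw errors; gt_labels: (n_samples,) in {0, 1}.
    scores = torch.mean(an_scores, dim=0)
    scores = (scores - scores.min()) / (scores.max() - scores.min())
    return roc_auc_score(gt_labels.cpu().numpy(), scores.cpu().numpy())

# Illustrative call on synthetic data.
scores = torch.rand(4, 256)           # 4 ensemble members, 256 test samples
labels = torch.randint(0, 2, (256,))  # random ground-truth anomaly labels
print(ensemble_auroc(scores, labels))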
Example #4
def run_one():
    # Manual runs support cpu or 1 gpu
    if FLAGS.num_gpus == 0:
        dev = '/cpu:0'
    else:
        dev = '/gpu:0'

    if FLAGS.dataset == 'mnist':
        _model = pixeldp_cnn

        steps_num = 40000
        eval_data_size = 10000
        image_size = 28
        n_channels = 1
        num_classes = 10
        relu_leakiness = 0.0
        lrn_rate = 0.01
        lrn_rte_changes = [30000]
        lrn_rte_vals = [0.01]
        if FLAGS.mode == 'train':
            batch_size = 128
            n_draws = 1
        elif FLAGS.mode == 'eval':
            batch_size = 25
            n_draws = 2000
    elif FLAGS.dataset == 'svhn':
        _model = pixeldp_resnet

        steps_num = 60000
        eval_data_size = 26032
        image_size = 32
        n_channels = 3
        num_classes = 10
        relu_leakiness = 0.0
        lrn_rate = 0.01
        lrn_rte_changes = [20000, 40000, 50000]
        lrn_rte_vals = [0.01, 0.001, 0.0001]
        if FLAGS.mode == 'train':
            batch_size = 128
            n_draws = 1
        elif FLAGS.mode == 'eval':
            batch_size = 25
            n_draws = 2000
    else:
        steps_num = 90000
        eval_data_size = 10000
        lrn_rate = 0.1
        lrn_rte_changes = [40000, 60000, 80000]
        lrn_rte_vals = [0.01, 0.001, 0.0001]
        if FLAGS.mode == 'train':
            batch_size = 128
            n_draws = 1
        elif FLAGS.mode == 'eval':
            batch_size = 1
            n_draws = 2000

    if FLAGS.dataset == 'cifar10':
        _model = pixeldp_resnet

        image_size = 32
        n_channels = 3
        num_classes = 10
        relu_leakiness = 0.1
    elif FLAGS.dataset == 'cifar100':
        _model = pixeldp_resnet

        image_size = 32
        n_channels = 3
        num_classes = 100
        relu_leakiness = 0.1

    if FLAGS.mode in ['attack', 'attack_eval', 'plot']:
        batch_size = 1
        n_draws = 10

    compute_robustness = True

    # See doc in ./models/params.py
    L = 0.1
    hps = models.params.HParams(
        name_prefix="",
        batch_size=batch_size,
        num_classes=num_classes,
        image_size=image_size,
        n_channels=n_channels,
        lrn_rate=lrn_rate,
        lrn_rte_changes=lrn_rte_changes,
        lrn_rte_vals=lrn_rte_vals,
        num_residual_units=4,
        use_bottleneck=False,
        weight_decay_rate=0.0002,
        relu_leakiness=relu_leakiness,
        optimizer='mom',
        image_standardization=False,
        n_draws=n_draws,
        dp_epsilon=1.0,
        dp_delta=0.05,
        robustness_confidence_proba=0.05,
        attack_norm_bound=L,
        attack_norm='l2',
        sensitivity_norm='l2',
        sensitivity_control_scheme='bound',  # bound or optimize
        noise_after_n_layers=1,
        layer_sensitivity_bounds=['l2_l2'],
        noise_after_activation=True,
        parseval_loops=10,
        parseval_step=0.0003,
        steps_num=steps_num,
        eval_data_size=eval_data_size,
    )

    #  atk = pgd
    atk = carlini
    #  atk = carlini_robust_precision
    if atk == carlini_robust_precision:
        attack_params = attacks.params.AttackParamsPrec(
            restarts=1,
            n_draws_attack=20,
            n_draws_eval=500,
            attack_norm='l2',
            max_attack_size=5,
            num_examples=1000,
            attack_methodolody=attacks.name_from_module(atk),
            targeted=False,
            sgd_iterations=100,
            use_softmax=False,
            T=0.01)
    else:
        attack_params = attacks.params.AttackParams(
            restarts=1,
            n_draws_attack=20,
            n_draws_eval=500,
            attack_norm='l2',
            max_attack_size=5,
            num_examples=1000,
            attack_methodolody=attacks.name_from_module(atk),
            targeted=False,
            sgd_iterations=100,
            use_softmax=True)

    #  _model = pixeldp_cnn
    #  _model = pixeldp_resnet
    #  _model = madry

    if _model == madry:
        madry.Model.maybe_download_and_extract(FLAGS.models_dir)
        hps = models.params.update(hps, 'batch_size', 200)
        hps = models.params.update(hps, 'n_draws', 1)
        attack_params = attacks.params.update(attack_params, 'n_draws_attack',
                                              1)
        attack_params = attacks.params.update(attack_params, 'n_draws_eval', 1)
        compute_robustness = False

    if FLAGS.mode == 'train':
        train.train(hps, _model, dev=dev)
    elif FLAGS.mode == 'eval':
        evaluate.evaluate(hps,
                          _model,
                          compute_robustness=compute_robustness,
                          dev=dev)
    elif FLAGS.mode == 'attack':
        train_attack.train_one(FLAGS.dataset,
                               _model,
                               hps,
                               atk,
                               attack_params,
                               dev=dev)

        tf.reset_default_graph()
    elif FLAGS.mode == 'attack_eval':
        if attack_params.attack_methodolody == 'carlini_robust_precision':
            evaluate_attack_carlini_robust_prec.evaluate_one(FLAGS.dataset,
                                                             _model,
                                                             hps,
                                                             atk,
                                                             attack_params,
                                                             dev=dev)
        else:
            evaluate_attack.evaluate_one(FLAGS.dataset,
                                         _model,
                                         hps,
                                         atk,
                                         attack_params,
                                         dev=dev)
    elif FLAGS.mode == 'plot':
        ms = []
        ps = []
        atks = [[]]
        robust_ms = [_model]
        robust_ps = [hps]
        robust_atks = [[attack_params]]
        #  plots.plot_robust_accuracy.plot("test_robust_acc", None, None, ms, ps)
        plots.plot_accuracy_under_attack.plot(
            "test_acc_under_atk",
            robust_ms,
            robust_ps,
            robust_atks,
            x_ticks=[x / 10 for x in range(1, 16)])
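
run_one() reads FLAGS.dataset, FLAGS.mode, FLAGS.models_dir, and FLAGS.num_gpus, but their definitions sit outside this example. A minimal sketch of how they could be declared with the TF1 tf.app.flags API the rest of the script implies; the default values here are assumptions:

# Sketch of the flags run_one() reads; the defaults are assumptions.
import tensorflow as tf

tf.app.flags.DEFINE_string('dataset', 'cifar10',
                           'mnist, svhn, cifar10, or cifar100.')
tf.app.flags.DEFINE_string('mode', 'train',
                           'train, eval, attack, attack_eval, or plot.')
tf.app.flags.DEFINE_string('models_dir', './trained_models',
                           'Directory where trained models are stored.')
tf.app.flags.DEFINE_integer('num_gpus', 1,
                            'Number of GPUs to use (0 runs on CPU).')
FLAGS = tf.app.flags.FLAGS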
Example #5
    def test(self):
        """ Test GANomaly model.

        Args:
            dataloader ([type]): Dataloader for the test set

        Raises:
            IOError: Model weights not found.
        """
        with torch.no_grad():
            # Load the weights of netg and netd.
            if self.opt.load_weights:
                path = "./output/{}/{}/train/weights/netG.pth".format(self.name.lower(), self.opt.dataset)
                pretrained_dict = torch.load(path)['state_dict']

                try:
                    self.netg.load_state_dict(pretrained_dict)
                except IOError:
                    raise IOError("netG weights not found")
                print('   Loaded weights.')

            self.opt.phase = 'test'

            # Create big error tensor for the test set.
            self.an_scores = torch.zeros(size=(len(self.dataloader.valid.dataset),), dtype=torch.float32,
                                         device=self.device)
            self.gt_labels = torch.zeros(size=(len(self.dataloader.valid.dataset),), dtype=torch.long,
                                         device=self.device)
            self.latent_i = torch.zeros(size=(len(self.dataloader.valid.dataset), self.opt.nz), dtype=torch.float32,
                                        device=self.device)
            self.latent_o = torch.zeros(size=(len(self.dataloader.valid.dataset), self.opt.nz), dtype=torch.float32,
                                        device=self.device)


            self.times = []
            self.total_steps = 0
            epoch_iter = 0
            for i, data in enumerate(self.dataloader.valid, 0):
                self.total_steps += self.opt.batchsize
                epoch_iter += self.opt.batchsize
                time_i = time.time()
                self.set_input(data)
                latent_is = []
                latent_os = []
                for idx_g in range(self.opt.n_G):
                    # self.fake, latent_i, latent_o = self.netgs[idx_g](self.input)
                    _, latent_i, latent_o = self.netgs[idx_g](self.input)
                    latent_is.append(latent_i)
                    latent_os.append(latent_o)

                latent_i = torch.mean(torch.stack(latent_is), dim=0)
                latent_o = torch.mean(torch.stack(latent_os), dim=0)

                error = torch.mean(torch.pow((latent_i - latent_o), 2), dim=1)
                time_o = time.time()

                self.an_scores[i * self.opt.batchsize: i * self.opt.batchsize + error.size(0)] = error.reshape(
                    error.size(0))
                self.gt_labels[i * self.opt.batchsize: i * self.opt.batchsize + error.size(0)] = self.gt.reshape(
                    error.size(0))
                self.latent_i[i * self.opt.batchsize: i * self.opt.batchsize + error.size(0), :] = latent_i.reshape(
                    error.size(0), self.opt.nz)
                self.latent_o[i * self.opt.batchsize: i * self.opt.batchsize + error.size(0), :] = latent_o.reshape(
                    error.size(0), self.opt.nz)

                self.times.append(time_o - time_i)


            # Measure inference time.
            self.times = np.array(self.times)
            self.times = np.mean(self.times[:100] * 1000)

            # Scale the anomaly scores to [0, 1].
            self.an_scores = (self.an_scores - torch.min(self.an_scores)) / (
                        torch.max(self.an_scores) - torch.min(self.an_scores))
            # auc, eer = roc(self.gt_labels, self.an_scores)
            auc = evaluate(self.gt_labels, self.an_scores, metric=self.opt.metric)
            performance = OrderedDict([('Avg Run Time (ms/batch)', self.times), ('AUC', auc)])

            if self.opt.display_id > 0 and self.opt.phase == 'test':
                counter_ratio = float(epoch_iter) / len(self.dataloader.valid.dataset)
                self.visualizer.plot_performance(self.epoch, counter_ratio, performance)
            return performance
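
The anomaly score here is the mean squared distance between the encoder's latent code and the re-encoded latent code, averaged over an ensemble of generators and then min-max scaled. A minimal sketch of that computation on dummy tensors; the shapes, number of generators, and random latent codes are illustrative assumptions:

# GANomaly-style latent anomaly scores on dummy data.
import torch

batchsize, nz, n_G = 8, 100, 3

# Stand-ins for the per-generator latent codes (latent_i, latent_o).
latent_is = [torch.randn(batchsize, nz) for _ in range(n_G)]
latent_os = [torch.randn(batchsize, nz) for _ in range(n_G)]

# Average the codes over the ensemble, then score each sample by the mean
# squared difference between its input and output latent codes.
latent_i = torch.mean(torch.stack(latent_is), dim=0)
latent_o = torch.mean(torch.stack(latent_os), dim=0)
error = torch.mean(torch.pow(latent_i - latent_o, 2), dim=1)

# Min-max scale to [0, 1], as the test loop does before calling evaluate().
scores = (error - error.min()) / (error.max() - error.min())
print(scores)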