Code example #1
def execute_experiment(params):
    data_params = DataParameters()
    pnml_params = PNMLParameters()
    exp_params = ExperimentParameters()

    # Set parameters from dict
    for key, value in params['exp_params'].items():
        setattr(exp_params, key, value)
    for key, value in params['pnml_params'].items():
        setattr(pnml_params, key, value)
    for key, value in params['data_params'].items():
        setattr(data_params, key, value)

    # Create logger and save params to output folder
    logger = Logger(experiment_type=exp_params.experiment_name,
                    output_root=exp_params.output_dir_base)
    logger.info('OutputDirectory: %s' % logger.output_folder)
    with open(os.path.join(logger.output_folder, 'params.json'),
              'w',
              encoding='utf8') as outfile:
        outfile.write(json.dumps(params, indent=4, sort_keys=True))

    logger.info('%s' % data_params)
    logger.info('%s' % pnml_params)
    logger.info('%s' % exp_params)

    exp_h = Experiment(exp_params, data_params, pnml_params)
    if exp_params.exp_type == 'poly':
        exp_h.execute_poly_degree_search()
    elif exp_params.exp_type == 'lambda':
        exp_h.execute_lambda_search()
    regret_df = exp_h.get_regret_df()
    exp_df_dict = exp_h.get_exp_df_dict()
    x_train, y_train = exp_h.get_train()

    # Twice universal
    logger.info('Execute TU.')
    twice_df = twice_universality(exp_df_dict)
    exp_df_dict['Twice'] = twice_df

    # Save results
    logger.info('Save results.')
    regret_df.to_pickle(os.path.join(logger.output_folder, 'regret_df.pkl'))
    exp_df_dict_saver(exp_df_dict, logger.output_folder)

    trainset_dict = {'x_train': x_train, 'y_train': y_train}
    with open(os.path.join(logger.output_folder, 'trainset_dict.pkl'),
              "wb") as f:
        pickle.dump(trainset_dict, f)
    logger.info('Finished. Save to: %s' % logger.output_folder)
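A minimal invocation sketch, not taken from the project: the top-level keys ('exp_params', 'pnml_params', 'data_params') and the attributes the snippet reads (experiment_name, output_dir_base, exp_type) come from the code above; every field value below is a hypothetical placeholder.

# Hypothetical usage sketch. Only the dict layout and the attribute names
# read by execute_experiment() above are grounded in the snippet.
params = {
    'exp_params': {
        'experiment_name': 'poly_search',  # illustrative name
        'output_dir_base': './output',
        'exp_type': 'poly',  # 'poly' or 'lambda', per the dispatch above
    },
    'pnml_params': {},
    'data_params': {},
}
execute_experiment(params)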
Code example #2
File: tests.py Project: uriyapes/pnml_adv
    def _prepare_test(self):
        self.general_args['param_file_path'] = os.path.join(
            self.expected_result_folder, 'params.json')
        exp = Experiment(self.general_args, self.params_overload)
        with open(os.path.join(self.logger.output_folder, 'params.json'),
                  'w',
                  encoding='utf8') as outfile:
            outfile.write(json.dumps(exp.params, indent=4, sort_keys=False))
        self.logger.info(exp.params)

        model_to_eval = exp.get_model(exp.params['model']['model_arch'],
                                      exp.params['model']['ckpt_path'],
                                      exp.params['model']['pnml_active'])
        dataloaders = exp.get_dataloaders()
        # Get adversarial attack:
        attack = exp.get_attack_for_model(model_to_eval)
        return model_to_eval, dataloaders, attack
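The returned triple feeds the same evaluation entry point used elsewhere in the project; a hedged sketch of how a test method might consume it, mirroring the eval_adversarial_dataset call and the accuracy/loss logging in code example #7 (the save flag False is an illustrative choice):

    # Hypothetical test body sketch, mirroring the evaluation pattern of
    # code example #7.
    def test_eval(self):
        model_to_eval, dataloaders, attack = self._prepare_test()
        adv = eval_adversarial_dataset(model_to_eval, dataloaders['test'],
                                       attack, False)
        self.logger.info("Accuracy: {}, Loss: {}".format(
            adv.get_accuracy(), adv.get_mean_loss()))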
Code example #3
def eval_all(base_model, dataloader, attack, exp: Experiment):
    adv = eval_adversarial_dataset(base_model, dataloader, attack, True)

    assert (base_model.pnml_model is False)
    pnml_model = exp.get_pnml_model(base_model, pnml_model_keep_grad=False)
    adv_pnml = eval_pnml_blackbox(pnml_model, adv, exp)

    natural = eval_adversarial_dataset(
        base_model, dataloader,
        get_attack({'attack_type': 'natural'}, base_model), False)
    natural_pnml = eval_adversarial_dataset(
        pnml_model, dataloader,
        get_attack({'attack_type': 'natural'}, pnml_model), False)
    return adv, adv_pnml, natural, natural_pnml
Code example #4
def run_experiment(experiment_type: str):
    ################
    # Load training params
    with open(os.path.join('src', 'params.json')) as f:
        params = json.load(f)

    ################
    # Class that depends on the experiment type
    experiment_h = Experiment(experiment_type, params)
    params = experiment_h.get_params()

    ################
    # Create logger and save params to output folder
    logger = Logger(experiment_type=experiment_h.get_exp_name(),
                    output_root='output')
    # logger = Logger(experiment_type='TMP', output_root='output')
    logger.info('OutputDirectory: %s' % logger.output_folder)
    with open(os.path.join(logger.output_folder, 'params.json'),
              'w',
              encoding='utf8') as outfile:
        outfile.write(json.dumps(params, indent=4, sort_keys=True))
    logger.info(params)

    ################
    # Load datasets
    data_folder = './data'
    logger.info('Load datasets: %s' % data_folder)
    dataloaders = experiment_h.get_dataloaders(data_folder)

    ################
    # Run basic training so the base model will be in the same conditions as the NML model
    model_base = experiment_h.get_model()
    params_init_training = params['initial_training']
    params_init_training['debug_flags'] = params['debug_flags']
    model_erm = execute_basic_training(model_base, dataloaders,
                                       params_init_training, experiment_h,
                                       logger)

    ################
    # Freeze layers
    logger.info('Freeze layer: %d' % params['freeze_layer'])
    model_erm = freeze_model_layers(model_erm, params['freeze_layer'], logger)

    ############################
    # Iterate over test dataset
    logger.info('Execute pNML')
    params_fit_to_sample = params['fit_to_sample']
    params_fit_to_sample['debug_flags'] = params['debug_flags']
    execute_pnml_on_testset(model_erm, experiment_h, params_fit_to_sample,
                            dataloaders, logger)
    logger.info('Finish All!')
Code example #5
def eval_batch(images: np.ndarray, labels: np.ndarray, iterations: int,
               batch_iteration: int, exp: Experiment,
               logger: logger_utilities.Logger, cuda_dev: int):
    # with torch.cuda.set_device(cuda_dev):
    logger.info("Start run batch {} in cuda_device {}".format(
        batch_iteration, cuda_dev))
    os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
    os.environ["CUDA_VISIBLE_DEVICES"] = str(cuda_dev)

    # logger.info("Create model...")
    # model = exp.get_model("PnmlModel", "./trained_models/mnist_classifier/bpda_ep6_eps0.3_restant20_uniformRnd/model_iter_6.pt")
    model_to_eval = exp.get_model(exp.params['model']['model_arch'],
                                  exp.params['model']['ckpt_path'],
                                  exp.params['model']['pnml_active'], False)
    model_to_eval.eval()
    model_to_eval.freeze_all_layers()
    logger.info("Create foolbox model and attack...")
    # low_bound, upper_bound = get_dataset_min_max_val("mnist_adversarial", dtype=np.float32)
    low_bound, upper_bound = get_dataset_min_max_val(exp.exp_type,
                                                     dtype=np.float32)
    fmodel = foolbox.models.PyTorchModel(model_to_eval,
                                         bounds=(float(low_bound),
                                                 float(upper_bound)),
                                         num_classes=exp.params["num_classes"])
    attack = foolbox.attacks.HopSkipJumpAttack(fmodel,
                                               distance=foolbox.distances.Linf)
    adversarials_batch = attack(images,
                                labels,
                                iterations=iterations,
                                unpack=False,
                                log_every_n_steps=1,
                                loggingLevel=logging.INFO,
                                batch_size=exp.params["batch_size"])

    adversarials_batch_repack, _ = repack_adversarial_results(
        adversarials_batch)
    logger.dump_pickle(adversarials_batch_repack,
                       "adv_batch_%.2d.p" % (batch_iteration))
    logger.info("Finish run batch {} in cuda_device {}".format(
        batch_iteration, cuda_dev))
    sys.stdout.flush()
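Because eval_batch() pins each call to a single GPU through CUDA_VISIBLE_DEVICES, and code example #10 below sets the 'spawn' start method, a natural driver is one worker process per batch. A minimal sketch, assuming the batch arrays exist and that exp and logger are picklable (both assumptions, not shown in the project code):

# Hypothetical dispatch sketch: one spawned worker per batch, cycling
# over n_gpus CUDA devices via the cuda_dev argument of eval_batch().
import torch.multiprocessing as mp

def run_batches(batches, iterations, exp, logger, n_gpus):
    procs = []
    for i, (images, labels) in enumerate(batches):
        p = mp.Process(target=eval_batch,
                       args=(images, labels, iterations, i, exp, logger,
                             i % n_gpus))
        p.start()
        procs.append(p)
    for p in procs:
        p.join()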
Code example #6
def execute_pnml_on_testset(model_base, experiment_h: Experiment,
                            params_fit_to_sample: dict, dataloaders: dict,
                            logger: Logger):
    for idx in range(params_fit_to_sample['test_start_idx'],
                     params_fit_to_sample['test_end_idx'] + 1):
        time_start_idx = time.time()

        # Extract a sample from test dataset and check output of base model
        sample_test_data = dataloaders['test'].dataset.test_data[idx]
        sample_test_true_label = dataloaders['test'].dataset.test_labels[idx]

        # Make sure the data is HxWxC:
        if len(sample_test_data.shape) == 3 and \
                sample_test_data.shape[2] > sample_test_data.shape[0]:
            sample_test_data = sample_test_data.transpose([1, 2, 0])

        # Execute transformation
        sample_test_data_for_trans = deepcopy(sample_test_data)
        if len(sample_test_data.shape) == 2:
            sample_test_data_for_trans = sample_test_data_for_trans.unsqueeze(
                2).numpy()
        sample_test_data_trans = dataloaders['test'].dataset.transform(
            sample_test_data_for_trans)

        # Evaluate with base model
        prob_org, _ = eval_single_sample(model_base, sample_test_data_trans)
        logger.add_org_prob_to_results_dict(idx, prob_org,
                                            sample_test_true_label)

        # pNML training: train the model with the test sample
        execute_pnml_training(params_fit_to_sample, dataloaders,
                              sample_test_data, sample_test_true_label, idx,
                              model_base, logger)

        # Log and save
        logger.save_json_file()
        time_idx = time.time() - time_start_idx
        logger.info('----- Finish %s idx = %d, time=%f[sec] ----' %
                    (experiment_h.get_exp_name(), idx, time_idx))
Code example #7
def main():
    parser = jsonargparse.ArgumentParser(description='General arguments',
                                         default_meta=False)
    parser.add_argument('-t',
                        '--general.experiment_type',
                        default='cifar_adversarial',
                        help='Type of experiment to execute',
                        type=str)
    parser.add_argument(
        '-p',
        '--general.param_file_path',
        default=os.path.join('./src/parameters', 'cifar_params.json'),
        help=
        'param file path used to load the parameters file containing default values to all '
        'parameters',
        type=str)
    parser.add_argument('--general.save',
                        default=False,
                        action='store_true',
                        help='Whether to save adversarial samples output')
    # parser.add_argument('-p', '--general.param_file_path', default='src/tests/test_mnist_pgd_with_pnml_expected_result/params.json',
    #                     help='param file path used to load the parameters file containing default values to all '
    #                          'parameters', type=str)
    parser.add_argument(
        '-o',
        '--general.output_root',
        default='output',
        help='the output directory where results will be saved',
        type=str)

    parser.add_argument('-f',
                        '--adv_attack_test.test_start_idx',
                        help='first test idx',
                        type=int)
    parser.add_argument('-l',
                        '--adv_attack_test.test_end_idx',
                        help='last test idx',
                        type=int)
    parser.add_argument('-e',
                        '--adv_attack_test.epsilon',
                        help='the epsilon strength of the attack',
                        type=float)
    parser.add_argument('-ts',
                        '--adv_attack_test.pgd_step',
                        help='the step size of the attack',
                        type=float)
    parser.add_argument('-ti',
                        '--adv_attack_test.pgd_iter',
                        help='the number of test pgd iterations',
                        type=int)
    parser.add_argument(
        '-b',
        '--adv_attack_test.beta',
        help='the beta value for regret reduction regularization',
        type=float)
    parser.add_argument('--adv_attack_test.attack_type',
                        help='The type of the attack',
                        type=str)
    parser.add_argument('-r',
                        '--fit_to_sample.epsilon',
                        help='the epsilon strength of the refinement (lambda)',
                        type=float)
    parser.add_argument('-i',
                        '--fit_to_sample.pgd_iter',
                        help='the number of PGD iterations of the refinement',
                        type=int)
    parser.add_argument('-s',
                        '--fit_to_sample.pgd_step',
                        help='the step size of the refinement',
                        type=float)
    parser.add_argument(
        '-n',
        '--fit_to_sample.pgd_test_restart_num',
        help='the number of PGD restarts where 0 means no random start',
        type=int)

    args = jsonargparse.namespace_to_dict(parser.parse_args())
    general_args = args.pop('general')

    exp = Experiment(general_args, args)
    logger_utilities.init_logger(logger_name=exp.get_exp_name(),
                                 output_root=exp.output_dir)
    logger = logger_utilities.get_logger()
    # Get models:
    model_to_eval = exp.get_model(
        exp.params['model']['model_arch'], exp.params['model']['ckpt_path'],
        exp.params['model']['pnml_active'],
        exp.params["adv_attack_test"]["attack_type"] != "natural")

    dataloaders = exp.get_dataloaders()

    # Get adversarial attack:
    attack = exp.get_attack_for_model(model_to_eval)

    with open(os.path.join(logger.output_folder, 'params.json'),
              'w',
              encoding='utf8') as outfile:
        outfile.write(json.dumps(exp.params, indent=4, sort_keys=False))
    logger.info(exp.params)

    # adv, adv_pnml, natural, natural_pnml = eval_all(model_to_eval, dataloaders['test'], attack, exp)
    # logger.info("Base model adversarial - Accuracy: {}, Loss: {}".format(adv.get_accuracy(), adv.get_mean_loss()))
    # logger.info("Pnml model adversarial - Accuracy: {}, Loss: {}".format(adv_pnml.get_accuracy(), adv_pnml.get_mean_loss()))
    # logger.info("Base model natural - Accuracy: {}, Loss: {}".format(natural.get_accuracy(), natural.get_mean_loss()))
    # logger.info("Pnml model natural - Accuracy: {}, Loss: {}".format(natural_pnml.get_accuracy(), natural_pnml.get_mean_loss()))

    adv = eval_adversarial_dataset(model_to_eval, dataloaders['test'], attack,
                                   general_args['save'])
    loss = adv.get_mean_loss()
    acc = adv.get_accuracy()
    logger.info("Accuracy: {}, Loss: {}".format(acc, loss))
    adv.dump(logger.output_folder)
    return adv
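For reference, main() can be driven programmatically by overriding sys.argv before the parser runs; the flags below are the ones defined above, while the concrete values are illustrative only:

# Hypothetical invocation sketch. The flags (-t, -e, -ti, -ts) come from
# the parser in main(); the values are placeholders, not project defaults.
import sys

sys.argv = ['main.py',
            '-t', 'cifar_adversarial',  # general.experiment_type
            '-e', '0.031',              # adv_attack_test.epsilon
            '-ti', '20',                # adv_attack_test.pgd_iter
            '-ts', '0.007']             # adv_attack_test.pgd_step
adv_result = main()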
Code example #8
def eval_pnml_blackbox(pnml_model, adv, exp: Experiment):
    dataloader = exp.get_blackbox_dataloader(adv)
    return eval_adversarial_dataset(
        pnml_model, dataloader['test'],
        get_attack({'attack_type': 'natural'}, pnml_model), False)
Code example #9
        'param file path used to load the parameters file containing default values to all '
        'parameters',
        type=str)
    # parser.add_argument('-p', '--general.param_file_path', default='src/tests/test_mnist_pgd_with_pnml_expected_result/params.json',
    #                     help='param file path used to load the parameters file containing default values to all '
    #                          'parameters', type=str)
    parser.add_argument(
        '-o',
        '--general.output_root',
        default='output',
        help='the output directory where results will be saved',
        type=str)
    args = jsonargparse.namespace_to_dict(parser.parse_args())
    general_args = args.pop('general')

    exp = Experiment(general_args, args)
    params = exp.get_params()
    logger_utilities.init_logger(logger_name=exp.get_exp_name(),
                                 output_root=exp.output_dir)
    logger = logger_utilities.get_logger()

    model_to_train = exp.get_model(exp.params['model']['model_arch'],
                                   exp.params['model']['ckpt_path'])
    dataloaders = exp.get_dataloaders()

    logger.info('Execute basic training')
    params_init_training = params['initial_training']
    train_class = TrainClass(
        filter(lambda p: p.requires_grad,
               model_to_train.parameters()), params_init_training['lr'],
        params_init_training['momentum'], params_init_training['step_size'],
Code example #10
def _main():
    mp.set_start_method('spawn')
    parser = jsonargparse.ArgumentParser(description='General arguments',
                                         default_meta=False)
    parser.add_argument('-t',
                        '--general.experiment_type',
                        default='imagenet_adversarial',
                        help='Type of experiment to execute',
                        type=str)
    parser.add_argument(
        '-p',
        '--general.param_file_path',
        default=os.path.join('./src/parameters', 'eval_imagenet_param.json'),
        help=
        'param file path used to load the parameters file containing default values to all '
        'parameters',
        type=str)
    # parser.add_argument('-p', '--general.param_file_path', default='src/tests/test_mnist_pgd_with_pnml_expected_result/params.json',
    #                     help='param file path used to load the parameters file containing default values to all '
    #                          'parameters', type=str)
    parser.add_argument(
        '-o',
        '--general.output_root',
        default='output',
        help='the output directory where results will be saved',
        type=str)
    parser.add_argument('--adv_attack_test.attack_type',
                        help='attack type',
                        type=str,
                        default="natural")
    parser.add_argument('-f',
                        '--adv_attack_test.test_start_idx',
                        help='first test idx',
                        type=int)
    parser.add_argument('-l',
                        '--adv_attack_test.test_end_idx',
                        help='last test idx',
                        type=int)
    parser.add_argument('-e',
                        '--adv_attack_test.epsilon',
                        help='the epsilon strength of the attack',
                        type=float)
    parser.add_argument('-ts',
                        '--adv_attack_test.pgd_step',
                        help='the step size of the attack',
                        type=float)
    parser.add_argument('-ti',
                        '--adv_attack_test.pgd_iter',
                        help='the number of test pgd iterations',
                        type=int)
    parser.add_argument(
        '-b',
        '--adv_attack_test.beta',
        help='the beta value for regret reduction regularization',
        type=float)
    parser.add_argument('-r',
                        '--fit_to_sample.epsilon',
                        help='the epsilon strength of the refinement (lambda)',
                        type=float)
    parser.add_argument('-i',
                        '--fit_to_sample.pgd_iter',
                        help='the number of PGD iterations of the refinement',
                        type=int)
    parser.add_argument('-s',
                        '--fit_to_sample.pgd_step',
                        help='the step size of the refinement',
                        type=float)
    parser.add_argument(
        '-n',
        '--fit_to_sample.pgd_test_restart_num',
        help='the number of PGD restarts where 0 means no random start',
        type=int)
    args = jsonargparse.namespace_to_dict(parser.parse_args())
    general_args = args.pop('general')

    experiment_h = Experiment(general_args, args)
    dataloaders = experiment_h.get_adv_dataloaders()
    ################
    # Create logger and save params to output folder
    logger_utilities.init_logger(logger_name=experiment_h.get_exp_name(),
                                 output_root=experiment_h.output_dir)
    logger = logger_utilities.get_logger()
    # logger = Logger(experiment_type='TMP', output_root='output')
    logger.info('OutputDirectory: %s' % logger.output_folder)
    logger.info('Device: %s' % TorchUtils.get_device())
    logger.info(experiment_h.get_params())
    eval_dataset(dataloaders['test'], 29, logger, experiment_h)
    logger.info("Done")