Code example #1
def main(args):
    config = load_config(args)
    global_eval_config = config["eval_params"]
    models, model_names = config_modelloader(config, load_pretrain=True)

    converted_models = [BoundSequential.convert(model) for model in models]

    robust_errs = []
    errs = []
    for model, model_id, model_config in zip(converted_models, model_names,
                                             config["models"]):
        model = model.cuda()

        # make a copy of global training config, and update per-model config
        eval_config = copy.deepcopy(global_eval_config)
        if "eval_params" in model_config:
            eval_config.update(model_config["eval_params"])

        # read training parameters from config file
        method = eval_config["method"]
        verbose = eval_config["verbose"]
        eps = eval_config["epsilon"]
        # parameters specific to a training method
        method_param = eval_config["method_params"]
        norm = float(eval_config["norm"])
        train_data, test_data = config_dataloader(
            config, **eval_config["loader_params"])

        model_name = get_path(config, model_id, "model", load=False)
        print(model_name)
        model_log = get_path(config, model_id, "eval_log")
        logger = Logger(open(model_log, "w"))
        logger.log("evaluation configurations:", eval_config)

        logger.log("Evaluating...")
        # evaluate
        robust_err, err = Train(model, model_id, 0, test_data, eps, eps, eps,
                                norm, logger, verbose, False, None, method,
                                **method_param)
        robust_errs.append(robust_err)
        errs.append(err)

    print(
        'model robust errors (for robustly trained models, not valid for naturally trained models):'
    )
    print(robust_errs)
    robust_errs = np.array(robust_errs)
    print('min: {:.4f}, max: {:.4f}, median: {:.4f}, mean: {:.4f}'.format(
        np.min(robust_errs), np.max(robust_errs), np.median(robust_errs),
        np.mean(robust_errs)))
    print('clean errors for models with min, max and median robust errors')
    i_min = np.argmin(robust_errs)
    i_max = np.argmax(robust_errs)
    i_median = np.argsort(robust_errs)[len(robust_errs) // 2]
    print('for min: {:.4f}, for max: {:.4f}, for median: {:.4f}'.format(
        errs[i_min], errs[i_max], errs[i_median]))
    print('model clean errors:')
    print(errs)
    print('min: {:.4f}, max: {:.4f}, median: {:.4f}, mean: {:.4f}'.format(
        np.min(errs), np.max(errs), np.median(errs), np.mean(errs)))
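
The `main` function above only needs an `args` object that `load_config` understands. A minimal, hedged sketch of driving it directly from Python, reusing the namedtuple pattern from code example #2 below (the config path and prefix are copied from that example and serve only as placeholders here):

from collections import namedtuple

# Hedged usage sketch, not part of the original file: build a stand-in for
# parsed command-line arguments and call main(). The field names and the
# config/path_prefix values mirror code example #2.
if __name__ == '__main__':
    ArgsTuple = namedtuple('ArgsTuple',
                           'config overrides_dict model_subset path_prefix')
    args = ArgsTuple(config='config/mnist_dm-large_0.4.json',
                     overrides_dict={},
                     model_subset={},
                     path_prefix='models_crown-ibp_dm-large')
    main(args)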
Code example #2
File: fb_model.py  Project: maurapintor/CROWN-IBP
def create():

    device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')

    args_tuple = namedtuple('args_tuple',
                            "config overrides_dict model_subset path_prefix")
    args = args_tuple(config='config/mnist_dm-large_0.4.json',
                      overrides_dict={},
                      model_subset={},
                      path_prefix='models_crown-ibp_dm-large')
    config = load_config(args)

    global_eval_config = config["eval_params"]
    models, model_names = config_modelloader(config,
                                             load_pretrain=True,
                                             cuda=torch.cuda.is_available())

    model, model_id, model_config = models[0], model_names[0], config[
        "models"][0]

    eval_config = copy.deepcopy(global_eval_config)
    if "eval_params" in model_config:
        eval_config.update(model_config["eval_params"])
    model = BoundSequential.convert(model,
                                    eval_config["method_params"]["bound_opts"])
    if torch.cuda.is_available():
        model = model.cuda()
    model.eval()

    preprocessing = {'mean': 0.0, 'std': 1.0}
    fmodel = fb.models.PyTorchModel(model,
                                    bounds=(0, 1),
                                    preprocessing=preprocessing,
                                    device=device)

    return fmodel
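
Once `create()` has wrapped the converted network, the returned `fmodel` can be handed to a Foolbox attack. A short, hedged usage sketch (the attack choice, epsilon and random input batch are illustrative and not part of the original file):

import torch
import foolbox as fb

# Hedged usage sketch: attack the wrapped model with Foolbox's Linf PGD.
# Random tensors stand in for a real MNIST test batch to keep this
# self-contained; epsilon 0.3 is an arbitrary illustrative choice.
fmodel = create()
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
images = torch.rand(8, 1, 28, 28, device=device)
labels = torch.randint(0, 10, (8,), device=device)

attack = fb.attacks.LinfPGD()
raw, clipped, is_adv = attack(fmodel, images, labels, epsilons=0.3)
print("attack success rate:", is_adv.float().mean().item())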
Code example #3
def main(args):
    config = load_config(args)
    global_train_config = config["training_params"]
    models, model_names = config_modelloader(config)

    converted_models = [BoundSequential.convert(model) for model in models]

    for model, model_id, model_config in zip(converted_models, model_names,
                                             config["models"]):
        print("Number of GPUs:", torch.cuda.device_count())
        model = model.cuda()
        # make a copy of global training config, and update per-model config
        train_config = copy.deepcopy(global_train_config)
        if "training_params" in model_config:
            train_config = update_dict(train_config,
                                       model_config["training_params"])

        # read training parameters from config file
        epochs = train_config["epochs"]
        lr = train_config["lr"]
        weight_decay = train_config["weight_decay"]
        starting_epsilon = train_config["starting_epsilon"]
        end_epsilon = train_config["epsilon"]
        schedule_length = train_config["schedule_length"]
        schedule_start = train_config["schedule_start"]
        optimizer = train_config["optimizer"]
        method = train_config["method"]
        verbose = train_config["verbose"]
        lr_decay_step = train_config["lr_decay_step"]
        lr_decay_factor = train_config["lr_decay_factor"]
        # parameters specific to a training method
        method_param = train_config["method_params"]
        norm = float(train_config["norm"])
        train_config["loader_params"]["batch_size"] = train_config[
            "loader_params"]["batch_size"] // args.grad_acc_steps
        train_config["loader_params"]["test_batch_size"] = train_config[
            "loader_params"]["test_batch_size"] // args.grad_acc_steps
        train_data, test_data = config_dataloader(
            config, **train_config["loader_params"])

        # initialize adversary network
        if method_param["attack_type"] == "patch-nn":
            if config["dataset"] == "mnist":
                adv_net = ResNetUNet(n_class=10,
                                     channels=1,
                                     base_width=method_param["base_width"],
                                     dataset="mnist").cuda()
            if config["dataset"] == "cifar":
                adv_net = ResNetUNet(n_class=10,
                                     channels=3,
                                     base_width=method_param["base_width"],
                                     dataset="cifar").cuda()
        else:
            adv_net = None
        if optimizer == "adam":
            opt = optim.Adam(model.parameters(),
                             lr=lr,
                             weight_decay=weight_decay)
            if method_param["attack_type"] == "patch-nn":
                unetopt = optim.Adam(adv_net.parameters(),
                                     lr=lr,
                                     weight_decay=weight_decay)
            else:
                unetopt = None
        elif optimizer == "sgd":
            if method_param["attack_type"] == "patch-nn":
                unetopt = optim.SGD(adv_net.parameters(),
                                    lr=lr,
                                    momentum=0.9,
                                    nesterov=True,
                                    weight_decay=weight_decay)
            else:
                unetopt = None
            opt = optim.SGD(model.parameters(),
                            lr=lr,
                            momentum=0.9,
                            nesterov=True,
                            weight_decay=weight_decay)
        else:
            raise ValueError("Unknown optimizer")
        lr_scheduler = optim.lr_scheduler.StepLR(opt,
                                                 step_size=lr_decay_step,
                                                 gamma=lr_decay_factor)
        if method_param["attack_type"] == "patch-nn":
            lr_scheduler_unet = optim.lr_scheduler.StepLR(
                unetopt, step_size=lr_decay_step, gamma=lr_decay_factor)

        start_epoch = 0
        if args.resume:
            # NOTE: `out_path` is not defined in this snippet; it presumably
            # comes from the surrounding module or config.
            model_log = os.path.join(out_path, "test_log")
            logger = Logger(open(model_log, "w"))
            state_dict = torch.load(args.resume)
            print("***** Loading state dict from {} @ epoch {}".format(
                args.resume, state_dict['epoch']))
            model.load_state_dict(state_dict['state_dict'])
            opt.load_state_dict(state_dict['opt_state_dict'])
            lr_scheduler.load_state_dict(state_dict['lr_scheduler_dict'])
            start_epoch = state_dict['epoch'] + 1

        eps_schedule = [0] * schedule_start + list(
            np.linspace(starting_epsilon, end_epsilon, schedule_length))
        max_eps = end_epsilon

        model_name = get_path(config, model_id, "model", load=False)
        best_model_name = get_path(config, model_id, "best_model", load=False)
        print(model_name)
        model_log = get_path(config, model_id, "train_log")
        logger = Logger(open(model_log, "w"))
        logger.log("Command line:", " ".join(sys.argv[:]))
        logger.log("training configurations:", train_config)
        logger.log("Model structure:")
        logger.log(str(model))
        logger.log("data std:", train_data.std)
        best_err = np.inf
        recorded_clean_err = np.inf
        timer = 0.0

        for t in range(start_epoch, epochs):
            train_data, test_data = config_dataloader(
                config, **train_config["loader_params"])

            if method_param["attack_type"] == "patch-nn":
                lr_scheduler_unet.step(epoch=max(t - len(eps_schedule), 0))
            lr_scheduler.step(epoch=max(t - len(eps_schedule), 0))

            if t >= len(eps_schedule):
                # schedule finished: keep training at the final (maximum) epsilon
                epoch_start_eps = epoch_end_eps = end_epsilon
            else:
                epoch_start_eps = eps_schedule[t]
                if t + 1 >= len(eps_schedule):
                    epoch_end_eps = epoch_start_eps
                else:
                    epoch_end_eps = eps_schedule[t + 1]

            logger.log(
                "Epoch {}, learning rate {}, epsilon {:.6f} - {:.6f}".format(
                    t, lr_scheduler.get_lr(), epoch_start_eps, epoch_end_eps))
            # with torch.autograd.detect_anomaly():
            start_time = time.time()

            Train(model, model_id, t, train_data, epoch_start_eps,
                  epoch_end_eps, max_eps, norm, logger, verbose, True, opt,
                  method, adv_net, unetopt, **method_param)
            epoch_time = time.time() - start_time
            timer += epoch_time
            logger.log('Epoch time: {:.4f}, Total time: {:.4f}'.format(
                epoch_time, timer))

            logger.log("Evaluating...")
            # evaluate
            err, clean_err = Train(model, model_id, t, test_data,
                                   epoch_end_eps, epoch_end_eps, max_eps, norm,
                                   logger, verbose, False, None, method,
                                   adv_net, None, **method_param)
            # err, clean_err = 0, 0

            logger.log('saving to', model_name)
            # torch.save({
            #         'state_dict' : model.state_dict(),
            #         'opt_state_dict': opt.state_dict(),
            #         'robust_err': err,
            #         'clean_err': clean_err,
            #         'epoch' : t,
            #         'lr_scheduler_dict': lr_scheduler.state_dict()
            #         }, model_name)
            torch.save(model.state_dict(), model_name)

            # save the best model after we reached the schedule
            if t >= len(eps_schedule):
                if err <= best_err:
                    best_err = err
                    recorded_clean_err = clean_err
                    logger.log('Saving best model {} with error {}'.format(
                        best_model_name, best_err))
                    torch.save(
                        {
                            'state_dict': model.state_dict(),
                            'opt_state_dict': opt.state_dict(),
                            'robust_err': err,
                            'clean_err': clean_err,
                            'epoch': t,
                            'lr_scheduler_dict': lr_scheduler.state_dict()
                        }, best_model_name)

        logger.log('Total Time: {:.4f}'.format(timer))
        logger.log('Model {} best err {}, clean err {}'.format(
            model_id, best_err, recorded_clean_err))
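
The `eps_schedule` list used above is simply `schedule_start` zero entries followed by a linear ramp from `starting_epsilon` to `end_epsilon` over `schedule_length` epochs, after which training stays at the final epsilon. A tiny self-contained illustration of the values the loop sees (the concrete numbers are made up):

import numpy as np

# Illustration only: mirror the schedule construction and the per-epoch
# epsilon lookup from the training loop above, with made-up numbers.
schedule_start, schedule_length = 2, 5
starting_epsilon, end_epsilon = 0.0, 0.4

eps_schedule = [0] * schedule_start + list(
    np.linspace(starting_epsilon, end_epsilon, schedule_length))

for t in range(10):
    if t >= len(eps_schedule):
        epoch_start_eps = epoch_end_eps = end_epsilon
    else:
        epoch_start_eps = eps_schedule[t]
        epoch_end_eps = (eps_schedule[t + 1]
                         if t + 1 < len(eps_schedule) else epoch_start_eps)
    print("epoch {}: eps {:.3f} -> {:.3f}".format(t, epoch_start_eps,
                                                  epoch_end_eps))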
Code example #4
        # NOTE: the snippet begins mid-statement; the opening `if` below is
        # reconstructed from the parallel `elif` branch that follows.
        if eval_config["method_params"]["bound_opts"]['activation'] in [
                'relu', 'leaky_relu'
        ]:
            eval_config['method_params']['bound_opts']['zero-lb'] = True
        elif eval_config["method_params"]["bound_opts"]['activation'] in [
                'leaky_relu_step', 'param_leaky_relu_step'
        ]:
            eval_config['method_params']['bound_opts']['neg-slope-lb'] = True
        else:
            raise Exception(
                'The activation %s is not supported' %
                eval_config["method_params"]["bound_opts"]['activation'])

        # build model and load model state dict
        models, _ = config_modelloader(config, load_pretrain=False)
        models = models[0]
        models = BoundSequential.convert(
            models, eval_config["method_params"]["bound_opts"]).to(device)
        param_pending_init = 'param' in eval_config["method_params"][
            "bound_opts"]['activation']
        if param_pending_init:
            input_data = torch.rand(*input_shape).to(device)
            _ = models(input_data, method_opt='forward')
        models.load_state_dict(
            torch.load(models_path, map_location=device)['state_dict'])
        print('Model state dict loaded from file', models_path)
        models.update_parameter()

        _, loader = config_dataloader(config, **eval_config["loader_params"])

        # test model clean acc
        verbose = False
        model_acc = AverageMeter()
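
The snippet is cut off immediately after the accuracy meter is created. A hedged sketch of how the clean-accuracy loop could continue, assuming an `AverageMeter`-style running average (the stub class below is a stand-in for the repo's own class) and reusing the `method_opt='forward'` call shown earlier in the snippet:

# Hedged continuation sketch, not from the original file. AverageMeterStub
# is a stand-in for the repo's AverageMeter; its real interface may differ.
class AverageMeterStub:
    def __init__(self):
        self.sum, self.count = 0.0, 0

    def update(self, val, n=1):
        self.sum += val * n
        self.count += n

    @property
    def avg(self):
        return self.sum / max(self.count, 1)


model_acc = AverageMeterStub()
with torch.no_grad():
    for data, labels in loader:
        data, labels = data.to(device), labels.to(device)
        output = models(data, method_opt='forward')
        correct = (output.argmax(dim=1) == labels).float().mean().item()
        model_acc.update(correct, n=labels.size(0))
print('clean accuracy: {:.4f}'.format(model_acc.avg))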
Code example #5
File: train.py  Project: lewis841214/CROWN_bound_new
import numpy as np
import torch
import torch.nn as nn
from bound_layers import BoundSequential, BoundLinear, BoundConv2d, BoundDataParallel
import torch.optim as optim
# from gpu_profile import gpu_profile
import time
from datetime import datetime
torch.manual_seed(5)
from model_defs import *


def simple():
    model = nn.Sequential(nn.Conv2d(1, 1, 2, stride=1, padding=0), nn.ReLU(),
                          Flatten(), nn.Linear(4, 2))
    return model


if __name__ == '__main__':
    model = simple()
    #model.load_state_dict(torch.load('./mnist_crown/cnn_2layer_width_1_best.pth'))
    print(model)
    model = BoundSequential.convert(model)
    c = torch.eye(2)
    print(
        'CROWN BOUND',
        model.full_backward_range(norm=np.inf,
                                  # the original had x_U < x_L (bounds swapped);
                                  # presumably the intended input box is [-3, 5]
                                  x_U=torch.zeros((1, 1, 3, 3)) + 5,
                                  x_L=torch.zeros((1, 1, 3, 3)) - 3,
                                  eps=0.0001,
                                  C=c,
                                  upper=True,
                                  lower=True))
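
Because the input box in this toy example is tiny, the printed CROWN bounds can be sanity-checked empirically by sampling points inside `[x_L, x_U]` and inspecting the actual output range. A hedged sketch of such a check (it relies only on the `method_opt='forward'` call pattern seen in code example #4 and makes no assumption about the return format of `full_backward_range`):

# Hedged sanity-check sketch, not from the original file: empirically probe
# the network's output range over the input box x_L <= x <= x_U and compare
# it by eye with the CROWN bounds printed above.
def empirical_output_range(bound_model, x_L, x_U, n_samples=10000):
    with torch.no_grad():
        # sample uniformly inside the box
        x = x_L + (x_U - x_L) * torch.rand((n_samples,) + x_L.shape[1:])
        out = bound_model(x, method_opt='forward')
        return out.min(dim=0).values, out.max(dim=0).values

lo, hi = empirical_output_range(model,
                                x_L=torch.zeros((1, 1, 3, 3)) - 3,
                                x_U=torch.zeros((1, 1, 3, 3)) + 5)
print('empirical output range per logit:', lo, hi)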
Code example #6
File: train.py  Project: ANazaret/CROWN-IBP
def main(args):
    config = load_config(args)
    global_train_config = config["training_params"]
    models, model_names = config_modelloader(config)
    for model, model_id, model_config in zip(models, model_names,
                                             config["models"]):
        # make a copy of global training config, and update per-model config
        train_config = copy.deepcopy(global_train_config)
        if "training_params" in model_config:
            train_config = update_dict(train_config,
                                       model_config["training_params"])
        model = BoundSequential.convert(
            model, train_config["method_params"]["bound_opts"])

        # read training parameters from config file
        epochs = train_config["epochs"]
        lr = train_config["lr"]
        weight_decay = train_config["weight_decay"]
        starting_epsilon = train_config["starting_epsilon"]
        end_epsilon = train_config["epsilon"]
        schedule_length = train_config["schedule_length"]
        schedule_start = train_config["schedule_start"]
        optimizer = train_config["optimizer"]
        method = train_config["method"]
        verbose = train_config["verbose"]
        lr_decay_step = train_config["lr_decay_step"]
        lr_decay_milestones = train_config["lr_decay_milestones"]
        lr_decay_factor = train_config["lr_decay_factor"]
        multi_gpu = train_config["multi_gpu"]
        # parameters specific to a training method
        method_param = train_config["method_params"]
        norm = float(train_config["norm"])
        train_data, test_data = config_dataloader(
            config, **train_config["loader_params"])

        if optimizer == "adam":
            opt = optim.Adam(model.parameters(),
                             lr=lr,
                             weight_decay=weight_decay)
        elif optimizer == "sgd":
            opt = optim.SGD(model.parameters(),
                            lr=lr,
                            momentum=0.9,
                            nesterov=True,
                            weight_decay=weight_decay)
        else:
            raise ValueError("Unknown optimizer")

        batch_multiplier = train_config["method_params"].get(
            "batch_multiplier", 1)
        batch_size = train_data.batch_size * batch_multiplier
        num_steps_per_epoch = int(
            np.ceil(1.0 * len(train_data.dataset) / batch_size))
        epsilon_scheduler = EpsilonScheduler(
            train_config.get("schedule_type",
                             "linear"), schedule_start * num_steps_per_epoch,
            ((schedule_start + schedule_length) - 1) * num_steps_per_epoch,
            starting_epsilon, end_epsilon, num_steps_per_epoch)
        max_eps = end_epsilon

        if lr_decay_step:
            # Use StepLR. Decay by lr_decay_factor every lr_decay_step.
            lr_scheduler = optim.lr_scheduler.StepLR(opt,
                                                     step_size=lr_decay_step,
                                                     gamma=lr_decay_factor)
            lr_decay_milestones = None
        elif lr_decay_milestones:
            # Decay learning rate by lr_decay_factor at a few milestones.
            lr_scheduler = optim.lr_scheduler.MultiStepLR(
                opt, milestones=lr_decay_milestones, gamma=lr_decay_factor)
        else:
            raise ValueError(
                "one of lr_decay_step and lr_decay_milestones must be not empty."
            )
        model_name = get_path(config, model_id, "model", load=False)
        best_model_name = get_path(config, model_id, "best_model", load=False)
        model_log = get_path(config, model_id, "train_log")
        logger = Logger(open(model_log, "w"))
        logger.log(model_name)
        logger.log("Command line:", " ".join(sys.argv[:]))
        logger.log("training configurations:", train_config)
        logger.log("Model structure:")
        logger.log(str(model))
        logger.log("data std:", train_data.std)
        best_err = np.inf
        recorded_clean_err = np.inf
        timer = 0.0

        if multi_gpu:
            logger.log(
                "\nUsing multiple GPUs for computing CROWN-IBP bounds\n")
            model = BoundDataParallel(model)
            model = model.cuda()

        for t in range(epochs):
            epoch_start_eps = epsilon_scheduler.get_eps(t, 0)
            epoch_end_eps = epsilon_scheduler.get_eps(t + 1, 0)
            logger.log(
                "Epoch {}, learning rate {}, epsilon {:.6g} - {:.6g}".format(
                    t, lr_scheduler.get_lr(), epoch_start_eps, epoch_end_eps))
            # with torch.autograd.detect_anomaly():
            start_time = time.time()
            Train(model, t, train_data, epsilon_scheduler, max_eps, norm,
                  logger, verbose, True, opt, method, **method_param)
            if lr_decay_step:
                # Use stepLR. Note that we manually set up epoch number here, so the +1 offset.
                lr_scheduler.step(
                    epoch=max(t - (schedule_start + schedule_length - 1) +
                              1, 0))
            elif lr_decay_milestones:
                # Use MultiStepLR with milestones.
                lr_scheduler.step()
            epoch_time = time.time() - start_time
            timer += epoch_time
            logger.log('Epoch time: {:.4f}, Total time: {:.4f}'.format(
                epoch_time, timer))
            logger.log("Evaluating...")
            with torch.no_grad():
                # evaluate
                err, clean_err = Train(
                    model, t, test_data,
                    EpsilonScheduler("linear", 0, 0, epoch_end_eps,
                                     epoch_end_eps, 1), max_eps, norm, logger,
                    verbose, False, None, method, **method_param)

            logger.log('saving to', model_name)
            torch.save(
                {
                    'state_dict': (model.module.state_dict()
                                   if multi_gpu else model.state_dict()),
                    'epoch': t,
                }, model_name)

            # save the best model after we reached the schedule
            if t >= (schedule_start + schedule_length):
                if err <= best_err:
                    best_err = err
                    recorded_clean_err = clean_err
                    logger.log('Saving best model {} with error {}'.format(
                        best_model_name, best_err))
                    torch.save(
                        {
                            'state_dict': (model.module.state_dict()
                                           if multi_gpu else model.state_dict()),
                            'robust_err': err,
                            'clean_err': clean_err,
                            'epoch': t,
                        }, best_model_name)

        logger.log('Total Time: {:.4f}'.format(timer))
        logger.log('Model {} best err {}, clean err {}'.format(
            model_id, best_err, recorded_clean_err))
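
Unlike the epoch-level list schedule in code examples #3 and #7, `EpsilonScheduler` here is queried with an epoch and step index and therefore can ramp epsilon at batch granularity. The sketch below is a rough mock of what a linear `get_eps(epoch, step)` could compute; it illustrates the idea only and is not the repo's actual implementation (which also supports other schedule types):

# Illustrative mock only, not the CROWN-IBP EpsilonScheduler. It shows one
# plausible linear per-step ramp with the same kind of start/end steps as
# the constructor calls above; the demo numbers are made up.
class LinearEpsMock:
    def __init__(self, init_step, final_step, init_eps, final_eps,
                 num_steps_per_epoch):
        self.init_step, self.final_step = init_step, final_step
        self.init_eps, self.final_eps = init_eps, final_eps
        self.steps_per_epoch = num_steps_per_epoch

    def get_eps(self, epoch, step):
        g = epoch * self.steps_per_epoch + step
        if g <= self.init_step:
            return self.init_eps
        if g >= self.final_step:
            return self.final_eps
        frac = (g - self.init_step) / float(self.final_step - self.init_step)
        return self.init_eps + frac * (self.final_eps - self.init_eps)


sched = LinearEpsMock(init_step=2 * 100, final_step=12 * 100,
                      init_eps=0.0, final_eps=0.4, num_steps_per_epoch=100)
for t in range(0, 16, 3):
    print(t, round(sched.get_eps(t, 0), 4))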
Code example #7
def main(args):
    config = load_config(args)
    global_train_config = config["training_params"]
    models, model_names = config_modelloader(config)

    converted_models = [BoundSequential.convert(model) for model in models]

    for model, model_id, model_config in zip(converted_models, model_names, config["models"]):
        model = model.cuda()

        # make a copy of global training config, and update per-model config
        train_config = copy.deepcopy(global_train_config)
        if "training_params" in model_config:
            train_config = update_dict(train_config, model_config["training_params"])

        # read training parameters from config file
        epochs = train_config["epochs"]
        lr = train_config["lr"]
        weight_decay = train_config["weight_decay"]
        starting_epsilon = train_config["starting_epsilon"]
        end_epsilon = train_config["epsilon"]
        schedule_length = train_config["schedule_length"]
        schedule_start = train_config["schedule_start"]
        optimizer = train_config["optimizer"]
        method = train_config["method"]
        verbose = train_config["verbose"]
        lr_decay_step = train_config["lr_decay_step"]
        lr_decay_factor = train_config["lr_decay_factor"]
        # parameters specific to a training method
        method_param = train_config["method_params"]
        norm = train_config["norm"]
        train_data, test_data = config_dataloader(config, **train_config["loader_params"])

        if optimizer == "adam":
            opt = optim.Adam(model.parameters(), lr=lr, weight_decay=weight_decay)
        elif optimizer == "sgd":
            opt = optim.SGD(model.parameters(), lr=lr, momentum=0.9, nesterov=True, weight_decay=weight_decay)
        else:
            raise ValueError("Unknown optimizer")

        eps_schedule = [0] * schedule_start + list(np.linspace(starting_epsilon, end_epsilon, schedule_length))
        max_eps = end_epsilon
        lr_scheduler = optim.lr_scheduler.StepLR(opt, step_size=lr_decay_step, gamma=lr_decay_factor)
        model_name = get_path(config, model_id, "model", load=False)
        best_model_name = get_path(config, model_id, "best_model", load=False)
        print(model_name)
        model_log = get_path(config, model_id, "train_log")
        logger = Logger(open(model_log, "w"))
        logger.log("Command line:", " ".join(sys.argv[:]))
        logger.log("training configurations:", train_config)
        logger.log("Model structure:")
        logger.log(str(model))
        logger.log("data std:", train_data.std)
        best_err = np.inf
        recorded_clean_err = np.inf
        timer = 0.0
        for t in range(epochs):
            lr_scheduler.step(epoch=max(t - len(eps_schedule), 0))
            if t >= len(eps_schedule):
                # schedule finished: keep training at the final (maximum) epsilon
                epoch_start_eps = epoch_end_eps = end_epsilon
            else:
                epoch_start_eps = eps_schedule[t]
                if t + 1 >= len(eps_schedule):
                    epoch_end_eps = epoch_start_eps
                else:
                    epoch_end_eps = eps_schedule[t + 1]
            
            logger.log("Epoch {}, learning rate {}, epsilon {:.6f} - {:.6f}".format(t, lr_scheduler.get_lr(), epoch_start_eps, epoch_end_eps))
            # with torch.autograd.detect_anomaly():
            start_time = time.time()
            Train(model, t, train_data, epoch_start_eps, epoch_end_eps, max_eps, logger, verbose, True, opt, method, **method_param)
            epoch_time = time.time() - start_time
            timer += epoch_time
            logger.log('Epoch time: {:.4f}, Total time: {:.4f}'.format(epoch_time, timer))
            logger.log("Evaluating...")
            with torch.no_grad():
                # evaluate
                err, clean_err = Train(model, t, test_data, epoch_end_eps, epoch_end_eps, max_eps, logger, verbose, False, None, method, **method_param)

            logger.log('saving to', model_name)
            torch.save({
                    'state_dict': model.state_dict(),
                    'epoch': t,
                    }, model_name)

            # save the best model after we reached the schedule
            if t >= len(eps_schedule):
                if err <= best_err:
                    best_err = err
                    recorded_clean_err = clean_err
                    logger.log('Saving best model {} with error {}'.format(best_model_name, best_err))
                    torch.save({
                            'state_dict': model.state_dict(),
                            'robust_err': err,
                            'clean_err': clean_err,
                            'epoch': t,
                            }, best_model_name)

        logger.log('Total Time: {:.4f}'.format(timer))
        logger.log('Model {} best err {}, clean err {}'.format(model_id, best_err, recorded_clean_err))
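
The best-model checkpoint saved above is a plain dict, so it can be reloaded later with the matching keys. A minimal, hedged sketch (it assumes a model built the same way as in this example and reuses the `best_model_name` path from the save call):

# Hedged reload sketch, not from the original file: restore the best
# checkpoint written by the loop above and report the recorded errors.
checkpoint = torch.load(best_model_name, map_location='cpu')
model.load_state_dict(checkpoint['state_dict'])
print("epoch {}: robust err {:.4f}, clean err {:.4f}".format(
    checkpoint['epoch'], checkpoint['robust_err'], checkpoint['clean_err']))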