Example #1
    def rollout(self, params):
        ''' Perform a single rollout.

        Parameters
        ----------
        params : dict
            Controller fully connected layer weights and biases.

        Returns
        -------
        float
            The total reward accumulated during the rollout, negated so that
            an external optimizer can minimize it.
        '''
        load_parameters(params, self.control)

        obs = self.env.reset()
        self.env.render()
        (h, c) = [torch.zeros(1, 1, 256).to(self.device) for _ in range(2)]
        total_reward = 0

        for step in range(1000):
            # obs[0] for textured observation
            # obs[1] for semantic segmentation observation
            # obs[2] for standard observation
            obs = self.pre_process(obs[obs_index]).to(self.device)
            # using mean as z vector
            z, _ = self.vae.encode(obs)
            a = self.control.get_action(z, h)
            obs, reward, done, _ = self.env.step(a)

            # advance the MDN-RNN hidden state with the latest latent and action
            a = torch.from_numpy(a).view(1, 1, 3).to(self.device)
            x = torch.cat((z.view(1, 1, 32), a), 2)
            _, (h, c) = self.mdnrnn.lstm(x, (h, c))

            total_reward += reward
            if done:
                break
        return -1 * total_reward
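
In this example, `load_parameters(params, self.control)` copies an evolution-strategy candidate into the controller before the rollout. Below is a minimal sketch of such a helper, assuming `params` arrives as a flat parameter vector (as Example 6 passes CMA-ES solutions directly); the repository's actual helper may instead take the dict described in the docstring.

import numpy as np
import torch


def load_parameters(flat_params, module):
    # Sketch only: copy a flat parameter vector into `module`, slice by slice,
    # following the order of module.parameters().
    flat = torch.from_numpy(np.asarray(flat_params, dtype=np.float32))
    offset = 0
    for p in module.parameters():
        n = p.numel()
        p.data.copy_(flat[offset:offset + n].view_as(p.data))
        offset += n
    assert offset == flat.numel(), "parameter vector length mismatch"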
Example #2
def main(config_file="parameters.ini"):
    # Step 1. Load configuration
    parameters = load_parameters(config_file)
    is_cuda = eval(parameters["general"]["is_cuda"])
    if is_cuda:
        # Note: on some platforms, modifying os.environ does not change the system
        # environment; it only affects this process and its children.
        os.environ["CUDA_VISIBLE_DEVICES"] = parameters["general"][
            "gpu_device"]

    assertion_message = "Set this flag off to train models."
    assert eval(parameters['dataset']
                ['generate_feature_vector_files']) is False, assertion_message

    log_interval = int(parameters["general"]["log_interval"])
    num_epochs = int(parameters["hyperparam"]["ff_num_epochs"])
    is_synthetic_dataset = eval(parameters["general"]["is_synthetic_dataset"])

    training_method = parameters["general"]["training_method"]
    evasion_method = parameters["general"]["evasion_method"]
    experiment_suffix = parameters["general"]["experiment_suffix"]
    experiment_name = "[training:%s|evasion:%s]_%s" % (
        training_method, evasion_method, experiment_suffix)

    adv_example_filepath = parameters["challenge"]["adv_examples_path"]

    print("Training Method:%s, Evasion Method:%s" %
          (training_method, evasion_method))

    seed_val = int(parameters["general"]["seed"])

    random.seed(seed_val)
    torch.manual_seed(seed_val)
    np.random.seed(seed_val)

    evasion_iterations = int(parameters['hyperparam']['evasion_iterations'])

    save_every_epoch = eval(parameters['general']['save_every_epoch'])

    #train_model_from_scratch = eval(parameters['general']['train_model_from_scratch'])
    load_model_weights = eval(parameters['general']['load_model_weights'])
    model_weights_path = parameters['general']['model_weights_path']

    # Step 2. Load training and test data
    train_dataloader_dict, valid_dataloader_dict, test_dataloader_dict, num_features = load_data(
        parameters)

    # set the bscn metric
    num_samples = len(train_dataloader_dict["malicious"].dataset)
    bscn = CoveringNumber(num_samples, num_epochs * num_samples,
                          train_dataloader_dict["malicious"].batch_size)

    if load_model_weights:
        print("Loading Model Weights From: {path}".format(
            path=model_weights_path))
        if is_cuda:
            model = torch.load(model_weights_path)
        else:
            model = torch.load(model_weights_path,
                               map_location=lambda storage, loc: storage)

    else:
        # Step 3. Construct neural net (N) - this can be replaced with any model of interest
        model = build_ff_classifier(
            input_size=num_features,
            hidden_1_size=int(parameters["hyperparam"]["ff_h1"]),
            hidden_2_size=int(parameters["hyperparam"]["ff_h2"]),
            hidden_3_size=int(parameters["hyperparam"]["ff_h3"]))
    # gpu related setups
    if is_cuda:
        torch.cuda.manual_seed(int(parameters["general"]["seed"]))
        model = model.cuda()

    # Step 4. Define loss function and optimizer for training (back-propagation block in Fig. 2)
    loss_fct = nn.NLLLoss(reduce=False)
    optimizer = optim.Adam(model.parameters(),
                           lr=float(
                               parameters["hyperparam"]["ff_learning_rate"]))

    def train(epoch):
        model.train()
        total_correct = 0.
        total_loss = 0.
        total = 0.

        current_time = time.time()

        if is_synthetic_dataset:
            # since generation of the synthetic data set is random, we'd like it to be the same across epochs
            torch.manual_seed(seed_val)
            random.seed(seed_val)

        for batch_idx, ((bon_x, bon_y), (mal_x, mal_y)) in enumerate(
                zip(train_dataloader_dict["benign"],
                    train_dataloader_dict["malicious"])):
            # Check for adversarial learning
            mal_x = inner_maximizer(
                mal_x,
                mal_y,
                model,
                loss_fct,
                iterations=evasion_iterations,
                method=training_method,
                mal_index=batch_idx *
                int(parameters["hyperparam"]["training_batch_size"]),
                dataset=train_dataloader_dict)

            # stack input
            if is_cuda:
                x = Variable(stack_tensors(bon_x, mal_x).cuda())
                y = Variable(stack_tensors(bon_y, mal_y).cuda())
            else:
                x = Variable(stack_tensors(bon_x, mal_x))
                y = Variable(stack_tensors(bon_y, mal_y))

            # forward pass
            y_model = model(x)

            # backward pass
            optimizer.zero_grad()
            loss = loss_fct(y_model, y).mean()
            loss.backward()
            optimizer.step()

            # predict pass
            _, predicted = torch.topk(y_model, k=1)
            correct = predicted.data.eq(y.data.view_as(
                predicted.data)).cpu().sum()

            # metrics
            total_loss += loss.data[0] * len(y)
            total_correct += correct
            total += len(y)

            bscn.update_numerator_batch(batch_idx, mal_x)

            if batch_idx % log_interval == 0:
                print("Time Taken:", time.time() - current_time)
                current_time = time.time()
                print(
                    "Train Epoch ({}) | Batch ({}) | [{}/{} ({:.0f}%)]\tBatch Loss: {:.6f}\tBatch Accuracy: {:.1f}%\t BSCN: {:.12f}"
                    .format(
                        epoch, batch_idx, batch_idx * len(x),
                        len(train_dataloader_dict["malicious"].dataset) +
                        len(train_dataloader_dict["benign"].dataset), 100. *
                        batch_idx / len(train_dataloader_dict["benign"]),
                        loss.data[0], 100. * correct / len(y), bscn.ratio()))

        model_filename = "{name}_epoch_{e}".format(name=experiment_name,
                                                   e=epoch)

        if save_every_epoch:
            torch.save(model, os.path.join("model_weights", model_filename))

    def check_one_category(category="benign",
                           dset_type='test',
                           is_evade=False,
                           evade_method='dfgsm_k'):
        """
        test the model in terms of loss and accuracy on category, this function also allows to perform perturbation
        with respect to loss to evade
        :param category: benign or malicious dataset
        :param dset_type: 'val', 'test', or 'train' dataset
        :param is_evade: to perform evasion or not
        :param evade_method: evasion method (we can use on of the inner maximier methods), it is only relevant if is_evade
          is True
        :return:
        """
        model.eval()
        total_loss = 0
        total_correct = 0
        total = 0
        evasion_mode = ""

        if is_synthetic_dataset:
            # since generation of the synthetic data set is random, we'd like it to be the same across epochs
            torch.manual_seed(seed_val)
            random.seed(seed_val)

        if dset_type == 'train':
            dataloader = train_dataloader_dict[category]
        elif dset_type == 'val':
            dataloader = valid_dataloader_dict[category]
        elif dset_type == 'test':
            dataloader = test_dataloader_dict[category]
        else:
            raise Exception("Invalid Dataset type")

        for batch_idx, (x, y) in enumerate(dataloader):
            #
            if is_evade:
                x = inner_maximizer(x,
                                    y,
                                    model,
                                    loss_fct,
                                    iterations=evasion_iterations,
                                    method=evade_method)
                evasion_mode = "(evasion using %s)" % evade_method
            # stack input
            if is_cuda:
                x = Variable(x.cuda())
                y = Variable(y.cuda())
            else:
                x = Variable(x)
                y = Variable(y)

            # forward pass
            y_model = model(x)

            # loss pass
            loss = loss_fct(y_model, y).mean()

            # predict pass
            _, predicted = torch.topk(y_model, k=1)
            correct = predicted.data.eq(y.data.view_as(
                predicted.data)).cpu().sum()

            # metrics
            total_loss += loss.data[0] * len(y)
            total_correct += correct
            total += len(y)

        print(
            "{} set for {} {}: Average Loss: {:.4f}, Accuracy: {:.2f}%".format(
                dset_type, category, evasion_mode, total_loss / total,
                total_correct * 100. / total))

        return total_loss, total_correct, total

    def test(epoch, dset_type='test'):
        """
        Function to be used for both testing and validation
        :param epoch: current epoch
        :param dset_type: 'train', 'test' , or 'val'
        :return: average total loss, dictionary of the metrics for both bon and mal samples
        """
        # test for accuracy and loss
        bon_total_loss, bon_total_correct, bon_total = check_one_category(
            category="benign", is_evade=False, dset_type=dset_type)
        mal_total_loss, mal_total_correct, mal_total = check_one_category(
            category="malicious", is_evade=False, dset_type=dset_type)

        # test for evasion on malicious sample
        evade_mal_total_loss, evade_mal_total_correct, evade_mal_total = check_one_category(
            category="malicious",
            is_evade=True,
            evade_method=evasion_method,
            dset_type=dset_type)

        total_loss = bon_total_loss + mal_total_loss
        total_correct = bon_total_correct + mal_total_correct
        total = bon_total + mal_total

        print("{} set overall: Average Loss: {:.4f}, Accuracy: {:.2f}%".format(
            dset_type, total_loss / total, total_correct * 100. / total))

        metrics = {
            "bscn_num_pts": bscn.num_pts(),
            "bscn_exp_pts": bscn.exp_num_pts(),
            "mal": {
                "total_loss": mal_total_loss,
                "total_correct": mal_total_correct,
                "total": mal_total,
                "evasion": {
                    "total_loss": evade_mal_total_loss,
                    "total_correct": evade_mal_total_correct,
                    "total": evade_mal_total
                }
            },
            "bon": {
                "total_loss": bon_total_loss,
                "total_correct": bon_total_correct,
                "total": bon_total
            }
        }
        print(metrics)

        return (bon_total_loss +
                max(mal_total_loss, evade_mal_total_loss)) / total, metrics

    def process_adv_examples(evade_method='dfgsm_k', mode='gen'):
        """
        This function is used for the `attack` track challenge for two purposes
        With mode='gen', it is meant to craft transferable adversarial examples and store them to a numpy array
        With mode='eval', it loads up the examples from the numpy array and evaluates them on the tested model
        Note, ADV Examples are only crafted for malicious files
        :param evade_method: evasion method (participants can implement their own), here we use `dfgsm_k` as an example
        :param mode: 'gen' to generate and store the adv examples or 'eval' to load them and evaluate
        :return:
        """
        model.eval()
        category = "malicious"
        total_loss = 0
        total_correct = 0
        total = 0

        adv_file = os.path.join(adv_example_filepath, 'aes.npy')
        xs_adv = [] if mode == 'gen' else np.load(adv_file)
        # to stay in line with the code base, the attack dataset is also
        # decomposed into train, val, and test subsets; however, all of these
        # subsets are used as part of the attack challenge.
        xs_adv_offset = 0
        for dset_type in ['train', 'val', 'test']:
            if dset_type == 'train':
                dataloader = train_dataloader_dict[category]
            elif dset_type == 'val':
                dataloader = valid_dataloader_dict[category]
            elif dset_type == 'test':
                dataloader = test_dataloader_dict[category]

            # to impose the order of the crafted examples, we manually loop over the dataset
            # instead of using the dataloader's sampler
            batch_size = dataloader.batch_size
            num_pts = len(dataloader.dataset)
            num_batches = (num_pts + batch_size - 1) // batch_size

            for batch_idx in range(num_batches):
                # get the batch data
                bstart = batch_idx * batch_size
                bend = min(num_pts, bstart + batch_size)
                x, y = zip(
                    *[dataloader.dataset[i] for i in range(bstart, bend)])
                x = torch.stack(x, dim=0)
                y = torch.LongTensor(y)

                if mode == 'gen':
                    # put your method here
                    # ---------------------------
                    x_adv = inner_maximizer(x,
                                            y,
                                            model,
                                            loss_fct,
                                            iterations=evasion_iterations,
                                            method=evade_method)
                    # ---------------------------

                else:
                    x_adv = torch.from_numpy(
                        xs_adv[xs_adv_offset +
                               batch_idx * batch_size:xs_adv_offset +
                               (batch_idx + 1) * batch_size, :])

                # stack input
                if is_cuda:
                    x_adv = Variable(x_adv.cuda())
                    y = Variable(y.cuda())
                else:
                    x_adv = Variable(x_adv)
                    y = Variable(y)

                # forward pass
                y_model = model(x_adv)

                # loss pass
                loss = loss_fct(y_model, y).mean()

                # predict pass
                _, predicted = torch.topk(y_model, k=1)
                correct = predicted.data.eq(y.data.view_as(
                    predicted.data)).cpu().sum()

                # metrics
                total_loss += loss.data[0] * len(y)
                total_correct += correct
                total += len(y)

                # let's save the adversarial examples
                _x = x.numpy()
                _x_adv = x_adv.cpu().data.numpy(
                ) if is_cuda else x_adv.data.numpy()
                assert np.allclose(np.logical_and(_x, _x_adv),
                                   _x), "perturbation constraint violated"
                if mode == 'gen':
                    xs_adv = xs_adv + [_x_adv]

            xs_adv_offset += num_pts

        if mode == 'gen':
            np.save(adv_file, np.concatenate(xs_adv, axis=0))

        # we keep the same structure of metrics for compatibility
        metrics = {
            "bscn_num_pts": 1,
            "bscn_exp_pts": 1,
            "mal": {
                "total_loss": 1,
                "total_correct": 1,
                "total": 1,
                "evasion": {
                    "total_loss": total_loss,
                    "total_correct": total_correct,
                    "total": total
                }
            },
            "bon": {
                "total_loss": 1,
                "total_correct": 1,
                "total": 1
            }
        }

        return metrics

    if not os.path.exists("result_files"):
        os.mkdir("result_files")
    _metrics = None

    # Starter kit for Defend Challenge
    if not eval(parameters["challenge"]["eval"]) and eval(
            parameters["challenge"]["defend"]):
        best_valid_loss = float("inf")
        for _epoch in range(num_epochs):
            # train
            train(_epoch)
            # validate
            valid_loss, _ = test(_epoch, dset_type='val')
            # keep the best parameters w.r.t validation and check the test set
            if best_valid_loss > valid_loss:
                best_valid_loss = valid_loss
                _, _metrics = test(_epoch, dset_type='test')
                bscn_to_save = bscn.ratio()
                with open(
                        os.path.join("result_files",
                                     "%s_bscn.txt" % experiment_name),
                        "w") as f:
                    f.write(str(bscn_to_save))
                torch.save(
                    model,
                    os.path.join("helper_files",
                                 "%s-model.pt" % experiment_name))
            elif _epoch % log_interval == 0:
                test(_epoch, dset_type='test')

    # starter kit for Attack challenge:
    # participants can modify the code highlighted in the `process_adv_examples` function
    if not eval(parameters["challenge"]["eval"]) and eval(
            parameters["challenge"]["attack"]):
        _metrics = process_adv_examples(evade_method=evasion_method,
                                        mode='gen')

    # Code for submission evaluations (this code will be run by the organizers);
    # we are releasing it here for transparency.
    # For evaluating submissions under the Attack track
    if eval(parameters["challenge"]["eval"]) and eval(
            parameters["challenge"]["attack"]):
        _metrics = process_adv_examples(evade_method=evasion_method,
                                        mode='eval')

    # for evaluating submissions under the Defend track
    # For compatibility with the code above, our hold-out dataset will also
    # be split into test, validation, and train. This is why we evaluate them all below.
    if eval(parameters["challenge"]["eval"]) and eval(
            parameters["challenge"]["defend"]):
        # report results on all datasets
        _, _metrics = test(0, dset_type='test')
        _, _metrics_t = test(0, dset_type='train')
        _, _metrics_v = test(0, dset_type='val')
        _metrics = merge_metrics([_metrics_t, _metrics, _metrics_v])

    with open(os.path.join("result_files", experiment_name + ".json"),
              "w") as result_file:
        json.dump(_metrics, result_file)
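
The configuration access pattern above (`parameters["section"]["key"]` returning strings that are then cast with `int`/`float` or parsed with `eval`) suggests a configparser-backed loader. A minimal sketch of what `load_parameters` might look like, offered as an assumption rather than the repository's exact implementation:

import configparser


def load_parameters(config_file="parameters.ini"):
    # Sketch only: read an INI file into a nested dict of strings.
    # Values stay as strings, which is why the calling code casts them
    # with int()/float() or evaluates boolean flags.
    config = configparser.ConfigParser()
    config.optionxform = str  # preserve option-name case
    with open(config_file) as f:
        config.read_file(f)
    return {section: dict(config.items(section))
            for section in config.sections()}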
Example #3
    saved_vectors_directory = "sample_dataset_saved_feature_vectors"

    if not os.path.exists(saved_vectors_directory):
        os.mkdir(saved_vectors_directory)

    malicious_vector_filepath = os.path.join(saved_vectors_directory,
                                             "malicious")
    benign_vector_filepath = os.path.join(saved_vectors_directory, "benign")

    if not os.path.exists(malicious_vector_filepath):
        os.mkdir(malicious_vector_filepath)

    if not os.path.exists(benign_vector_filepath):
        os.mkdir(benign_vector_filepath)

    parameters = load_parameters("parameters.ini")

    # This flag must be on to generate feature vectors; it also changes
    # PortableExecutableDataset to return the file path along with each sample.
    set_parameter("parameters.ini", "dataset", "generate_feature_vector_files",
                  "True")
    set_parameter("parameters.ini", "hyperparam", "training_batch_size", "1")
    set_parameter("parameters.ini", "hyperparam", "test_batch_size", "1")

    train_dataloader_dict, valid_dataloader_dict, test_dataloader_dict, num_features = load_data(
        parameters)

    print(
        len(train_dataloader_dict['malicious'].dataset) +
        len(test_dataloader_dict['malicious'].dataset) +
        len(valid_dataloader_dict['malicious'].dataset))
    print(
        len(train_dataloader_dict['benign'].dataset) +
        len(test_dataloader_dict['benign'].dataset) +
        len(valid_dataloader_dict['benign'].dataset))
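
`set_parameter` rewrites a single option in the INI file before the data loaders are built. A minimal configparser-based sketch of such a helper, again as an assumption rather than the repository's own code:

import configparser


def set_parameter(config_file, section, key, value):
    # Sketch only: overwrite one option in an INI file and write it back.
    config = configparser.ConfigParser()
    config.optionxform = str  # keep option names as written
    config.read(config_file)
    config.set(section, key, value)
    with open(config_file, "w") as f:
        config.write(f)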
Example #4
import os
import sys

import torch.optim as optim
from torch.autograd import Variable
from utils.utils import load_parameters, stack_tensors
from datasets.datasets import load_data
from inner_maximizers.inner_maximizers import inner_maximizer
from nets.ff_classifier import build_ff_classifier
from blindspot_coverage.covering_number import CoveringNumber
import losswise
import time
import json
import numpy as np

# Load Parameters File
if len(sys.argv) == 1:
    # Use default parameters
    parameters = load_parameters("parameters.ini")
else:
    parameters = load_parameters(sys.argv[1])

# Set up CUDA device if exists
is_cuda = eval(parameters["general"]["is_cuda"])
if is_cuda:
    os.environ["CUDA_VISIBLE_DEVICES"] = parameters["general"]["gpu_device"]

assertion_message = "Set this flag off to train models."
assert eval(parameters['dataset']
            ['generate_feature_vector_files']) is False, assertion_message

log_interval = int(parameters["general"]["log_interval"])
num_epochs = int(parameters["hyperparam"]["ff_num_epochs"])
is_losswise = eval(parameters["general"]["is_losswise"])
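
The flags above are parsed with `eval`, which only works when the INI values are written exactly as `True` or `False`. A small, hypothetical helper (not part of the repository) that makes the intent explicit:

def parse_flag(value):
    # Sketch only: parse an INI boolean flag without eval().
    v = str(value).strip().lower()
    if v in ("true", "1", "yes", "on"):
        return True
    if v in ("false", "0", "no", "off"):
        return False
    raise ValueError("not a boolean flag: %r" % (value,))

Usage would then be, for example, `is_cuda = parse_flag(parameters["general"]["is_cuda"])`.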
Example #5
                                                 is_malicious=True,
                                                 parameters=parameters)
    num_workers = int(parameters['general']['num_workers'])

    analysis_batch_size = int(parameters['hyperparam']['analysis_batch_size'])

    adv_dataloader = DataLoader(full_adv_dataset,
                                batch_size=analysis_batch_size,
                                shuffle=True,
                                num_workers=num_workers)

    return adv_dataloader


if __name__ == "__main__":
    print(
        "I am a module to be imported by others, testing some functionalities here"
    )
    from utils.utils import load_parameters
    parameters = load_parameters("../parameters.ini")
    train_data, valid_data, test_data, num_features = load_data(
        parameters=parameters)
    dset_1 = train_data["malicious"].dataset
    dset_2 = train_data["benign"].dataset
    print("A sample from malicious dataset has ", sum(dset_1[0][0]),
          " features, with label", dset_1[0][1])
    print("A sample from benign dataset has ", sum(dset_2[0][0]),
          " features, with label", dset_2[0][1])
    print("Feature space is of %d-dimensionality" % dset_1[10][0].size(),
          num_features)
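
The loaders returned by `load_data` behave like ordinary PyTorch `DataLoader`s keyed by class, so a quick way to sanity-check a batch (names taken from the snippet above, shapes assumed):

# Sketch only: peek at one batch from the malicious training loader.
for batch_idx, (x, y) in enumerate(train_data["malicious"]):
    print("batch shape:", tuple(x.size()), "first labels:", y[:4].tolist())
    break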
Example #6
            time, date
        ]],
                             columns=[
                                 'generation', 'max_reward', 'min_reward',
                                 'mean_reward', 'time', 'date'
                             ])
        with open(c_save_path + '_train.txt', 'a') as f:
            record = t_log.to_json(orient='records')
            f.write(record)
            f.write(os.linesep)

    if epoch % weight_save_interval == 0 and epoch != 0:
        # saving controller weights
        index_min = np.argmin(r_list)
        generation_best = solutions[index_min]
        load_parameters(generation_best, controller)
        torch.save(
            {
                'epoch': epoch,
                'reward': -np.min(r_list),
                'state_dict': controller.state_dict()
            }, 'trained/' + c_save_dir + '/' + str(epoch) + '.tar')

    if epoch % eval_interval == 0 and epoch != 0:
        best_params, best, std_best = evaluate(solutions, r_list)
        print('Current evaluation: ' + str(best))
        e_log = pd.DataFrame(
            [[epoch, best, std_best, best_params]],
            columns=['generation', 'avg_reward', 'std_reward', 'parameters'])
        with open(c_save_path + '_eval.txt', 'a') as f:
            record = e_log.to_json(orient='records')
            f.write(record)
            f.write(os.linesep)
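
The checkpoint written above is a plain dict with 'epoch', 'reward', and 'state_dict' keys, so restoring a saved controller is straightforward; a short sketch reusing the snippet's names (the controller construction itself is not shown here):

import torch

# Sketch only: reload a controller checkpoint written by the loop above.
checkpoint = torch.load('trained/' + c_save_dir + '/' + str(epoch) + '.tar')
controller.load_state_dict(checkpoint['state_dict'])
print("restored epoch %d, reward %.2f"
      % (checkpoint['epoch'], checkpoint['reward']))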
Example #7
import itertools
import os
import pickle
import random
import shutil
import sys
import time

import matplotlib.patches as mpatches
import numpy as np
import torch

from utils.utils import load_parameters, set_parameter

EVASION_METHODS = ['rfgsm_k', 'dfgsm_k', 'bga_k', 'bca_k']
COLORS = ['b', 'g', 'r', 'c']

legend_handles = []
# Create the color legend
for method, c in zip(EVASION_METHODS, COLORS):
    legend_handles.append(mpatches.Patch(color=c))

legend_labels = list(EVASION_METHODS)
legend_labels = [m + " evasion" for m in legend_labels]

parameters_filepath = sys.argv[1]
parameters = load_parameters(parameters_filepath)

exp_time = sys.argv[2]

is_cuda = eval(parameters["general"]["is_cuda"])
if is_cuda:
    os.environ["CUDA_VISIBLE_DEVICES"] = parameters["general"]["gpu_device"]

seed_val = int(parameters["general"]["seed"])
use_seed = eval(parameters["general"]["use_seed"])

if use_seed:
    random.seed(seed_val)
    torch.manual_seed(seed_val)
    np.random.seed(seed_val)
if __name__ == "__main__":
    trained_experiment_model = sys.argv[1]

    original_parameters_filepath = "figure_generation_parameters.ini"
    exp_time = time.strftime("%m_%d_%Hh_%Mm", time.localtime())

    new_params_directory = "experiment_parameters"
    if not os.path.exists(new_params_directory):
        os.mkdir(new_params_directory)
    new_params_name = "loss_landscape_parameters_{exp}_{time}.ini".format(
        exp=trained_experiment_model, time=exp_time)

    new_params_filepath = os.path.join(new_params_directory, new_params_name)
    shutil.copy(original_parameters_filepath, new_params_filepath)
    new_params = load_parameters(new_params_filepath)

    trained_model_directory = "../trained_models"

    model_filepath_base_string = os.path.join(
        trained_model_directory,
        "[training:{train_meth}|evasion:{train_meth}]_{exp_name}-model.pt")

    train_methods = ['natural', 'dfgsm_k', 'rfgsm_k', 'bga_k', 'bca_k']
    evasion_methods = ['rfgsm_k', 'dfgsm_k', 'bga_k', 'bca_k']

    set_parameter(new_params_filepath, "general", "generate_histogram",
                  "False")

    for train_method in train_methods:
        model_filepath = model_filepath_base_string.format(
Example #9
"""
Script for running framework.py under different adversarial conditions.
Set hyperparams in parameters.ini and script will create a duplicate parameters file and run framework.py using that
"""

# coding=utf-8
import os
from os import system
import shutil

from utils.utils import load_parameters, set_parameter

if __name__ == "__main__":
    original_parameters_filepath = "parameters.ini"

    original_parameters = load_parameters(original_parameters_filepath)
    experiment_name = original_parameters['general']['experiment_suffix']

    # Create duplicate parameters file in directory
    new_params_directory = "experiment_parameters"
    new_params_name = "parameters_{exp}.ini".format(exp=experiment_name)

    if not os.path.exists(new_params_directory):
        os.mkdir(new_params_directory)

    new_params_filepath = os.path.join(new_params_directory, new_params_name)
    shutil.copy(original_parameters_filepath, new_params_filepath)

    train_methods = ['natural', 'rfgsm_k', 'dfgsm_k', 'bga_k', 'bca_k']
    evasion_methods = ['natural', 'rfgsm_k', 'dfgsm_k', 'bga_k', 'bca_k']
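
Presumably the script then loops over the training/evasion grid, rewrites the duplicate parameters file, and invokes framework.py for each combination; a hedged sketch of that loop follows, where the exact option keys and command line are assumptions based on the other examples:

    # Sketch only: run framework.py once per (training, evasion) combination.
    for train_method in train_methods:
        for evasion_method in evasion_methods:
            set_parameter(new_params_filepath, "general", "training_method",
                          train_method)
            set_parameter(new_params_filepath, "general", "evasion_method",
                          evasion_method)
            system("python framework.py %s" % new_params_filepath)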
Example #10
                if is_cuda:
                    param.data -= torch.FloatTensor(
                        (alpha * dir_one + beta * dir_two)).cuda()
                else:
                    param.data -= torch.FloatTensor(
                        (alpha * dir_one + beta * dir_two))

            points.append([alpha, beta, total_loss])
            print(alpha, beta, total_loss, round(time.time() - start_time, 1),
                  "seconds")

    return points


if __name__ == '__main__':
    parameters = load_parameters(sys.argv[1])
    output_directory_name = sys.argv[2]

    adversarial_vector_type = None
    if len(sys.argv) == 4:
        adversarial_vector_type = sys.argv[3]

    if eval(parameters['general']['use_seed']):
        seed_val = int(parameters['general']['seed'])
        random.seed(seed_val)
        torch.manual_seed(seed_val)

    base_output_directory = parameters['dataset']['output_directory']
    experiment_name = parameters['general']['experiment_name']
    training_method = parameters['general']['training_method']
    num_files_used = int(parameters['dataset']['num_files_to_use'])
def main():
    '''
    Generates adversarial samples using each training method and evasion method combination
    '''

    if len(sys.argv) == 1:
        parameters = load_parameters("generate_adversarial_parameters.ini")
    else:
        parameters = load_parameters(sys.argv[1])

    # Seed so that synthetic data is the same
    use_seed = eval(parameters['general']['use_seed'])
    if use_seed:
        seed_val = int(parameters["general"]["seed"])
    else:
        seed_val = random.randint(1, 10000)

    random.seed(seed_val)
    torch.manual_seed(seed_val)

    malicious_dataloader = load_malicious_data(parameters)

    output_directory = parameters['general']['output_directory_for_adv_vecs']

    if not os.path.exists(output_directory):
        os.mkdir(output_directory)

    print("Generating adversarial samples for each method")
    loss_fct = nn.NLLLoss(reduce=False)
    experiment_name = parameters['general']['experiment_name']
    evasion_iterations = int(parameters['hyperparam']['evasion_iterations'])

    print("Using experiment models: ", experiment_name)

    train_methods = ['natural']
    evasion_methods = ['rfgsm_k', 'dfgsm_k', 'bga_k', 'bca_k']

    saved_model_directory = parameters['general']['saved_model_directory']

    is_synthetic = eval(parameters['general']['is_synthetic_dataset'])

    for train_method in train_methods:
        train_directory = os.path.join(output_directory, train_method)
        if not os.path.exists(train_directory):
            os.mkdir(train_directory)

        model_filepath = os.path.join(
            saved_model_directory,
            "[training:{train_meth}|evasion:{train_meth}]_{exp_name}-model.pt".
            format(train_meth=train_method, exp_name=experiment_name))
        model = torch.load(model_filepath)

        for evasion_method in evasion_methods:
            print(train_method, evasion_method)

            evasion_subdirectory = os.path.join(train_directory,
                                                evasion_method)
            if not os.path.exists(evasion_subdirectory):
                os.mkdir(evasion_subdirectory)

            if is_synthetic:
                for i, (mal_x, mal_y) in enumerate(malicious_dataloader):
                    actual_filename = str(i) + ".p"

                    mal_x = inner_maximizer(mal_x,
                                            mal_y,
                                            model,
                                            loss_fct,
                                            iterations=evasion_iterations,
                                            method=evasion_method,
                                            report_loss_diff=False)

                    # Save the adversarial vector as a pickle file
                    pickle.dump(
                        mal_x,
                        open(
                            os.path.join(evasion_subdirectory,
                                         actual_filename), "wb"))
            else:
                for i, (mal_x, mal_y,
                        filepath) in enumerate(malicious_dataloader):
                    actual_filename = filepath[0].split('/')[-1]

                    mal_x = inner_maximizer(mal_x,
                                            mal_y,
                                            model,
                                            loss_fct,
                                            iterations=evasion_iterations,
                                            method=evasion_method,
                                            report_loss_diff=False)

                    # Save the adversarial vector as a pickle file
                    pickle.dump(
                        mal_x,
                        open(
                            os.path.join(evasion_subdirectory,
                                         actual_filename), "wb"))
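
The pickled tensors written by the loops above can be reloaded for a quick sanity check; a short sketch (the path pieces reuse the snippet's variables):

import os
import pickle

# Sketch only: reload one saved adversarial feature vector and inspect it.
sample_path = os.path.join(evasion_subdirectory, actual_filename)
with open(sample_path, "rb") as f:
    adv_x = pickle.load(f)
print("adversarial vector shape:", tuple(adv_x.size()),
      "active features:", int(adv_x.sum()))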