Example No. 1
def train(args):
    tf.get_logger().setLevel(logging.ERROR)

    mnist = MNIST()
    stylealae = StyleMNIST()

    modelname = args.name
    summary_path = os.path.join(args.summarydir, modelname)
    if not os.path.exists(summary_path):
        os.makedirs(summary_path)
    
    ckpt_path = os.path.join(args.ckptdir, modelname)
    if not os.path.exists(ckpt_path):
        os.makedirs(ckpt_path)

    controller = LevelController(NUM_LAYERS, EPOCHS_PER_LEVEL)
    trainer = Trainer(summary_path, ckpt_path, callback=controller)
    trainer.train(
        stylealae,
        args.epochs,
        mnist.datasets(
            args.batch_size, padding=2, flatten=False),
        mnist.datasets(
            args.batch_size, padding=2, flatten=False, train=False),
        trainlen=len(mnist.x_train) // args.batch_size)

    return 0
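For context, a minimal launcher sketch for the train() function above. The flag names are inferred from the attributes the function reads (name, summarydir, ckptdir, epochs, batch_size) and are not confirmed by the original repository; the defaults are illustrative.

if __name__ == '__main__':
    import argparse
    # Hypothetical CLI wiring; flag names mirror the attributes read by train()
    parser = argparse.ArgumentParser()
    parser.add_argument('--name', default='stylealae')
    parser.add_argument('--summarydir', default='./summary')
    parser.add_argument('--ckptdir', default='./ckpt')
    parser.add_argument('--epochs', type=int, default=50)
    parser.add_argument('--batch_size', type=int, default=128)
    train(parser.parse_args())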
Example No. 2
def test_call():
    data_iter = MNIST(source='/Users/devon/Data/mnist.pkl.gz', batch_size=27)
    gbn = test_build_GBN(dim_in=data_iter.dims[data_iter.name])

    X = T.matrix('x', dtype=floatX)
    results, samples = gbn(X, X, n_samples=7)

    f = theano.function([X], list(samples.values()) + list(results.values()))

    x = data_iter.next()[data_iter.name]
    # Deliberately fail so the test runner prints the computed samples/results.
    assert False, f(x)
Example No. 3
    def prepare_data(self):
        data_root = os.path.join(hydra.utils.get_original_cwd(),
                                 self.cfg.data_root)
        os.makedirs(data_root, exist_ok=True)  # make in case does not exist
        mnist_dataset = MNIST(root=data_root)

        train_samples_n = int(len(mnist_dataset) * self.cfg.train_val_split)
        val_samples_n = len(mnist_dataset) - train_samples_n

        self.training_dataset, self.validation_dataset = random_split(
            mnist_dataset, lengths=[train_samples_n, val_samples_n])
        self.test_dataset = MNIST(root=data_root, train=False)
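This reads like the prepare_data hook of a PyTorch Lightning DataModule. A minimal sketch of the companion dataloader hooks follows, assuming torch.utils.data.DataLoader is imported alongside random_split; the batch size is illustrative and not taken from the original config.

    def train_dataloader(self):
        # Hypothetical companion hook; batch_size is an illustrative value
        return DataLoader(self.training_dataset, batch_size=64, shuffle=True)

    def val_dataloader(self):
        return DataLoader(self.validation_dataset, batch_size=64)

    def test_dataloader(self):
        return DataLoader(self.test_dataset, batch_size=64)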
Example No. 4
def test_build_gdir(model=None):
    if model is None:
        data_iter = MNIST(source='/Users/devon/Data/mnist.pkl.gz',
                          batch_size=27)
        model = test_vae.test_build_GBN(dim_in=data_iter.dims[data_iter.name])
    gdir = MomentumGDIR(model)
    return gdir
Example No. 5
def test_infer():
    data_iter = MNIST(source='/Users/devon/Data/mnist.pkl.gz', batch_size=27)
    gbn = test_vae.test_build_GBN(dim_in=data_iter.dims[data_iter.name])

    gdir = test_build_gdir(gbn)

    X = T.matrix('x', dtype=floatX)

    inference_args = dict(n_inference_samples=13,
                          n_inference_steps=17,
                          pass_gradients=True)

    rval, constants, updates = gdir.inference(X, X, **inference_args)

    f = theano.function([X], list(rval.values()), updates=updates)
    x = data_iter.next()[data_iter.name]

    results, samples, full_results, updates = gdir(X, X, **inference_args)
    f = theano.function([X], list(results.values()), updates=updates)

    print(f(x))
Example No. 6
def train(args):
    mnist = MNIST()
    mlpalae = MnistAlae()

    modelname = args.name
    summary_path = os.path.join(args.summarydir, modelname)
    if not os.path.exists(summary_path):
        os.makedirs(summary_path)
    
    ckpt_path = os.path.join(args.ckptdir, modelname)
    if not os.path.exists(ckpt_path):
        os.makedirs(ckpt_path)

    trainer = Trainer(summary_path, ckpt_path)
    trainer.train(
        mlpalae,
        args.epochs,
        mnist.datasets(bsize=args.batch_size, flatten=True, condition=True),
        mnist.datasets(bsize=args.batch_size, flatten=True, condition=True, train=False),
        len(mnist.x_train) // args.batch_size)

    return 0
Example No. 7
def main(unused_argv):
    # Load training and eval data
    mnist = MNIST('/backups/work/mnist',
                  shuffle=True,
                  normalize=True,
                  augment=False,
                  one_hot=False)

    def train_input_fn():
        dataset = mnist.train_set
        dataset = dataset.repeat(100)
        iterator = dataset.make_one_shot_iterator()
        features, labels = iterator.get_next()
        return {'x': features}, labels

    def eval_input_fn():
        dataset = mnist.test_set
        dataset = dataset.repeat(1)
        iterator = dataset.make_one_shot_iterator()
        features, labels = iterator.get_next()
        return {'x': features}, labels

    # Create the Estimator
    sess_config = tf.ConfigProto()
    sess_config.gpu_options.allow_growth = True
    config = tf.estimator.RunConfig().replace(session_config=sess_config)

    mnist_estimator = tf.estimator.Estimator(
        model_fn=cnn_model_fn,
        model_dir="/backups/work/logs/mnist_convnet_model",
        config=config)

    train_spec = tf.estimator.TrainSpec(input_fn=train_input_fn,
                                        max_steps=60000)
    eval_spec = tf.estimator.EvalSpec(input_fn=eval_input_fn)

    tf.estimator.train_and_evaluate(mnist_estimator, train_spec, eval_spec)
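cnn_model_fn is not shown in this example. Below is a hypothetical, minimal model_fn with the signature tf.estimator expects; the layer sizes are illustrative and not the original architecture.

def cnn_model_fn(features, labels, mode):
    # Hypothetical stand-in for the missing model_fn; sizes are illustrative.
    x = tf.reshape(features['x'], [-1, 28, 28, 1])
    h = tf.layers.conv2d(x, 32, 5, activation=tf.nn.relu)
    h = tf.layers.max_pooling2d(h, 2, 2)
    logits = tf.layers.dense(tf.layers.flatten(h), 10)
    if mode == tf.estimator.ModeKeys.PREDICT:
        return tf.estimator.EstimatorSpec(
            mode, predictions={'class': tf.argmax(logits, axis=1)})
    loss = tf.losses.sparse_softmax_cross_entropy(labels=labels, logits=logits)
    if mode == tf.estimator.ModeKeys.TRAIN:
        train_op = tf.train.AdamOptimizer(1e-3).minimize(
            loss, global_step=tf.train.get_global_step())
        return tf.estimator.EstimatorSpec(mode, loss=loss, train_op=train_op)
    return tf.estimator.EstimatorSpec(mode, loss=loss)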
Example No. 8
def unpack_model_and_data(model_dir):
    name = model_dir.split('/')[-2]
    yaml = glob(path.join(model_dir, '*.yaml'))[0]
    model_file = glob(path.join(model_dir, '*best*npz'))[0]
    exp_dict = load_experiment(path.abspath(yaml))
    dataset_args = exp_dict['dataset_args']
    dataset = dataset_args['dataset']

    def filter(prior=None,
               dim_hs=None,
               dim_h=None,
               inference_args=None,
               learning_args=None,
               **kwargs):
        if dim_h is not None:
            dim_hs = [dim_h]
        return OrderedDict(prior=prior,
                           dim_hs=dim_hs,
                           inference_args=inference_args,
                           learning_args=learning_args)

    exp_dict = filter(**exp_dict)
    prior = exp_dict['prior']
    deep = len(exp_dict['dim_hs']) > 1

    if dataset == 'mnist':
        print('Loading MNIST')
        train_iter = MNIST(mode='train', batch_size=10, **dataset_args)
        data_iter = MNIST(batch_size=10, mode='test', **dataset_args)
    elif dataset == 'caltech':
        print('Loading Caltech 101 Silhouettes')
        train_iter = CALTECH(mode='train', batch_size=10, **dataset_args)
        data_iter = CALTECH(batch_size=10, mode='test', **dataset_args)
    elif dataset == 'uci':
        print('Loading the %s UCI dataset' % dataset)
        train_iter = UCI(mode='train', batch_size=10, **dataset_args)
        data_iter = UCI(batch_size=10, mode='test', **dataset_args)
    mean_image = train_iter.mean_image.astype(floatX)

    if prior == 'gaussian':
        unpack = unpack_gbn
        model_name = 'gbn'
        inference_method = 'momentum'
    elif prior in ['binomial', 'darn'] and not deep:
        unpack = unpack_sbn
        model_name = 'sbn'
        inference_method = 'air'
    elif prior == 'binomial' and deep:
        unpack = unpack_dsbn
        model_name = 'sbn'
        inference_method = 'air'
    else:
        raise ValueError(prior)

    models, _ = load_model(model_file,
                           unpack,
                           distributions=data_iter.distributions,
                           dims=data_iter.dims)

    models['main'] = models[model_name]

    return models, data_iter, name, exp_dict, mean_image, deep, inference_method
Example No. 9
def main(args):

    global results
    # config
    cuda = torch.cuda.is_available()
    device = torch.device("cuda" if cuda else "cpu")
    num_workers = 4 if cuda else 0

    # Hyper-parameters
    batch_size = args.batch_size
    num_prunes = args.n_prune
    prune_amount = args.prune_amount
    epochs = args.n_epoch
    learning_rate = args.lr
    weight_decay = args.weight_decay
    warm_up_k = args.n_warm_up
    warm_up_iter = 0
    weight_init_type = args.weight_init_type
    momentum = args.momentum

    # Create datasets and loaders
    if args.dataset == "MNIST":
        from datasets.mnist import MNIST

        training_dataset = MNIST(
            root="./data",
            transform=torchvision.transforms.Compose(
                [transforms.Grayscale(3),
                 torchvision.transforms.ToTensor()]),
            download=True,
        )
        validation_dataset = MNIST(
            root="./data",
            train=False,
            transform=torchvision.transforms.Compose(
                [transforms.Grayscale(3),
                 torchvision.transforms.ToTensor()]),
            download=True,
        )
    elif args.dataset == "nyudepthv2":
        from datasets.nyu import NYUDataset

        training_dataset = NYUDataset(root="./data/nyudepthv2/train",
                                      split="train")
        #training_dataset = NYUDataset(root="./data/nyudepthv2/val", split="train")
        validation_dataset = NYUDataset(root="./data/nyudepthv2/val",
                                        split="val")
    else:
        raise NotImplementedError("Invalid dataset input")

    training_loader_args = (dict(
        shuffle=True,
        batch_size=batch_size,
        num_workers=num_workers,
        pin_memory=True,
    ) if cuda else dict(shuffle=True, batch_size=batch_size))
    training_loader = DataLoader(training_dataset, **training_loader_args)

    validation_loader_args = (dict(
        shuffle=False,
        batch_size=batch_size,
        num_workers=num_workers,
        pin_memory=True,
    ) if cuda else dict(shuffle=False, batch_size=batch_size))
    validation_loader = DataLoader(validation_dataset,
                                   **validation_loader_args)

    # initialize model and training parameters
    if args.model == "resnet18":
        from models.resnet18 import resnet18

        net = resnet18(pretrained=False)
    elif args.model == "FastDepth":
        from models.fastdepth import MobileNetSkipAdd

        # Unsure of output size
        net = MobileNetSkipAdd(output_size=224, pretrained_encoder=True)
    else:
        raise NotImplementedError("Invalid model input")

    global net2
    net2 = copy.deepcopy(net)
    # if (weight_init_flag == "original_first")
    initial_state = copy.deepcopy(net.state_dict())

    if args.model == "resnet18":
        criterion = nn.CrossEntropyLoss()
    elif args.model == "FastDepth":
        criterion = nn.L1Loss()
    else:
        raise NotImplementedError("No loss function defined for that model")

    optimizer = optim.SGD(net.parameters(),
                          lr=learning_rate,
                          momentum=momentum,
                          weight_decay=weight_decay)
    scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer,
                                                           "min",
                                                           factor=0.5,
                                                           patience=2)
    net.to(device)

    global_sparsity = 0
    results = {}

    run_id = str(int(time.time()))

    for prune_cycle in range(num_prunes):
        writer = SummaryWriter("./runs/" + run_id +
                               "/Lottery_prune_{0}".format(global_sparsity))
        for epoch in range(epochs):
            print("Epoch: " + str(epoch) + " (" + str(prune_cycle) +
                  " prunings)")

            training_metrics, warm_up_iter = train_epoch(
                net,
                device,
                training_loader,
                criterion,
                optimizer,
                warm_up_iter,
                warm_up_k,
                learning_rate,
                writer,
                epoch,
            )
            validation_metrics = validate_epoch(net, device, validation_loader,
                                                criterion, scheduler, writer,
                                                epoch)
            if (args.vanilla_train):
                continue
            for metric, value in training_metrics.items():
                if prune_cycle == 0 and epoch == 0:
                    results["train_" + metric] = {}
                if epoch == 0:
                    results["train_" +
                            metric]["prune_{0}".format(global_sparsity)] = []

                print("Training " + metric + ": ", value)
                results["train_" + metric]["prune_{0}".format(
                    global_sparsity)].append(value)

            for metric, value in validation_metrics.items():
                if prune_cycle == 0 and epoch == 0:
                    results["validation_" + metric] = {}
                if epoch == 0:
                    results["validation_" +
                            metric]["prune_{0}".format(global_sparsity)] = []

                print("Validation " + metric + ": ", value)
                results["validation_" + metric]["prune_{0}".format(
                    global_sparsity)].append(value)

            print("=" * 50)
        if (args.vanilla_train):
            continue
        writer.close()
        pkl.dump(
            results,
            open("results/results_prune_{0}.p".format(global_sparsity), "wb"))

        if weight_init_type == "carry_previous":
            initial_state = copy.deepcopy(net.state_dict())
        else:
            # this will keep the original weights or use Xavier initialization
            pass

        global_sparsity = my_prune(net, prune_amount, initial_state,
                                   weight_init_type, args.model)

    if (not (args.vanilla_train)):
        pkl.dump(results, open("results/results_final.pkl", "wb"))
    else:
        torch.save(net.state_dict(), 'model.pth')
Example No. 10
    parser.add_argument('--eager',
                        action='store_true',
                        default=False,
                        help='Whether to run in eager mode.')

    return parser.parse_args()


if __name__ == "__main__":
    os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'

    args = parse_args()

    tf.config.experimental_run_functions_eagerly(args.eager)

    dataset, meta_info = Dataset.create(args, batch_size=args.batch_size)

    Model.add_additional_args(args, meta_info)
    model = Model(args)

    trainer = model.create_trainer()

    trainer.add_callbacks([
        callbacks.Checkpointer(args.root_dir + '/ckpt',
                               model.gen_ckpt_objs(),
                               save_interval=1,
                               max_to_keep=10),
        callbacks.ModelArgsSaverLoader(model, True, args.root_dir),
        callbacks.TqdmProgressBar(args.epochs, len(dataset))
    ])
Example No. 11
def eval_model(model_file,
               steps=50,
               data_samples=10000,
               out_path=None,
               optimizer=None,
               optimizer_args=dict(),
               batch_size=100,
               valid_scores=None,
               mode='valid',
               prior='logistic',
               center_input=True,
               n_layers=2,
               z_init='recognition_net',
               inference_method='momentum',
               inference_rate=.01,
               rate=0.,
               n_mcmc_samples=20,
               posterior_samples=20,
               inference_samples=20,
               dataset=None,
               dataset_args=None,
               extra_inference_args=dict(),
               **kwargs):

    if rate > 0:
        inference_rate = rate

    model_args = dict(prior=prior,
                      n_layers=n_layers,
                      z_init=z_init,
                      inference_method=inference_method,
                      inference_rate=inference_rate,
                      n_inference_samples=inference_samples)

    models, _ = load_model(model_file, unpack, **model_args)

    if dataset == 'mnist':
        data_iter = MNIST(batch_size=data_samples,
                          mode=mode,
                          inf=False,
                          **dataset_args)
        valid_iter = MNIST(batch_size=500,
                           mode='valid',
                           inf=False,
                           **dataset_args)
    else:
        raise ValueError()

    model = models['sbn']
    tparams = model.set_tparams()

    # ========================================================================
    print('Setting up Theano graph for lower bound')

    X = T.matrix('x', dtype=floatX)

    if center_input:
        print('Centering input with train dataset mean image')
        X_mean = theano.shared(data_iter.mean_image.astype(floatX),
                               name='X_mean')
        X_i = X - X_mean
    else:
        X_i = X

    x, _ = data_iter.next()
    x_v, _ = valid_iter.next()

    dx = 100
    data_samples = min(data_samples, data_iter.n)
    xs = [x[i:(i + dx)] for i in range(0, data_samples, dx)]
    N = data_samples // dx

    print(
        'Calculating final lower bound and marginal with %d data samples, %d posterior samples '
        'with %d validated inference steps' %
        (N * dx, posterior_samples, steps))

    outs_s, updates_s = model(X_i,
                              X,
                              n_inference_steps=steps,
                              n_samples=posterior_samples,
                              calculate_log_marginal=True)
    f_lower_bound = theano.function([X],
                                    [outs_s['lower_bound'], outs_s['nll']] +
                                    outs_s['lower_bounds'] + outs_s['nlls'],
                                    updates=updates_s)
    lb_t = []
    nll_t = []
    nlls_t = []
    lbs_t = []

    pbar = ProgressBar(maxval=len(xs)).start()
    for i, x in enumerate(xs):
        outs = f_lower_bound(x)
        lb, nll = outs[:2]
        outs = outs[2:]
        lbs = outs[:len(outs) // 2]
        nlls = outs[len(outs) // 2:]
        lbs_t.append(lbs)
        nlls_t.append(nlls)
        lb_t.append(lb)
        nll_t.append(nll)
        pbar.update(i)

    lb_t = np.mean(lb_t)
    nll_t = np.mean(nll_t)
    lbs_t = np.mean(lbs_t, axis=0).tolist()
    nlls_t = np.mean(nlls_t, axis=0).tolist()
    print('Final lower bound and NLL: %.2f and %.2f' % (lb_t, nll_t))
    print(lbs_t)
    print(nlls_t)

    if out_path is not None:
        plt.savefig(out_path)
        print('Sampling from the prior')

        np.save(path.join(out_path, 'lbs.npy'), lbs_t)
        np.save(path.join(out_path, 'nlls.npy'), nlls_t)

        py_p = model.sample_from_prior()
        f_prior = theano.function([], py_p)

        samples = f_prior()
        data_iter.save_images(samples[:, None],
                              path.join(out_path, 'samples_from_prior.png'),
                              x_limit=10)
Example No. 12
# -*- coding: utf-8 -*-
import argparse
from datetime import datetime
import numpy as np
from sklearn import linear_model
import tensorflow as tf
from datasets.mnist import mnist as MNIST

mnist = MNIST()


class CNNNetwork(object):
    def __init__(self, cout1, cout2, features=784, size=28, classes=10):
        self.x = tf.placeholder(dtype=tf.float32, shape=(None, features))
        self.y = tf.placeholder(dtype=tf.int64, shape=(None, ))
        self.classes = classes
        self.session = tf.Session()
        self.cnn, self.X = self.build_network(cout1, cout2, size, classes)
        self.train_step, self.prediction, self.accuracy = self.build_model()

    @staticmethod
    def fully_connected(input, units):
        weight = tf.Variable(
            tf.truncated_normal([input.shape.as_list()[1], units], stddev=0.1))
        bias = tf.Variable(tf.constant(0.1, shape=[units]))
        return tf.matmul(input, weight) + bias

    def build_network(self, cout1, cout2, size, classes):
        input_layer = tf.reshape(self.x, shape=[-1, size, size, 1])
        conv1 = tf.layers.conv2d(input_layer, cout1, 5, activation=tf.nn.relu)
Example No. 13
# -*- coding: utf-8 -*-
from datetime import datetime
from keras.models import Sequential
from keras.layers import Conv2D, MaxPool2D, Flatten, Dense
from datasets.mnist import mnist as MNIST


mnist = MNIST(one_hot=True)


class LeNet(object):
    def __init__(self, c1=6, s1=2, c2=16, s2=2):
        self.model = self.build_structure(c1, s1, c2, s2)
    
    def build_structure(self, c1, s1, c2, s2):
        model = Sequential()
        model.add(Conv2D(c1, 5, activation="relu"))
        model.add(MaxPool2D(pool_size=(s1, s1)))
        model.add(Conv2D(c2, 5, activation="relu"))
        model.add(MaxPool2D(pool_size=(s2, s2)))
        model.add(Flatten())
        model.add(Dense(120, activation="relu"))
        model.add(Dense(84, activation="relu"))
        model.add(Dense(10, activation="softmax"))
        model.compile(loss='categorical_crossentropy', optimizer='adamax', metrics=["accuracy"])
        return model
    
    def train(self, x, y):
        start = datetime.now()
        self.model.fit(x, y, epochs=10)
        end = datetime.now()
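A hypothetical usage sketch for the class above, assuming the custom MNIST wrapper exposes flattened images and one-hot labels (mnist.train.images, mnist.train.labels) as in the later examples.

# Hypothetical usage; assumes (N, 784) images and one-hot labels
lenet = LeNet()
x_train = mnist.train.images.reshape(-1, 28, 28, 1)  # Conv2D expects NHWC input
lenet.train(x_train, mnist.train.labels)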
Example No. 14
# -*- coding: utf-8 -*-
import argparse
from datetime import datetime
import numpy as np
import tensorflow as tf
from sklearn import linear_model
from datasets.mnist import mnist as MNIST

mnist = MNIST(True)


def _weight_variable(shape):
    return tf.Variable(tf.random_normal(shape, stddev=0.1))


def _bias_variable(shape):
    return tf.Variable(tf.constant(0.1, shape=shape))


def _bp_network(units: int):
    x = tf.placeholder(dtype=tf.float32, shape=(None, 784))
    y = tf.placeholder(dtype=tf.int64, shape=(None, 10))
    weight = _weight_variable([784, units])
    bias = _bias_variable([units])
    hide = tf.sigmoid(tf.matmul(x, weight) + bias)
    weight = _weight_variable([units, 10])
    bias = _bias_variable([10])
    logits = tf.matmul(hide, weight) + bias
    output = tf.nn.softmax(logits)
    # softmax_cross_entropy_with_logits expects raw logits, not probabilities
    loss = tf.reduce_mean(
        tf.nn.softmax_cross_entropy_with_logits(labels=y, logits=logits))
    train = tf.train.GradientDescentOptimizer(0.5).minimize(loss)
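The snippet stops before the graph is run. A hypothetical TF1 driver loop is sketched below, assuming _bp_network is extended to return (x, y, train, loss) and that the custom MNIST wrapper exposes mnist.train.images / mnist.train.labels.

def run_bp(units=256, steps=1000, batch=100):
    # Hypothetical driver; assumes _bp_network returns its placeholders and ops
    x, y, train, loss = _bp_network(units)
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        images, labels = mnist.train.images, mnist.train.labels
        for i in range(steps):
            lo = (i * batch) % len(images)
            feed = {x: images[lo:lo + batch], y: labels[lo:lo + batch]}
            sess.run(train, feed_dict=feed)
            if i % 100 == 0:
                print(i, sess.run(loss, feed_dict=feed))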
Example No. 15
print(f"Logging to {log_path}")

# Dumping all script arguments
common.dump_params(join(log_path, 'config.cfg'), args)

# Set custom seed before doing anything
common.set_custom_seed(args.seed)

# Load dataset and create model
print(f"[Task: {task.upper()}]")
print(f"[Loss: {args.loss.upper()}]")
print('[Loading Dataset...]')
nfeat, nclass = 2, 10
config = common.get_config(args.loss, nfeat, nclass, task, args.margin)
model = MNISTNet(nfeat, loss_module=config.loss_module)
dataset = MNIST(args.path, args.batch_size)

dev = dataset.dev_partition()
train = dataset.training_partition()

print('[Dataset Loaded]')

# Train and evaluation plugins
test_callbacks = []
train_callbacks = []

# Logging configuration
if args.log_interval in range(1, 101):
    print(
        f"[Logging: {common.enabled_str(True)} (every {args.log_interval}%)]")
    test_callbacks.append(TestLogger(args.log_interval, dev.nbatches()))
Example No. 16
    parser.add_argument('--eager',
                        action='store_true',
                        default=False,
                        help='Whether to run in eager mode.')

    return parser.parse_args()


if __name__ == "__main__":
    os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'

    args = parse_args()
    tf.config.experimental_run_functions_eagerly(args.eager)

    dataset, meta_info = Dataset.create(args,
                                        batch_size=args.batch_size,
                                        train=False)

    Model.add_additional_args(args, meta_info)
    model = Model(args)

    evaluator = model.create_evaluator()

    evaluator.add_callbacks([
        callbacks.Checkpointer(args.root_dir + '/ckpt',
                               model.gen_ckpt_objs(),
                               is_training=False),
        callbacks.ModelArgsSaverLoader(model, False, args.root_dir),
        callbacks.TqdmProgressBar(args.epochs, len(dataset))
    ])
Example No. 17
# -*- coding: utf-8 -*-
import logging
from sklearn.svm import LinearSVC
from sklearn.linear_model import LinearRegression
from sklearn.utils import resample
import numpy as np
from datetime import datetime
from datasets.mnist import mnist as MNIST


mnist = MNIST(one_hot=False)


def sample(x: np.ndarray, y: np.ndarray, size: int):
    return resample(x, y, n_samples=size, replace=False)


def svm(x, y, test_x, test_y, size, mark=False):
    if not mark:
        clf = LinearSVC(loss="hinge")
        train_x, train_y = sample(x, y, size)
        start = datetime.now()
        clf.fit(train_x, train_y)
        end = datetime.now()
        acc = np.sum(clf.predict(test_x) == test_y) / len(test_y)
        return (end-start).total_seconds(), acc
    times = []
    accs = []
    for i in range(5):
        clf = LinearSVC(loss="hinge")
        train_x, train_y = sample(x, y, size)
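For reference, a hypothetical call to the timing helper above, assuming the custom MNIST wrapper exposes flat images and integer labels (mnist.train.images, mnist.test.labels) as in the decision-tree example.

# Hypothetical usage; the sample size of 1000 is illustrative
seconds, acc = svm(mnist.train.images, mnist.train.labels,
                   mnist.test.images, mnist.test.labels, size=1000)
print(seconds, acc)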
Example No. 18
# -*- coding: utf-8 -*-
import argparse
from datetime import datetime
import numpy as np
from sklearn import tree, linear_model
from datasets.mnist import mnist as MNIST


mnist = MNIST(False)


def log(*args, **kwargs):
    print(*args, **kwargs, sep='\t')
    with open("tree-log.txt", "a") as f:
        print(*args, **kwargs, file=f, sep='\t')


def cartree_classifier(max_depth):
    clf = tree.DecisionTreeClassifier(max_depth=max_depth)
    start = datetime.now()
    clf.fit(mnist.train.images, mnist.train.labels)
    end = datetime.now()
    acc = np.sum(clf.predict(mnist.test.images) == mnist.test.labels) / len(mnist.test.labels)
    return (end-start).total_seconds(), acc


def regression(x, y):
    x = np.array(x).reshape(-1, 1)
    y = np.array(y)
    reg = linear_model.LinearRegression()
    reg.fit(x, y)
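A hypothetical depth sweep using the helpers above; the grid of depths is illustrative only.

# Hypothetical sweep over tree depths, logging time and accuracy for each
depths = list(range(2, 21, 2))
for d in depths:
    seconds, acc = cartree_classifier(d)
    log(d, seconds, acc)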
Example No. 19
    parser.add_argument('--seed', type=int, default=18,
                        help='The seed for some random operations')
    parser.add_argument('--only_foreground', type=int, default=0,
                        help='If nonzero, colorize only the digit foreground')
    args = parser.parse_args()

    # Fix the seed to make the results reproducible
    seed = args.seed
    # random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)

    trans = ([transforms.ToTensor()])
    trans = transforms.Compose(trans)
    fulltrainset = MNIST(root=data_path, train=True, download=False, transform=trans)
    trainloader = torch.utils.data.DataLoader(fulltrainset, batch_size=2000, shuffle=False, num_workers=2,
                                              pin_memory=True)
    test_set = torchvision.datasets.MNIST(root=data_path, train=False, download=True, transform=trans)
    testloader = torch.utils.data.DataLoader(test_set, batch_size=2000, shuffle=False, num_workers=2, pin_memory=True)
    nb_classes = 10
    if args.only_foreground:
        dir_name = data_path + 'cmnist/' + 'fg_cmnist_cpr' + '-'.join(str(p) for p in args.cpr) + '/'
    else:
        dir_name = data_path + 'cmnist/' + 'fgbg_cmnist_cpr' + '-'.join(str(p) for p in args.cpr) + '/'
    print(dir_name)
    if not os.path.exists(data_path + 'cmnist/'):
        os.mkdir(data_path + 'cmnist/')
    if not os.path.exists(dir_name):
        os.mkdir(dir_name)