Example 1
def train() -> None:
    dataset_train = MNISTDataset("./data",
                                 train=True,
                                 batch_size=32,
                                 transform=None)

    model = Net()
    loss_fn = ts.nn.CrossEntropyLoss()
    optimizer = ts.libtensor.Adagrad(model.parameters(), 0.01)
    saver = ts.libtensor.Saver(model)

    time_start = time.time()
    for batch_idx, (data, target) in enumerate(dataset_train):
        # Wrap the raw batch in autograd Variables
        x = Variable(data)
        y = Variable(target)

        # Forward pass, backprop, and parameter update
        output = model.forward(x)
        loss = loss_fn(output, y)
        loss.backward()
        optimizer.step()
        optimizer.zero_gradients()

        if batch_idx != 0 and batch_idx % 10 == 0:
            print("Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}".format(
                1, batch_idx * dataset_train.batch_size,
                dataset_train.example_num,
                100.0 * batch_idx / len(dataset_train), loss.value[0]))
    time_end = time.time()
    print(f"Training time: {time_end - time_start}")

    saver.save(MODEL_SAVE_NAME)
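For completeness, a hypothetical entry point for the snippet above; MODEL_SAVE_NAME lives elsewhere in the original project, so the value below is purely illustrative:

MODEL_SAVE_NAME = "mnist.model"  # hypothetical value; the real constant is defined elsewhere

if __name__ == "__main__":
    train()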
Example 2
def get_dataset():
    datasets = dict()
    trainval_dataset = MNISTDataset(path='data/train.csv', is_train=True)
    train_ratio = 0.8
    train_size = int(train_ratio * len(trainval_dataset))
    val_size = len(trainval_dataset) - train_size
    datasets['train'], datasets['val'] = data.random_split(
        trainval_dataset, [train_size, val_size])

    datasets['test'] = MNISTDataset(path='data/test.csv', is_train=False)

    # Note that shuffle=True is applied to the val and test loaders as well.
    dataloaders = {
        split: data.DataLoader(dataset, batch_size=4, shuffle=True)
        for split, dataset in datasets.items()
    }

    return dataloaders
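The returned dict maps split names to loaders; a sketch of how a caller might consume it (assuming each batch unpacks to images and labels, which depends on MNISTDataset.__getitem__):

dataloaders = get_dataset()
for images, labels in dataloaders['train']:
    pass  # one optimization step per batch would go here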
Example 3
def main(args):
    # Seed PyTorch's RNG for reproducibility
    torch.manual_seed(args.random_seed)

    # Build the train and test loaders
    trainloader, testloader = MNISTDataset(args.batch_size, args.num_workers)

    # Set model parameters
    model_params = {
        "batch_size": args.batch_size,
        "layer_sizes": args.layer_sizes,
        "learning_rates": args.learning_rates,
        "free_iters": args.free_iters,
        "clamped_iters": args.clamped_iters,
        "beta": args.beta,
        "dt": args.dt,
    }

    # Define network
    if args.graph:
        eqpropnet = EqPropGraph(*create_ffn_graph(args.layer_sizes))
    elif args.spiking:
        net_cls = EqPropSpikingNet if not args.no_grad else EqPropSpikingNet_NoGrad
        eqpropnet = net_cls(**model_params)
    elif args.continual:
        net_cls = ContEqPropNet if not args.no_grad else ContEqPropNet_NoGrad
        eqpropnet = net_cls(**model_params)
    else:
        net_cls = EqPropNet if not args.no_grad else EqPropNet_NoGrad
        eqpropnet = net_cls(**model_params)

    if args.load_model and not args.graph:
        eqpropnet.load_parameters(args.load_model)

    # Train
    train(trainloader,
          eqpropnet,
          num_epochs=args.num_epochs,
          report_interval=args.report_interval,
          save_interval=args.save_interval)

    # Evaluate on the held-out test set
    test(testloader, eqpropnet, report_interval=args.report_interval)
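main() expects an argparse-style namespace; a sketch reconstructing the flags it reads, with names taken from the attribute accesses above and defaults that are purely illustrative:

import argparse

def get_args():
    parser = argparse.ArgumentParser()
    parser.add_argument("--random_seed", type=int, default=0)
    parser.add_argument("--batch_size", type=int, default=32)
    parser.add_argument("--num_workers", type=int, default=2)
    parser.add_argument("--layer_sizes", type=int, nargs="+", default=[784, 500, 10])
    parser.add_argument("--learning_rates", type=float, nargs="+", default=[0.1, 0.05])
    parser.add_argument("--free_iters", type=int, default=20)
    parser.add_argument("--clamped_iters", type=int, default=4)
    parser.add_argument("--beta", type=float, default=1.0)
    parser.add_argument("--dt", type=float, default=0.5)
    parser.add_argument("--graph", action="store_true")
    parser.add_argument("--spiking", action="store_true")
    parser.add_argument("--continual", action="store_true")
    parser.add_argument("--no_grad", action="store_true")
    parser.add_argument("--load_model", default=None)
    parser.add_argument("--num_epochs", type=int, default=10)
    parser.add_argument("--report_interval", type=int, default=100)
    parser.add_argument("--save_interval", type=int, default=1000)
    return parser.parse_args()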
Example 4
def eval() -> None:
    dataset = MNISTDataset("./data",
                           train=False,
                           batch_size=32,
                           transform=None)
    model = Net()
    saver = ts.libtensor.Saver(model)
    saver.load(MODEL_SAVE_NAME)

    correct = 0
    for data, target in tqdm(dataset):
        x = Variable(data)
        y = Variable(target)
        output = model.forward(x)
        # Predicted class per sample, compared against the labels
        pred = ts.argmax(output.value)
        correct += np.sum(pred.numpy == y.value.numpy)

    print('\nTest set: Accuracy: {}/{} ({:.0f}%)\n'.format(
        correct, dataset.example_num, 100. * correct / dataset.example_num))
Example 5
def main(opt):
    opt = dot_opt(opt)
    ctx = try_gpu()  # pick a GPU if available, otherwise CPU

    # datasets
    mnist_train_dataset = MNISTDataset(train=True, transform=transform)
    assert mnist_train_dataset

    dataloader = DataLoader(mnist_train_dataset, batch_size=opt.batch_size,
                            shuffle=True, last_batch='discard',
                            pin_memory=True, num_workers=opt.num_workers)
    print("Data ready...")

    # Conditional GAN: G and D
    netG = ConditionalG(opt)
    netD = ConditionalD(opt)

    trainer = CGANTrainer(opt, dataloader, netG=netG, netD=netD)
    trainer.train()
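try_gpu is not shown; the Gluon-style DataLoader arguments suggest an MXNet backend, under which a common implementation of the helper looks like this (a sketch, not the original):

import mxnet as mx

def try_gpu():
    # Return gpu(0) if one is usable, otherwise fall back to cpu().
    try:
        ctx = mx.gpu()
        _ = mx.nd.zeros((1,), ctx=ctx)
    except mx.base.MXNetError:
        ctx = mx.cpu()
    return ctx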
Example 6
import pytorch_lightning as pl
from pytorch_lightning.loggers import WandbLogger

from model import Net
from dataset import MNISTDataset

if __name__ == "__main__":
    model = Net()
    dm = MNISTDataset()

    # Create logger
    LOG = True
    if LOG:
        logger = WandbLogger(project="MNIST_Lightning_V2")
        logger.watch(model, log='all', log_freq=100)
    else:
        logger = None

    trainer = pl.Trainer(max_epochs=50, logger=logger)

    trainer.fit(model, dm)
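trainer.fit(model, dm) treats dm as a LightningDataModule, so this project's MNISTDataset presumably implements the standard hooks; a skeletal sketch of that interface (hook names are Lightning's, class name and bodies are placeholders):

import pytorch_lightning as pl
from torch.utils.data import DataLoader

class MNISTDataModuleSketch(pl.LightningDataModule):
    def setup(self, stage=None):
        self.train_set = ...  # build the train dataset here
        self.val_set = ...    # build the val dataset here

    def train_dataloader(self):
        return DataLoader(self.train_set, batch_size=32, shuffle=True)

    def val_dataloader(self):
        return DataLoader(self.val_set, batch_size=32)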
Example 7
def main():
    parser = ArgParser()
    args = parser.parse_args()

    gen = Generator(args.latent_dim).to(args.device)
    disc = Discriminator().to(args.device)
    if args.device != 'cpu':
        gen = nn.DataParallel(gen, args.gpu_ids)
        disc = nn.DataParallel(disc, args.gpu_ids)
    # gen = gen.apply(weights_init)
    # disc = disc.apply(weights_init)

    gen_opt = torch.optim.RMSprop(gen.parameters(), lr=args.lr)
    disc_opt = torch.optim.RMSprop(disc.parameters(), lr=args.lr)
    gen_scheduler = torch.optim.lr_scheduler.LambdaLR(
        gen_opt, lr_lambda=lr_lambda(args.num_epochs))
    disc_scheduler = torch.optim.lr_scheduler.LambdaLR(
        disc_opt, lr_lambda=lr_lambda(args.num_epochs))
    disc_loss_fn = DiscriminatorLoss().to(args.device)
    gen_loss_fn = GeneratorLoss().to(args.device)

    # dataset = Dataset()
    dataset = MNISTDataset()
    loader = DataLoader(dataset, batch_size=args.batch_size, num_workers=args.num_workers)

    logger = TrainLogger(args, len(loader), phase=None)
    logger.log_hparams(args)

    if args.privacy_noise_multiplier != 0:
        privacy_engine = PrivacyEngine(
            disc,
            batch_size=args.batch_size,
            sample_size=len(dataset),
            alphas=[1 + x / 10.0 for x in range(1, 100)] + list(range(12, 64)),
            noise_multiplier=args.privacy_noise_multiplier,
            max_grad_norm=0.02,
            batch_first=True,
        )
        privacy_engine.attach(disc_opt)
        privacy_engine.to(args.device)

    for epoch in range(args.num_epochs):
        logger.start_epoch()
        for cur_step, img in enumerate(tqdm(loader, dynamic_ncols=True)):
            logger.start_iter()
            img = img.to(args.device)
            fake, disc_loss = None, None
            for _ in range(args.step_train_discriminator):
                disc_opt.zero_grad()
                fake_noise = get_noise(args.batch_size, args.latent_dim, device=args.device)
                fake = gen(fake_noise)
                disc_loss = disc_loss_fn(img, fake, disc)
                disc_loss.backward()
                disc_opt.step()

            gen_opt.zero_grad()
            fake_noise_2 = get_noise(args.batch_size, args.latent_dim, device=args.device)
            fake_2 = gen(fake_noise_2)
            gen_loss = gen_loss_fn(img, fake_2, disc)
            gen_loss.backward()
            gen_opt.step()
            if args.privacy_noise_multiplier != 0:
                epsilon, best_alpha = privacy_engine.get_privacy_spent(args.privacy_delta)

            logger.log_iter_gan_from_latent_vector(
                img, fake, gen_loss, disc_loss,
                epsilon if args.privacy_noise_multiplier != 0 else 0)
            logger.end_iter()

        logger.end_epoch()
        gen_scheduler.step()
        disc_scheduler.step()
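get_noise and lr_lambda are project helpers; implementations consistent with how they are called above would be (illustrative sketches, not the originals):

import torch

def get_noise(n_samples, latent_dim, device="cpu"):
    # One standard-normal latent vector per sample.
    return torch.randn(n_samples, latent_dim, device=device)

def lr_lambda(num_epochs):
    # LambdaLR expects a callable mapping epoch -> LR multiplier;
    # this one decays the rate linearly to zero over training.
    return lambda epoch: 1.0 - epoch / num_epochs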
Example 8
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt

from dataset import MNISTDataset
from model import *

from scipy.spatial.distance import cdist
from matplotlib import gridspec

dataset = MNISTDataset()
train_images = dataset.images_train[:20000]
test_images = dataset.images_test
len_train = len(train_images)
len_test = len(test_images)


# Helper function to plot one or more images side by side
def show_image(idxs, data):
    if not isinstance(idxs, np.ndarray):
        idxs = np.array([idxs])
    fig = plt.figure()
    gs = gridspec.GridSpec(1, len(idxs))
    for i in range(len(idxs)):
        ax = fig.add_subplot(gs[0, i])
        ax.imshow(data[idxs[i], :, :, 0])
        ax.axis('off')
    plt.show()


img_placeholder = tf.placeholder(tf.float32, [None, 28, 28, 1], name='img')
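The snippet ends at the TF1-style input placeholder; given the cdist import, the remainder presumably embeds images and retrieves nearest neighbours. A continuation under that assumption (the model function and checkpoint path are hypothetical):

net = mnist_model(img_placeholder)  # hypothetical name pulled in by `from model import *`

with tf.Session() as sess:
    saver = tf.train.Saver()
    saver.restore(sess, "model/model.ckpt")  # hypothetical checkpoint path
    train_embeds = sess.run(net, feed_dict={img_placeholder: train_images})
    test_embeds = sess.run(net, feed_dict={img_placeholder: test_images[:10]})

# Pairwise distances between test and train embeddings; argmin gives
# the index of each test image's nearest training neighbour.
dists = cdist(test_embeds, train_embeds)
nearest = np.argmin(dists, axis=1)
show_image(nearest[0], train_images)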