Example No. 1
def main(config):
    # Select the compute device
    if config.gpu_id < 0:
        print("Device: CPU")
        device = torch.device('cpu')
    else:
        print("Device:", torch.cuda.get_device_name(0))
        device = torch.device('cuda:%d' % config.gpu_id)

    # Load the training data
    train_x, train_y = load_mnist(is_train=True, flatten=True)

    # Split into train / validation sets according to train_ratio
    train_cnt = int(train_x.size(0) * config.train_ratio)
    valid_cnt = train_x.size(0) - train_cnt
    indices = torch.randperm(train_x.size(0))
    train_x, valid_x = torch.index_select(
        train_x, dim=0, index=indices).to(device).split([train_cnt, valid_cnt],
                                                        dim=0)
    train_y, valid_y = torch.index_select(
        train_y, dim=0, index=indices).to(device).split([train_cnt, valid_cnt],
                                                        dim=0)

    print("Train:", train_x.shape, train_y.shape)
    print("Valid:", valid_x.shape, valid_y.shape)

    # Build the model and train
    model = Autoencoder(btl_size=config.btl_size).to(device)
    optimizer = optim.Adam(model.parameters())
    crit = nn.MSELoss()

    trainer = Trainer(model, optimizer, crit)
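    # An autoencoder reconstructs its input, so x serves as both the input and the target below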
    trainer.train((train_x, train_x), (valid_x, valid_x), config)

    # Save the model
    torch.save({
        'model': trainer.model.state_dict(),
        'config': config
    }, config.model_fn)
Example No. 2
def main():
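    # Note: transforms.Rescale is not a torchvision transform; it is presumably a
    # project-specific transform (torchvision itself provides Resize)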
    

    trainset = Autoencoder_dataset(True ,root,transforms=transforms.Compose([
        transforms.Rescale(576,288),
        transforms.ToTensor(),
        transforms.Normalize(mean = [0.485, 0.456, 0.406],
                            std = [0.229, 0.224, 0.225])
    ]))
    train_loader = DataLoader(trainset, batch_size=batch_size, shuffle=True)

    valset = Autoencoder_dataset(False ,root,transforms=transforms.Compose([
        transforms.Rescale(576,288),
        transforms.ToTensor(),
        transforms.Normalize(mean = [0.485, 0.456, 0.406],
                            std = [0.229, 0.224, 0.225])
    ]))
    val_loader = DataLoader(valset, batch_size=batch_size)


    model = Autoencoder().cuda()
    criterion = nn.MSELoss()
    optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate,
                                weight_decay=1e-5)
Example No. 3
def main():
    parser = argparse.ArgumentParser(
        description='Simple training script for training model')

    parser.add_argument(
        '--epochs', help='Number of epochs (default: 75)', type=int, default=75)
    parser.add_argument(
        '--batch-size', help='Batch size of the data (default: 16)', type=int, default=16)
    parser.add_argument(
        '--learning-rate', help='Learning rate (default: 0.001)', type=float, default=0.001)
    parser.add_argument(
        '--seed', help='Random seed (default:1)', type=int, default=1)
    parser.add_argument(
        '--data-path', help='Path for the downloaded dataset (default: ../dataset/)', default='../dataset/')
    parser.add_argument(
        '--dataset', help='Dataset name. Must be one of MNIST, STL10, CIFAR10')
    parser.add_argument(
        '--use-cuda', help='CUDA usage (default: False)', type=bool, default=False)
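    # Note: argparse's type=bool treats any non-empty string (including 'False') as True;
    # action='store_true' is the usual way to expose boolean flags.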
    parser.add_argument(
        '--network-type', help='Type of the network layers. Must be one of Conv, FC (default: FC)', default='FC')
    parser.add_argument(
        '--weight-decay', help='weight decay (L2 penalty) (default: 1e-5)', type=float, default=1e-5)
    parser.add_argument(
        '--log-interval', help='No of batches to wait before logging training status (default: 50)', type=int, default=50)
    parser.add_argument(
        '--save-model', help='For saving the current model (default: True)', type=bool, default=True)

    args = parser.parse_args()

    epochs = args.epochs  # number of epochs
    batch_size = args.batch_size  # batch size
    learning_rate = args.learning_rate  # learning rate
    torch.manual_seed(args.seed)  # seed value

    # Creating dataset path if it doesn't exist
    if args.data_path is None:
        raise ValueError('Must provide dataset path')
    else:
        data_path = args.data_path
        if not os.path.isdir(data_path):
            os.mkdir(data_path)

    # Downloading proper dataset and creating data loader
    if args.dataset == 'MNIST':
        T = transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize((0.1307,), (0.3081,))
        ])

        train_data = torchvision.datasets.MNIST(
            data_path, train=True, download=True, transform=T)
        test_data = torchvision.datasets.MNIST(
            data_path, train=False, download=True, transform=T)

        ip_dim = 1 * 28 * 28  # input dimension
        h1_dim = int(ip_dim / 2)  # hidden layer 1 dimension
        op_dim = int(ip_dim / 4)  # output dimension
    elif args.dataset == 'STL10':
        T = transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
        ])

        train_data = torchvision.datasets.STL10(
            data_path, split='train', download=True, transform=T)
        test_data = torchvision.datasets.STL10(
            data_path, split='test', download=True, transform=T)

        ip_dim = 3 * 96 * 96  # input dimension
        h1_dim = int(ip_dim / 2)  # hidden layer 1 dimension
        op_dim = int(ip_dim / 4)  # output dimension
    elif args.dataset == 'CIFAR10':
        T = transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
        ])

        train_data = torchvision.datasets.CIFAR10(
            data_path, train=True, download=True, transform=T)
        test_data = torchvision.datasets.CIFAR10(
            data_path, train=False, download=True, transform=T)

        ip_dim = 3 * 32 * 32  # input dimension
        h1_dim = int(ip_dim / 2)  # hidden layer 1 dimension
        op_dim = int(ip_dim / 4)  # output dimension
    elif args.dataset is None:
        raise ValueError('Must provide dataset')
    else:
        raise ValueError('Dataset name must be MNIST, STL10 or CIFAR10')

    train_loader = DataLoader(train_data, batch_size=batch_size, shuffle=True)
    test_loader = DataLoader(test_data, batch_size=batch_size, shuffle=False)

    # use CUDA or not
    device = 'cpu'
    if args.use_cuda is False:
        if torch.cuda.is_available():
            warnings.warn(
                'CUDA is available, please use for faster convergence')
        else:
            device = 'cpu'
    else:
        if torch.cuda.is_available():
            device = 'cuda'
        else:
            raise ValueError('CUDA is not available, please set it False')

    # Type of layer
    if args.network_type == 'FC':
        auto_encoder = Autoencoder(ip_dim, h1_dim, op_dim).to(device)
    elif args.network_type == 'Conv':
        auto_encoder = ConvolutionAE().to(device)
    else:
        raise ValueError('Network type must be either FC or Conv type')

    # Train the model
    auto_encoder.train()
    criterion = torch.nn.MSELoss()
    optimizer = torch.optim.Adam(
        lr=learning_rate, params=auto_encoder.parameters(), weight_decay=args.weight_decay)

    for n_epoch in range(epochs):  # loop over the dataset multiple times
        reconstruction_loss = 0.0
        for batch_idx, (X, Y) in enumerate(train_loader):
            X = X.view(X.size()[0], -1)
            X = Variable(X).to(device)

            encoded, decoded = auto_encoder(X)

            optimizer.zero_grad()
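            # nn.MSELoss is symmetric, so criterion(X, decoded) equals criterion(decoded, X)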
            loss = criterion(X, decoded)
            loss.backward()
            optimizer.step()

            reconstruction_loss += loss.item()
            if (batch_idx + 1) % args.log_interval == 0:
                print('[%d, %5d] Reconstruction loss: %.5f' %
                      (n_epoch + 1, batch_idx + 1, reconstruction_loss / args.log_interval))
                reconstruction_loss = 0.0
    if args.save_model:
        torch.save(auto_encoder.state_dict(), "Autoencoder.pth")

    # Save real images (ensure the output directory exists)
    os.makedirs('images', exist_ok=True)
    data_iter = iter(test_loader)
    images, labels = next(data_iter)
    torchvision.utils.save_image(torchvision.utils.make_grid(
        images, nrow=4), 'images/actual_img.jpeg')

    # Load trained model and get decoded images
    auto_encoder.load_state_dict(torch.load('Autoencoder.pth'))
    auto_encoder.eval()
    images = images.view(images.size()[0], -1)
    images = Variable(images).to(device)
    encoded, decoded = auto_encoder(images)

    # Save decoded images
    if args.dataset == 'MNIST':
        decoded = decoded.view(decoded.size()[0], 1, 28, 28)
    elif args.dataset == 'STL10':
        decoded = decoded.view(decoded.size()[0], 3, 96, 96)
    elif args.dataset == 'CIFAR10':
        decoded = decoded.view(decoded.size()[0], 3, 32, 32)
    torchvision.utils.save_image(torchvision.utils.make_grid(
        decoded, nrow=4), 'images/decoded_img.jpeg')
Example No. 4
def train():
    ae = Autoencoder()
    # load trained model
    # model_path = ''
    # g.load_state_dict(torch.load(model_path))

    criterion = torch.nn.MSELoss()
    optimizer = optim.Adam(ae.parameters(), lr=opt.lr, weight_decay=opt.decay)

    # load dataset
    # ==========================
    kwargs = dict(num_workers=1, pin_memory=True) if cuda else {}
    dataloader = DataLoader(
        datasets.MNIST('MNIST', download=True,
                       transform=transforms.Compose([
                           transforms.ToTensor()
                       ])),
        batch_size=opt.batch_size, shuffle=True, **kwargs
    )
    N = len(dataloader)

    # get sample batch
    dataiter = iter(dataloader)
    samples, _ = next(dataiter)
    # cuda
    if cuda:
        ae.cuda()
        criterion.cuda()
        samples = samples.cuda()
    samples = Variable(samples)

    if opt.history:
        loss_history = np.empty(N*opt.epochs, dtype=np.float32)
    # train
    # ==========================
    for epoch in range(opt.epochs):
        loss_mean = 0.0
        for i, (imgs, _) in enumerate(dataloader):
            if cuda:
                imgs = imgs.cuda()
            imgs = Variable(imgs)

            # forward & backward & update params
            ae.zero_grad()
            _, outputs = ae(imgs)
            loss = criterion(outputs, imgs)
            loss.backward()
            optimizer.step()

            loss_mean += loss.item()
            if opt.history:
                loss_history[N*epoch + i] = loss.item()
            show_progress(epoch+1, i+1, N, loss.item())

        print('\ttotal loss (mean): %f' % (loss_mean/N))
        # generate fake images
        _, reconst = ae(samples)
        vutils.save_image(reconst.data,
                          os.path.join(IMAGE_PATH,'%d.png' % (epoch+1)),
                          normalize=False)
    # save models
    torch.save(ae.state_dict(), MODEL_FULLPATH)
    # save loss history
    if opt.history:
        np.save('history/'+opt.name, loss_history)
Example No. 5
def main(args):
    # Create model directory
    if not os.path.exists(args.model_path):
        os.makedirs(args.model_path)

    # Image preprocessing, normalization for the pretrained resnet
    transform = transforms.Compose([
        transforms.RandomCrop(args.crop_size),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))
    ])

    # Load vocabulary wrapper
    with open(args.vocab_path, 'rb') as f:
        vocab = pickle.load(f)

    # Build data loader
    data_loader = get_loader(args.image_dir,
                             args.caption_path,
                             vocab,
                             transform,
                             args.batch_size,
                             shuffle=True,
                             num_workers=args.num_workers)

    # Build the models
    encoder = EncoderCNN(args.embed_size).to(device)
    autoencoder = Autoencoder(args.embed_size, args.embeddings_path,
                              args.hidden_size, len(vocab),
                              args.num_layers).to(device)
    print(len(vocab))

    # optimizer
    params = list(
        filter(
            lambda p: p.requires_grad,
            list(autoencoder.parameters())[1:] +
            list(encoder.linear.parameters())))
    # print(params)
    optimizer = torch.optim.Adam(params, lr=args.learning_rate)

    # Define summary writer
    writer = SummaryWriter()

    # Loss tracker
    best_loss = float('inf')

    # Train the models
    total_step = len(data_loader)
    for epoch in range(args.num_epochs):
        for i, (images, captions, lengths) in enumerate(data_loader):
            # print(captions)
            # Set mini-batch dataset
            images = images.to(device)
            captions = captions.to(device)
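            # pack_padded_sequence(...)[0] is the PackedSequence's data tensor:
            # all non-padded caption tokens concatenated together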
            targets = pack_padded_sequence(captions, lengths,
                                           batch_first=True)[0]

            # Forward, backward and optimize
            features = encoder(images)
            L_ling, L_vis = autoencoder(features, captions, lengths)
            loss = 0.2 * L_ling + 0.8 * L_vis  # Want visual loss to have bigger impact
            autoencoder.zero_grad()
            encoder.zero_grad()
            loss.backward()
            optimizer.step()

            # Save the model checkpoints when loss improves
            if loss.item() < best_loss:
                best_loss = loss
                print("Saving checkpoints")
                torch.save(
                    autoencoder.state_dict(),
                    os.path.join(
                        args.model_path, 'autoencoder-frozen-best.ckpt'.format(
                            epoch + 1, i + 1)))
                torch.save(
                    encoder.state_dict(),
                    os.path.join(
                        args.model_path,
                        'encoder-frozen-best.ckpt'.format(epoch + 1, i + 1)))

            # Print log info
            if i % args.log_step == 0:
                print(
                    'Epoch [{}/{}], Step [{}/{}], Loss: {:.4f}, Perplexity: {:5.4f}'
                    .format(epoch, args.num_epochs, i, total_step, loss.item(),
                            np.exp(loss.item())))
                # Log train loss on tensorboard
                writer.add_scalar('frozen-loss/L_ling', L_ling.item(),
                                  epoch * total_step + i)
                writer.add_scalar('frozen-loss/L_vis', L_vis.item(),
                                  epoch * total_step + i)
                writer.add_scalar('frozen-loss/combined', loss.item(),
                                  epoch * total_step + i)

            # Save the model checkpoints
            if (i + 1) % args.save_step == 0:
                torch.save(
                    autoencoder.state_dict(),
                    os.path.join(
                        args.model_path,
                        'autoencoder-frozen-{}-{}.ckpt'.format(
                            epoch + 1, i + 1)))
                torch.save(
                    encoder.state_dict(),
                    os.path.join(
                        args.model_path,
                        'encoder-frozen-{}-{}.ckpt'.format(epoch + 1, i + 1)))
Example No. 6
if __name__ == "__main__":
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    dataset = FaceDataset(data_path)
    data_loader = torch.utils.data.DataLoader(dataset,
                                              batch_size=batch_size,
                                              shuffle=True)

    num_points = dataset.num_points()

    model = Autoencoder(num_points)
    if pretrained_model is not None:
        model.load_state_dict(torch.load(pretrained_model))
    model = model.to(device)
    loss_fn = MSELoss()
    optimizer = torch.optim.Adamax(model.parameters(),
                                   lr=learning_rate,
                                   eps=1e-7)

    for epoch in range(epochs):
        for batch_id, x in enumerate(data_loader):
            x = x.to(device)
            x_hat = model(x)
            loss = loss_fn(x_hat, x)

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

        print("{}: {}".format(epoch, criterion.item()))
Example No. 7
def main(**kwargs):
    """
	Main function that trains the model.
	1. Retrieve arguments from kwargs
	2. Prepare MNIST
	3. Train
	4. Display first batch of test set
	
	Args:
		add_noise: Whether to add noise (DAE) to input image or not (AE).
		binarize_input: Whether to binarize input image pixels to 0 and 1.
		epochs: How many epochs to train model.
		loss: Which loss function to use. Either cross-entropy or mean square error.
		lr: Learning rate.
		latent_dim: Dimension of latent variable.
		print_every: How often to print training progress.
	"""
    # Retrieve arguments
    add_noise = kwargs.get('add_noise', defaults['add_noise'])
    binarize_input = kwargs.get('binarize_input', defaults['binarize_input'])
    epochs = kwargs.get('epochs', defaults['epochs'])
    loss = kwargs.get('loss', defaults['loss'])
    lr = kwargs.get('learning_rate', defaults['learning_rate'])
    latent_dim = kwargs.get('latent_dim', defaults['latent_dim'])
    print_every = kwargs.get('print_every', defaults['print_every'])

    # Load and transform MNIST dataset
    if binarize_input:
        trsf = transforms.Compose([
            transforms.ToTensor(),
            transforms.Lambda(lambda x: (x >= 0.5).float())
        ])
    else:
        trsf = transforms.ToTensor()
    MNIST_train = datasets.MNIST(root='MNIST',
                                 train=True,
                                 transform=trsf,
                                 download=True)
    MNIST_test = datasets.MNIST(root='MNIST',
                                train=False,
                                transform=trsf,
                                download=True)

    # Create dataloader
    train_loader = torch.utils.data.DataLoader(MNIST_train,
                                               batch_size=64,
                                               shuffle=True)
    test_loader = torch.utils.data.DataLoader(MNIST_test,
                                              batch_size=64,
                                              shuffle=False)

    # Create model and optimizer
    autoencoder = Autoencoder(latent_dim=latent_dim).to(device)
    optimizer = optim.Adam(autoencoder.parameters(), lr=lr)

    # Select loss function
    criterion = CE_criterion if loss == 'CE' else MSE_criterion

    # Train
    autoencoder.train()
    for epoch in range(epochs):
        for batch_ind, (input_data, _) in enumerate(train_loader):
            input_data = input_data.to(device)

            # Forward propagation
            if add_noise:
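                # F.dropout zeroes each pixel with probability p and rescales the rest by 1/(1-p)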
                noised_input_data = F.dropout(input_data, p=0.5)
                output = autoencoder(noised_input_data)
            else:
                output = autoencoder(input_data)

            # Calculate loss
            loss = criterion(output, input_data)

            # Backward propagation
            optimizer.zero_grad()
            loss.backward()

            # Update parameters
            optimizer.step()

            # Print progress
            if batch_ind % print_every == 0:
                train_log = 'Epoch {:2d}/{:2d}\tLoss: {:.6f}\tTrain: [{}/{} ({:.0f}%)]      '.format(
                    epoch + 1, epochs,
                    loss.cpu().item(), batch_ind + 1, len(train_loader),
                    100. * batch_ind / len(train_loader))
                print(train_log, end='\r')
                sys.stdout.flush()

        # Learning rate decay
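        # (constructing a new optimizer also resets Adam's running moment estimates)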
        if epoch == 4:
            optimizer = optim.Adam(autoencoder.parameters(), lr=lr / 10)

    # Display training result with test set
    with torch.no_grad():
        images, _ = next(iter(test_loader))
        images = images.to(device)

        if add_noise:
            noise_images = F.dropout(images, p=0.5)
            output_from_noised = autoencoder(noise_images)
            output_from_clean = autoencoder(images)
            display_batch("Binarized truth" if binarize_input else "Truth",
                          images, binarize_input)
            display_batch("Truth with noise", noise_images, binarize_input)
            display_batch("Reconstruction of noised image", output_from_noised,
                          binarize_input)
            display_batch("Reconstruction of clean image", output_from_clean,
                          binarize_input)
        else:
            output = autoencoder(images)
            display_batch("Binarized truth" if binarize_input else "Truth",
                          images, binarize_input)
            display_batch("Reconstruction", output, binarize_input)
Example No. 8
    elif method == 'LLE':
        data_transformed = myLocallyLinearEmbedding(
            n_components=d, eigen_solver='auto').fit_transform(adj_mfd, data)

    elif method == 'LTSA':
        data_transformed = myLocallyLinearEmbedding(
            n_components=d, eigen_solver='auto',
            method='ltsa').fit_transform(adj_mfd, data)

    elif method == 'AE':

        data_torch = torch.from_numpy(data).float()

        AE = Autoencoder(D, d)
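        # D is presumably the ambient (input) dimension and d the target embedding dimension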
        loss_fn = nn.MSELoss()
        optimizer = torch.optim.Adam(AE.parameters(), lr=0.1)

        epochs = 1000
        for i in range(epochs):
            optimizer.zero_grad()

            output = AE(data_torch)
            loss = loss_fn(output, data_torch)

            loss.backward()
            optimizer.step()

            if i % 100 == 0:
                print('Epoch {}: {:.4f}'.format(i, loss.item()))

        AE.eval()
Example No. 9
train_cnt = int(x_train.size(0) * config.train_ratio)
valid_cnt = x_train.size(0) - train_cnt

# Shuffle dataset (Train - Valid)
index = torch.randperm(x_train.size(0))
x_train, x_valid = torch.index_select(x_train, dim=0, index=index).split([train_cnt, valid_cnt], dim=0)
y_train, y_valid = torch.index_select(y_train, dim=0, index=index).split([train_cnt, valid_cnt], dim=0)

print("Train: ", x_train.shape, y_train.shape)
print("Valid: ", x_valid.shape, y_valid.shape)
print("Test: ", x_test.shape, y_test.shape)

# Model Object & Optimizer, Criterion Settings
model = Autoencoder(btl_size=config.btl_size)
optimizer = optim.Adam(model.parameters())
criterion = nn.MSELoss()

# Model Train
trainer = Trainer(model, optimizer, criterion)
trainer.train((x_train, x_train), (x_valid, x_valid), config)  # encoder-decoder structure, so only x is used, as both input and target

# Model Test
with torch.no_grad():
    import random

    idx = int(random.random() * x_test.size(0))

    recon = model(x_test[idx].view(1, -1)).squeeze()

    show_image(x_test[idx])
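    # (the snippet ends here; presumably the reconstruction is displayed as well)
    show_image(recon)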
Example No. 10
                                      shuffle=shuffle)
    stream = data_io.threaded(stream, queue_size=5)
    return stream


if __name__ == "__main__":
    directory = sys.argv[1]
    filenames = [directory + "/%05d_batched.pkl.gz" % i for i in range(9000)]
    # print(filenames)
    train_count = int(len(filenames) * 0.9)
    train_filenames = filenames[:train_count]
    valid_filenames = filenames[train_count:]

    model = Autoencoder(0, 1)
    # valid_data = torch.from_numpy(signal_data_valid).cuda()[:, None, :]
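    # Xavier-initialize weight matrices (dim > 1); biases keep their default initialization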
    for p in model.parameters():
        if p.dim() > 1:
            torch.nn.init.xavier_uniform_(p)
    # model = torch.load('model.pt')
    model = model.cuda()

    parameters = model.parameters()
    optimizer = optim.Adam(parameters, lr=1e-3)  # , weight_decay=1e-6)
    # optimizer = optim.SGD(parameters, lr=0.05, momentum=0.999)
    scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer,
                                                     mode='min',
                                                     factor=0.5,
                                                     patience=10,
                                                     verbose=True,
                                                     threshold=0.0001,
                                                     threshold_mode='rel',
Example No. 11
# # plt.figure(dpi=500)
# # librosa.display.specshow(test_mtx)
# # plt.savefig('./test_mtx.pdf')

# # # org
# # file_name = Info.test_path+Info.test_arr[0]
# # org = np.load(Info.test_path+Info.test_arr[0])
# # org = librosa.amplitude_to_db(org, ref=1.0)
# # plt.figure(dpi=500)
# # librosa.display.specshow(org)
# # plt.savefig('./org.pdf')
# ######################################Test End##################################

model = Autoencoder().cpu()
distance = nn.MSELoss()
optimizer = torch.optim.Adam(model.parameters(), weight_decay=1e-5)
num_epochs = Info.num_epochs
print(model)

####################################Training####################################
print("Start training {} epochs.".format(num_epochs))
loss_arr=[]
embed_mtx=[]
decode_mtx=[]

for epoch in range(num_epochs):
    for data in train_loader:
        data = Variable(data).cpu()
        _, output = model(data)
        # if epoch == num_epochs-1: #get decoded graph
        #     o = output.data.numpy()
Example No. 12
        w2 = w2.cuda()
        loss = l1+bce+w2
        
        return {'loss': loss, 'bce': bce, 'l1': l1, 'w2': w2, 'encode': z, 'decode': recon_x}



mnist = torch.utils.data.DataLoader(datasets.MNIST("./mnist/", train=True, download=True,
                                                   transform=transforms.Compose([
                                                       transforms.ToTensor()
                                                   ])), batch_size=128, shuffle=True)

cudnn.benchmark = True
ae = Autoencoder().cuda()
print(ae)
optimizer = torch.optim.Adam(ae.parameters())

total_epoch = 50

trainer = SAE(ae, optimizer, random_uniform, num_projections=25)
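# SAE presumably implements a sliced-Wasserstein autoencoder objective: random_uniform
# samples the projection directions and num_projections sets the number of slices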
ae.train()


for epoch in range(total_epoch):

    for index, (img, label) in enumerate(mnist):
        img = img.cuda()
        #img = img.expand(img.data.shape[0], 3, 28, 28)
        batch_result = trainer.train(img)
        if (index+1) % 10 == 0:
            print("loss: {:.4f} \t l1:{:.4f} \t bce:{:.4f} \t w2:{:.4f}".format(
Example No. 13
from torch import optim
from torch.utils.data import DataLoader
from torchvision import transforms
from torchvision.datasets import MNIST

from model import Autoencoder
from visualize import *

# hyperparameters
num_epochs = 100
batch_size = 128
lr = 1e-3

# get images from MNIST database
dataset = MNIST('../data', transform=transforms.ToTensor(), download=True)
dataloader = DataLoader(dataset, batch_size=batch_size, shuffle=True)

# create autoencoder and optimizer for it
autoencoder = Autoencoder()
optimizer = optim.Adam(autoencoder.parameters(), lr=lr)

# start training
for epoch in range(num_epochs):

    # minibatch optimization with Adam
    for data in dataloader:
        img, _ = data

        # change the images to be 1D
        img = img.view(img.size(0), -1)

        # get output from network
        out = autoencoder(img)

        # calculate MSE loss and update network
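        # (the example is truncated here; a minimal completion, assuming a plain MSE
        # reconstruction objective, would be:)
        loss = ((out - img) ** 2).mean()
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()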