Example #1
    def get_device_by_class_name(self, class_name, instance_name=None):
        """ Returns the device in the device list associated with the
        given class name. An instance_name can optionally be specified, in which case
        only the device in the list that matches both the class and instance name is returned.
        Note, this can't be used in place of the function in Utils, as this class contains
        the mixer wrapper. """
        return get_device(self, name=instance_name, class_name=class_name)
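For context, a call on the object that exposes this method might look like the following. This is only a usage sketch: `script` stands in for that object, and the device class and instance names ('Compressor2', 'My Comp') are assumptions, not taken from the source.

# Hypothetical usage; 'script' and the device names are placeholders.
comp = script.get_device_by_class_name('Compressor2')
named_comp = script.get_device_by_class_name('Compressor2', instance_name='My Comp')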
Example #2
    def get_multi_device_track_parameter(self,
                                         dev_dict,
                                         param_index,
                                         page_index=None):
        """ Returns the track-based parameter from the given dev_dict at the given index.
        The dict should be in the form (an OrderedDict is used to preserve key order):
        {dev1_class_name: {'instance_name': name, 'parameters': param_list},
         dev2_class_name: {'instance_name': name, 'parameters': param_list}, etc.}
        If passing page_index, the param_lists should be nested. """
        for k, v in dev_dict.items():
            dev = get_device(self, name=v['instance_name'], class_name=k)
            if live_object_is_valid(dev):
                if page_index is not None:
                    return get_device_parameter(
                        dev, v['parameters'][page_index][param_index])
                return get_device_parameter(dev, v['parameters'][param_index])

        return
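To make the expected dev_dict shape concrete, here is a minimal sketch of how such a mapping might be built and passed in. The device class names, instance names, and parameter identifiers are hypothetical placeholders, as is the `script` object standing in for whatever exposes this method.

from collections import OrderedDict

# Hypothetical dict; class names, instance names and parameter identifiers are assumptions.
dev_dict = OrderedDict([
    ('Compressor2', {'instance_name': None,
                     'parameters': ['Threshold', 'Ratio', 'Output Gain']}),
    ('Eq8',         {'instance_name': 'Main EQ',
                     'parameters': ['1 Frequency A', '1 Gain A', 'Scale']}),
])

# Returns the 'Ratio' parameter of the first valid device found (here, Compressor2).
param = script.get_multi_device_track_parameter(dev_dict, param_index=1)

# When page_index is used, each 'parameters' value would instead be a list of pages,
# e.g. [['Threshold', 'Ratio'], ['Attack', 'Release']], and the call becomes:
# param = script.get_multi_device_track_parameter(dev_dict, param_index=0, page_index=1)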
Example #3
from Utils import MnistLoadData
from Utils import CIFARLoadData
from Models.VAE_Model.Parser_args import parse_Arg
from Models.VAE_Model.VAE import vae
from torchvision.utils import save_image
from Utils import get_device

args = parse_Arg()
image_shape = (args.channels, args.image_size, args.image_size)
data_loader = CIFARLoadData(args.batch_size, True, True)

device = get_device()

model = vae(args, device)

for epoch in range(args.n_epochs):
    for i, data in enumerate(data_loader, 0):
        inputs, _ = data
        current_batch_size = inputs.size(0)
        # Flatten each image into a single vector before feeding it to the VAE.
        inputs = inputs.view(current_batch_size, args.input_dim * args.channels).to(device)

        loss = model.learn(inputs)

        print("[Epoch %d/%d] [Batch %d/%d] [loss: %f]]" % (epoch + 1, args.n_epochs, i + 1, len(data_loader), loss))

        batches_done = epoch * len(data_loader) + i
        if batches_done % args.sample_interval == 0:
            # Reconstruct the current batch; use current_batch_size so the final, possibly smaller batch still reshapes correctly.
            output = model(inputs).data.reshape(current_batch_size, args.channels, args.image_size, args.image_size)
            save_image(output, "images/%d.png" % batches_done, nrow=args.nrow, normalize=True)
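Utils.get_device is called here with no arguments and, in the next example, with an explicit device string; its implementation is not shown in these snippets. A minimal sketch of what such a helper might look like, assuming it only wraps torch.device selection, is:

import torch

def get_device(name=None):
    # Hypothetical sketch of Utils.get_device; the real implementation is not shown
    # in these examples. An explicit name such as 'cuda:1' is honoured as given,
    # otherwise fall back to the first GPU if one is available, else the CPU.
    if name is not None:
        return torch.device(name)
    return torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')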

Example #4
import torch
from Models.SAGAN_Model.Parser_args import parse_Arg
from Models.SAGAN_Model.SAGAN import sa_gan
from torchvision.utils import save_image
from Utils import CIFARLoadData
from Utils import get_device

args = parse_Arg()
device1 = get_device('cuda:0')
device2 = get_device('cuda:1')

train_loader = CIFARLoadData(args.batch_size, True, True)

model = sa_gan(args, [device1, device2])

for epoch in range(args.n_epochs):
    for i, data in enumerate(train_loader):
        real_images, _ = data
        current_batch_size = real_images.size(0)

        inputs = real_images.clone().to(device1)
        # Standard-normal noise vectors for the generator.
        noise = torch.zeros(current_batch_size, args.noise_dim, 1, 1).normal_(0, 1)

        # Adversarial targets: 1 for real images, 0 for generated ones.
        real_labels = torch.ones(current_batch_size, 1).detach()
        fake_labels = torch.zeros(current_batch_size, 1).detach()

        discriminator_loss, fake_image = model.learn_discriminator(inputs, noise, real_labels, fake_labels)
        generator_loss = model.learn_generator(noise, real_labels)

        print("[Epoch %d/%d] [Batch %d/%d] [Discriminator_loss: %f] [Generator_loss: %f]"
            % (epoch + 1, args.n_epochs, i + 1, len(train_loader), discriminator_loss, generator_loss))
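
        # save_image is imported above but never used in the original loop. A hypothetical
        # sample dump, mirroring the VAE example; it assumes fake_image is a
        # (batch, channels, height, width) tensor of generated images and that the
        # SAGAN parser also defines args.sample_interval.
        batches_done = epoch * len(train_loader) + i
        if batches_done % args.sample_interval == 0:
            save_image(fake_image.data[:25], "images/%d.png" % batches_done,
                       nrow=5, normalize=True)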