Example #1
def create_generator(opt):
    if opt.pre_train:
        # Initialize the network
        generator = network.Generator(opt)
        # Initialize its weights
        network.weights_init(generator, init_type=opt.init_type, init_gain=opt.init_gain)
        print('Generator is created!')
    else:
        # Initialize the network
        generator = network.Generator(opt)
        # Load a pre-trained network
        pretrained_net = torch.load(opt.load_name + '.pth')
        load_dict(generator, pretrained_net)
        print('Generator is loaded!')
    return generator
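load_dict is called above but not shown in this snippet. A minimal sketch of the partial state-dict loading it implies (an assumed helper, not necessarily the author's implementation) could look like this:

def load_dict(process_net, pretrained_net):
    # Accept either a serialized module or a raw state dict (assumed behaviour)
    pretrained_dict = pretrained_net.state_dict() if hasattr(pretrained_net, 'state_dict') else pretrained_net
    process_dict = process_net.state_dict()
    # Keep only parameters whose names and shapes match the current model
    pretrained_dict = {k: v for k, v in pretrained_dict.items()
                       if k in process_dict and v.size() == process_dict[k].size()}
    process_dict.update(pretrained_dict)
    process_net.load_state_dict(process_dict)
    return process_net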
Example #2
def create_generator(opt):
    if opt.pre_train:
        # Initialize the network
        generator_a = network.Generator(opt)
        generator_b = network.Generator(opt)
        # Initialize their weights
        network.weights_init(generator_a, init_type=opt.init_type, init_gain=opt.init_gain)
        network.weights_init(generator_b, init_type=opt.init_type, init_gain=opt.init_gain)
        print('Generator is created!')
    else:
        # Load the weights
        generator_a = torch.load(opt.load_name + '_a.pth')
        generator_b = torch.load(opt.load_name + '_b.pth')
        print('Generator is loaded!')
    return generator_a, generator_b
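Note on the else branch above: torch.load(opt.load_name + '_a.pth') must return a complete serialized Generator module here, so the checkpoints are expected to have been written with torch.save(model) rather than torch.save(model.state_dict()); full-module checkpoints of this kind are tied to the exact class definition available at save time.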
Example #3
    def __init__(self, config):
        self.config = config
        if torch.cuda.is_available():
            self.use_cuda = True
            torch.set_default_tensor_type('torch.cuda.FloatTensor')
        else:
            self.use_cuda = False
            torch.set_default_tensor_type('torch.FloatTensor')

        self.nz = config.nz
        self.optimizer = config.optimizer

        self.resl = 2  # we start from 2^2 = 4
        self.max_resl = config.max_resl
        self.trns_tick = config.trns_tick
        self.stab_tick = config.stab_tick
        self.TICK = config.TICK
        self.globalIter = 0
        self.globalTick = 0
        self.kimgs = 0
        self.stack = 0
        self.epoch = 0
        self.fadein = {'gen': None, 'dis': None}
        self.complete = {'gen': 0, 'dis': 0}
        self.phase = 'init'
        self.flag_flush_gen = False
        self.flag_flush_dis = False

        # network and criterion
        self.G = net.Generator(config)
        self.D = net.Discriminator(config)
        print('Generator structure: ')
        print(self.G.model)
        print('Discriminator structure: ')
        print(self.D.model)
        self.mse = torch.nn.MSELoss()
        if self.use_cuda:
            self.mse = self.mse.cuda()
            torch.cuda.manual_seed(config.random_seed)
            if config.n_gpu == 1:
                # Calling .cuda() directly on the model did not work reliably here,
                # so wrap it in DataParallel even on a single GPU.
                self.G = torch.nn.DataParallel(self.G).cuda(device=0)
                self.D = torch.nn.DataParallel(self.D).cuda(device=0)
            else:
                gpus = list(range(config.n_gpu))
                self.G = torch.nn.DataParallel(self.G, device_ids=gpus).cuda()
                self.D = torch.nn.DataParallel(self.D, device_ids=gpus).cuda()

        # define tensors, and get dataloader.
        self.renew_everything()

        # tensorboard
        self.use_tb = config.use_tb
        if self.use_tb:
            self.tb = tensorboard.tf_recorder()
Example #4
def generate():
    parser = argparse.ArgumentParser()
    parser.add_argument('--gpu', '-g', type=int, default=-1)
    parser.add_argument('--gen', type=str, default=None)
    parser.add_argument('--depth', '-d', type=int, default=0)
    parser.add_argument('--out', '-o', type=str, default='img/')
    parser.add_argument('--num', '-n', type=int, default=10)
    args = parser.parse_args()

    gen = network.Generator(depth=args.depth)
    print('loading generator model from ' + args.gen)
    serializers.load_npz(args.gen, gen)

    if args.gpu >= 0:
        cuda.get_device_from_id(0).use()
        gen.to_gpu()

    xp = gen.xp

    z1 = gen.z(1)
    z2 = gen.z(1)

    for i in range(args.num):
        print(i)
        p = i / (args.num - 1)
        z = z1 * p + z2 * (1 - p)
        x = gen(z, alpha=1.0)
        x = chainer.cuda.to_cpu(x.data)

        img = x[0].copy()
        filename = os.path.join(args.out, 'gen_%04d.png' % i)
        utils.save_image(img, filename)
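The loop above is a straight linear interpolation in latent space: as p runs from 0 to 1, z moves from z2 (at i = 0) to z1 (at i = args.num - 1), so the saved gen_*.png frames form a morph sequence between two random samples.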
Example #5
    def __init__(self):
        super().__init__()
        self.title = 'PyQt5 image - pythonspot.com'
        self.left = 300
        self.top = 300
        self.width = 900
        self.height = 600
        self.gen = network.Generator(depth=depth)
        serializers.load_npz(gen_path, self.gen)
        # csv data (random placeholder values)
        self.csv_file = [[random.random() for j in range(csv_len)]]
        self.vec2rand_model = load_model(vec2rand_model_path)
        # use the GPU if one was requested
        if gpu >= 0:
            cuda.get_device_from_id(0).use()
            self.gen.to_gpu()
        self.xp = self.gen.xp
        self.initUI()
        # generate an initial image
        z = self.xp.random.randn(1, 512, 1, 1).astype('f')
        x = self.gen(z, alpha=1.0)
        x = chainer.cuda.to_cpu(x.data)
        img = x[0].copy()
        utils.save_image(img, 'temp.jpg')
        _img = Image.open('temp.jpg')
        self.img = np.asarray(_img)
        self.initFigure()
        self.show()
Example #6
    def __init__(self, config):
        self.config = config
        if torch.cuda.is_available():
            self.use_cuda = True
            torch.set_default_tensor_type('torch.cuda.FloatTensor')
        else:
            self.use_cuda = False
            torch.set_default_tensor_type('torch.FloatTensor')

        self.nz = config.nz
        self.optimizer = config.optimizer
        self.resl = 2  # we start from 2^2 = 4
        self.lr = config.lr
        self.eps_drift = config.eps_drift
        self.smoothing = config.smoothing
        self.max_resl = config.max_resl
        self.trns_tick = config.trns_tick
        self.stab_tick = config.stab_tick
        self.TICK = config.TICK
        self.globalIter = 0
        self.globalTick = 0
        self.kimgs = 0
        self.stack = 0
        self.epoch = 0
        self.fadein = {'gen': None, 'dis': None}
        self.complete = {'gen': 0, 'dis': 0}
        self.phase = 'init'
        self.flag_flush_gen = False
        self.flag_flush_dis = False
        self.flag_add_noise = self.config.flag_add_noise
        self.flag_add_drift = self.config.flag_add_drift
        self.loader = DL.dataloader(config)
        self.LAMBDA = 2

        # network
        self.G = network.Generator(config)
        self.D = network.Discriminator(config)
        print('Generator structure: ')
        print(self.G.model)
        print('Discriminator structure: ')
        print(self.D.model)
        if self.use_cuda:
            torch.cuda.manual_seed(config.random_seed)
            #self.G = self.G.cuda()
            #self.D = self.D.cuda()
            if config.n_gpu == 1:
                self.G = torch.nn.DataParallel(self.G).cuda(device=0)
                self.D = torch.nn.DataParallel(self.D).cuda(device=0)
            else:
                gpus = list(range(config.n_gpu))
                self.G = torch.nn.DataParallel(self.G, device_ids=gpus).cuda()
                self.D = torch.nn.DataParallel(self.D, device_ids=gpus).cuda()
        self.renew_everything()
        self.use_tb = config.use_tb
        if self.use_tb:
            self.tb = tensorboard.tf_recorder()
Example #7
def create_generator_val(opt):
    # Initialize the network
    generator = network.Generator(opt)
    # Init or Load value for the network
    if opt.finetune_path != "":
        pretrained_net = torch.load(opt.finetune_path)
        generator = load_dict(generator, pretrained_net)
        print('Generator is loaded!')
    return generator
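Unlike create_generator in Example #10 below, this validation helper never calls network.weights_init, so when opt.finetune_path is empty the returned generator keeps PyTorch's default per-layer initialization.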
Example #8
    def __init__(self, config):
        self.config = config
        if torch.cuda.is_available():
            self.use_cuda = True
            torch.set_default_tensor_type('torch.cuda.FloatTensor')
        else:
            self.use_cuda = False
            torch.set_default_tensor_type('torch.FloatTensor')

        self.nz = config.nz
        self.optimizer = config.optimizer

        self.resl = config.start_res  # we start from 2^2 = 4
        self.lr = config.lr
        self.eps_drift = config.eps_drift
        self.smoothing = config.smoothing
        self.max_resl = config.max_resl
        self.trns_tick = config.trns_tick
        self.stab_tick = config.stab_tick
        self.TICK = config.TICK
        self.globalIter = 0
        self.globalTick = 0
        self.kimgs = 0
        self.stack = 0
        self.epoch = 0
        self.fadein = {'gen': None, 'dis': None}
        self.complete = {'gen': 0, 'dis': 0}
        self.phase = 'init'
        self.flag_flush_gen = False
        self.flag_flush_dis = False
        self.flag_add_noise = self.config.flag_add_noise
        self.flag_add_drift = self.config.flag_add_drift

        # network and criterion
        if config.start_res == 2:
            self.G = net.Generator(config)
            self.D = net.Discriminator(config)
        else:
            self.G, self.D = g_d_interpolated.recup_nets(config)
        self.mse = torch.nn.MSELoss()
        if self.use_cuda:
            self.mse = self.mse.cuda()
            torch.cuda.manual_seed(config.random_seed)
            """
            if config.n_gpu==1:
                self.G = torch.nn.DataParallel(self.G).cuda(device=0)
                self.D = torch.nn.DataParallel(self.D).cuda(device=0)
            else:
                gpus = []
                for i  in range(config.n_gpu):
                    gpus.append(i)
                self.G = torch.nn.DataParallel(self.G, device_ids=gpus).cuda()
                self.D = torch.nn.DataParallel(self.D, device_ids=gpus).cuda()  
            """

        # define tensors, ship model to cuda, and get dataloader.
        self.renew_everything()
Example #9
def generate():
    parser = argparse.ArgumentParser()
    parser.add_argument('--gpu', '-g', type=int, default=-1)
    parser.add_argument('--gen',
                        type=str,
                        default="predicted_model",
                        help='input generate model path')
    parser.add_argument('--depth', '-d', type=int, default=6)
    parser.add_argument('--out', '-o', type=str, default='out/gen.png')
    parser.add_argument('--num', '-n', type=int, default=7)
    parser.add_argument('--seed', '-s', type=int, default=0)
    parser.add_argument('--vector_ope',
                        '-v',
                        nargs=2,
                        type=str,
                        help='input 2 npz path')
    args = parser.parse_args()

    gen = network.Generator(depth=args.depth)
    print('loading generator model from ' + args.gen)
    serializers.load_npz(args.gen, gen)

    if args.gpu >= 0:
        cuda.get_device_from_id(0).use()
        gen.to_gpu()

    xp = gen.xp
    xp.random.seed(args.seed)

    if args.vector_ope:
        print('load vector from {}'.format(args.vector_ope))
        z1 = np.load(args.vector_ope[0])
        z2 = np.load(args.vector_ope[1])
        for i in range(args.num):
            p = i / (args.num - 1)
            z = z1 * (1 - p) + z2 * p
            x = gen(z, alpha=1.0)
            x = chainer.cuda.to_cpu(x.data)
            img = x[0].copy()
            out_file = Path(args.out).with_name(Path(args.out).stem +
                                                str(i)).with_suffix('.png')
            print(out_file)
            utils.save_image(img, out_file)
            # np.save(out_file.with_suffix('.npy'), z)

    else:
        z = gen.z(1)
        x = gen(z, alpha=1.0)
        x = chainer.cuda.to_cpu(x.data)
        img = x[0].copy()
        out_file = Path(args.out)
        utils.save_image(img, out_file)
        np.save(out_file.with_suffix('.npy'), z)
Example #10
def create_generator(opt):
    # Initialize the network
    generator = network.Generator(opt)
    # Init or Load value for the network
    network.weights_init(generator,
                         init_type=opt.init_type,
                         init_gain=opt.init_gain)
    print('Generator is created!')
    if opt.finetune_path != "":
        pretrained_net = torch.load(opt.finetune_path)
        generator = load_dict(generator, pretrained_net)
        print('Generator is loaded!')
    return generator
Example #11
    def __init__(self, sample_dimension, noise_dimension, bs):
        self.sample_dimension = sample_dimension
        self.noise_dimension = noise_dimension
        self.bs = bs

        self.D = network.Discriminator(sample_dimension=sample_dimension)
        self.G = network.Generator(noise_dimension=noise_dimension,
                                   sample_dimension=sample_dimension)

        self.optimizer_d = torch.optim.RMSprop(self.D.parameters())
        self.optimizer_g = torch.optim.RMSprop(self.G.parameters())

        self.criterion = nn.BCELoss()
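The constructor above only wires up the networks, RMSprop optimizers, and BCE criterion; no training step is shown. The following is a minimal sketch of how one batch might be processed with these members, assuming G maps (bs, noise_dimension) noise to samples and D outputs a sigmoid probability per sample (a hypothetical train_step, not part of the original class):

    def train_step(self, real_samples):
        # real_samples: tensor of shape (bs, sample_dimension); targets for BCE loss
        real_labels = torch.ones(self.bs, 1)
        fake_labels = torch.zeros(self.bs, 1)

        # Discriminator update: push real samples toward 1, generated samples toward 0
        self.optimizer_d.zero_grad()
        fake_samples = self.G(torch.randn(self.bs, self.noise_dimension)).detach()
        loss_d = (self.criterion(self.D(real_samples), real_labels) +
                  self.criterion(self.D(fake_samples), fake_labels))
        loss_d.backward()
        self.optimizer_d.step()

        # Generator update: push D's output on freshly generated samples toward 1
        self.optimizer_g.zero_grad()
        loss_g = self.criterion(self.D(self.G(torch.randn(self.bs, self.noise_dimension))), real_labels)
        loss_g.backward()
        self.optimizer_g.step()
        return loss_d.item(), loss_g.item()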
Example #12
def generate():
    parser = argparse.ArgumentParser()
    parser.add_argument('--gpu', '-g', type=int, default=-1)
    parser.add_argument('--gen', type=str, default=None)
    parser.add_argument('--depth', '-d', type=int, default=0)
    parser.add_argument('--out', '-o', type=str, default='img/')
    parser.add_argument('--num', '-n', type=int, default=10)
    args = parser.parse_args()

    male = [0, 0, 0, 1, 0]

    gen = network.Generator(depth=args.depth)
    print('loading generator model from ' + args.gen)
    serializers.load_npz(args.gen, gen)

    if args.gpu >= 0:
        cuda.get_device_from_id(0).use()
        gen.to_gpu()

    xp = gen.xp

    #xp.random.seed(seed=44)
    z = gen.z(1)
    #z2 = gen.z(1)
    attr = xp.asarray([male], dtype=xp.float32)
    #atte = Variable(attr)
    #z = Variable(z)

    #attr_num = [4, 3, 2, 1, 0]
    if not os.path.exists(args.out):
        os.makedirs(args.out)
    f = 0
    for i in range(5):
        for j in range(args.num):
            p = j / (args.num - 1)
            if i != 3:
                attr[0][i] = p
            else:
                attr[0][i] = 1 - p
            #print(attr)
            #c = xp.reshape(c, (1, c.shape[0]))
            x = gen(z, attr, alpha=1.0)
            x = chainer.cuda.to_cpu(x.data)

            img = x[0].copy()
            Attr_dir = '/Attr_{}'.format(i)
            if not os.path.exists(args.out + Attr_dir):
                os.makedirs(args.out + Attr_dir)
            filename = os.path.join(args.out + Attr_dir,
                                    'gen_{}.png'.format(j))
            utils.save_image(img, filename)
Example #13
    def __init__(self, config):
        self.config = config
        if torch.cuda.is_available():
            self.use_cuda = True
            torch.set_default_tensor_type('torch.cuda.FloatTensor')
        else:
            self.use_cuda = False
            torch.set_default_tensor_type('torch.FloatTensor')

        self.nz = config.nz
        self.optimizer = config.optimizer

        self.resolution = 2  # we start from 2^2 = 4
        self.lr = config.lr
        self.eps_drift = config.eps_drift
        self.smoothing = config.smoothing
        self.max_resolution = config.max_resolution
        self.transition_tick = config.transition_tick
        self.stablize_tick = config.stablize_tick
        self.TICK = config.TICK
        self.globalIter = 0
        self.globalTick = 0
        self.kimgs = 0
        self.stack = 0
        self.epoch = 0
        self.fadein = {'gen': None, 'dis': None}
        self.complete = {'gen': 0, 'dis': 0}
        self.phase = 'init'
        self.flag_flush_gen = False
        self.flag_flush_dis = False
        self.flag_add_noise = self.config.flag_add_noise
        self.flag_add_drift = self.config.flag_add_drift

        # network and criterion
        self.G = nn.DataParallel(net.Generator(config))
        self.D = nn.DataParallel(net.Discriminator(config))
        print('Generator structure: ')
        print(self.G.module.model)
        print('Discriminator structure: ')
        print(self.D.module.model)
        self.mse = torch.nn.MSELoss()
        self.renew_everything()

        # tensorboard
        self.use_tb = config.use_tb
        if self.use_tb:
            self.tb = tensorboard.tf_recorder()

        if config.pretrained is not None:
            self.load_pretrained(config.pretrained)
Example #14
    def build_model(self):
        # Define generators and discriminators
        self.E = network.Encoder(self.e_conv_dim)
        self.G = network.Generator(self.g_conv_dim)
        for i in self.cls:
            setattr(
                self, "D_" + i,
                net.Discriminator(self.img_size, self.d_conv_dim,
                                  self.d_repeat_num, self.norm))

        # Define vgg for perceptual loss
        self.vgg = net.VGG()
        self.vgg.load_state_dict(torch.load('addings/vgg_conv.pth'))

        # Define loss
        self.criterionL1 = torch.nn.L1Loss()
        self.criterionL2 = torch.nn.MSELoss()
        self.criterionGAN = GANLoss(use_lsgan=True,
                                    tensor=torch.cuda.FloatTensor)

        # Optimizers
        self.e_optimizer = torch.optim.Adam(self.E.parameters(), self.e_lr,
                                            [self.beta1, self.beta2])
        self.g_optimizer = torch.optim.Adam(self.G.parameters(), self.g_lr,
                                            [self.beta1, self.beta2])
        for i in self.cls:
            setattr(self, "d_" + i + "_optimizer", \
                    torch.optim.Adam(filter(lambda p: p.requires_grad, getattr(self, "D_" + i).parameters()), \
                                     self.d_lr, [self.beta1, self.beta2]))

        # Weights initialization
        self.E.apply(self.weights_init_xavier)
        self.G.apply(self.weights_init_xavier)
        for i in self.cls:
            getattr(self, "D_" + i).apply(self.weights_init_xavier)

        # Print networks
        self.print_network(self.E, 'E')
        self.print_network(self.G, 'G')
        for i in self.cls:
            self.print_network(getattr(self, "D_" + i), "D_" + i)

        if torch.cuda.is_available():
            self.E.cuda()
            self.G.cuda()
            self.vgg.cuda()
            for i in self.cls:
                getattr(self, "D_" + i).cuda()
Example #15
    def __init__(self, config):

        self.config = config
        if torch.cuda.is_available():
            self.use_cuda = True
        ngpu = 1

        # network
        self.G = net.Generator(config).cuda()

        print('Generator structure: ')
        print(self.G.model)

        devices = [i for i in range(ngpu)]
        self.G = MyDataParallel(self.G, device_ids=devices)

        self.start_resl = config.start_resl
        self.max_resl = config.max_resl
Example #16
def generate(z, args):

    gen = network.Generator(depth=args.depth)
    print('loading generator model from ' + args.gen)
    serializers.load_npz(args.gen, gen)

    if args.gpu >= 0:
        cuda.get_device_from_id(0).use()
        gen.to_gpu()

    xp = gen.xp

    x = gen(z, alpha=1.0)
    x = chainer.cuda.to_cpu(x.data)
    img = x[0].copy()
    out_file = Path(args.out)
    utils.save_image(img, out_file)
    np.save(out_file.with_suffix('.npy'), z)
    print(out_file)
Example #17
def recup_nets(config):
    use_cuda = True
    checkpoint_path_g = config.checkpoint_generator
    checkpoint_path_d = config.checkpoint_discriminator

    # load trained model.
    model_g = net.Generator(config)
    model_d = net.Discriminator(config)
    if use_cuda:
        torch.set_default_tensor_type('torch.cuda.FloatTensor')
        model_g = torch.nn.DataParallel(model_g).cuda(device=0)
        model_d = torch.nn.DataParallel(model_d).cuda(device=0)
    else:
        torch.set_default_tensor_type('torch.FloatTensor')

    for resl in range(3, config.start_res + 1):
        model_g.module.grow_network(resl)
        model_d.module.grow_network(resl)
        model_g.module.flush_network()
        model_d.module.flush_network()
    print('generator :')
    print(model_g)
    print('discriminator :')
    print(model_d)

    print('load generator from checkpoint  ... {}'.format(checkpoint_path_g))
    print(
        'load discriminator from checkpoint ... {}'.format(checkpoint_path_d))
    checkpoint_g = torch.load(os.path.join('repo/model', checkpoint_path_g))
    checkpoint_d = torch.load(os.path.join('repo/model', checkpoint_path_d))
    print(type(checkpoint_g['state_dict']))
    print(type(checkpoint_d['state_dict']))
    model_g.module.load_state_dict(checkpoint_g['state_dict'], False)
    model_d.module.load_state_dict(checkpoint_d['state_dict'], False)

    return model_g, model_d
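A detail worth noting: the grow_network/flush_network loop rebuilds the progressive-GAN modules up to config.start_res so that their structure matches the saved checkpoints, and the second positional argument False in load_state_dict(checkpoint_g['state_dict'], False) is strict=False, so parameter keys that do not match the grown model are skipped instead of raising an error.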
Example #18
    def __init__(self, config):
        self.config = config
        if torch.cuda.is_available():
            self.use_cuda = True
            torch.set_default_tensor_type("torch.cuda.FloatTensor")
        else:
            self.use_cuda = False
            torch.set_default_tensor_type("torch.FloatTensor")

        self.nz = config.nz
        self.optimizer = config.optimizer

        self.resl = 2  # we start from 2^2 = 4
        self.lr = config.lr
        self.eps_drift = config.eps_drift
        self.smoothing = config.smoothing
        self.max_resl = config.max_resl
        self.accelerate = 1
        self.wgan_target = 1.0
        self.trns_tick = config.trns_tick
        self.stab_tick = config.stab_tick
        self.TICK = config.TICK
        self.skip = False
        self.globalIter = 0
        self.globalTick = 0
        self.wgan_epsilon = 0.001
        self.stack = 0
        self.wgan_lambda = 10.0
        self.just_passed = False
        if self.config.resume:
            saved_models = os.listdir("repo/model/")
            iterations = list(
                map(lambda x: int(x.split("_")[-1].split(".")[0][1:]),
                    saved_models))
            self.last_iteration = max(iterations)
            selected_indexes = np.where(
                [x == self.last_iteration for x in iterations])[0]
            G_last_model = [
                saved_models[x] for x in selected_indexes
                if "gen" in saved_models[x]
            ][0]
            D_last_model = [
                saved_models[x] for x in selected_indexes
                if "dis" in saved_models[x]
            ][0]
            saved_grids = os.listdir("repo/save/grid")
            global_iterations = list(
                map(lambda x: int(x.split("_")[0]), saved_grids))
            self.globalIter = self.config.save_img_every * max(
                global_iterations)
            print("Resuming after " + str(self.last_iteration) +
                  " ticks and " + str(self.globalIter) + " iterations")
            G_weights = torch.load("repo/model/" + G_last_model)
            D_weights = torch.load("repo/model/" + D_last_model)
            self.resuming = True
        else:
            self.resuming = False

        self.kimgs = 0
        self.stack = 0
        self.epoch = 0
        self.fadein = {"gen": None, "dis": None}
        self.complete = {"gen": 0, "dis": 0}
        self.phase = "init"
        self.flag_flush_gen = False
        self.flag_flush_dis = False
        self.flag_add_noise = self.config.flag_add_noise
        self.flag_add_drift = self.config.flag_add_drift

        # network and criterion
        self.G = net.Generator(config)
        self.D = net.Discriminator(config)
        print("Generator structure: ")
        print(self.G.model)
        print("Discriminator structure: ")
        print(self.D.model)
        self.mse = torch.nn.MSELoss()
        if self.use_cuda:
            self.mse = self.mse.cuda()
            torch.cuda.manual_seed(config.random_seed)
            self.G = torch.nn.DataParallel(self.G,
                                           device_ids=[0]).cuda(device=0)
            self.D = torch.nn.DataParallel(self.D,
                                           device_ids=[0]).cuda(device=0)

        # define tensors, ship model to cuda, and get dataloader.
        self.renew_everything()
        if self.resuming:
            self.resl = G_weights["resl"]
            self.globalIter = G_weights["globalIter"]
            self.globalTick = G_weights["globalTick"]
            self.kimgs = G_weights["kimgs"]
            self.epoch = G_weights["epoch"]
            self.phase = G_weights["phase"]
            self.fadein = G_weights["fadein"]
            self.complete = G_weights["complete"]
            self.flag_flush_gen = G_weights["flag_flush_gen"]
            self.flag_flush_dis = G_weights["flag_flush_dis"]
            self.stack = G_weights["stack"]

            print("Resuming at " + str(self.resl) + " definition after " +
                  str(self.epoch) + " epochs")
            self.G.module.load_state_dict(G_weights["state_dict"])
            self.D.module.load_state_dict(D_weights["state_dict"])
            self.opt_g.load_state_dict(G_weights["optimizer"])
            self.opt_d.load_state_dict(D_weights["optimizer"])

        # tensorboard
        self.use_tb = config.use_tb
        if self.use_tb:
            self.tb = tensorboard.tf_recorder()
Example #19
IfInitial = False
dp = data_processor.DataProcessor()
if IfInitial:
    dp.init_data()
    print('initial done!')

# get training data and test data
train_set_np = np.load("data/train_set.npy")
train_set_label_np = np.load("data/train_set_label.npy")

test_set_np = np.load("data/test_set.npy")
test_set_label_np = np.load("data/test_set_label.npy")

# network
G = nw.Generator().cuda()
D = nw.Discriminator().cuda()

G.weight_init(mean=0, std=0.01)
D.weight_init(mean=0, std=0.01)

# load train data
BatchSize = 32

train_set = torch.load('data/train_data_set.lib')
train_data = torch_data.DataLoader(
    train_set,
    batch_size=BatchSize,
    shuffle=True,
    num_workers=2,
)
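For the DataLoader above to work, torch.load('data/train_data_set.lib') must return a Dataset-style object (for example a TensorDataset serialized with torch.save); the raw .npy arrays loaded earlier are not passed to the DataLoader directly.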
Example #20
def meta_train(gpu, dataset_path, continue_id):
    run_start = datetime.now()
    logging.info('===== META-TRAINING =====')
    logging.info(f'Running on {"GPU" if gpu else "CPU"}.')

    # region DATASET----------------------------------------------------------------------------------------------------
    logging.info(f'Training using dataset located in {dataset_path}')
    raw_dataset = VoxCelebDataset(
        root=dataset_path,
        extension='.vid',
        shuffle_frames=True,
        subset_size=config.SUBSET_SIZE,
        transform=transforms.Compose([
            transforms.Resize(config.IMAGE_SIZE),
            transforms.CenterCrop(config.IMAGE_SIZE),
            transforms.ToTensor(),
        ])
    )
    dataset = DataLoader(raw_dataset, batch_size=config.BATCH_SIZE, shuffle=True)

    # endregion

    # region NETWORK ---------------------------------------------------------------------------------------------------

    E = network.Embedder(GPU['Embedder'])
    G = network.Generator(GPU['Generator'])
    D = network.Discriminator(len(raw_dataset), GPU['Discriminator'])
    criterion_E_G = network.LossEG(config.FEED_FORWARD, GPU['LossEG'])
    criterion_D = network.LossD(GPU['LossD'])

    optimizer_E_G = Adam(
        params=list(E.parameters()) + list(G.parameters()),
        lr=config.LEARNING_RATE_E_G
    )
    optimizer_D = Adam(
        params=D.parameters(),
        lr=config.LEARNING_RATE_D
    )

    if continue_id is not None:
        E = load_model(E, continue_id)
        G = load_model(G, continue_id)
        D = load_model(D, continue_id)

    # endregion

    # region TRAINING LOOP ---------------------------------------------------------------------------------------------
    logging.info(f'Epochs: {config.EPOCHS} Batches: {len(dataset)} Batch Size: {config.BATCH_SIZE}')

    for epoch in range(config.EPOCHS):
        epoch_start = datetime.now()

        E.train()
        G.train()
        D.train()

        for batch_num, (i, video) in enumerate(dataset):

            # region PROCESS BATCH -------------------------------------------------------------------------------------
            batch_start = datetime.now()

            # video [B, K+1, 2, C, W, H]

            # Put one frame aside (frame t)
            t = video[:, -1, ...]  # [B, 2, C, W, H]
            video = video[:, :-1, ...]  # [B, K, 2, C, W, H]
            dims = video.shape

            # Calculate average encoding vector for video
            e_in = video.reshape(dims[0] * dims[1], dims[2], dims[3], dims[4], dims[5])  # [BxK, 2, C, W, H]
            x, y = e_in[:, 0, ...], e_in[:, 1, ...]
            e_vectors = E(x, y).reshape(dims[0], dims[1], -1)  # B, K, len(e)
            e_hat = e_vectors.mean(dim=1)
 
            # Generate frame using landmarks from frame t
            x_t, y_t = t[:, 0, ...], t[:, 1, ...]
            x_hat = G(y_t, e_hat)

            # Optimize E_G and D
            r_x_hat, _ = D(x_hat, y_t, i)
            r_x, _ = D(x_t, y_t, i)

            optimizer_E_G.zero_grad()
            optimizer_D.zero_grad()

            loss_E_G = criterion_E_G(x_t, x_hat, r_x_hat, e_hat, D.W[:, i].transpose(1, 0))
            loss_D = criterion_D(r_x, r_x_hat)
            loss = loss_E_G + loss_D
            loss.backward()

            optimizer_E_G.step()
            optimizer_D.step()

            # Optimize D again
            x_hat = G(y_t, e_hat).detach()
            r_x_hat, D_act_hat = D(x_hat, y_t, i)
            r_x, D_act = D(x_t, y_t, i)

            optimizer_D.zero_grad()
            loss_D = criterion_D(r_x, r_x_hat)
            loss_D.backward()
            optimizer_D.step()

            batch_end = datetime.now()

            # endregion

            # region SHOW PROGRESS -------------------------------------------------------------------------------------
            if (batch_num + 1) % 1 == 0 or batch_num == 0:
                logging.info(f'Epoch {epoch + 1}: [{batch_num + 1}/{len(dataset)}] | '
                             f'Time: {batch_end - batch_start} | '
                             f'Loss_E_G = {loss_E_G.item():.4f} Loss_D = {loss_D.item():.4f}')
                logging.debug(f'D(x) = {r_x.mean().item():.4f} D(x_hat) = {r_x_hat.mean().item():.4f}')
            # endregion

            # region SAVE ----------------------------------------------------------------------------------------------
            save_image(os.path.join(config.GENERATED_DIR, f'last_result_x.png'), x_t[0])
            save_image(os.path.join(config.GENERATED_DIR, f'last_result_x_hat.png'), x_hat[0])

            if (batch_num + 1) % 100 == 0:
                save_image(os.path.join(config.GENERATED_DIR, f'{datetime.now():%Y%m%d_%H%M%S%f}_x.png'), x_t[0])
                save_image(os.path.join(config.GENERATED_DIR, f'{datetime.now():%Y%m%d_%H%M%S%f}_x_hat.png'), x_hat[0])

            if (batch_num + 1) % 100 == 0:
                save_model(E, gpu, run_start)
                save_model(G, gpu, run_start)
                save_model(D, gpu, run_start)

            # endregion

        # SAVE MODELS --------------------------------------------------------------------------------------------------

        save_model(E, gpu, run_start)
        save_model(G, gpu, run_start)
        save_model(D, gpu, run_start)
        epoch_end = datetime.now()
        logging.info(f'Epoch {epoch + 1} finished in {epoch_end - epoch_start}. ')
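Note the double discriminator update in the loop above: after the joint E/G and D step, D is optimized a second time on a re-generated frame that has been detach()ed, so this extra pass does not backpropagate into the generator.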
Example #21
def to_categorical(y, num_columns):
    """Returns one-hot encoded Variable"""
    y_cat = np.zeros((y.shape[0], num_columns))
    y_cat[range(y.shape[0]), y] = 1.

    return Variable(FloatTensor(y_cat))


# GAN Loss function
adversarial_loss = nn.MSELoss()
categorical_loss = nn.CrossEntropyLoss()
continuous_loss = nn.MSELoss()

# Initialize generator and discriminator
generator = network.Generator(latent_dim=latent_dim,
                              categorical_dim=n_classes,
                              continuous_dim=code_dim)
discriminator = network.Discriminator(categorical_dim=n_classes)

if cuda:
    generator.cuda()
    discriminator.cuda()
    adversarial_loss.cuda()
    categorical_loss.cuda()
    continuous_loss.cuda()

# Optimizers
optimizer_G = torch.optim.Adam(generator.parameters(), lr=lr, betas=(b1, b2))
optimizer_D = torch.optim.Adam(discriminator.parameters(),
                               lr=lr,
                               betas=(b1, b2))
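For context, the inputs this InfoGAN-style generator expects can be drawn as below (a hypothetical sampling snippet; batch_size is assumed, and the generator is assumed to take noise, a categorical code, and a continuous code in that order):

batch_size = 64
# Noise vector, one-hot categorical code (via to_categorical above), and continuous code
z = Variable(FloatTensor(np.random.normal(0, 1, (batch_size, latent_dim))))
label_input = to_categorical(np.random.randint(0, n_classes, batch_size), num_columns=n_classes)
code_input = Variable(FloatTensor(np.random.uniform(-1, 1, (batch_size, code_dim))))
gen_imgs = generator(z, label_input, code_input)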
Example #22
                    type=int,
                    default=200,
                    help="interval betwen image samples")
opt = parser.parse_args()
print(opt)

img_shape = (opt.channels, opt.img_size, opt.img_size)

cuda = True if torch.cuda.is_available() else False

# Loss weight for gradient penalty
lambda_gp = 10

# Initialize generator and discriminator
generator = network.Generator(image_size=opt.img_size,
                              z_dim=opt.latent_dim,
                              conv_dim=opt.conv_dim,
                              selfattn=True)
discriminator = network.Discriminator(image_size=opt.img_size,
                                      conv_dim=opt.conv_dim,
                                      selfattn=True)

if cuda:
    generator.cuda()
    discriminator.cuda()

# Configure data loader
dataloader = DataLoader(datasets.CIFAR10(
    train=True,
    download=True,
    transform=transforms.Compose([
        transforms.ToTensor(),
Example #23
def meta_train(device, dataset_path, continue_id):
    run_start = datetime.now()
    logging.info('===== META-TRAINING =====')
    # GPU / CPU --------------------------------------------------------------------------------------------------------
    if device is not None and device != 'cpu':
        dtype = torch.cuda.FloatTensor
        torch.cuda.set_device(device)
        logging.info(f'Running on GPU: {torch.cuda.current_device()}.')
    else:
        dtype = torch.FloatTensor
        logging.info(f'Running on CPU.')

    # DATASET-----------------------------------------------------------------------------------------------------------
    logging.info(f'Training using dataset located in {dataset_path}')
    dataset = VoxCelebDataset(root=dataset_path,
                              extension='.vid',
                              shuffle=False,
                              shuffle_frames=True,
                              transform=transforms.Compose([
                                  transforms.Resize(config.IMAGE_SIZE),
                                  transforms.CenterCrop(config.IMAGE_SIZE),
                                  transforms.ToTensor(),
                                  transforms.Normalize([0.485, 0.456, 0.406],
                                                       [0.229, 0.224, 0.225]),
                              ]))

    # NETWORK ----------------------------------------------------------------------------------------------------------

    E = network.Embedder().type(dtype)
    G = network.Generator().type(dtype)
    D = network.Discriminator(143000).type(dtype)

    if continue_id is not None:
        E = load_model(E, continue_id)
        G = load_model(G, continue_id)
        D = load_model(D, continue_id)

    optimizer_E_G = Adam(params=list(E.parameters()) + list(G.parameters()),
                         lr=config.LEARNING_RATE_E_G)
    optimizer_D = Adam(params=D.parameters(), lr=config.LEARNING_RATE_D)

    criterion_E_G = network.LossEG(device, feed_forward=True)
    criterion_D = network.LossD(device)

    # TRAINING LOOP ----------------------------------------------------------------------------------------------------
    logging.info(
        f'Starting training loop. Epochs: {config.EPOCHS} Dataset Size: {len(dataset)}'
    )

    for epoch in range(config.EPOCHS):
        epoch_start = datetime.now()
        batch_durations = []

        E.train()
        G.train()
        D.train()

        for batch_num, (i, video) in enumerate(dataset):
            batch_start = datetime.now()

            # Put one frame aside (frame t)
            t = video.pop()

            # Calculate average encoding vector for video
            e_vectors = []
            for s in video:
                x_s = s['frame'].type(dtype)
                y_s = s['landmarks'].type(dtype)
                e_vectors.append(E(x_s, y_s))
            e_hat = torch.stack(e_vectors).mean(dim=0)

            # Generate frame using landmarks from frame t
            x_t = t['frame'].type(dtype)
            y_t = t['landmarks'].type(dtype)
            x_hat = G(y_t, e_hat)

            # Optimize E_G and D
            r_x_hat, D_act_hat = D(x_hat, y_t, i)
            r_x, D_act = D(x_t, y_t, i)

            optimizer_E_G.zero_grad()
            optimizer_D.zero_grad()

            loss_E_G = criterion_E_G(x_t, x_hat, r_x_hat, e_hat, D.W[:, i],
                                     D_act, D_act_hat)
            loss_D = criterion_D(r_x, r_x_hat)
            loss = loss_E_G + loss_D
            loss.backward(retain_graph=True)

            optimizer_E_G.step()
            optimizer_D.step()

            # Optimize D again
            r_x_hat, D_act_hat = D(G(y_t, e_hat), y_t, i)
            r_x, D_act = D(x_t, y_t, i)

            optimizer_D.zero_grad()
            loss_D = criterion_D(r_x, r_x_hat)
            loss_D.backward()
            optimizer_D.step()

            batch_end = datetime.now()
            batch_durations.append(batch_end - batch_start)
            # SHOW PROGRESS --------------------------------------------------------------------------------------------
            if (batch_num + 1) % 100 == 0 or batch_num == 0:
                avg_time = sum(batch_durations,
                               timedelta(0)) / len(batch_durations)
                logging.info(
                    f'Epoch {epoch+1}: [{batch_num + 1}/{len(dataset)}] | '
                    f'Avg Time: {avg_time} | '
                    f'Loss_E_G = {loss_E_G.item():.4} Loss_D {loss_D.item():.4}'
                )
                logging.debug(
                    f'D(x) = {r_x.item():.4} D(x_hat) = {r_x_hat.item():.4}')

            # SAVE IMAGES ----------------------------------------------------------------------------------------------
            if (batch_num + 1) % 100 == 0:
                if not os.path.isdir(config.GENERATED_DIR):
                    os.makedirs(config.GENERATED_DIR)

                save_image(
                    os.path.join(config.GENERATED_DIR,
                                 f'{datetime.now():%Y%m%d_%H%M}_x.png'), x_t)
                save_image(
                    os.path.join(config.GENERATED_DIR,
                                 f'{datetime.now():%Y%m%d_%H%M}_x_hat.png'),
                    x_hat)

            if (batch_num + 1) % 2000 == 0:
                save_model(E, device)
                save_model(G, device)
                save_model(D, device)

        # SAVE MODELS --------------------------------------------------------------------------------------------------

        save_model(E, device, run_start)
        save_model(G, device, run_start)
        save_model(D, device, run_start)
        epoch_end = datetime.now()
        logging.info(
            f'Epoch {epoch+1} finished in {epoch_end - epoch_start}. '
            f'Average batch time: {sum(batch_durations, timedelta(0)) / len(batch_durations)}'
        )
Example #24
    train=True,
    download=True,
    transform=transforms.Compose([
        transforms.Resize((img_size, img_size), Image.BICUBIC),
        transforms.ToTensor(),
        transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
    ])),
                                         batch_size=batch_size,
                                         shuffle=True)

# GAN Loss function
adversarial_loss = nn.MSELoss()

# Initialize generator and discriminator
generator = network.Generator(latent_dim=latent_dim,
                              classes=n_classes,
                              channels=n_channels)
discriminator = network.Discriminator(classes=n_classes, channels=n_channels)

# Label embedding
label_emb = nn.Embedding(n_classes, n_classes)

if cuda:
    generator.cuda()
    discriminator.cuda()
    adversarial_loss.cuda()
    label_emb.cuda()

# Optimizers
optimizer_G = torch.optim.Adam(generator.parameters(), lr=lr, betas=(b1, b2))
optimizer_D = torch.optim.Adam(discriminator.parameters(),
                               lr=lr,
                               betas=(b1, b2))
Example #25
import os

import torch
from torch.autograd import Variable

import utils as utils
from config import config

use_cuda = True
checkpoint_path = os.path.join(config.output_dir, 'model/gen_R8_T55.pth.tar')
n_intp = 20

# load trained model.
import network as net

test_model = net.Generator(config)
if use_cuda:
    torch.set_default_tensor_type('torch.cuda.FloatTensor')
    test_model = torch.nn.DataParallel(test_model).cuda(device=0)
    print(test_model.module.named_children())
else:
    torch.set_default_tensor_type('torch.FloatTensor')

for resl in range(3, config.max_resl + 1):
    test_model.module.grow_network(resl)
    test_model.module.flush_network()
print(test_model)

print('load checkpoint from ... {}'.format(checkpoint_path))
checkpoint = torch.load(checkpoint_path)
test_model.module.load_state_dict(checkpoint['state_dict'])
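Each grow_network/flush_network call adds one resolution stage, so after the loop the generator's module graph matches the resolution it was trained at before load_state_dict is called; the R8 in gen_R8_T55.pth.tar suggests a checkpoint taken at resl = 8 (256x256), and n_intp = 20 is presumably the number of latent interpolation steps used later in this (truncated) script.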
Example #26
    def __init__(self, config):

        nvidia_smi.nvmlInit()
        self.handle = nvidia_smi.nvmlDeviceGetHandleByIndex(0)

        self.config = config
        if torch.cuda.is_available():
            self.use_cuda = True
        ngpu = config.n_gpu

        # prepare folders
        if not os.path.exists('./checkpoint_dir/' + config.model_name):
            os.mkdir('./checkpoint_dir/' + config.model_name)
        if not os.path.exists('./images/' + config.model_name):
            os.mkdir('./images/' + config.model_name)
        if not os.path.exists('./tb_log/' + config.model_name):
            os.mkdir('./tb_log/' + config.model_name)
        if not os.path.exists('./code_backup/' + config.model_name):
            os.mkdir('./code_backup/' + config.model_name)
        os.system('cp *.py ' + './code_backup/' + config.model_name)

        # network
        self.G = net.Generator(config).cuda()
        self.D = net.Discriminator(config).cuda()
        print('Generator structure: ')
        print(self.G.model)
        print('Discriminator structure: ')
        print(self.D.model)

        devices = [i for i in range(ngpu)]
        self.G = MyDataParallel(self.G, device_ids=devices)
        self.D = MyDataParallel(self.D, device_ids=devices)

        self.start_resl = config.start_resl
        self.max_resl = config.max_resl

        self.load_model(G_pth=config.G_pth, D_pth=config.D_pth)

        self.nz = config.nz
        self.optimizer = config.optimizer
        self.lr = config.lr

        self.fadein = {'gen': None, 'dis': None}
        self.upsam_mode = self.config.G_upsam_mode  # either 'nearest' or 'tri-linear'

        self.batchSize = {
            2: 64 * ngpu,
            3: 64 * ngpu,
            4: 64 * ngpu,
            5: 64 * ngpu,
            6: 48 * ngpu,
            7: 12 * ngpu
        }
        self.fadeInEpochs = {2: 0, 3: 1, 4: 1, 5: 2000, 6: 2000, 7: 2000}
        self.stableEpochs = {2: 0, 3: 0, 4: 3510, 5: 10100, 6: 10600, 7: 50000}
        self.ncritic = {2: 5, 3: 5, 4: 5, 5: 3, 6: 3, 7: 3}

        # size 16 needs roughly 5000-7000 stable epochs
        # size 32 needs roughly 16000-30000 stable epochs

        self.global_batch_done = 0

        # define all dataloaders into a dictionary
        self.dataloaders = {}
        for resl in range(self.start_resl, self.max_resl + 1):
            self.dataloaders[resl] = DataLoader(
                DL.Data(config.train_data_root + 'resl{}/'.format(2**resl)),
                batch_size=self.batchSize[resl],
                shuffle=True,
                drop_last=True)

        # ship new model to cuda, and update optimizer
        self.renew_everything()
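batchSize, fadeInEpochs, stableEpochs and ncritic are all keyed by resl, the exponent of the current output resolution: for example resl = 7 corresponds to 2**7 = 128 outputs trained with a total batch of 12 * ngpu and, presumably, 3 critic updates per generator step (ncritic).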
Example #27
def generate():
    parser = argparse.ArgumentParser()
    parser.add_argument('--gpu', '-g', type=int, default=-1)
    parser.add_argument('--gen', type=str, default=None)
    parser.add_argument('--depth', '-d', type=int, default=0)
    parser.add_argument('--out', '-o', type=str, default='img/')
    parser.add_argument('--num', '-n', type=int, default=10)
    args = parser.parse_args()

    gen = network.Generator(depth=args.depth)
    print('loading generator model from ' + str(args.gen))
    serializers.load_npz(args.gen, gen)

    if not os.path.exists(args.out):
        os.makedirs(args.out)

    if args.gpu >= 0:
        cuda.get_device_from_id(0).use()
        gen.to_gpu()

    xp = gen.xp
    attr = [1, 1, 1, 1, 1]
    #attr = [attribute for i in range(10)]
    attr = xp.array([attr], dtype=xp.float32)
    #xp.random.seed(seed=11)
    z1 = gen.z(1)
    z2 = gen.z(1)
    z3 = gen.z(1)

    for i in range(args.num):
        print(i)
        p = i / (args.num - 1)
        z = z2 * p + z1 * (1 - p)
        x = gen(z, attr, alpha=1.0)
        x = chainer.cuda.to_cpu(x.data)

        img = x[0].copy()
        filename = os.path.join(args.out, 'gen_1to2_%04d.png' % i)
        utils.save_image(img, filename)

    for i in range(args.num):
        print(i)
        p = i / (args.num - 1)
        z = z3 * p + z2 * (1 - p)
        x = gen(z, attr, alpha=1.0)
        x = chainer.cuda.to_cpu(x.data)

        img = x[0].copy()
        filename = os.path.join(args.out, 'gen_2to3_%04d.png' % i)
        utils.save_image(img, filename)

    for i in range(args.num):
        print(i)
        p = i / (args.num - 1)
        z = z1 * p + z3 * (1 - p)
        x = gen(z, attr, alpha=1.0)
        x = chainer.cuda.to_cpu(x.data)

        img = x[0].copy()
        filename = os.path.join(args.out, 'gen_3to1_%04d.png' % i)
        utils.save_image(img, filename)
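The three loops above interpolate z1 to z2, z2 to z3, and z3 back to z1 with the attribute vector held fixed at all ones, so the saved gen_1to2_*, gen_2to3_*, and gen_3to1_* sequences form a closed morph cycle through three random latent points.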
Example #28
    def __init__(self, config):
        self.config = config
        if torch.cuda.is_available():
            self.use_cuda = True
            torch.set_default_tensor_type('torch.cuda.FloatTensor')
        else:
            self.use_cuda = False
            torch.set_default_tensor_type('torch.FloatTensor')
        
        self.nz = config.nz
        self.optimizer = config.optimizer

        self.resl = 2           # we start from 2^2 = 4
        self.lr = config.lr
        self.eps_drift = config.eps_drift
        self.smoothing = config.smoothing
        self.max_resl = config.max_resl
        self.trns_tick = config.trns_tick
        self.stab_tick = config.stab_tick
        self.TICK = config.TICK
        self.globalIter = 0
        self.globalTick = 0
        self.kimgs = 0
        self.stack = 0
        self.epoch = 0
        self.fadein = {'gen':None, 'dis':None}
        self.complete = {'gen':0, 'dis':0}
        self.phase = 'init'
        self.flag_flush_gen = False
        self.flag_flush_dis = False
        self.flag_add_noise = self.config.flag_add_noise
        self.flag_add_drift = self.config.flag_add_drift
        self.use_captions = config.use_captions
        self.gan_type = config.gan_type
        # 'lambda' is a Python keyword and cannot be used as an attribute name,
        # so read the config value via getattr and store it under a legal name.
        self.lam = getattr(config, 'lambda')

        if self.use_captions:
            self.ncap = config.ncap
        
        # network and criterion
        self.G = net.Generator(config, use_captions=self.use_captions)
        self.D = net.Discriminator(config, use_captions=self.use_captions)
        print('Generator structure: ')
        print(self.G.model)
        print('Discriminator structure: ')
        print(self.D.model)

        if self.gan_type == 'lsgan':
            self.mse = torch.nn.MSELoss()

        if self.use_cuda:
            if self.gan_type == 'lsgan':
                self.mse = self.mse.cuda()
            torch.cuda.manual_seed(config.random_seed)

            if config.n_gpu == 1:
                # single GPU
                self.G = torch.nn.DataParallel(self.G).cuda(device=0)
                self.D = torch.nn.DataParallel(self.D).cuda(device=0)
            else:
                # multiple GPUs
                gpus = list(range(config.n_gpu))
                self.G = torch.nn.DataParallel(self.G, device_ids=gpus).cuda()
                self.D = torch.nn.DataParallel(self.D, device_ids=gpus).cuda()

        # define tensors, ship model to cuda, and get dataloader.
        self.renew_everything()
        
        # tensorboard
        self.use_tb = config.use_tb
        if self.use_tb:
            self.tb = tensorboard.tf_recorder()
Example #29
OUTPUT_PARAMS = {'n_units': 1,
                 'value': 1,
                 'center_ms': 1250,
                 'width_ms': 30,
                 'baseline_val': 0.2, }

TRAIN_PARAMS = {'tau_ms': 10,
                'sigmoid': np.tanh,
                'noise_harvest': 0,
                'noise_train': 0.001,
                'n_trials_recurrent': 20,  # 20
                'n_trials_readout': 10,  # 10
                'n_trials_test': 10, }  # 10

# instantiating objects
GEN = N.Generator(**GENERATOR_PARAMS)
TRYAL = N.Trial(**TRIAL_PARAMS)
IN = N.Input(TRYAL, **INPUT_PARAMS)
OUT = N.Output(TRYAL, **OUTPUT_PARAMS)
TRAIN = N.Trainer(GEN, IN, OUT, TRYAL, **TRAIN_PARAMS)

# performing the experimental protocol
TRAIN.initialize_weights()
TRAIN.harvest_innate()
TRAIN.train_recurrent()
x_history, out_history, error_history, wxout_history = TRAIN.train_readout()
f_lst = TRAIN.test()

# saving the test trial figures into a PDF (one trial per page)
cwd = os.getcwd()
fig_dir = os.path.join(cwd, 'figs')
Example #30
    global_step_discriminator = tf.Variable(0, trainable=False)
    lr_discriminator = tf.train.exponential_decay(args.discrimator_learning_rate, 
        global_step_discriminator, 8e4, 0.5, staircase=False)
    lr_generator = tf.train.exponential_decay(args.generator_learning_rate, 
        global_step_generator, 4e4, 0.5, staircase=False)  
    '''
    # Set up models
    discriminator = network.Discriminator(loss_type=args.gan_type,
                                          image_size=args.patch_size,
                                          batch_size=args.batch_size,
                                          norm=args.norm,
                                          run_60=args.run_60)
    generator = network.Generator(adversarial_loss=args.gan_type,
                                  content_loss=args.contentloss,
                                  batch_size=args.batch_size,
                                  discriminator=discriminator,
                                  norm=False,
                                  adv_weight=args.adv_weight,
                                  relu=args.relu,
                                  run_60=args.run_60)

    # Generator
    if args.run_60:
        g_y_pred = generator.forward(train10, train20, train60)
    else:
        g_y_pred = generator.forward(train10, train20)

    contloss = generator._content_loss(label, g_y_pred)
    adverloss = generator._adversarial_loss(label, g_y_pred)
    g_loss = contloss + adverloss
    args.generator_learning_rate
    #g_train_step = hvd.DistributedOptimizer(tf.train.AdamOptimizer(lr_generator, beta1=0., beta2=0.9),