Example #1
 def __call__(self, batchsize=64, z=None, y=None, **kwargs):
     if z is None:
         z = sample_continuous(self.dim_z,
                               batchsize,
                               distribution=self.distribution,
                               xp=self.xp)
     if y is None:
         y = sample_categorical(
             self.n_classes, batchsize, distribution="uniform",
             xp=self.xp) if self.n_classes > 0 else None
     if (y is not None) and z.shape[0] != y.shape[0]:
         raise ValueError(
             'z.shape[0] != y.shape[0], z.shape[0]={}, y.shape[0]={}'.format(
                 z.shape[0], y.shape[0]))
     h = z
     h = self.l1(h)
     h = F.reshape(h,
                   (h.shape[0], -1, self.bottom_width, self.bottom_width))
     h = self.block2(h, y, **kwargs)
     h = self.block3(h, y, **kwargs)
     h = self.block4(h, y, **kwargs)
     h = self.block5(h, y, **kwargs)
     h = self.block6(h, y, **kwargs)
     h = self.block7(h, y, **kwargs)
     h = self.b8(h)
     h = self.activation(h)
     h = F.tanh(self.l8(h))
     return h
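Every example on this page calls the same two sampling helpers, whose bodies are not shown here. The following is a minimal sketch of what they plausibly do, assuming xp is numpy or cupy and the only distributions used are the 'normal'/'uniform' names seen above; treat it as an illustration, not the project's actual code.

import numpy as np

def sample_continuous(dim, batchsize, distribution='normal', xp=np):
    # (batchsize, dim) float32 latent vectors drawn from the chosen prior.
    if distribution == 'normal':
        return xp.random.randn(batchsize, dim).astype(xp.float32)
    if distribution == 'uniform':
        return xp.random.uniform(-1, 1, (batchsize, dim)).astype(xp.float32)
    raise NotImplementedError(distribution)

def sample_categorical(n_cat, batchsize, distribution='uniform', xp=np):
    # (batchsize,) int32 class labels in [0, n_cat) for conditional models.
    if distribution == 'uniform':
        return xp.random.randint(0, n_cat, size=(batchsize,)).astype(xp.int32)
    raise NotImplementedError(distribution)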
Example #2
def make_image(G,
               D,
               batchsize,
               N_update=100,
               ot=True,
               mode='latent',
               k=1,
               lr=0.05,
               optmode='sgd'):
    # Sample class labels and matching latents for one batch.
    label = sample_categorical(1000, batchsize, distribution="uniform", xp=xp)
    labels = label * xp.ones(batchsize).astype(xp.int32)
    zs = sample_continuous(128, batchsize, distribution=G.distribution, xp=xp)
    if k != 1:
        # k may be passed as a callable (e.g. a per-class constant); evaluate it.
        k = k(labels).data

    with chainer.using_config('train', False):
        if ot:
            z_xp = zs
            if optmode == 'sgd':
                Opt = chainer.optimizers.SGD(lr)
            elif optmode == 'adam':
                Opt = chainer.optimizers.Adam(lr, beta1=0.0, beta2=0.9)
            else:
                raise ValueError('unknown optmode: {}'.format(optmode))
            T = Transporter_in_latent(G, D, k, Opt, z_xp, labels, mode=mode)
            discriminator_optimal_transport_from(z_xp, T, N_update)
            tz_y = T.get_z_va().data
            y = G(batchsize=batchsize, y=labels, z=tz_y)
        else:
            y = G(batchsize=batchsize, y=labels, z=zs)
    return cuda.to_cpu(y.data)
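Transporter_in_latent and discriminator_optimal_transport_from are project code and are not shown on this page. Stripped of the transport-cost bookkeeping, the inner loop amounts to gradient ascent on the critic score with respect to z; a minimal sketch under that assumption (hypothetical helper name, plain SGD instead of the configurable optimizer):

import chainer
import chainer.functions as F

def refine_latents(G, D, z, y, n_update=100, lr=0.05):
    # Push latents toward regions the critic D scores highly.
    z = chainer.Variable(z)
    for _ in range(n_update):
        score = F.sum(D(G(batchsize=z.shape[0], z=z, y=y), y=y))
        z.cleargrad()
        score.backward()
        z = chainer.Variable(z.data + lr * z.grad)  # ascent step
    return z.data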
Example #3
    def update_core(self):
        gen = self.models['gen']
        dis = self.models['dis']
        gen_optimizer = self.get_optimizer('opt_gen')
        dis_optimizer = self.get_optimizer('opt_dis')
        xp = gen.xp

        x_real, y_real = self.get_batch(xp)
        # First, sample data from G through langevin dynamics
        batchsize = len(x_real)
        # x_fake, y_fake = self._generete_samples(n_gen_samples=batchsize)
        if self.conditional:
            y_fake = sample_categorical(gen.n_classes, batchsize, xp=gen.xp)
        else:
            y_fake = None
        if args.sampling_space == 'pixel':
            x_fake = gen(batchsize, y=y_fake)
            x_fake = sampler.langevin(x_fake, y_fake, dis)
        elif args.sampling_space == 'latent':
            x_fake, _ = latent_sampler.langevin(batchsize,
                                                gen,
                                                dis,
                                                y_fake=y_fake)
        else:
            raise ValueError(
                'unknown sampling_space: {}'.format(args.sampling_space))

        # Then, update energy critic
        for i in range(self.n_dis):
            dis_real = dis(x_real, y=y_real)
            dis_fake = dis(x_fake, y=y_fake)
            x_fake.unchain_backward()
            loss_dis = self.loss_dis(dis_fake=dis_fake, dis_real=dis_real)
            dis.cleargrads()
            loss_dis.backward()
            dis_optimizer.update()
            chainer.reporter.report({'loss_dis': loss_dis})
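sampler.langevin above is also project code whose body is not shown. As used here it refines generator samples in pixel space; a plausible minimal sketch, assuming a standard Langevin update on the critic's score (step count and step size are made-up defaults):

import math
import chainer
import chainer.functions as F

def langevin(x, y, dis, n_steps=10, step_size=0.01):
    xp = chainer.backend.get_array_module(x.data)
    for _ in range(n_steps):
        x = chainer.Variable(x.data)   # detach from the previous step
        score = F.sum(dis(x, y=y))
        x.cleargrad()
        score.backward()
        noise = xp.random.randn(*x.data.shape).astype(xp.float32)
        x = chainer.Variable(x.data
                             + 0.5 * step_size * x.grad
                             + math.sqrt(step_size) * noise)
    return x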
Example #4
def gen_images(gen, n=50000, batchsize=100):
    ims = []
    xp = gen.xp
    # start_time = time.time()
    # print('Start!')
    # Load the config once; it does not change between batches.
    config = yaml_utils.Config(yaml.load(open(args.config_path)))
    is_conditional = config.updater['args']['conditional']
    for i in range(0, n, batchsize):
        if i % 2500 == 2500 - batchsize:
            print(str(i) + " generated")
        if is_conditional:
            y = sample_categorical(gen.n_classes, batchsize, xp=gen.xp)
        else:
            y = None
        if args.sampling_space == 'pixel':
            with chainer.using_config('train', False), chainer.using_config(
                    'enable_backprop', False):
                x = gen(batchsize, y=y)
            x = sampler.langevin(x, y, dis)
        elif args.sampling_space == 'latent':
            x, _ = latent_sampler.langevin(batchsize,
                                           gen,
                                           dis,
                                           y_fake=y,
                                           eval=True)
        x = chainer.cuda.to_cpu(x.data)
        x = np.asarray(np.clip(x * 127.5 + 127.5, 0.0, 255.0), dtype=np.uint8)
        ims.append(x)
    ims = np.asarray(ims)
    _, _, _, h, w = ims.shape
    ims = ims.reshape((n, 3, h, w))
    # stop_time = time.time()
    # print('Stop! Time: '+str(stop_time-start_time))
    return ims
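The returned array is (n, 3, h, w) uint8. As a usage note (hypothetical file name; PIL assumed available), a 10x10 preview grid can be tiled from it like this:

from PIL import Image

ims = gen_images(gen, n=100, batchsize=100)   # (100, 3, h, w) uint8
h, w = ims.shape[2], ims.shape[3]
grid = ims.reshape(10, 10, 3, h, w).transpose(0, 3, 1, 4, 2)
Image.fromarray(grid.reshape(10 * h, 10 * w, 3)).save('preview.png')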
Example #5
 def __call__(self, batchsize=64, z=None, y=None):
     if z is None:
         z = sample_continuous(self.dim_z,
                               batchsize,
                               distribution=self.distribution,
                               xp=self.xp)
     if y is None:
         y = sample_categorical(
             self.n_classes, batchsize, distribution="uniform",
             xp=self.xp) if self.n_classes > 0 else None
     if (y is not None) and z.shape[0] != y.shape[0]:
         raise ValueError('z.shape[0] != y.shape[0]')
     h = z
     h = self.l1(h)
     h = F.reshape(h,
                   (h.shape[0], -1, self.bottom_width, self.bottom_width))
     h = self.block2(h, y)
     out1 = h
     h = self.block3(h, y)
     out2 = h
     h = self.block4(h, y)
     out3 = h
     h = self.b5(h)
     h = self.activation(h)
     h = F.tanh(self.c5(h))
     return h, out1, out2, out3
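This variant also returns the intermediate block activations (out1..out3). One plausible use, sketched below with hypothetical names, is a feature-level loss that compares those activations against precomputed targets; the actual purpose in the source project may differ.

import chainer.functions as F

def feature_loss(gen, targets, batchsize=64):
    # targets: arrays matching the per-channel means of out1..out3 (assumed).
    x_fake, out1, out2, out3 = gen(batchsize)
    loss = 0
    for h, t in zip((out1, out2, out3), targets):
        loss += F.mean_squared_error(F.mean(h, axis=0), t)
    return loss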
Example #6
 def _generete_samples(self, gen, n_gen_samples=None):
     if n_gen_samples is None:
         n_gen_samples = self.n_gen_samples
     if self.conditional:
         y = sample_categorical(gen.n_classes, n_gen_samples, xp=gen.xp)
     else:
         y = None
     x_fake = gen(n_gen_samples, y=y)
     return x_fake, y
Example #7
 def _generete_samples(self, n_gen_samples=None):
     if n_gen_samples is None:
         n_gen_samples = self.n_gen_samples
     gen = self.models['gen']
     if self.conditional:
         y = sample_categorical(gen.n_classes, n_gen_samples, xp=gen.xp)
     else:
         y = None
     x_fake, out1, out2, out3 = gen(n_gen_samples, y=y)
     return x_fake, y, out1, out2, out3
Example #8
def gen_eval_images(gen, n=50000, batchsize=100, seeds=1234, langevin_steps=5):
    '''
    langevin_steps: number of Langevin refinement steps; each step emits one
    image per sample, so the steps form the columns of the final array.
    '''
    ims = []
    xp = gen.xp
    xp.random.seed(seeds)
    # Load the config once before the loop; it does not change per batch.
    config = yaml_utils.Config(yaml.load(open(args.config_path)))
    is_conditional = config.updater['args']['conditional']
    for i in range(0, n, batchsize):
        print(i)
        if is_conditional:
            y = sample_categorical(gen.n_classes, batchsize, xp=gen.xp)
        else:
            y = None
        if args.sampling_space == 'pixel':
            with chainer.using_config('train', False), chainer.using_config(
                    'enable_backprop', False):
                x = gen(batchsize, y=y)
            for j in range(langevin_steps):
                x = sampler.langevin(x, y, dis)
                nx = chainer.cuda.to_cpu(x.data)
                nx = np.asarray(np.clip(nx * 127.5 + 127.5, 0.0, 255.0),
                                dtype=np.uint8)
                ims.append(nx)
        elif args.sampling_space == 'latent':
            z = Variable(
                sample_continuous(gen.dim_z,
                                  batchsize,
                                  distribution=gen.distribution,
                                  xp=gen.xp))
            x = gen(batchsize, y=y, z=z)
            nx = chainer.cuda.to_cpu(x.data)
            nx = np.asarray(np.clip(nx * 127.5 + 127.5, 0.0, 255.0),
                            dtype=np.uint8)
            ims.append(nx)
            for j in range(langevin_steps):
                x, z = latent_sampler.langevin(batchsize,
                                               gen,
                                               dis,
                                               y_fake=y,
                                               eval=True,
                                               given_z=z)
                nx = chainer.cuda.to_cpu(x.data)
                nx = np.asarray(np.clip(nx * 127.5 + 127.5, 0.0, 255.0),
                                dtype=np.uint8)
                ims.append(nx)
    ims = list(map(list, zip(*ims)))
    ims = np.asarray(ims)
    _, _, _, h, w = ims.shape
    if args.sampling_space == 'latent':
        langevin_steps += 1
    ims = ims.reshape((n * langevin_steps, 3, h, w))
    return ims
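With a single batch (n == batchsize) the returned array groups each sample's trajectory contiguously: pixel mode yields langevin_steps images per sample, latent mode langevin_steps + 1 (the pre-refinement sample is included). A usage sketch:

ims = gen_eval_images(gen, n=16, batchsize=16, langevin_steps=5)
steps = ims.shape[0] // 16   # 5 in pixel space, 6 in latent space
traj = ims.reshape(16, steps, 3, ims.shape[2], ims.shape[3])
# traj[i, j] is sample i after j Langevin steps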
Example #9
    def __call__(self, batchsize=64, z=None, y=None, gt=None, **kwargs):
        outs = []
        fast_losses = []

        if z is None:
            z = sample_continuous(self.dim_z,
                                  batchsize,
                                  distribution=self.distribution,
                                  xp=self.xp)
        if y is None:
            y = sample_categorical(
                self.n_classes, batchsize, distribution="uniform",
                xp=self.xp) if self.n_classes > 0 else None
        if (y is not None) and z.shape[0] != y.shape[0]:
            raise ValueError(
                'z.shape[0] != y.shape[0], z.shape[0]={}, y.shape[0]={}'.format(
                    z.shape[0], y.shape[0]))

        # forward calculation without auxiliary network
        out_noab = self.forward(z=z, y=y, noAB=True, **kwargs)

        out, z, zeta, z_recon = self.forward(z=z,
                                             y=y,
                                             return_zs=True,
                                             **kwargs)
        outs.append(out)

        # beta1=0, beta2=0.9 <-> initial_t = 100
        optimizer = MyAdaGrad(zeta, self.xp, lr=self.fast_alpha())

        for _ in range(self.T):
            loss = F.sum(self.fast_loss(out, gt))
            fast_losses.append(loss)

            grads = chainer.grad([loss], [zeta],
                                 enable_double_backprop=True)[0]
            # use learned learning rate
            # z2 += - F.broadcast_to(self.lr(), grads[0].shape) * grads[0]
            zeta += optimizer.calc_update(grads)

            # forward run with z2 supply
            out = self.forward(z=z, y=y, zeta=zeta)
            outs.append(out)

        return outs, fast_losses, out_noab, zeta, z_recon
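MyAdaGrad is project code; its calc_update must return a Variable so the unrolled inner loop stays differentiable (the gradients above are taken with enable_double_backprop=True). A minimal sketch of a compatible implementation (hypothetical; eps is a made-up default):

import chainer

class MyAdaGrad(object):
    def __init__(self, var, xp, lr=0.01, eps=1e-8):
        self.h = xp.zeros_like(var.data)   # accumulated squared gradients
        self.xp, self.lr, self.eps = xp, lr, eps

    def calc_update(self, grads):
        # AdaGrad step, kept differentiable with respect to grads.
        self.h += grads.data ** 2
        return -self.lr * grads / (self.xp.sqrt(self.h) + self.eps)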
Example #10
    def __call__(self, batchsize=64, z=None, y=None):
        if z is None:
            z = sample_continuous(self.dim_z, batchsize, distribution=self.distribution, xp=self.xp)
        if y is None:
            y = sample_categorical(self.n_classes, batchsize, distribution="uniform",
                                   xp=self.xp) if self.n_classes > 0 else None
        if (y is not None) and z.shape[0] != y.shape[0]:
            raise ValueError('z.shape[0] != y.shape[0]')
        print("B0", np.sum(z.data))
        print("C2B0", np.sum(self.block2.c2.b.data))
        
        h = z
        h = self.l1(h)
        h = F.reshape(h, (h.shape[0], -1, self.bottom_width, self.bottom_width))
        print("B1", np.sum(h.data))
        print("C2B1", np.sum(self.block2.c2.b.data))

        h = self.block2(h, y)
        print("B2", np.sum(h.data))
        print("C2B2", np.sum(self.block2.c2.b.data))

        h = self.block3(h, y)
        print("B3", np.sum(h.data))
        print("C2B3", np.sum(self.block2.c2.b.data))

        h = self.block4(h, y)
        print("B4", np.sum(h.data))
        print("C2B4", np.sum(self.block2.c2.b.data))

        h = self.b5(h)
        print("B5", np.sum(h.data))
        print("C2B5", np.sum(self.block2.c2.b.data))

        h = self.activation(h)
        print("B6", np.sum(h.data))
        print("C2B6", np.sum(self.block2.c2.b.data))

        h = F.tanh(self.c5(h))
        print("B7", np.sum(h.data))
        print("C2B7", np.sum(self.block2.c2.b.data))

        return h
Example #11
 def sample_y(self, batchsize=64):
     return sample_categorical(self.n_classes,
                               batchsize,
                               distribution="uniform",
                               xp=self.xp)
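A usage sketch (gen assumed to be an instance of one of the conditional generators on this page):

y = gen.sample_y(batchsize=8)   # int32 labels in [0, n_classes)
x = gen(batchsize=8, y=y)       # images conditioned on those labels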
Example #12
    def __call__(self,
                 batchsize=None,
                 y=None,
                 z=None,
                 mult_until_exec=None,
                 **kwargs):
        if z is None:
            z = sample_continuous(self.dim_z,
                                  batchsize,
                                  distribution=self.distribution,
                                  xp=self.xp)
        if y is None:
            y = sample_categorical(
                self.n_classes, batchsize, distribution="uniform",
                xp=self.xp) if self.n_classes > 0 else None
        activ = self.activation

        # # mult_until_exec: if set, perform the multiplications only up to that
        # # layer. In a product of polynomials the same rule applies to *every*
        # # polynomial, e.g. mult_until_exec == 2 keeps the Hadamard products
        # # up to second-order terms in every polynomial.
        if mult_until_exec is None:
            mult_until_exec = 10000
        z = self.prod_poly_FC(z, mult_until_exec, batchsize=batchsize, y=y)

        h = z + 0
        if self.bottom_width > 1:
            h = getattr(self, 'lin0')(h)
        h = F.reshape(h,
                      (h.shape[0], -1, self.bottom_width, self.bottom_width))

        # # loop over the layers and get the layers along with the
        # # normalizations per layer.
        for l in range(1, self.n_l + 1):
            if self.skip_rep:
                h_hold = h + 0
            if self.use_bn and y is None:
                h = getattr(self, 'bn{}'.format(l))(h)
            elif self.use_bn:
                h = getattr(self, 'bn{}'.format(l))(h, y)
            h = activ(getattr(self, 'l{}'.format(l))(h))
            h = self.return_injected(h, z, l, mult_until_exec=mult_until_exec)
            if self.skip_rep:
                # # transform the channels of h_hold if required.
                h_hold = getattr(self, 'skipch{}'.format(l))(h_hold)
                # # upsample if required.
                if h_hold.shape[-1] != h.shape[-1]:
                    h_hold = _upsample(h_hold)
                h += h_hold
        if self.order_out_poly is not None:
            z0 = h + 0
            for i in range(1, self.order_out_poly):
                h1 = getattr(self, 'oro{}'.format(i + 1))(h)
                if self.skip_rep:
                    # # model3 polynomial.
                    h += z0 * h1
                else:
                    h = z0 * h1
        # # last layer (no activation).
        output = getattr(self, 'l{}'.format(self.n_l + 1))(h)
        out = self.out_act(output)
        return out
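return_injected and prod_poly_FC are the polynomial-expansion pieces of this generator and are defined elsewhere in the class. The comment block above pins down the contract: up to mult_until_exec, each layer's features are combined with the latent by a Hadamard product. A hypothetical sketch of return_injected under that contract (the inj{l} layer names are invented, and the channel counts are assumed to match):

import chainer.functions as F

def return_injected(self, h, z, l, mult_until_exec=10000):
    if l >= mult_until_exec:
        return h                                  # stop injecting the latent
    zl = getattr(self, 'inj{}'.format(l))(z)      # assumed linear: dim_z -> channels
    zl = F.reshape(zl, (zl.shape[0], -1, 1, 1))
    zl = F.broadcast_to(zl, h.shape)
    return h * (1 + zl)                           # Hadamard injection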