Example #1
    def get_loss_func(self, x, C=1.0):
        mu, ln_var = self.encode(x)
        batchsize = len(mu.data)
        per_sample_losses = []
        for _ in six.moves.range(self.sampling_number):
            z = F.gaussian(mu, ln_var)
            rec_loss = F.sum(
                F.bernoulli_nll(x, self.decode(z, sigmoid=False), reduce='no'),
                axis=1) / batchsize
            sample_loss = rec_loss + F.sum(
                C * gaussian_kl_divergence(mu, ln_var, reduce='no'),
                axis=1) / batchsize
            per_sample_losses.append(F.reshape(sample_loss, (batchsize, 1)))
        loss = F.concat(per_sample_losses, axis=1)
        # weight each sample's loss by its softmax importance weight
        importance_weight = F.softmax(loss)
        self.total_loss = F.sum(importance_weight * loss)
        return self.total_loss
Example #2
 def check_bernoulli_nll(self, x, y):
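     # wrap the raw arrays in Variables where requested, then compare the
     # computed NLL against the precomputed expectation in self.expect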
     if self.wrap_x:
         x = chainer.Variable(x)
     if self.wrap_y:
         y = chainer.Variable(y)
     actual = cuda.to_cpu(F.bernoulli_nll(x, y, self.reduce).data)
     testing.assert_allclose(self.expect, actual)
Example #3
        def lf(x):
            mu, ln_var = self.encode(x)
            mean_mu, mean_sigma = calculate_means(mu, ln_var)
            batchsize = len(mu.data)
            std_mu, std_ln_var = generate_std_params(mu)

            # reconstruction loss
            rec_loss = 0
            kl_loss = 0
            for l in six.moves.range(k):
                z = F.gaussian(mu, ln_var)
                rec_loss += F.bernoulli_nll(x, self.decode(z, sigmoid=False)) / (k * batchsize)
                kl_loss += -F.gaussian_nll(z, mu, ln_var) / (k * batchsize)
                kl_loss += F.gaussian_nll(z, std_mu, std_ln_var) / (k * batchsize)

            self.rec_loss = rec_loss
            self.kl_loss = kl_loss
            self.loss = self.rec_loss + C * self.kl_loss
            chainer.report(
                {
                    'rec_loss': rec_loss,
                    'kl': self.kl_loss,
                    'loss': self.loss,
                    'mu': mean_mu,
                    'sigma': mean_sigma,
                },
                observer=self
            )
            return self.loss
Example #4
        def lf(x):
            # x = x.reshape(-1, 3*64*64)
            mu, ln_var = self.encode(x)
            batchsize = len(mu.data)

            # reconstruction loss
            rec_loss = 0
            for _ in range(self.k):
                z = F.gaussian(mu, ln_var)
                rec_loss += F.bernoulli_nll(x, self.decode(z, sigmoid=False)) \
                    / (self.k * batchsize)
            self.rec_loss = rec_loss

            # latent loss
            lat_loss = self.beta * gaussian_kl_divergence(mu,
                                                          ln_var) / batchsize
            self.lat_loss = lat_loss

            self.loss = rec_loss + lat_loss
            chainer.report(
                {
                    "rec_loss": rec_loss,
                    "lat_loss": lat_loss,
                    "loss": self.loss
                },
                observer=self)
            return self.loss
Example #5
 def __call__(self, xs, zs, es):
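     # reconstruct the inputs through the encoder/decoder and add the mean
     # discriminator score on the encoded codes, both averaged over the batch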
     batch_size = xs.shape[0]
     encoded_zs = self.encoder(xs, es)
     ys = self.decoder(encoded_zs)
     d_loss = F.bernoulli_nll(xs, ys) / batch_size
     t_loss = F.sum(self.discriminator(xs, encoded_zs)) / batch_size
     return t_loss + d_loss, encoded_zs
Example #6
    def forward(self, inp, target=None, reward=None):
        self.gru.reset_state()
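        # three modes: sample when only a size is given, Bernoulli NLL against a
        # target, or reward-weighted log-likelihood when a reward is also given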
        if target is None and reward is None:
            sample_size = inp
            return self.sample(sample_size)

        else:
            batch_size, seq_len = inp.shape
            inp = inp.T
            target = target.T

            loss = 0
            if target is not None and reward is None:
                for i in range(seq_len):
                    out = self.calc(inp[i])
                    loss += F.bernoulli_nll(out, target[i])

                return loss

            elif target is not None and reward is not None:
                for i in range(seq_len):
                    out = self.calc(inp[i])
                    for j in range(batch_size):
                        loss += -out[j][target[i][j]] * reward[j]

                return loss / batch_size
Example #7
def bernoulli_nll(x, y):
    x = np.array(x, dtype=np.float32)
    y = np.array(y, dtype=np.float32)
    loss = F.bernoulli_nll(x, y, reduce='no')
    plt.plot(loss.data)
    plt.show()
    print(loss)
Example #9
    def update_core(self):
        batch = self._iterators['main'].next()
        x = Variable(self.converter(batch, self.device))
        xp = cuda.get_array_module(x.data)

        enc = self.enc
        opt_enc = self._optimizers['enc']
        dec = self.dec
        opt_dec = self._optimizers['dec']

        mu, ln_var = enc(x)

        batchsize = len(mu.data)
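        # average the Bernoulli reconstruction NLL over k posterior samples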
        rec_loss = 0
        k = 10
        for l in range(k):
            z = F.gaussian(mu, ln_var)
            rec_loss += F.bernoulli_nll(x, dec(
                z, sigmoid=False)) / (k * batchsize)

        loss = rec_loss + 1.0 * F.gaussian_kl_divergence(mu,
                                                         ln_var) / batchsize

        enc.cleargrads()
        dec.cleargrads()
        loss.backward()
        opt_enc.update()
        opt_dec.update()

        chainer.report({'rec_loss': rec_loss})
        chainer.report({'loss': loss})
Example #10
    def __call__(self, x, t):
        # number of data points
        num_data = x.shape[0]

        # forward pass and loss computation
        mu, var = self.vae.encoder(x)
        z = F.gaussian(mu, var)
        reconst_loss = 0
        for i in range(self.k):
            # compute the reconstruction error with MSE
            if self.loss_function == 'mse':
                reconst = self.vae.decoder(z, use_sigmoid=True)
                reconst_loss += F.mean_squared_error(x, reconst) / self.k

            # otherwise compute it with a Bernoulli distribution
            else:
                # bernoulli_nll applies the sigmoid internally, so pass sigmoid=False during training
                reconst = self.vae.decoder(z, use_sigmoid=False)
                reconst_loss += F.bernoulli_nll(x,
                                                reconst) / (self.k * num_data)

        kld = gaussian_kl_divergence(mu, var, reduce='mean')
        loss = reconst_loss + self.beta * kld

        # report
        reporter.report({'loss': loss}, self)
        reporter.report({'reconst_loss': reconst_loss}, self)
        reporter.report({'kld': kld}, self)

        return loss
Example #11
 def free_energy(self, x):
     #return -(free energy)
     enc_mu, enc_log_sigma_2 = self.encode(x)
     kl = F.gaussian_kl_divergence(enc_mu, enc_log_sigma_2)
     z = F.gaussian(enc_mu, enc_log_sigma_2)
     dec_mu = self.decode(z)
     nll = F.bernoulli_nll(x, dec_mu)
     return nll + kl
Example #12
 def free_energy(self,x):
     #return -(free energy)
     enc_mu, enc_log_sigma_2 = self.encode(x)
     kl = F.gaussian_kl_divergence(enc_mu,enc_log_sigma_2)
     z = F.gaussian(enc_mu,enc_log_sigma_2)
     dec_mu = self.decode(z)
     nll = F.bernoulli_nll(x,dec_mu)
     return nll+kl
Example #13
 def lf(x):
     z = self.encode(x)
     batchsize = len(x.data)
     # reconstruction loss
     #self.rec_loss = F.mean_squared_error(x, self.decode(z))
     self.rec_loss = F.bernoulli_nll(x, self.decode(z, sigmoid=False)) / (batchsize)
     # total loss: for a vanilla AE, the reconstruction loss and the total loss are the same
     self.loss = self.rec_loss
     return self.loss
Example #14
 def calc_reconst_loss(self, encoder_input, decoder_output):
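     # Gaussian decoder: analytic negative log-likelihood (log2pi is assumed to be
     # defined elsewhere in the module); otherwise treat decoder_output as Bernoulli logits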
     if self.is_gauss_dist:
         dec_mu, dec_var = decoder_output
         m_vae = 0.5* (encoder_input - dec_mu)**2 * F.exp(-dec_var)
         a_vae = 0.5* (log2pi+dec_var)
         # reconst = F.sum(m_vae + a_vae, axis=1)
         reconst = F.sum(m_vae + a_vae)
     else:
         reconst = F.bernoulli_nll(encoder_input, decoder_output)
     return reconst
Example #15
def train(model, epoch0=0):
    optimizer = optimizers.Adam()
    optimizer.setup(model)

    for epoch in xrange(epoch0, n_epoch):
        logger.print_epoch(epoch)

        # training
        perm = np.random.permutation(N_train)
        for i in xrange(0, N_train, batchsize):
            x = Variable(xp.asarray(x_train[perm[i:i + batchsize]]))
            mu, ln_var = model.encode(x)
            rec_loss = 0
            for l in xrange(1):
                z = F.gaussian(mu, ln_var)
                rec_loss += F.bernoulli_nll(x, model.decode(z, sigmoid=False)) / batchsize
            reg_loss = gaussian_kl_divergence(mu, ln_var) / batchsize
            loss = rec_loss + reg_loss
            optimizer.zero_grads()
            loss.backward()
            loss.unchain_backward()
            optimizer.update()
            logger.save_loss(reg_loss.data, rec_loss.data, train=True)

        # evaluation
        for i in xrange(0, N_test, batchsize):
            x = Variable(xp.asarray(x_train[i:i + batchsize]), volatile='on')
            mu, ln_var = model.encode(x)
            rec_loss = 0
            for l in xrange(1):
                z = F.gaussian(mu, ln_var)
                rec_loss += F.bernoulli_nll(x, model.decode(z, sigmoid=False)) / batchsize
            reg_loss = gaussian_kl_divergence(mu, ln_var) / batchsize
            loss = rec_loss + reg_loss
            logger.save_loss(reg_loss.data, rec_loss.data, train=False)

        logger.epoch_end()

    logger.terminate()
    serializer.save(model, optimizer, epoch+1)

    # everything works well
    return 0
Example #16
    def update_core(self):
        vae_optimizer = self.get_optimizer('opt_vae')
        xp = self.vae.xp

        batch = self.get_iterator('main').next()
        batchsize = len(batch)
        x = chainer.dataset.concat_examples(batch, device=self.device)

        latent_dist = self.vae.encode(x)

        # reconstruction loss
        rec_loss = 0
        for _ in range(self.vae.k):
            reconstructions = self.vae(x, sigmoid=False, mode="sample")
            rec_loss += F.bernoulli_nll(x, reconstructions) \
                / (self.vae.k * batchsize)
        ### latent loss
        # latent loss for continuous
        cont_capacity_loss = 0
        if self.vae.is_continuous:
            mu, ln_var = latent_dist['cont']
            kl_cont_loss = gaussian_kl_divergence(mu, ln_var) / batchsize
            # Annealing loss
            cont_min, cont_max, cont_num_iters, cont_gamma = \
                self.vae.cont_capacity
            cont_cap_now = (cont_max - cont_min) * self.iteration / float(cont_num_iters) + cont_min
            cont_cap_now = min(cont_cap_now, cont_max)
            cont_capacity_loss = cont_gamma * F.absolute(cont_cap_now - kl_cont_loss)

        # latent loss for discrete
        disc_capacity_loss = 0
        if self.vae.is_discrete:
            kl_disc_loss = kl_multiple_discrete_loss(latent_dist['disc'])
            # Annealing loss
            disc_min, disc_max, disc_num_iters, disc_gamma = \
                self.vae.disc_capacity
            disc_cap_now = (disc_max - disc_min) * self.iteration / float(disc_num_iters) + disc_min
            disc_cap_now = min(disc_cap_now, disc_max)
            # Require float conversion here to not end up with numpy float
            disc_theoretical_max = 0
            for disc_dim in self.vae.latent_spec["disc"]:
                disc_theoretical_max += xp.log(disc_dim)
            disc_cap_now = min(disc_cap_now, disc_theoretical_max.astype("float32"))
            disc_capacity_loss = disc_gamma * F.absolute(disc_cap_now - kl_disc_loss)

        joint_vae_loss = rec_loss + cont_capacity_loss + disc_capacity_loss

        self.vae.cleargrads()
        joint_vae_loss.backward()
        vae_optimizer.update()

        chainer.reporter.report({"rec_loss": rec_loss, "cont_loss": cont_capacity_loss,
                                "disc_loss": disc_capacity_loss, "vae_loss": joint_vae_loss, })
        return
Example #17
 def cost(self, x_var, C=1.0, k=1):
     mu, ln_var = self.encode(x_var)
     batchsize = len(mu.data)
     rec_loss = 0
     for l in six.moves.range(k):
         z = F.gaussian(mu, ln_var)
         rec_loss += F.bernoulli_nll(x_var, self.decode(z, sigmoid=False)) \
             / (k * batchsize)
     self.rec_loss = rec_loss
     self.loss = self.rec_loss + C * gaussian_kl_divergence(mu, ln_var) / batchsize
     return self.loss
Example #18
    def __call__(self, x):
        x = Variable(x)
        start = time.time()
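        # encode, sample z with the reparameterisation trick, decode, and
        # sum the KL term with the Bernoulli reconstruction NLL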
        zm, zv = self.encoder((x,))
        z = F.gaussian(zm, zv)
        y = self.decoder((z,))[0]
        kl_loss = F.gaussian_kl_divergence(zm, zv)
        nll_loss = F.bernoulli_nll(x, y)

        loss = kl_loss + nll_loss
        return loss
Example #19
 def lf(x, x_next):
     z = self.encode(x)
     batchsize = len(z.data)
     # reconstruction loss
     loss = 0
     for l in six.moves.range(k):
         loss += F.bernoulli_nll(x_next, self.decode(z, batchsize, sigmoid=False)) \
             / (k * batchsize)                
     self.loss = loss
     chainer.report({'loss': self.loss}, observer=self)
     return self.loss
Example #20
 def get_loss(self, x, y, train=True):
     mu, ln_var = self.encode(x, y)
     batchsize = len(mu.data)
     # reconstruction loss
     rec_loss = 0
     z = F.gaussian(mu, ln_var)
     rec_loss += F.bernoulli_nll(y, self.decode(z, x)) / (batchsize)
     self.rec_loss = rec_loss
     self.loss = self.rec_loss + F.gaussian_kl_divergence(
         mu, ln_var) / batchsize
     return self.loss
Example #21
 def reconstruction_loss(self, x, k):
     batchsize = len(x)
     mu, ln_var = self.encode(x)
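     # draw k posterior samples and average the Bernoulli reconstruction NLL over them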
     samples = []
     loss = 0.
     for _ in range(k):
         z = F.gaussian(mu, ln_var)
         samples.append(z)
         loss += F.bernoulli_nll(x, self.decode(z)) / (k * batchsize)
     dist = Distribution(mu, ln_var, samples)
     return loss, dist
Example #22
    def __call__(self, text, x, t, textlens, xlens):
        batchsize = text.shape[0]

        vk = self.text_enc(text)

        v = vk[:, :self.d, :]
        k = vk[:, self.d:, :]
        q = self.audio_enc(x)

        a = F.matmul(F.transpose(k, (0, 2, 1)), q)
        a = F.softmax(a / self.xp.sqrt(self.d))
        r = F.matmul(v, a)
        rd = F.concat((r, q))

        y = self.audio_dec(rd)

        loss_bin = 0
        for i in range(batchsize):
            loss_bin += F.mean(
                F.bernoulli_nll(t[i, :, :xlens[i]], y[i, :, :xlens[i]], 'no'))
        loss_bin /= batchsize

        y = F.sigmoid(y)

        loss_l1 = 0
        for i in range(batchsize):
            loss_l1 += F.mean_absolute_error(t[i, :, :xlens[i]],
                                             y[i, :, :xlens[i]])
        loss_l1 /= batchsize

        loss_att = 0
        for i in range(batchsize):
            N = textlens[i]
            T = xlens[i]

            def w_fun(n, t):
                return 1 - np.exp(-((n / (N - 1) - t / (T - 1))**2) /
                                  (2 * self.g**2))

            w = np.fromfunction(w_fun, (a.shape[1], T), dtype='f')
            w = self.xp.array(w)
            loss_att += F.mean(w * a[i, :, :T])
        loss_att /= batchsize

        loss = loss_bin + loss_l1 + loss_att

        chainer.reporter.report({
            'loss_bin': loss_bin,
            'loss_l1': loss_l1,
            'loss_att': loss_att,
        })

        return loss, y, a
Example #23
    def update_core(self):
        vae_optimizer = self.get_optimizer('opt_vae')
        dis_optimizer = self.get_optimizer('opt_dis')
        xp = self.vae.xp

        batch = self.get_iterator('main').next()
        batchsize = len(batch)
        x = chainer.dataset.concat_examples(batch, device=self.device)

        mu, ln_var = self.vae.encode(x)
        z_sampled = F.gaussian(mu, ln_var)
        z_shuffled = shuffle_codes(z_sampled)

        logits_z, probs_z = self.dis(z_sampled)
        _, probs_z_shuffle = self.dis(z_shuffled)

        reconstructions = self.vae.decode(z_sampled, sigmoid=False)

        # reconstruction loss
        rec_loss = 0
        for _ in range(self.vae.k):
            rec_loss += F.bernoulli_nll(x, reconstructions) \
                / (self.vae.k * batchsize)
        # latent loss
        lat_loss = self.vae.beta * gaussian_kl_divergence(mu,
                                                          ln_var) / batchsize

        tc_loss = F.mean(logits_z[:, 0] - logits_z[:, 1])
        factor_vae_loss = rec_loss + lat_loss + self.vae.gamma * tc_loss
        dis_loss = -(0.5 * F.mean(F.log(probs_z[:, 0])) \
                     + 0.5 * F.mean(F.log(probs_z_shuffle[:, 1])))

        self.vae.cleargrads()
        self.dis.cleargrads()
        factor_vae_loss.backward()
        vae_optimizer.update()

        # avoid duplicating the backward pass
        z_sampled.unchain_backward()
        self.dis.cleargrads()
        self.vae.cleargrads()
        dis_loss.backward()
        dis_optimizer.update()

        chainer.reporter.report({
            "rec_loss": rec_loss,
            "lat_loss": lat_loss,
            "tc_loss": tc_loss,
            "vae_loss": factor_vae_loss,
            "dis_loss": dis_loss
        })
        return
Example #24
 def lf(x):
     mu, ln_var = self.encode(x)
     batchsize = len(mu.data)
     # reconstruction loss
     rec_loss = 0
     for l in six.moves.range(k):
         z = F.gaussian(mu, ln_var)
         rec_loss += F.bernoulli_nll(x, self.decode(z, sigmoid=False)) \
             / (k * batchsize)
     self.rec_loss = rec_loss
     self.loss = self.rec_loss + \
         C * gaussian_kl_divergence(mu, ln_var) / batchsize
     return self.loss
Example #26
 def lf(self, x, mu, ln_var, split=False):
     batchsize = len(mu.data)
     # reconstruction loss
     rec_loss = 0
     for l in range(k):
         z = F.gaussian(mu, ln_var)
         rec_loss += F.bernoulli_nll(x, self.decoder_model(z, sigmoid=False)) / (k * batchsize)
     kl_loss = C * F.gaussian_kl_divergence(mu, ln_var) / batchsize
     loss = rec_loss + kl_loss
     if split:
         return rec_loss, kl_loss
     else:
         return loss
Example #27
 def lf(x):
     mu, ln_var = self.encode(x)
     batchsize = len(mu)
     # compute the reconstruction error
     rec_loss = 0
     for l in six.moves.range(k):
         z = F.gaussian(mu, ln_var)
         rec_loss += F.bernoulli_nll(x, self.decode(z, sigmoid=False)) / (k * batchsize)
     self.rec_loss = rec_loss
     self.loss = self.rec_loss + \
         beta * gaussian_kl_divergence(mu, ln_var) / batchsize
     chainer.report(
         {'rec_loss': rec_loss, 'loss': self.loss}, observer=self)
     return self.loss
Example #28
 def __call__(self, x, sigmoid=True):
     """AutoEncoder"""
     mu, ln_var = self.encode(x)
     batchsize = len(mu.data)
     # reconstruction loss
     rec_loss = 0
     for l in six.moves.range(self.k):
         z = F.gaussian(mu, ln_var)
         rec_loss += F.bernoulli_nll(x, self.decode(z, sigmoid=False)) \
             / (self.k * batchsize)
     loss = rec_loss + \
         self.C * gaussian_kl_divergence(mu, ln_var) / batchsize
     chainer.report({'loss': loss}, self)
     return loss
Example #29
    def __call__(self, x, C=1.0, k=1):
        mu, ln_var = self.encode(x)
        mb_size = mu.data.shape[0]

        # reconstruction loss
        rec_loss = 0
        for l in range(k):
            z = F.gaussian(mu, ln_var)
            rec_loss += F.bernoulli_nll(x, self.decode(z, sigmoid=False))
        rec_loss /= (k * mb_size)

        kld_loss = gaussian_kl_divergence(mu, ln_var) / mb_size
        loss = rec_loss + C * kld_loss

        return loss, float(rec_loss.data), float(kld_loss.data)
Example #30
 def lf(x):
     mu, ln_var = self.encode(x)
     batchsize = len(mu)
     # reconstruction loss
     rec_loss = 0
     for l in six.moves.range(k):
         z = F.gaussian(mu, ln_var)
         rec_loss += F.bernoulli_nll(x, self.decode(z, sigmoid=False)) \
             / (k * batchsize)
     self.rec_loss = rec_loss
     self.loss = self.rec_loss + \
         beta * gaussian_kl_divergence(mu, ln_var) / batchsize
     chainer.report(
         {'rec_loss': rec_loss, 'loss': self.loss}, observer=self)
     return self.loss
Example #31
        def lf(x):

            in_img = x[0]
            in_labels = x[1:-1]

            mask = x[-1]
            # avoid dividing by 0 when there are no labelled data points in the batch
            non_masked = sum(mask) + 1
            mask_flipped = 1 - mask

            rec_loss = 0
            label_loss = 0
            label_acc = 0

            mu, ln_var = self.encode(in_img)
            batchsize = len(mu.data)

            for l in six.moves.range(k):
                z = F.gaussian(mu, ln_var)

                out_img = self.decode(z, sigmoid=False)
                rec_loss += F.bernoulli_nll(in_img, out_img) / (k * batchsize)

                out_labels = self.predict_label(mu, ln_var, softmax=False)
                for i in range(self.n_latent):
                    n = self.groups_len[i] - 1

                    # certain labels should not contribute to the calculation of the label loss values
                    fixed_labels = (cupy.tile(cupy.array([1] + [-100] * n),
                                              (batchsize, 1)) *
                                    mask_flipped[:, cupy.newaxis])
                    out_labels[i] = out_labels[
                        i] * mask[:, cupy.newaxis] + fixed_labels

                    label_acc += F.accuracy(out_labels[i], in_labels[i])
                    label_loss += F.softmax_cross_entropy(
                        out_labels[i], in_labels[i]) / (k * non_masked)

            self.rec_loss = self.alpha * rec_loss
            self.label_loss = self.gamma * label_loss
            self.label_acc = label_acc

            kl = gaussian_kl_divergence(mu, ln_var) / (batchsize)
            self.kl = self.beta * kl

            self.loss = self.rec_loss + self.label_loss + self.kl

            return self.loss, self.rec_loss, self.label_loss, self.label_acc, self.kl
Example #32
 def lf(x):
     # getting the mu and ln_var of the prior of z with the encoder
     mu, ln_var = self.encode(x)
     batchsize = len(mu.data)
     # creating the latent variable z by sampling from the encoder output
     z = F.gaussian(mu, ln_var)
     # computing the reconstruction loss
     self.rec_loss = F.bernoulli_nll(x, self.decode(
         z, sigmoid=False)) / batchsize
     #self.rec_loss = F.sigmoid_cross_entropy(x, self.decode(z, sigmoid=False))
     # computing the KL divergence
     self.KL_loss = gaussian_kl_divergence(mu, ln_var) / batchsize
     # computing the total loss
     self.loss = self.rec_loss + self.KL_loss
     # returning the losses separately
     return [self.rec_loss, self.loss]
Example #33
    def __call__(self, x, t):
        y = self.ssrn(x)
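        # combine L1 distance and elementwise Bernoulli NLL between the output y and the target t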

        loss_l1 = F.mean_absolute_error(t, y)
        loss_bin = F.mean(F.bernoulli_nll(t, y, 'no'))

        loss = loss_l1 + loss_bin

        chainer.reporter.report(
            {
                'loss_l1': loss_l1,
                'loss_bin': loss_bin,
            }
        )
        
        return loss, y
Example #34
 def lf(x):
     mu, ln_var = self.encode(x)
     batch_size = len(mu.data)
     # reconstruction loss
     rec_loss = 0
     for l in range(k):
         z = f.gaussian(mu, ln_var)
         z.name = "z"
          rec_loss += f.bernoulli_nll(x, self.decode(z, sigmoid=False)) \
             / (k * batch_size)
     self.rec_loss = rec_loss
     self.rec_loss.name = "reconstruction error"
     self.latent_loss = C * gaussian_kl_divergence(mu, ln_var) / batch_size
     self.latent_loss.name = "latent loss"
     self.loss = self.rec_loss + self.latent_loss
     self.loss.name = "loss"
     return self.loss
Example #35
    def loss_func(self, x, t):
        y = self.predict(x)

        # loss = F.sigmoid_cross_entropy(y, t)
        loss = F.bernoulli_nll(t.astype("f"), y) / len(y)
        # if the label is present (t_ = 1):  -log(y_)       (y_ could become 0?)
        # if it is absent      (t_ = 0):  -log(1 - y_)   where t_, y_ are elements of x, y
        # the sum of all of these is divided by the batch size

        chainer.reporter.report({'loss': loss}, self)
        accuracy = self.accuracy(y.data, t)
        chainer.reporter.report({'accuracy': accuracy[0]},
                                self)  # fraction of examples whose labels all match exactly
        chainer.reporter.report({'frequent_error': accuracy[1]},
                                self)  # the label most often misclassified within the batch
        chainer.reporter.report({'acc_66': accuracy[2]}, self)  # accuracy for label 66
        return loss
Example #36
 def lf(x):
     self.other_loss = 0.
     ze = self.encode(x)
     decoded_x = self.decode(ze, sigmoid=False)
     batchsize = x.shape[0]
     # self.rec_loss = F.mean_squared_error(x, decoded_x)
     self.rec_loss = F.bernoulli_nll(x, decoded_x) / batchsize
     self.loss = self.rec_loss + self.other_loss
     chainer.report(
         {
             'rec_loss': self.rec_loss,
             'other_loss': self.other_loss,
             'loss': self.loss
         },
         observer=self)
     del self.rec_loss
     del self.other_loss
     return self.loss
Example #37
    sum_loss_reconstruction = 0

    start_time = time.time()

    perm = np.random.permutation(N)

    for i in range(0, N, batchsize):
        x = chainer.Variable(xp.asarray(x_train[perm[i:i + batchsize]]))
        y = chainer.Variable(xp.asarray(y_train[perm[i:i + batchsize]].astype(np.float32)))
        y_label = np.argmax(y_train[perm[i:i + batchsize]], axis=1)

        # Reconstruction phase
        z_fake_batch = model.encode(x)
        _x = model.decode(z_fake_batch, sigmoid=False)

        loss_reconstruction = F.bernoulli_nll(x, _x) / batchsize

        # Adversarial phase
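        # discriminator targets: class 0 for samples from the prior, class 1 for encoder outputs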
        z_real_batch = utils.sample_z_from_n_2d_gaussian_mixture(batchsize, n_z, y_label, 10, use_gpu)
        z_real_batch_with_label = F.concat((z_real_batch, y))
        p_real_batch = dis(z_real_batch_with_label)

        z_fake_batch_with_label = F.concat((z_fake_batch, y))
        p_fake_batch = dis(z_fake_batch_with_label)

        loss_dis_real = F.softmax_cross_entropy(p_real_batch, chainer.Variable(xp.zeros(batchsize, dtype=np.int32)))
        loss_dis_fake = F.softmax_cross_entropy(p_fake_batch, chainer.Variable(xp.ones(batchsize, dtype=np.int32)))
        loss_dis = loss_dis_fake + loss_dis_real

        loss_gen = F.softmax_cross_entropy(p_fake_batch, chainer.Variable(xp.zeros(batchsize, dtype=np.int32)))
        loss_aae = loss_reconstruction + C * loss_gen
Example #38
 def check_bernoulli_nll(self, x_data, y_data):
     x = chainer.Variable(x_data)
     y = chainer.Variable(y_data)
     actual = cuda.to_cpu(F.bernoulli_nll(x, y, self.reduce).data)
     testing.assert_allclose(self.expect, actual)
Example #39
 def check_invalid_option(self, xp):
     x = chainer.Variable(xp.asarray(self.x))
     y = chainer.Variable(xp.asarray(self.y))
     with self.assertRaises(ValueError):
         F.bernoulli_nll(x, y, 'invalid_option')