Example #1
    def loss_cnn(self,
                 cnn,
                 x_out,
                 t_out,
                 y_out,
                 x_in,
                 lam1=1,
                 lam2=1,
                 lam3=10):  # note: the grayscale term is weighted highest
        loss_rec = lam1 * (F.mean_absolute_error(x_out, t_out))
        loss_adv = lam2 * y_out  # y_out is the loss itself
        if self._colormode == 'YUV':
            l_t = self.YUV2Gray(t_out)
            l_x = self.YUV2Gray(x_out)
        elif self._colormode == 'LAB':
            l_t = self.Lab2Gray(t_out, x_in)
            l_x = self.Lab2Gray(x_out, x_in)
        loss_l = lam3 * (F.mean_absolute_error(l_x, l_t))
        loss = loss_rec + loss_adv + loss_l
        chainer.report(
            {
                'loss': loss,
                'loss_rec': loss_rec,
                'loss_adv': loss_adv,
                'loss_l': loss_l
            }, cnn)

        return loss
Example #2
 def test_invalid_dtype2(self):
     x0 = chainer.Variable(
         numpy.random.uniform(-1, 1, (4, 3)).astype(numpy.float32))
     x1 = chainer.Variable(
         numpy.random.uniform(-1, 1, (4, 3)).astype(numpy.float16))
     with self.assertRaises(type_check.InvalidType):
         functions.mean_absolute_error(x0, x1)
Example #3
    def gene_update_full(self):
        a = Variable(self.converter(self.itr_a.next(), self.device))
        b = Variable(self.converter(self.itr_b.next(), self.device))

        ab = self.generator_ab(a)
        ba = self.generator_ba(b)
        aba = self.generator_ba(ab)
        bab = self.generator_ab(ba)
        aa = self.generator_ba(a)
        bb = self.generator_ab(b)

        ab_disc = self.discriminator_b(ab)
        ba_disc = self.discriminator_a(ba)

        recon_loss = F.mean_absolute_error(a, aba) + F.mean_absolute_error(b, bab)
        gan_loss = self.loss_hinge_gene(ab_disc) + self.loss_hinge_gene(ba_disc)
        ident_loss = F.mean_absolute_error(a, aa) + F.mean_absolute_error(b, bb)

        loss_gene = recon_loss * 3.0 + gan_loss + ident_loss * 0.5

        self.generator_ab.cleargrads()
        self.generator_ba.cleargrads()
        loss_gene.backward()
        self.opt_g_a.update()
        self.opt_g_b.update()

        chainer.reporter.report({
            'loss/g/recon': recon_loss,
            'loss/g/ident': ident_loss,
            'loss/g/gene': gan_loss
        })
Example #5
    def update_core(self):
        xp = self.model.xp
        self._iter += 1
        batch = self.get_iterator('main').next()
        # batch entries: img_processed (B,4,H,W), origin (B,3,H,W), mask (B,1,H,W)
        batchsize = len(batch)

        w_in = self._image_size

        x_train = np.zeros((batchsize, 3, w_in, w_in)).astype("f")
        mask_train = np.zeros((batchsize, 3, w_in, w_in)).astype("f")

        for i in range(batchsize):
            x_train[i, :] = batch[i][0]  #original image
            mask_train[i, :] = batch[i][1]  #0-1 mask of c

        x_train = xp.array(x_train)
        mask_train = xp.array(mask_train)
        mask_b = xp.array(mask_train.astype("bool"))

        I_gt = Variable(x_train)
        M = Variable(mask_train)
        M_b = Variable(mask_b)

        I_out = self.model(I_gt, M)
        I_comp = F.where(
            M_b, I_gt, I_out
        )  # where an element of M_b is True, take the corresponding element of I_gt; otherwise take that of I_out

        fs_I_gt = vgg_extract(self.vgg, I_gt)  #feature dict
        fs_I_out = vgg_extract(self.vgg, I_out)  #feature dict
        fs_I_comp = vgg_extract(self.vgg, I_comp)  #feature dict

        opt_model = self.get_optimizer('model')

        L_valid = F.mean_absolute_error(M * I_out, M * I_gt)
        L_hole = F.mean_absolute_error((1 - M) * I_out, (1 - M) * I_gt)

        L_perceptual = calc_loss_perceptual(fs_I_gt, fs_I_out, fs_I_comp)

        L_style = calc_loss_style(fs_I_out, fs_I_comp,
                                  fs_I_gt)  #Loss style out and comp
        L_tv = calc_loss_tv(I_comp, M, xp=xp)

        L_total = L_valid + self._lambda1 * L_hole + self._lambda2 * L_perceptual + \
                  self._lambda3 * L_style + self._lambda4 * L_tv

        self.vgg.cleargrads()
        self.model.cleargrads()
        L_total.backward()
        opt_model.update()

        chainer.report({'L_valid': L_valid})
        chainer.report({'L_hole': L_hole})
        chainer.report({'L_perceptual': L_perceptual})
        chainer.report({'L_style': L_style})
        chainer.report({'L_tv': L_tv})
Example #6
    def get_loss(self, perm, vgg, target, dis=None, layers=[]):
        xp = self.xp
        target_ = target[perm]
        x, z = self(perm, return_z=True)
        losses = []
        # x = x.array if hinge and x is None else x  # when only update classifier
        loss = F.mean_absolute_error(x, target_)
        losses.append(backward(loss / loss.array))

        # x_resize = x if x.shape[2] == 256 else F.unpooling_2d(x, 2, 2, 0, outsize=(256, 256))
        # target_resize = target_ if target_.shape[2] == 256 else F.unpooling_2d(target, 2, 2, 0, outsize=(256, 256))
        if vgg is not None:
            with chainer.using_config('train', False), chainer.using_config(
                    'enable_backprop', False):
                mean = xp.array([103.939, 116.779, 123.68],
                                dtype="float32")[None, :, None, None]
                target_features = vgg((target_ + 1) * 127.5 - mean,
                                      layers=layers)

            mean = xp.array([103.939, 116.779, 123.68],
                            dtype="float32")[None, :, None, None]
            vgg_features = vgg((x + 1) * 127.5 - mean, layers=layers)

            for layer in layers:
                if self.config.perceptual_type == "l1":
                    loss = F.mean_absolute_error(target_features[layer],
                                                 vgg_features[layer])
                elif self.config.perceptual_type == "l2":
                    loss = F.mean_squared_error(target_features[layer],
                                                vgg_features[layer])
                l_per = 1 / loss.array / 10 if self.l_per < 0 or self.l_per > 1000 else self.l_per
                losses.append(backward(loss * l_per))
                chainer.reporter.report({f'loss_{layer}': loss})
            # loss += F.mean(F.square(z)) * self.lam

        if self.l_emd > 0:
            losses.append(backward(self.EMD(self.z.W[perm]) * self.l_emd))

        if self.l_re > 0:
            losses.append(backward(self.l1_reg() * self.l_re))

        if self.l_patch_dis > 0:
            losses.append(backward(
                self.patch_loss_gen(dis) * self.l_patch_dis))

        if self.l_gp > 0:
            losses.append(backward(self.gp_loss(x, z) * self.l_gp))
        return x, losses
Example #7
    def get_loss(self, perm, vgg, target, dis=None, layers=[]):
        xp = self.xp
        target_ = target[perm]
        x, z = self(perm, return_z=True)
        losses = []

        loss = F.mean_absolute_error(x, target_)
        if self.config.normalize_l1_loss:
            losses.append(backward(loss / loss.array))
        else:
            losses.append(backward(loss))

        if vgg is not None:
            with chainer.using_config('train', False), chainer.using_config(
                    'enable_backprop', False):
                mean = xp.array([103.939, 116.779, 123.68],
                                dtype="float32")[None, :, None, None]
                target_features = vgg((target_ + 1) * 127.5 - mean,
                                      layers=layers)

            mean = xp.array([103.939, 116.779, 123.68],
                            dtype="float32")[None, :, None, None]
            vgg_features = vgg((x + 1) * 127.5 - mean, layers=layers)

            for layer in layers:
                if self.config.perceptual_type == "l1":
                    loss = F.mean_absolute_error(target_features[layer],
                                                 vgg_features[layer])
                elif self.config.perceptual_type == "l2":
                    loss = F.mean_squared_error(target_features[layer],
                                                vgg_features[layer])
                l_per = 1 / loss.array / 10 if self.l_per < 0 or self.l_per > 1000 else self.l_per
                losses.append(backward(loss * l_per))
                chainer.reporter.report({f'loss_{layer}': loss})

        if self.l_emd > 0:
            losses.append(backward(self.EMD(self.z.W[perm]) * self.l_emd))

        if self.l_re > 0:
            losses.append(backward(self.l1_reg() * self.l_re))

        if self.l_patch_dis > 0:
            losses.append(backward(
                self.patch_loss_gen(dis) * self.l_patch_dis))

        if self.l_gp > 0:
            losses.append(backward(self.gp_loss(x, z) * self.l_gp))
        return x, losses
Example #8
def calc_loss_perceptual(hout_dict, hcomp_dict, hgt_dict):
    layers = list(hout_dict.keys())
    layer_name = layers[0]
    loss = F.mean_absolute_error(hout_dict[layer_name], hgt_dict[layer_name])
    loss += F.mean_absolute_error(hcomp_dict[layer_name], hgt_dict[layer_name])
    for layer_name in layers[1:]:
        loss += F.mean_absolute_error(hout_dict[layer_name], hgt_dict[layer_name])
        loss += F.mean_absolute_error(hcomp_dict[layer_name], hgt_dict[layer_name])
    return loss
Example #9
 def loss_cnn(self, cnn, x_out, t_out, y_out, lam1=1, lam2=1, lam3=10):
     loss_rec = lam1 * (F.mean_absolute_error(x_out, t_out))
     loss_adv = lam2 * y_out
     l_t, e_t = self.l.calc((t_out - 128) / 128)
     l_x, e_x = self.l.calc((x_out - 128) / 128)
     loss_l = lam3 * (F.mean_absolute_error(l_x, l_t))
     loss = loss_rec + loss_adv + loss_l
     chainer.report({'loss': loss, 'loss_rec': loss_rec,
                     'loss_adv': loss_adv, 'loss_l': loss_l}, cnn)

     return loss
Example #10
    def gen_loss(self, discriminator, y_convert, y_recon, s, c, si, ci):
        fake_convert_feat, fake_convert = discriminator(y_convert, si)
        fake_recon_feat, fake_recon = discriminator(y_recon, ci)
        real_feat_c, _ = discriminator(c, ci)
        real_feat_s, _ = discriminator(s, si)

        adv_loss = (self.gen_hinge_loss(fake_convert) +
                    self.gen_hinge_loss(fake_recon)) * 0.5
        recon_loss = F.mean_absolute_error(c, y_recon)
        fm_loss = F.mean_absolute_error(fake_convert_feat, real_feat_s)
        fm_loss += F.mean_absolute_error(fake_recon_feat, real_feat_c)

        return (adv_loss, recon_loss, fm_loss)
Example #11
def learning_consist(gen_BtoA, gen_AtoB, optgen_BtoA, optgen_AtoB, data, T=5):
    a = 10
    for time in range(T):
        optgen_BtoA.target.cleargrads()
        optgen_AtoB.target.cleargrads()
        ytemp1 = gen_BtoA(data[1])
        ytemp2 = gen_AtoB(data[0])
        loss_train = 0.5 * a * F.mean_absolute_error(ytemp1, data[1]) \
                     + 0.5 * a * F.mean_absolute_error(ytemp2, data[0])
        loss_train.backward()
        result = loss_train.data
        optgen_BtoA.update()
        optgen_AtoB.update()
Example #12
def calc_loss_tv(Icomp, mask, xp=np):
    canvas = mask.data.copy()  # copy first so the dilation below does not mutate mask.data in place
    canvas[:, :, :, :-1] += mask.data[:, :, :, 1:]  #mask left overlap
    canvas[:, :, :, 1:] += mask.data[:, :, :, :-1]  #mask right overlap
    canvas[:, :, :-1, :] += mask.data[:, :, 1:, :]  #mask up overlap
    canvas[:, :, 1:, :] += mask.data[:, :, :-1, :]  #mask bottom overlap

    P = Variable((xp.sign(canvas - 0.5) + 1.0) * 0.5)
    # P region (hole mask: the hole dilated by 1 pixel)
    return F.mean_absolute_error(
        P[:, :, :, 1:] * Icomp[:, :, :, 1:],
        P[:, :, :, :-1] * Icomp[:, :, :, :-1]) + F.mean_absolute_error(
            P[:, :, 1:, :] * Icomp[:, :, 1:, :],
            P[:, :, :-1, :] * Icomp[:, :, :-1, :])
Example #13
    def backward_EG(self, fake_data_encoded, fake_data_random, fake_B_encoded,
                    real_B_encoded, D, D2, lambda_GAN, lambda_GAN2, mu,
                    logvar):
        lambda_kl = 0.01
        lambda_L1 = 10.0

        # 1. G(A) should fool D
        loss_G_GAN = self.loss_G_GAN(fake_data_encoded, D, lambda_GAN)
        loss_G_GAN2 = self.loss_G_GAN(fake_data_random, D2, lambda_GAN2)
        # 2. KL loss
        if lambda_kl > 0:
            kl_element = (((mu**2) + F.exp(logvar)) * -1) + 1 + logvar
            loss_kl = F.sum(kl_element) * -0.5 * lambda_kl
        else:
            loss_kl = 0
        # 3. reconstruction |fake_B - real_B|
        if lambda_L1 > 0:
            loss_G_L1 = F.mean_absolute_error(fake_B_encoded, real_B_encoded)
            loss_G_L1 = lambda_L1 * loss_G_L1
        else:
            loss_G_L1 = 0

        loss_G = loss_G_GAN + loss_G_GAN2 + loss_G_L1 + loss_kl
        loss_G.backward()  # no unchain_backward here; the graph is still needed by backward_G_alone

        chainer.report({
            'loss_G_GAN': loss_G_GAN,
            'loss_G_GAN2': loss_G_GAN2,
            'loss_G_L1': loss_G_L1,
            'loss_kl': loss_kl,
            'loss_G': loss_G,
        })
Example #14
    def calc_loss(self, x, t):
        # h = self.encode_model.feature(x)
        # print('encode_model_space', h)
        xp = backend.get_array_module(x)

        VAE_LOSS_SCALE = 1e-5
        JOINT_LOSS_WEIGHTS = xp.linspace(10, 1, t.shape[1])
        # print(JOINT_LOSS_WEIGHTS)

        # action model output
        output, z_mu, z_ln_var = self.forward_with_z(x)
        # print(t.shape, output.shape, z_mu.shape, z_ln_var.shape)

        # MAE of the action model
        self.mean_abs_error = F.mean_absolute_error(t, output)
        self.weighted_joint_error = F.mean(F.squared_error(t, output) * JOINT_LOSS_WEIGHTS)
        self.gnll = self.action.negative_log_likelihood(F.concat((z_mu, z_ln_var)), t)

        # VAE loss
        self.vae_loss_rec, self.vae_loss_kl = self.VAE_loss_func()(self, x, z_mu, z_ln_var, split=True)
        self.vae_loss = VAE_LOSS_SCALE * (self.vae_loss_rec + self.vae_loss_kl)
        
        # Total loss
        self.total_loss = self.weighted_joint_error + \
                          self.vae_loss + self.gnll

        chainer.report({'mae': self.mean_abs_error}, self)
        chainer.report({'gnll': self.gnll}, self)
        chainer.report({'weighted': self.weighted_joint_error}, self)
        chainer.report({'VAE': self.vae_loss}, self)
        chainer.report({'VAE_KL': self.vae_loss_kl}, self)
        chainer.report({'VAE_REC': self.vae_loss_rec}, self)
        chainer.report({'loss': self.total_loss}, self)
        return self.total_loss
Example #15
    def learn(self, batch_state_int, batch_action, batch_reward, batch_done, batch_next_state_int):
        batch_state_int, batch_action, batch_reward, batch_done, batch_next_state_int = to_device(
            self.network._device_id,
            (batch_state_int, batch_action, batch_reward, batch_done, batch_next_state_int))

        batch_state = batch_state_int.astype(np.float32) / 255  # [0, 255] -> [0.0, 1.0]
        batch_next_state = batch_next_state_int.astype(np.float32) / 255  # [0, 255] -> [0.0, 1.0]

        batch_y, batch_q = self._compute_q_y(batch_state, batch_action, batch_reward, batch_done, batch_next_state)
        # assert len(batch_q.shape) == 1
        # assert len(batch_y.shape) == 1
        # assert batch_q.shape[0] == batch_y.shape[0]

        # loss = F.mean(F.huber_loss(batch_q, batch_y, delta=1.0, reduce='no'))
        loss = F.sum(F.huber_loss(batch_q, batch_y, delta=1.0, reduce='no'))

        with chainer.no_backprop_mode():
            td_error = F.mean_absolute_error(batch_q, batch_y)

        self.network.cleargrads()
        loss.backward()
        self.optimizer.update()

        loss_cpu = to_device(CPU_ID, loss.array)
        td_error_cpu = to_device(CPU_ID, td_error.array)
        return loss_cpu, td_error_cpu
Example #16
    def update_core(self):
        iterator = self.get_iterator('main')  # type: Iterator
        g_opt = self.get_optimizer('gen')  # type: chainer.Optimizer
        d_opt = self.get_optimizer('dis')  # type: chainer.Optimizer

        batch = iterator.next()
        batch_size = len(batch)
        x1, x2 = self.converter(batch, self.device)

        generated = self.gen(x1)
        dis_real = self.dis(x1, x2)
        dis_fake = self.dis(x1, generated)

        g_loss = (F.sum(F.softplus(-dis_fake)) / dis_fake.size
                  + self.lambda_ * F.mean_absolute_error(generated, x2))
        reporter.report({'loss': g_loss}, self.gen)
        self.gen.cleargrads()
        g_loss.backward()
        g_opt.update()
        del g_loss

        d_loss = (F.sum(F.softplus(-dis_real)) / dis_real.size
                  + F.sum(F.softplus(dis_fake)) / dis_fake.size)
        reporter.report({'loss': d_loss}, self.dis)
        self.dis.cleargrads()
        d_loss.backward()
        d_opt.update()
        del d_loss, dis_fake, dis_real, x1, x2, batch, generated
Example #17
 def pixel_wise_loss(self, x, y):
     if self.loss_norm == 1:
         return F.mean_absolute_error(x, y)
     elif self.loss_norm == 2:
         return F.mean_squared_error(x, y)
     else:
         raise ValueError('Invalid norm {}'.format(self.loss_norm))
Example #18
 def check_fp16_overflow(self, xp):
     x0 = chainer.Variable(
         xp.full(shape=(64, 1, 16, 16), fill_value=2, dtype=xp.float16))
     x1 = chainer.Variable(
         xp.full(shape=(64, 1, 16, 16), fill_value=-2, dtype=xp.float16))
     loss = functions.mean_absolute_error(x0, x1)
     self.assertFalse(xp.isinf(loss.array))
Example #19
 def total_loss_gen(self, dis_fake, x_fake, y_real, lat=None):
     """
     Function to compute the total loss of the generator.
     """
     # # adversarial loss for generator.
     lgen = self.loss_gen(dis_fake=dis_fake)
     chainer.reporter.report({'lgen_adv': lgen.array})
     loss = lgen + 0
     # # loop over the additional loss types.
     for lt in self.add_loss_gen:
         if lt == 'l1':
             # # L1 loss.
             loss_l1 = F.mean_absolute_error(y_real, x_fake)
             chainer.reporter.report({'loss_l1': loss_l1.array})
             loss += self.l1_weight * loss_l1
         elif lt == 'decovl':
             # # decov loss.
             loss_decov = losses.decov_loss(lat)
             chainer.reporter.report({'ldecov': loss_decov.array})
             loss += self.decovl_weight * loss_decov
         else:
             m1 = 'Unrecognized loss type ({}).'
             raise RuntimeError(m1.format(lt))
     return loss
Example #20
def loss_gen(d_fake_result, fake_image, correct_image, lamda):
    batchsize, ch, w, h = d_fake_result.data.shape
    adversarial_loss = F.mean_squared_error(
        d_fake_result,
        Variable(xp.ones((batchsize, ch, w, h), dtype=xp.float32)))
    consistency_loss = F.mean_absolute_error(correct_image, fake_image)
    return adversarial_loss + lamda * consistency_loss
Example #21
    def __call__(self, *args):
        """Computes the loss value for an input and label pair.
        It also computes accuracy and stores it to the attribute.
        Args:
            x: batch of sample data points
            t: bath of ground truth points.
        It feeds features to the predictor and compare the result with ground truth.
        Returns:
            ~chainer.Variable: Loss value.
        """

        self.y = self.predictor(*args[:-1]) # The last argument is the targets (ground truths)
        t = args[-1]

        if self.last_relu:
            self.y = F.relu(self.y)

        mae = F.mean_absolute_error(self.y, t)
        mse = F.mean_squared_error(self.y, t)
        mrae = self.mrae(self.y, t)

        self.loss = self.loss_coeffs[0] * mae + self.loss_coeffs[1] * mse \
                    + self.loss_coeffs[2] * mrae

        if self.calc_sid:
            sid = self.sid(F.relu(self.y), t)
            if self.loss_coeffs[3] != 0:
                self.loss += self.loss_coeffs[3] * sid
        else:
            sid = -1  # sentinel: SID was not calculated

        reporter.report({'MSE': mse, 'MAE': mae, 'MRAE': mrae, 'SID': sid, 'loss': self.loss}, self)
        return self.loss
Example #22
    def __call__(self, input_array, dists, pairs_index, targets):

        out = self.predict(input_array, dists, pairs_index)
        loss = F.mean_absolute_error(
            out, np.concatenate([targets, targets], axis=0))
        reporter.report({'loss': loss}, self)
        return loss
Example #23
    def update_core(self):
        # read data
        batch = self._iterators['main'].next()
        x, x_real, char = self.converter(batch, self.device)
        iteration = self.iteration

        # forward
        x_fake = self.generator(x, char)

        y_real = self.discriminator(x_real)
        y_fake = self.discriminator(x_fake)

        h_real = self.generator.encode(x_real)
        h_fake = self.generator.encode(x_fake)

        # compute loss
        loss_recon = F.mean_absolute_error(x_real, x_fake)

        # update
        self.generator.cleargrads()
        loss_recon.backward()
        self._optimizers['generator'].update()

        # report
        chainer.reporter.report({'loss/recon': loss_recon})
Example #24
def calc_loss(fake, real):
    loss = 0
    for f, r in zip(fake, real):
        _, c, h, w = f.shape
        loss += F.mean_absolute_error(f, r) / (c * h * w)

    return loss
Example #25
 def __call__(self, x):
     f = self.extract(x)
     x_recon = self.reconstruct(f)
     loss = F.mean_absolute_error(F.scale(x, self.mask),
                                  F.scale(x_recon, self.mask)) * self.loss_const
     report({'loss': loss}, self)
     return loss
Example #26
 def loss_dec(self, dec, x_out, t_out, y_out, lam1=100, lam2=1):
     batchsize, _, w, h = y_out.data.shape
     loss_rec = lam1 * (F.mean_absolute_error(x_out, t_out))
     loss_adv = lam2 * F.sum(F.softplus(-y_out)) / batchsize / w / h
     loss = loss_rec + loss_adv
     chainer.report({'loss': loss}, dec)
     return loss
Example #27
    def gen_loss(discriminator, y_fake, x_fake, x, y_label):
        fake = discriminator(y_fake, y_label)

        loss = F.mean(F.softplus(-fake))
        loss += 10 * F.mean_absolute_error(x_fake, x)

        return loss
Example #28
def _loss_predictor_cg(predictor, reconstruct, output, target, d_fake,
                       loss_config: LossConfig):
    b, _, t = d_fake.data.shape

    loss_mse = (F.mean_absolute_error(reconstruct, target))
    chainer.report({'mse': loss_mse}, predictor)

    loss_identity = (F.mean_absolute_error(output, target))
    chainer.report({'identity': loss_identity}, predictor)

    loss_adv = F.sum(F.softplus(-d_fake)) / (b * t)
    chainer.report({'adversarial': loss_adv}, predictor)

    loss = loss_config.mse * loss_mse + loss_config.mse / 100 * loss_identity + loss_config.adversarial * loss_adv
    chainer.report({'loss': loss}, predictor)
    return loss
Example #29
 def loss_enc(self, enc, x_out, t_out, lam1=100, lam2=1):
     # batchsize,_,w,h = y_out.data.shape
     loss_rec = lam1 * (F.mean_absolute_error(x_out, t_out))
     # loss_adv = lam2*F.sum(F.softplus(-y_out)) / batchsize / w / h  # what is this?
     loss = loss_rec  # + loss_adv
     chainer.report({'loss': loss}, enc)
     return loss
Example #30
    def fit(self, X, Y, epochs=30):
        self.X = X
        self.Y = Y
        self.epochs = epochs

        o_resnet = optimizers.Adam(alpha=1e-5, beta1=0.1)
        o_resnet.setup(self.tgs)
        self.loss = []
        for epoch in range(1, epochs + 1):

            sum_loss = np.float32(0)

            chunk = len(X) // 10
            for i in range(10):
                # process the data in 10 consecutive chunks per epoch
                image = X[i * chunk:(i + 1) * chunk]
                mask = Y[i * chunk:(i + 1) * chunk]

                output = self.ResNet(image, mask)

                loss = F.mean_absolute_error(mask, output)

                self.ResNet.cleargrads()
                loss.backward()
                o_resnet.update()

                sum_loss += loss.data

            print('epoch:', epoch, 'loss:', loss.data, 'sum_loss:', sum_loss)

            self.loss.append(sum_loss)

        print(self.loss)
Example #31
 def __call__(self, x, t):
     y = self.predictor(x)
     #print("y_shape:", y.shape)
     #print("t_shape:", t.shape)
     #loss = F.softmax_cross_entropy(y, t)
     loss = F.mean_absolute_error(y, t.astype(np.float32))
     chainer.report({'loss': loss}, self)
     return loss
Example #32
 def test_backward_non_default_gpu(self):
     x0 = chainer.Variable(cuda.to_gpu(self.x0, 1))
     x1 = chainer.Variable(cuda.to_gpu(self.x1, 1))
     gy = cuda.to_gpu(self.gy, 1)
     with cuda.get_device_from_id(0):
         y = functions.mean_absolute_error(x0, x1)
         y.grad = gy
         y.backward()
Example #33
    def _loss_predictor(self, predictor, output, target, d_fake):
        b, _, t = d_fake.data.shape

        loss_mse = (F.mean_absolute_error(output, target))
        chainer.report({'mse': loss_mse}, predictor)

        loss_adv = F.sum(F.softplus(-d_fake)) / (b * t)
        chainer.report({'adversarial': loss_adv}, predictor)

        loss = self.loss_config.mse * loss_mse + self.loss_config.adversarial * loss_adv
        chainer.report({'loss': loss}, predictor)
        return loss
Example #34
    def check_forward(self, x0_data, x1_data):
        x0 = chainer.Variable(x0_data)
        x1 = chainer.Variable(x1_data)
        loss = functions.mean_absolute_error(x0, x1)
        loss_value = cuda.to_cpu(loss.data)
        self.assertEqual(loss_value.dtype, numpy.float32)
        self.assertEqual(loss_value.shape, ())

        # Compute expected value
        loss_expect = 0.
        for i in numpy.ndindex(self.x0.shape):
            loss_expect += abs(self.x0[i] - self.x1[i])
        loss_expect /= self.x0.size

        self.assertAlmostEqual(loss_expect, loss_value, places=5)
Example #35
    def check_forward(self, x0_data, x1_data):
        x0 = chainer.Variable(x0_data)
        x1 = chainer.Variable(x1_data)
        loss = functions.mean_absolute_error(x0, x1)
        loss_value = cuda.to_cpu(loss.data)

        assert loss_value.dtype == numpy.float32
        assert loss_value.shape == ()

        # Compute expected value
        loss_expect = 0.
        for i in numpy.ndindex(self.x0.shape):
            loss_expect += abs(self.x0[i] - self.x1[i])
        loss_expect /= self.x0.size

        assert round(loss_expect - loss_value, 5) == 0
Example #36
 def __call__(self, x0, x1):
     if self.scaler is not None:
         x0 = self.scaler.inverse_transform(x0)
         x1 = self.scaler.inverse_transform(x1)
     return F.mean_absolute_error(x0, x1)
Example #37
File: losses.py Project: kzky/works
    def __call__(self, x_recon, x):
        bs = x.shape[0]
        d = np.prod(x.shape[1:])
        self.loss = F.mean_absolute_error(x_recon, x) / d

        return self.loss
Example #38
 def __call__(self, X, Y):
     O = self.unet(X)
     self.loss = F.mean_absolute_error(X*O, Y)
     return self.loss
Example #39
 def forward(self, inputs, device):
     x0, x1 = inputs
     loss = functions.mean_absolute_error(x0, x1)
     return loss,