Code example #1
  def dis_update(self, images_a, images_b, hyperparameters):

    # for para in self.dis.parameters():
    #   para.data.clamp_(-0.1, 0.1)

    self.dis.zero_grad()
    x_aa, x_ba, x_ab, x_bb, shared = self.gen(images_a, images_b)
    data_a = torch.cat((images_a, x_ba), 0)
    data_b = torch.cat((images_b, x_ab), 0)
    res_a, res_b = self.dis(data_a,data_b)

    # res_true_a, res_true_b = self.dis(images_a,images_b)
    # res_fake_a, res_fake_b = self.dis(x_ba, x_ab)
    for it, (this_a, this_b) in enumerate(itertools.izip(res_a, res_b)):
      out_a = nn.functional.sigmoid(this_a)
      out_b = nn.functional.sigmoid(this_b)
      # print out_a.size()
      out_true_a, out_fake_a = torch.split(out_a, out_a.size(0) // 2, 0)
      out_true_b, out_fake_b = torch.split(out_b, out_b.size(0) // 2, 0)
      # print out_true_a.size()
      out_true_n = out_true_a.size(0)
      out_fake_n = out_fake_a.size(0)
      # all1 = Variable(torch.ones((out_true_n)).cuda(self.gpu))
      tmp_all1 = torch.from_numpy(np.random.uniform(1.0,1.0, size=out_true_n))
      # print tmp_all1
      # print out_true_a
      all1 = Variable(tmp_all1).float().cuda()
      # all0 = Variable(torch.zeros((out_fake_n)).cuda(self.gpu))
      tmp_all0 = torch.from_numpy(np.random.uniform(0.0, 0.0, size=out_fake_n))
      all0 = Variable(tmp_all0).float().cuda()
      # dis only contains classification loss
      ad_true_loss_a = nn.functional.binary_cross_entropy(out_true_a, all1)
      ad_true_loss_b = nn.functional.binary_cross_entropy(out_true_b, all1)
      ad_fake_loss_a = nn.functional.binary_cross_entropy(out_fake_a, all0)
      ad_fake_loss_b = nn.functional.binary_cross_entropy(out_fake_b, all0)
      if it==0:
        ad_loss_a = ad_true_loss_a + ad_fake_loss_a
        ad_loss_b = ad_true_loss_b + ad_fake_loss_b
      else:
        ad_loss_a += ad_true_loss_a + ad_fake_loss_a
        ad_loss_b += ad_true_loss_b + ad_fake_loss_b
      true_a_acc = _compute_true_acc(out_true_a)
      true_b_acc = _compute_true_acc(out_true_b)
      fake_a_acc = _compute_fake_acc(out_fake_a)
      fake_b_acc = _compute_fake_acc(out_fake_b)
      exec( 'self.dis_true_acc_%d = 0.5 * (true_a_acc + true_b_acc)' %it)
      exec( 'self.dis_fake_acc_%d = 0.5 * (fake_a_acc + fake_b_acc)' %it)

    loss = hyperparameters['gan_w'] * ( ad_loss_a + ad_loss_b )
    loss.backward()
    self.dis_opt.step()
    self.dis_loss = loss.data.cpu().numpy()[0]
    print("dis_loss: ", self.dis_loss)
    print("dis_true_acc: ", 0.5*(true_a_acc + true_b_acc))
    print("dis_fake_acc: ", 0.5*(fake_a_acc + fake_b_acc))
    return
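Examples #1 through #4 are written for Python 2 and pre-0.4 PyTorch (itertools.izip, Variable, loss.data.cpu().numpy()[0]); example #5 uses zip and loss.item() and is closer to current releases. The accuracy helpers _compute_true_acc and _compute_fake_acc are referenced but not defined in any of the snippets. Below is a minimal sketch of plausible implementations, written for current PyTorch and assuming they simply threshold the sigmoid outputs at 0.5; the original helpers may differ in detail.

    import torch

    def _compute_true_acc(predictions):
        # Fraction of "real" samples the discriminator scores above 0.5.
        predictions = (predictions >= 0.5).float().view(-1)
        return predictions.sum().item() / predictions.numel()

    def _compute_fake_acc(predictions):
        # Fraction of "fake" samples the discriminator scores below 0.5.
        predictions = (predictions < 0.5).float().view(-1)
        return predictions.sum().item() / predictions.numel()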
Code example #2
    def dis_update(self, images_a, images_b, hyperparameters):
        self.dis.zero_grad()
        x_aa, x_ba, x_ab, x_bb, shared = self.gen(images_a, images_b)
        data_a = torch.cat((images_a, x_ba), 0)
        data_b = torch.cat((images_b, x_ab), 0)
        res_a, res_b = self.dis(data_a, data_b)
        # res_true_a, res_true_b = self.dis(images_a,images_b)
        # res_fake_a, res_fake_b = self.dis(x_ba, x_ab)
        for it, (this_a, this_b) in enumerate(itertools.izip(res_a, res_b)):
            out_a = nn.functional.sigmoid(this_a)
            out_b = nn.functional.sigmoid(this_b)
            out_true_a, out_fake_a = torch.split(out_a, out_a.size(0) // 2, 0)
            out_true_b, out_fake_b = torch.split(out_b, out_b.size(0) // 2, 0)
            out_true_n = out_true_a.size(0)
            out_fake_n = out_fake_a.size(0)
            all1 = Variable(torch.ones((out_true_n)).cuda(self.gpu))
            all0 = Variable(torch.zeros((out_fake_n)).cuda(self.gpu))
            ad_true_loss_a = nn.functional.binary_cross_entropy(
                out_true_a, all1)
            ad_true_loss_b = nn.functional.binary_cross_entropy(
                out_true_b, all1)
            ad_fake_loss_a = nn.functional.binary_cross_entropy(
                out_fake_a, all0)
            ad_fake_loss_b = nn.functional.binary_cross_entropy(
                out_fake_b, all0)
            if it == 0:
                ad_loss_a = ad_true_loss_a + ad_fake_loss_a
                ad_loss_b = ad_true_loss_b + ad_fake_loss_b
            else:
                ad_loss_a += ad_true_loss_a + ad_fake_loss_a
                ad_loss_b += ad_true_loss_b + ad_fake_loss_b
            true_a_acc = _compute_true_acc(out_true_a)
            true_b_acc = _compute_true_acc(out_true_b)
            fake_a_acc = _compute_fake_acc(out_fake_a)
            fake_b_acc = _compute_fake_acc(out_fake_b)
            exec('self.dis_true_acc_%d = 0.5 * (true_a_acc + true_b_acc)' % it)
            exec('self.dis_fake_acc_%d = 0.5 * (fake_a_acc + fake_b_acc)' % it)
        loss = hyperparameters['gan_w'] * (ad_loss_a + ad_loss_b)
        loss.backward()

        self.gen_update_gen_enc_param_norm = calc_grad_norm(
            self.gen.enc_shared.parameters())
        self.gen_update_gen_dec_param_norm = calc_grad_norm(
            self.gen.dec_shared.parameters())
        self.gen_update_dis_param_norm = calc_grad_norm(
            self.dis.model_S.parameters())
        self.dis_opt.step()
        self.dis_loss = loss.data.cpu().numpy()[0]
        return
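Example #2 additionally records gradient norms right after loss.backward() (note that it stores them under gen_update_* attribute names even though this is the discriminator step). calc_grad_norm is not shown either; a minimal sketch, assuming it returns the global L2 norm of the gradients currently stored on the given parameters:

    import math

    def calc_grad_norm(parameters):
        # Global L2 norm over the .grad tensors of an iterable of parameters.
        total = 0.0
        for p in parameters:
            if p.grad is not None:
                total += p.grad.data.norm(2).item() ** 2
        return math.sqrt(total)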
Code example #3
    def dis_update(self, images_a, images_b, hyperparameters):
        if self.use_xy:
            images_a_xy = torch.cat((images_a, self.xy), 1)
            images_b_xy = torch.cat((images_b, self.xy), 1)
        else:
            images_a_xy = images_a
            images_b_xy = images_b
        x_aa, x_ba, x_ab, x_bb, shared = self.gen(images_a_xy, images_b_xy)

        data_a = torch.cat((images_a, x_ba), 0)
        data_b = torch.cat((images_b, x_ab), 0)
        #res_a, res_b = self.dis(data_a,data_b)
        res_true_a, res_true_b = self.dis(images_a, images_b)
        res_fake_a, res_fake_b = self.dis(x_ba, x_ab)

        # res_true_a, res_true_b = self.dis(images_a,images_b)
        # res_fake_a, res_fake_b = self.dis(x_ba, x_ab)
        if 0 != hyperparameters['gamma_js_regularization']:
            js_reg_a = 0
            js_reg_b = 0
        for it, (this_true_a, this_true_b, this_fake_a, this_fake_b, in_a,
                 in_b, fake_a, fake_b) in enumerate(
                     itertools.izip(res_true_a, res_true_b, res_fake_a,
                                    res_fake_b, images_a, images_b, x_ba,
                                    x_ab)):
            out_true_a, out_fake_a = nn.functional.sigmoid(
                this_true_a), nn.functional.sigmoid(this_fake_a)
            out_true_b, out_fake_b = nn.functional.sigmoid(
                this_true_b), nn.functional.sigmoid(this_fake_b)

            out_true_n = out_true_a.size(0)
            out_fake_n = out_fake_a.size(0)
            all1 = Variable(torch.ones((out_true_n)).cuda(self.gpu))
            all0 = Variable(torch.zeros((out_fake_n)).cuda(self.gpu))
            ad_true_loss_a = nn.functional.binary_cross_entropy(
                out_true_a, all1)
            ad_true_loss_b = nn.functional.binary_cross_entropy(
                out_true_b, all1)
            ad_fake_loss_a = nn.functional.binary_cross_entropy(
                out_fake_a, all0)
            ad_fake_loss_b = nn.functional.binary_cross_entropy(
                out_fake_b, all0)
            if it == 0:
                ad_loss_a = ad_true_loss_a + ad_fake_loss_a
                ad_loss_b = ad_true_loss_b + ad_fake_loss_b
            else:
                ad_loss_a += ad_true_loss_a + ad_fake_loss_a
                ad_loss_b += ad_true_loss_b + ad_fake_loss_b
            true_a_acc = _compute_true_acc(out_true_a)
            true_b_acc = _compute_true_acc(out_true_b)
            fake_a_acc = _compute_fake_acc(out_fake_a)
            fake_b_acc = _compute_fake_acc(out_fake_b)
            exec('self.dis_true_acc_%d = 0.5 * (true_a_acc + true_b_acc)' % it)
            exec('self.dis_fake_acc_%d = 0.5 * (fake_a_acc + fake_b_acc)' % it)

            if 0 != hyperparameters['gamma_js_regularization']:
                js_reg_a += self.js_regularization(this_true_a, out_true_a,
                                                   in_a, out_fake_a,
                                                   this_fake_a, fake_a)
                js_reg_b += self.js_regularization(this_true_b, out_true_b,
                                                   in_b, out_fake_b,
                                                   this_fake_b, fake_b)

        d_loss = ad_loss_a + ad_loss_b
        if 0 != hyperparameters['gamma_js_regularization']:
            d_loss += (js_reg_a + js_reg_b) * (
                hyperparameters['gamma_js_regularization'] / 2.)
        loss = hyperparameters['gan_w'] * d_loss
        self.dis.zero_grad()
        loss.backward()
        self.dis_opt.step()
        self.dis_loss = loss.data.cpu().numpy()[0]
        return
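Example #3 adds a js_regularization term whose implementation is not shown. The argument list (logits, sigmoid outputs, and the corresponding input images for both real and fake batches) suggests the gradient-based JS regularizer of Roth et al. (2017). A minimal standalone sketch of that regularizer follows, written for current PyTorch and assuming the input images were created with requires_grad=True; the exact argument semantics of the snippet's method may differ.

    import torch

    def js_gradient_regularizer(logits_real, x_real, logits_fake, x_fake):
        # Roth et al. (2017): penalize the squared input-gradient norm of the
        # discriminator logits, weighted by (1 - D(x))^2 on real data and
        # D(x)^2 on fake data.
        d_real = torch.sigmoid(logits_real).view(logits_real.size(0), -1).mean(1)
        d_fake = torch.sigmoid(logits_fake).view(logits_fake.size(0), -1).mean(1)
        grad_real = torch.autograd.grad(logits_real.sum(), x_real, create_graph=True)[0]
        grad_fake = torch.autograd.grad(logits_fake.sum(), x_fake, create_graph=True)[0]
        # Per-sample squared gradient norms.
        norm_real = grad_real.view(grad_real.size(0), -1).pow(2).sum(1)
        norm_fake = grad_fake.view(grad_fake.size(0), -1).pow(2).sum(1)
        return ((1.0 - d_real).pow(2) * norm_real).mean() + (d_fake.pow(2) * norm_fake).mean()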
Code example #4
    def dis_update(self, images_a, images_b, images_c, images_d,
                   hyperparameters):
        self.dis.zero_grad()
        x_aa, x_ba, x_ab, x_bb, x_cc, x_dc, x_cd, x_dd, shared = \
            self.gen(images_a, images_b, images_c, images_d)
        data_a = torch.cat((images_a, x_ba), 0)
        data_b = torch.cat((images_b, x_ab), 0)
        data_c = torch.cat((images_c, x_dc), 0)
        data_d = torch.cat((images_d, x_cd), 0)
        res_a, res_b, res_c, res_d = self.dis(data_a, data_b, data_c, data_d)
        # res_true_a, res_true_b = self.dis(images_a,images_b)
        # res_fake_a, res_fake_b = self.dis(x_ba, x_ab)

        # Loss for (A, B)
        for it, (this_a, this_b) in enumerate(itertools.izip(res_a, res_b)):
            out_a = nn.functional.sigmoid(this_a)
            out_b = nn.functional.sigmoid(this_b)
            out_true_a, out_fake_a = torch.split(out_a, out_a.size(0) // 2, 0)
            out_true_b, out_fake_b = torch.split(out_b, out_b.size(0) // 2, 0)
            out_true_n = out_true_a.size(0)
            out_fake_n = out_fake_a.size(0)
            all1 = Variable(torch.ones((out_true_n)).cuda(self.gpu))
            all0 = Variable(torch.zeros((out_fake_n)).cuda(self.gpu))
            ad_true_loss_a = nn.functional.binary_cross_entropy(
                out_true_a, all1)
            ad_true_loss_b = nn.functional.binary_cross_entropy(
                out_true_b, all1)
            ad_fake_loss_a = nn.functional.binary_cross_entropy(
                out_fake_a, all0)
            ad_fake_loss_b = nn.functional.binary_cross_entropy(
                out_fake_b, all0)
            if it == 0:
                ad_loss_a = ad_true_loss_a + ad_fake_loss_a
                ad_loss_b = ad_true_loss_b + ad_fake_loss_b
            else:
                ad_loss_a += ad_true_loss_a + ad_fake_loss_a
                ad_loss_b += ad_true_loss_b + ad_fake_loss_b
            true_a_acc = _compute_true_acc(out_true_a)
            true_b_acc = _compute_true_acc(out_true_b)
            fake_a_acc = _compute_fake_acc(out_fake_a)
            fake_b_acc = _compute_fake_acc(out_fake_b)
            exec('self.dis_true_acc_%d = 0.5 * (true_a_acc + true_b_acc)' % it)
            exec('self.dis_fake_acc_%d = 0.5 * (fake_a_acc + fake_b_acc)' % it)

        # Loss for (C, D)
        for it, (this_c, this_d) in enumerate(itertools.izip(res_c, res_d)):
            out_c = nn.functional.sigmoid(this_c)
            out_d = nn.functional.sigmoid(this_d)
            out_true_c, out_fake_c = torch.split(out_c, out_c.size(0) // 2, 0)
            out_true_d, out_fake_d = torch.split(out_d, out_d.size(0) // 2, 0)
            out_true_n = out_true_c.size(0)
            out_fake_n = out_fake_c.size(0)
            all1 = Variable(torch.ones((out_true_n)).cuda(self.gpu))
            all0 = Variable(torch.zeros((out_fake_n)).cuda(self.gpu))
            ad_true_loss_c = nn.functional.binary_cross_entropy(
                out_true_c, all1)
            ad_true_loss_d = nn.functional.binary_cross_entropy(
                out_true_d, all1)
            ad_fake_loss_c = nn.functional.binary_cross_entropy(
                out_fake_c, all0)
            ad_fake_loss_d = nn.functional.binary_cross_entropy(
                out_fake_d, all0)
            if it == 0:
                ad_loss_c = ad_true_loss_c + ad_fake_loss_c
                ad_loss_d = ad_true_loss_d + ad_fake_loss_d
            else:
                ad_loss_c += ad_true_loss_c + ad_fake_loss_c
                ad_loss_d += ad_true_loss_d + ad_fake_loss_d
            true_c_acc = _compute_true_acc(out_true_c)
            true_d_acc = _compute_true_acc(out_true_d)
            fake_c_acc = _compute_fake_acc(out_fake_c)
            fake_d_acc = _compute_fake_acc(out_fake_d)
            exec('self.dis_true_acc_%d += 0.5 * (true_c_acc + true_d_acc)' %
                 it)
            exec('self.dis_fake_acc_%d += 0.5 * (fake_c_acc + fake_d_acc)' %
                 it)

        loss = hyperparameters['gan_w'] * (ad_loss_a + ad_loss_b) + \
               1.5 * hyperparameters['gan_w'] * (ad_loss_c + ad_loss_d)
        loss.backward()
        self.dis_opt.step()
        self.dis_loss = loss.data.cpu().numpy()[0]
        return
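The examples record per-scale accuracies through exec with string formatting. A small equivalent that avoids exec is setattr; this is a hypothetical refactor, not code from the original repositories:

    # Drop-in replacement for the exec-based bookkeeping inside each loop
    # (`it`, `true_a_acc`, etc. come from the surrounding loop):
    setattr(self, 'dis_true_acc_%d' % it, 0.5 * (true_a_acc + true_b_acc))
    setattr(self, 'dis_fake_acc_%d' % it, 0.5 * (fake_a_acc + fake_b_acc))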
Code example #5
    def dis_update(self, images_a, images_b, images_c, hyperparameters):

        # weight clipping
        # for para in self.dis.parameters():
        #     para.data.clamp_(-0.1, 0.1)

        self.dis.zero_grad()
        x_aa, x_ba, x_ca, x_ab, x_bb, x_cb, x_ac, x_bc, x_cc, shared = self.gen(
            images_a, images_b, images_c)
        data_a = torch.cat((images_a, x_ba), 0)
        data_b = torch.cat((images_b, x_ab), 0)
        data_c = torch.cat((images_c, x_bc), 0)
        data_d = torch.cat((images_b, x_cb), 0)
        res_a, res_b, res_c, res_d = self.dis(data_a, data_b, data_c, data_d)

        # res_true_a, res_true_b = self.dis(images_a,images_b)
        # res_fake_a, res_fake_b = self.dis(x_ba, x_ab)
        for it, (this_a, this_b, this_c,
                 this_d) in enumerate(zip(res_a, res_b, res_c, res_d)):
            out_a = nn.functional.sigmoid(this_a)
            out_b = nn.functional.sigmoid(this_b)

            out_c = nn.functional.sigmoid(this_c)
            out_d = nn.functional.sigmoid(this_d)

            # print out_a.size()
            out_true_a, out_fake_a = torch.split(out_a, out_a.size(0) // 2, 0)
            out_true_b, out_fake_b = torch.split(out_b, out_b.size(0) // 2, 0)

            out_true_c, out_fake_c = torch.split(out_c, out_c.size(0) // 2, 0)
            out_true_d, out_fake_d = torch.split(out_d, out_d.size(0) // 2, 0)

            # print out_true_a.size()
            out_true_n = out_true_a.size(0)
            out_fake_n = out_fake_a.size(0)
            # all1 = Variable(torch.ones((out_true_n)).cuda(self.gpu))
            tmp_all1 = torch.from_numpy(
                np.random.uniform(0.8, 1.2, size=out_true_n))
            # print tmp_all1
            # print out_true_a
            all1 = Variable(tmp_all1).float().cuda()
            # all0 = Variable(torch.zeros((out_fake_n)).cuda(self.gpu))
            tmp_all0 = torch.from_numpy(
                np.random.uniform(0.0, 0.3, size=out_fake_n))
            all0 = Variable(tmp_all0).float().cuda()
            # dis only contains classification loss
            ad_true_loss_a = nn.functional.binary_cross_entropy(
                out_true_a, all1)
            ad_true_loss_b = nn.functional.binary_cross_entropy(
                out_true_b, all1)
            ad_true_loss_c = nn.functional.binary_cross_entropy(
                out_true_c, all1)
            ad_true_loss_d = nn.functional.binary_cross_entropy(
                out_true_d, all1)

            ad_fake_loss_a = nn.functional.binary_cross_entropy(
                out_fake_a, all0)
            ad_fake_loss_b = nn.functional.binary_cross_entropy(
                out_fake_b, all0)
            ad_fake_loss_c = nn.functional.binary_cross_entropy(
                out_fake_c, all0)
            ad_fake_loss_d = nn.functional.binary_cross_entropy(
                out_fake_d, all0)

            if it == 0:
                ad_loss_a = ad_true_loss_a + ad_fake_loss_a
                ad_loss_b = ad_true_loss_b + ad_fake_loss_b
                ad_loss_c = ad_true_loss_c + ad_fake_loss_c
                ad_loss_d = ad_true_loss_d + ad_fake_loss_d
            else:
                ad_loss_a += ad_true_loss_a + ad_fake_loss_a
                ad_loss_b += ad_true_loss_b + ad_fake_loss_b
                ad_loss_c += ad_true_loss_c + ad_fake_loss_c
                ad_loss_d += ad_true_loss_d + ad_fake_loss_d

            true_a_acc = _compute_true_acc(out_true_a)
            true_b_acc = _compute_true_acc(out_true_b)
            true_c_acc = _compute_true_acc(out_true_c)
            true_d_acc = _compute_true_acc(out_true_d)

            fake_a_acc = _compute_fake_acc(out_fake_a)
            fake_b_acc = _compute_fake_acc(out_fake_b)
            fake_c_acc = _compute_fake_acc(out_fake_c)
            fake_d_acc = _compute_fake_acc(out_fake_d)
            exec('self.dis_true_acc_a2b_%d = 0.5 * (true_a_acc + true_b_acc)' %
                 it)
            exec('self.dis_fake_acc_a2b_%d = 0.5 * (fake_a_acc + fake_b_acc)' %
                 it)
            exec('self.dis_true_acc_c2b_%d = 0.5 * (true_c_acc + true_d_acc)' %
                 it)
            exec('self.dis_fake_acc_c2b_%d = 0.5 * (fake_c_acc + fake_d_acc)' %
                 it)

        loss = hyperparameters['gan_w'] * (ad_loss_a + ad_loss_b + ad_loss_c +
                                           ad_loss_d)
        loss.backward()
        self.dis_opt.step()

        self.dis_loss = loss.item()
        print("Total disc loss: ", self.dis_loss)
        # print("dis_true_acc(a2b): ", 0.5 * (true_a_acc + true_b_acc))
        # print("dis_fake_acc(a2b): ", 0.5 * (fake_a_acc + fake_b_acc))
        # print("dis_true_acc(c2b): ", 0.5 * (true_c_acc + true_d_acc))
        # print("dis_fake_acc(c2b): ", 0.5 * (fake_c_acc + fake_d_acc))
        return
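Example #5 differs from example #1 mainly in its discriminator targets: real labels are drawn uniformly from [0.8, 1.2] and fake labels from [0.0, 0.3] (noisy label smoothing), whereas example #1's uniform(1.0, 1.0) and uniform(0.0, 0.0) calls degenerate back to hard 1/0 labels. The same smoothed targets can be built directly in PyTorch without the NumPy round-trip; a small sketch assuming current PyTorch, with placeholder batch sizes:

    import torch

    out_true_n, out_fake_n = 8, 8  # placeholder batch sizes
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    # Smoothed "real" targets in [0.8, 1.2] and noisy "fake" targets in [0.0, 0.3].
    all1 = torch.empty(out_true_n, device=device).uniform_(0.8, 1.2)
    all0 = torch.empty(out_fake_n, device=device).uniform_(0.0, 0.3)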