def plotLocations(self):
    from tools.plot import plot
    if self.locations.empty:
        print("Cannot plot, DataFrame is empty")
    else:
        plot(self.locations)
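A brief usage sketch, assuming plotLocations belongs to a class whose locations attribute is a pandas DataFrame. The LocationLog wrapper below is hypothetical; only the method above and tools.plot.plot come from the example, and tools.plot must be importable for the plotting branch to run.

import pandas as pd

class LocationLog:
    """Hypothetical owner of the plotLocations method above."""
    def __init__(self, df: pd.DataFrame):
        self.locations = df
    plotLocations = plotLocations  # reuse the module-level method defined above

log = LocationLog(pd.DataFrame({"lat": [52.5], "lon": [13.4]}))
log.plotLocations()  # delegates to tools.plot.plot on the non-empty DataFrame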
Example #2
    def train(self):
        # Start with trained model if exists
        cls_A = self.cls[0]
        cls_B = self.cls[1]
        g_lr = self.g_lr
        d_lr = self.d_lr
        if self.checkpoint:
            start = int(self.checkpoint.split('_')[0])
            self.vis_test()
        else:
            start = 0
        # Start training
        self.start_time = time.time()
        for self.e in range(start, self.num_epochs):
            current_iter = 0
            for self.i, (img_A, img_B, mask_A,
                         mask_B) in enumerate(self.data_loader_train):
                # Convert tensor to variable
                # mask attribute: 0:background 1:face 2:left-eyebrow 3:right-eyebrow 4:left-eye 5:right-eye 6:nose
                # 7:upper-lip 8:teeth 9:under-lip 10:hair 11:left-ear 12:right-ear 13:neck
                if self.checkpoint or self.direct:
                    if self.lips:
                        mask_A_lip = (mask_A == 7).float() + (mask_A
                                                              == 9).float()
                        mask_B_lip = (mask_B == 7).float() + (mask_B
                                                              == 9).float()
                        mask_A_lip, mask_B_lip, index_A_lip, index_B_lip = self.mask_preprocess(
                            mask_A_lip, mask_B_lip)
                    if self.skin:
                        mask_A_skin = (mask_A == 1).float() + (
                            mask_A == 6).float() + (mask_A == 13).float()
                        mask_B_skin = (mask_B == 1).float() + (
                            mask_B == 6).float() + (mask_B == 13).float()
                        mask_A_skin, mask_B_skin, index_A_skin, index_B_skin = self.mask_preprocess(
                            mask_A_skin, mask_B_skin)
                    if self.eye:
                        mask_A_eye_left = (mask_A == 4).float()
                        mask_A_eye_right = (mask_A == 5).float()
                        mask_B_eye_left = (mask_B == 4).float()
                        mask_B_eye_right = (mask_B == 5).float()
                        mask_A_face = (mask_A == 1).float() + (mask_A
                                                               == 6).float()
                        mask_B_face = (mask_B == 1).float() + (mask_B
                                                               == 6).float()
                        # avoid the situation that images with eye closed
                        if not ((mask_A_eye_left > 0).any() and
                                (mask_B_eye_left > 0).any() and
                                (mask_A_eye_right > 0).any() and
                                (mask_B_eye_right > 0).any()):
                            continue
                        mask_A_eye_left, mask_A_eye_right = self.rebound_box(
                            mask_A_eye_left, mask_A_eye_right, mask_A_face)
                        mask_B_eye_left, mask_B_eye_right = self.rebound_box(
                            mask_B_eye_left, mask_B_eye_right, mask_B_face)
                        # This could be changed to compute the loss over both eyes jointly, since the makeup on the two eyes sometimes looks asymmetric.
                        mask_A_eye = mask_A_eye_left + mask_A_eye_right
                        mask_B_eye = mask_B_eye_left + mask_B_eye_right
                        mask_A_eye, mask_B_eye, index_A_eye, index_B_eye = self.mask_preprocess(
                            mask_A_eye, mask_B_eye)
                try:
                    processed_img_A = Image.open(img_A[0])
                    processed_img_B = Image.open(img_B[0])
                    processed_img_A = preprocess_makeup_gan(processed_img_A)
                    processed_img_B = preprocess_makeup_gan(processed_img_B)
                    processed_org_A = [
                        self.to_var(item, requires_grad=False)
                        for item in processed_img_A
                    ]
                    processed_ref_B = [
                        self.to_var(item, requires_grad=False)
                        for item in processed_img_B
                    ]
                except Exception as e:
                    print(str(e))
                    print('current iteration is: ', current_iter)
                    print('image_A is: ', img_A[0])
                    print('image_B is: ', img_B[0])
                    continue

                org_A = processed_org_A[0]
                ref_B = processed_ref_B[0]

                # ================== Train D ================== #
                # training D_A, D_A aims to distinguish class B
                # Real
                out = getattr(self, "D_" + cls_A)(ref_B)
                d_loss_real = self.criterionGAN(out, True)
                # Fake
                fake_A = Solver_MakeupGAN.generate(processed_org_A[0],
                                                   processed_ref_B[0],
                                                   processed_org_A[1],
                                                   processed_ref_B[1],
                                                   generator=self.G)
                fake_B = Solver_MakeupGAN.generate(processed_ref_B[0],
                                                   processed_org_A[0],
                                                   processed_ref_B[1],
                                                   processed_org_A[1],
                                                   generator=self.G)
                fake_A = Variable(fake_A.data).detach()
                fake_B = Variable(fake_B.data).detach()
                out = getattr(self, "D_" + cls_A)(fake_A)
                d_loss_fake = self.criterionGAN(out, False)

                # Backward + Optimize
                d_loss = (d_loss_real + d_loss_fake) * 0.5
                getattr(self, "d_" + cls_A + "_optimizer").zero_grad()
                d_loss.backward(retain_graph=True)
                getattr(self, "d_" + cls_A + "_optimizer").step()

                # Logging
                self.loss = {'D-A-loss_real': d_loss_real.item()}

                # training D_B, D_B aims to distinguish class A
                # Real
                out = getattr(self, "D_" + cls_B)(org_A)
                d_loss_real = self.criterionGAN(out, True)
                # Fake
                out = getattr(self, "D_" + cls_B)(fake_B)
                d_loss_fake = self.criterionGAN(out, False)

                # Backward + Optimize
                d_loss = (d_loss_real + d_loss_fake) * 0.5
                getattr(self, "d_" + cls_B + "_optimizer").zero_grad()
                d_loss.backward(retain_graph=True)
                getattr(self, "d_" + cls_B + "_optimizer").step()

                # Logging
                self.loss['D-B-loss_real'] = d_loss_real.item()

                # ================== Train G ================== #
                if (self.i + 1) % self.ndis == 0:
                    # adversarial loss, i.e. L_trans,v in the paper
                    # identity loss
                    # Note: this identity loss does not appear in the paper.
                    if self.lambda_idt > 0:
                        # G should be identity if ref_B or org_A is fed
                        idt_A1 = Solver_MakeupGAN.generate(processed_org_A[0],
                                                           processed_org_A[0],
                                                           processed_org_A[1],
                                                           processed_org_A[1],
                                                           generator=self.G)
                        idt_A2 = Solver_MakeupGAN.generate(processed_org_A[0],
                                                           processed_org_A[0],
                                                           processed_org_A[1],
                                                           processed_org_A[1],
                                                           generator=self.G)
                        idt_B1 = Solver_MakeupGAN.generate(processed_ref_B[0],
                                                           processed_ref_B[0],
                                                           processed_ref_B[1],
                                                           processed_ref_B[1],
                                                           generator=self.G)
                        idt_B2 = Solver_MakeupGAN.generate(processed_ref_B[0],
                                                           processed_ref_B[0],
                                                           processed_ref_B[1],
                                                           processed_ref_B[1],
                                                           generator=self.G)
                        # What do lambda_A and lambda_B stand for?
                        loss_idt_A1 = self.criterionL1(
                            idt_A1, org_A) * self.lambda_A * self.lambda_idt
                        loss_idt_A2 = self.criterionL1(
                            idt_A2, org_A) * self.lambda_A * self.lambda_idt
                        loss_idt_B1 = self.criterionL1(
                            idt_B1, ref_B) * self.lambda_B * self.lambda_idt
                        loss_idt_B2 = self.criterionL1(
                            idt_B2, ref_B) * self.lambda_B * self.lambda_idt
                        # loss_idt
                        loss_idt = (loss_idt_A1 + loss_idt_A2 + loss_idt_B1 +
                                    loss_idt_B2) * 0.5
                    else:
                        loss_idt = 0

                    # GAN loss D_A(G_A(A))
                    # fake_A in class B
                    fake_A = Solver_MakeupGAN.generate(processed_org_A[0],
                                                       processed_ref_B[0],
                                                       processed_org_A[1],
                                                       processed_ref_B[1],
                                                       generator=self.G)
                    fake_B = Solver_MakeupGAN.generate(processed_ref_B[0],
                                                       processed_org_A[0],
                                                       processed_ref_B[1],
                                                       processed_org_A[1],
                                                       generator=self.G)

                    pred_fake = getattr(self, "D_" + cls_A)(fake_A)
                    g_A_loss_adv = self.criterionGAN(pred_fake, True)
                    # GAN loss D_B(G_B(B))
                    pred_fake = getattr(self, "D_" + cls_B)(fake_B)
                    g_B_loss_adv = self.criterionGAN(pred_fake, True)

                    rec_A = Solver_MakeupGAN.generate(fake_A,
                                                      processed_org_A[0],
                                                      processed_org_A[1],
                                                      processed_org_A[1],
                                                      generator=self.G)
                    rec_B = Solver_MakeupGAN.generate(fake_B,
                                                      processed_ref_B[0],
                                                      processed_ref_B[1],
                                                      processed_ref_B[1],
                                                      generator=self.G)

                    # color_histogram loss
                    # The author's implementation here may not match the paper: the paper computes the loss
                    # between G(x, y) and HM(x, y), i.e. between fake_A and HM(org_A, ref_B).
                    # In a GitHub issue the author also says this part is a little different from the paper.
                    g_A_loss_his = 0
                    g_B_loss_his = 0
                    # This could be modified: computing the loss for the left and right eyes separately works, but on a
                    # profile face the shading and makeup intensity of the two eyes look asymmetric, so the makeup loss
                    # could instead be computed over both eyes jointly.
                    if self.checkpoint or self.direct:
                        if self.lips:
                            g_A_lip_loss_his = self.criterionHis(
                                fake_A, ref_B, mask_A_lip, mask_B_lip,
                                index_A_lip) * self.lambda_his_lip
                            g_B_lip_loss_his = self.criterionHis(
                                fake_B, org_A, mask_B_lip, mask_A_lip,
                                index_B_lip) * self.lambda_his_lip
                            g_A_loss_his += g_A_lip_loss_his
                            g_B_loss_his += g_B_lip_loss_his
                        if self.skin:
                            g_A_skin_loss_his = self.criterionHis(
                                fake_A, ref_B, mask_A_skin, mask_B_skin,
                                index_A_skin) * self.lambda_his_skin_1
                            g_B_skin_loss_his = self.criterionHis(
                                fake_B, org_A, mask_B_skin, mask_A_skin,
                                index_B_skin) * self.lambda_his_skin_2
                            g_A_loss_his += g_A_skin_loss_his
                            g_B_loss_his += g_B_skin_loss_his
                        if self.eye:
                            g_A_eye_loss_his = self.criterionHis(
                                fake_A, ref_B, mask_A_eye, mask_B_eye,
                                index_A_eye) * self.lambda_his_eye
                            g_B_eye_loss_his = self.criterionHis(
                                fake_B, org_A, mask_B_eye, mask_A_eye,
                                index_B_eye) * self.lambda_his_eye
                            g_A_loss_his += g_A_eye_loss_his
                            g_B_loss_his += g_B_eye_loss_his

                        # cycle loss
                    g_loss_rec_A = self.criterionL1(rec_A,
                                                    org_A) * self.lambda_A
                    g_loss_rec_B = self.criterionL1(rec_B,
                                                    ref_B) * self.lambda_B

                    # vgg loss
                    vgg_org = self.vgg_forward(self.vgg, org_A)
                    vgg_org = Variable(vgg_org.data).detach()
                    vgg_fake_A = self.vgg_forward(self.vgg, fake_A)
                    g_loss_A_vgg = self.criterionL2(
                        vgg_fake_A, vgg_org) * self.lambda_A * self.lambda_vgg

                    vgg_ref = self.vgg_forward(self.vgg, ref_B)
                    vgg_ref = Variable(vgg_ref.data).detach()
                    vgg_fake_B = self.vgg_forward(self.vgg, fake_B)
                    g_loss_B_vgg = self.criterionL2(
                        vgg_fake_B, vgg_ref) * self.lambda_B * self.lambda_vgg

                    loss_rec = (g_loss_rec_A + g_loss_rec_B + g_loss_A_vgg +
                                g_loss_B_vgg) * 0.5

                    # Combined loss
                    g_loss = g_A_loss_adv + g_B_loss_adv + loss_rec + loss_idt
                    if self.checkpoint or self.direct:
                        g_loss = g_A_loss_adv + g_B_loss_adv + loss_rec + loss_idt + g_A_loss_his + g_B_loss_his

                    self.g_optimizer.zero_grad()
                    g_loss.backward(retain_graph=True)
                    self.g_optimizer.step()

                    # Logging
                    self.loss['G-A-loss-adv'] = g_A_loss_adv.item()
                    self.loss['G-B-loss-adv'] = g_B_loss_adv.item()
                    self.loss['G-loss-org'] = g_loss_rec_A.item()
                    self.loss['G-loss-ref'] = g_loss_rec_B.item()
                    self.loss['G-loss-idt'] = loss_idt.item() if self.lambda_idt > 0 else 0
                    self.loss['G-loss-img-rec'] = (g_loss_rec_A +
                                                   g_loss_rec_B).item()
                    self.loss['G-loss-vgg-rec'] = (g_loss_A_vgg +
                                                   g_loss_B_vgg).item()
                    if self.direct:
                        self.loss['G-A-loss-his'] = g_A_loss_his.item()
                        self.loss['G-B-loss-his'] = g_B_loss_his.item()

                # Print out log info
                if (current_iter + 1) % self.log_step == 0:
                    self.log_terminal()

                # plot the figures
                for key_now in self.loss.keys():
                    plot_fig.plot(key_now, self.loss[key_now])

                # save the images
                if (current_iter + 1) % self.vis_step == 0:
                    print("Saving middle output...")
                    self.vis_train(
                        [org_A, ref_B, fake_A, fake_B, rec_A, rec_B])

                # Save model checkpoints
                if (current_iter + 1) % self.snapshot_step == 0:
                    self.save_models()
                if current_iter % 100 == 99:
                    plot_fig.flush(self.task_name)

                plot_fig.tick()

                current_iter += 1

            # Decay learning rate
            if (self.e + 1) > (self.num_epochs - self.num_epochs_decay):
                g_lr -= (self.g_lr / float(self.num_epochs_decay))
                d_lr -= (self.d_lr / float(self.num_epochs_decay))
                self.update_lr(g_lr, d_lr)
                print('Decay learning rate to g_lr: {}, d_lr:{}.'.format(
                    g_lr, d_lr))

            if self.e % 20 == 0:
                print("Saving output...")
                self.vis_test()
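The loop above first turns the integer face-parsing map into per-region binary masks before the histogram losses are computed. A minimal sketch of that step, assuming the label convention from the comment (7/9 = lips, 1/6/13 = skin and neck, 4/5 = eyes); the region_masks helper is illustrative and not part of the solver.

import torch

def region_masks(parse_map: torch.Tensor) -> dict:
    """Binary float masks from an integer face-parsing map (illustrative helper)."""
    return {
        # lips: upper lip (7) plus under lip (9)
        "lip": (parse_map == 7).float() + (parse_map == 9).float(),
        # skin: face (1), nose (6) and neck (13)
        "skin": (parse_map == 1).float() + (parse_map == 6).float() + (parse_map == 13).float(),
        # eyes kept separate so samples with a closed eye can be skipped, as in the loop above
        "eye_left": (parse_map == 4).float(),
        "eye_right": (parse_map == 5).float(),
    }

masks = region_masks(torch.randint(0, 14, (1, 361, 361)))
print({name: m.sum().item() for name, m in masks.items()})  # pixel count per region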
Example #3
    def train(self):
        """Train StarGAN within a single dataset."""
        # The number of iterations per epoch
        self.iters_per_epoch = len(self.data_loader_train)
        # Start with trained model if exists
        g_lr = self.g_lr
        d_lr = self.d_lr
        if self.checkpoint:
            start = int(self.checkpoint.split('_')[0])
        else:
            start = 0
        # Start training
        self.start_time = time.time()
        for self.e in range(start, self.num_epochs):
            for self.i, (img_A, img_B, _,
                         _) in enumerate(self.data_loader_train):
                # Convert tensor to variable
                org_A = self.to_var(img_A, requires_grad=False)
                ref_B = self.to_var(img_B, requires_grad=False)

                # ================== Train D ================== #
                # training D_A
                # Real
                out = self.D_A(ref_B)
                d_loss_real = self.criterionGAN(out, True)
                # Fake
                fake = self.G_A(org_A)
                fake = Variable(fake.data)
                fake = fake.detach()
                out = self.D_A(fake)
                #d_loss_fake = self.get_D_loss(out, "fake")
                d_loss_fake = self.criterionGAN(out, False)

                # Backward + Optimize
                d_loss = (d_loss_real + d_loss_fake) * 0.5
                self.d_A_optimizer.zero_grad()
                d_loss.backward(retain_graph=True)
                self.d_A_optimizer.step()

                # Logging
                self.loss = {}
                self.loss['D-A-loss_real'] = d_loss_real.item()

                # training D_B
                # Real
                out = self.D_B(org_A)
                d_loss_real = self.criterionGAN(out, True)
                # Fake
                fake = self.G_B(ref_B)
                fake = Variable(fake.data)
                fake = fake.detach()
                out = self.D_B(fake)
                #d_loss_fake = self.get_D_loss(out, "fake")
                d_loss_fake = self.criterionGAN(out, False)

                # Backward + Optimize
                d_loss = (d_loss_real + d_loss_fake) * 0.5
                self.d_B_optimizer.zero_grad()
                d_loss.backward(retain_graph=True)
                self.d_B_optimizer.step()

                # Logging
                self.loss['D-B-loss_real'] = d_loss_real.item()

                # ================== Train G ================== #
                if (self.i + 1) % self.ndis == 0:
                    # adversarial loss, i.e. L_trans,v in the paper

                    # identity loss
                    if self.lambda_idt > 0:
                        # G_A should be identity if ref_B is fed
                        idt_A = self.G_A(ref_B)
                        loss_idt_A = self.criterionL1(
                            idt_A, ref_B) * self.lambda_B * self.lambda_idt
                        # G_B should be identity if org_A is fed
                        idt_B = self.G_B(org_A)
                        loss_idt_B = self.criterionL1(
                            idt_B, org_A) * self.lambda_A * self.lambda_idt
                        g_loss_idt = loss_idt_A + loss_idt_B
                    else:
                        g_loss_idt = 0

                    # GAN loss D_A(G_A(A))
                    fake_B = self.G_A(org_A)
                    pred_fake = self.D_A(fake_B)
                    g_A_loss_adv = self.criterionGAN(pred_fake, True)
                    #g_loss_adv = self.get_G_loss(out)

                    # GAN loss D_B(G_B(B))
                    fake_A = self.G_B(ref_B)
                    pred_fake = self.D_B(fake_A)
                    g_B_loss_adv = self.criterionGAN(pred_fake, True)

                    # Forward cycle loss
                    rec_A = self.G_B(fake_B)
                    g_loss_rec_A = self.criterionL1(rec_A,
                                                    org_A) * self.lambda_A

                    # Backward cycle loss
                    rec_B = self.G_A(fake_A)
                    g_loss_rec_B = self.criterionL1(rec_B,
                                                    ref_B) * self.lambda_B

                    # Combined loss
                    g_loss = g_A_loss_adv + g_B_loss_adv + g_loss_rec_A + g_loss_rec_B + g_loss_idt

                    self.g_optimizer.zero_grad()
                    g_loss.backward(retain_graph=True)
                    self.g_optimizer.step()

                    # Logging
                    self.loss['G-A-loss_adv'] = g_A_loss_adv.item()
                    self.loss['G-B-loss_adv'] = g_B_loss_adv.item()
                    self.loss['G-loss_org'] = g_loss_rec_A.item()
                    self.loss['G-loss_ref'] = g_loss_rec_B.item()
                    self.loss['G-loss_idt'] = g_loss_idt.item() if self.lambda_idt > 0 else 0

                # Print out log info
                if (self.i + 1) % self.log_step == 0:
                    self.log_terminal()

                #plot the figures
                for key_now in self.loss.keys():
                    plot_fig.plot(key_now, self.loss[key_now])

                #save the images
                if (self.i + 1) % self.vis_step == 0:
                    print("Saving middle output...")
                    self.vis_train(
                        [org_A, ref_B, fake_A, fake_B, rec_A, rec_B])
                    self.vis_test()

                # Save model checkpoints
                if (self.i + 1) % self.snapshot_step == 0:
                    self.save_models()

                if (self.i % 100 == 99):
                    plot_fig.flush(self.task_name)

                plot_fig.tick()

            # Decay learning rate
            if (self.e + 1) > (self.num_epochs - self.num_epochs_decay):
                g_lr -= (self.g_lr / float(self.num_epochs_decay))
                d_lr -= (self.d_lr / float(self.num_epochs_decay))
                self.update_lr(g_lr, d_lr)
                print('Decay learning rate to g_lr: {}, d_lr:{}.'.format(
                    g_lr, d_lr))
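Each epoch ends by linearly decaying the generator and discriminator learning rates over the last num_epochs_decay epochs. A standalone sketch of that schedule; the helper below is illustrative, while the solvers apply the same decrement incrementally through self.update_lr.

def decayed_lrs(e, num_epochs, num_epochs_decay, g_lr0, d_lr0):
    """Learning rates after epoch e under the linear decay used above (illustrative)."""
    steps = max(0, (e + 1) - (num_epochs - num_epochs_decay))  # epochs already inside the decay phase
    g_lr = g_lr0 - steps * (g_lr0 / float(num_epochs_decay))
    d_lr = d_lr0 - steps * (d_lr0 / float(num_epochs_decay))
    return g_lr, d_lr

# With 200 epochs and decay over the last 100, the rates halve by epoch 150 and reach ~0 at epoch 200.
print(decayed_lrs(149, 200, 100, 2e-4, 2e-4))  # (~1e-4, ~1e-4)
print(decayed_lrs(199, 200, 100, 2e-4, 2e-4))  # (~0.0, ~0.0)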
Example #4
    def train(self):
        # The number of iterations per epoch
        self.iters_per_epoch = len(self.data_loader_train)
        # Start with trained model if exists
        g_lr = self.g_lr
        d_lr = self.d_lr
        start = 0

        for self.e in range(start, self.num_epochs): # epoch
            for self.i, (source_input, reference_input) in enumerate(self.data_loader_train): # batch
                # image, mask, dist
                image_s, image_r = source_input[0].to(self.device), reference_input[0].to(self.device)
                mask_s, mask_r = source_input[1].to(self.device), reference_input[1].to(self.device) 
                dist_s, dist_r = source_input[2].to(self.device), reference_input[2].to(self.device)
                self.track("data")

                # ================== Train D ================== #
                # training D_A, D_A aims to distinguish class B, i.e. whether the input is a real reference image y
                # Real
                out = self.D_A(image_r)
                self.track("D_A")
                d_loss_real = self.criterionGAN(out, True)
                self.track("D_A_loss")
                # Fake
                # generate fake_y with the generator network
                fake_A = self.G(image_s, image_r, mask_s, mask_r, dist_s, dist_r)
                self.track("G")
                # discriminator input and loss; detach so no gradients flow back into G (requires_grad=False)
                fake_A = Variable(fake_A.data).detach()
                out = self.D_A(fake_A)
                self.track("D_A_2")
                d_loss_fake = self.criterionGAN(out, False)
                self.track("D_A_loss_2")

                # Backward + Optimize
                # back-propagate through the discriminator and update its parameters
                d_loss = (d_loss_real.mean() + d_loss_fake.mean()) * 0.5
                self.d_A_optimizer.zero_grad()
                d_loss.backward(retain_graph=False)  # retain_graph=False frees the computation graph
                self.d_A_optimizer.step()

                # Logging
                self.loss = {}
                self.loss['D-A-loss_real'] = d_loss_real.mean().item()

                # training D_B, D_B aims to distinguish class A, i.e. whether the input is a real source image x
                # Real
                out = self.D_B(image_s)
                d_loss_real = self.criterionGAN(out, True)
                # Fake: generate fake_x with the generator network
                self.track("G-before")
                fake_B = self.G(image_r, image_s, mask_r, mask_s, dist_r, dist_s)
                self.track("G-2")
                fake_B = Variable(fake_B.data).detach()
                out = self.D_B(fake_B)
                d_loss_fake = self.criterionGAN(out, False)

                # Backward + Optimize
                d_loss = (d_loss_real.mean() + d_loss_fake.mean()) * 0.5
                self.d_B_optimizer.zero_grad()
                d_loss.backward(retain_graph=False)
                self.d_B_optimizer.step()

                # Logging
                self.loss['D-B-loss_real'] = d_loss_real.mean().item()

                # self.track("Discriminator backward")
               
                # ================== Train G ================== #
                if (self.i + 1) % self.g_step == 0:
                    # identity loss
                    assert self.lambda_idt > 0
                    
                    # G should be identity if ref_B or org_A is fed
                    idt_A = self.G(image_s, image_s, mask_s, mask_s, dist_s, dist_s)
                    idt_B = self.G(image_r, image_r, mask_r, mask_r, dist_r, dist_r)
                    loss_idt_A = self.criterionL1(idt_A, image_s) * self.lambda_A * self.lambda_idt
                    loss_idt_B = self.criterionL1(idt_B, image_r) * self.lambda_B * self.lambda_idt
                    # loss_idt
                    loss_idt = (loss_idt_A + loss_idt_B) * 0.5
                    # loss_idt = loss_idt_A * 0.5
                    # self.track("Identical")

                    # GAN loss D_A(G_A(A))
                    # fake_A in class B; generator adversarial loss L_G^adv
                    fake_A = self.G(image_s, image_r, mask_s, mask_r, dist_s, dist_r)
                    pred_fake = self.D_A(fake_A)
                    g_A_loss_adv = self.criterionGAN(pred_fake, True)

                    # GAN loss D_B(G_B(B))
                    fake_B = self.G(image_r, image_s, mask_r, mask_s, dist_r, dist_s)
                    pred_fake = self.D_B(fake_B)
                    g_B_loss_adv = self.criterionGAN(pred_fake, True)

                    # self.track("Generator forward")

                    # color_histogram loss
                    # per-region color histogram losses (makeup loss)
                    g_A_loss_his = 0
                    g_B_loss_his = 0
                    g_A_lip_loss_his = self.criterionHis(
                        fake_A, image_r, mask_s[:, 0], mask_r[:, 0]
                    ) * self.lambda_his_lip
                    g_B_lip_loss_his = self.criterionHis(
                        fake_B, image_s, mask_r[:, 0], mask_s[:, 0]
                    ) * self.lambda_his_lip
                    g_A_loss_his += g_A_lip_loss_his
                    g_B_loss_his += g_B_lip_loss_his

                    g_A_skin_loss_his = self.criterionHis(
                        fake_A, image_r, mask_s[:, 1], mask_r[:, 1]
                    ) * self.lambda_his_skin
                    g_B_skin_loss_his = self.criterionHis(
                        fake_B, image_s, mask_r[:, 1], mask_s[:, 1]
                    ) * self.lambda_his_skin
                    g_A_loss_his += g_A_skin_loss_his
                    g_B_loss_his += g_B_skin_loss_his

                    g_A_eye_loss_his = self.criterionHis(
                        fake_A, image_r, mask_s[:, 2], mask_r[:, 2]
                    ) * self.lambda_his_eye
                    g_B_eye_loss_his = self.criterionHis(
                        fake_B, image_s, mask_r[:, 2], mask_s[:, 2]
                    ) * self.lambda_his_eye
                    g_A_loss_his += g_A_eye_loss_his
                    g_B_loss_his += g_B_eye_loss_his

                    # self.track("Generator histogram")

                    # cycle loss
                    # fake_A: fake_x/source
                    rec_A = self.G(fake_A, image_s, mask_s, mask_s, dist_s, dist_s)
                    rec_B = self.G(fake_B, image_r, mask_r, mask_r, dist_r, dist_r)

                    g_loss_rec_A = self.criterionL1(rec_A, image_s) * self.lambda_A
                    g_loss_rec_B = self.criterionL1(rec_B, image_r) * self.lambda_B
                    # self.track("Generator recover")

                    # vgg loss
                    # Perceptual loss
                    vgg_s = self.vgg(image_s)
                    vgg_s = Variable(vgg_s.data).detach()
                    vgg_fake_A = self.vgg(fake_A)
                    g_loss_A_vgg = self.criterionL2(vgg_fake_A, vgg_s) * self.lambda_A * self.lambda_vgg
                    # self.track("Generator vgg")

                    vgg_r = self.vgg(image_r)
                    vgg_r = Variable(vgg_r.data).detach()
                    vgg_fake_B = self.vgg(fake_B)
                    g_loss_B_vgg = self.criterionL2(vgg_fake_B, vgg_r) * self.lambda_B * self.lambda_vgg

                    loss_rec = (g_loss_rec_A + g_loss_rec_B + g_loss_A_vgg + g_loss_B_vgg) * 0.5
                    # loss_rec = (g_loss_rec_A + g_loss_A_vgg) * 0.5

                    # Combined loss
                    g_loss = (g_A_loss_adv + g_B_loss_adv + loss_rec + loss_idt + g_A_loss_his + g_B_loss_his).mean()
                    # g_loss = (g_A_loss_adv + loss_rec + loss_idt + g_A_loss_his).mean()

                    self.g_optimizer.zero_grad()
                    g_loss.backward(retain_graph=False)
                    self.g_optimizer.step()
                    # self.track("Generator backward")

                    # Logging
                    self.loss['G-A-loss-adv'] = g_A_loss_adv.mean().item()
                    self.loss['G-B-loss-adv'] = g_B_loss_adv.mean().item()
                    self.loss['G-loss-org'] = g_loss_rec_A.mean().item()
                    self.loss['G-loss-ref'] = g_loss_rec_B.mean().item()
                    self.loss['G-loss-idt'] = loss_idt.mean().item()
                    self.loss['G-loss-img-rec'] = (g_loss_rec_A + g_loss_rec_B).mean().item()
                    self.loss['G-loss-vgg-rec'] = (g_loss_A_vgg + g_loss_B_vgg).mean().item()
                    # self.loss['G-loss-img-rec'] = g_loss_rec_A.mean().item()
                    # self.loss['G-loss-vgg-rec'] = g_loss_A_vgg.mean().item()

                    self.loss['G-A-loss-his'] = g_A_loss_his.mean().item()


                # Print out log info
                if (self.i + 1) % self.log_step == 0:
                    self.log_terminal()

                #plot the figures
                for key_now in self.loss.keys():
                    plot_fig.plot(key_now, self.loss[key_now])

                #save the images
                if (self.i) % self.vis_step == 0:
                    print("Saving middle output...")
                    self.vis_train([image_s, image_r, fake_A, rec_A, mask_s[:, :, 0], mask_r[:, :, 0]])

                # Save model checkpoints
                if (self.i) % self.snapshot_step == 0:
                    self.save_models()

                if (self.i % 100 == 99):
                    plot_fig.flush(self.log_path)

                plot_fig.tick()

            # Decay learning rate
            if (self.e+1) > (self.num_epochs - self.num_epochs_decay):
                g_lr -= (self.g_lr / float(self.num_epochs_decay))
                d_lr -= (self.d_lr / float(self.num_epochs_decay))
                self.update_lr(g_lr, d_lr)
                print('Decay learning rate to g_lr: {}, d_lr:{}.'.format(g_lr, d_lr))
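All discriminator and generator adversarial terms above go through self.criterionGAN(out, True/False), whose definition is not included in these snippets. As a loose sketch only, an LSGAN-style criterion (a common choice, assumed here rather than taken from the repository) could look like this:

import torch
import torch.nn as nn

class LSGANCriterion(nn.Module):
    """MSE against an all-ones or all-zeros target map (assumed stand-in for criterionGAN)."""
    def __init__(self):
        super().__init__()
        self.mse = nn.MSELoss()

    def forward(self, pred: torch.Tensor, target_is_real: bool) -> torch.Tensor:
        target = torch.ones_like(pred) if target_is_real else torch.zeros_like(pred)
        return self.mse(pred, target)

criterion = LSGANCriterion()
patch_out = torch.rand(4, 1, 30, 30)  # e.g. a PatchGAN discriminator output
d_loss = 0.5 * (criterion(patch_out, True) + criterion(patch_out, False))
print(d_loss.item())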
    def train(self):
        # The number of iterations per epoch
        self.iters_per_epoch = len(self.data_loader_train)

        # Start with trained model if exists
        cls_A = self.cls[0]
        cls_B = self.cls[1]
        e_lr = self.e_lr
        g_lr = self.g_lr
        d_lr = self.d_lr

        if self.checkpoint:
            start = int(self.checkpoint.split('_')[0])
            self.vis_test()
        else:
            start = 0
        
        # Start training
        self.start_time = time.time()
        for self.e in range(start, self.num_epochs):
            for self.i, (img_A, img_B, mask_A, mask_B) in enumerate(self.data_loader_train):
                # Convert tensor to variable
                # mask attribute: 0:background 1:face 2:left-eyebrow 3:right-eyebrow 4:left-eye 5:right-eye 6:nose
                # 7:upper-lip 8:teeth 9:under-lip 10:hair 11:left-ear 12:right-ear 13:neck
                if self.checkpoint or self.direct:
                    if self.lips:
                        mask_A_lip = (mask_A==7).float() + (mask_A==9).float()
                        mask_B_lip = (mask_B==7).float() + (mask_B==9).float()
                        mask_A_lip, mask_B_lip, index_A_lip, index_B_lip = self.mask_preprocess(mask_A_lip, mask_B_lip)
                    if self.skin:
                        mask_A_skin = (mask_A==1).float() + (mask_A==6).float() + (mask_A==13).float()
                        mask_B_skin = (mask_B==1).float() + (mask_B==6).float() + (mask_B==13).float()
                        mask_A_skin, mask_B_skin, index_A_skin, index_B_skin = self.mask_preprocess(mask_A_skin, mask_B_skin)
                    if self.eye:
                        mask_A_eye_left = (mask_A==4).float()
                        mask_A_eye_right = (mask_A==5).float()
                        mask_B_eye_left = (mask_B==4).float()
                        mask_B_eye_right = (mask_B==5).float()
                        mask_A_face = (mask_A==1).float() + (mask_A==6).float()
                        mask_B_face = (mask_B==1).float() + (mask_B==6).float()
                        # avoid the situation that images with eye closed
                        if not ((mask_A_eye_left>0).any() and (mask_B_eye_left>0).any() and \
                            (mask_A_eye_right > 0).any() and (mask_B_eye_right > 0).any()):
                            continue
                        mask_A_eye_left, mask_A_eye_right = self.rebound_box(mask_A_eye_left, mask_A_eye_right, mask_A_face)
                        mask_B_eye_left, mask_B_eye_right = self.rebound_box(mask_B_eye_left, mask_B_eye_right, mask_B_face)
                        mask_A_eye_left, mask_B_eye_left, index_A_eye_left, index_B_eye_left = \
                            self.mask_preprocess(mask_A_eye_left, mask_B_eye_left)
                        mask_A_eye_right, mask_B_eye_right, index_A_eye_right, index_B_eye_right = \
                            self.mask_preprocess(mask_A_eye_right, mask_B_eye_right)
                
                
                # ================== Train D ================== #
                # Real
                org_A = self.to_var(img_A, requires_grad=False)
                ref_B = self.to_var(img_B, requires_grad=False)
                # Fake
                base_A, makeup_A = self.E(org_A)
                base_B, makeup_B = self.E(ref_B)

                fake_A = self.G(base_A, makeup_B)
                fake_B = self.G(base_B, makeup_A)

                fake_A = Variable(fake_A.data).detach()
                fake_B = Variable(fake_B.data).detach()

                # training D_A, D_A aims to distinguish class B
                out = getattr(self, "D_" + cls_A)(ref_B)
                d_A_loss_real = self.criterionGAN(out, True)

                out = getattr(self, "D_" + cls_A)(fake_A)
                d_A_loss_fake = self.criterionGAN(out, False)

                # Backward + Optimize
                d_A_loss = (d_A_loss_real + d_A_loss_fake) * 0.5
                getattr(self, "d_" + cls_A + "_optimizer").zero_grad()
                d_A_loss.backward(retain_graph=True)
                getattr(self, "d_" + cls_A + "_optimizer").step()

                # Logging
                self.loss = {}
                self.loss['D-A-loss'] = (d_A_loss_real.item() + d_A_loss_fake.item()) * 0.5

                # training D_B, D_B aims to distinguish class A
                out = getattr(self, "D_" + cls_B)(org_A)
                d_B_loss_real = self.criterionGAN(out, True)

                out = getattr(self, "D_" + cls_B)(fake_B)
                d_B_loss_fake =  self.criterionGAN(out, False)

                # Backward + Optimize
                d_B_loss = (d_B_loss_real + d_B_loss_fake) * 0.5
                getattr(self, "d_" + cls_B + "_optimizer").zero_grad()
                d_B_loss.backward(retain_graph=True)
                getattr(self, "d_" + cls_B + "_optimizer").step()

                # Logging
                self.loss['D-B-loss'] = (d_B_loss_real.item() + d_B_loss_fake.item()) * 0.5

                 # ================== Train G ================== #
                if (self.i + 1) % self.ndis == 0:

                    # identity loss
                    if self.lambda_idt > 0:
                        # G should be identity if ref_B or org_A is fed
                        base_A, makeup_A = self.E(org_A)
                        base_B, makeup_B = self.E(ref_B)
                        
                        idt_A = self.G(base_A, makeup_A)
                        idt_B = self.G(base_B, makeup_B)

                        loss_idt_A = self.criterionL1(idt_A, org_A) * self.lambda_A * self.lambda_idt
                        loss_idt_B = self.criterionL1(idt_B, ref_B) * self.lambda_B * self.lambda_idt

                        loss_idt = loss_idt_A + loss_idt_B
                    else:
                        loss_idt = 0
                
                    # Fake
                    base_A, makeup_A = self.E(org_A)
                    base_B, makeup_B = self.E(ref_B)

                    fake_A = self.G(base_A, makeup_B)
                    fake_B = self.G(base_B, makeup_A)

                    # GAN loss D_A(G_A(A))
                    pred_fake = getattr(self, "D_" + cls_A)(fake_A)
                    g_A_loss_adv = self.criterionGAN(pred_fake, True)

                    # GAN loss D_B(G_B(B))
                    pred_fake = getattr(self, "D_" + cls_B)(fake_B)
                    g_B_loss_adv = self.criterionGAN(pred_fake, True)

                    # color_histogram loss
                    g_A_loss_his = 0
                    g_B_loss_his = 0
                    if self.checkpoint or self.direct:
                        if self.lips:
                            g_A_lip_loss_his = self.criterionHis(fake_A, ref_B, mask_A_lip, mask_B_lip, index_A_lip) * self.lambda_his_lip
                            g_B_lip_loss_his = self.criterionHis(fake_B, org_A, mask_B_lip, mask_A_lip, index_B_lip) * self.lambda_his_lip
                            g_A_loss_his += g_A_lip_loss_his
                            g_B_loss_his += g_B_lip_loss_his
                        if self.skin:
                            g_A_skin_loss_his = self.criterionHis(fake_A, ref_B, mask_A_skin, mask_B_skin, index_A_skin) * self.lambda_his_skin_1
                            g_B_skin_loss_his = self.criterionHis(fake_B, org_A, mask_B_skin, mask_A_skin, index_B_skin) * self.lambda_his_skin_2
                            g_A_loss_his += g_A_skin_loss_his
                            g_B_loss_his += g_B_skin_loss_his
                        if self.eye:
                            g_A_eye_left_loss_his = self.criterionHis(fake_A, ref_B, mask_A_eye_left, mask_B_eye_left, index_A_eye_left) * self.lambda_his_eye
                            g_B_eye_left_loss_his = self.criterionHis(fake_B, org_A, mask_B_eye_left, mask_A_eye_left, index_B_eye_left) * self.lambda_his_eye
                            g_A_eye_right_loss_his = self.criterionHis(fake_A, ref_B, mask_A_eye_right, mask_B_eye_right, index_A_eye_right) * self.lambda_his_eye
                            g_B_eye_right_loss_his = self.criterionHis(fake_B, org_A, mask_B_eye_right, mask_A_eye_right, index_B_eye_right) * self.lambda_his_eye
                            g_A_loss_his += g_A_eye_left_loss_his + g_A_eye_right_loss_his
                            g_B_loss_his += g_B_eye_left_loss_his + g_B_eye_right_loss_his
                    
                    # cycle loss
                    base_fake_A, makeup_fake_A = self.E(fake_A)
                    base_fake_B, makeup_fake_B = self.E(fake_B)

                    rec_A = self.G(base_fake_A, makeup_fake_B)
                    rec_B = self.G(base_fake_B, makeup_fake_A)

                    g_loss_rec_A = self.criterionL1(rec_A, org_A) * self.lambda_A
                    g_loss_rec_B = self.criterionL1(rec_B, ref_B) * self.lambda_B

                    # vgg loss
                    vgg_org = self.vgg(org_A, self.content_layer)[0]
                    vgg_org = Variable(vgg_org.data).detach()
                    vgg_fake_A = self.vgg(fake_A, self.content_layer)[0]
                    g_loss_A_vgg = self.criterionL2(vgg_fake_A, vgg_org) * self.lambda_A * self.lambda_vgg
                    
                    vgg_ref = self.vgg(ref_B, self.content_layer)[0]
                    vgg_ref = Variable(vgg_ref.data).detach()
                    vgg_fake_B = self.vgg(fake_B, self.content_layer)[0]
                    g_loss_B_vgg = self.criterionL2(vgg_fake_B, vgg_ref) * self.lambda_B * self.lambda_vgg
					
                    loss_rec = (g_loss_rec_A + g_loss_rec_B + g_loss_A_vgg + g_loss_B_vgg) * 0.5

                    # Combined loss
                    g_loss = g_A_loss_adv + g_B_loss_adv + loss_rec + loss_idt
                    if self.checkpoint or self.direct:
                        g_loss = g_A_loss_adv + g_B_loss_adv + loss_rec + loss_idt + g_A_loss_his + g_B_loss_his
                    
                    self.e_optimizer.zero_grad()
                    self.g_optimizer.zero_grad()
                    g_loss.backward(retain_graph=True)
                    self.e_optimizer.step()
                    self.g_optimizer.step()

                    # Logging
                    self.loss['G-A-loss-adv'] = g_A_loss_adv.item()
                    self.loss['G-B-loss-adv'] = g_B_loss_adv.item()
                    self.loss['G-loss-org'] = g_loss_rec_A.item()
                    self.loss['G-loss-ref'] = g_loss_rec_B.item()
                    self.loss['G-loss-idt'] = loss_idt.item() if self.lambda_idt > 0 else 0
                    self.loss['G-loss-img-rec'] = (g_loss_rec_A + g_loss_rec_B).item()
                    self.loss['G-loss-vgg-rec'] = (g_loss_A_vgg + g_loss_B_vgg).item()
                    if self.direct:
                        self.loss['G-A-loss-his'] = g_A_loss_his.item()
                        self.loss['G-B-loss-his'] = g_B_loss_his.item()
                
                # Print out log info
                if (self.i + 1) % self.log_step == 0:
                    self.log_terminal()
                    self.log_tensorboard()

                #plot the figures
                for key_now in self.loss.keys():
                    plot_fig.plot(key_now, self.loss[key_now])

                #save the images
                if (self.i + 1) % self.vis_step == 0:
                    print("Saving middle output...")
                    self.vis_train([org_A, ref_B, fake_A, fake_B, rec_A, rec_B])

                # Save model checkpoints
                if (self.i + 1) % self.snapshot_step == 0:
                    self.save_models()

                if (self.i % 100 == 99):
                    plot_fig.flush(self.task_name)

                plot_fig.tick()
            
            # Decay learning rate
            if (self.e+1) > (self.num_epochs - self.num_epochs_decay):
                g_lr -= (self.g_lr / float(self.num_epochs_decay))
                d_lr -= (self.d_lr / float(self.num_epochs_decay))
                self.update_lr(g_lr, d_lr)
                print('Decay learning rate to g_lr: {}, d_lr:{}.'.format(g_lr, d_lr))

            if self.e % 2 == 0:
                print("Saving output...")
                self.vis_test()
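In this last example the encoder E splits a face into a base (identity) code and a makeup code, and G recombines them: swapping the makeup codes performs the transfer, and re-encoding the fakes and swapping the codes back gives the cycle reconstruction. A schematic sketch of that data flow with placeholder E/G functions; the shapes and bodies are illustrative only, not the repository's networks.

import torch
import torch.nn.functional as F

def E(x):
    """Placeholder encoder: split an image into a (base, makeup) code pair."""
    return x * 0.5, x * 0.5  # real networks would return learned feature maps

def G(base, makeup):
    """Placeholder generator: recombine a base code with a makeup code."""
    return base + makeup

org_A = torch.rand(1, 3, 256, 256)  # source face
ref_B = torch.rand(1, 3, 256, 256)  # makeup reference

base_A, makeup_A = E(org_A)
base_B, makeup_B = E(ref_B)
fake_A = G(base_A, makeup_B)  # A's identity with B's makeup
fake_B = G(base_B, makeup_A)  # B's identity with A's makeup

# cycle: re-encode the fakes and swap the makeup codes back
base_fA, makeup_fA = E(fake_A)
base_fB, makeup_fB = E(fake_B)
rec_A = G(base_fA, makeup_fB)
rec_B = G(base_fB, makeup_fA)
cycle_loss = F.l1_loss(rec_A, org_A) + F.l1_loss(rec_B, ref_B)
print(cycle_loss.item())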