Example #1
    def test_loop(self):
        # Test on validation set
        self.model_dict["G"].eval()

        for loss in self.losses:
            self.loss_epoch_dict_test[loss] = []
        # test loop #
        for real_vgg, real_default in tqdm(self.test_loader):
            real_vgg = Variable(real_vgg.cuda())
            real_default = Variable(real_default.cuda())

            # TEST GENERATOR
            style_losses, content_losses = self.test_gen(
                real_default, real_vgg)

            # append all losses in loss dict #
            for loss in self.losses:
                self.loss_epoch_dict_test[loss].append(self.loss_batch_dict_test[loss].item())
        for loss in self.losses:
            self.train_hist_dict_test[loss].append(helper.mft(self.loss_epoch_dict_test[loss]))
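All of these examples lean on a helper module that is never shown. From its use, helper.mft reduces a list of per-batch loss values to a single epoch average; a minimal sketch of what it likely does (name and behavior inferred from the call sites, not taken from the source):

def mft(values):
    # Average a list of per-batch loss floats; an empty epoch averages to 0.
    return float(sum(values)) / max(len(values), 1)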
Example #2
    def test_loop(self):
        # Test on validation set
        self.model_dict["G"].eval()
        self.set_grad(False)

        for loss in self.losses:
            self.loss_epoch_dict_test[loss] = []
        # test loop #
        for low_res, high_res in tqdm(self.test_loader):
            low_res = Variable(low_res.cuda())
            high_res = Variable(high_res.cuda())

            # TEST GENERATOR
            l1_losses, content_losses = self.test_gen(low_res, high_res)

            # append all losses in loss dict #
            for loss in self.losses:
                self.loss_epoch_dict_test[loss].append(self.loss_batch_dict_test[loss].item())
        for loss in self.losses:
            self.train_hist_dict_test[loss].append(helper.mft(self.loss_epoch_dict_test[loss]))
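set_grad is likewise only called, never defined. Invoked here with a single flag right after G is put in eval mode, it presumably toggles requires_grad on the generator's parameters so validation skips gradient bookkeeping; Example #3 passes a model key first and toggles one network at a time. A hedged sketch of the single-flag form:

def set_grad(self, flag):
    # Enable or disable autograd tracking on the generator's parameters.
    for p in self.model_dict["G"].parameters():
        p.requires_grad = flag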
Example #3
    def train_loop(self):
        self.model_dict["G"].train()
        self.set_grad("G", True)
        self.model_dict["D"].train()
        self.set_grad("D", True)

        for loss in self.losses:
            self.loss_epoch_dict[loss] = []

        # Set learning rate
        self.opt_dict["G"].param_groups[0]['lr'] = self.params['lr_gen']
        self.opt_dict["D"].param_groups[0]['lr'] = self.params['lr_disc']
        # print learning rates
        print(
            f"Sched Sched Iter:{self.current_iter}, Sched Epoch:{self.current_epoch}"
        )
        for opt in self.opt_dict:
            print(f"Learning Rate({opt}): {self.opt_dict[opt].param_groups[0]['lr']}")

        for real_data in tqdm(self.train_loader):
            real = real_data.cuda()

            # TRAIN GEN
            self.set_grad("G", True)
            self.set_grad("D", False)
            fake = self.train_gen()

            # TRAIN DISC
            self.set_grad("G", False)
            self.set_grad("D", True)
            self.train_disc(fake, real)

            # append all losses in loss dict
            for loss in self.losses:
                self.loss_epoch_dict[loss].append(self.loss_batch_dict[loss].item())
            self.current_iter += 1
        for loss in self.losses:
            self.train_hist_dict[loss].append(helper.mft(self.loss_epoch_dict[loss]))
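Several of these loops set the learning rate by assigning straight into the optimizer state. That part is plain PyTorch: optimizer.param_groups is a list of dicts, and a value written to 'lr' is picked up on the next step(). A self-contained illustration:

import torch

model = torch.nn.Linear(8, 1)
opt = torch.optim.Adam(model.parameters(), lr=1e-3)

# Halve the learning rate of every parameter group in place;
# the new value takes effect on the next opt.step().
for group in opt.param_groups:
    group['lr'] *= 0.5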
Example #4
    def test_loop(self):
        # Test on validation set
        self.model_dict["D"].eval()
        self.model_dict["G"].eval()
        self.opt_dict["G"].zero_grad()
        self.opt_dict["D"].zero_grad()

        for loss in self.losses:
            self.loss_epoch_dict_test[loss] = []
        self.set_grad_req(d=False, g=False)
        # test loop #
        for (real_a, real_b) in tqdm(self.test_loader):
            real_a, real_b = Variable(real_a.cuda()), Variable(real_b.cuda())
            # GENERATE
            fake_b = self.model_dict["G"](real_a)
            # TEST DISCRIMINATOR
            self.test_disc(real_a, real_b, fake_b)
            # TEST GENERATOR
            self.test_gen(real_a, real_b, fake_b)
            # append all losses in loss dict #
            for loss in self.losses:
                self.loss_epoch_dict_test[loss].append(self.loss_batch_dict_test[loss].item())

        for loss in self.losses:
            self.train_hist_dict_test[loss].append(helper.mft(self.loss_epoch_dict_test[loss]))
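This variant gates gradients for both networks through set_grad_req(d=..., g=...). A minimal sketch of the likely implementation, reusing the "D"/"G" keys from the snippet (signature inferred from the call sites):

def set_grad_req(self, d=True, g=True):
    # Toggle autograd tracking per network so that only the network
    # currently being trained accumulates gradients.
    for p in self.model_dict["D"].parameters():
        p.requires_grad = d
    for p in self.model_dict["G"].parameters():
        p.requires_grad = g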
Example #5
    def train_loop(self):
        # Train on train set
        self.model_dict["G"].train()
        self.set_grad(True)

        for loss in self.losses:
            self.loss_epoch_dict[loss] = []

        # train loop
        lr_mult = self.lr_lookup()
        self.opt_dict["G"].param_groups[0]['lr'] = lr_mult * self.params['lr']
        print(
            f"Sched Sched Iter:{self.current_iter}, Sched Epoch:{self.current_epoch}"
        )
        for opt in self.opt_dict:
            print(f"Learning Rate({opt}): {self.opt_dict[opt].param_groups[0]['lr']}")
        for low_res, high_res in tqdm(self.train_loader):
            low_res = Variable(low_res.cuda())
            high_res = Variable(high_res.cuda())

            # TRAIN GENERATOR
            l1_losses, content_losses = self.train_gen(low_res, high_res)

            # append all losses in loss dict
            for loss in self.losses:
                self.loss_epoch_dict[loss].append(self.loss_batch_dict[loss].item())
            self.current_iter += 1
        for loss in self.losses:
            self.train_hist_dict[loss].append(helper.mft(self.loss_epoch_dict[loss]))
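lr_lookup is another undefined helper; here it returns a multiplier applied to the base learning rate, and in Example #7 below it also returns a save flag. A hedged sketch of the single-value form with a purely illustrative step decay (the real schedule is not part of these examples):

def lr_lookup(self):
    # Halve the base learning-rate multiplier every 10 epochs.
    return 0.5 ** (self.current_epoch // 10)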
Example #6
    def train(self):
        # Train
        params = self.params
        for epoch in range(params["train_epoch"]):

            # clear last epoch's losses
            for loss in self.losses:
                self.loss_epoch_dict[loss] = []

            print(
                f"Sched Iter:{self.current_iter}, Sched Epoch:{self.current_epoch}"
            )
            for opt in self.opt_dict:
                print(f"Learning Rate({opt}): {self.opt_dict[opt].param_groups[0]['lr']}")

            self.loop_iter = 0
            epoch_start_time = time.time()

            for real_data in tqdm(self.train_loader):
                real = real_data.cuda()
                # add some noise
                real = (real * .9) + (
                    .1 * torch.FloatTensor(real.shape).normal_(0, .5).cuda())

                # DREAM #
                self.set_grad_req(d=False, g=True)
                fake = self.dream_on_mesh(batch_size=real.shape[0])

                # TRAIN DISC #
                self.set_grad_req(d=True, g=False)
                self.train_disc(fake, real)

                # append all losses in loss dict #
                for loss in self.losses:
                    self.loss_epoch_dict[loss].append(self.loss_batch_dict[loss].item())
                self.loop_iter += 1
                self.current_iter += 1

            self.current_epoch += 1

            if self.current_epoch % params['save_img_every'] == 0:
                helper.show_test(
                    real,
                    fake,
                    self.t2v(self.model_dict['M'].textures.unsqueeze(0).view(
                        1, params['grid_res'] - 1, params['grid_res'] - 1,
                        48).permute(0, 3, 1, 2)),
                    self.transform,
                    save=f'output/{params["save_root"]}_{self.current_epoch}.jpg')
            save_str = self.save_state(
                f'output/{params["save_root"]}_{self.current_epoch}.json')
            print(save_str)

            epoch_end_time = time.time()
            per_epoch_ptime = epoch_end_time - epoch_start_time
            for loss in self.losses:
                self.train_hist_dict[loss].append(helper.mft(self.loss_epoch_dict[loss]))
            print(f'Epoch:{self.current_epoch}, Epoch Time:{per_epoch_ptime}')
            for loss in self.losses:
                print(f'Train {loss}: {helper.mft(self.loss_epoch_dict[loss])}')

        self.display_history()
        print('Hit End of Learning Schedule!')
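The noise injection above (scale real images by 0.9, add a little Gaussian noise) is a common trick for keeping the discriminator from keying on exact pixel values. In current PyTorch the same statistics can be produced device-agnostically with torch.randn_like; a one-line equivalent, assuming real is already on the target device:

real = real * 0.9 + 0.1 * torch.randn_like(real) * 0.5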
Example #7
    def train(self):
        # Train following learning rate schedule
        params = self.params
        done = False
        while not done:
            # clear last epoch's losses
            for loss in self.losses:
                self.loss_epoch_dict[loss] = []

            self.model_dict["D"].train()
            self.model_dict["G"].train()
            self.set_grad_req(d=True, g=True)

            epoch_start_time = time.time()
            num_iter = 0

            print(f"Sched Cycle:{self.current_cycle}, Sched Iter:{self.current_iter}, Sched Epoch:{self.current_epoch}")
            for opt in self.opt_dict:
                print(f"Learning Rate({opt}): {self.opt_dict[opt].param_groups[0]['lr']}")

            for (real_a, real_b) in tqdm(self.train_loader):

                if self.current_iter > len(self.iter_stack) - 1:
                    done = True
                    self.display_history()
                    print('Hit End of Learning Schedule!')
                    break

                # set learning rate
                lr_mult, save = self.lr_lookup()
                self.opt_dict["D"].param_groups[0]['lr'] = lr_mult * params['lr_disc']
                self.opt_dict["G"].param_groups[0]['lr'] = lr_mult * params['lr_gen']

                real_a, real_b = Variable(real_a.cuda()), Variable(real_b.cuda())

                # GENERATE
                fake_b = self.model_dict["G"](real_a)
                # TRAIN DISCRIMINATOR
                self.train_disc(real_a, real_b, fake_b)
                # TRAIN GENERATOR
                self.train_gen(real_a, real_b, fake_b)

                # append all losses in loss dict
                for loss in self.losses:
                    self.loss_epoch_dict[loss].append(self.loss_batch_dict[loss].item())

                if save:
                    save_str = self.save_state(f'output/{params["save_root"]}_{self.current_epoch}.json')
                    tqdm.write(save_str)
                    self.current_epoch += 1

                self.current_iter += 1
                num_iter += 1

            # generate test images and save to disk
            helper.show_test(params,
                             self.transform,
                             self.test_loader,
                             self.model_dict['G'],
                             save=f'output/{params["save_root"]}_{self.current_cycle}.jpg')

            # run validation set loop to get losses
            self.test_loop()

            if not done:
                self.current_cycle += 1
                epoch_end_time = time.time()
                per_epoch_ptime = epoch_end_time - epoch_start_time
                print(f'Epoch Training Time: {per_epoch_ptime}')
                for loss in self.losses:
                    print(f'Train {loss}: {helper.mft(self.loss_epoch_dict[loss])}')
                for loss in self.losses:
                    print(f'Val {loss}: {helper.mft(self.loss_epoch_dict_test[loss])}')
                print('\n')
                for loss in self.losses:
                    self.train_hist_dict[loss].append(helper.mft(self.loss_epoch_dict[loss]))
Example #8
    def train(self):
        # Train following learning rate schedule
        params = self.params
        while self.current_epoch < params["train_epoch"]:
            # clear last epoch's losses
            for loss in self.losses:
                self.loss_epoch_dict[loss] = []

            self.model_dict["G"].train()
            epoch_start_time = time.time()
            num_iter = 0

            print(
                f"Sched Sched Iter:{self.current_iter}, Sched Epoch:{self.current_epoch}"
            )
            for opt in self.opt_dict:
                print(f"Learning Rate({opt}): {self.opt_dict[opt].param_groups[0]['lr']}")
            for real_vgg, real_default in tqdm(self.train_loader):
                real_vgg = Variable(real_vgg.cuda())
                real_default = Variable(real_default.cuda())

                # TRAIN GENERATOR
                style_losses, content_losses = self.train_gen(
                    real_default, real_vgg)

                # append all losses in loss dict
                for loss in self.losses:
                    self.loss_epoch_dict[loss].append(self.loss_batch_dict[loss].item())
                self.current_iter += 1
                num_iter += 1

            # generate test images and save to disk
            if self.current_epoch % params["save_img_every"] == 0:
                helper.show_test(
                    params,
                    self.transform,
                    self.tensor_transform,
                    self.test_loader,
                    self.style,
                    self.model_dict['G'],
                    save=f'output/{params["save_root"]}_val_{self.current_epoch}.jpg'
                )

            # run validation set loop to get losses
            self.test_loop()
            if self.current_epoch % params["save_every"] == 0:
                save_str = self.save_state(
                    f'output/{params["save_root"]}_{self.current_epoch}.json')
                tqdm.write(save_str)

            self.current_epoch += 1
            epoch_end_time = time.time()
            per_epoch_ptime = epoch_end_time - epoch_start_time
            print(f'Epoch Training Time: {per_epoch_ptime}')
            for loss in self.losses:
                print(f'Train {loss}: {helper.mft(self.loss_epoch_dict[loss])}')
            for loss in self.losses:
                print(f'Val {loss}: {helper.mft(self.loss_epoch_dict_test[loss])}')
            print('\n')
            for loss in self.losses:
                self.train_hist_dict[loss].append(helper.mft(self.loss_epoch_dict[loss]))

        self.display_history()
        print('Hit End of Learning Schedule!')
Example #9
    def train(self):
        # Train loop using custom batch feeder to pull samples
        params = self.params
        for epoch in range(params["train_epoch"]):

            # clear last epoch's losses
            for loss in self.losses:
                self.loss_epoch_dict[loss] = []

            print(
                f"Sched Iter:{self.current_iter}, Sched Epoch:{self.current_epoch}"
            )
            for opt in self.opt_dict:
                print(f"Learning Rate({opt}): {self.opt_dict[opt].param_groups[0]['lr']}")

            self.model_dict["G"].train()
            self.model_dict["D"].train()

            batch_feeder = helper.BatchFeeder(self.train_loader)

            epoch_iter_count = 0
            epoch_start_time = time.time()

            # Run progress bar for length of dataset
            with tqdm(total=self.data_len) as epoch_bar:

                while epoch_iter_count < self.data_len:

                    # Set discriminator loop length: run the critic many times
                    # early on and periodically, then settle at 5 per gen step
                    disc_loop_total = 100 if (self.current_iter < 25 or
                                              self.current_iter % 500 == 0) else 5
                    self.set_grad_req(d=True, g=False)

                    # TRAIN DISC #
                    disc_loop_count = 0
                    while (disc_loop_count < disc_loop_total
                           ) and epoch_iter_count < self.data_len:
                        data_iter = batch_feeder.get_new()
                        self.train_disc(data_iter)

                        disc_loop_count += 1
                        epoch_iter_count += 1
                        epoch_bar.update()

                    # TRAIN GEN #
                    self.set_grad_req(d=False, g=True)
                    self.train_gen()

                    # append all losses in loss dict #
                    for loss in self.losses:
                        self.loss_epoch_dict[loss].append(self.loss_batch_dict[loss].item())
                    self.current_iter += 1

            self.current_epoch += 1

            if self.current_epoch % params['save_every'] == 0:
                helper.show_test(
                    self.model_dict['G'],
                    Variable(self.preview_noise),
                    self.transform,
                    save=f'output/{params["save_root"]}_{self.current_epoch}.jpg')

                save_str = self.save_state(
                    f'output/{params["save_root"]}_{self.current_epoch}.json')
                print(save_str)

            epoch_end_time = time.time()
            per_epoch_ptime = epoch_end_time - epoch_start_time
            for loss in self.losses:
                self.train_hist_dict[loss].append(helper.mft(self.loss_epoch_dict[loss]))
            print(f'Epoch:{self.current_epoch}, Epoch Time:{per_epoch_ptime}')
            for loss in self.losses:
                print(f'Train {loss}: {helper.mft(self.loss_epoch_dict[loss])}')

        self.display_history()
        print('Hit End of Learning Schedule!')
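helper.BatchFeeder is only used, never defined; from get_new() and the iteration counters it looks like a wrapper that restarts the DataLoader whenever it runs dry, so the discriminator can draw an arbitrary number of batches per generator step. A hedged sketch, with the class and method names taken from the call site:

class BatchFeeder:
    # Wrap a DataLoader so batches can be pulled on demand, re-creating
    # the underlying iterator whenever it is exhausted.
    def __init__(self, loader):
        self.loader = loader
        self.iterator = iter(loader)

    def get_new(self):
        try:
            return next(self.iterator)
        except StopIteration:
            self.iterator = iter(self.loader)
            return next(self.iterator)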
Example #10
    def train_loop(self):

        self.current_epoch_iter = 0
        for key in self.model_dict.keys():
            self.model_dict[key].train()
            self.set_grad(key, True)

        for loss in self.losses:
            self.loss_epoch_dict[loss] = []

        # Set Learning rate
        lr_mult = self.lr_lookup()
        self.opt_dict["AE_A"].param_groups[0]['lr'] = self.params['lr'] * (
            lr_mult / 2)
        self.opt_dict["AE_B"].param_groups[0]['lr'] = self.params['lr'] * (
            lr_mult / 2)
        self.opt_dict["DISC_A"].param_groups[0][
            'lr'] = self.params['lr'] * lr_mult
        self.opt_dict["DISC_B"].param_groups[0][
            'lr'] = self.params['lr'] * lr_mult

        # Print Learning Rate
        print(
            f"Sched Sched Iter:{self.current_iter}, Sched Epoch:{self.current_epoch}"
        )
        for opt in self.opt_dict:
            print(f"Learning Rate({opt}): {self.opt_dict[opt].param_groups[0]['lr']}")

        # Train loop

        for package_a, package_b in tqdm(self.get_train_loop()):
            warped_a = Variable(package_a[0]).cuda()
            target_a = Variable(package_a[1]).cuda()
            warped_b = Variable(package_b[0]).cuda()
            target_b = Variable(package_b[1]).cuda()

            # TRAIN AUTOENCODERS
            fake_a = self.train_ae('A', warped_a, target_a)
            fake_b = self.train_ae('B', warped_b, target_b)

            # TRAIN DISCRIMINATORS
            self.train_disc('A', target_a, fake_a, warped_a)
            self.train_disc('B', target_b, fake_b, warped_b)

            # MAKE PREVIEW IMAGES
            if (self.current_epoch_iter == 0 and
                    self.current_epoch % self.params["save_img_every"] == 0):
                self.show_result('A', warped_a, target_a)
                self.show_result('B', warped_b, target_b)

            # append all losses in loss dict
            for loss in self.losses:
                self.loss_epoch_dict[loss].append(self.loss_batch_dict[loss].item())
            self.current_iter += 1
            self.current_epoch_iter += 1
        for loss in self.losses:
            self.train_hist_dict[loss].append(helper.mft(self.loss_epoch_dict[loss]))