Example 1
    def train(self):
        start_time = time()
        pp('Start training')
        pp({
            'train': self.train_data_loader.dataset,
            'validation': self.val_data_loader.dataset,
            'test': self.test_data_loader.dataset
        })
        pp(vars(self.args))

        # TODO(lyj):
        for self.cur_epoch in range(self.args.epochs):
            self.train_epoch()

        v_b_i = self.results["val"].argmax()
        t_b_i = self.results["test"].argmax()
        test_best = self.results["test"].max()
        test_select = self.results["test"][v_b_i]

        # print("Best val %g, corresponding test %g - best test: %g" % (val_res.max(), test_res[idx_best], test_res.max()))
        temp_dict = {
            'now': strftime("%Y-%m-%d %H:%M:%S", localtime()),
            'source': self.args.source,
            'target': self.args.target,
            'param': self.args.parameters,
            'bs': self.args.batch_size,
            'lr': self.args.learning_rate,
            'Highest accuracy on validation set appears on epoch': v_b_i.item(),
            'Highest accuracy on test set appears on epoch': t_b_i.item(),
            'Accuracy on test set when the accuracy on validation set is highest': test_select.item(),
            'Highest accuracy on test set': test_best.item(),
            'duration': time() - start_time
        }
        pp(temp_dict)
        self.writer.w(temp_dict)
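
The pp pretty-printer and self.writer.w used above are project-local helpers whose definitions are not shown. A minimal sketch of plausible stand-ins (names and behavior are assumptions, not the source's implementation):

import json
from pprint import pprint


def pp(obj):
    # Pretty-print any object; the real helper may also write to a log file.
    pprint(obj)


class JsonLinesWriter:
    """Assumed behavior of self.writer: append each record as one JSON line."""

    def __init__(self, path):
        self.path = path

    def w(self, record):
        with open(self.path, 'a') as f:
            f.write(json.dumps(record, default=str) + '\n')
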
Example 2
    def train_epoch(self):
        self.scheduler.step()  # advance the learning-rate schedule once per epoch
        lrs = self.scheduler.get_lr()
        criterion = nn.CrossEntropyLoss()

        # Put the model in training mode so its parameters are updated during backprop
        self.model.train()
        for i, (data, n, c_l) in enumerate(self.train_data_loader):
            data, n, c_l = data.to(self.device), n.to(self.device), c_l.to(
                self.device)
            self.optimizer.zero_grad()

            n_logit, c_l_logit = self.model(data)  # , lambda_val=lambda_val)
            usv_loss = criterion(n_logit, n)
            sv_loss = criterion(c_l_logit[n == 0], c_l[n == 0])

            _, c_l_pred = c_l_logit.max(dim=1)
            _, usv_pred = n_logit.max(dim=1)
            # _, domain_pred = domain_logit.max(dim=1)
            loss = sv_loss + usv_loss * self.args.usvt_weight

            loss.backward()
            self.optimizer.step()

            # record and print
            acc_class = torch.sum(c_l_pred == c_l).item() / data.shape[0]
            acc_u = torch.sum(usv_pred == n).item() / data.shape[0]
            if i == 0:
                col_n = ceil(
                    len(self.train_data_loader) / self.collect_per_batch)
                print(
                    f'epoch:{self.cur_epoch}/{self.args.epochs};bs:{data.shape[0]};'
                    f'lr:{" ".join([str(lr) for lr in lrs])}; '
                    f'{len(self.train_data_loader)}/{self.collect_per_batch}={col_n}|',
                    end='')
            if self.args.wandb and self.args.nth_repeat == 0 and i % self.collect_per_batch == 0:
                print('#', end='')
                wandb.log({
                    'acc/train/sv_task': acc_class,
                    'acc/train/usv_task': acc_u,
                    'loss/train/class': sv_loss.item(),
                    'loss/train/usv_task': usv_loss.item(),
                    'loss/train/sum': loss.item()
                })

            if i == len(self.train_data_loader) - 1:
                print()
                pp([
                    f'train_acc:c:{acc_class};u:{acc_u}',
                    f'train_loss:u:{usv_loss.item()};c:{sv_loss.item()}'
                ])

            del loss, sv_loss, usv_loss, n_logit, c_l_logit

        # eval
        self.model.eval()
        with torch.no_grad():
            for phase, loader in self.test_loaders.items():
                l_acc, _ = Trainer.test(self.model, loader, device=self.device)
                pp(f'{phase}_acc:c:{l_acc}')
                if self.args.wandb and self.args.nth_repeat == 0:
                    wandb.log({f'acc/{phase}/sv_task': l_acc})
                self.results[phase][self.cur_epoch] = l_acc
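
The loader yields (data, n, c_l) triples where n reads as a rotation pretext label in {0, 1, 2, 3} and c_l as the object class: the supervised loss above is computed only on samples with n == 0, i.e. the unrotated ones. A minimal sketch of how such a batch could be built (an assumption about the data pipeline, which is not shown):

import torch


def make_rotation_batch(images, class_labels):
    # images: (B, C, H, W); give each sample a random multiple of 90 degrees
    n = torch.randint(0, 4, (images.size(0),))
    rotated = torch.stack([
        torch.rot90(img, k.item(), [1, 2]) for img, k in zip(images, n)
    ])
    # n == 0 marks the unrotated samples that receive the class loss
    return rotated, n, class_labels
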
Example 3
    def train_epoch(self):
        print('******************************')
        self.scheduler.step()  # advance the learning-rate schedule once per epoch
        lrs = self.scheduler.get_lr()
        criterion = nn.CrossEntropyLoss()

        # Put the model in training mode so its parameters are updated during backprop

        for i, (data, n, c_l) in enumerate(self.train_data_loader):
            self.model.train()
            data, n, c_l = data.to(self.device), n.to(self.device), c_l.to(
                self.device)
            # for i in range(20):
            #     print(n[i])
            #     tf.ToPILImage()(data[i]).show()
            #     print()
            self.optimizer.zero_grad()

            n_logit, c_l_logit = self.model(data)  # , lambda_val=lambda_val)
            # us_loss = criterion(n_logit, n)
            # temporarily reuse the rotation-head logits as the class logits
            c_l_logit = n_logit
            # s_loss = criterion(c_l_logit[n == 0], c_l[n == 0])
            s_loss = criterion(c_l_logit, c_l)
            # s_loss = torch.tensor(-1)

            _, c_l_pred = c_l_logit.max(dim=1)
            # _, n_pred = n_logit.max(dim=1)
            # _, domain_pred = domain_logit.max(dim=1)

            # self.model.eval()
            n_random = torch.randint(0, 4, (len(data), ))  # a random 90-degree rotation count per sample

            n_logit2, _ = self.model(data)
            activations = self.input_grad.activations['out']

            grad = self.input_grad.get_input_gradient(n_logit2,
                                                      create_graph=True)
            abs_grad = torch.norm(grad)

            mask = self.input_grad.get_mask(activations, grad).data
            # k = 5
            # tf.ToPILImage()(visualize_cam(mask[k], data[k])[0]).show()
            # tf.ToPILImage()(visualize_cam(mask[k], data[k])[1]).show()

            grad *= 100000  # scale up the very small input gradients

            data_r = torch.zeros_like(data)
            for j in range(len(data)):
                data_r[j] = torch.rot90(data[j], -n_random[j].item(), [1, 2])
            n_logit_r, _ = self.model(data_r)
            activations_r = self.input_grad.activations['out']

            grad_r = self.input_grad.get_input_gradient(n_logit_r,
                                                        create_graph=True)
            abs_grad_r = torch.norm(grad_r)

            mask_r = self.input_grad.get_mask(activations_r, grad_r)
            # tf.ToPILImage()(visualize_cam(mask_r[k], data_r[k])[0]).show()
            # tf.ToPILImage()(visualize_cam(mask_r[k], data_r[k])[1]).show()

            for j in range(len(mask_r)):
                mask_r[j] = torch.rot90(mask_r[j], n_random[j].item(), [1, 2])

            grad_r *= 100000  # scale before copying so grad_r and grad_r_r
            # share the same scale for the MSE below
            # rotate the gradients back to the original orientation
            grad_r_r = torch.zeros_like(grad_r)
            for j in range(len(grad_r)):
                grad_r_r[j] = torch.rot90(grad_r[j], n_random[j].item(),
                                          [1, 2])
            # grad_ori = grad_ori.reshape(grad_ori.shape[0], -1)
            # print(data_ori[0] == data[0])
            # print(grad_ori[0] == grad[0])

            # self.model.train()

            # input_gradient_loss = nn.MSELoss()(grad_r, grad)
            mask_loss = nn.MSELoss()(mask_r, mask)
            input_gradient_loss = nn.MSELoss()(grad_r, grad_r_r)
            # loss = s_loss + us_loss * self.args.usvt_weight
            # loss = us_loss * self.args.usvt_weight
            # loss = s_loss

            # if self.cur_epoch != 0 and 20*input_gradient_loss.item() < s_loss.item():
            #     loss = s_loss
            # else:
            #     loss = s_loss +  5*input_gradient_loss
            #     # loss = s_loss
            # if abs_grad.item()>0.4 and abs_grad.item() < 1.0:
            #     loss = s_loss  +  5*input_gradient_loss
            # else:
            #     loss = s_loss
            loss = mask_loss  # this variant trains on the mask-consistency loss alone

            # loss = s_loss + 80 * input_gradient_loss
            loss.backward()
            self.optimizer.step()

            # record and print
            acc_s = torch.sum(c_l_pred == c_l).item() / data.shape[0]
            # acc_s = -1
            # acc_u = torch.sum(n_pred == n).item() / data.shape[0]
            acc_u = -1
            if i == 0:
                col_n = ceil(
                    len(self.train_data_loader) / self.collect_per_batch)
                print(
                    f'epoch:{self.cur_epoch}/{self.args.epochs};bs:{data.shape[0]};'
                    f'lr:{" ".join([str(lr) for lr in lrs])}; '
                    f'{len(self.train_data_loader)}/{self.collect_per_batch}={col_n}|',
                    end='')
            if i % self.collect_per_batch == 0:
                print('#', end='')
                if self.args.wandb:
                    wandb.log({
                        'acc/train/sv_task': acc_s,
                        'acc/train/usv_task': acc_u,
                        'loss/train/class': s_loss.item(),
                        # 'loss/train/usv_task': us_loss.item(),
                        'loss/train/sum': loss.item()
                    })

            if i == len(self.train_data_loader) - 1:
                print()
                pp(f'@{acc_s}:train_acc:s;{acc_u}:u')
                pp(f'train_loss:s:{s_loss.item()};input_gradient_loss:{input_gradient_loss.item()}')
                pp(f'mask_loss:{mask_loss.item()}')
                pp(f'loss:{loss.item()},abs_grad:{abs_grad.item()},abs_grad_r:{abs_grad_r.item()}')

                # pp(f'train_loss:s:{s_loss.item()};u:{us_loss.item()}')
                # pp(f'@{acc_u}:train_acc:u')
                # pp(f'train_loss:u:{us_loss.item()}')

            # del loss, s_loss, us_loss, n_logit, c_l_logit
            torch.cuda.empty_cache()

        # eval
        self.model.eval()
        with torch.no_grad():
            for phase, loader in self.test_s_loaders.items():
                s_acc, us_acc = Trainer.test(self.model,
                                             loader,
                                             device=self.device)
                pp(f'${s_acc}:{phase}_acc')
                if self.args.wandb:
                    wandb.log({f'acc/{phase}': s_acc})
                self.results[phase][self.cur_epoch] = s_acc
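
self.input_grad is an external helper (its class is not shown) that hooks a layer's activations and turns activations plus gradients into an attention mask; the mask-consistency loss then asks that the mask of a rotated image, rotated back, match the mask of the original. A sketch of what get_mask could compute, assuming a Grad-CAM-style weighting; the snippet above passes the input gradient rather than the layer gradient, so the real helper likely weights the activations differently:

import torch
import torch.nn.functional as F


def gradcam_style_mask(activations, gradients, out_size):
    # activations, gradients: (B, C, H, W) captured at the hooked layer 'out'
    weights = gradients.mean(dim=(2, 3), keepdim=True)  # (B, C, 1, 1)
    cam = F.relu((weights * activations).sum(dim=1, keepdim=True))
    cam = F.interpolate(cam, size=out_size, mode='bilinear',
                        align_corners=False)
    # normalize each map to [0, 1] so masks of different images are comparable
    flat = cam.flatten(1)
    mins = flat.min(dim=1).values.view(-1, 1, 1, 1)
    maxs = flat.max(dim=1).values.view(-1, 1, 1, 1)
    return (cam - mins) / (maxs - mins + 1e-8)
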
Example 4
    def train(self):
        start_time = time()
        pp('Start training')
        pp({
            'train': self.train_data_loader.dataset,
            'validation': self.val_s_data_loader.dataset,
            'test': self.test_s_data_loader.dataset
        })
        pp(vars(self.args))

        # TODO(lyj):
        for self.cur_epoch in range(self.args.epochs):
            self.train_epoch()

        r = Result()
        r.v_s_b_i = self.results["val_s"].argmax()
        r.t_s_b_i = self.results["test_s"].argmax()
        r.val_s_best = self.results["val_s"].max()
        r.test_s_best = self.results["test_s"].max()
        r.test_s_select = self.results["test_s"][r.v_s_b_i]
        r.u_when_s = self.results["test_us"][r.v_s_b_i]

        r.v_u_b_i = self.results["val_us"].argmax()
        r.t_u_b_i = self.results["test_us"].argmax()
        r.val_u_best = self.results["val_us"].max()
        r.test_u_best = self.results["test_us"].max()
        r.test_u_select = self.results["test_us"][r.v_u_b_i]
        r.s_when_u = self.results["test_s"][r.v_u_b_i]

        r.v_s_means = self.results["val_s"].mean()
        r.t_s_means = self.results["test_s"].mean()
        r.v_u_means = self.results["val_us"].mean()
        r.t_u_means = self.results["test_us"].mean()

        r.v_s_std = self.results["val_s"].std()
        r.t_s_std = self.results["test_s"].std()
        r.v_u_std = self.results["val_us"].std()
        r.t_u_std = self.results["test_us"].std()

        # print("Best val %g, corresponding test %g - best test: %g" % (val_res.max(), test_res[idx_best], test_res.max()))
        temp_dict = {
            'now': strftime("%Y-%m-%d %H:%M:%S", localtime()),
            'source': self.args.source,
            'target': self.args.target,
            'param': self.args.params,
            'bs': self.args.batch_size,
            'lr': self.args.learning_rate,
            'Highest accuracy on validation set appears on epoch': r.v_s_b_i.item(),
            'Highest accuracy on test set appears on epoch': r.t_s_b_i.item(),
            'Accuracy on test set when the accuracy on validation set is highest': r.test_s_select.item(),
            'Highest accuracy on test set': r.test_s_best.item(),
            'duration': time() - start_time
        }
        pp(temp_dict)
        pp(vars(r))
        self.writer.w(temp_dict)
        if self.args.wandb:
            # wandb.log({'r/test_select': test_s_select.item(),
            #             'r/val_best': val_s_best.item(),
            #            'r/test_best': test_s_best.item(),
            #            'r/v_b_i': v_s_b_i,
            #            'r/t_bi': t_s_b_i})
            wandb.log(vars(r))
            # table = wandb.Table(columns=[
            #     f'{self.args.source[0]}->{self.args.target}-{"-".join([str(_) for _ in self.args.params])}',
            #     "val_best", "test_best", "test_select"])
            # table.add_data("epoch", v_b_i, t_b_i, "#")
            # table.add_data("acc", val_best.item(), test_best.item(), test_select.item())
            # wandb.log({"summary": table})

        torch.cuda.empty_cache()
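
Result is not defined in these snippets; since train() attaches fields dynamically and reads them back with vars(r) for wandb.log, a plain attribute container is all it needs (a minimal assumption; types.SimpleNamespace would also work):

class Result:
    """Plain namespace: train() attaches metrics as attributes so that
    vars(r) yields a flat dict suitable for wandb.log."""
    pass
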
Example 5
    def train_epoch(self):
        print('******************************')
        self.scheduler.step()  # advance the learning-rate schedule once per epoch
        lrs = self.scheduler.get_lr()
        criterion = nn.CrossEntropyLoss()

        # Put the model in training mode so its parameters are updated during backprop
        self.model.train()
        for i, (data, n, c_l) in enumerate(self.train_data_loader):

            data, n, c_l = data.to(self.device), n.to(self.device), c_l.to(
                self.device)
            # for i in range(20):
            #     print(n[i])
            #     tf.ToPILImage()(data[i]).show()
            #     print()
            self.optimizer.zero_grad()

            n_logit, c_l_logit = self.model(data)  # , lambda_val=lambda_val)
            us_loss = criterion(n_logit, n)
            # s_loss = criterion(c_l_logit[n == 0], c_l[n == 0])
            s_loss = torch.tensor(-1)  # supervised loss disabled here; -1 is a logging placeholder

            grad = self.input_grad.get_input_gradient(n_logit)
            grad *= 100000  # scale up the very small input gradients
            # grad = grad.reshape(grad.shape[0], -1)

            # rotate each image back to its original orientation
            data_ori = torch.zeros_like(data)
            for j in range(len(data)):
                data_ori[j] = torch.rot90(data[j], -n[j].item(), [1, 2])
            # data_ori = torch.tensor([torch.rot90(data[i], -n[i], [1, 2]) for i in range(len(data))])
            n_logit_ori, _ = self.model(data_ori)
            grad_ori = self.input_grad.get_input_gradient(n_logit_ori)
            # rotate the gradients back so they align with grad
            for j in range(len(grad_ori)):
                grad_ori[j] = torch.rot90(grad_ori[j], n[j].item(), [1, 2])
            grad_ori *= 100000  # same scaling as grad so the MSE is comparable
            # grad_ori = grad_ori.reshape(grad_ori.shape[0], -1)

            input_gradient_loss = nn.MSELoss()(grad_ori, grad)

            # _, c_l_pred = c_l_logit.max(dim=1)
            _, n_pred = n_logit.max(dim=1)
            # _, domain_pred = domain_logit.max(dim=1)
            # loss = s_loss + us_loss * self.args.usvt_weight
            # loss = us_loss * self.args.usvt_weight
            loss = us_loss * self.args.usvt_weight + 0.01 * input_gradient_loss

            loss.backward()
            self.optimizer.step()

            # record and print
            # acc_s = torch.sum(c_l_pred == c_l).item() / data.shape[0]
            acc_s = -1
            acc_u = torch.sum(n_pred == n).item() / data.shape[0]
            if i == 0:
                col_n = ceil(
                    len(self.train_data_loader) / self.collect_per_batch)
                print(
                    f'epoch:{self.cur_epoch}/{self.args.epochs};bs:{data.shape[0]};'
                    f'lr:{" ".join([str(lr) for lr in lrs])}; '
                    f'{len(self.train_data_loader)}/{self.collect_per_batch}={col_n}|',
                    end='')
            if i % self.collect_per_batch == 0:
                print('#', end='')
                if self.args.wandb:
                    wandb.log({
                        'acc/train/sv_task': acc_s,
                        'acc/train/usv_task': acc_u,
                        'loss/train/class': s_loss.item(),
                        'loss/train/usv_task': us_loss.item(),
                        'loss/train/sum': loss.item()
                    })

            if i == len(self.train_data_loader) - 1:
                print()
                # pp(f'train_acc:s:{acc_s};u:{acc_u}')
                # pp(f'train_loss:s:{s_loss.item()};u:{us_loss.item()}')
                pp(f'@{acc_u}:train_acc:u')
                pp(f'train_loss:u:{us_loss.item()}')
                pp(f'input_gradient_loss:u:{input_gradient_loss.item()}')

            # del loss, s_loss, us_loss, n_logit, c_l_logit
            torch.cuda.empty_cache()

        # eval
        self.model.eval()
        with torch.no_grad():
            # for phase, loader in self.test_s_loaders.items():
            #     s_acc, us_acc = Trainer.test(self.model, loader, device=self.device)
            #     pp(f'{phase}_acc:{s_acc}')
            #     if self.args.wandb:
            #         wandb.log({f'acc/{phase}': s_acc})
            #     self.results[phase][self.cur_epoch] = s_acc

            for phase, loader in self.test_us_loaders.items():
                s_acc, us_acc = Trainer.test(self.model,
                                             loader,
                                             device=self.device)
                pp(f'${us_acc}:{phase}_acc')
                if self.args.wandb:
                    wandb.log({f'acc/{phase}': us_acc})
                self.results[phase][self.cur_epoch] = us_acc
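
input_grad.get_input_gradient is likewise external. A sketch of the likely computation, assuming the helper keeps a reference to the requires_grad input of the last forward pass; the real method takes only the logits, so this explicit-inputs version is an illustration:

import torch


def get_input_gradient(logits, inputs, create_graph=False):
    # inputs must have requires_grad=True before the forward pass that
    # produced logits; create_graph=True makes the returned gradient itself
    # differentiable, which the mask-consistency variant above relies on
    return torch.autograd.grad(outputs=logits.sum(), inputs=inputs,
                               create_graph=create_graph,
                               retain_graph=True)[0]
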