Example #1
    def __init__(self, param):
        super().__init__(param)

        # self.eval_after = int(0.15 * self.total_steps)
        self.early_stop = -1
        self.topn = 5
        
        from mground.gpu_utils import anpai

        # soft domain labels (smoothed to 0.75 / 0.25)
        positive_label = torch.Tensor(5, 1).fill_(0.75)
        negative_label = torch.Tensor(5, 1).fill_(0.25)
        self.p_label = anpai(torch.cat([positive_label, negative_label]), True, False)

        # hard domain labels (1 / 0)
        positive_label = torch.Tensor(5, 1).fill_(1)
        negative_label = torch.Tensor(5, 1).fill_(0)
        self.pp_label = anpai(torch.cat([positive_label, negative_label]), True, False)

        # Office-31 partial setup: all 31 classes as source,
        # the first 10 shared with the target
        source_class = set(OFFICE_CLASS[0:31])
        target_class = set(OFFICE_CLASS[0:10])

        assert len(source_class.intersection(target_class)) == 10
        assert len(source_class) == 31 and len(target_class) == 10

        self.source_class = source_class
        self.target_class = target_class
        self.class_num = len(self.source_class) + 1  # presumably +1 for an extra "unknown" class

        self.element_bce = torch.nn.BCELoss(reduction="none")
        self.bce = torch.nn.BCELoss()
        self.element_ce = torch.nn.CrossEntropyLoss(reduction="none")
        # note: fill_ is in-place, so DECISION_BOUNDARY aliases self.TARGET
        self.DECISION_BOUNDARY = self.TARGET.fill_(1)

        self._all_ready()
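
All of these snippets route tensors and networks through anpai from mground.gpu_utils, whose source is not included here. Inferred from the call sites (single tensors, tuples, lists, dicts of networks, plus a use_gpu flag and a need_logging flag), a minimal hypothetical stand-in might look like the sketch below; it is not the library's actual implementation.

import torch

def anpai(data, use_gpu=True, need_logging=False):
    # Hypothetical reconstruction: recursively move tensors and modules
    # to the GPU when use_gpu is True, preserving container structure.
    # need_logging is accepted but ignored in this sketch.
    if isinstance(data, (torch.Tensor, torch.nn.Module)):
        return data.cuda() if use_gpu else data
    if isinstance(data, dict):
        return {k: anpai(v, use_gpu, need_logging) for k, v in data.items()}
    if isinstance(data, (list, tuple)):
        return type(data)(anpai(d, use_gpu, need_logging) for d in data)
    return data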
Example #2
    def sample_elbo(self, input, target, samples):
        p = self.param

        outputs = torch.zeros(samples, p.batch_size, p.class_num)
        log_priors = torch.zeros(samples)
        log_var_posteriors = torch.zeros(samples)

        outputs, log_priors, log_var_posteriors = anpai(
            [outputs, log_priors, log_var_posteriors], p.use_gpu, False)

        for i in range(samples):
            outputs[i] = self(input, sample=True)
            log_priors[i] = self.log_prior()
            log_var_posteriors[i] = self.log_variational_posterior()

        log_prior = log_priors.mean()
        log_variational_posterior = log_var_posteriors.mean()

        negative_log_likelihood = F.nll_loss(outputs.mean(0),
                                             target,
                                             reduction="sum")

        # KL term is down-weighted by the number of minibatches
        # (118 here, presumably the batches per epoch)
        loss = (log_variational_posterior -
                log_prior) / 118 + negative_log_likelihood

        return (
            loss,
            log_prior,
            log_variational_posterior,
            negative_log_likelihood,
        )
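
The loss above matches the minibatch ELBO from Bayes by Backprop (Blundell et al., 2015): the KL surrogate (log variational posterior minus log prior), averaged over `samples` Monte Carlo draws, is scaled by one over the number of minibatches and added to the negative log-likelihood. A hedged usage sketch, assuming model is the Bayesian module above, its forward pass returns log-probabilities (as F.nll_loss expects), and optimizer, x, y are set up elsewhere:

# hypothetical training step built on sample_elbo
optimizer.zero_grad()
loss, log_prior, log_var_post, nll = model.sample_elbo(x, y, samples=3)
loss.backward()
optimizer.step()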
Example #3
    def __init__(self, params):

        super(TrainableModule, self).__init__()

        self.params = params
        self.log_step = self.params.log_per_step
        self.eval_step = self.params.eval_per_step
        self.TrainCapsule = TrainCapsule

        self.relr_everytime = False
        self.eval_once = False
        self.eval_after = -1

        self.total_steps = self.params.steps
        self.current_step = 0.0
        self.current_epoch = 0.0

        self.losses = LossHolder()
        self.train_caps = dict()
        self.train_loggers = dict()
        self.valid_loggers = dict()

        T = torch.zeros(1)
        S = torch.ones(1)
        self.T, self.S = anpai(
            (T, S), use_gpu=params.use_gpu, need_logging=False
        )
Example #4
    def train_module(self, **kwargs):

        # fix loss keys to prevent missing entries
        self.losses.fix_loss_keys()

        for _ in range(self.total_steps):

            # set all networks to train mode
            for _, i in self.networks.items():
                i.train(True)

            # recalculate learning rates and zero gradients
            for c in self.train_caps.values():
                c.decary_lr_rate()
                c.make_zero_grad()

            datas = self._feed_data(mode="train")

            datas = anpai(datas, self.params.use_gpu, need_logging=False)
            self._train_process(datas, **kwargs)

            # making log
            if self.current_step % self.log_step == (self.log_step - 1):

                losses = [(k, v.log_current_avg_loss(self.current_step + 1))
                          for k, v in self.train_loggers.items()]

                if not self.params.disable_std:

                    logger.log(
                        HINTS,
                        self.params.tag + " - " +
                        "Steps %3d ends. Remain %3d steps to go. Finished %.2f%%"
                        % (
                            self.current_step + 1,
                            self.params.steps - self.current_step - 1,
                            (self.current_step + 1) /
                            self.params.steps * 100,
                        ),
                    )

                    logger.log(
                        HINTS,
                        "Current best accuracy is %3.3f%%." %
                        (self.best_accuracy * 100),
                    )

                    tabulate_log_losses(losses, trace="dalosses", mode="train")

            # begin eval
            if (self.current_step % self.eval_step == (self.eval_step - 1)
                    and self.current_step > self.eval_after):
                self.eval_module(**kwargs)
                # restore train mode after eval
                for _, i in self.networks.items():
                    i.train(True)

            self._finish_a_train_process()
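
A note on the logging cadence: the test step % log_step == log_step - 1 fires on every log_step-th step when counting from zero, which the messages then report one-based via current_step + 1. A quick check:

# with log_step = 10, logging fires at steps 9, 19, 29, ...
assert [s for s in range(30) if s % 10 == 10 - 1] == [9, 19, 29]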
Example #5
    def __batch_domain_label(self, batch_size):
        # Generate per-batch source- and target-domain labels.
        SOURCE = 1
        TARGET = 0

        sd = torch.Tensor(batch_size, 1).fill_(SOURCE)
        td = torch.Tensor(batch_size, 1).fill_(TARGET)

        sd, td = anpai((sd, td), self.params.use_gpu, False)
        return sd, td
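
These constant labels serve as targets for adversarial domain classification. A hedged sketch of how they would pair with a BCE loss; self.D here is a hypothetical sigmoid-output domain discriminator, not shown in these snippets:

# hypothetical discriminator update using the labels above
sd, td = self.__batch_domain_label(batch_size)
d_source = self.D(source_features)  # assumed discriminator head
d_target = self.D(target_features)
loss_domain = self.bce(d_source, sd) + self.bce(d_target, td)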
Example #6
    def _all_ready(self):

        # Register everything needed for training:
        # register the weight-initialization handler
        WeightedModule.register_weight_handler(_basic_weights_init_helper)
        # get all networks and init weights
        networks = self._regist_networks()
        assert type(networks) is dict

        def init_weight_and_key(n, k):
            try:
                n.weight_init()
                n.tag = k
            except AttributeError:
                logger.log(
                    BUILD,
                    "%s is not an instance of WeightedModule."
                    % n.__class__.__name__,
                )

        for k, i in networks.items():
            if type(i) is nn.Sequential:
                i.tag = k
                for c in i.children():
                    init_weight_and_key(c, k)
            else:
                init_weight_and_key(i, k)

        # send networks to the GPU
        networks = {
            i: anpai(j, use_gpu=self.params.use_gpu)
            for i, j in networks.items()
        }
        # expose each network as a class attribute
        for i, j in networks.items():
            self.__setattr__(i, j)
        self.networks = networks

        # generate train and valid dataloaders;
        # d_info is a dict containing basic data information
        d_info, iters = self._prepare_data()
        if self.params.classwise_valid:
            valid_list = ['valid_' + t for t in iters['valid'].keys()]
            self.define_log(*valid_list, group="valid")
        else:
            self.define_log("valid_accu", group="valid")
        self.iters = iters

        # register losses:
        # train_caps are used to update networks,
        # loggers are used to make logs
        self._regist_losses()
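
_all_ready spells out the contract a subclass must satisfy: _regist_networks returns a dict of modules, _prepare_data returns data info plus train/valid iterators, and _regist_losses wires up capsules and loggers. A hypothetical minimal subclass; the network classes and method bodies are placeholders, not taken from the source:

class MinimalDAModule(TrainableModule):
    def __init__(self, params):
        super().__init__(params)
        self._all_ready()  # called last, as in Example #1

    def _regist_networks(self):
        # must return a dict; _all_ready tags, initializes,
        # and GPU-places every entry
        return {"F": FeatureExtractor(), "C": Classifier()}  # placeholders

    def _prepare_data(self):
        # must return (data_info_dict, iters) with "train"/"valid" keys
        raise NotImplementedError

    def _regist_losses(self):
        # register loss keys, train capsules, and loggers
        raise NotImplementedError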
Example #7
        def handle_datas(datas):

            img, target = datas

            # collect `samples` stochastic forward passes plus one
            # deterministic pass, then average them
            correct_count = 0
            corrects = torch.zeros(params.samples + 1)
            predicts = torch.zeros(params.samples + 1, params.eval_batch_size,
                                   params.class_num)

            corrects, predicts = anpai((corrects, predicts), True, False)

            for i in range(params.samples):
                predicts[i] = self.BN(img, sample=True)
            predicts[params.samples] = self.BN(img, sample=False)
            predict = predicts.mean(0)

            preds = predicts.max(2, keepdim=True)[1]
            pred = predict.max(1, keepdim=True)[1]  # index of the max log-probability
            corrects += (preds.eq(
                target.view_as(pred)).float().sum(dim=1).squeeze())
            correct_count += pred.eq(target.view_as(pred)).sum().item()

            current_size = target.size()[0]

            # report the posterior-mean component (the last index)
            for index, num in enumerate(corrects):
                if index == params.samples:
                    print("Posterior Mean Accuracy: {}/{}".format(
                        num, current_size))

            # calculate valid accuracy and make a record
            self._update_logs(
                {
                    "valid_accu": correct_count * 100 / current_size,
                },
                group="valid",
            )

            return correct_count, current_size
Example #8
    def _train_step(self, s_img, s_label, t_img, t_label):

        b_img, b_label = self.iters["train"]["B"].next()
        b_img, b_label = anpai((b_img, b_label), True, False)

        source_f = self.F(s_img)
        bias_f = self.F(b_img)
        target_f = self.F(t_img)

        s_cls_p, s_un_p = self.C(source_f, adapt=False)
        b_cls_p, b_un_p = self.C(bias_f, adapt=False)
        t_cls_p, t_un_p = self.C(target_f, adapt=True)

        s_loss_classify = self.ce(s_cls_p, s_label)
        b_loss_classify = self.ce(b_cls_p, b_label)

        loss_adv = self.bce(t_un_p, self.DECISION_BOUNDARY)

        # warm-up: lean on the bias branch for the first 200 steps
        if self.current_step < 200:
            loss_classify = 0.2 * s_loss_classify + 0.8 * b_loss_classify
        else:
            loss_classify = s_loss_classify

        # add the adversarial term only after the first 100 steps
        if self.current_step < 100:
            self._update_loss("global_loss", loss_classify)
        else:
            self._update_loss("global_loss", loss_classify + loss_adv)

        self._update_logs(
            {
                "classify": loss_classify,
                "adv": loss_adv,
                "se": norm_entropy(s_cls_p, reduction="mean"),
                "be": norm_entropy(b_cls_p, reduction="mean"),
                "te": norm_entropy(t_cls_p, reduction="mean"),
            }
        )

        del loss_classify, loss_adv
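
norm_entropy is used above but not defined in these snippets. Since self.ce is a CrossEntropyLoss, s_cls_p and friends are logits, so a plausible stand-in softmaxes internally and normalizes per-sample entropy by log(C) to land in [0, 1]. This is a hypothetical reconstruction, not the source's implementation:

import math
import torch

def norm_entropy(logits, reduction="mean"):
    # hypothetical: softmax, per-sample entropy, normalized by log(C)
    p = torch.softmax(logits, dim=1)
    ent = -(p * torch.log(p + 1e-12)).sum(dim=1) / math.log(logits.size(1))
    return ent.mean() if reduction == "mean" else ent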
Example #9
    def eval_module(self, **kwargs):
        # set all networks to eval mode
        for _, i in self.networks.items():
            i.eval()

        while True:

            datas = self._feed_data(mode="valid")

            if datas is not None:
                datas = anpai(datas, self.params.use_gpu, need_logging=False)
            self._eval_process(datas, **kwargs)

            if datas is None or self.eval_once:
                break

        losses = [(k, v.log_current_avg_loss(self.current_step + 1))
                  for k, v in self.valid_loggers.items()]

        logger.log(VALID, "End an eval step.")
        tabulate_log_losses(losses, trace="validloss", mode="valid")
Example #10
    def eval_module(self, **kwargs):
        # set all networks to eval mode
        for _, i in self.networks.items():
            i.eval()

        if self.params.classwise_valid:
            valid_queue = list(self.iters["valid"].keys())
        else:
            valid_queue = ['accu',]

        current_target = valid_queue.pop()
        while True:

            datas = self._feed_data(mode="valid", valid_target=current_target)

            if datas is not None:
                datas = anpai(
                    datas, self.params.use_gpu, need_logging=False
                )
            self._eval_process(datas, valid_target=current_target)

            if datas is None or self.eval_once:
                if len(valid_queue) == 0:
                    break
                else:
                    current_target = valid_queue.pop()

        losses = [
            (k, v.log_current_avg_loss(self.current_step + 1))
            for k, v in self.valid_loggers.items()
        ]

        logger.log(VALID, "End an eval step.")
        tabulate_log_losses(losses, trace="validloss", mode="valid")
Example #11
    def __init__(self, params):

        super(TrainableModule, self).__init__()

        self.params = params

        self.writer = None

        # all key point of training steps
        self.log_step = self.params.log_per_step
        self.eval_step = self.params.eval_per_step
        self.eval_after_step = -1
        self.total_steps = self.params.steps
        self.current_step = 0.0
        self.current_epoch = 0.0
        self.need_pre_eval = False
        self.has_pre_eval = False
        self.best_accuracy = 0.0

        # loss bookkeeping: holders, capsules, and loggers
        self.loss_holder = LossHolder()
        self.train_caps = dict()
        self.train_loggers = dict()
        self.proce_loggers = dict()
        self.valid_loggers = dict()

        # get all networks and init weights
        networks = self._regist_networks()
        assert type(networks) is dict

        def init_weight_and_key(n, k):
            # n.weight_init()
            n.tag = k

        for k, i in networks.items():
            if type(i) is nn.Sequential:
                i.tag = k
                for c in i.children():
                    init_weight_and_key(c, k)
            else:
                init_weight_and_key(i, k)

        # send networks to the GPU
        networks = {
            i: anpai(j, use_gpu=self.params.use_gpu)
            for i, j in networks.items()
        }

        # expose each network as a class attribute
        for i, j in networks.items():
            self.__setattr__(i, j)
        self.networks = networks

        # generate train and valid dataloaders;
        # _prepare_data here returns the class count and a data-feeding function
        cls_num, data_fn = self._prepare_data()
        confusion_matrix = torch.zeros(cls_num, cls_num)

        self.confusion_matrix = confusion_matrix
        self.data_feeding_fn = data_fn
        self._define_log("valid_accuracy", group="valid")

        # regist losses
        self._regist_losses()
Example #12
    def _feed_data_with_anpai(self, mode):
        data = self._feed_data(mode)
        if data is not None:
            data = anpai(data, self.params.use_gpu, need_logging=False)
        return data
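
This helper collapses the _feed_data + anpai pairing that Examples #4, #9, and #10 repeat inline. As a sketch, the eval loop from Example #9 rewritten on top of it:

    def eval_module(self, **kwargs):
        # set all networks to eval mode
        for _, net in self.networks.items():
            net.eval()

        while True:
            datas = self._feed_data_with_anpai(mode="valid")
            self._eval_process(datas, **kwargs)
            if datas is None or self.eval_once:
                break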