示例#1 (Example #1)
0
        # meter.nres = residual[n_mask].mean()
        # meter.nsum = self.sum_mem[ids][n_mask].mean()
        # meter.tres = residual[n_mask.logical_not()].mean()
        # meter.tsum = self.sum_mem[ids][n_mask.logical_not()].mean()
        self.acc_precise_(self.pred_mem[ids].argmax(dim=1),
                          ys,
                          meter,
                          name='sacc')

        return meter

    def to_logits(self, xs) -> torch.Tensor:
        """Forward a batch through the wrapped model and return its raw logits."""
        logits = self.model(xs)
        return logits


if __name__ == '__main__':
    # Script entry: configure hyper-parameters, then launch mixup training.
    hparams = NoisyParams()
    hparams.optim.args.lr = 0.06
    hparams.epoch = 400
    hparams.device = 'cuda:0'
    hparams.filter_ema = 0.999

    # Mixup variants: pair with the ideal (oracle) partner, not the worst case.
    hparams.mixup = True
    hparams.ideal_mixup = True
    hparams.worst_mixup = False
    hparams.noisy_ratio = 0.8
    hparams.from_args()  # allow command-line overrides of the defaults above
    hparams.initial()
    MixupEvalTrainer(hparams).train()
示例#2 (Example #2)
0
        self.optim.zero_grad()
        meter.Lall.backward()
        self.optim.step()

        self.acc_precise_(logits.argmax(dim=1),
                          ys,
                          meter=meter,
                          name='true_acc')
        self.acc_precise_(logits.argmax(dim=1),
                          nys,
                          meter=meter,
                          name='noisy_acc')

        return meter


if __name__ == '__main__':
    # Script entry: set up the L2R (learning-to-reweight) run and train.
    hparams = NoisyParams()
    hparams.ema = True  # L2R itself keeps no EMA of the model
    hparams.epoch = 120
    hparams.batch_size = 100
    hparams.device = 'cuda:3'
    hparams.optim.args.lr = 0.1
    hparams.meta_optim = {'lr': 0.1, 'momentum': 0.9}
    hparams.from_args()  # allow command-line overrides
    L2RTrainer(hparams).train()
示例#3 (Example #3)
0
                # self.noisy_cls[self.noisy_cls >= 0.5].clamp_min_(params.gmm_w_sche(params.eidx))

            m2 = self.acc_mixture_(true_cls, (self.count_mem >= 0).cpu().numpy(), pre='con')
            meter.update(m)
            self.logger.info(m2)


if __name__ == '__main__':
    # Script entry: configure the multi-head noisy-label trainer and run it.
    # (Earlier grid/retry scaffolding kept for reference:)
    # for retry,params in enumerate(params.grid_range(5)):
    # for retry in range(5):
    #     retry = retry + 1
    params = NoisyParams()
    params.right_n = 10
    params.use_right_label = False
    params.optim.args.lr = 0.06
    params.epoch = 500
    params.device = 'cuda:2'
    params.filter_ema = 0.99
    params.burnin = 20
    params.mix_burnin = 20
    params.targets_ema = 0.3
    params.pred_thresh = 0.9
    params.feature_mean = False
    params.local_filter = True  # use the local (per-sample) filtering strategy
    params.mixt_ema = True  # smooth the BMM predictions with an EMA
    params.from_args()  # allow command-line overrides of the defaults above
    params.initial()
    trainer = MultiHeadTrainer(params)
    if params.ss_pretrain:
        # Warm-start the model from a self-supervised pretraining checkpoint.
        ckpt = torch.load(params.ss_pretrain_fn)
        trainer.model.load_state_dict(ckpt)
    # BUG FIX: training was never started — the script configured the trainer
    # and exited. Every sibling entry script calls train() after construction.
    trainer.train()
示例#4 (Example #4)
0
                self.noisy_cls = self.noisy_cls_mem.clone()
                self.noisy_cls[self.noisy_cls < 0.5] = 0

                # 随时间推移,越难以区分的样本越应该直接挂掉,而不是模糊来模糊去的加权(或许)
                self.noisy_cls[self.noisy_cls >= 0.5].clamp_min_(
                    params.gmm_w_sche(params.eidx))


if __name__ == '__main__':
    # Script entry: configure the multi-head trainer (GMM-burnin variant) and train.
    # (Earlier grid/retry scaffolding kept for reference:)
    # for retry,params in enumerate(params.grid_range(5)):
    # for retry in range(5):
    #     retry = retry + 1
    hparams = NoisyParams()
    hparams.right_n = 10
    hparams.use_right_label = False
    hparams.optim.args.lr = 0.06
    hparams.epoch = 300
    hparams.device = 'cuda:2'
    hparams.filter_ema = 0.999
    hparams.burnin = 2
    hparams.gmm_burnin = 20
    hparams.targets_ema = 0.3
    # hparams.tolerance_type = 'exp'
    hparams.pred_thresh = 0.9
    # hparams.widen_factor = 10
    hparams.from_args()  # allow command-line overrides of the defaults above
    hparams.initial()
    MultiHeadTrainer(hparams).train()