Example 1
    def __init__(self, args, loader):
        super(StaticMethod, self).__init__(args, loader)

        if args.mode == 'train':
            self.criterion = losses.StaticLoss(args)
            self.criterion = self.criterion.to(self.device)
            self.optimizer = get_optimizer(self.G.parameters(), args)
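A minimal sketch of what a fixed-weight criterion in the spirit of losses.StaticLoss could look like follows; the class name StaticLossSketch, the static_weights argument, and the weighted-sum formula are assumptions for illustration, not the repository's implementation.

import torch
import torch.nn as nn

class StaticLossSketch(nn.Module):
    # Hypothetical fixed-weight multi-loss criterion (not the repository's StaticLoss).
    def __init__(self, args):
        super().__init__()
        # Assumed: one fixed weight per loss term, set once and never updated.
        weights = getattr(args, 'static_weights', [1.0, 1.0])
        self.register_buffer('weights', torch.tensor(weights))

    def forward(self, unweighted_losses):
        # unweighted_losses: sequence of scalar loss tensors, one per task.
        stacked = torch.stack(list(unweighted_losses))
        return torch.sum(self.weights * stacked)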
Example 2
    def __init__(self, args, loader):
        super(CoVWeightingMethod, self).__init__(args, loader)

        if args.mode == 'train':
            self.criterion = losses.CoVWeightingLoss(args)
            self.criterion.to(self.device)
            self.optimizer = get_optimizer(self.G.parameters(), args)

            # Record the mean weights for an epoch.
            self.mean_weights = [
                0.0 for _ in range(self.criterion.alphas.shape[0])
            ]
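A hedged sketch of how self.mean_weights could be maintained during training follows; the helper name update_mean_weights and the zero-indexed iteration counter are assumptions based only on the comment above, not on the repository's training loop.

    def update_mean_weights(self, iteration):
        # Hypothetical helper: incremental mean of the CoV-weighting coefficients
        # (self.criterion.alphas) over the iterations seen so far in the epoch.
        alphas = self.criterion.alphas.detach().cpu().tolist()
        for i, alpha in enumerate(alphas):
            self.mean_weights[i] += (alpha - self.mean_weights[i]) / (iteration + 1)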
Example 3
    def __init__(self, args, loader):
        super(UncertaintyMethod, self).__init__(args, loader)

        if args.mode == 'train':
            self.criterion = losses.UncertaintyLoss(args)
            self.criterion.to(self.device)

            # Specifically for this architecture, both model parameters and the estimates
            # of the log variances have to be optimized.
            params_to_optimize = list(
                self.G.parameters()) + [self.criterion.log_vars]
            self.optimizer = get_optimizer(params_to_optimize, args)
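A minimal sketch of an uncertainty-weighted criterion in the spirit of losses.UncertaintyLoss follows; it exposes log_vars as a learnable leaf so the caller can append it to the optimizer's parameter list, as the snippet above does. The Kendall-style weighting formula, the class name, and the default number of losses are assumptions.

import torch
import torch.nn as nn

class UncertaintyLossSketch(nn.Module):
    # Hypothetical homoscedastic-uncertainty criterion (not the repository's UncertaintyLoss).
    def __init__(self, args, num_losses=2):
        super().__init__()
        # One learnable log variance per loss term; exposed as .log_vars so it can
        # be optimized jointly with the model parameters.
        self.log_vars = nn.Parameter(torch.zeros(num_losses))

    def forward(self, unweighted_losses):
        total = torch.zeros((), device=self.log_vars.device)
        for i, loss in enumerate(unweighted_losses):
            precision = torch.exp(-self.log_vars[i])
            total = total + precision * loss + self.log_vars[i]
        return total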
Example 4
    def __init__(self, args, loader):
        super(GradNormMethod, self).__init__(args, loader)

        if args.mode == 'train':
            self.criterion = losses.BaseLoss(args)
            self.criterion = self.criterion.to(self.device)
            self.optimizer = get_optimizer(self.G.parameters(), args)

            # GradNorm's learnable loss weights, initialized uniformly to 1 / NUM_LOSSES.
            self.params = torch.tensor(
                [1.0 / NUM_LOSSES for _ in range(NUM_LOSSES)],
                requires_grad=True,
                device=self.device)

            # Initial loss values L(0), recorded on the first training iteration
            # and used to form the GradNorm loss ratios.
            self.first_iter = True
            self.L0 = None

            self.gamma = args.init_gamma
            # L1 distance between the actual and target gradient norms.
            self.Gradloss = nn.L1Loss()
            # Optimizer hyperparameters for the loss weights are taken from the GradNorm paper.
            self.optimizer_params = optim.Adam([self.params], lr=0.025)
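The attributes set up above (self.params, self.L0, self.gamma, self.Gradloss, self.optimizer_params) are the ingredients of the GradNorm weight update. A hedged sketch of how one update step might combine them follows; the method name gradnorm_step, the choice of passing in a shared layer, and the final renormalization are assumptions, not the repository's implementation.

    def gradnorm_step(self, unweighted_losses, shared_layer):
        # Hypothetical single GradNorm update of the loss weights in self.params.
        # shared_layer is assumed to be the last layer shared by all tasks in self.G.
        losses = torch.stack(list(unweighted_losses))

        if self.first_iter:
            # Remember the initial loss values L(0) used for the loss ratios.
            self.L0 = losses.detach()
            self.first_iter = False

        # Gradient norm of each weighted loss w.r.t. the shared layer's weights.
        norms = torch.stack([
            torch.autograd.grad(self.params[i] * losses[i], shared_layer.weight,
                                retain_graph=True, create_graph=True)[0].norm()
            for i in range(len(losses))
        ])

        # Relative inverse training rates and the (constant) GradNorm target.
        loss_ratios = losses.detach() / self.L0
        inverse_rates = loss_ratios / loss_ratios.mean()
        target = (norms.mean() * inverse_rates ** self.gamma).detach()

        # L1 distance between actual and target gradient norms (self.Gradloss).
        grad_loss = self.Gradloss(norms, target)

        # Update only the loss weights with the dedicated optimizer.
        self.optimizer_params.zero_grad()
        self.params.grad = torch.autograd.grad(grad_loss, self.params)[0]
        self.optimizer_params.step()

        # Renormalize so the weights keep summing to NUM_LOSSES.
        with torch.no_grad():
            self.params *= NUM_LOSSES / self.params.sum()

In a full training step, the weighted total loss, e.g. (self.params.detach() * losses).sum(), would presumably then be backpropagated through the model with self.optimizer as usual; that part is also an assumption.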