Example #1
0
    def adapt(self,
              inputs,
              targets,
              is_classification_task=None,
              num_adaptation_steps=1,
              step_size=0.1,
              first_order=False):
        """Adapt the model to one task with inner-loop gradient steps (MAML).

        Args:
            inputs: Support-set inputs for the task.
            targets: Support-set targets. If ``is_classification_task`` is
                None, integer-typed targets are treated as class labels.
            is_classification_task: Optional override of the task-type
                inference from ``targets.dtype``.
            num_adaptation_steps: Number of inner gradient updates.
            step_size: Inner-loop learning rate passed to
                ``gradient_update_parameters``.
            first_order: If True, use first-order updates (no second-order
                gradients through the inner loop).

        Returns:
            Tuple ``(params, results)`` where ``params`` are the adapted
            parameters and ``results`` holds the per-step inner losses and,
            for classification tasks, ``'accuracy_before'``.
        """
        if is_classification_task is None:
            # Integer targets imply classification; float targets regression.
            is_classification_task = (not targets.dtype.is_floating_point)
        params = None

        results = {
            'inner_losses': np.zeros((num_adaptation_steps, ),
                                     dtype=np.float32)
        }

        for step in range(num_adaptation_steps):
            logits = self.model(inputs, params=params)
            inner_loss = self.loss_function(logits, targets)
            results['inner_losses'][step] = inner_loss.item()

            # Fix: 'accuracy_before' must be measured BEFORE any adaptation
            # update (step 0). The previous condition
            # (step == num_adaptation_steps - 1) recorded accuracy after
            # num_adaptation_steps - 1 updates, contradicting the key name.
            if (step == 0) and is_classification_task:
                results['accuracy_before'] = compute_accuracy(logits, targets)

            self.model.zero_grad()
            params = gradient_update_parameters(
                self.model,
                inner_loss,
                step_size=step_size,
                params=params,
                # At evaluation time fall back to cheaper first-order updates.
                first_order=(not self.model.training) or first_order)

        return params, results
Example #2
0
    def adapt(self,
              inputs,
              targets,
              num_adaptation_steps=1,
              step_size=0.1,
              first_order=False):
        """Run the MAML inner loop on one task and track per-step losses.

        Args:
            inputs: Support-set inputs for the task.
            targets: Support-set targets.
            num_adaptation_steps: Number of inner gradient updates.
            step_size: Inner-loop learning rate.
            first_order: If True, skip second-order gradients.

        Returns:
            Tuple ``(params, results)``: the adapted parameters and a dict
            with the inner loss at every step plus ``'accuracy_before'``,
            the accuracy of the un-adapted model.
        """
        adapted_params = None
        inner_losses = np.zeros((num_adaptation_steps, ), dtype=np.float32)
        results = {'inner_losses': inner_losses}

        for step_idx in range(num_adaptation_steps):
            predictions = self.model(inputs, params=adapted_params)
            loss = self.loss_function(predictions, targets)
            inner_losses[step_idx] = loss.item()

            if step_idx == 0:
                # Accuracy before any adaptation has taken place.
                results['accuracy_before'] = compute_accuracy(predictions,
                                                              targets)

            self.model.zero_grad()
            # Outside of training mode, second-order terms are never needed,
            # so force first-order updates there.
            use_first_order = first_order or (not self.model.training)
            adapted_params = gradient_update_parameters(
                self.model,
                loss,
                step_size=step_size,
                params=adapted_params,
                first_order=use_first_order)

        return adapted_params, results