Code Example #1
    def finetune(self, epoch=0, model=None):
        """Finetune model."""
        self.logger.info("Finetune epoch: {}".format(epoch))
        if model is None:
            model = self.model
        model.train()

        train_loss = 0
        score = 0
        n_batches = int(
            len(self.finetuneloader.dataset) / self.args.batch_size)

        for batch_idx, (inputs, targets, _) in enumerate(self.finetuneloader):
            inputs, targets = wrap_cuda(inputs), wrap_cuda(targets)
            self.optimizer.zero_grad()
            outputs = model(inputs)
            batch_size = outputs.shape[0]
            loss = F.binary_cross_entropy(outputs, targets,
                                          reduction='none').sum() / batch_size
            loss.backward()
            self.optimizer.step()
            train_loss += loss.item()
            score += compute_f_score(outputs, targets).item()
            self.logger.info(
                STATUS_MSG.format(batch_idx + 1, n_batches,
                                  train_loss / (batch_idx + 1),
                                  score / (batch_idx + 1)))
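
The loop above relies on a few helpers defined elsewhere in the repository: wrap_cuda, compute_f_score, and the STATUS_MSG format string. A minimal sketch of what they could look like follows; the 0.5 threshold, beta=2, and the exact message format are assumptions, not taken from the original code.

import torch

# Assumed log-message format; the original only shows four format arguments.
STATUS_MSG = "Batch: {}/{}, Loss: {:.4f}, F2: {:.4f}"


def wrap_cuda(x):
    """Move a tensor or module to the GPU when one is available (assumed helper)."""
    return x.cuda() if torch.cuda.is_available() else x


def compute_f_score(outputs, targets, beta=2.0, threshold=0.5, eps=1e-9):
    """Batch-averaged F-beta score for multi-label sigmoid outputs (assumed helper).

    outputs and targets are float tensors of shape (batch_size, num_classes).
    """
    preds = (outputs > threshold).float()
    true_positives = (preds * targets).sum(dim=1)
    precision = true_positives / (preds.sum(dim=1) + eps)
    recall = true_positives / (targets.sum(dim=1) + eps)
    f_beta = ((1 + beta ** 2) * precision * recall /
              (beta ** 2 * precision + recall + eps))
    return f_beta.mean()
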
Code Example #2
    def test(self,
             epoch=0,
             val=False,
             save_submission=False,
             run_on_finetune=False):
        """Test model."""
        self.model.eval()

        if val:
            self.logger.info("Val epoch: {}".format(epoch))
            loader = self.valloader
        else:
            if run_on_finetune:
                self.logger.info(
                    "Running on test set with labels, save predictions")
                loader = self.finetuneloader
            else:
                self.logger.info("Running on full test set, save predictions")
                loader = self.testloader

        test_loss = 0
        score = 0
        n_batches = int(len(loader.dataset) / self.args.batch_size)

        with torch.no_grad():
            for batch_idx, (inputs, targets, image_ids) in enumerate(loader):
                inputs, targets = wrap_cuda(inputs), wrap_cuda(targets)
                outputs = self.model(inputs)
                batch_size = outputs.shape[0]
                loss = F.binary_cross_entropy(
                    outputs, targets, reduction='none').sum() / batch_size
                test_loss += loss.item()
                score += compute_f_score(outputs, targets).item()
                self.logger.info(
                    STATUS_MSG.format(batch_idx + 1, n_batches,
                                      test_loss / (batch_idx + 1),
                                      score / (batch_idx + 1)))
                f2_score = score / (batch_idx + 1)

                if save_submission:
                    preds = self.convert_outputs_to_label_predictions(outputs)
                    for idx, image_id in enumerate(image_ids):
                        self.submission['labels'][image_id] = preds[idx]

        if save_submission:
            self.submission.update(self.tuning_labels)
            submission_file_path = os.path.join(
                self.args.submissions_path,
                'submission_{}_{}.csv'.format(self.args.exp_name, epoch))
            self.submission.to_csv(submission_file_path)

        return f2_score
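
Both test() and generate_predictions() call self.convert_outputs_to_label_predictions, which is not shown here. One plausible sketch, assuming class indices are mapped back to label ids through a hypothetical self.idx_to_label attribute and joined into the space-separated submission format:

    def convert_outputs_to_label_predictions(self, outputs, threshold=0.5):
        """Threshold sigmoid outputs into space-separated label strings (sketch)."""
        # self.idx_to_label (class index -> label id) is a hypothetical mapping.
        predictions = []
        for row in (outputs > threshold).cpu().numpy():
            labels = [self.idx_to_label[i] for i, hit in enumerate(row) if hit]
            predictions.append(' '.join(labels))
        return predictions
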
Code Example #3
    def prepare_model(self):
        """Prepare model."""
        if self.args.use_ensemble:
            self.model = ModelEnsemble(CNN, self.num_classes,
                                       self.args.n_models)
            self.model.cuda()
            self.model.load_checkpoint_list(
                self.args.ensemble_checkpoint_paths)
        else:
            self.model = wrap_cuda(CNN(self.num_classes))
            self.load_checkpoint()
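
The single-model branch calls self.load_checkpoint, which is also not shown. A minimal sketch, assuming a checkpoint_path argument and a checkpoint dictionary with a 'state_dict' key (both assumptions), with os and torch imported at module level:

    def load_checkpoint(self):
        """Load weights for the single-model path (sketch)."""
        # self.args.checkpoint_path and the 'state_dict' key are assumptions.
        if self.args.checkpoint_path and os.path.isfile(self.args.checkpoint_path):
            state = torch.load(self.args.checkpoint_path, map_location='cpu')
            self.model.load_state_dict(state['state_dict'])
            self.logger.info(
                "Loaded checkpoint: {}".format(self.args.checkpoint_path))
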
Code Example #4
    def generate_predictions(self):
        """Predict on the test set and write a submission CSV."""
        self.model.eval()
        n_batches = int(len(self.testloader.dataset) / self.args.batch_size)
        with torch.no_grad():
            for batch_idx, (inputs, _,
                            image_ids) in enumerate(self.testloader):
                self.logger.info("Predicting batch {}/{}".format(
                    batch_idx, n_batches))
                inputs = wrap_cuda(inputs)
                outputs = self.model(inputs)
                avg_preds = self.model.get_ensemble_average(outputs)
                preds = self.convert_outputs_to_label_predictions(avg_preds)
                for idx, image_id in enumerate(image_ids):
                    self.submission['labels'][image_id] = preds[idx]

            # NOTE: Use only for stage-1 test set
            self.submission.update(self.tuning_labels)
            submission_file_path = 'predictions.csv'  # used as pseudo labels
            self.submission.to_csv(submission_file_path)
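
self.submission and self.tuning_labels are prepared elsewhere in the class. Their use above (a 'labels' column indexed by image id, update() with the stage-1 tuning labels, then to_csv) is consistent with a pandas setup roughly like the following; the file names and column names are assumptions.

import pandas as pd

# Submission frame: one 'labels' column indexed by image id, initially empty
# (file name is an assumption).
submission = pd.read_csv('sample_submission.csv', index_col='image_id')
submission['labels'] = ''

# Ground-truth labels for the stage-1 tuning images; DataFrame.update()
# overwrites the predicted rows with these before the CSV is written.
tuning_labels = pd.read_csv('tuning_labels.csv', header=None,
                            names=['image_id', 'labels'],
                            index_col='image_id')
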
Code Example #5
    def cuda(self):
        """Transfer to GPU."""
        for idx, _ in enumerate(self.ensemble):
            self.ensemble[idx] = wrap_cuda(self.ensemble[idx])
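
cuda() above belongs to the ModelEnsemble wrapper instantiated in Code Example #3 and used in Code Example #4. A minimal skeleton for the rest of that class, consistent with those call sites, might look like this; keeping the members in a plain list and the 'state_dict' checkpoint key are assumptions.

import torch


class ModelEnsemble:
    """Thin wrapper around n_models copies of the same architecture (sketch)."""

    def __init__(self, model_cls, num_classes, n_models):
        # Plain list; members are moved to the GPU explicitly by cuda() above.
        self.ensemble = [model_cls(num_classes) for _ in range(n_models)]

    def load_checkpoint_list(self, checkpoint_paths):
        # One checkpoint path per ensemble member ('state_dict' key assumed).
        for model, path in zip(self.ensemble, checkpoint_paths):
            state = torch.load(path, map_location='cpu')
            model.load_state_dict(state['state_dict'])

    def eval(self):
        for model in self.ensemble:
            model.eval()

    def __call__(self, inputs):
        # Stack per-model outputs: shape (n_models, batch_size, num_classes).
        return torch.stack([model(inputs) for model in self.ensemble])

    def get_ensemble_average(self, outputs):
        # Average the stacked per-model probabilities.
        return outputs.mean(dim=0)
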