    def _validation(self):
        sly.logger.info("Before validation", extra={'epoch': self.epoch_flt})
        if self.config['validate_with_model_eval']:
            self.model.eval()

        metrics_values = defaultdict(int)
        samples_cnt = 0

        for val_it, (inputs, targets) in enumerate(self.data_loaders['val']):
            inputs, targets = cuda_variable(inputs, volatile=True), cuda_variable(targets)
            outputs = self.model(inputs)
            full_batch_size = inputs.size(0)
            for name, metric in self.val_metrics.items():
                metric_value = metric(outputs, targets)
                if isinstance(metric_value, torch.autograd.Variable):  # for val loss
                    metric_value = metric_value.data[0]
                metrics_values[name] += metric_value * full_batch_size
            samples_cnt += full_batch_size

            sly.logger.info("Validation in progress", extra={'epoch': self.epoch_flt,
                                                         'val_iter': val_it, 'val_iters': self.val_iters})

        for name in metrics_values:
            metrics_values[name] /= float(samples_cnt)

        sly.report_metrics_validation(self.epoch_flt, metrics_values)

        self.model.train()
        sly.logger.info("Validation has been finished", extra={'epoch': self.epoch_flt})
        return metrics_values
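
# The validation loop above weights each metric by its batch size before
# averaging, so a smaller final batch does not skew the epoch-level numbers.
# Below is a minimal standalone sketch of that accumulation pattern; the
# average_metrics name and the batch_metrics input are illustrative and not
# part of the original code.
from collections import defaultdict

def average_metrics(batch_metrics):
    # batch_metrics: iterable of (metrics_dict, batch_size) pairs.
    totals, samples_cnt = defaultdict(float), 0
    for metrics, batch_size in batch_metrics:
        for name, value in metrics.items():
            totals[name] += value * batch_size  # weight each value by batch size
        samples_cnt += batch_size
    return {name: total / samples_cnt for name, total in totals.items()}

# average_metrics([({'loss': 0.5}, 8), ({'loss': 0.7}, 4)]) -> {'loss': 0.5666...}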
Example #2
def infer_per_pixel_scores_single_image(model,
                                        raw_input,
                                        out_shape,
                                        apply_softmax=True):
    """
    Performs inference with PyTorch model and resize predictions to a given size.

    Args:
        model: PyTorch model inherited from torch.Module class.
        raw_input: PyTorch Tensor
        out_shape: Output size (height, width).
        apply_softmax: Whether to apply softmax function after inference or not.
    Returns:
        Inference resulting numpy array resized to a given size.
    """
    model_input = torch.stack([raw_input], 0)  # add dim #0 (batch size 1)
    model_input = cuda_variable(model_input, volatile=True)

    output = model(model_input)
    if apply_softmax:
        output = torch_functional.softmax(output, dim=1)
    output = output.data.cpu().numpy()[0]  # drop the batch dimension -> (C, H, W)

    pred = np.transpose(output, (1, 2, 0))  # CHW -> HWC
    return sly_image.resize(pred, out_shape)
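
# A usage sketch for the helper above, assuming the surrounding utilities
# (cuda_variable, sly_image) are importable and the model lives on the device
# cuda_variable targets. The toy 1x1-conv "model" and the random image are
# purely illustrative, not part of the original code.
import numpy as np
import torch
import torch.nn as nn

model = nn.Conv2d(3, 3, kernel_size=1)  # stand-in for a real segmentation net
model.eval()

img_hwc = np.random.rand(120, 160, 3).astype(np.float32)
img_chw = torch.from_numpy(np.transpose(img_hwc, (2, 0, 1)))  # HWC -> CHW

scores = infer_per_pixel_scores_single_image(model, img_chw, out_shape=(240, 320))
class_map = np.argmax(scores, axis=2)  # (240, 320) array of predicted class indices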
Example #3
def dice_loss(preds, trues, weight=None, is_average=True, ignore_index=None):
    """Smoothed (soft) Dice score over flattened per-sample predictions and targets.

    Pixels equal to ignore_index are masked out and optional per-pixel weights are
    applied. Returns per-sample scores, or their mean clamped to [0, 1] when
    is_average is True.
    """
    num = preds.size(0)
    preds = preds.view(num, -1)
    trues = trues.view(num, -1)

    if ignore_index is not None:
        ignore_mask = trues.data == ignore_index
        preds = preds.clone()
        preds.data[ignore_mask] = 0
        trues = trues.clone()
        trues.data[ignore_mask] = 0

    if weight is not None:
        w = cuda_variable(weight).view(num, -1)
        preds = preds * w
        trues = trues * w

    intersection = (preds * trues).sum(1)
    scores = 2. * (intersection + 1) / (preds.sum(1) + trues.sum(1) + 1)

    if is_average:
        score = scores.sum() / num
        return torch.clamp(score, 0., 1.)
    else:
        return scores
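
# Despite the name, dice_loss above returns the smoothed Dice score
# 2 * (intersection + 1) / (sum(preds) + sum(trues) + 1), clamped to [0, 1];
# a training loss is typically taken as 1 minus this value. A quick check with
# toy tensors (values chosen only for illustration):
import torch

preds = torch.tensor([[1., 1., 0., 0.]])  # one flattened "image"
trues = torch.tensor([[1., 0., 1., 0.]])

score = dice_loss(preds, trues)  # 2 * (1 + 1) / (2 + 2 + 1) = 0.8
loss = 1.0 - score               # common way to turn the score into a loss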
Example #4
    def train(self):
        progress = sly.Progress('Model training: ',
                                self.epochs * self.train_iters)
        self.model.train()

        lr_decr = self.config['lr_decreasing']
        policy = LRPolicyWithPatience(optim_cls=Adam,
                                      init_lr=self.config['lr'],
                                      patience=lr_decr['patience'],
                                      lr_divisor=lr_decr['lr_divisor'],
                                      model=self.model)
        best_val_loss = float('inf')

        debug_saver = None
        debug_save_prob = float(os.getenv('DEBUG_PATCHES_PROB', 0.0))
        if debug_save_prob > 0:
            target_multi = int(255.0 / len(self.out_classes))
            debug_saver = DebugSaver(odir=os.path.join(sly.TaskPaths.DEBUG_DIR,
                                                       'debug_patches'),
                                     prob=debug_save_prob,
                                     target_multi=target_multi)

        for epoch in range(self.epochs):
            sly.logger.info("Before new epoch",
                            extra={'epoch': self.epoch_flt})

            for train_it, (inputs_cpu, targets_cpu) in enumerate(
                    self.data_loaders['train']):
                inputs = cuda_variable(inputs_cpu)
                targets = cuda_variable(targets_cpu)
                outputs = self.model(inputs)
                loss = self.criterion(outputs, targets)

                if debug_saver is not None:
                    out_cls = functional.softmax(outputs, dim=1)
                    debug_saver.process(inputs_cpu, targets_cpu,
                                        out_cls.data.cpu())

                policy.optimizer.zero_grad()
                loss.backward()
                policy.optimizer.step()

                metric_values_train = {'loss': loss.data[0]}
                for name, metric in self.metrics.items():
                    metric_values_train[name] = metric(outputs, targets)

                progress.iter_done_report()

                self.epoch_flt = epoch_float(epoch, train_it + 1,
                                             self.train_iters)
                sly.report_metrics_training(self.epoch_flt,
                                            metric_values_train)

                if self.eval_planner.need_validation(self.epoch_flt):
                    metrics_values_val = self._validation()
                    self.eval_planner.validation_performed()

                    val_loss = metrics_values_val['loss']
                    model_is_best = val_loss < best_val_loss
                    if model_is_best:
                        best_val_loss = val_loss
                        sly.logger.info(
                            'Current model is the best one so far.')

                    self._save_model_snapshot(model_is_best,
                                              opt_data={
                                                  'epoch': self.epoch_flt,
                                                  'val_metrics': metrics_values_val,
                                              })

                    policy.reset_if_needed(val_loss, self.model)

            sly.logger.info("Epoch was finished",
                            extra={'epoch': self.epoch_flt})
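
# The fractional epoch reported above comes from an epoch_float helper that is
# not shown in this listing; it presumably maps (epoch, iteration, iterations
# per epoch) to a float progress value. A minimal sketch of such a helper, as
# an assumption about its behavior rather than the original implementation:
def epoch_float(epoch, train_it, train_iters):
    # e.g. epoch 2, iteration 150 of 600 -> 2.25
    return epoch + train_it / float(train_iters)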