Example No. 1
def visualize_predictions(image_logger, max_samples, metric_fn, logits, gt):
    num_samples = min(len(gt), max_samples)
    # Score each sample, then sort ascending so the hardest cases come first.
    metrics = to_numpy(
        metric_fn(from_numpy(logits), from_numpy(gt), average=False))
    order = np.argsort(metrics)
    gt = gt[order][:num_samples]
    logits = logits[order][:num_samples]
    metrics = metrics[order][:num_samples]
    # Sigmoid converts raw logits into per-pixel probabilities in [0, 1].
    probs = 1 / (1 + np.exp(-logits.squeeze()))

    samples_per_row = 16
    # Two grid rows per visual row of samples: predictions above, ground truth below.
    num_rows = int(np.ceil(num_samples / samples_per_row)) * 2
    plt.figure(figsize=(6, 1 * num_rows))

    for i in range(num_samples):
        # Top row of the pair: predicted probability map, titled with its metric.
        plt.subplot(num_rows, samples_per_row,
                    (i // samples_per_row) * samples_per_row + i + 1)
        plt.title(f'{metrics[i]:.1f}')
        plt.imshow(probs[i], vmin=0, vmax=1)
        plt.xticks([])
        plt.yticks([])
        # Bottom row of the pair: the corresponding ground-truth mask.
        plt.subplot(num_rows, samples_per_row,
                    (i // samples_per_row + 1) * samples_per_row + i + 1)
        plt.imshow(gt[i])
        plt.xticks([])
        plt.yticks([])
    plt.gcf().tight_layout()
    plt.subplots_adjust(hspace=0.1, wspace=0.1)
    image_logger(plt.gcf())
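
Every example on this page leans on a pair of tensor-bridge helpers, to_numpy and from_numpy, whose definitions are not shown. A minimal sketch of what they plausibly look like, assuming a single-GPU PyTorch setup (the dict handling mirrors the dict batches used in later examples):

import numpy as np
import torch

def from_numpy(obj):
    # Recursively move numpy arrays (or dicts of them) onto the GPU if present.
    if isinstance(obj, dict):
        return {key: from_numpy(value) for key, value in obj.items()}
    tensor = torch.from_numpy(np.ascontiguousarray(obj))
    return tensor.cuda() if torch.cuda.is_available() else tensor

def to_numpy(tensor):
    # Detach from the autograd graph and copy back to host memory.
    return tensor.detach().cpu().numpy()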
Example No. 2
def predict(checkpoint_path, batch_size=1, limit=None):
    model = load_checkpoint(checkpoint_path)
    model = as_cuda(model)
    # Inference only: disable autograd and put layers like dropout in eval mode.
    torch.set_grad_enabled(False)
    model.eval()

    records = []
    ids = list(
        map(lambda path: path.split('/')[-1],
            get_images_in('data/test')))[:limit]
    test_generator = get_test_generator(batch_size, limit)
    for inputs, _ in tqdm(test_generator, total=len(test_generator)):
        inputs = from_numpy(inputs)
        outputs = model(inputs)
        # Per-pixel class via argmax over the channel dimension.
        masks = to_numpy(torch.argmax(outputs, dim=1))
        for mask in masks:
            # Test ids are in generator order, so consume them in lockstep.
            _id = ids.pop(0)
            instance_masks = extract_instance_masks_from_binary_mask(mask)

            if len(instance_masks) == 0:
                # Submission convention: images with no instances still get one
                # row, with empty EncodedPixels.
                records.append((_id, None))
            else:
                for instance_mask in instance_masks:
                    records.append((_id, encode_rle(instance_mask)))

    image_ids, encoded_pixels = zip(*records)
    df = pd.DataFrame({'ImageId': image_ids, 'EncodedPixels': encoded_pixels})
    df.to_csv('./data/submissions/__latest.csv', index=False)
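
encode_rle and extract_instance_masks_from_binary_mask are repo helpers that are not shown. For reference, a standard Kaggle run-length encoder and a connected-components splitter (assuming scipy is available) look roughly like this:

import numpy as np
from scipy import ndimage

def encode_rle(mask):
    # Kaggle RLE: flatten column-major (pixels numbered top-to-bottom,
    # left-to-right) and emit alternating "start length" pairs, 1-indexed.
    pixels = np.concatenate([[0], mask.flatten(order='F'), [0]])
    runs = np.where(pixels[1:] != pixels[:-1])[0] + 1
    runs[1::2] -= runs[::2]
    return ' '.join(str(x) for x in runs)

def extract_instance_masks_from_binary_mask(mask):
    # Split a binary mask into one mask per connected component.
    labelled, num_instances = ndimage.label(mask)
    return [(labelled == i).astype(np.uint8)
            for i in range(1, num_instances + 1)]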
Example No. 3
def predict(checkpoint_path, batch_size=1, limit=None, tta=False):
    model = load_checkpoint(checkpoint_path)
    model = as_cuda(model)
    torch.set_grad_enabled(False)
    model.eval()

    records = []
    ids = list(
        map(lambda path: path.split('/')[-1],
            get_images_in('data/test')))[:limit]
    test_generator = get_test_generator(batch_size, limit)
    for batch in tqdm(test_generator, total=len(test_generator)):
        batch = from_numpy(batch)
        masks = None
        if tta:
            # 8-fold TTA: 4 right-angle rotations x optional horizontal flip.
            accumulated_outputs = 0
            for i, should_flip in product(range(4), [False, True]):
                image_batch = batch['image']
                image_batch = rotate_image_batch(image_batch, i)
                if should_flip:
                    image_batch = flip_image_batch(image_batch)

                outputs = torch.sigmoid(model({'image': image_batch})['mask'])
                # Invert the augmentation so all predictions align spatially.
                if should_flip:
                    outputs = flip_image_batch(outputs)
                outputs = rotate_image_batch(outputs, -i)
                accumulated_outputs += outputs
            # Average the 8 probability maps.
            accumulated_outputs /= 8
            masks = to_numpy(accumulated_outputs[:, 0, :, :])
        else:
            outputs = model(batch)
            outputs['mask'] = torch.sigmoid(outputs['mask'])
            masks = to_numpy(outputs['mask'][:, 0, :, :])
        for mask in masks:
            _id = ids.pop(0)
            instance_masks = extract_instance_masks_from_soft_mask(mask)

            if len(instance_masks) == 0:
                records.append((_id, None))
            else:
                for instance_mask in instance_masks:
                    records.append((_id, encode_rle(instance_mask)))

    image_ids, encoded_pixels = zip(*records)
    df = pd.DataFrame({'ImageId': image_ids, 'EncodedPixels': encoded_pixels})
    df.to_csv('./data/submissions/__latest.csv', index=False)
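
The rotation and flip helpers used for TTA are likewise assumed. With NCHW tensors they reduce to one-liners (a sketch, not the repo's actual definitions):

import torch

def rotate_image_batch(batch, k):
    # Rotate an NCHW batch by k * 90 degrees in the spatial (H, W) plane;
    # negative k rotates the other way, which undoes the augmentation.
    return torch.rot90(batch, k, dims=(2, 3))

def flip_image_batch(batch):
    # Mirror an NCHW batch horizontally (along the width axis).
    return batch.flip(dims=(3,))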
Example No. 4
    def on_validation_end(self, logs):
        state_dict = self.model.state_dict(keep_vars=True)
        samples_per_row = 6
        num_samples = len(self.grads)
        # Two grid rows per row of layers: weight histograms above, gradients below.
        num_rows = math.ceil(num_samples / samples_per_row) * 2
        plt.figure(figsize=(16, 1.6 * num_rows))
        for i, (layer_name, grads) in enumerate(self.grads.items()):
            plt.subplot(num_rows, samples_per_row,
                        (i // samples_per_row) * samples_per_row + i + 1)
            plt.title(layer_name)
            flat_weights = to_numpy(state_dict[layer_name]).reshape(-1)
            plt.hist(flat_weights, bins=50)

            plt.subplot(num_rows, samples_per_row,
                        (i // samples_per_row + 1) * samples_per_row + i + 1)
            plt.title(layer_name)
            # Gradients were summed over batches; divide to get the batch average.
            flat_grads = to_numpy(grads / self.batch_counter).reshape(-1)
            plt.hist(flat_grads, bins=50, color='green')
        plt.tight_layout()
        self.image_logger(plt.gcf())
        # Reset accumulators for the next validation interval.
        self.batch_counter = 0
        self.grads = {}
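
This callback assumes self.grads holds per-layer gradient sums accumulated during training. One plausible way to populate it (hypothetical, shaped to match the on_train_batch_end hook that fit_model in Example No. 8 invokes):

    def on_train_batch_end(self, loss):
        # Hypothetical accumulation: sum each layer's gradient over batches;
        # on_validation_end divides by batch_counter to plot the average.
        self.batch_counter += 1
        for name, param in self.model.named_parameters():
            if param.grad is not None:
                grad = param.grad.detach().clone()
                self.grads[name] = self.grads.get(name, 0) + grad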
Example No. 5
def predict(checkpoint_path,
            submission_path,
            batch_size=1,
            limit=None,
            tta=False):
    model = load_checkpoint(checkpoint_path)
    model = as_cuda(model)
    torch.set_grad_enabled(False)
    model.eval()

    records = []
    ids = list(
        map(lambda path: path.split('/')[-1],
            get_images_in('data/test')))[:limit]
    submission = pd.read_csv(submission_path)
    print('Num masks before', len(submission))
    test_generator = get_test_generator(batch_size, limit, classification=True)
    for batch in tqdm(test_generator, total=len(test_generator)):
        batch = from_numpy(batch)
        outputs = model(batch)
        if tta:
            # Horizontal-flip TTA: average sigmoid probabilities over the
            # original and the mirrored input.
            batch['image'] = batch['image'].flip(dims=(3, ))
            flipped_outputs = model(batch)
            outputs['has_ships'] = (
                torch.sigmoid(outputs['has_ships']) +
                torch.sigmoid(flipped_outputs['has_ships'])) / 2
        else:
            outputs['has_ships'] = torch.sigmoid(outputs['has_ships'])
        pred_labels = to_numpy(outputs['has_ships'][:, 0].round().long())

        for pred in pred_labels:
            _id = ids.pop(0)
            if pred == 1:
                continue
            # Classifier says "no ships": drop every predicted mask for this
            # image and emit a single empty row instead. pd.concat replaces
            # DataFrame.append, which was removed in pandas 2.0.
            submission = submission[submission['ImageId'] != _id].copy()
            submission = pd.concat([
                submission,
                pd.DataFrame([{'ImageId': _id, 'EncodedPixels': None}])
            ], ignore_index=True)
    print('Num masks after', len(submission))
    submission.to_csv('./data/submissions/__latest_filtered.csv', index=False)
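
Examples No. 3 and No. 5 chain into a two-stage submission pipeline: segment every test image first, then let the ship/no-ship classifier blank out images it considers empty. A hypothetical invocation (module and checkpoint names are illustrative stand-ins, not from the source):

# Hypothetical wiring; both functions are named predict in their own modules.
from segmentation.predict import predict as predict_masks
from classification.predict import predict as filter_empty_images

predict_masks('data/checkpoints/unet.pt', batch_size=16, tta=True)
# Reads __latest.csv and writes __latest_filtered.csv.
filter_empty_images('data/checkpoints/classifier.pt',
                    'data/submissions/__latest.csv', batch_size=32, tta=True)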
Example No. 6
    def on_validation_batch_end(self, logs, outputs, batch):
        self.metrics.extend(
            to_numpy(self.metric_fn(outputs, batch, average=False)))
        # pred_fn is expected to return a (predictions, ground_truth) pair.
        preds = to_numpy(self.pred_fn(outputs, batch))
        for i in range(len(preds[0])):
            self.predictions_and_gt.append((preds[0][i], preds[1][i]))
Example No. 7
def f2_score(outputs, gt):
    # Threshold sigmoid probabilities at 0.5 to get binary masks per sample.
    pred_masks = to_numpy(torch.sigmoid(outputs).round().long()[:, 0, :, :])
    pred_instance_masks = list(
        map(extract_instance_masks_from_binary_mask, pred_masks))
    gt_masks = [
        extract_instance_masks_from_labelled_mask(to_numpy(sample_gt))
        for sample_gt in gt
    ]
    # F2 averaged over IoU thresholds from 0.5 to 0.95 in steps of 0.05.
    return f_score(2, [0.5, 0.55, 0.6, 0.65, 0.7, 0.75, 0.8, 0.85, 0.9, 0.95],
                   pred_instance_masks, gt_masks)
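
f_score itself is assumed to implement the competition-style metric: at each IoU threshold a predicted instance counts as a true positive if it overlaps an as-yet-unmatched ground-truth instance above the threshold, and the per-threshold F-beta values are averaged over thresholds and images. A simplified sketch under those assumptions:

import numpy as np

def iou(pred, gt):
    # Intersection-over-union of two binary masks.
    intersection = np.logical_and(pred, gt).sum()
    union = np.logical_or(pred, gt).sum()
    return intersection / union if union > 0 else 0.0

def f_score(beta, thresholds, pred_instance_masks, gt_instance_masks):
    scores = []
    for preds, gts in zip(pred_instance_masks, gt_instance_masks):
        per_threshold = []
        for t in thresholds:
            matched = set()
            tp = 0
            for pred in preds:
                for j, gt in enumerate(gts):
                    if j not in matched and iou(pred, gt) > t:
                        matched.add(j)
                        tp += 1
                        break
            fp = len(preds) - tp
            fn = len(gts) - tp
            # F-beta = (1 + b^2) * TP / ((1 + b^2) * TP + b^2 * FN + FP);
            # an image with no predictions and no instances scores 1.
            denom = (1 + beta ** 2) * tp + (beta ** 2) * fn + fp
            per_threshold.append(
                (1 + beta ** 2) * tp / denom if denom > 0 else 1.0)
        scores.append(np.mean(per_threshold))
    return np.mean(scores)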
Example No. 8
def fit_model(model,
              train_generator,
              validation_generator,
              optimizer,
              loss_fn,
              num_epochs,
              logger,
              callbacks=(),
              metrics=()):

    for epoch in tqdm(range(num_epochs)):
        num_batches = len(train_generator)
        logs = {}
        logs['train_loss'] = 0
        for func in metrics:
            logs[f'train_{func.__name__}'] = 0
        model.train()
        torch.set_grad_enabled(True)
        for callback in callbacks:
            callback.on_train_begin()
        for inputs, gt in tqdm(train_generator, total=num_batches):
            inputs, gt = from_numpy(inputs), from_numpy(gt)
            optimizer.zero_grad()
            outputs = model(inputs)
            loss = loss_fn(outputs, gt)
            loss.backward()
            optimizer.step()
            # loss.item() replaces the pre-0.4 loss.data[0] idiom, which fails
            # on 0-dim tensors in modern PyTorch.
            logs['train_loss'] += loss.item()
            for func in metrics:
                logs[f'train_{func.__name__}'] += func(outputs.detach(), gt)
            for callback in callbacks:
                callback.on_train_batch_end(loss.item())

        logs['train_loss'] /= num_batches
        for func in metrics:
            logs[f'train_{func.__name__}'] /= num_batches

        logs['val_loss'] = 0
        for func in metrics:
            logs[f'val_{func.__name__}'] = 0
        all_outputs = []
        all_gt = []
        num_batches = len(validation_generator)
        model.eval()
        torch.set_grad_enabled(False)
        for inputs, gt in tqdm(validation_generator, total=num_batches):
            all_gt.append(gt)
            inputs, gt = from_numpy(inputs), from_numpy(gt)
            outputs = model(inputs)
            # TODO AS: Extract as cmd opt
            # Horizontal-flip TTA: average the two probability maps, then map
            # the average back to logit space so loss_fn still receives logits.
            flipped_outputs = torch.sigmoid(
                model(inputs.flip(dims=(3, ))).flip(dims=(3, )))
            outputs = torch.sigmoid(outputs)
            outputs = (outputs + flipped_outputs) / 2
            outputs = torch.log(outputs / (1 - outputs))
            logs['val_loss'] += loss_fn(outputs, gt).item()
            for func in metrics:
                logs[f'val_{func.__name__}'] += func(outputs.detach(), gt)

            if isinstance(outputs, tuple):
                all_outputs.append(list(map(to_numpy, outputs)))
            else:
                all_outputs.append(to_numpy(outputs))
        logs['val_loss'] /= num_batches
        for func in metrics:
            logs[f'val_{func.__name__}'] /= num_batches

        if isinstance(all_outputs[0], tuple):
            all_outputs = list(map(np.concatenate, zip(*all_outputs)))
        else:
            all_outputs = np.concatenate(all_outputs)

        all_gt = np.concatenate(all_gt)
        for callback in callbacks:
            callback.on_validation_end(logs, all_outputs, all_gt)

        epoch_rows = [['epoch', epoch]]
        for name, value in logs.items():
            epoch_rows.append([name, f'{value:.3f}'])

        logger(tabulate(epoch_rows))
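
A hedged sketch of how fit_model might be wired up; build_model, the generator builders, and all hyperparameter values are illustrative stand-ins, not values from the source:

model = as_cuda(build_model())  # build_model is a hypothetical factory
optimizer = torch.optim.Adam(model.parameters(), lr=1e-4)
fit_model(model,
          get_train_generator(batch_size=16),       # hypothetical builder
          get_validation_generator(batch_size=16),  # hypothetical builder
          optimizer,
          loss_fn=torch.nn.BCEWithLogitsLoss(),
          num_epochs=30,
          logger=print,
          callbacks=[],
          metrics=[f2_score])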
Example No. 9
    def on_validation_batch_end(self, _, outputs, batch):
        self.preds.extend(
            to_numpy(torch.sigmoid(
                outputs['has_ships']).round().long()).reshape(-1))
        self.gt.extend(to_numpy(batch['has_ships']).reshape(-1))
Example No. 10
def f2_score(outputs, batch):
    pred_masks = torch.sigmoid(outputs['mask'])[:, 0, :, :]
    pred_instance_masks = list(
        map(extract_instance_masks_from_soft_mask, to_numpy(pred_masks)))
    gt_masks = [
        extract_instance_masks_from_labelled_mask(to_numpy(sample_gt))
        for sample_gt in batch['mask']
    ]
    return f_score(2, [0.5, 0.55, 0.6, 0.65, 0.7, 0.75, 0.8, 0.85, 0.9, 0.95],
                   pred_instance_masks, gt_masks)
Example No. 11
    def on_validation_end(self, logs, outputs, gt):
        # Histogram of per-sample metric values over the validation set.
        values = to_numpy(
            self.metric_fn(from_numpy(outputs), from_numpy(gt), average=False))
        plt.hist(values, bins=20)
        plt.title(self.metric_fn.__name__)
        self.image_logger(plt.gcf())
Example No. 12
    def on_validation_batch_end(self, logs, outputs, batch):
        self.values.extend(
            to_numpy(self.metric_fn(outputs, batch, average=False)))