Example #1
def test_crf():
    loaded = np.load('/mnt/g/ship/tmp/val_out.npz')
    outputs = loaded['outputs']
    y_true = loaded['y_true']
    print(outputs.shape)
    print(y_true.shape)

    tgt_size = (settings.ORIG_H, settings.ORIG_W)

    resized = list(map(lambda x: resize(x, tgt_size), outputs))
    print(resized[0].shape, len(resized))
    y_pred = list(map(lambda x: (x > 0.5).astype(np.uint8), resized))
    print(y_pred[0].shape, len(y_pred))

    iou_score = intersection_over_union(y_true, y_pred)
    iout_score = intersection_over_union_thresholds(y_true, y_pred)
    print(iou_score, iout_score)

    _, val_meta = get_train_val_meta(True)
    img_ids = val_meta['ImageId'].values.tolist()
    crf_imgs = []
    for i, img_id in enumerate(img_ids):
        orig_img = imread('/mnt/g/ship/train_v2/{}'.format(img_id))
        crf_output = crf(orig_img, y_pred[i])
        crf_imgs.append(crf_output)
        if i % 100 == 0:
            print(i)

    iou_score = intersection_over_union(y_true, crf_imgs)
    iout_score = intersection_over_union_thresholds(y_true, crf_imgs)
    print(iou_score, iout_score)
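
Note that `intersection_over_union` and `intersection_over_union_thresholds` are project helpers whose definitions are not shown in these examples. As a rough sketch only (the actual implementations in the projects above may score object instances rather than whole images), a minimal pair of functions for lists of binary masks could look like this:

import numpy as np

def iou_binary(y_true, y_pred):
    # IoU of two binary masks; an empty pair counts as a perfect match.
    y_true = np.asarray(y_true, dtype=bool)
    y_pred = np.asarray(y_pred, dtype=bool)
    union = np.logical_or(y_true, y_pred).sum()
    if union == 0:
        return 1.0
    return np.logical_and(y_true, y_pred).sum() / union

def intersection_over_union(y_true_list, y_pred_list):
    # Mean per-image IoU over the validation set.
    return np.mean([iou_binary(t, p) for t, p in zip(y_true_list, y_pred_list)])

def intersection_over_union_thresholds(y_true_list, y_pred_list,
                                        thresholds=np.arange(0.5, 1.0, 0.05)):
    # Simplified IoU-at-thresholds score: the fraction of images whose IoU
    # clears each threshold in 0.50..0.95, averaged over the thresholds.
    ious = np.array([iou_binary(t, p) for t, p in zip(y_true_list, y_pred_list)])
    return float(np.mean([(ious > th).mean() for th in thresholds]))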
Example #2
def _evaluate_pipeline(pipeline_name, validation_size):
    meta = pd.read_csv(os.path.join(params.meta_dir, 'stage1_metadata.csv'))
    meta_train = meta[meta['is_train'] == 1]
    valid_ids = eval(params.valid_category_ids)
    meta_train_split, meta_valid_split = train_valid_split(meta_train, validation_size, valid_category_ids=valid_ids)

    data = {'input': {'meta': meta_valid_split,
                      'meta_valid': None,
                      'train_mode': False,
                      'target_sizes': meta_valid_split[SIZE_COLUMNS].values
                      },
            }

    y_true = read_masks(meta_valid_split[Y_COLUMNS_SCORING].values)

    pipeline = PIPELINES[pipeline_name]['inference'](SOLUTION_CONFIG)
    pipeline.clean_cache()
    output = pipeline.transform(data)
    pipeline.clean_cache()
    y_pred = output['y_pred']

    logger.info('Calculating IOU and IOUT Scores')
    iou_score = intersection_over_union(y_true, y_pred)
    logger.info('IOU score on validation is {}'.format(iou_score))
    ctx.channel_send('IOU Score', 0, iou_score)

    iout_score = intersection_over_union_thresholds(y_true, y_pred)
    logger.info('IOUT score on validation is {}'.format(iout_score))
    ctx.channel_send('IOUT Score', 0, iout_score)
Example #3
def validate(args, model, val_loader, epoch=0, threshold=0.5):
    model.eval()
    outputs = []
    focal_loss, lovaz_loss, salt_loss, w_loss = 0, 0, 0, 0
    with torch.no_grad():
        for img, target, salt_target in val_loader:
            if args.depths:
                add_depth_channel(img, args.pad_mode)
            img, target, salt_target = img.cuda(), target.cuda(), salt_target.cuda()
            output, salt_out = model(img)

            _, floss, lovaz, _salt_loss, _w_loss = weighted_loss(
                args, (output, salt_out), (target, salt_target), epoch=epoch)
            focal_loss += floss
            lovaz_loss += lovaz
            salt_loss += _salt_loss
            w_loss += _w_loss
            output = torch.sigmoid(output)

            for o in output.cpu():
                outputs.append(o.squeeze().numpy())

    n_batches = val_loader.num // args.batch_size if val_loader.num % args.batch_size == 0 else val_loader.num // args.batch_size + 1

    # y_pred: a list of np arrays, each of shape (101, 101)
    y_pred = generate_preds(args, outputs, (settings.ORIG_H, settings.ORIG_W),
                            threshold)

    iou_score = intersection_over_union(val_loader.y_true, y_pred)
    iout_score = intersection_over_union_thresholds(val_loader.y_true, y_pred)

    return iout_score, iou_score, focal_loss / n_batches, lovaz_loss / n_batches, salt_loss / n_batches, iout_score * 4 - w_loss
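
`generate_preds` is another helper whose body is not shown. Judging from how Examples 1 and 5 build `y_pred` inline, a hypothetical equivalent would resize each sigmoid output back to the original resolution and binarize it at the threshold; the name and the `skimage` resize backend below are assumptions:

import numpy as np
from skimage.transform import resize  # assumed backend; the project may crop padding instead

def generate_preds_sketch(outputs, target_size, threshold=0.5):
    # Hypothetical stand-in for generate_preds: resize each probability map
    # to (ORIG_H, ORIG_W) and threshold it into a uint8 binary mask.
    preds = []
    for prob in outputs:
        prob = resize(prob, target_size, preserve_range=True)
        preds.append((prob > threshold).astype(np.uint8))
    return preds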
Example #4
File: train.py Project: chicm/salt
def validate(args, model, val_loader, epoch=0, threshold=0.5):
    model.eval()
    print('validating...')
    outputs = []
    val_loss = 0
    with torch.no_grad():
        for img, target, salt_target in val_loader:
            img, target, salt_target = img.cuda(), target.cuda(), salt_target.cuda()
            output, salt_out = model(img)
            #print(output.size(), salt_out.size())

            loss = weighted_loss((output, salt_out), (target, salt_target),
                                 epoch=epoch)
            val_loss += loss.item()
            output = torch.sigmoid(output)

            for o in output.cpu():
                outputs.append(o.squeeze().numpy())

    n_batches = val_loader.num // args.batch_size if val_loader.num % args.batch_size == 0 else val_loader.num // args.batch_size + 1

    # y_pred: a list of 400 np arrays, each of shape (101, 101)
    y_pred = generate_preds_softmax(outputs,
                                    (settings.ORIG_H, settings.ORIG_W),
                                    threshold)
    print(y_pred[0].shape)
    print('Validation loss: {:.4f}'.format(val_loss / n_batches))

    iou_score = intersection_over_union(val_loader.y_true, y_pred)
    iout_score = intersection_over_union_thresholds(val_loader.y_true, y_pred)
    print('IOU score on validation is {:.4f}'.format(iou_score))
    print('IOUT score on validation is {:.4f}'.format(iout_score))

    return iout_score, iou_score, val_loss / n_batches
Example #5
def test_bbox():
    tgt_size = (settings.ORIG_H, settings.ORIG_W)
    outputs, y_true = get_val_result(16)
    resized = list(map(lambda x: resize_image(x, tgt_size), outputs))
    print(resized[0].shape, len(resized))
    y_pred = list(map(lambda x: (x > 0.5).astype(np.uint8), resized))
    print(y_pred[0].shape, len(y_pred))

    iou_score = intersection_over_union(y_true, y_pred)
    iout_score = intersection_over_union_thresholds(y_true, y_pred)
    print(iou_score, iout_score)

    processed = list(map(lambda x: masks_to_bounding_boxes(x), y_pred))

    iou_score = intersection_over_union(y_true, processed)
    iout_score = intersection_over_union_thresholds(y_true, processed)
    print(iou_score, iout_score)
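
`masks_to_bounding_boxes` is also not defined here. A plausible minimal sketch, assuming it replaces every connected component of a binary mask with its filled bounding box (the project version may differ), is:

import numpy as np
from skimage.measure import label, regionprops

def masks_to_bounding_boxes_sketch(mask):
    # Hypothetical stand-in: fill the bounding box of each connected component.
    boxes = np.zeros_like(mask, dtype=np.uint8)
    for region in regionprops(label(mask)):
        min_row, min_col, max_row, max_col = region.bbox
        boxes[min_row:max_row, min_col:max_col] = 1
    return boxes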
Example #6
def calc_loss(pred, target, metrics, bce_weight=0.5):
    bce = torch.nn.functional.binary_cross_entropy_with_logits(pred, target)

    pred = torch.sigmoid(pred)
    dice = dice_loss(pred, target)

    pred_binary = normalise_mask(pred.detach().cpu().numpy())
    iou = intersection_over_union(target.detach().cpu().numpy(), pred_binary)

    loss = bce * bce_weight + dice * (1 - bce_weight)

    metrics['bce'] += bce.data.cpu().numpy() * target.size(0)
    metrics['dice'] += dice.data.cpu().numpy() * target.size(0)
    metrics['iou'] += iou * target.size(0)
    metrics['loss'] += loss.data.cpu().numpy() * target.size(0)

    return loss
Example #7
    def _get_validation_loss(self):
        output, epoch_loss = self._transform()
        y_pred = self._generate_prediction(output)

        logger.info('Calculating IOU and IOUT Scores')
        iou_score = intersection_over_union(self.y_true, y_pred)
        iout_score = intersection_over_union_thresholds(self.y_true, y_pred)
        logger.info('IOU score on validation is {}'.format(iou_score))
        logger.info('IOUT score on validation is {}'.format(iout_score))

        if not self.transformer.validation_loss:
            self.transformer.validation_loss = {}
        self.transformer.validation_loss.setdefault(
            self.epoch_id, {
                'sum': epoch_loss,
                'iou': Variable(torch.Tensor([iou_score])),
                'iout': Variable(torch.Tensor([iout_score]))
            })
        return self.transformer.validation_loss[self.epoch_id]
Example #8
File: train.py Project: chicm/rsna
def validate(args,
             model,
             val_loader,
             epoch=0,
             threshold=0.5,
             cls_threshold=0.5):
    model.eval()
    #print('validating...')
    outputs = []
    cls_preds = []
    total_num = 0
    cls_corrects = 0
    focal_loss, lovaz_loss, bce_loss, ship_loss = 0, 0, 0, 0
    with torch.no_grad():
        for img, target, ship_target in val_loader:
            img, target, ship_target = img.cuda(), target.cuda(), ship_target.cuda()
            #output, ship_out = model(img)
            output, logit_pixel, ship_out = model(img)
            #print(output.size(), salt_out.size())
            ship_pred = (torch.sigmoid(ship_out) > cls_threshold).byte()
            total_num += len(img)
            cls_corrects += ship_pred.eq(ship_target.byte()).sum().item()

            _, floss, lovaz, _bce_loss, _ship_loss = criterion(
                args, (output, ship_out), (target, ship_target), epoch=epoch)
            focal_loss += floss
            lovaz_loss += lovaz
            ship_loss += _ship_loss
            bce_loss += _bce_loss
            output = torch.sigmoid(output)

            for o in output.cpu():
                outputs.append(o.squeeze().numpy())
            cls_preds.extend(ship_pred.cpu().squeeze().numpy().tolist())

    n_batches = val_loader.num // args.batch_size if val_loader.num % args.batch_size == 0 else val_loader.num // args.batch_size + 1

    # y_pred: a list of np arrays, each resized to (settings.ORIG_H, settings.ORIG_W)
    y_pred = generate_preds(args, outputs, cls_preds,
                            (settings.ORIG_H, settings.ORIG_W), threshold)

    #draw
    if args.dev_mode:
        for p, y in zip(y_pred, val_loader.y_true):
            print(p.shape, y.shape)
            #objs = split_mask(p, threshold_obj=30, threshold=None)
            #if objs:
            #    if False:
            #        objs = map(lambda x: mask_to_bbox(x), objs)
            #    cv2.imshow('image', np.hstack([*objs, y])*255)
            #else:
            #bb_img = masks_to_bounding_boxes(p)
            #bb_img = (bb_img > 0).astype(np.uint8)
            #print(bb_img.max())
            cv2.imshow('image', np.hstack([p, y]) * 255)
            cv2.waitKey(0)

    iou_score = intersection_over_union(val_loader.y_true, y_pred)
    iout_score = intersection_over_union_thresholds(val_loader.y_true, y_pred)
    #print('IOU score on validation is {:.4f}'.format(iou_score))
    #print('IOUT score on validation is {:.4f}'.format(iout_score))

    cls_acc = cls_corrects / total_num

    return iout_score, iou_score, focal_loss / n_batches, lovaz_loss / n_batches, bce_loss / n_batches, ship_loss / n_batches, cls_acc
Example #9
    target_list: List[torch.Tensor] = []
    output_list: List[torch.Tensor] = []
    for inputs, targets in tqdm(test_loader):
        inputs = inputs.to(device)
        targets = targets.to(device)

        outputs = model(inputs)

        target_list.append(targets.detach().cpu())
        output_list.append(outputs.detach().cpu())

    outputs = torch.cat(output_list, dim=0)
    targets = torch.cat(target_list, dim=0).squeeze(1)

    loss = criterion(outputs, targets)
    iou = metrics.intersection_over_union(y_true=targets,
                                          y_pred=outputs,
                                          num_classes=cfg.num_classes)
    dice_coef: float = metrics.dice_coefficient(outputs, targets)

    # cmaps: List[Tuple[str, Tuple[int]]] = \
    #     utils.load_labelmap('../VOCDataset/labelmap.txt')

    logger.info(f'Finish testing on {len(X_test)} images.')
    logger.info(f'Loss: {loss}')
    logger.info(f'Dice coefficient: {dice_coef}')
    logger.info(f'mIoU: {np.mean(iou)}')
    logger.info('IoU:')
    # for idx, (class_name, _) in enumerate(cmaps):
    #     logger.info(f'{class_name.rjust(16)}: {iou[idx]}')
Example #10
print(gt_file['t0'].keys())

#%%
segmentation = segmentation_file['t0/train1_epoch50']
segmentation = segmentation[:].astype(float)
gt = gt_file['t0/channel2'][:] > 0
# %%

lower_confidence = segmentation > 0.2
highest_confidence = segmentation > 0.98
# %%
markers = np.zeros_like(segmentation)
markers[highest_confidence] = 1
# %%
cleaned = watershed(-segmentation, markers, mask=lower_confidence)

# %%
del segmentation_file['t0/t1_e50_cleaned']
segmentation_file['t0'].create_dataset(name='t1_e50_cleaned', data=cleaned, dtype=np.uint8)
# %%
segmentation_file.close()
gt_file.close()
# %%

import metrics

res = metrics.precisionRecall(gt, cleaned > 0)
# %%
iou = metrics.intersection_over_union(gt, cleaned > 0)
# %%
Example #11
    def test_return_type(self):
        iou_list: List[float] = metrics.intersection_over_union(
            self.y_true, self.y_pred, self.num_classes)
        self.assertIsInstance(iou_list, list)
        self.assertEqual(tuple(iou_list), (0., 0.))
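
The last two snippets use a multi-class variant, `metrics.intersection_over_union(y_true, y_pred, num_classes)`, that returns one IoU per class as a list. A minimal sketch consistent with that return type (the real `metrics` module may also argmax raw model outputs first; that step is assumed here) could be:

from typing import List
import numpy as np

def intersection_over_union_per_class(y_true, y_pred, num_classes) -> List[float]:
    # Per-class IoU for two integer label maps of the same shape.
    y_true = np.asarray(y_true).ravel()
    y_pred = np.asarray(y_pred).ravel()
    ious = []
    for c in range(num_classes):
        inter = np.logical_and(y_true == c, y_pred == c).sum()
        union = np.logical_or(y_true == c, y_pred == c).sum()
        ious.append(float(inter) / float(union) if union > 0 else 0.0)
    return ious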