Example #1
def test_TestSectionLoader_should_load_data_from_path_override_data():
    with tempfile.TemporaryDirectory() as data_dir:
        os.makedirs(os.path.join(data_dir, "volume_name"))
        os.makedirs(os.path.join(data_dir, "splits"))

        seismic = np.zeros([IL, XL, D])
        generate_npy_files(
            os.path.join(data_dir, "volume_name", "seismic.npy"), seismic)

        labels = np.ones([IL, XL, D])
        generate_npy_files(os.path.join(data_dir, "volume_name", "labels.npy"),
                           labels)

        txt_path = os.path.join(data_dir, "splits", "section_volume_name.txt")
        open(txt_path, "a").close()

        TestSectionLoader = get_test_loader(config)
        config.merge_from_list(["DATASET.ROOT", data_dir])
        test_set = TestSectionLoader(
            config,
            split="volume_name",
            is_transform=True,
            augmentations=None,
            seismic_path=os.path.join(data_dir, "volume_name", "seismic.npy"),
            label_path=os.path.join(data_dir, "volume_name", "labels.npy"),
        )

        assert_dimensions(test_set)
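The test above relies on two helpers the snippet does not define. A minimal sketch of what they might look like, assuming generate_npy_files simply persists an array with np.save and assert_dimensions checks the loaded cubes against the module-level IL, XL, D constants (both behaviours are assumptions, not the project's actual implementation):

import numpy as np


def generate_npy_files(path, data):
    # hypothetical helper: persist the array to disk so the loader can read it back
    np.save(path, data)


def assert_dimensions(test_set):
    # hypothetical helper: the loader is expected to expose seismic and label
    # cubes whose shapes match the generated [IL, XL, D] volumes
    assert test_set.seismic.shape == (IL, XL, D)
    assert test_set.labels.shape == test_set.seismic.shape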
Example #2
def _evaluate_split(split,
                    section_aug,
                    model,
                    device,
                    running_metrics_overall,
                    config,
                    debug=False):
    logger = logging.getLogger(__name__)

    TestSectionLoader = get_test_loader(config)
    test_set = TestSectionLoader(
        data_dir=config.DATASET.ROOT,
        split=split,
        is_transform=True,
        augmentations=section_aug,
    )

    n_classes = test_set.n_classes

    test_loader = data.DataLoader(test_set,
                                  batch_size=1,
                                  num_workers=config.WORKERS,
                                  shuffle=False)
    if debug:
        logger.info("Running in Debug/Test mode")
        test_loader = take(1, test_loader)

    running_metrics_split = runningScore(n_classes)

    # testing mode:
    with torch.no_grad():  # operations inside don't track history
        model.eval()
        total_iteration = 0
        for i, (images, labels) in enumerate(test_loader):
            logger.info(f"split: {split}, section: {i}")
            total_iteration = total_iteration + 1

            outputs = model(images.to(device))

            pred = outputs.detach().max(1)[1].cpu().numpy()
            gt = labels.numpy()
            running_metrics_split.update(gt, pred)
            running_metrics_overall.update(gt, pred)

    # get scores
    score, class_iou = running_metrics_split.get_scores()

    # Log split results
    logger.info(f'Pixel Acc: {score["Pixel Acc: "]:.3f}')
    for cdx, class_name in enumerate(_CLASS_NAMES):
        logger.info(
            f'  {class_name}_accuracy {score["Class Accuracy: "][cdx]:.3f}')

    logger.info(f'Mean Class Acc: {score["Mean Class Acc: "]:.3f}')
    logger.info(f'Freq Weighted IoU: {score["Freq Weighted IoU: "]:.3f}')
    logger.info(f'Mean IoU: {score["Mean IoU: "]:0.3f}')
    running_metrics_split.reset()
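A hedged usage sketch for the function above: evaluate each test split while a shared runningScore instance accumulates the overall confusion matrix (the split names and the source of n_classes are assumptions):

# hypothetical driver around _evaluate_split
running_metrics_overall = runningScore(n_classes)  # n_classes assumed known to the caller
for split in ["test1", "test2"]:  # split names are an assumption
    _evaluate_split(split, section_aug, model, device,
                    running_metrics_overall, config, debug=False)

# overall scores aggregated across both splits
score, class_iou = running_metrics_overall.get_scores()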
Example #3
def test_TestSectionLoader_should_load_data_from_test2_set():
    with tempfile.TemporaryDirectory() as data_dir:
        os.makedirs(os.path.join(data_dir, "test_once"))
        os.makedirs(os.path.join(data_dir, "splits"))

        seismic = np.zeros([IL, XL, D])
        generate_npy_files(
            os.path.join(data_dir, "test_once", "test2_seismic.npy"), seismic)

        # read the generated volume back as a quick sanity check
        A = np.load(os.path.join(data_dir, "test_once", "test2_seismic.npy"))

        labels = np.ones([IL, XL, D])
        generate_npy_files(
            os.path.join(data_dir, "test_once", "test2_labels.npy"), labels)

        txt_path = os.path.join(data_dir, "splits", "section_test2.txt")
        open(txt_path, "a").close()

        TestSectionLoader = get_test_loader(config)
        config.merge_from_list(["DATASET.ROOT", data_dir])
        test_set = TestSectionLoader(config, split="test2")

        assert_dimensions(test_set)
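The tests mutate a shared config object before building the loader; merge_from_list is the yacs CfgNode API. A minimal sketch of how such a config might be declared (every field except DATASET.ROOT is an assumption for illustration):

from yacs.config import CfgNode as CN

config = CN()
config.WORKERS = 4
config.DATASET = CN()
config.DATASET.ROOT = ""  # overridden in the tests via merge_from_list

# the same call the tests use to point the loader at a temporary directory
config.merge_from_list(["DATASET.ROOT", "/tmp/data"])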
Example #4
def _evaluate_split(
    split,
    section_aug,
    model,
    pre_processing,
    output_processing,
    device,
    running_metrics_overall,
    config,
    debug=False,
):
    logger = logging.getLogger(__name__)

    TestSectionLoader = get_test_loader(config)
    test_set = TestSectionLoader(
        config.DATASET.ROOT,
        split=split,
        is_transform=True,
        augmentations=section_aug,
    )

    n_classes = test_set.n_classes

    test_loader = data.DataLoader(test_set,
                                  batch_size=1,
                                  num_workers=config.WORKERS,
                                  shuffle=False)

    if debug:
        logger.info("Running in Debug/Test mode")
        test_loader = take(1, test_loader)

    # git metadata may be unavailable (e.g. outside a repository); fall back to a simpler output path
    try:
        output_dir = generate_path(
            config.OUTPUT_DIR + "_test",
            git_branch(),
            git_hash(),
            config.MODEL.NAME,
            current_datetime(),
        )
    except TypeError:
        output_dir = generate_path(
            config.OUTPUT_DIR + "_test",
            config.MODEL.NAME,
            current_datetime(),
        )

    running_metrics_split = runningScore(n_classes)

    # testing mode:
    with torch.no_grad():  # operations inside don't track history
        model.eval()
        total_iteration = 0
        for i, (images, labels) in enumerate(test_loader):
            logger.info(f"split: {split}, section: {i}")
            total_iteration = total_iteration + 1

            outputs = _patch_label_2d(
                model,
                images,
                pre_processing,
                output_processing,
                config.TRAIN.PATCH_SIZE,
                config.TEST.TEST_STRIDE,
                config.VALIDATION.BATCH_SIZE_PER_GPU,
                device,
                n_classes,
            )

            pred = outputs.detach().max(1)[1].numpy()
            gt = labels.numpy()
            running_metrics_split.update(gt, pred)
            running_metrics_overall.update(gt, pred)

            #  dump images to disk for review
            mask_to_disk(pred.squeeze(),
                         os.path.join(output_dir, f"{i}_pred.png"))
            mask_to_disk(gt.squeeze(), os.path.join(output_dir, f"{i}_gt.png"))

    # get scores
    score, class_iou = running_metrics_split.get_scores()

    # Log split results
    logger.info(f'Pixel Acc: {score["Pixel Acc: "]:.3f}')
    for cdx, class_name in enumerate(_CLASS_NAMES):
        logger.info(
            f'  {class_name}_accuracy {score["Class Accuracy: "][cdx]:.3f}')

    logger.info(f'Mean Class Acc: {score["Mean Class Acc: "]:.3f}')
    logger.info(f'Freq Weighted IoU: {score["Freq Weighted IoU: "]:.3f}')
    logger.info(f'Mean IoU: {score["Mean IoU: "]:0.3f}')
    running_metrics_split.reset()
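Debug mode truncates the DataLoader with take(1, test_loader). If the toolz import is unavailable, an equivalent can be sketched with itertools.islice (a stand-in mirroring toolz.take, not the project's own definition):

from itertools import islice


def take(n, iterable):
    # yield at most the first n items of the iterable, lazily
    return islice(iterable, n)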
Example #5
def _evaluate_split(
    split,
    section_aug,
    model,
    pre_processing,
    output_processing,
    device,
    running_metrics_overall,
    config,
    data_flow,
    debug=False,
):
    logger = logging.getLogger(__name__)

    TestSectionLoader = get_test_loader(config)

    test_set = TestSectionLoader(
        config,
        split=split,
        is_transform=True,
        augmentations=section_aug,
        debug=debug,
    )

    n_classes = test_set.n_classes

    if debug:
        data_flow[split] = dict()
        data_flow[split]["test_section_loader_length"] = len(test_set)
        data_flow[split]["test_input_shape"] = test_set.seismic.shape
        data_flow[split]["test_label_shape"] = test_set.labels.shape
        data_flow[split]["n_classes"] = n_classes

    test_loader = data.DataLoader(test_set,
                                  batch_size=1,
                                  num_workers=config.WORKERS,
                                  shuffle=False)

    if debug:
        data_flow[split]["test_loader_length"] = len(test_loader)
        logger.info("Running in Debug/Test mode")
        take_n = 2
        test_loader = take(take_n, test_loader)
        data_flow[split]["take_n_sections"] = take_n
        pred_list, gt_list, img_list = [], [], []

    # git metadata may be unavailable (e.g. outside a repository); fall back to a simpler output path
    try:
        output_dir = generate_path(
            f"{config.OUTPUT_DIR}/test/{split}",
            git_branch(),
            git_hash(),
            config.MODEL.NAME,
            current_datetime(),
        )
    except TypeError:
        output_dir = generate_path(
            f"{config.OUTPUT_DIR}/test/{split}",
            config.MODEL.NAME,
            current_datetime(),
        )

    running_metrics_split = runningScore(n_classes)

    # evaluation mode:
    with torch.no_grad():  # operations inside don't track history
        model.eval()
        for i, (images, labels) in enumerate(test_loader):
            logger.info(f"split: {split}, section: {i}")
            outputs = _patch_label_2d(
                model,
                images,
                pre_processing,
                output_processing,
                config.TRAIN.PATCH_SIZE,
                config.TEST.TEST_STRIDE,
                config.VALIDATION.BATCH_SIZE_PER_GPU,
                device,
                n_classes,
                split,
                debug,
                config.DATASET.MIN,
                config.DATASET.MAX,
            )

            pred = outputs.detach().max(1)[1].numpy()
            gt = labels.numpy()
            if debug:
                pred_list.append((pred.shape, len(np.unique(pred))))
                gt_list.append((gt.shape, len(np.unique(gt))))
                img_list.append(images.numpy().shape)

            running_metrics_split.update(gt, pred)
            running_metrics_overall.update(gt, pred)

            #  dump images to disk for review
            mask_to_disk(pred.squeeze(),
                         os.path.join(output_dir, f"{i}_pred.png"), n_classes)
            mask_to_disk(gt.squeeze(), os.path.join(output_dir, f"{i}_gt.png"),
                         n_classes)

    if debug:
        data_flow[split]["pred_shape"] = pred_list
        data_flow[split]["gt_shape"] = gt_list
        data_flow[split]["img_shape"] = img_list

    # get scores
    score, class_iou = running_metrics_split.get_scores()

    # Log split results
    logger.info(f'Pixel Acc: {score["Pixel Acc: "]:.3f}')
    if debug:
        for cdx in range(n_classes):
            logger.info(
                f'  Class_{cdx}_accuracy {score["Class Accuracy: "][cdx]:.3f}')
    else:
        for cdx, class_name in enumerate(_CLASS_NAMES):
            logger.info(
                f'  {class_name}_accuracy {score["Class Accuracy: "][cdx]:.3f}'
            )

    logger.info(f'Mean Class Acc: {score["Mean Class Acc: "]:.3f}')
    logger.info(f'Freq Weighted IoU: {score["Freq Weighted IoU: "]:.3f}')
    logger.info(f'Mean IoU: {score["Mean IoU: "]:0.3f}')
    running_metrics_split.reset()
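In debug mode the function fills the caller-supplied data_flow dictionary with shapes and lengths. A speculative follow-up sketch for the calling code, assuming it wants to persist those statistics as JSON in the configured output directory (the file name and serialization choices are assumptions):

import json

data_flow = {}
_evaluate_split("test1", section_aug, model, pre_processing, output_processing,
                device, running_metrics_overall, config, data_flow, debug=True)

# tuples (array shapes) serialize as lists; default=str covers anything else
with open(os.path.join(config.OUTPUT_DIR, "data_flow.json"), "w") as f:
    json.dump(data_flow, f, indent=4, default=str)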