예제 #1
0
def test_count_by_organ(loaders, models, cls_limit, output_path):
    """Run image-level count inference once per organ and write a merged CSV.

    Runs `inference_image` on each of the three (loader, model) pairs
    (order matches the module-level `organs` list: colon / breast / prostate
    datasets are switched to "image" mode first), concatenates the per-organ
    results, re-sorts the counts by integer image id, and writes them to
    `<output_path>/<now>-count.csv` with columns [id, count].

    :param loaders:     sequence of three DataLoaders, one per organ
    :param models:      sequence of three models, parallel to `loaders`
    :param cls_limit:   forwarded to `inference_image` (classification limit)
    :param output_path: directory that receives the CSV file
    """
    print('Start testing ...')

    # The three organ datasets are module-level globals; inference below
    # expects them in image-counting mode.
    colonset.setmode("image")
    breastset.setmode("image")
    prostateset.setmode("image")

    ids = np.array([])
    counts = np.array([])
    for i in range(3):
        organ_ids, _, organ_counts = inference_image(loaders[i],
                                                     models[i],
                                                     device,
                                                     mode='test',
                                                     cls_limit=cls_limit,
                                                     return_id=True)
        for img_id, cnt in zip(organ_ids, organ_counts):
            print("id: {}, organ: {}, pred: {}".format(int(img_id),
                                                       organs[i],
                                                       int(cnt)))
        ids = np.concatenate((ids, organ_ids))
        counts = np.concatenate((counts, organ_counts))

    # Inference returns organ-grouped results; restore global id order.
    counts = counts[np.argsort(ids.astype(int))]

    # `with` guarantees the CSV is closed even if writing fails (the
    # original leaked the handle on any exception between open and close).
    with open(os.path.join(output_path, '{}-count.csv'.format(now)),
              'w',
              newline="") as fconv:
        w = csv.writer(fconv, delimiter=',')
        w.writerow(['id', 'count'])
        for i, y in enumerate(counts, start=1):
            w.writerow([i, y])
예제 #2
0
def test_ensemble(loader, models, epoch, cls_limit, output_path):
    """Ensemble image-level counts from several models and write them to CSV.

    Each model is run through `inference_image` on the same loader; the
    per-image counts are averaged across models, rounded to int, and written
    to `<output_path>/<now>-count-e<epoch>.csv` as [id, count, organ] rows
    (organ looked up from the module-level `testset`).

    :param loader:      DataLoader over the shared test set
    :param models:      iterable of models to ensemble
    :param epoch:       epoch tag embedded in the output filename
    :param cls_limit:   forwarded to `inference_image` (classification limit)
    :param output_path: directory that receives the CSV file
    """
    outputs = []
    for i, m in enumerate(models):
        print('Testing {}/{}...'.format(i + 1, len(models)))

        testset.setmode("image")
        # inference_image(...)[1] is the per-image count array.
        outputs.append(
            inference_image(loader,
                            m,
                            device,
                            mode='test',
                            cls_limit=cls_limit)[1])

    # take average as the final result
    output = np.asarray(outputs).mean(axis=0).round().astype(int)

    # `with` closes the file even on error; the original opened the file
    # before inference and leaked the handle if any model failed.
    with open(os.path.join(output_path,
                           '{}-count-e{}.csv'.format(now, epoch)),
              'w',
              newline="") as fconv:
        w = csv.writer(fconv, delimiter=',')
        w.writerow(['id', 'count', 'organ'])
        for i, y in enumerate(output, start=1):
            w.writerow([i, y, testset.organs[i - 1]])
예제 #3
0
def test_count(loader, model, epoch, cls_limit, output_path):
    """Run image-level count inference for one model and write results to CSV.

    Switches the module-level `testset` to "image" mode, runs
    `inference_image`, and writes one [id, count, organ] row per image to
    `<output_path>/<now>-count-e<epoch>.csv`.

    :param loader:      DataLoader over the test set
    :param model:       model to evaluate
    :param epoch:       epoch tag embedded in the output filename
    :param cls_limit:   forwarded to `inference_image` (classification limit)
    :param output_path: directory that receives the CSV file
    """
    print('Start testing ...')

    testset.setmode("image")
    # inference_image(...)[1] is the per-image count array.
    counts = inference_image(loader,
                             model,
                             device,
                             mode='test',
                             cls_limit=cls_limit)[1]

    # `with` guarantees closure on error; the original held the file open
    # across inference and leaked the handle on any exception.
    with open(os.path.join(output_path,
                           '{}-count-e{}.csv'.format(now, epoch)),
              'w',
              newline="") as fconv:
        w = csv.writer(fconv, delimiter=',')
        w.writerow(['id', 'count', 'organ'])
        for i, y in enumerate(counts, start=1):
            w.writerow([i, y, testset.organs[i - 1]])
예제 #4
0
def test_tile(loader, model, epoch, reg_limit, reg_loader, output_path):
    """Run tile-level inference, optionally filter out artifact images, and
    write heatmap tile predictions to CSV.

    :param loader:      DataLoader over the tile-mode test set
    :param model:       network model (must support setmode("image"))
    :param epoch:       epoch tag embedded in the output filenames
    :param reg_limit:   if truthy, discard tiles from images whose regression
                        count is zero (treated as artifacts)
    :param reg_loader:  DataLoader used for the image-level count pass when
                        reg_limit is set
    :param output_path: directory that receives the CSV output files
    """

    # Tile predictions go to
    # output_path/<timestamp>-pred-e<epoch>-p<tilesize>-i<interval>-c<threshold>.csv;
    # the header is written here and tile rows are appended at the end.
    fconv = open(os.path.join(output_path, '{}-pred-e{}-p{}-i{}-c{}.csv'.format(
        now, epoch, args.tile_size, args.interval, args.threshold)), 'w', newline="")
    w = csv.writer(fconv)
    w.writerow(['tile_size', '{}'.format(testset.tile_size)])
    w.writerow(['interval', '{}'.format(testset.interval)])
    w.writerow(['idx', 'grid', 'prob'])
    fconv.close()

    def rank(testset, probs):
        """Sort tiles by (image, probability) and keep those whose probability
        exceeds args.threshold; returns (tiles, probs, groups) filtered in
        lockstep."""

        groups = np.array(testset.tileIDX)   # image index of each tile
        tiles = np.array(testset.tiles_grid) # grid coordinates of each tile

        # Sort primarily by image group, secondarily by probability.
        order = np.lexsort((probs, groups))
        groups = groups[order]
        probs = probs[order]
        tiles = tiles[order]

        # index = np.empty(len(groups), 'bool')
        # index[-topk:] = True
        # index[:-topk] = groups[topk:] != groups[:-topk]
        # Boolean mask: keep only tiles above the confidence threshold.
        index = [prob > args.threshold for prob in probs]

        return tiles[index], probs[index], groups[index]

    print('Start testing ...')

    testset.setmode("tile")
    probs = inference_tiles(loader, model, device, mode='test')
    tiles, probs, groups = rank(testset, probs)

    # clear artifact images
    if reg_limit:
        reg_testset.setmode("image")
        model.setmode("image")

        with open(os.path.join(output_path, '{}-count-e{}.csv'.format(now, epoch)),
                  'w', newline="") as f:
            w = csv.writer(f, delimiter=',')
            w.writerow(['id', 'count', 'organ'])

            # Image-level regression counts; also logged to their own CSV.
            counts = inference_image(reg_loader, model, device, mode='test')[1]
            for i, y in enumerate(counts, start=1):
                w.writerow([i, y, reg_testset.organs[i - 1]])

        # Indices of images with a nonzero count; tiles belonging to
        # zero-count (artifact) images are dropped below.
        img_indices = np.select([counts != 0], [counts]).nonzero()[0]
        indices = [i for i, g in enumerate(groups) if g in img_indices]
        tiles = tiles[indices]
        probs = probs[indices]
        groups = groups[indices]

    # Re-open the prediction CSV in append mode and let heatmap() add the
    # per-tile rows while rendering the heatmap images.
    fconv = open(os.path.join(output_path, '{}-pred-e{}-p{}-i{}-c{}.csv'.format(
        now, epoch, args.tile_size, args.interval, args.threshold)), 'a', newline="")
    heatmap(testset, tiles, probs, groups, fconv, output_path)
    fconv.close()
예제 #5
0
        from inference import inference_image

        # clear artifact images
        limit_set = LystoTestset(os.path.join(training_data_path,
                                              "training.h5"),
                                 num_of_imgs=100 if args.debug else 0)
        limit_loader = DataLoader(limit_set,
                                  batch_size=args.image_batch_size,
                                  shuffle=False,
                                  num_workers=args.workers,
                                  pin_memory=True)

        limit_set.setmode("image")
        model.setmode("image")

        counts = inference_image(limit_loader, model, device, mode='test')[1]

        img_indices = np.select([counts != 0], [counts]).nonzero()[0]
        indices = [i for i, g in enumerate(groups) if g in img_indices]
        tiles = tiles[indices]
        groups = groups[indices]

        pseudo_masks = generate_masks(dataset,
                                      tiles,
                                      groups,
                                      preprocess=args.preprocess,
                                      output_path=os.path.join(
                                          training_data_path,
                                          args.pseudomask_dir))
        trainset = Maskset(os.path.join(training_data_path, "training.h5"),
                           pseudo_masks,