示例#1
0
                # NOTE(review): fragment — the enclosing function and the `if`
                # matching the `else` below begin above this excerpt.
                # Assign each predicted instance the majority pixel type.
                for idx, inst_id in enumerate(pred_id_list):
                    # Type predictions of all pixels belonging to this instance.
                    inst_type = pred_type[pred_inst == inst_id]
                    type_list, type_pixels = np.unique(inst_type,
                                                       return_counts=True)
                    type_list = list(zip(type_list, type_pixels))
                    # Sort (type_id, pixel_count) pairs by count, descending.
                    type_list = sorted(type_list,
                                       key=lambda x: x[1],
                                       reverse=True)
                    inst_type = type_list[0][0]
                    if inst_type == 0:  # ! pick the 2nd most dominant if exist
                        if len(type_list) > 1:
                            inst_type = type_list[1][0]
                        else:
                            print('[Warn] Instance has `background` type')
                    pred_inst_type[idx] = inst_type
                pred_inst_centroid = get_inst_centroid(pred_inst)

                # Save instance map, type map, per-instance types and centroids.
                sio.savemat(
                    '%s/%s.mat' % (proc_dir, basename), {
                        'inst_map': pred_inst,
                        'type_map': pred_type,
                        'inst_type': pred_inst_type[:, None],
                        'inst_centroid': pred_inst_centroid,
                    })
            else:
                # No type classification: save the instance map only.
                sio.savemat('%s/%s.mat' % (proc_dir, basename),
                            {'inst_map': pred_inst})

            ##
            print('FINISH')
示例#2
0
        def process_image(filename):
            """Post-process one raw prediction .mat file and save outputs.

            Loads the network output for *filename* from ``pred_dir``, applies
            the model-specific instance post-processing, optionally derives a
            per-instance nuclei type, and writes a .mat (plus an overlay .png
            and a per-type count .log when type classification is on) into
            ``proc_dir``.

            Relies on closure variables: ``cfg``, ``data_dir``, ``pred_dir``,
            ``proc_dir``, ``jobs``, ``marker_mode``, ``energy_mode``.
            """
            filename = os.path.basename(filename)
            basename = filename.split(".")[0]
            if jobs == 1:  # only log when running single-process
                print(pred_dir, basename, flush=True)

            ## read the source image (BGR -> RGB for visualization)
            img = cv2.imread(
                os.path.join(data_dir, "{}{}".format(basename,
                                                     cfg.inf_imgs_ext)))
            img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)

            pred = sio.loadmat(
                os.path.join(pred_dir, "{}.mat".format(basename)))
            pred = np.squeeze(pred["result"])

            # Resolve the flag once so every use below agrees. The original
            # guarded the first check with hasattr() but read the attribute
            # bare at the second use, raising AttributeError when cfg lacked
            # `type_classification`.
            type_classification = bool(
                getattr(cfg, "type_classification", False))

            if type_classification:
                # Channel layout: [:nr_types] type logits, [nr_types:] the
                # instance-segmentation channels.
                pred_inst = pred[..., cfg.nr_types:]
                pred_type = pred[..., :cfg.nr_types]

                pred_inst = np.squeeze(pred_inst)
                pred_type = np.argmax(pred_type, axis=-1)
            else:
                pred_inst = pred

            # Model-specific instance post-processing.
            if cfg.model_type == "np_hv" or cfg.model_type == "np_hv_opt":
                pred_inst = postproc.hover.proc_np_hv(pred_inst,
                                                      marker_mode=marker_mode,
                                                      energy_mode=energy_mode,
                                                      rgb=img)
            elif cfg.model_type == "np_dist":
                pred_inst = postproc.hover.proc_np_dist(pred_inst)

            # ! will be extremely slow on WSI/TMA so it's advisable to comment this out
            # * remap once so that further processing faster (metrics calculation, etc.)
            if cfg.remap_labels:
                pred_inst = remap_label(pred_inst, by_size=True)

            # for instance segmentation only
            if type_classification:
                #### * Get class of each instance id, stored at index id-1
                pred_id_list = list(
                    np.unique(pred_inst))[1:]  # exclude background ID
                pred_inst_type = np.full(len(pred_id_list), 0, dtype=np.int32)
                for idx, inst_id in enumerate(pred_id_list):
                    # Majority vote of the pixel-wise type over this instance.
                    inst_type = pred_type[pred_inst == inst_id]
                    type_list, type_pixels = np.unique(inst_type,
                                                       return_counts=True)
                    type_list = list(zip(type_list, type_pixels))
                    type_list = sorted(type_list,
                                       key=lambda x: x[1],
                                       reverse=True)
                    inst_type = type_list[0][0]
                    if inst_type == 0:  # ! pick the 2nd most dominant if exist
                        if len(type_list) > 1:
                            inst_type = type_list[1][0]
                        # else: instance is entirely background-typed; keep 0
                        # (warning deliberately suppressed, as in original)
                    pred_inst_type[idx] = inst_type
                pred_inst_centroid = get_inst_centroid(pred_inst)

                ###### ad hoc just once for pannuke predictions
                # for key in ['type_map', 'inst_type']:
                # pred_type[(pred_type == 5)] = 4
                # pred_inst_type[(pred_inst_type == 5)] = 4

                sio.savemat(
                    os.path.join(proc_dir, "{}.mat".format(basename)),
                    {
                        "inst_map": pred_inst,
                        "type_map": pred_type,
                        "inst_type": pred_inst_type[:, None],
                        "inst_centroid": pred_inst_centroid,
                    },
                )
                overlaid_output = visualize_instances(
                    pred_inst,
                    img,
                    ((cfg.nuclei_type_dict, cfg.color_palete),
                     pred_inst_type[:, None]),
                    cfg.outline,
                    cfg.skip_types,
                )
                overlaid_output = cv2.cvtColor(overlaid_output,
                                               cv2.COLOR_BGR2RGB)
                cv2.imwrite(os.path.join(proc_dir, "{}.png".format(basename)),
                            overlaid_output)
                # Log per-type nuclei counts, mapping type id back to its name
                # via the inverted nuclei_type_dict.
                with open(os.path.join(proc_dir, f"{basename}.log"),
                          "w") as log_file:
                    unique, counts = np.unique(pred_inst_type[:, None],
                                               return_counts=True)
                    unique = list(unique)
                    if 0 in unique:  # remove background entries
                        counts = np.delete(counts, unique.index(0))
                        unique.remove(0)
                    print(
                        f"{basename} : {dict(zip([{str(v): str(k) for k, v in cfg.nuclei_type_dict.items()}[str(item)] for item in unique], counts))}",
                        file=log_file,
                    )

            else:
                sio.savemat(
                    os.path.join(proc_dir, "{}.mat".format(basename)),
                    {"inst_map": pred_inst},
                )
                overlaid_output = visualize_instances(pred_inst, img)
                overlaid_output = cv2.cvtColor(overlaid_output,
                                               cv2.COLOR_BGR2RGB)
                cv2.imwrite(
                    os.path.join(proc_dir, "{}_uc.png".format(basename)),
                    overlaid_output,
                )

            ##
            if jobs == 1:
                print(
                    f"Finished for {basename} {datetime.now().strftime('%H:%M:%S.%f')}"
                )
示例#3
0
ann_dir = '/home/test/GhulamMurtaza/panNuke/Test/Labels/'  # * directory contains .npy
filepath_list = glob.glob('%s/*.npy' % ann_dir)

save_dir = 'GroundTruth/dump/'  # directory to save summarized info about nuclei

# Recreate the output directory from scratch.
rm_n_mkdir(save_dir)
for path in filepath_list:
    basename = os.path.basename(path).split('.')[0]

    # Ground-truth array; channel 0 = instance ids, channel 1 = type ids
    # (assumed from the indexing below — TODO confirm against the .npy writer).
    true_map = np.load(path)
    true_inst = true_map[..., 0]
    true_type = true_map[..., 1]

    # Relabel instances contiguously, ordered by size.
    true_inst = remap_label(true_inst, by_size=True)
    true_inst_centroid = get_inst_centroid(true_inst)
    #### * Get class of each instance id, stored at index id-1
    # for ground truth instance blob
    true_id_list = list(np.unique(true_inst))[1:]  # exclude background
    true_inst_type = np.full(len(true_id_list), -1, dtype=np.int32)
    for idx, inst_id in enumerate(true_id_list):
        inst_type = true_type[true_inst == inst_id]
        # Majority vote over the instance's pixels.
        type_list, type_pixels = np.unique(inst_type, return_counts=True)
        inst_type = type_list[np.argmax(type_pixels)]
        if inst_type != 0:  # there are artifact nuclei (background types)
            true_inst_type[idx] = inst_type

    # NOTE(review): this call is truncated in the excerpt — the closing
    # braces/parenthesis of the savemat dict lie beyond this fragment.
    sio.savemat(
        '%s/%s.mat' % (save_dir, basename), {
            'inst_type': true_inst_type[:, None],
            'inst_centroid': true_inst_centroid,
        # NOTE(review): fragment — the enclosing function, the definitions of
        # upmap/downmap/region/annotDict/nucleicount/matches, and the body of
        # the trailing `else:` all lie outside this excerpt.
        # Bridge background pixels sandwiched between agreeing labels: where
        # both upmap and downmap carry the same nonzero id, fill the gap.
        udbridge = np.logical_and(
            instance_map == 0,
            np.logical_and(np.logical_and(upmap != 0, downmap != 0),
                           upmap == downmap))
        instance_map[udbridge] = upmap[udbridge]

        instance_map = remap_label(
            instance_map, by_size=True)  # This resets instance number from 0
        type_map = np.zeros(instance_map.shape, np.int32)
        instance_list = list(np.unique(instance_map))[
            1:]  # Background 0 excluded from instance list
        # Per-instance attribute arrays, stored at index id-1.
        inst_type = np.full(len(instance_list), 0, dtype=np.int32)
        inst_type2 = np.full(len(instance_list), 0, dtype=np.int32)
        inst_size = np.full(len(instance_list), 0, dtype=np.int32)
        inst_length = np.full(len(instance_list), 0, dtype=np.float32)
        inst_centroids = get_inst_centroid(instance_map)
        # Assign annotated point labels to the instances that contain them.
        for point in region["points"]:
            annot = annotDict(point["type"])
            # Shift the global point coordinate into this region's local frame;
            # numpy indexing is (row, col) = (y, x), hence coor[1] first.
            inst_id = instance_map[(point["coor"][1] -
                                    (region["bound"][0][1] + 1),
                                    point["coor"][0] -
                                    (region["bound"][0][0] + 1))]
            if inst_id > 0:
                #print("{}: {}".format(inst_id, annot))
                # Tally whenever this point changes the instance's stored type
                # (including its first assignment from the 0 default).
                if inst_type[inst_id - 1] != annot:
                    nucleicount[annot - 1] += 1
                inst_type[inst_id - 1] = annot
                inst_type2[inst_id - 1] = annot
                type_map[instance_map == inst_id] = annot
                matches += 1
            else:
示例#5
0
        def process_image(filename):
            """Post-process one raw prediction .mat file and save outputs.

            Loads the network output for *filename*, applies the
            model-specific instance post-processing ('np_hv', 'np_hv_opt',
            'np_dist', 'dist', 'micronet', or a fallback), optionally derives
            a per-instance nuclei type, and writes the .mat / overlay .png /
            count .log files into ``proc_dir``.

            Relies on closure variables: ``cfg``, ``data_dir``, ``pred_dir``,
            ``proc_dir``, ``parallel``, ``marker_mode``, ``energy_mode``.
            """
            filename = os.path.basename(filename)
            basename = filename.split('.')[0]
            if not parallel:  # only log when running single-process
                print(pred_dir, basename, flush=True)

            ## read the source image (BGR -> RGB for visualization)
            img = cv2.imread(os.path.join(data_dir, '{}{}'.format(basename, cfg.inf_imgs_ext)))
            img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)

            pred = sio.loadmat(os.path.join(pred_dir, '{}.mat'.format(basename)))
            pred = np.squeeze(pred['result'])

            # Resolve the flag once so every use below agrees. The original
            # guarded the first check with hasattr() but read the attribute
            # bare at the second use, raising AttributeError when cfg lacked
            # `type_classification`.
            type_classification = bool(getattr(cfg, 'type_classification', False))

            if type_classification:
                # Channel layout: [:nr_types] type logits, [nr_types:] the
                # instance-segmentation channels.
                pred_inst = pred[..., cfg.nr_types:]
                pred_type = pred[..., :cfg.nr_types]

                pred_inst = np.squeeze(pred_inst)
                pred_type = np.argmax(pred_type, axis=-1)

                if cfg.model_type == 'micronet':
                    # dilate prediction of all type to match it with
                    # the instance segmentation post-proc code
                    kernel = np.array([[0, 1, 0],
                                       [1, 1, 1],
                                       [0, 1, 0]], np.uint8)
                    canvas = np.zeros_like(pred_type, dtype=np.int32)
                    for type_id in range(1, cfg.nr_classes):
                        type_map = (pred_type == type_id).astype('uint8')
                        type_map = cv2.dilate(type_map, kernel, iterations=1)
                        canvas[type_map > 0] = type_id
                    # BUGFIX: the dilated map was computed but then discarded;
                    # use it as the type map, as the comment above intends.
                    pred_type = canvas
            else:
                pred_inst = pred

            # Model-specific instance post-processing.
            if cfg.model_type == 'np_hv' or cfg.model_type == 'np_hv_opt':
                pred_inst = postproc.hover.proc_np_hv(pred_inst,
                                                      marker_mode=marker_mode,
                                                      energy_mode=energy_mode,
                                                      rgb=img)
            elif cfg.model_type == 'np_dist':
                pred_inst = postproc.hover.proc_np_dist(pred_inst)
            elif cfg.model_type == 'dist':
                pred_inst = postproc.dist.process(pred_inst)
            else:
                pred_inst = postproc.other.process(pred_inst, cfg.model_type)

            # ! will be extremely slow on WSI/TMA so it's advisable to comment this out
            # * remap once so that further processing faster (metrics calculation, etc.)
            if cfg.remap_labels:
                pred_inst = remap_label(pred_inst, by_size=True)

            # for instance segmentation only
            if type_classification:
                #### * Get class of each instance id, stored at index id-1
                pred_id_list = list(np.unique(pred_inst))[1:]  # exclude background ID
                pred_inst_type = np.full(len(pred_id_list), 0, dtype=np.int32)
                for idx, inst_id in enumerate(pred_id_list):
                    # Majority vote of the pixel-wise type over this instance.
                    inst_type = pred_type[pred_inst == inst_id]
                    type_list, type_pixels = np.unique(inst_type, return_counts=True)
                    type_list = list(zip(type_list, type_pixels))
                    type_list = sorted(type_list, key=lambda x: x[1], reverse=True)
                    inst_type = type_list[0][0]
                    if inst_type == 0:  # ! pick the 2nd most dominant if exist
                        if len(type_list) > 1:
                            inst_type = type_list[1][0]
                        # else: instance is entirely background-typed; keep 0
                        # (warning deliberately suppressed, as in original)
                    pred_inst_type[idx] = inst_type
                pred_inst_centroid = get_inst_centroid(pred_inst)

                ###### ad hoc just once for pannuke predictions
                # for key in ['type_map', 'inst_type']:
                # pred_type[(pred_type == 5)] = 4
                # pred_inst_type[(pred_inst_type == 5)] = 4

                sio.savemat(os.path.join(proc_dir, '{}.mat'.format(basename)),
                            {'inst_map': pred_inst,
                             'type_map': pred_type,
                             'inst_type': pred_inst_type[:, None],
                             'inst_centroid': pred_inst_centroid,
                             })
                overlaid_output = visualize_instances(pred_inst, img, ((cfg.nuclei_type_dict, cfg.color_palete), pred_inst_type[:, None]), cfg.outline, cfg.skip_types)
                overlaid_output = cv2.cvtColor(overlaid_output, cv2.COLOR_BGR2RGB)
                cv2.imwrite(os.path.join(proc_dir, '{}.png'.format(basename)), overlaid_output)
                # Log per-type nuclei counts, mapping type id back to its name
                # via the inverted nuclei_type_dict.
                with open(os.path.join(proc_dir, f'{basename}.log'), 'w') as log_file:
                    unique, counts = np.unique(pred_inst_type[:, None], return_counts=True)
                    unique = list(unique)
                    if 0 in unique:  # remove background entries
                        counts = np.delete(counts, unique.index(0))
                        unique.remove(0)
                    print(f'{basename} : {dict(zip([{str(v): str(k) for k, v in cfg.nuclei_type_dict.items()}[str(item)] for item in unique], counts))}', file=log_file)

            else:
                sio.savemat(os.path.join(proc_dir, '{}.mat'.format(basename)),
                            {'inst_map': pred_inst})
                overlaid_output = visualize_instances(pred_inst, img)
                overlaid_output = cv2.cvtColor(overlaid_output, cv2.COLOR_BGR2RGB)
                cv2.imwrite(os.path.join(proc_dir, '{}_uc.png'.format(basename)), overlaid_output)

            ##
            if not parallel:
                print(f"Finished for {basename} {datetime.now().strftime('%H:%M:%S.%f')}")