Example no. 1
def process():
    """
    Performs post processing for a list of images

    """

    cfg = Config()

    for data_dir in cfg.inf_data_list:

        proc_dir = cfg.inf_output_dir + '/processed/'
        pred_dir = cfg.inf_output_dir + '/raw/'
        file_list = glob.glob(pred_dir + '*.npy')
        file_list.sort()  # ensure same order

        if not os.path.isdir(proc_dir):
            os.makedirs(proc_dir)
        for filename in file_list:
            start = time.time()
            filename = os.path.basename(filename)
            basename = filename.split('.')[0]

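            # the last character of the first '_'-separated token is taken as the
            # test-set index (assumes a naming convention such as 'test1_003')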
            test_set = basename.split('_')[0]
            test_set = test_set[-1]

            print(pred_dir, basename, end=' ', flush=True)

            ##
            img = cv2.imread(data_dir + basename + cfg.inf_imgs_ext)
            img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)

            pred_map = np.load(pred_dir + '/%s.npy' % basename)

            # get the instance level prediction
            pred_inst = process_utils(pred_map, cfg.model_mode)

            # ! remap label is slow - check to see whether it is needed!
            pred_inst = remap_label(pred_inst, by_size=True)

            overlaid_output = visualize_instances(pred_inst, img)
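            # swap the overlay back to BGR channel order before saving with OpenCV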
            overlaid_output = cv2.cvtColor(overlaid_output, cv2.COLOR_BGR2RGB)
            cv2.imwrite('%s/%s.png' % (proc_dir, basename), overlaid_output)

            # save segmentation mask
            np.save('%s/%s' % (proc_dir, basename), pred_inst)

            end = time.time()
            diff = str(round(end - start, 2))
            print('FINISH. TIME: %s' % diff)
Example no. 2
    if not os.path.isdir(proc_dir):
        os.makedirs(proc_dir)

    for filename in file_list:
        filename = os.path.basename(filename)
        basename = filename.split('.')[0]
        print(basename, norm_target, end=' ')

        ##
        img = cv2.imread(imgs_dir + basename + cfg.inf_imgs_ext)
        img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
            
        pred = sio.loadmat('%s/%s.mat' % (pred_dir, basename))
        pred = np.squeeze(pred['result'])
        if proc_mode == 'np+xy':
            pred = proc_np_xy(pred,
                              marker_mode=marker_mode,
                              energy_mode=energy_mode)
        elif proc_mode == 'np+dst':
            pred = proc_np_dst(pred)

        ##
        overlaid_output = visualize_instances(pred, img)
        overlaid_output = cv2.cvtColor(overlaid_output, cv2.COLOR_BGR2RGB)
        cv2.imwrite('%s/%s.png' % (proc_dir, basename), overlaid_output)

        sio.savemat('%s/%s_predicted_map.mat' % (proc_dir, basename), 
                                    {'predicted_map': pred})

        ##
        print('FINISH')
Example no. 3
    if cfg.model_type == 'np_hv':
        pred_inst = postproc.hover.proc_np_hv(pred_inst, 
                        marker_mode=marker_mode,
                        energy_mode=energy_mode, rgb=img)

    elif cfg.model_type == 'np_dist':
        pred_inst = postproc.hover.proc_np_dist(pred_inst)
    elif cfg.model_type == 'dist':
        pred_inst = postproc.dist.process(pred_inst)
    else:
        pred_inst = postproc.other.process(pred_inst, cfg.model_type)

    # ! will be extremely slow on WSI/TMA so it's advisable to comment this out
    # * remap once so that further processing faster (metrics calculation, etc.)
    pred_inst = remap_label(pred_inst, by_size=True)
    overlaid_output = visualize_instances(pred_inst, img)
    # plt.subplot(1,2,1)
    # plt.imshow(pred_inst)
    # plt.subplot(1,2,2)
    # plt.imshow(pred_type)
    # plt.imshow()
    # np.save('/home/test/GhulamMurtaza/pred_maps_class_instance/%s.npy' % (basename),pred_type)
    # np.save('/home/test/GhulamMurtaza/pred_maps_class_instance/%s.npy' % (basename),pred_inst)

    overlaid_output_type = visualize_instances(
        pred_type, img,
        color=[[255, 0, 0], [255, 255, 0], [0, 0, 225], [0, 128, 0], [100, 122, 56]])
    overlaid_output_true = visualize_instances(
        true_mask_type, img,
        color=[[255, 0, 0], [255, 255, 0], [0, 0, 225], [0, 128, 0], [100, 122, 56]])
    overlaid_output = cv2.cvtColor(overlaid_output, cv2.COLOR_BGR2RGB)
    overlaid_output_type = cv2.cvtColor(overlaid_output_type, cv2.COLOR_BGR2RGB)
    overlaid_output_true = cv2.cvtColor(overlaid_output_true, cv2.COLOR_BGR2RGB)

    cv2.imwrite('%s/%s.png' % (proc_dir, basename), overlaid_output)
Example no. 4
        def process_image(filename):
            filename = os.path.basename(filename)
            basename = filename.split(".")[0]
            if jobs == 1:
                print(pred_dir, basename, flush=True)

            ##
            img = cv2.imread(
                os.path.join(data_dir, "{}{}".format(basename,
                                                     cfg.inf_imgs_ext)))
            img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)

            pred = sio.loadmat(
                os.path.join(pred_dir, "{}.mat".format(basename)))
            pred = np.squeeze(pred["result"])

            if hasattr(cfg, "type_classification") and cfg.type_classification:
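                # the first cfg.nr_types channels hold the per-class scores;
                # the remaining channels feed the instance post-processing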
                pred_inst = pred[..., cfg.nr_types:]
                pred_type = pred[..., :cfg.nr_types]

                pred_inst = np.squeeze(pred_inst)
                pred_type = np.argmax(pred_type, axis=-1)

            else:
                pred_inst = pred

            if cfg.model_type == "np_hv" or cfg.model_type == "np_hv_opt":
                pred_inst = postproc.hover.proc_np_hv(pred_inst,
                                                      marker_mode=marker_mode,
                                                      energy_mode=energy_mode,
                                                      rgb=img)
            elif cfg.model_type == "np_dist":
                pred_inst = postproc.hover.proc_np_dist(pred_inst)

            # ! will be extremely slow on WSI/TMA so it's advisable to comment this out
            # * remap once so that further processing faster (metrics calculation, etc.)
            if cfg.remap_labels:
                pred_inst = remap_label(pred_inst, by_size=True)

            # for instance segmentation only
            if cfg.type_classification:
                #### * Get class of each instance id, stored at index id-1
                pred_id_list = list(
                    np.unique(pred_inst))[1:]  # exclude background ID
                pred_inst_type = np.full(len(pred_id_list), 0, dtype=np.int32)
                for idx, inst_id in enumerate(pred_id_list):
                    inst_type = pred_type[pred_inst == inst_id]
                    type_list, type_pixels = np.unique(inst_type,
                                                       return_counts=True)
                    type_list = list(zip(type_list, type_pixels))
                    type_list = sorted(type_list,
                                       key=lambda x: x[1],
                                       reverse=True)
                    inst_type = type_list[0][0]
                    if inst_type == 0:  # ! pick the 2nd most dominant if exist
                        if len(type_list) > 1:
                            inst_type = type_list[1][0]
                        else:
                            if jobs == 1:
                                pass  # print('[Warn] Instance has `background` type')
                    pred_inst_type[idx] = inst_type
                pred_inst_centroid = get_inst_centroid(pred_inst)

                ###### ad hoc just once for pannuke predictions
                # for key in ['type_map', 'inst_type']:
                # pred_type[(pred_type == 5)] = 4
                # pred_inst_type[(pred_inst_type == 5)] = 4

                sio.savemat(
                    os.path.join(proc_dir, "{}.mat".format(basename)),
                    {
                        "inst_map": pred_inst,
                        "type_map": pred_type,
                        "inst_type": pred_inst_type[:, None],
                        "inst_centroid": pred_inst_centroid,
                    },
                )
                overlaid_output = visualize_instances(
                    pred_inst,
                    img,
                    ((cfg.nuclei_type_dict, cfg.color_palete),
                     pred_inst_type[:, None]),
                    cfg.outline,
                    cfg.skip_types,
                )
                overlaid_output = cv2.cvtColor(overlaid_output,
                                               cv2.COLOR_BGR2RGB)
                cv2.imwrite(os.path.join(proc_dir, "{}.png".format(basename)),
                            overlaid_output)
                with open(os.path.join(proc_dir, f"{basename}.log"),
                          "w") as log_file:
                    unique, counts = np.unique(pred_inst_type[:, None],
                                               return_counts=True)
                    unique = list(unique)
                    if 0 in unique:  # remove background entries
                        counts = np.delete(counts, unique.index(0))
                        unique.remove(0)
                    print(
                        f"{basename} : {dict(zip([{str(v): str(k) for k, v in cfg.nuclei_type_dict.items()}[str(item)] for item in unique], counts))}",
                        file=log_file,
                    )

            else:
                sio.savemat(
                    os.path.join(proc_dir, "{}.mat".format(basename)),
                    {"inst_map": pred_inst},
                )
                overlaid_output = visualize_instances(pred_inst, img)
                overlaid_output = cv2.cvtColor(overlaid_output,
                                               cv2.COLOR_BGR2RGB)
                cv2.imwrite(
                    os.path.join(proc_dir, "{}_uc.png".format(basename)),
                    overlaid_output,
                )

            ##
            if jobs == 1:
                print(
                    f"Finished for {basename} {datetime.now().strftime('%H:%M:%S.%f')}"
                )
Example no. 5
        cv2.imwrite('%s/%s.png' % (save_dir, basename), patch)
        save_dir = os.path.join(output_folder, 'Labels')
        if not os.path.exists(save_dir): os.mkdir(save_dir)
        inst_centroids *= scale
        sio.savemat(
            '%s/%s.mat' % (save_dir, basename), {
                'inst_map': instance_map,
                'type_map': type_map,
                'inst_type': inst_type[:, None],
                'inst_centroid': inst_centroids,
            })

        save_dir = os.path.join(output_folder, 'Combined')
        if not os.path.exists(save_dir): os.mkdir(save_dir)
        colormap = [class_colors[x] for x in inst_type]
        overlaid_output = visualize_instances(instance_map, patchcorr,
                                              colormap)
        cv2.imwrite("{}/{}.png".format(save_dir, basename), overlaid_output)
        #type_map[np.logical_and(instance_map > 0, type_map == 0)] = -1
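        # instances that carry no type label: blank the corresponding image
        # pixels (255) and drop them from the instance map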
        unannot = np.logical_and(instance_map != 0, type_map == 0)
        patchjoin[unannot] = 255
        instance_map[unannot] = 0
        ann = np.dstack([instance_map, type_map])
        patchjoin = np.array(patchjoin, np.int32)
        print(ann.shape)
        print(patchjoin.shape)
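        # stack the instance and type maps as extra channels behind the image patch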
        patchjoin = np.concatenate([patchjoin, ann], axis=-1)
        sub_patches = xtractor.extract(
            patchjoin, extract_type
        )  # This extracts mirrored subpatch of 540x540 from 1k patch
        for idx, patch in enumerate(sub_patches):
            np.save("{}/{}_{}.npy".format(save_dir, basename, idx), patch)
Example no. 6
        def process_image(filename):
            filename = os.path.basename(filename)
            basename = filename.split('.')[0]
            if not parallel: print(pred_dir, basename, flush=True)

            ##
            img = cv2.imread(os.path.join(data_dir, '{}{}'.format(basename, cfg.inf_imgs_ext)))
            img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)

            pred = sio.loadmat(os.path.join(pred_dir, '{}.mat'.format(basename)))
            pred = np.squeeze(pred['result'])

            if hasattr(cfg, 'type_classification') and cfg.type_classification:
                pred_inst = pred[...,cfg.nr_types:]
                pred_type = pred[...,:cfg.nr_types]

                pred_inst = np.squeeze(pred_inst)
                pred_type = np.argmax(pred_type, axis=-1)

                if cfg.model_type == 'micronet':
                    # dilate prediction of all type to match it with
                    # the instance segmentation post-proc code
                    kernel = np.array([[0, 1, 0],
                                       [1, 1, 1],
                                       [0, 1, 0]], np.uint8)
                    canvas = np.zeros_like(pred_type, dtype=np.int32)
                    for type_id in range(1, cfg.nr_classes):
                        type_map = (pred_type == type_id).astype('uint8')
                        type_map = cv2.dilate(type_map, kernel, iterations=1)
                        canvas[type_map > 0] = type_id
                    # carry the dilated type map forward for the per-instance vote below
                    pred_type = canvas
            else:
                pred_inst = pred

            if cfg.model_type == 'np_hv' or cfg.model_type == 'np_hv_opt':
                pred_inst = postproc.hover.proc_np_hv(pred_inst,
                                marker_mode=marker_mode,
                                energy_mode=energy_mode, rgb=img)
            elif cfg.model_type == 'np_dist':
                pred_inst = postproc.hover.proc_np_dist(pred_inst)
            elif cfg.model_type == 'dist':
                pred_inst = postproc.dist.process(pred_inst)
            else:
                pred_inst = postproc.other.process(pred_inst, cfg.model_type)

            # ! will be extremely slow on WSI/TMA so it's advisable to comment this out
            # * remap once so that further processing faster (metrics calculation, etc.)
            if cfg.remap_labels:
                pred_inst = remap_label(pred_inst, by_size=True)

            # for instance segmentation only
            if cfg.type_classification:
                #### * Get class of each instance id, stored at index id-1
                pred_id_list = list(np.unique(pred_inst))[1:] # exclude background ID
                pred_inst_type = np.full(len(pred_id_list), 0, dtype=np.int32)
                for idx, inst_id in enumerate(pred_id_list):
                    inst_type = pred_type[pred_inst == inst_id]
                    type_list, type_pixels = np.unique(inst_type, return_counts=True)
                    type_list = list(zip(type_list, type_pixels))
                    type_list = sorted(type_list, key=lambda x: x[1], reverse=True)
                    inst_type = type_list[0][0]
                    if inst_type == 0: # ! pick the 2nd most dominant if exist
                        if len(type_list) > 1:
                            inst_type = type_list[1][0]
                        else:
                            if not parallel: 
                                pass # print('[Warn] Instance has `background` type')
                    pred_inst_type[idx] = inst_type
                pred_inst_centroid = get_inst_centroid(pred_inst)


                ###### ad hoc just once for pannuke predictions
                # for key in ['type_map', 'inst_type']:
                # pred_type[(pred_type == 5)] = 4
                # pred_inst_type[(pred_inst_type == 5)] = 4

                sio.savemat(os.path.join(proc_dir, '{}.mat'.format(basename)),
                            {'inst_map'  :     pred_inst,
                            'type_map'  :     pred_type,
                            'inst_type' :     pred_inst_type[:, None],
                            'inst_centroid' : pred_inst_centroid,
                            })
                overlaid_output = visualize_instances(pred_inst, img, ((cfg.nuclei_type_dict, cfg.color_palete), pred_inst_type[:, None]), cfg.outline, cfg.skip_types)
                overlaid_output = cv2.cvtColor(overlaid_output, cv2.COLOR_BGR2RGB)
                cv2.imwrite(os.path.join(proc_dir, '{}.png'.format(basename)), overlaid_output)
                with open(os.path.join(proc_dir, f'{basename}.log'), 'w') as log_file:
                    unique, counts = np.unique(pred_inst_type[:, None], return_counts=True)
                    unique = list(unique)
                    if 0 in unique: # remove background entries
                        counts = np.delete(counts, unique.index(0))
                        unique.remove(0)
                    print(f'{basename} : {dict(zip([{str(v): str(k) for k, v in cfg.nuclei_type_dict.items()}[str(item)] for item in unique], counts))}', file=log_file)

            else:
                sio.savemat(os.path.join(proc_dir, '{}.mat'.format(basename)),
                            {'inst_map'  : pred_inst})
                overlaid_output = visualize_instances(pred_inst, img)
                overlaid_output = cv2.cvtColor(overlaid_output, cv2.COLOR_BGR2RGB)
                cv2.imwrite(os.path.join(proc_dir, '{}_uc.png'.format(basename)), overlaid_output)

            ##
            if not parallel: print(f"Finished for {basename} {datetime.now().strftime('%H:%M:%S.%f')}")