Example #1
import imageio
import numpy as np
import scipy.ndimage as ndi
from skimage import transform

import hpacellseg.cellsegmentator as cellsegmentator


def main(image, output, binarize):
    # NMODEL and CMODEL are the paths to the pretrained nuclei and cell model weights
    segmentator = cellsegmentator.CellSegmentator(NMODEL, CMODEL)
    in_image = imageio.imread(image)
    n_labels = segmentator.label_nuclei([in_image])[0]
    n_labels = transform.resize(n_labels,
                                (n_labels.shape[0] * 4, n_labels.shape[1] * 4))

    # keep channel 2 only where channel 1 is zero, then label the connected components
    subgreen = n_labels[..., 2] * np.logical_not(n_labels[..., 1] > 0)
    labels = ndi.label(subgreen)[0]

    if binarize:
        labels = (labels > 0).astype(np.uint8)
        labels *= 255
    imageio.imsave(output, labels)
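
A minimal usage sketch for this entry point; the argparse wiring and the NMODEL/CMODEL values are assumptions, not part of the original snippet:

import argparse

NMODEL = 'nuclei-model.pth'   # assumed path to the pretrained nuclei model
CMODEL = 'cell-model.pth'     # assumed path to the pretrained cell model

if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Segment nuclei and write a label image')
    parser.add_argument('image', help='input image path')
    parser.add_argument('output', help='output label/mask image path')
    parser.add_argument('--binarize', action='store_true', help='write a binary 0/255 mask')
    args = parser.parse_args()
    main(args.image, args.output, args.binarize)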
Example #2
    def __init__(self, config, dl, df, trainset, use_gpu):
        self.sigm__ = lambda x: 1 / (1 + math.exp(-x))

        self.config = config
        self.df = df
        self.dl = dl
        self.trainset = trainset
        self.train_str = 'train' if trainset else 'test'
        self.sigm = nn.Sigmoid()
        self.model_path = self.config['model']['model_path']
        self.sigmoid = lambda arr: 1 / (1 + np.exp(-arr))
        self.relu = lambda arr: np.maximum(0, arr)
        self.use_amp = self.config['use_amp']
        self.img_dir = os.path.join(config[self.train_str]['path'], self.train_str)
        # self.mask_dir = self.config[self.train_str]['mask_path']
        self.exp_name = self.model_path.split('/')[-1].split('.')[0]
        self.num_classes = self.config['model']['classes']
        print(self.exp_name)

        self.cache_path = '/common/danylokolinko/hpa_cache/'
        self.pth = os.path.join(self.cache_path, f'cams/{self.exp_name}')

        self.best_val_ap = None

        self.model = Classifier(config['model'])

        self.nuc_load = lambda image_id: os.path.join(self.cache_path, 'nuc', image_id)
        self.cell_load = lambda image_id: os.path.join(self.cache_path, 'cell', image_id)

        # use_gpu = '0'
        # use_gpu = True
        device_str = 'cuda' if use_gpu else 'cpu'
        self.dev = torch.device(device_str)
        if use_gpu:
            self.model.cuda()
        self.model.eval()
        # restore the classifier weights stored under the checkpoint's 'model' key
        st_dt = torch.load(self.model_path, map_location=torch.device(device_str))['model']
        self.model.load_state_dict(st_dt)
        self.segmentator = cellsegmentator.CellSegmentator(
            config['segm_model']['nuclei_path'],
            config['segm_model']['cell_path'],
            device=device_str,
            multi_channel_model=True
        )
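
For orientation, a minimal sketch of the config dictionary this constructor reads, reconstructed only from the keys accessed above; every value is a placeholder and the real project presumably loads it from a config file:

config = {
    'use_amp': False,
    'model': {
        'model_path': 'checkpoints/classifier.pth',  # placeholder checkpoint path
        'classes': 19,                               # placeholder class count
        # ... plus whatever keys Classifier(...) itself consumes
    },
    'train': {'path': '/data/hpa'},   # images are read from <path>/train
    'test': {'path': '/data/hpa'},    # images are read from <path>/test
    'segm_model': {
        'nuclei_path': 'weights/nuclei-model.pth',
        'cell_path': 'weights/cell-model.pth',
    },
}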
Example #3
import cv2
import pandas as pd
import math

import hpacellseg.cellsegmentator as cellsegmentator


def valid_size(shape):
    # accept only images whose aspect ratio (long side / short side) is below 1.5
    s1, s2 = shape[0], shape[1]
    sa, sb = max(s1, s2), min(s1, s2)
    return float(sa) / sb < 1.5


df = pd.read_csv('data/train.csv').fillna('')
# keep only single-label samples (no '|' separator in the label column)
id_list = [x for x in df.values if '|' not in x[1]]
data_root = 'I:/datasets/kaggle/human-protein-atlas/train'
save_root = 'I:/datasets/kaggle/human-protein-atlas/train-single-cell/cell-single-label'
segmentor = cellsegmentator.CellSegmentator('data/nuclei-model.pth', 'data/cell-model.pth', padding=True)

save = []
step_size = 10  # images segmented per batch
step = math.ceil(len(id_list) / float(step_size))  # number of batches
for ep in range(step):
    try:
        prefix_list = [x[0] for x in id_list[ep * step_size:(ep + 1) * step_size]]
        label_list = [x[1] for x in id_list[ep * step_size:(ep + 1) * step_size]]
        print(f'inferring batch {ep}...')
        rpath = [f'{data_root}/{x}_red.png' for x in prefix_list]
        gpath = [f'{data_root}/{x}_green.png' for x in prefix_list]
        bpath = [f'{data_root}/{x}_blue.png' for x in prefix_list]
        ypath = [f'{data_root}/{x}_yellow.png' for x in prefix_list]
        # the multi-channel model takes [microtubules (red), ER (yellow), nuclei (blue)];
        # the green (protein) channel is not used for segmentation
        imgs = [rpath, ypath, bpath]
        print(f'input size: {len(imgs[0])}, {len(imgs[1])}, {len(imgs[2])}')
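
The snippet is cut off before any prediction is made. For reference, this is how such red/yellow/blue path batches are fed to the segmentator in the hpacellseg README (`label_cell` lives in `hpacellseg.utils`); this is a sketch using the snippet's names, not the author's original continuation:

from hpacellseg.utils import label_cell

nuc_segmentations = segmentor.pred_nuclei(imgs[2])    # blue (nuclei) channel paths
cell_segmentations = segmentor.pred_cells(imgs)       # red, yellow, blue channel paths
for prefix, nuc, cell in zip(prefix_list, nuc_segmentations, cell_segmentations):
    nuclei_mask, cell_mask = label_cell(nuc, cell)    # instance-labelled masks
    # individual cells can then be cropped from the original image and written
    # under save_root, e.g. one file per (image id, cell id) pair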
Example #4
"""Save masks using CellSegmentation"""
import hpacellseg.cellsegmentator as cellsegmentator
from hpacellseg.utils import label_cell#, label_nuclei
from typing import List
import glob
import os
import imageio
 
NUC_MODEL = 'nuclei-model.pth'  # will be downloaded if it doesn't exist
CELL_MODEL = 'cell-model.pth'  # will be downloaded if it doesn't exist
SEGMENTATOR = cellsegmentator.CellSegmentator(
    NUC_MODEL,
    CELL_MODEL,
    scale_factor=0.25,
    device='cuda',
    padding=False,
    multi_channel_model=True
)

def save_masks(from_dir, to_dir, save_cell_mask=True, save_nuc_mask=True):
    if not os.path.exists(to_dir):
        os.makedirs(to_dir)

    microtubule: List[str] = glob.glob(from_dir + '/' + '*_red.png')
    endo_ret: List[str] = [e.replace('red', 'yellow') for e in microtubule]
    nuclei: List[str] = [n.replace('red', 'blue') for n in microtubule]
    images: List[List[str]] = [microtubule, endo_ret, nuclei]

    nuc_segmentations = SEGMENTATOR.pred_nuclei(images[2])
    cell_segmentations = SEGMENTATOR.pred_cells(images)
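
save_masks is truncated at this point. A minimal sketch of how it might continue, using the label_cell and imageio imports already present above; the _cellmask/_nucmask file naming is an assumption, not taken from the original:

    for red_path, nuc_pred, cell_pred in zip(microtubule, nuc_segmentations, cell_segmentations):
        nuclei_mask, cell_mask = label_cell(nuc_pred, cell_pred)
        image_id = os.path.basename(red_path).replace('_red.png', '')
        if save_nuc_mask:
            imageio.imwrite(os.path.join(to_dir, f'{image_id}_nucmask.png'),
                            nuclei_mask.astype('uint16'))
        if save_cell_mask:
            imageio.imwrite(os.path.join(to_dir, f'{image_id}_cellmask.png'),
                            cell_mask.astype('uint16'))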
Example #5
PUBLIC_DATA_FLAG = args.public_data
PATH_TO_MASKS_ROOT = '../input/hpa_cell_mask_public/' if PUBLIC_DATA_FLAG else '../input/hpa_cell_mask/'
OUTPUT_PATH = '../input/cell_bboxes_public' if PUBLIC_DATA_FLAG else '../input/cell_bboxes_train'
IMGS_FOLDER = '../input/publichpa_1024' if PUBLIC_DATA_FLAG else '../input/hpa-single-cell-image-classification/train'

if not os.path.exists(OUTPUT_PATH):
    os.makedirs(OUTPUT_PATH)

main_df = get_public_df_ohe() if PUBLIC_DATA_FLAG else get_train_df_ohe()

NUC_MODEL = '../input/hpacellsegmentatormodelweights/dpn_unet_nuclei_v1.pth'
CELL_MODEL = '../input/hpacellsegmentatormodelweights/dpn_unet_cell_3ch_v1.pth'

segmentator = cellsegmentator.CellSegmentator(
    NUC_MODEL,
    CELL_MODEL,
    device='cuda',
    multi_channel_model=True,
    return_without_scale_restore=True)


def get_masks(imgs):
    # imgs are RGBY arrays; the segmentator takes [microtubules (R), ER (Y), nuclei (B)]
    images = [[img[:, :, 0] for img in imgs], [img[:, :, 3] for img in imgs],
              [img[:, :, 2] for img in imgs]]

    nuc_segmentations, median_nuc_sizes = segmentator.pred_nuclei(images[2])
    cell_segmentations, init_sizes = segmentator.pred_cells(
        images, median_nuc_sizes=median_nuc_sizes)
    cell_masks = []
    for i in range(len(cell_segmentations)):
        # label_cell returns (nuclei_mask, cell_mask); keep only the cell mask
        cell_mask = label_cell(nuc_segmentations[i],
                               cell_segmentations[i])[1]
        cell_masks.append(cell_mask)
    return cell_masks


def encode_binary_mask(mask_to_encode):  # name assumed; the original fragment omits the function header
    # RLE encode mask --
    encoded_mask = coco_mask.encode(mask_to_encode)[0]["counts"]

    # compress and base64 encoding --
    binary_str = zlib.compress(encoded_mask, zlib.Z_BEST_COMPRESSION)
    base64_str = base64.b64encode(binary_str)
    return base64_str.decode('ascii')
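
A hedged usage sketch for the fragment above (encode_binary_mask is only the name assumed for it here): the visible body expects a COCO-style input, i.e. a Fortran-ordered uint8 array of shape (H, W, 1), which is presumably what the missing head of the function prepares. In the Kaggle HPA single-cell setting the returned string is the per-cell RLE written into the submission:

import numpy as np

cell_id = 1                                        # one instance from a labelled cell mask
binary = (cell_mask == cell_id)                    # cell_mask: one element returned by get_masks above
prepared = np.asfortranarray(binary.reshape(*binary.shape, 1).astype(np.uint8))
rle_string = encode_binary_mask(prepared)          # base64-encoded, zlib-compressed COCO RLE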


NUC_MODEL = "./nuclei-model.pth"
CELL_MODEL = "./cell-model.pth"
segmentator = cellsegmentator.CellSegmentator(
    NUC_MODEL,
    CELL_MODEL,
    scale_factor=0.25,
    device="cuda:15",
    padding=True,
    multi_channel_model=True,
)


def read_sample_image_seg(filename):
    '''
    read individual images
    of different filters (R, B, Y)
    and stack them for segmentation.
    ---------------------------------
    Arguments:
    filename -- sample image file path
    
    Returns: