Example #1
# Imports shared by all four examples. pngread/pngsave and
# merge_multiple_detections are project helpers not shown on this page
# (the check_contrast kwarg suggests pngsave wraps skimage.io.imsave).
import math
import os

import cv2
import numpy as np
from scipy import ndimage as ndi
from skimage import exposure
from skimage.feature import peak_local_max
from skimage.segmentation import mark_boundaries, watershed
from skimage.util import img_as_int, img_as_ubyte


def merge_masks_diff_inputs(groupkeys, batchid=''):
    # Segment each input mask with a distance-transform watershed, then
    # try to merge the whole group into one mask before saving.
    os.makedirs('/tmp/results/{}/inputmerged/'.format(batchid),
                exist_ok=True)
    masks = []
    for file in groupkeys:
        binarymask = cv2.resize(pngread(file), (1024, 1024),
                                interpolation=cv2.INTER_AREA)
        distance = ndi.distance_transform_edt(binarymask)
        # indices=False returns a boolean peak mask (scikit-image < 0.20).
        local_maxi = peak_local_max(distance,
                                    labels=binarymask,
                                    footprint=np.ones((3, 3)),
                                    indices=False)
        markers = ndi.label(local_maxi)[0]
        masks.append(watershed(-distance, markers, mask=binarymask))
        try:
            # `outpaths` was undefined in the original; assuming the
            # group's mask paths were the intent.
            binarymask = merge_two_masks(groupkeys)
            distance = ndi.distance_transform_edt(binarymask)
            local_maxi = peak_local_max(distance,
                                        labels=binarymask,
                                        footprint=np.ones((3, 3)),
                                        indices=False)
            markers = ndi.label(local_maxi)[0]
            # Segmentation of the merged mask (the original left `mask`
            # unassigned on this path, breaking the save below).
            mask = watershed(-distance, markers, mask=binarymask)
        except Exception:
            # Fall back to the last single-input segmentation.
            mask = watershed(-distance, markers, mask=binarymask)
        savepath = os.path.join(
            '/tmp/results/' + batchid + '/inputmerged/',
            file.split('/')[-1].split('__')[0].replace(
                'merged_', 'inputmerged_') + '.jpg')
        pngsave(savepath, np.uint8(mask > 0))
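A minimal call, assuming the group's binary-mask files already exist on disk (the paths are hypothetical):

groupkeys = ['/tmp/results/batch1/merged_well1__mask.png',
             '/tmp/results/batch1/merged_well2__mask.png']
merge_masks_diff_inputs(groupkeys, batchid='batch1')
# -> /tmp/results/batch1/inputmerged/inputmerged_well1.jpg, ...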
Example #2
def merge_two_masks(maskpaths):
    # Same imports as Example #1. Segment each mask file with a
    # distance-transform watershed, then merge the per-file detections.
    masks = []
    for mpath in maskpaths:
        binarymask = pngread(mpath)
        distance = ndi.distance_transform_edt(binarymask)
        local_maxi = peak_local_max(distance,
                                    labels=binarymask,
                                    footprint=np.ones((3, 3)),
                                    indices=False)
        markers = ndi.label(local_maxi)[0]
        masks.append(watershed(-distance, markers, mask=binarymask))
    # merge_multiple_detections is a project helper (see sketch below).
    mask = merge_multiple_detections(masks)
    return mask
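merge_multiple_detections itself is not shown on this page. A minimal sketch of one plausible behaviour, assuming it unions the per-file detections and relabels connected components; the real project helper may well differ:

def merge_multiple_detections(masks):
    # Hypothetical stand-in: OR the binarised label images together,
    # then relabel the connected components of the union.
    union = np.zeros_like(masks[0], dtype=bool)
    for m in masks:
        union |= (m > 0)
    return ndi.label(union)[0]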
Example #3
def createmultipleinputs(inputpath):
    # Pad the input to a square, then build 1024x1024 and 720x720
    # variants plus preprocessed copies of each.
    im = pngread(inputpath)
    if len(im.shape) == 3:
        print('Image should be grayscale but had shape {}; '
              'converting automatically'.format(im.shape))
        im = np.sum(im, 2)  # collapse the channels into one plane
    # Stretch intensities to a 15-bit range and store as uint16.
    im = np.uint16(
        img_as_int(exposure.rescale_intensity(im,
                                              out_range=(0, 2**15 - 1))))
    imshape = im.shape
    edgediff = np.max(imshape) - np.min(imshape)
    orig = im
    if imshape[1] > imshape[0]:
        # Wider than tall: pad top and bottom with black.
        orig = cv2.copyMakeBorder(im,
                                  math.ceil(edgediff / 2),
                                  math.ceil(edgediff / 2),
                                  0,
                                  0,
                                  cv2.BORDER_CONSTANT,
                                  value=[0, 0, 0])
    if imshape[0] > imshape[1]:
        # Taller than wide: pad left and right with black.
        orig = cv2.copyMakeBorder(im,
                                  0,
                                  0,
                                  math.ceil(edgediff / 2),
                                  math.ceil(edgediff / 2),
                                  cv2.BORDER_CONSTANT,
                                  value=[0, 0, 0])

    # Resize to the two network input sizes and preprocess both.
    # preproc is a project helper (model-specific preprocessing).
    im1024 = cv2.resize(orig, (1024, 1024), interpolation=cv2.INTER_AREA)
    im720 = cv2.resize(orig, (720, 720), interpolation=cv2.INTER_AREA)
    im1024preproc = preproc(im1024)
    im720preproc = preproc(im720)
    return [orig, im1024preproc, im720preproc, im1024, im720]
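A quick usage sketch; the input path is hypothetical and preproc is whatever preprocessing the project applies:

orig, im1024p, im720p, im1024, im720 = createmultipleinputs(
    '/tmp/input/cells.png')
print(im1024.shape, im720.shape)  # (1024, 1024) (720, 720)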
Example #4
def proccesshelafiles(f, s3=s3, sess=sess, bucket=bucket, prefix=prefix):
    # Same imports as Example #1; s3/sess/bucket/prefix default to
    # notebook-level globals. Channel prefixes follow the SageMaker
    # semantic-segmentation data layout.
    train_channel = prefix + '/train'
    validation_channel = prefix + '/validation'
    train_annotation_channel = prefix + '/train_annotation'
    validation_annotation_channel = prefix + '/validation_annotation'

    def prep_raw(key):
        # Download a raw image, stretch it to the full range of its bit
        # depth less one bit, resize to 1024x1024 and save as 8-bit.
        jpgpath = '/tmp/hela_' + key.split('/')[-1].split('.')[0] + '_raw.jpg'
        s3.meta.client.download_file('meadata', key, jpgpath)
        img = pngread(jpgpath)
        bits = int(''.join(filter(str.isdigit, str(img.dtype)))) - 1
        image = exposure.rescale_intensity(img, out_range=(0, 2**bits - 1))
        image = cv2.resize(image, (1024, 1024),
                           interpolation=cv2.INTER_AREA)
        pngsave(jpgpath, img_as_ubyte(image))
        return jpgpath

    def prep_annotation(key):
        # Download a label image, draw black outlines between touching
        # instances, and save a binary 1024x1024 mask.
        pngpath = '/tmp/hela_' + key.split('/')[-1].split('.')[0] + '_raw.png'
        s3.meta.client.download_file('meadata', key, pngpath)
        im1 = pngread(pngpath)
        bits = int(''.join(filter(str.isdigit, str(im1.dtype)))) - 1
        image = img_as_ubyte(
            exposure.rescale_intensity(im1, out_range=(0, 2**bits - 1)))
        im = mark_boundaries(image,
                             im1,
                             color=[0, 0, 0],
                             outline_color=[0, 0, 0],
                             mode='outer',
                             background_label=0)
        im2 = img_as_int(im)
        # Sum in int32 so bright pixels cannot overflow int16.
        im3 = np.uint8(im2.sum(axis=2, dtype=np.int32) > 0)
        im3 = cv2.resize(im3, (1024, 1024), interpolation=cv2.INTER_AREA)
        pngsave(pngpath, im3, check_contrast=False)
        return pngpath

    # The train and val branches were identical apart from the upload
    # target, so they share the helpers above.
    if 'jpg' in f:
        if 'segproj/hela_dataset_training_data/train/' in f:
            sess.upload_data(path=prep_raw(f), bucket=bucket,
                             key_prefix=train_channel)
        elif 'segproj/hela_dataset_training_data/val/' in f:
            sess.upload_data(path=prep_raw(f), bucket=bucket,
                             key_prefix=validation_channel)
    elif 'png' in f:
        if 'segproj/hela_dataset_training_data/train_annotation/' in f:
            sess.upload_data(path=prep_annotation(f), bucket=bucket,
                             key_prefix=train_annotation_channel)
        elif 'segproj/hela_dataset_training_data/val_annotation/' in f:
            sess.upload_data(path=prep_annotation(f), bucket=bucket,
                             key_prefix=validation_annotation_channel)
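proccesshelafiles relies on notebook-level globals for its defaults. One plausible driver, sketched under the assumption that boto3 and SageMaker are configured for the 'meadata' bucket seen above (the prefix value is hypothetical):

import boto3
import sagemaker

s3 = boto3.resource('s3')
sess = sagemaker.Session()
bucket = sess.default_bucket()
prefix = 'hela-segmentation'  # hypothetical output prefix

# Walk every key under the dataset prefix and process each file.
for obj in s3.Bucket('meadata').objects.filter(
        Prefix='segproj/hela_dataset_training_data/'):
    proccesshelafiles(obj.key, s3=s3, sess=sess,
                      bucket=bucket, prefix=prefix)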