Example #1
from typing import Tuple

import numpy as np
import matplotlib.pyplot as plt
from PIL import Image
from skimage.color import rgb2hed, hed2rgb


def render_hed(patch_path, figsize: Tuple[int, int] = (11, 3)) -> None:
    # read the patch and make sure it is 3-channel RGB
    patch = np.asarray(Image.open(patch_path).convert("RGB"))
    # convert from RGB to HED
    ihc_hed = rgb2hed(patch)
    # clip negative stain values
    ihc_hed[ihc_hed < 0] = 0
    # rebuild an RGB rendering of each of the H, E and D channels
    null = np.zeros_like(ihc_hed[:, :, 0])
    ihc_h = hed2rgb(np.stack((ihc_hed[:, :, 0], null, null),
                             axis=-1))  # Hematoxylin channel
    ihc_e = hed2rgb(np.stack((null, ihc_hed[:, :, 1], null), axis=-1))  # Eosin
    ihc_d = hed2rgb(np.stack((null, null, ihc_hed[:, :, 2]), axis=-1))  # DAB
    # plot the original patch next to the three separated stains
    fig, axes = plt.subplots(1, 4, figsize=figsize, sharex=True, sharey=True)
    ax = axes.ravel()

    ax[0].imshow(patch)
    ax[0].set_title("Original patch")

    ax[1].imshow(ihc_h)
    ax[1].set_title("Hematoxylin")

    ax[2].imshow(ihc_e)
    ax[2].set_title("Eosin")

    ax[3].imshow(ihc_d)
    ax[3].set_title("Diaminobenzidine")

    for a in ax.ravel():
        a.axis('off')
    fig.tight_layout()
    plt.show()
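A minimal usage sketch for render_hed; the file name patch.png below is a placeholder, not a path from the original source.

render_hed("patch.png")                   # default 11 x 3 inch figure
render_hed("patch.png", figsize=(16, 4))  # wider rendering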
Example #2
 def transform(img):
     # Colour augmentation by breaking the image apart into H, E & DAB stains and modifying their concentrations.
     hed = skcolor.rgb2hed(img / 255.0)
     alphas = np.random.normal(size=(1, 1, 3), loc=mean, scale=std)
     hed = hed * alphas
     img = skcolor.hed2rgb(hed).astype(np.float32)
     return img
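In transform above, mean and std are free variables captured from the enclosing scope. A self-contained sketch of the same idea, with the two values made explicit parameters (the function name and defaults are illustrative assumptions, not from the original source) and the result clipped back to a valid range:

import numpy as np
from skimage import color as skcolor

def hed_scale_augment(img, mean=1.0, std=0.03, rng=None):
    # img: uint8 RGB image; each HED stain channel is scaled by its own random factor
    rng = np.random.default_rng() if rng is None else rng
    hed = skcolor.rgb2hed(img / 255.0)
    alphas = rng.normal(loc=mean, scale=std, size=(1, 1, 3))
    rgb = skcolor.hed2rgb(hed * alphas)
    return np.clip(rgb, 0.0, 1.0).astype(np.float32)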
Example #3
def colour_augmentation(image, h_mean, h_std, d_mean, d_std, e_mean, e_std):

    ihc_hed = rgb2hed(image)
    Im_size = image.shape[1]

    # In HED space the channels are haematoxylin (0), eosin (1) and DAB (2).
    h = ihc_hed[:, :, 0]
    e = ihc_hed[:, :, 1]
    d = ihc_hed[:, :, 2]

    hFlat = np.ravel(h, order='A')
    eFlat = np.ravel(e, order='A')
    dFlat = np.ravel(d, order='A')

    # Draw one random offset per stain.
    hmod = random.normalvariate(h_mean, h_std)
    emod = random.normalvariate(e_mean, e_std)
    dmod = random.normalvariate(d_mean, d_std)

    # Shift every pixel of each stain channel by its offset.
    for x in range(len(h.ravel())):
        hFlat[x] = hFlat[x] + hmod
        eFlat[x] = eFlat[x] + emod
        dFlat[x] = dFlat[x] + dmod

    ##############
    h = hFlat.reshape(Im_size, Im_size)
    e = eFlat.reshape(Im_size, Im_size)
    d = dFlat.reshape(Im_size, Im_size)

    # Recombine in H, E, D order and convert back to 8-bit RGB.
    zdh = hed2rgb(np.stack((h, e, d), 2))
    zdh_8bit = (np.clip(zdh, 0, 1) * 255).astype('uint8')
    return zdh_8bit
Example #4
def stain_augment(hed_patches):

  # Calculate max H, E, D range in dataset - with no colour normalisation.
  max_hed = [-2.0]*3
  min_hed = [1.0]*3
  # Keep track of how many augments are added for each index.
  # Integer at i corresponds to how many repeats for that image.
  repeats = np.ones((len(hed_patches)))

  for p in hed_patches:
    hed = np.dsplit(p, 3)
    for channel in range(3):
        # A little buffer to avoid getting unusual combinations at ends of range
        max_hed[channel] = max(np.max(hed[channel]), max_hed[channel])# - 0.0001
        min_hed[channel] = min(np.min(hed[channel]), min_hed[channel])# + 0.0001

  # print("Calculated max HED range:", max_hed)
  # print("Calculated min HED range:", min_hed)

  # Tweak the H, E, and DAB channels within the range of the dataset (one channel at a time).
  tweaked_patches = []

  for i, p in enumerate(hed_patches):
      tweaked_patches.append(p) # Always include the original
      channels = np.dsplit(p, 3)
      # Subtract and add by interval until hit min and max in any one pixel value
      interval = 0.05
      current = interval
      not_reached_max, not_reached_min = [True]*3, [True]*3
      while(True in (not_reached_max + not_reached_min)):
          for ch in range(3):
              if not_reached_max[ch]:
                  ch_copy = np.copy(channels)
                  new_channel = np.add(ch_copy[ch], current)
                  if np.count_nonzero(np.where(new_channel > max_hed[ch], 1, 0)) == 0:
                      ch_copy[ch] = new_channel
                      tweaked_patches.append(np.dstack(ch_copy))
                      repeats[i] += 1
                  else:
                      not_reached_max[ch] = False
              if not_reached_min[ch]:
                  ch_copy = np.copy(channels)
                  new_channel = np.subtract(ch_copy[ch], current)
                  if np.count_nonzero(np.where(new_channel < min_hed[ch], 1, 0)) == 0:
                      ch_copy[ch] = new_channel
                      tweaked_patches.append(np.dstack(ch_copy))
                      repeats[i] += 1
                  else:
                      not_reached_min[ch] = False
          current += interval

  del hed_patches
  new_patches = []
  for p in tweaked_patches:
    new_patches.append(hed2rgb(p))

  del tweaked_patches

  return np.array(new_patches), repeats
Example #5
def color_aug(img):
    adj_add = np.array([[[0.02, 0.001, 0.15]]], dtype=np.float32)
    img2 = np.clip(
        hed2rgb(
            rgb2hed(img.transpose((1, 0, 2)) / 255.0) +
            np.random.uniform(-1.0, 1.0, (1, 1, 3)) * adj_add).transpose(
                (1, 0, 2)) * 255.0, 0.0, 255.0)
    return img2.astype(np.uint8)
Example #6
 def stain_aug(self, image, k=0.03):
     # input must be rgb
     hed_image = rgb2hed(image)
     w = 1 - k + np.random.rand(3) * k * 2
     b = -k + np.random.rand(3) * k * 2
     for i in range(3):
         hed_image[:, :, i] = w[i] * hed_image[:, :, i] + b[i]
     # clip to [0, 1] before the 8-bit conversion so perturbed values cannot wrap around
     return (np.clip(hed2rgb(hed_image), 0, 1) * 255).astype(np.uint8)
Example #7
 def _augment(self, img, s):
     alpha = [0.95, 1.05]
     bias = [-0.01,  0.01] # -0.05,  0.05
     hed_img = rgb2hed(img)
     for channel in range(3):
         hed_img[..., channel] *= random.choice(np.arange(alpha[0], alpha[1], 0.01))
         hed_img[..., channel] += random.choice(np.arange(bias[0], bias[1], 0.01))
     # clip to [0, 1] so img_as_ubyte does not reject out-of-range values
     return img_as_ubyte(np.clip(hed2rgb(hed_img), 0, 1))
Example #8
def random_hed_ratio(img,
                     bias_range=0.025,
                     coef_range=0.025,
                     random_state=None):
    rstate = check_random_state(random_state)
    hed = rgb2hed(img)
    bias = rstate.uniform(-bias_range, bias_range, 3)
    coefs = rstate.uniform(1 - coef_range, 1 + coef_range, 3)
    return np.clip(hed2rgb(hed * coefs + bias), 0, 1)
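random_hed_ratio above presumably relies on sklearn.utils.check_random_state for its random_state handling. A usage sketch with scikit-image's bundled IHC sample image (the parameter values here are illustrative, not from the original source):

from skimage import data

ihc = data.immunohistochemistry()          # uint8 RGB sample image
augmented = random_hed_ratio(ihc, bias_range=0.02, coef_range=0.02, random_state=42)
# augmented is a float image clipped to [0, 1]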
Example #9
 def he_aug(result):
     hed = rgb2hed(np.clip(result.transpose(1, 2, 0), -1.0,
                           1.0))
     ah = 0.95 + random.random() * 0.1
     bh = -0.05 + random.random() * 0.1
     ae = 0.95 + random.random() * 0.1
     be = -0.05 + random.random() * 0.1
     hed[:, :, 0] = ah * hed[:, :, 0] + bh
     hed[:, :, 1] = ae * hed[:, :, 1] + be
     result = hed2rgb(hed).transpose(2, 0, 1)
     return result
Example #10
    def adjust_HED(img, alpha, betti):
        img = np.array(img)

        s = np.reshape(color.rgb2hed(img), (-1, 3))
        ns = alpha * s + betti  # perturbations on HED color space
        nimg = color.hed2rgb(np.reshape(ns, img.shape))

        imin = nimg.min()
        imax = nimg.max()
        rsimg = (255 * (nimg - imin) / (imax - imin)).astype(
            'uint8')  # rescale to [0,255]
        # transfer to PIL image
        return Image.fromarray(rsimg)
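adjust_HED above leaves the sampling of alpha and betti to the caller. A hedged usage sketch, assuming adjust_HED is accessible as a plain function and using an illustrative jitter strength of 0.05 (patch.png is a placeholder file name):

import numpy as np
from PIL import Image

theta = 0.05                                               # illustrative jitter strength
alpha = np.random.uniform(1 - theta, 1 + theta, (1, 3))    # per-stain multiplicative factor
betti = np.random.uniform(-theta, theta, (1, 3))           # per-stain additive bias
augmented = adjust_HED(Image.open("patch.png"), alpha, betti)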
Example #11
    def _apply_(self, *image):
        res = ()
        n_img = 0
        for img in image:
            if n_img == 0:

                dec_img = color.rgb2hed(img)
                ### perturb each channel H, E, DAB
                for i in range(3):
                    k_i = self.params['k'][i]
                    b_i = self.params['b'][i]
                    dec_img[:,:,i] = GreyValuePerturbation(dec_img[:, :, i], k_i, b_i)
                sub_res = color.hed2rgb(dec_img).astype('uint8')

                ### Have to implement deconvolution of the deconvolution


            else:
                sub_res = img

            res += (sub_res,)
            n_img += 1
        return res
Example #13
 def test_hed_rgb_roundtrip(self):
     img_rgb = img_as_ubyte(self.img_rgb)
     assert_equal(img_as_ubyte(hed2rgb(rgb2hed(img_rgb))), img_rgb)
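The round-trip tests in these examples rely on hed2rgb being the inverse of rgb2hed. A standalone sketch of the same property on a synthetic float image (the random test image is purely illustrative):

import numpy as np
from skimage.color import rgb2hed, hed2rgb

rng = np.random.default_rng(0)
img_rgb = rng.uniform(0.05, 0.95, size=(32, 32, 3))   # float RGB, kept away from 0
np.testing.assert_array_almost_equal(hed2rgb(rgb2hed(img_rgb)), img_rgb)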
Example #14
def colour_augmentation(image,
                        h_mean=0,
                        h_std=0.035,
                        d_mean=0,
                        d_std=0.035,
                        e_mean=0,
                        e_std=0.035):
    '''
    Randomly augments the staining of an image by separating it into its H, E
    (and D) channels and modifying their values. Aims to produce plausible
    stain variation; used in custom augmentation.

    PARAMETERS
    ##########

    image - arbitrary RGB image (3 channel array), expected to be 8-bit

    h_mean, d_mean, e_mean - average value added to each stain, default 0

    h_std, d_std, e_std - standard deviation of the random modifier, default 0.035

    RETURNS
    #######

    image - 8 bit RGB image with the same dimensions as the input image, with
            a modified stain
    '''

    ihc_hed = rgb2hed(image)
    Im_size = image.shape[1]

    # In HED space the channels are haematoxylin (0), eosin (1) and DAB (2).
    h = ihc_hed[:, :, 0]
    e = ihc_hed[:, :, 1]
    d = ihc_hed[:, :, 2]

    hFlat = np.ravel(h, order='A')
    eFlat = np.ravel(e, order='A')
    dFlat = np.ravel(d, order='A')

    # Draw one random offset per stain.
    hmod = random.normalvariate(h_mean, h_std)
    emod = random.normalvariate(e_mean, e_std)
    dmod = random.normalvariate(d_mean, d_std)

    # maskFlat = np.ravel(mask, order='A')

    # Shift every pixel of each stain channel by its offset.
    for x in range(len(h.ravel())):
        hFlat[x] = hFlat[x] + hmod
        eFlat[x] = eFlat[x] + emod
        dFlat[x] = dFlat[x] + dmod

    ##############
    h = hFlat.reshape(Im_size, Im_size)
    e = eFlat.reshape(Im_size, Im_size)
    d = dFlat.reshape(Im_size, Im_size)

    # Recombine in H, E, D order and convert back to 8-bit RGB.
    zdh = hed2rgb(np.stack((h, e, d), 2))
    zdh_8bit = (np.clip(zdh, 0, 1) * 255).astype('uint8')
    return zdh_8bit
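The per-pixel loop above only adds one scalar offset to each stain channel, so the same augmentation can be expressed with array operations. A vectorized sketch of the same idea (the function name is hypothetical; the clip before the 8-bit conversion is an added safeguard):

import random
import numpy as np
from skimage.color import rgb2hed, hed2rgb

def colour_augmentation_vec(image, h_std=0.035, e_std=0.035, d_std=0.035):
    ihc_hed = rgb2hed(image)
    # one random offset per stain channel (H, E, D), applied to every pixel
    offsets = np.array([random.normalvariate(0, s) for s in (h_std, e_std, d_std)])
    ihc_hed += offsets.reshape(1, 1, 3)
    rgb = hed2rgb(ihc_hed)
    return (np.clip(rgb, 0, 1) * 255).astype('uint8')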
Example #15
def data_aug_img(img, mu, sigma, deterministic=False, idraw=-1, jdraw=-1):
    # rotate small degree
    if np.random.rand(1)[0] < 0.9:
        img = img.transpose()
        if deterministic:
            img = rotate_img(img, 10.0)
        else:
            img = rotate_img(img, 45.0)
        img = img.transpose()

    # crop
    icut = APS - PS
    jcut = APS - PS
    if deterministic:
        if idraw < -0.5 or jdraw < -0.5:
            ioff = int(icut // 2)
            joff = int(jcut // 2)
        else:
            ioff = idraw
            joff = jdraw
    else:
        ioff = np.random.randint(MARGIN, icut + 1 - MARGIN)
        joff = np.random.randint(MARGIN, jcut + 1 - MARGIN)
    img = img[:, ioff:ioff + PS, joff:joff + PS]

    # adjust color
    if not deterministic:
        adj_add = np.array([[[0.15, 0.15, 0.02]]], dtype=np.float32)
        img = np.clip(hed2rgb( \
                rgb2hed(img.transpose((2, 1, 0)) / 255.0) + np.random.uniform(-1.0, 1.0, (1, 1, 3))*adj_add \
              ).transpose((2, 1, 0))*255.0, 0.0, 255.0)

    if not deterministic:
        adj_range = 0.1
        adj_add = 5
        rgb_mean = np.mean(img, axis=(1, 2), keepdims=True).astype(np.float32)
        adj_magn = np.random.uniform(1 - adj_range, 1 + adj_range,
                                     (3, 1, 1)).astype(np.float32)
        img = np.clip((img - rgb_mean) * adj_magn + rgb_mean +
                      np.random.uniform(-1.0, 1.0,
                                        (3, 1, 1)) * adj_add, 0.0, 255.0)

    # mirror and flip
    if np.random.rand(1)[0] < 0.5:
        img = img[:, ::-1, :]
    if np.random.rand(1)[0] < 0.5:
        img = img[:, :, ::-1]

    # transpose
    if np.random.rand(1)[0] < 0.5:
        img = img.transpose((0, 2, 1))

    ## scaling
    #if not deterministic:
    #    if np.random.rand(1)[0] < 0.1:
    #        iscale = 2*(np.random.rand(1)[0]-0.5)*0.05 + 1.0;
    #        jscale = 2*(np.random.rand(1)[0]-0.5)*0.05 + 1.0;
    #        img = misc.imresize(img.transpose().astype(np.uint8), \
    #                (int(img.shape[2]*jscale), int(img.shape[1]*iscale)) \
    #                ).transpose().astype(np.float32);

    img = zero_centering(img)
    img = (img / 255.0 - mu) / sigma

    return img
Example #16
 def test_hed_rgb_roundtrip(self):
     img_rgb = img_as_ubyte(self.img_rgb)
     with expected_warnings(['precision loss']):
         new = img_as_ubyte(hed2rgb(rgb2hed(img_rgb)))
     assert_equal(new, img_rgb)
Example #17
 def test_hed_rgb_float_roundtrip(self):
     img_in = self.img_stains
     img_out = rgb2hed(hed2rgb(img_in))
     assert_array_almost_equal(img_out, img_in)
Example #18
    def get_example(self, i):
        loop_count = 0
        while True:
            # select a triangle by the current fetch-mode
            if self.fetch_mode == 'area':
                slide_id, region_id, tri_id = self._get_random_index_all()
            elif self.fetch_mode == 'slide':
                if loop_count % 100 == 0:  # prevent bias
                    slide_id = random.randint(0, len(self.structure) - 1)
                region_id, tri_id = self._get_random_index_slide(slide_id)
            elif self.fetch_mode == 'label':
                if loop_count % 100 == 0:  # prevent bias
                    label = random.choice(self.labels[self.label_to_use])
                slide_id, region_id, tri_id = self._get_random_index_label(
                    label)
            elif self.fetch_mode == 'label-slide':
                if loop_count % 100 == 0:  # prevent bias
                    label = random.choice(self.labels[self.label_to_use])
                    while True:
                        slide_id = random.randint(0, len(self.structure) - 1)
                        if len(self.regions_of_label_slide[self.label_to_use]
                               [label][slide_id]) > 0:
                            break
                region_id, tri_id = self._get_random_index_label_slide(
                    label, slide_id)
            loop_count += 1

            # select a point within the triangle as the center position of rectangle
            a1 = random.random()
            a2 = random.random()
            if a1 + a2 > 1.0:
                a1, a2 = 1.0 - a1, 1.0 - a2
            posx = (1 - a1 - a2) * self.triangulation[slide_id][region_id][tri_id][0][0] + \
                   a1 * self.triangulation[slide_id][region_id][tri_id][1][0] + \
                   a2 * self.triangulation[slide_id][region_id][tri_id][2][0]
            posy = (1 - a1 - a2) * self.triangulation[slide_id][region_id][tri_id][0][1] + \
                   a1 * self.triangulation[slide_id][region_id][tri_id][1][1] + \
                   a2 * self.triangulation[slide_id][region_id][tri_id][2][1]

            src_size = self.src_sizes[slide_id]
            if self.scale_augmentation:
                src_size *= 0.8 + random.random() * 0.4

            if self.rotation:
                angle = random.random() * math.pi * 2
            else:
                angle = -math.pi / 4
            angles = [
                angle, angle + math.pi / 2, angle + math.pi,
                angle + math.pi / 2 * 3
            ]
            discard = False
            corners = []
            for theta in angles:
                cx = posx + src_size / math.sqrt(2) * math.cos(theta)
                cy = posy + src_size / math.sqrt(2) * math.sin(theta)
                corners.append((cx, cy))
                if not self.point_in_region(slide_id, region_id, cx, cy):
                    discard = True
                    break
            if not discard:
                break

        self.fetch_count[slide_id][region_id] += 1
        self.total_fetch_count += 1
        self.total_loop_count += loop_count

        # cropping with rotation
        crop_size = int(src_size * 2**0.5 *
                        max(abs(math.cos(angle)), abs(math.sin(angle))))
        cropped = np.asarray(self.slides[slide_id].read_region(
            (int(posx - crop_size / 2), int(posy - crop_size / 2)), 0,
            (crop_size, crop_size)),
                             dtype=np.float32)[:, :, :3]
        mat = cv2.getRotationMatrix2D((crop_size / 2, crop_size / 2),
                                      45 + 360 * angle / (2 * math.pi), 1)
        rotated = cv2.warpAffine(cropped, mat, (crop_size, crop_size))

        result = rotated[int(crop_size/2-src_size/2):int(crop_size/2+src_size/2),\
                         int(crop_size/2-src_size/2):int(crop_size/2+src_size/2)]
        result = cv2.resize(result,
                            (self.patch_size, self.patch_size)).transpose(
                                (2, 0, 1))

        if self.flip and random.randint(0, 1):
            result = result[:, :, ::-1]
        result *= (1.0 / 255.0)

        # color matching
        if self.use_color_matching:
            result = self.match_color(result.transpose(1, 2,
                                                       0)).transpose(2, 0, 1)

        # blurring effect
        if self.blur > 0:
            blur_size = random.randint(1, self.blur)
            result = cv2.blur(result.transpose(1, 2, 0),
                              (blur_size, blur_size)).transpose((2, 0, 1))

        if self.he_augmentation:
            hed = rgb2hed(np.clip(result.transpose(1, 2, 0), -1.0, 1.0))
            ah = 0.95 + random.random() * 0.1
            bh = -0.05 + random.random() * 0.1
            ae = 0.95 + random.random() * 0.1
            be = -0.05 + random.random() * 0.1
            hed[:, :, 0] = ah * hed[:, :, 0] + bh
            hed[:, :, 1] = ae * hed[:, :, 1] + be
            result = hed2rgb(hed).transpose(2, 0, 1)
            result = np.clip(result, 0, 1.0).astype(np.float32)

        # debug
        if self.dump_patch is not None:
            from PIL import Image
            im = Image.fromarray(np.uint8(result.transpose((1, 2, 0)) * 255))
            im.save(
                './%s/%d_%d-%d-%d.png' %
                (self.dump_patch,
                 self.label_of_region[self.label_to_use][slide_id][region_id],
                 slide_id, region_id, i))

        return result, self.label_of_region[
            self.label_to_use][slide_id][region_id], (slide_id, region_id,
                                                      posx, posy)
Example #19
 def test_hed_rgb_float_roundtrip(self, channel_axis):
     img_in = self.img_stains
     img_in = np.moveaxis(img_in, source=-1, destination=channel_axis)
     img_out = rgb2hed(hed2rgb(img_in, channel_axis=channel_axis),
                       channel_axis=channel_axis)
     assert_array_almost_equal(img_out, img_in)
Example #20
 def test_hed_rgb_roundtrip(self):
     img_rgb = img_as_ubyte(self.img_rgb)
     new = img_as_ubyte(hed2rgb(rgb2hed(img_rgb)))
     assert_equal(new, img_rgb)
Example #21
 def test_hed_rgb_roundtrip(self):
     img_rgb = self.img_rgb
     assert_equal(img_as_ubyte(hed2rgb(rgb2hed(img_rgb))), img_rgb)
Example #22
from PIL import Image
from skimage.color import hed2rgb, rgb2hed
import numpy as np
import glob
import os

os.makedirs('image_augmented', exist_ok=True)  # make sure the output directory exists
for filepath in glob.iglob('image/*.png'):
    img = np.array(Image.open(filepath).convert('RGB')).astype(np.float32)
    adj_add = np.array([[[0.09, 0.09, 0.007]]], dtype=np.float32)
    img = hed2rgb(
        rgb2hed(img / 255.0) +
        np.clip(np.random.normal(0, 0.3, (1, 1, 3)), -1, 1) * adj_add) * 255.0

    adj_range = 0.03
    adj_add = 6
    rgb_mean = np.mean(img, axis=(0, 1), keepdims=True).astype(np.float32)
    adj_magn = np.random.uniform(1 - adj_range, 1 + adj_range,
                                 (1, 1, 3)).astype(np.float32)
    img = (img - rgb_mean) * adj_magn + rgb_mean + np.random.uniform(
        -1.0, 1.0, (1, 1, 3)) * adj_add

    img = np.clip(img, 0, 255).astype(np.uint8)
    Image.fromarray(img).save(
        os.path.join('image_augmented', os.path.basename(filepath)))
"""
import numpy as np
import matplotlib.pyplot as plt

from skimage import data
from skimage.color import rgb2hed, hed2rgb

# Example IHC image
ihc_rgb = data.immunohistochemistry()

# Separate the stains from the IHC image
ihc_hed = rgb2hed(ihc_rgb)

# Create an RGB image for each of the stains
null = np.zeros_like(ihc_hed[:, :, 0])
ihc_h = hed2rgb(np.stack((ihc_hed[:, :, 0], null, null), axis=-1))
ihc_e = hed2rgb(np.stack((null, ihc_hed[:, :, 1], null), axis=-1))
ihc_d = hed2rgb(np.stack((null, null, ihc_hed[:, :, 2]), axis=-1))

# Display
fig, axes = plt.subplots(2, 2, figsize=(7, 6), sharex=True, sharey=True)
ax = axes.ravel()

ax[0].imshow(ihc_rgb)
ax[0].set_title("Original image")

ax[1].imshow(ihc_h)
ax[1].set_title("Hematoxylin")

ax[2].imshow(ihc_e)
ax[2].set_title("Eosin")  # Note that there is no Eosin stain in this image
Example #24
def segment_histo(image_path, colour1, colour2):
    image = io.imread(image_path)
    image_hed = rgb2hed(image)

    cmap_custom = LinearSegmentedColormap.from_list('mycmap',
                                                    [colour1, colour2])
    final_seg = image_hed[:, :, 0]

    fig, image_original = plt.subplots(figsize=(4, 3))
    image_original.imshow(image)
    image_original.set_title('Original Image')
    image_original.axis('off')

    fig, human_seperation = plt.subplots(figsize=(4, 3))
    human_seperation.imshow(final_seg, cmap=cmap_custom)
    human_seperation.set_title('colour deconvolution')
    human_seperation.axis('off')

    # Return a histogram of the segmented image split between RGB channels.

    # hed2rgb expects a 3-channel stain image, so pad the H channel with empty E and D channels.
    null = np.zeros_like(final_seg)
    final_seg_rgb = hed2rgb(np.stack((final_seg, null, null), axis=-1))

    fig, rgb_histo = plt.subplots(nrows=3, ncols=1, figsize=(5, 8))

    # Plot the histogram and cumulative histogram for each colour channel.
    for c, c_color in enumerate(('red', 'green', 'blue')):
        img_hist, bins = exposure.histogram(image[..., c])
        rgb_histo[c].plot(bins, img_hist / img_hist.max())
        img_cdf, bins = exposure.cumulative_distribution(image[..., c])
        rgb_histo[c].plot(bins, img_cdf)
        rgb_histo[c].set_ylabel(c_color)

    rgb_histo[0].set_title('Histogram of RGB channels (original image)')

    # Save the segmented image to disk (not crucial).
    # There is significant precision loss here; look into preserving it.
    io.imsave('./images/' + image_path[9:17] + '_seg.jpg', final_seg_rgb)

    # Derive the heat level based on the thermal image; this will be the FLC input.
    # This happens only when a human is detected, otherwise the radiator power %
    # is set to 0.
    human_threshold = -0.5681
    if final_seg.max() > human_threshold:
        controller_input = flc_input(image.mean())
        flc_input_file = open(
            '../fuzzyLogic/Juzzy/Project/Juzzy_V2_Source/src/radiatorFLC/FLCinput.txt',
            'w+')
        flc_input_file.write(str(controller_input))
        flc_input_file.close()
        print(controller_input)
    else:
        controller_input = 10
        flc_input_file = open(
            '../fuzzyLogic/Juzzy/Project/Juzzy_V2_Source/src/radiatorFLC/FLCinput.txt',
            'w+')
        flc_input_file.write(str(controller_input))
        flc_input_file.close()
        print(controller_input)
        print('NO HUMAN DETECTED IN ROOM \nHEATING DEACTIVATED')
        exit()

    return human_seperation, final_seg, rgb_histo
Example #26
def processImage(im, options):
    """@brief   Finds the colors present on the input image

    @param  im      LIST    input image
    @param  options DICTIONARY  dictionary with options

    @return colors  LIST    colors of centroids of kmeans object
    @return indexes LIST    indexes of centroids with the same label
    @return kmeans  KMeans  object of the class KMeans
    """

#########################################################
##  YOU MUST ADAPT THE CODE IN THIS FUNCTIONS TO:
##  1- CHANGE THE IMAGE TO THE CORRESPONDING COLOR SPACE FOR KMEANS
##  2- APPLY KMEANS ACCORDING TO 'OPTIONS' PARAMETER
##  3- GET THE NAME LABELS DETECTED ON THE 11 DIMENSIONAL SPACE
#########################################################

##  1- CHANGE THE IMAGE TO THE CORRESPONDING COLOR SPACE FOR KMEANS
    if options['colorspace'].lower() == 'ColorNaming'.lower():
        im = cn.ImColorNamingTSELabDescriptor(im)
    elif options['colorspace'].lower() == 'RGB'.lower():
        pass
    elif options['colorspace'].lower() == 'Lab'.lower():
        im = color.rgb2lab(im)
    elif options['colorspace'].lower() == 'HED'.lower():
        im = color.rgb2hed(im)
    elif options['colorspace'].lower() == 'HSV'.lower():
        im = color.rgb2hsv(im)
    '''
    elif options['colorspace'].lower() == 'opponent'.lower():
        im = color.rgb2lab(im)
    elif options['colorspace'].lower() == 'HSL'.lower():
        im = color.rgb2(im)
    elif options['colorspace'].lower() == 'Lab'.lower():
        im = color.rgb2lab(im)
    '''


##  2- APPLY KMEANS ACCORDING TO 'OPTIONS' PARAMETER
    if options['K'] < 2:  # find the best K
        kmeans = km.KMeans(im, 0, options)
        kmeans.bestK()
    else:
        kmeans = km.KMeans(im, options['K'], options)
        kmeans.run()

##  3- GET THE NAME LABELS DETECTED ON THE 11 DIMENSIONAL SPACE
    if options['colorspace'].lower() == 'Lab'.lower():
        kmeans.centroids = cn.ImColorNamingTSELabDescriptor((color.lab2rgb(kmeans.centroids.reshape(1,len(kmeans.centroids),3))*255).reshape(len(kmeans.centroids),3))
    elif options['colorspace'].lower() == 'HED'.lower():
        kmeans.centroids = cn.ImColorNamingTSELabDescriptor(color.hed2rgb(kmeans.centroids.reshape(1,len(kmeans.centroids),3)).reshape(len(kmeans.centroids),3))
    elif options['colorspace'].lower() == 'HSV'.lower():
        kmeans.centroids = cn.ImColorNamingTSELabDescriptor((color.hsv2rgb(kmeans.centroids.reshape(1,len(kmeans.centroids),3))*255).reshape(len(kmeans.centroids),3))
    elif options['colorspace'].lower() == 'RGB'.lower():
        kmeans.centroids = cn.ImColorNamingTSELabDescriptor(kmeans.centroids)

#########################################################
##  THE FOLLOWING 2 END LINES SHOULD BE KEPT UNMODIFIED
#########################################################
    colors, which = getLabels(kmeans, options)
    return colors, which, kmeans
Example #27
 def test_hed_rgb_float_roundtrip(self):
     img_rgb = img_as_float(self.img_rgb)
     assert_array_almost_equal(hed2rgb(rgb2hed(img_rgb)), img_rgb)
Example #28
    def random_transform(self, x, seed=None):
        """Randomly augment a single image tensor.

        # Arguments
            x: 3D tensor, single image.
            seed: random seed.

        # Returns
            A randomly transformed version of the input (same shape).
        """
        # x is a single image, so it doesn't have image number at index 0
        img_row_axis = self.row_axis - 1
        img_col_axis = self.col_axis - 1
        img_channel_axis = self.channel_axis - 1

        if seed is not None:
            np.random.seed(seed)

        # use composition of homographies
        # to generate final transform that needs to be applied
        if self.rotation_range:
            theta = np.pi / 180 * np.random.uniform(-self.rotation_range,
                                                    self.rotation_range)
        else:
            theta = 0

        if self.height_shift_range:
            tx = np.random.uniform(
                -self.height_shift_range,
                self.height_shift_range) * x.shape[img_row_axis]
        else:
            tx = 0

        if self.width_shift_range:
            ty = np.random.uniform(
                -self.width_shift_range,
                self.width_shift_range) * x.shape[img_col_axis]
        else:
            ty = 0

        if self.shear_range:
            shear = np.random.uniform(-self.shear_range, self.shear_range)
        else:
            shear = 0

        if self.zoom_range[0] == 1 and self.zoom_range[1] == 1:
            zx, zy = 1, 1
        else:
            zx, zy = np.random.uniform(self.zoom_range[0], self.zoom_range[1],
                                       2)

        transform_matrix = None
        if theta != 0:
            rotation_matrix = np.array([[np.cos(theta), -np.sin(theta), 0],
                                        [np.sin(theta),
                                         np.cos(theta), 0], [0, 0, 1]])
            transform_matrix = rotation_matrix

        if tx != 0 or ty != 0:
            shift_matrix = np.array([[1, 0, tx], [0, 1, ty], [0, 0, 1]])
            transform_matrix = shift_matrix if transform_matrix is None else np.dot(
                transform_matrix, shift_matrix)

        if shear != 0:
            shear_matrix = np.array([[1, -np.sin(shear), 0],
                                     [0, np.cos(shear), 0], [0, 0, 1]])
            transform_matrix = shear_matrix if transform_matrix is None else np.dot(
                transform_matrix, shear_matrix)

        if zx != 1 or zy != 1:
            zoom_matrix = np.array([[zx, 0, 0], [0, zy, 0], [0, 0, 1]])
            transform_matrix = zoom_matrix if transform_matrix is None else np.dot(
                transform_matrix, zoom_matrix)

        if transform_matrix is not None:
            h, w = x.shape[img_row_axis], x.shape[img_col_axis]
            transform_matrix = transform_matrix_offset_center(
                transform_matrix, h, w)
            x = apply_transform(x,
                                transform_matrix,
                                img_channel_axis,
                                fill_mode=self.fill_mode,
                                cval=self.cval)

        if self.channel_shift_range != 0:
            x = random_channel_shift(x, self.channel_shift_range,
                                     img_channel_axis)
        if self.horizontal_flip:
            if np.random.random() < 0.5:
                x = flip_axis(x, img_col_axis)

        if self.vertical_flip:
            if np.random.random() < 0.5:
                x = flip_axis(x, img_row_axis)

        if self.stain_transformation:
            if np.random.random() > 0.5:
                x = color.rgb2hed(x)
                scale = np.random.uniform(low=0.95, high=1.05)
                x = scale * x
                x = color.hed2rgb(x)
            else:
                pass

        return x
Example #29
 def transform(img):
     hed = skcolour.rgb2hed(img / 255.0)
     alphas = np.random.normal(size=(1, 1, 3), loc=mean, scale=std)
     hed = hed * alphas
     img = skcolour.hed2rgb(hed).astype(np.float32)
     return img
Example #30
    def fetch_patches_from_slide(self,
                                 wsi_filepath,
                                 count,
                                 src_size=512,
                                 patch_size=512,
                                 tissue_threshold=0.8,
                                 blur=0,
                                 he_augmentation=False):
        slide = OpenSlide(wsi_filepath)

        # load image with lower resolution
        desirable_long_edge = 2000

        level_downsample = 0
        for i, (w, h) in enumerate(slide.level_dimensions):
            if abs(max(w, h) - desirable_long_edge) < \
                    abs(max(slide.level_dimensions[level_downsample]) - desirable_long_edge):
                level_downsample = i
        magnification = slide.level_downsamples[level_downsample]
        image_downsample = slide.read_region(
            (0, 0), level_downsample,
            (slide.level_dimensions[level_downsample]))

        # Otsu binarization
        src = np.average(np.asarray(image_downsample,
                                    dtype=np.uint8)[:, :, :3],
                         axis=2)
        src = 255 - cv2.convertScaleAbs(src)
        th, binarized = cv2.threshold(src, 0, 255,
                                      cv2.THRESH_BINARY + cv2.THRESH_OTSU)

        # dilation
        kernel = np.ones((2, 2), np.uint8)
        dilated = cv2.dilate(binarized, kernel, iterations=1)

        ret_images = []
        # scale the requested source patch size down to the working resolution
        src_downsampled_size = src_size / magnification
        padding = int(src_size / 2**0.5 / magnification) + 10
        for i in range(count):
            while True:
                cx = random.randint(
                    padding,
                    slide.level_dimensions[level_downsample][0] - padding)
                cy = random.randint(
                    padding,
                    slide.level_dimensions[level_downsample][1] - padding)
                angle = random.random() * 2 * math.pi

                # crop
                crop_size = int(
                    src_downsampled_size * 2**0.5 *
                    max(abs(math.cos(angle)), abs(math.sin(angle))))
                cropped = dilated[int(cy - crop_size / 2):int(cy +
                                                              crop_size / 2),
                                  int(cx - crop_size / 2):int(cx +
                                                              crop_size / 2)]
                mat = cv2.getRotationMatrix2D((crop_size / 2, crop_size / 2),
                                              45 + 360 * angle / (2 * math.pi),
                                              1)
                rotated = cv2.warpAffine(cropped, mat, (crop_size, crop_size))

                result = rotated[
                         int(crop_size / 2 - src_downsampled_size / 2):int(crop_size / 2 + src_downsampled_size / 2), \
                         int(crop_size / 2 - src_downsampled_size / 2):int(crop_size / 2 + src_downsampled_size / 2)]
                if np.average(result) / 255 > tissue_threshold:
                    break

            # transform to raw scale
            cx = cx * magnification
            cy = cy * magnification

            # real cropping
            crop_size = int(src_size * 2**0.5 *
                            max(abs(math.cos(angle)), abs(math.sin(angle))))
            cropped = np.asarray(slide.read_region(
                (int(cx - crop_size / 2), int(cy - crop_size / 2)), 0,
                (crop_size, crop_size)),
                                 dtype=np.float32)[:, :, :3]
            mat = cv2.getRotationMatrix2D((crop_size / 2, crop_size / 2),
                                          45 + 360 * angle / (2 * math.pi), 1)
            rotated = cv2.warpAffine(cropped, mat, (crop_size, crop_size))

            result = rotated[int(crop_size / 2 - src_size / 2):int(crop_size / 2 + src_size / 2), \
                     int(crop_size / 2 - src_size / 2):int(crop_size / 2 + src_size / 2)]
            result = cv2.resize(result, (patch_size, patch_size)).transpose(
                (2, 0, 1)) / 255

            # color matching
            if self.use_color_matching:
                result = self.match_color(result.transpose(1, 2, 0)).transpose(
                    2, 0, 1)

            # blurring effect
            if blur > 0:
                blur_size = random.randint(1, blur)
                result = cv2.blur(result.transpose(1, 2, 0),
                                  (blur_size, blur_size)).transpose((2, 0, 1))

            if he_augmentation:
                hed = rgb2hed(np.clip(result.transpose(1, 2, 0), -1.0, 1.0))
                ah = 0.95 + random.random() * 0.1
                bh = -0.05 + random.random() * 0.1
                ae = 0.95 + random.random() * 0.1
                be = -0.05 + random.random() * 0.1
                hed[:, :, 0] = ah * hed[:, :, 0] + bh
                hed[:, :, 1] = ae * hed[:, :, 1] + be
                result = hed2rgb(hed).transpose(2, 0, 1)

            ret_images.append(np.clip(result, 1e-7, 1.0 - 1e-7))

        return ret_images
Example #31
    def get_examples_of_slide_label(self, slide_id, label, count):
        if len(self.regions_of_label_slide[self.label_to_use][label]
               [slide_id]) == 0:
            return []

        results = []
        for _ in range(count):
            loop_count = 0
            while True:
                region_id, tri_id = self._get_random_index_label_slide(
                    label, slide_id)
                loop_count += 1

                # select a point within the triangle as the center position of rectangle
                a1 = random.random()
                a2 = random.random()
                if a1 + a2 > 1.0:
                    a1, a2 = 1.0 - a1, 1.0 - a2
                posx = (1 - a1 - a2) * self.triangulation[slide_id][region_id][tri_id][0][0] + \
                       a1 * self.triangulation[slide_id][region_id][tri_id][1][0] + \
                       a2 * self.triangulation[slide_id][region_id][tri_id][2][0]
                posy = (1 - a1 - a2) * self.triangulation[slide_id][region_id][tri_id][0][1] + \
                       a1 * self.triangulation[slide_id][region_id][tri_id][1][1] + \
                       a2 * self.triangulation[slide_id][region_id][tri_id][2][1]

                src_size = self.src_sizes[slide_id]
                if self.scale_augmentation:
                    src_size *= 0.8 + random.random() * 0.4

                if self.rotation:
                    angle = random.random() * math.pi * 2
                else:
                    angle = -math.pi / 4
                angles = [
                    angle, angle + math.pi / 2, angle + math.pi,
                    angle + math.pi / 2 * 3
                ]
                discard = False
                corners = []
                for theta in angles:
                    cx = posx + src_size / math.sqrt(2) * math.cos(theta)
                    cy = posy + src_size / math.sqrt(2) * math.sin(theta)
                    corners.append((cx, cy))
                    if not self.point_in_region(slide_id, region_id, cx, cy):
                        discard = True
                        break
                if not discard:
                    break

            # cropping with rotation
            crop_size = int(src_size * 2**0.5 *
                            max(abs(math.cos(angle)), abs(math.sin(angle))))
            cropped = np.asarray(self.slides[slide_id].read_region(
                (int(posx - crop_size / 2), int(posy - crop_size / 2)), 0,
                (crop_size, crop_size)),
                                 dtype=np.float32)[:, :, :3]
            mat = cv2.getRotationMatrix2D((crop_size / 2, crop_size / 2),
                                          45 + 360 * angle / (2 * math.pi), 1)
            rotated = cv2.warpAffine(cropped, mat, (crop_size, crop_size))

            result = rotated[int(crop_size/2-src_size/2):int(crop_size/2+src_size/2),\
                             int(crop_size/2-src_size/2):int(crop_size/2+src_size/2)]
            result = cv2.resize(result,
                                (self.patch_size, self.patch_size)).transpose(
                                    (2, 0, 1))

            if self.flip and random.randint(0, 1):
                result = result[:, :, ::-1]
            result *= (1.0 / 255.0)

            # color matching
            if self.use_color_matching:
                result = self.match_color(result.transpose(1, 2, 0)).transpose(
                    2, 0, 1)

            # blurring effect
            if self.blur > 0:
                blur_size = random.randint(1, self.blur)
                result = cv2.blur(result.transpose(1, 2, 0),
                                  (blur_size, blur_size)).transpose((2, 0, 1))

            if self.he_augmentation:
                hed = rgb2hed(np.clip(result.transpose(1, 2, 0), -1.0, 1.0))
                ah = 0.95 + random.random() * 0.1
                bh = -0.05 + random.random() * 0.1
                ae = 0.95 + random.random() * 0.1
                be = -0.05 + random.random() * 0.1
                hed[:, :, 0] = ah * hed[:, :, 0] + bh
                hed[:, :, 1] = ae * hed[:, :, 1] + be
                result = hed2rgb(hed).transpose(2, 0, 1)
                result = np.clip(result, 0, 1.0).astype(np.float32)

            results.append(result)
        return results
Example #32
 def add_h_color(self, img):
     hed = rgb2hed(img / 255.0)
     hed[..., 0] += np.random.rand() * 0.04
     img = hed2rgb(hed)
     img = np.clip(img * 255, 0, 255)
     return img
Example #34
 def test_hed_rgb_roundtrip(self):
     img_in = img_as_ubyte(self.img_stains)
     img_out = rgb2hed(hed2rgb(img_in))
     assert_equal(img_as_ubyte(img_out), img_in)