Example #1
def stain_entropy_otsu(img):
    """Generate tissue mask using otsu thresholding and entropy calculation

    Args:
        img (ndarray): input image
    
    Return:
        mask (ndarray): binary mask

    """
    img_copy = img.copy()
    hed = skimage.color.rgb2hed(img_copy)  # convert colour space
    hed = (hed * 255).astype(np.uint8)
    h = hed[:, :, 0]
    e = hed[:, :, 1]
    d = hed[:, :, 2]
    selem = disk(4)  # structuring element
    # calculate entropy for each colour channel
    h_entropy = rank.entropy(h, selem)
    e_entropy = rank.entropy(e, selem)
    d_entropy = rank.entropy(d, selem)
    entropy = np.sum([h_entropy, e_entropy], axis=0) - d_entropy
    # otsu threshold
    threshold_global_otsu = threshold_otsu(entropy)
    mask = entropy > threshold_global_otsu

    return mask
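
A minimal usage sketch for stain_entropy_otsu above, assuming the imports the function relies on; 'tile.png' is a hypothetical H&E-stained RGB tile, not a file from the original code.

import numpy as np
import skimage.color
import skimage.io
from skimage.filters import rank, threshold_otsu
from skimage.morphology import disk

rgb_tile = skimage.io.imread('tile.png')      # hypothetical RGB tile, shape (H, W, 3)
tissue_mask = stain_entropy_otsu(rgb_tile)    # boolean mask, True where tissue is detected
print(tissue_mask.shape, tissue_mask.mean())  # fraction of pixels flagged as tissue
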
def filter_bank(img, coeff_resolution):
    """
    Calculates the responses of an image to M filters.
    Returns 2-d array of the vectorial responses
    """

    h, w = img.shape

    im = np.reshape(img, (h*w, 1))

    e1 = np.reshape(entropy(img, disk(coeff_resolution*5)), (h*w, 1))
    e2 = np.reshape(entropy(img, disk(coeff_resolution*8)), (h*w, 1))
    e3 = np.reshape(entropy(img, disk(coeff_resolution*10)), (h*w, 1))

    g1 = np.reshape(gradient(img, disk(1)), (h*w, 1))
    g2 = np.reshape(gradient(img, disk(coeff_resolution*3)), (h*w, 1))
    g3 = np.reshape(gradient(img, disk(coeff_resolution*5)), (h*w, 1))

    m1 = np.reshape(ndi.maximum_filter(256-img, size=coeff_resolution*2, mode='constant'), (h*w, 1))
    m2 = np.reshape(ndi.maximum_filter(256-img, size=coeff_resolution*4, mode='constant'), (h*w, 1))
    m3 = np.reshape(ndi.maximum_filter(256-img, size=coeff_resolution*7, mode='constant'), (h*w, 1))

    #c = np.reshape(canny(img), (h*w, 1))
    s = np.reshape(sobel(img), (h*w, 1))

    return np.column_stack((im, e1, e2, e3, g1, g2, g3, m1, m2, m3, s))
Example #4
 def backgroundVariability(self):
     print('Quantifying background variability.')
     self.video_background_variability = 0.0
     self.frame_check_rate = 300  # Don't have to check every frame to get sense of background variability.
     self.disk_size = 5
     self.im_list = glob.glob(self.video_seg_dir + '*')
     self.num_frames = len(self.im_list)
     imgPrev = np.array(cv2.imread(self.im_list[0]), dtype=np.int8)
     self.video_height, self.video_width = imgPrev.shape[:2]
     for i in range(1, self.num_frames, self.frame_check_rate):
         print('Checking background variability of frame ' + str(i) + '.')
         img = np.array(cv2.imread(self.im_list[i]), dtype=np.int8)
         img_entropy = entropy(img[:, :, 0], disk(self.disk_size))
         img_entropy += entropy(img[:, :, 1], disk(self.disk_size))
         img_entropy += entropy(img[:, :, 2], disk(self.disk_size))
         self.writeIndexImage(copy.deepcopy(img_entropy), i,
                              'entropy' + str(self.disk_size))
         self.video_background_variability += np.sum(img_entropy)
     self.num_checked_frames = 1 + (self.num_frames - 1) // self.frame_check_rate
     self.num_pixels = self.num_checked_frames * self.video_height * self.video_width
     # self.num_pixels = self.num_frames * self.video_height * self.video_width
     self.video_background_variability /= self.num_pixels
     print('Average per pixel variability is ' +
           format(self.video_background_variability, '0.3f'))
Example #5
def stain_entropy_otsu(img):
    """
    Binarise an input image by calculating the entropy on the 
    hameatoxylin and eosin channels and then using otsu threshold 

    Args:
        img: input array
    """

    img_copy = img.copy()
    hed = skimage.color.rgb2hed(img_copy)  # convert colour space
    hed = (hed * 255).astype(np.uint8)
    h = hed[:, :, 0]
    e = hed[:, :, 1]
    d = hed[:, :, 2]
    selem = disk(4)  # structuring element
    # calculate entropy for each colour channel
    h_entropy = rank.entropy(h, selem)
    e_entropy = rank.entropy(e, selem)
    d_entropy = rank.entropy(d, selem)
    entropy = np.sum([h_entropy, e_entropy], axis=0) - d_entropy
    # otsu threshold
    threshold_global_otsu = threshold_otsu(entropy)
    mask = entropy > threshold_global_otsu

    return mask
Example #6
def extract_features(images, vector_size=32):
    options = ["ORB", "SIFT", "LBP", "Gabor", "Entropy", "LBP and Entropy"]
    res = ui.prompt("Choose a feature selection algorithm:", options)
    type = options[int(res)]

    data = []
    for img in pb.progressbar(images):  # Process each image
        if type == "ORB":  # Corner features
            alg = cv2.ORB_create()
            descriptor_size = 32
            data.append(
                describe_keypoints(img, alg, vector_size, descriptor_size))
        elif type == "SIFT":  # Corner features (patented)
            alg = cv2.xfeatures2d.SIFT_create()
            descriptor_size = 128
            data.append(
                describe_keypoints(img, alg, vector_size, descriptor_size))
        elif type == "LBP":  # Simple texture recognition
            alg = LocalBinaryPatterns(32, 16)
            grey = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
            data.append(alg.describe(grey))
        elif type == "Gabor":
            # prepare filter bank kernels
            kernels = []
            for theta in range(4):
                theta = theta / 8. * np.pi
                for sigma in (1, 3):
                    for frequency in (0.05, 0.25):
                        kernel = np.real(
                            gabor_kernel(frequency,
                                         theta=theta,
                                         sigma_x=sigma,
                                         sigma_y=sigma))
                        kernels.append(kernel)

            shrink = (slice(0, None, 3), slice(0, None, 3))
            img_shrink = img_as_float(cv2.cvtColor(img,
                                                   cv2.COLOR_BGR2GRAY))[shrink]

            feats = compute_feats(img_shrink, kernels).flatten()
            hist = exposure.histogram(img_shrink, nbins=16)[0]
            data.append(np.append(feats, hist))
        elif type == "Entropy":
            grey = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
            grey = entropy(grey, disk(5))
            hist = exposure.histogram(grey, nbins=16)[0]
            data.append(hist)
        elif type == "LBP and Entropy":
            grey = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
            alg = LocalBinaryPatterns(32, 16)
            entropy_grey = entropy(grey, disk(5))
            hist = exposure.histogram(entropy_grey, nbins=16)[0]
            data.append(np.append(alg.describe(grey), hist))
        else:
            print("ERROR: Type " + type +
                  " not found (features.extract_features())\n")
            return 1

    return data, type
Example #7
	def classify(self, keyPress):
		segment_id = int(segment_list[len(label_vector):][0][2])
		# Note that we classified one more image
		if keyPress == "snow":
			self.label_vector.append(1)
		elif keyPress == "gray":
			self.label_vector.append(2)
		elif keyPress == "melt":
			self.label_vector.append(3)
		elif keyPress == "water":
			self.label_vector.append(4)
		elif keyPress == "shadow":
			self.label_vector.append(5)
		elif keyPress == "unknown":
			self.label_vector.append(0)
		
		if mode == 1:
			#Creating a feature list for the classified superpixel
			# In development:
			if self.im_type == 'pan':
				entropy_image = entropy(bytescale(self.original_image[self.subimage_index]), disk(4))
				feature_array = feature_calculations.analyze_pan_image(self.original_image[self.subimage_index], 
					self.secondary_image[self.subimage_index], entropy_image, self.im_date, segment_id=segment_id)
			if self.im_type == 'srgb':
				entropy_image = entropy(bytescale(self.original_image[self.subimage_index][:,:,0]), disk(4))
				feature_array = feature_calculations.analyze_srgb_image(self.original_image[self.subimage_index], 
					self.secondary_image[self.subimage_index], entropy_image, segment_id=segment_id)
			if self.im_type == 'wv02_ms':
				feature_array = feature_calculations.analyze_ms_image(self.original_image[self.subimage_index], 
					self.secondary_image[self.subimage_index], segment_id=segment_id)

			# feature_calculations returns a 2d array, but we only want the 1d list of features.
			feature_array = feature_array[0]
			# feature_array = analyze_superpixels(self.original_image[self.subimage_index], self.secondary_image[self.subimage_index], segment_id, self.im_type)
		if mode == 2:
			feature_array = self.secondary_image[self.subimage_index][self.sp_buffer]
			print(feature_array, self.label_vector[-1])

		if len(self.feature_matrix) == len(self.label_vector)-1:
			#Adding all of the features found for this watershed to the main matrix
			self.feature_matrix.append(feature_array)
			# print "added a feature"
		else:
			self.feature_matrix[len(self.label_vector)-1] = feature_array
			# print "replaced a feature"

		# Print some useful statistics
		print(str(self.label_vector[-1]) + ": " + keyPress)
		print("~" * 80)

		# print self.original_image[self.subimage_index][self.sp_buffer[0],self.sp_buffer[1],0]
		# print feature_array
		# print "Number with Labels: %s" %len(self.label_vector)
		# print "Number with Features: %s" %len(self.feature_matrix)
		self.next_super_pixel()
Example #8
    def build_features_from_arr(self, img_rgb):
        # the third component `_` is actually the number of channels in RGB,
        # which is already defined in the constant `NUM_RGB_CHANNELS`
        num_rows, num_cols, _ = img_rgb.shape
        num_pixels = num_rows * num_cols
        img_lab = color.rgb2lab(img_rgb)
        img_lab_l = img_lab[:, :, 0]  # NOTE: this is a view

        X = np.zeros((num_pixels, self.num_pixel_features), dtype=np.float32)

        # color features
        # tpf.compute_color_features(X_img[:, self.color_slice])
        img_lab_vec = img_lab.reshape(num_rows * num_cols, NUM_LAB_CHANNELS)
        img_xyz_vec = color.rgb2xyz(img_rgb).reshape(num_rows * num_cols,
                                                     NUM_XYZ_CHANNELS)
        img_ill_vec = np.dot(A, np.log(np.dot(B, img_xyz_vec.transpose()) +
                                       1)).transpose()
        X[:, :NUM_LAB_CHANNELS] = img_lab_vec
        X[:,
          NUM_LAB_CHANNELS:NUM_LAB_CHANNELS + NUM_ILL_CHANNELS] = img_ill_vec

        # texture features
        # tpf.compute_texture_features(X_img[:, self.texture_slice],
        #                              self.sigmas, self.num_orientations)
        for i, sigma in enumerate(self.sigmas):
            base_kernel_arr = filters.get_texture_kernel(sigma)
            for j, orientation in enumerate(range(self.num_orientations)):
                # theta = orientation / num_orientations * np.pi
                theta = orientation * 180 / self.num_orientations
                oriented_kernel_arr = ndi.interpolation.rotate(
                    base_kernel_arr, theta)
                img_filtered = ndi.convolve(img_lab_l, oriented_kernel_arr)
                img_filtered_vec = img_filtered.flatten()
                X[:, self.num_color_features + i * self.num_orientations +
                  j] = img_filtered_vec

        # entropy features
        # tpf.compute_entropy_features(X_img[:, self.entropy_slice],
        #                              self.neighborhood, self.scales)
        entropy_start = self.num_color_features + self.num_texture_features
        X[:, entropy_start] = rank.entropy(img_lab_l.astype(np.uint16),
                                           self.neighborhood).flatten()

        for i, factor in enumerate(self.scales[1:], start=1):
            img = transform.resize(
                transform.downscale_local_mean(img_lab_l, (factor, factor)),
                img_lab_l.shape).astype(np.uint16)
            X[:,
              entropy_start + i] = rank.entropy(img,
                                                self.neighborhood).flatten()

        return X
Example #9
def local_entropy(depth, kernel=16, mask=False):
    depth_entropy_sequence = []

    for i in range(depth.shape[0]):
        depth_i = depth[i].clip(0, b).astype(np.uint8)

        # compute entropy directly on depth, entropy kernel 16x16
        if mask:
            depth_entropy_i = entropy(depth_i, selem=np.ones((kernel, kernel)).astype(np.uint8), mask=(depth_i > 0))
        else:
            depth_entropy_i = entropy(depth_i, selem=np.ones((kernel, kernel)).astype(np.uint8))

        depth_entropy_sequence.append(depth_entropy_i)
    return np.stack(depth_entropy_sequence, axis=0)
Example #10
def build_input_vector(gird_face):
    """ Concatenates: grey_or_ir_face, depth_face, entr_grey_or_ir_face, entr_depth_face"""
    (gir_face, depth_face) = gird_face
    if gir_face is None or depth_face is None:
        return None
    tmp = np.zeros((4 * IMG_SIZE, IMG_SIZE))
    entr_gir_face = entropy(gir_face, disk(5))
    entr_gir_face = entr_gir_face / np.max(entr_gir_face)
    entr_depth_face = entropy(depth_face, disk(5))
    entr_depth_face = entr_depth_face / np.max(entr_depth_face)
    tmp[0:IMG_SIZE] = depth_face
    tmp[IMG_SIZE:IMG_SIZE * 2] = gir_face
    tmp[IMG_SIZE * 2:IMG_SIZE * 3] = entr_gir_face
    tmp[IMG_SIZE * 3:IMG_SIZE * 4] = entr_depth_face
    return tmp
Example #11
def guessSize(image, angle):
    entr = entropy((removeMotionBlur(image, 2, angle) * 255).astype(np.uint8),
                   disk(10))
    prevSum = np.sum(entr)
    curSum = prevSum

    n = 2
    while (curSum >= prevSum):
        n += 2
        prevSum = curSum
        curSum = np.sum(
            entropy((removeMotionBlur(image, n, angle) * 255).astype(np.uint8),
                    disk(10)))

    return n + 2
Example #12
def make_entropy_vars(wells_df, logs, l_foots):

    new_df = pd.DataFrame()
    grouped = wells_df.groupby(['Well Name'])

    for key in grouped.groups.keys():

        depth = grouped.get_group(key)['Depth']
        temp_df = pd.DataFrame()
        temp_df['Depth'] = depth

        for log in logs:
            temp_data = grouped.get_group(key)[log]
            image = np.vstack((temp_data, temp_data, temp_data))
            image -= np.median(image)
            image /= np.max(np.abs(image))
            image = img_as_ubyte(image)

            for l_foot in l_foots:
                footprint = rectangle(l_foot, 3)
                temp_df[log + '_entropy_foot' + str(l_foot)] = entropy(
                    image, footprint)[0, :]

        new_df = pd.concat([new_df, temp_df])

    new_df = new_df.sort_index()
    new_df = new_df.drop(['Depth'], axis=1)
    return new_df
Example #13
    def SPSelecttest(self,slice_colors,PatchNorm,label,numsuperpixels):

       SPDev=[]
       SPMean=[]
       spcount=0
       for nsp in numsuperpixels:
           superpixel = slic(slice_colors, n_segments=nsp, compactness=5, sigma=1.0)  # superpixel calculation with SLIC
           spdev,spmean=self.SPRegionArribute(superpixel,PatchNorm,label)
           SPDev.append(spdev)
           SPMean.append(spmean)
           slice_entro = entropy(PatchNorm, disk(5))  # entropy of the whole image
           region = regionprops(superpixel, slice_entro)  # information for all regions
           spcount += 1
           print('Superpixel group No.%d/%d has been generated' % (spcount, len(numsuperpixels)))

           # ******************************************************************
           # display the superpixel results
           # ******************************************************************
           fig, axes = plt.subplots(2, 2, figsize=(7, 6), sharex=True, sharey=True,
                                    subplot_kw={'adjustable': 'box-forced'})
           ax = axes.ravel()
           ax[0].imshow(slice_colors)
           from skimage.segmentation import mark_boundaries
           ax[1].imshow(mark_boundaries(slice_colors, superpixel),cmap='jet')
           ax[1].set_title('The %d superpixels'%nsp)
           ax[1].imshow(label, cmap='hot', alpha=0.0)
           ax[2].imshow(slice_entro)
           ax[3].imshow(superpixel)

       return SPDev,SPMean
Example #14
def preprocess(img):
    height, width, *rest = img.shape
    # Convert to CIELAB colorspace
    lab_img = color.rgb2lab(img)
    feature_array = lab_img
    # Calculate illuminance invariant image
    ii_img = rgb2ii(img).reshape(width, height, 1)
    feature_array = np.concatenate((feature_array, ii_img), axis=2)
    # Calculate texture pattern
    for sigma in [1, math.sqrt(2), 2]:
        # for sigma in [2]:
        g = gaussian(lab_img[:, :, 0], sigma)
        feature_array = np.concatenate(
            (feature_array, g.reshape(width, height, 1)), axis=2)
    # Calculate entropy
    # for i in range(6):
    # theta = i / 6 * np.pi
    # filtered_img = gabor(lab_img[:,:,0], frequency=1/(2*np.pi), theta=theta)[0]
    # feature_array = np.concatenate((feature_array, filtered_img.reshape(width, height, 1)), axis=2)
    for ws in [3, 5, 9]:
        # for ws in [9]:
        entropy_img = entropy(lab_img[:, :, 0] / 100,
                              disk(ws))  # 100 = Max L value
        feature_array = np.concatenate(
            (feature_array, entropy_img.reshape(width, height, 1)), axis=2)

    feature_array = feature_array.reshape(feature_array.shape[0]**2,
                                          feature_array.shape[2])

    # Fill nans
    features = pd.DataFrame(feature_array)
    features.replace([np.inf, -np.inf], np.nan, inplace=True)
    features.fillna(0, inplace=True)

    return features.to_numpy()
def sklocal(image):

    from skimage.filters.rank import entropy

    le = entropy(image, disk(5))

    return force_to_uint8(le)
Example #16
def calculate_oct_roi_mask(img, tresh=1e-10):
    """
        Calculate the interesting region MASK of the image using entropy
        :param img: 
        :param tresh: entropy cutoff threshold
        :return: mask of the interesting region (MASK = 1 interesting)
        """
    if img.ndim == 2:
        im_slice = skimage.img_as_float(img.astype(np.float32) / 128. - 1.)
    elif img.ndim == 3:
        im_slice = skimage.img_as_float(img[:, :, 1].astype(np.float32) /
                                        128. - 1.)
    assert img.ndim in {2, 3}
    im_slice_ = entropy(im_slice, disk(11))
    im_slice_ = im_slice_ / (np.max(im_slice_) + 1e-16)
    im_slice_ = np.asarray(im_slice_ > tresh, dtype=np.int8)
    selem = disk(35)
    im_slice_ = binary_closing(im_slice_, selem=selem)
    im_slice_ = convex_hull_image(im_slice_)

    plt.imshow(im_slice, cmap='gray')
    plt.imshow(im_slice_, cmap='jet', alpha=0.5)
    plt.pause(.1)

    return im_slice_
Example #17
 def stain_entropy_otsu(self):
     im_copy = self.im_resize.copy()
     hed = skimage.color.rgb2hed(im_copy)  # convert colour space
     hed = (hed * 255).astype(np.uint8)
     h = hed[:, :, 0]
     e = hed[:, :, 1]
     d = hed[:, :, 2]
     selem = disk(4)  # structuring element
     # calculate entropy for each colour channel
     h_entropy = rank.entropy(h, selem)
     e_entropy = rank.entropy(e, selem)
     d_entropy = rank.entropy(d, selem)
     entropy = np.sum([h_entropy, e_entropy], axis=0) - d_entropy
     # otsu threshold
     threshold_global_otsu = threshold_otsu(entropy)
     self.otsu = entropy > threshold_global_otsu
Example #18
def SuperpixelExtract(o_img, n_segments, is_data_from_nii=1):
    if is_data_from_nii == 1:
        o_img[o_img > 200] = 200
        o_img[o_img < -150] = -150
    else:
        # o_img was originally threshold-segmented and has already been changed here; if the step below were kept it would cause a problem: o_img would not take the new values, and after patch extraction its values would still be those from before thresholding
        # o_img = o_img.astype(np.int16)
        o_img += np.int16(-1024)
        o_img[o_img < -1500] = -1024
        o_img[o_img > 2976] = 2976
        o_img[o_img > 200] = 200
        o_img[o_img < -150] = -150
    img = o_img
    # ShowImage(1, img)
    PatchN = normalization_1(img)
    # edit on 4/22: normalise to [0, 1] or to [0, 255]?
    # PatchN = DataNormalize(img)
    # Gaussian denoising
    PatchN = gaussian(PatchN, sigma=0.5)
    # expand to three channels
    slice_colors = ResizeChannel(PatchN)
    superpixel = slic(slice_colors, n_segments, compactness=5, sigma=1)
    slice_entro = entropy(PatchN, disk(5))
    regions = regionprops(superpixel, slice_entro)
    return PatchN, regions, superpixel, slice_colors
Example #19
def compute_curriculum(x, wnd=5, name=None):
    '''Computes the curriculum complexity for the given dataset.'''
    # create vars
    c = torch.ones(x.size()[0])
    name = 'curriculum computation{}'.format(
        " ({})".format(name) if name is not None else "")

    # iterate through data
    for i in tqdm(range(0, x.size()[0]), desc=name, ncols=100, ascii=True):
        # retrieve the image
        img = x[i].mean(dim=0)
        if img.max() > 10.:
            img = img / 255.

        # compute complexity
        img = torch.clamp(img, min=-1., max=1.).cpu().numpy().astype("float32")
        img = img_as_ubyte(img)
        c_img = entropy(img, disk(wnd))

        # set value
        c[i] = torch.pow(torch.Tensor(c_img), 2).mean()

    # normalize data
    cmin = c.min()
    cmax = c.max()
    c = (c - cmin) / torch.clamp(cmax - cmin, min=0.0001)
    return c
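
A rough usage sketch for compute_curriculum above, assuming torch, tqdm, numpy and the skimage helpers it uses (entropy, disk, img_as_ubyte) are already imported as the snippet expects; the batch below is synthetic.

import torch

x = torch.rand(8, 3, 64, 64)                  # a small batch of fake RGB images in [0, 1]
complexity = compute_curriculum(x, wnd=5, name='toy')
print(complexity)                             # one complexity value per image, normalised to [0, 1]
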
def trim(image):
    '''transforms the given image to crop and orient the strips'''
    scale_factor = 5
    temp = rgb2gray(image)
    temp = downscale_local_mean(temp, (scale_factor, scale_factor))

    e = rank.entropy(temp, disk(10))
    fred = binary_fill_holes(e > threshold_isodata(e))
    fred = rank.minimum(fred, disk(10))
    labels = label(fred)

    props = regionprops(labels)
    areas = [prop['area'] for prop in props]
    selection = labels == props[np.argmax(areas)]['label']
    angles = np.linspace(-45, 45)
    rotations = [rotate(selection, angle, resize=True) for angle in angles]
    props = [regionprops(label(r)) for r in rotations]
    bboxes = [prop[0]['bbox'] for prop in props]
    areas = [(bbox[2] - bbox[0]) * (bbox[3] - bbox[1]) for bbox in bboxes]
    best = np.argmin(areas)

    rotated = rotations[best]
    mask = rank.minimum(rotated, square(10)) > 0

    bbox = np.array(regionprops(label(mask))[0]['bbox'])
    rmin, cmin, rmax, cmax = bbox * scale_factor

    transformed = rotate(image, angles[best], resize=True)
    transformed = transformed[rmin:rmax, cmin:cmax]
    return transformed
def takeEntropy(some_list, some_dir):
    eb_list = []
    all_covariances = []
    for each_item in some_list:
        img = Image.open(some_dir + each_item)
        img_covariance = getCovariances(img)
        all_covariances.append(img_covariance)
        pix = np.array(img.getdata(), dtype=np.uint8)
        #img = np.array(img)
        entropy_img = entropy(pix, disk(300, dtype=np.uint8))
        expected_bits = entropy_img / math.log(2)
        temp_PCA = PCA()
        temp_PCA.fit(expected_bits)
        fig = plt.figure()
        plt.subplot(3, 1, 1)
        plt.title("Original Image")
        plt.imshow(img)
        plt.subplot(3, 1, 2)
        plt.title("Entropy Expected Bits")
        plt.plot(expected_bits)
        plt.subplot(3, 1, 3)
        plt.title("PCA Components")
        plt.ylabel("Principal Components")
        plt.xlabel("Planes")
        plt.plot(temp_PCA.components_)
        blue_patch = mpatches.Patch(color='blue', label='1st plane')
        orange_patch = mpatches.Patch(color='orange', label='2nd plane')
        green_patch = mpatches.Patch(color='green', label='3rd plane')
        plt.legend(handles=[blue_patch, orange_patch, green_patch])
        fig.subplots_adjust(hspace=0.5)
        plt.savefig(some_dir + (each_item.split(".")[0]) +
                    "_expected_bits.png")
        print(each_item, "[bits, pca]:", expected_bits.shape, ",", temp_PCA.components_.shape)
        eb_list.append(expected_bits.shape[0])
    return eb_list, all_covariances
 def transform(self, src):
     ent = np.array([
         rank.entropy(np.power(10, (src[0]) / 10.),
                      morphology.disk(self.params['disk_radius'])),
         rank.entropy(np.power(10, (src[1]) / 10.),
                      morphology.disk(self.params['disk_radius']))
     ])
     hist0 = np.histogram(ent[0].reshape(-1),
                          bins=[1, 2, 3, 4, 5],
                          density=True,
                          range=(0, 5))[0]
     hist1 = np.histogram(ent[1].reshape(-1),
                          bins=[1, 2, 3, 4, 5],
                          density=True,
                          range=(0, 5))[0]
     return [hist0, hist1]
Example #23
def Watershed_sep(b_mic, thr, dsk_rad=5):
    """ Calculates watershed segmentation from prepared image file b_mic"""

    lo, hi = thr
    mask = zeros_like(b_mic)
    mask[b_mic < hi] = 1
    #e_map = sobel(b_mic, mask=mask)
    e_map = entropy(b_mic, disk(dsk_rad), mask=mask)
    figure(5)
    imshow(e_map)
    show()

    markers = zeros_like(b_mic)
    markers[b_mic < lo] = 1
    markers[b_mic > hi] = 2

    figure(6)
    imshow(markers)
    show()

    segmentation = watershed(e_map, markers)

    figure(7)
    imshow(segmentation, cmap=cm.nipy_spectral, interpolation='nearest')
    show()
    return segmentation, e_map, markers
Example #24
    def parse_df(self, row, which='train'):
        _ = os.path.join
        parsed = self.parse_map.copy()
        img_name = "{}.png".format(row.id)
        img = (cv2.imread(row.path, 0) / 255).squeeze()
        is_train = which == 'train'

        paths = self.train_paths if is_train else self.test_paths

        if is_train:
            mask = (cv2.imread(row.mask_path, 0) / 255).squeeze()
            mask_edge = rank.entropy(mask, disk(1)).round()
            parsed['edge_counts'], parsed['mid_point'], parsed[
                'distance'] = self.get_edge_details(mask_edge)
            # io.imsave(_(paths['edges'], img_name), mask_edge)

        sobel_img = sobel(img)
        io.imsave(_(paths['sobels'], img_name), sobel_img)

        sobel_bin = sobel_img < self.sobel_threshold
        sobel_er = erosion(sobel_bin, disk(self.sobel_disk_size))
        sobel_mask = remove_small_objects(ndi.binary_opening(sobel_er))
        io.imsave(_(paths['sobels_mask_v1'], img_name),
                  sobel_mask.astype('int8') * 255)

        combined_image = np.dstack((img, sobel_img, sobel_mask))
        io.imsave(_(paths['combined_v1'], img_name), combined_image)

        return [parsed[k] for k in self.parse_order]
def binarize(img):
    # calculate local entropy
    entr = entropy(img, disk(5))
    # Normalize and negate entropy values
    MAX_ENTROPY = 8.0
    MAX_PIX_VAL = 255
    negative = 1 - (entr / MAX_ENTROPY)
    u8img = (negative * MAX_PIX_VAL).astype(np.uint8)
    # Global thresholding
    ret, mask = cv2.threshold(u8img, 0, MAX_PIX_VAL, cv2.THRESH_OTSU)
    # mask out text
    masked = cv2.bitwise_and(img, img, mask=mask)
    # fill in the holes to estimate the background
    kernel = np.ones((35, 35), np.uint8)
    background = cv2.dilate(masked, kernel, iterations=1)
    # By subtracting background from the original image, we get a clean text image
    text_only = cv2.absdiff(img, background)
    # Negate and increase contrast
    neg_text_only = (MAX_PIX_VAL - text_only) * 1.15
    # clamp the image within u8 range
    ret, clamped = cv2.threshold(neg_text_only, 255, MAX_PIX_VAL, cv2.THRESH_TRUNC)
    clamped_u8 = clamped.astype(np.uint8)
    # Do final adaptive thresholding to binarize image
    processed = cv2.adaptiveThreshold(clamped_u8, MAX_PIX_VAL, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY,
                                        31, 2)
    return processed
Example #26
def Entropy(gray):
    en_ = entropy(gray, disk(3))
    min_ = np.amin(en_)
    max_ = np.amax(en_)
    en_ -= min_
    en_ = en_ * 255 / (max_ - min_)
    en_ = en_.astype('uint8')
    return en_
Example #27
    def entropy(image, channel=0):
        """Calculate the local entropy of a single image channel.

        Args:
            image(ndarray): image representation as array.
            channel(int): index of the channel to process.

        """
        return rank.entropy(image[:, :, channel], disk(5))
Example #28
def add_index_layer(img, append=False):
    img_rows = np.shape(img)[0]
    img_cols = np.shape(img)[1]
    # entropy
    e = (
        (entropy(img[:, :, 0], disk(1)) + entropy(img[:, :, 1], disk(1)) + entropy(img[:, :, 2], disk(1))) / 3).reshape(
        [img_rows, img_cols, 1])
    # contrast
    c = ((enhance_contrast(img[:, :, 0], disk(1)) + enhance_contrast(img[:, :, 1], disk(1)) + enhance_contrast(
        img[:, :, 2], disk(1))) / 3).reshape([img_rows, img_cols, 1])
    # mean
    m = ((mean(img[:, :, 0], disk(1)) + mean(img[:, :, 1], disk(1)) + mean(img[:, :, 2], disk(1))) / 3).reshape(
        [img_rows, img_cols, 1])
    if append:
        return np.concatenate([img, e, c, m], axis=2)
    else:
        return np.concatenate([e, c, m], axis=2)
Example #29
def get_entropy_score(image):
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    entr_img = entropy(gray, disk(5))
    all_sum = np.sum(entr_img)
    num_of_pixels = entr_img.shape[0] * entr_img.shape[1]
    entropy_score = (all_sum) / (num_of_pixels)

    return entropy_score
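
A possible invocation of get_entropy_score (the file name is hypothetical; cv2, numpy and the skimage entropy/disk imports used by the snippet are assumed to be in scope).

import cv2

frame = cv2.imread('frame_0001.jpg')    # BGR image as loaded by OpenCV
score = get_entropy_score(frame)
print(score)                            # mean local entropy per pixel; higher means more texture
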
Example #30
def threshold_sk_entropy(img):
    print(os.path.join(sample_path, filename))
    image = imread(os.path.join(sample_path, filename))
    image_gray = rgb2gray(image)
    entropy_image = entropy(image_gray, disk(10))
    plt.imsave(os.path.join(outpath, "sk_entropy_{}".format(img) + ".tiff"),
               entropy_image,
               cmap="gray")
    return entropy_image
Example #31
def entropy_list(myimages):
    entropies = []
    myimages = tqdm(myimages)
    myimages.set_description("Computing entropies...")
    for img in myimages:
        if np.max(img) > 1:
            img = img / np.max(img)
        entropies.append(entropy(img, disk(10)))
    return entropies
Example #32
def calculate_oct_y_range(img, tresh=1e-10):
    """
    Calculate the interesting y region of the image using entropy
    :param img: 
    :param tresh: entropy cutoff threshold
    :return: min and maximum y index containing the interesting region
    """

    if img.ndim == 2:
        im_slice = skimage.img_as_float(img.astype(np.float32) / 128. - 1.)
    elif img.ndim == 3:
        im_slice = skimage.img_as_float(img[:, :, 1].astype(np.float32) /
                                        128. - 1.)
    assert img.ndim in {2, 3}
    im_slice_ = entropy(im_slice, disk(11))
    p_ = np.mean(im_slice_, axis=1)
    p_ = p_ / (np.max(p_) + 1e-16)
    p_ = np.asarray(p_ > tresh, dtype=int)
    inx = np.where(p_ == 1)

    # im_slice_ = im_slice_ / (np.max(im_slice_) + 1e-16)
    # im_slice_ = np.asarray(im_slice_ > tresh, dtype=np.int)
    #
    #
    # plt.subplot(1,3,1)
    # plt.imshow(im_slice)
    # plt.subplot(1,3,2)
    # plt.imshow(im_slice_)
    # plt.subplot(1,3,3)
    # selem = disk(35)
    # im_slice_ = binary_closing(im_slice_,selem=selem)
    # plt.imshow(im_slice_)
    # plt.pause(.1)

    # y = list()
    # for i in range(0, im_slice_.shape[1]):
    #     if np.any(im_slice_[:,i]==1):
    #         yy = np.where(im_slice_[:,i]==1)[0][0]
    #     else:
    #         yy = 0
    #     y.append(yy)
    #
    # y = scipy.signal.medfilt(y, kernel_size=81)
    # p_ = np.zeros(p_.shape, dtype=np.float32)
    # if len(inx[0]) > 0:
    #     p_[inx[0][0]:inx[0][-1]] = 512.
    # plt.imshow(img)
    # print img.shape
    # plt.plot(p_, range(0, len(p_)), color='red')
    # plt.plot(range(0, len(y)), y, color='blue')
    # plt.pause(1)
    # plt.clf()

    if len(inx[0]) > 0:
        return np.min(inx[0]), np.max(inx[0])
    else:
        return 0, img.shape[0]
Example #33
def entropy_filtering(img):
	# # First example: object detection.
	#
	# noise_mask = 28 * np.ones((128, 128), dtype=np.uint8)
	# noise_mask[32:-32, 32:-32] = 30
	#
	# noise = (noise_mask * np.random.random(noise_mask.shape) - 0.5 *
	# 		 noise_mask).astype(np.uint8)
	# img = noise + 128

	entr_img = entropy(img, disk(10))

	# noise_mask is only available in the commented-out block above, so show the
	# input image alongside its local entropy
	fig, (ax0, ax1) = plt.subplots(1, 2, figsize=(8, 3))

	ax0.imshow(img, cmap=plt.cm.gray)
	ax0.set_xlabel("Input image")
	ax1.imshow(entr_img)
	ax1.set_xlabel("Local entropy")

	fig.tight_layout()

	# Second example: texture detection.

	image = img_as_ubyte(data.camera())

	fig, (ax0, ax1) = plt.subplots(ncols=2, figsize=(10, 4), sharex=True,
								   sharey=True,
								   subplot_kw={"adjustable": "box-forced"})

	img0 = ax0.imshow(image, cmap=plt.cm.gray)
	ax0.set_title("Image")
	ax0.axis("off")
	fig.colorbar(img0, ax=ax0)

	img1 = ax1.imshow(entropy(image, disk(5)), cmap=plt.cm.gray)
	ax1.set_title("Entropy")
	ax1.axis("off")
	fig.colorbar(img1, ax=ax1)

	fig.tight_layout()

	plt.show()
    def __init__(self, img=None, path=None, block_size=5):
        if path and not img:
            img = cv2.imread(path)

        #img = Preprocess().blur_image(img)
        self.block_size = block_size
        self.img_rgb = img.copy()
        self.img_hsv = cv2.cvtColor(img, cv2.COLOR_RGB2HSV)
        #self.img_ycbcr = cv2.cvtColor(img, cv2.COLOR_RGB2YCrCb)
        self.img_gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
        self.height, self.width, _ = img.shape
        self.Hxx, self.Hxy, self.Hyy = hessian_matrix(self.img_gray)
        #vector, self.hog = hog(self.img_gray, orientations=8, pixels_per_cell=(3, 3),
        #            cells_per_block=(1, 1), visualise=True)

        neighbours = disk(25)
        self.entropy = entropy(self.img_gray, neighbours)
Example #35
def test_entropy():
    #  verify that entropy is coherent with bitdepth of the input data

    selem = np.ones((16, 16), dtype=np.uint8)
    # 1 bit per pixel
    data = np.tile(np.asarray([0, 1]), (100, 100)).astype(np.uint8)
    assert(np.max(rank.entropy(data, selem)) == 1)

    # 2 bit per pixel
    data = np.tile(np.asarray([[0, 1], [2, 3]]), (10, 10)).astype(np.uint8)
    assert(np.max(rank.entropy(data, selem)) == 2)

    # 3 bit per pixel
    data = np.tile(
        np.asarray([[0, 1, 2, 3], [4, 5, 6, 7]]), (10, 10)).astype(np.uint8)
    assert(np.max(rank.entropy(data, selem)) == 3)

    # 4 bit per pixel
    data = np.tile(
        np.reshape(np.arange(16), (4, 4)), (10, 10)).astype(np.uint8)
    assert(np.max(rank.entropy(data, selem)) == 4)

    # 6 bit per pixel
    data = np.tile(
        np.reshape(np.arange(64), (8, 8)), (10, 10)).astype(np.uint8)
    assert(np.max(rank.entropy(data, selem)) == 6)

    # 8-bit per pixel
    data = np.tile(
        np.reshape(np.arange(256), (16, 16)), (10, 10)).astype(np.uint8)
    assert(np.max(rank.entropy(data, selem)) == 8)

    # 12 bit per pixel
    selem = np.ones((64, 64), dtype=np.uint8)
    data = np.zeros((65, 65), dtype=np.uint16)
    data[:64, :64] = np.reshape(np.arange(4096), (64, 64))
    with expected_warnings(['Bitdepth of 11']):
        assert(np.max(rank.entropy(data, selem)) == 12)

    # make sure output is of dtype double
    with expected_warnings(['Bitdepth of 11']):
        out = rank.entropy(data, np.ones((16, 16), dtype=np.uint8))
    assert out.dtype == np.double
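
The relationship the test checks can be reproduced in isolation: a tiling of the values 0..15 carries 4 bits of information, so the maximum local entropy is 4 once the window covers every value (a small illustrative sketch, not part of the test module).

import numpy as np
from skimage.filters.rank import entropy
from skimage.morphology import square

data = np.tile(np.reshape(np.arange(16), (4, 4)), (10, 10)).astype(np.uint8)
print(np.max(entropy(data, square(16))))  # -> 4.0
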
Example #36
def filter_entropy_image(image, filter):

	eimage = entropy(image, disk(5))

	# mark pixels whose local entropy is below the cutoff
	new_picture = eimage < filter

	return new_picture.astype('b')
Example #37
def calculateFeatures( imagePath ):
	image = img_as_ubyte(imread(imagePath, as_grey=False, plugin=None, flatten=None))
	img_gray = color.rgb2gray(image)
	img_hsv = color.rgb2hsv(image)

	#fig, (ax0, ax1) = plt.subplots(ncols=2, figsize=(10, 4))

	#img0 = ax0.imshow(img_gray, cmap=plt.cm.gray)
	#ax0.set_title('Image')
	#ax0.axis('off')
	#fig.colorbar(img0, ax=ax0)

	imageEntropy = entropy(img_gray, disk(5))
	#img1 = ax1.imshow(imageEntropy, cmap=plt.cm.jet)
	#ax1.set_title('Entropy')
	#ax1.axis('off')
	#fig.colorbar(img1, ax=ax1)

	hueMatrix = numpy.matrix(img_hsv[:, :, 0])
	hueRound = numpy.round(hueMatrix, 4)
	hueCount = numpy.subtract(hueRound, numpy.round(hueRound.mean(), 4))

	brightMatrix = img_gray > img_gray.mean()

	meanEntropy = imageEntropy.mean()
	maxEntropy = imageEntropy.max()
	meanIntensity = img_gray.mean()
	meanHue = hueMatrix.mean()
	countOfAverageHuePixels = hueMatrix.size - numpy.count_nonzero(hueCount)
	percentageOfLightPixels = float(numpy.count_nonzero(brightMatrix)) / img_gray.size

	featureVector = numpy.array([meanEntropy, maxEntropy, meanIntensity, meanHue, countOfAverageHuePixels, percentageOfLightPixels])

	#plt.show()

	return featureVector
    def patches_by_entropy(self, num_patches):
        """
        
        Finds high-entropy patches based on label, allows net to learn borders more effectively.
        :param num_patches: int, defaults to num_samples, 
                            enter in quantity it using in conjunction with randomly sampled patches.
        :return: list of patches (num_patches, 4, h, w) selected by highest entropy
        """
        patches, labels = [], []
        ct = 0
        while ct < num_patches:
            im_path = random.choice(self.train_data)
            fn = os.path.basename(im_path)
            label = io.imread('Labels/' + fn[:-4] + 'L.png')

            # pick again if slice is only background
            if len(np.unique(label)) == 1:
                continue

            img = io.imread(im_path).reshape(5, 240, 240)[:-1].astype('float')
            l_ent = entropy(label, disk(self.h))
            top_ent = np.percentile(l_ent, 90)

            # restart if the 90th entropy percentile is 0
            if top_ent == 0:
                continue

            highest = np.argwhere(l_ent >= top_ent)
            p_s = random.sample(highest, 3)
            for p in p_s:
                p_ix = (p[0] - (self.h / 2), p[0] + ((self.h + 1) / 2), p[1] - (self.w / 2),
                        p[1] + ((self.w + 1) / 2))
                patch = np.array([i[p_ix[0]: p_ix[1], p_ix[2]: p_ix[3]] for i in img])
                # exclude any patches that are too small
                if np.shape(patch) != (4, 65, 65):
                    continue
                patches.append(patch)
                labels.append(label[p[0], p[1]])
            ct += 1
        return np.array(patches[:self.num_samples]), np.array(labels[:self.num_samples])
Example #39
def check_all():
    np.random.seed(0)
    image = np.random.rand(25, 25)
    selem = morphology.disk(1)
    refs = np.load(os.path.join(skimage.data_dir, "rank_filter_tests.npz"))

    assert_equal(refs["autolevel"], rank.autolevel(image, selem))
    assert_equal(refs["autolevel_percentile"], rank.autolevel_percentile(image, selem))
    assert_equal(refs["bottomhat"], rank.bottomhat(image, selem))
    assert_equal(refs["equalize"], rank.equalize(image, selem))
    assert_equal(refs["gradient"], rank.gradient(image, selem))
    assert_equal(refs["gradient_percentile"], rank.gradient_percentile(image, selem))
    assert_equal(refs["maximum"], rank.maximum(image, selem))
    assert_equal(refs["mean"], rank.mean(image, selem))
    assert_equal(refs["mean_percentile"], rank.mean_percentile(image, selem))
    assert_equal(refs["mean_bilateral"], rank.mean_bilateral(image, selem))
    assert_equal(refs["subtract_mean"], rank.subtract_mean(image, selem))
    assert_equal(refs["subtract_mean_percentile"], rank.subtract_mean_percentile(image, selem))
    assert_equal(refs["median"], rank.median(image, selem))
    assert_equal(refs["minimum"], rank.minimum(image, selem))
    assert_equal(refs["modal"], rank.modal(image, selem))
    assert_equal(refs["enhance_contrast"], rank.enhance_contrast(image, selem))
    assert_equal(refs["enhance_contrast_percentile"], rank.enhance_contrast_percentile(image, selem))
    assert_equal(refs["pop"], rank.pop(image, selem))
    assert_equal(refs["pop_percentile"], rank.pop_percentile(image, selem))
    assert_equal(refs["pop_bilateral"], rank.pop_bilateral(image, selem))
    assert_equal(refs["sum"], rank.sum(image, selem))
    assert_equal(refs["sum_bilateral"], rank.sum_bilateral(image, selem))
    assert_equal(refs["sum_percentile"], rank.sum_percentile(image, selem))
    assert_equal(refs["threshold"], rank.threshold(image, selem))
    assert_equal(refs["threshold_percentile"], rank.threshold_percentile(image, selem))
    assert_equal(refs["tophat"], rank.tophat(image, selem))
    assert_equal(refs["noise_filter"], rank.noise_filter(image, selem))
    assert_equal(refs["entropy"], rank.entropy(image, selem))
    assert_equal(refs["otsu"], rank.otsu(image, selem))
    assert_equal(refs["percentile"], rank.percentile(image, selem))
    assert_equal(refs["windowed_histogram"], rank.windowed_histogram(image, selem))
Example #40
def block_processing_setup(arrays, binning, blk_size, qmetric):
    """
    Create a binned version of the full sun image, filled with values of sharpness metrics
    Uses a custom kernel, 2nd-degree, Laplacian-like

    :param arrays: image series (data cube)
    :param binning: binning factor.
    :return: Binned array of size = arrays size / binning.
    """
    # dimensions of the binned array
    naxis1, naxis2, nframes = arrays.shape
    nbaxis1 = int(naxis1 / binning)
    nbaxis2 = int(naxis2 / binning)

    kernel = np.array([[1, 4, 1],
                       [4, -20, 4],
                       [1, 4, 1]])

    # Initialize quality array - "q" for quality
    qbinned_arrays = np.zeros([nbaxis2, nbaxis1, nframes])
    for k in range(0, nframes):
        # Get a binned version of the arrays
        frame           = np.squeeze(arrays[:, :, k])
        # binnedFrame     = rebin(frame, new_shape=(nbaxis2, nbaxis1), operation='sum')
        #frame = ndimage.gaussian_filter(frame, sigma=(3, 3), order=0)

        binned_frame = frame.copy()

        if qmetric.lower() == 'laplace':
            if binning != 1:
                binned_frame = rebin(binned_frame, binning)
        # Custom "Laplace-like" quality array
            qframe = convolve2d(binned_frame, kernel, mode='same', boundary='symm')  # laplace(binnedFrame)
        # If using Gradient-entropy
        elif qmetric.lower() == 'entropy':
            # Clip background values so they do not contaminate too much the entropy at the limb
            binned_frame[binned_frame < 700] = 700
            if binning != 1:
                binned_frame = rebin(binned_frame, binning)

            gy, gx = np.gradient(binned_frame)
            binned_frame = np.sqrt(gx ** 2 + gy ** 2)
            binned_frame *= 255.0 / np.max(binned_frame)
            binned_frame = binned_frame.astype(np.uint8) #binned_frame.astype(np.uint16)
            qframe = entropy(binned_frame, disk(blk_size/binning))
        elif qmetric.lower() == 'rentropy':
            # Clip background values so they do not contaminate too much the entropy at the limb
            binned_frame[binned_frame < 700] = 700
            if binning != 1:
                binned_frame = rebin(binned_frame, binning)

            gy, gx = np.gradient(binned_frame)
            binned_frame = np.sqrt(gx ** 2 + gy ** 2)
            binned_frame *= 255.0 / np.max(binned_frame)
            binned_frame = binned_frame.astype(np.uint8)  # binned_frame.astype(np.uint16)
            # Do not calculate entropy here. This is done later for each block
            qframe = binned_frame


        qbinned_arrays[:, :, k] = qframe

    return qbinned_arrays
Example #41
from skimage import data
from skimage.util import img_as_ubyte
from skimage.filters.rank import entropy
from skimage.morphology import disk

# First example: object detection.

noise_mask = 28 * np.ones((128, 128), dtype=np.uint8)
noise_mask[32:-32, 32:-32] = 30

noise = (noise_mask * np.random.random(noise_mask.shape) - 0.5 *
         noise_mask).astype(np.uint8)
img = noise + 128

entr_img = entropy(img, disk(10))

fig, (ax0, ax1, ax2) = plt.subplots(nrows=1, ncols=3, figsize=(10, 4))

ax0.imshow(noise_mask, cmap='gray')
ax0.set_xlabel("Noise mask")
ax1.imshow(img, cmap='gray')
ax1.set_xlabel("Noisy image")
ax2.imshow(entr_img, cmap='viridis')
ax2.set_xlabel("Local entropy")

fig.tight_layout()

# Second example: texture detection.

image = img_as_ubyte(data.camera())
thresholding. In practice, you might want to define a region for tinting based
on segmentation results or blob detection methods.
"""

from skimage.filters import rank

# Square regions defined as slices over the first two dimensions.
top_left = (slice(100),) * 2
bottom_right = (slice(-100, None),) * 2

sliced_image = image.copy()
sliced_image[top_left] = colorize(image[top_left], 0.82, saturation=0.5)
sliced_image[bottom_right] = colorize(image[bottom_right], 0.5, saturation=0.5)

# Create a mask selecting regions with interesting texture.
noisy = rank.entropy(grayscale_image, np.ones((9, 9)))
textured_regions = noisy > 4
# Note that using `colorize` here is a bit more difficult, since `rgb2hsv`
# expects an RGB image (height x width x channel), but fancy-indexing returns
# a set of RGB pixels (# pixels x channel).
masked_image = image.copy()
masked_image[textured_regions, :] *= red_multiplier

fig, (ax1, ax2) = plt.subplots(ncols=2, nrows=1, figsize=(8, 4), sharex=True, sharey=True)
ax1.imshow(sliced_image)
ax2.imshow(masked_image)
ax1.set_adjustable('box-forced')
ax2.set_adjustable('box-forced')

plt.show()
Example #43
from skimage import data
from skimage.util import img_as_ubyte
from skimage.filters.rank import entropy
from skimage.morphology import disk

# First example: object detection.

noise_mask = 28 * np.ones((128, 128), dtype=np.uint8)
noise_mask[32:-32, 32:-32] = 30

noise = (noise_mask * np.random.random(noise_mask.shape) - 0.5 *
         noise_mask).astype(np.uint8)
img = noise + 128

entr_img = entropy(img, disk(10))

fig, (ax0, ax1, ax2) = plt.subplots(1, 3, figsize=(8, 3))

ax0.imshow(noise_mask, cmap=plt.cm.gray)
ax0.set_xlabel("Noise mask")
ax1.imshow(img, cmap=plt.cm.gray)
ax1.set_xlabel("Noisy image")
ax2.imshow(entr_img)
ax2.set_xlabel("Local entropy")

fig.tight_layout()

# Second example: texture detection.

image = img_as_ubyte(data.camera())
Example #44
import numpy as np

from skimage import data
from skimage.util import img_as_ubyte
from skimage.filters.rank import entropy
from skimage.morphology import disk

noise_mask = 28 * np.ones((128, 128), dtype=np.uint8)
noise_mask[32:-32, 32:-32] = 30

noise = (noise_mask * np.random.random(noise_mask.shape) - .5 *
         noise_mask).astype(np.uint8)
img = noise + 128

radius = 10
e = entropy(img, disk(radius))

fig, ax = plt.subplots(1, 3, figsize=(8, 5))
ax1, ax2, ax3 = ax.ravel()

ax1.imshow(noise_mask, cmap=plt.cm.gray)
ax1.set_xlabel('Noise mask')
ax2.imshow(img, cmap=plt.cm.gray)
ax2.set_xlabel('Noised image')
ax3.imshow(e)
ax3.set_xlabel('Local entropy ($r=%d$)' % radius)

# second example: texture detection

image = img_as_ubyte(data.camera())
Example #45
from skimage import data
from skimage.filters.rank import entropy
from skimage.morphology import disk
import numpy as np
import matplotlib.pyplot as plt

image = data.camera()

fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(10, 4), sharex=True, sharey=True)

fig.colorbar(ax1.imshow(image, cmap=plt.cm.gray), ax=ax1)
ax1.set_title('Image')
ax1.axis('off')
ax1.set_adjustable('box-forced')

fig.colorbar(ax2.imshow(entropy(image, disk(5)), cmap=plt.cm.gray), ax=ax2)
ax2.set_title('Entropy')
ax2.axis('off')
ax2.set_adjustable('box-forced')

######################################################################
#
# Implementation
# ==============
#
# The central part of the `skimage.rank` filters is built on a sliding window
# that updates the local gray-level histogram. This approach limits the
# algorithm complexity to O(n) where n is the number of image pixels. The
# complexity is also limited with respect to the structuring element size.
#
# In the following we compare the performance of different implementations
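
A rough way to observe this behaviour (a sketch with machine-dependent timings, not figures from the original text): the runtime of rank.entropy grows only mildly with the disk radius, since the local histogram is updated incrementally as the window slides.

from timeit import timeit

from skimage import data
from skimage.filters.rank import entropy
from skimage.morphology import disk
from skimage.util import img_as_ubyte

image = img_as_ubyte(data.camera())
for radius in (2, 5, 10, 20):
    t = timeit(lambda: entropy(image, disk(radius)), number=1)
    print('radius=%2d: %.3f s' % (radius, t))
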
Example #46
def poisson_disc(img, n, k=30):
    h, w = img.shape[:2]

    nimg = denoise_bilateral(img, sigma_range=0.15, sigma_spatial=15)
    img_gray = rgb2gray(nimg)
    img_lab = rgb2lab(nimg)

    entropy_weight = 2**(entropy(img_as_ubyte(img_gray), disk(15)))
    entropy_weight /= np.amax(entropy_weight)
    entropy_weight = gaussian_filter(dilation(entropy_weight, disk(15)), 5)

    color = [sobel(img_lab[:, :, channel])**2 for channel in range(1, 3)]
    edge_weight = functools.reduce(op.add, color) ** (1/2) / 75
    edge_weight = dilation(edge_weight, disk(5))

    weight = (0.3*entropy_weight + 0.7*edge_weight)
    weight /= np.mean(weight)
    weight = weight

    max_dist = min(h, w) / 4
    avg_dist = math.sqrt(w * h / (n * math.pi * 0.5) ** (1.05))
    min_dist = avg_dist / 4

    dists = np.clip(avg_dist / weight, min_dist, max_dist)

    def gen_rand_point_around(point):
        radius = random.uniform(dists[point], max_dist)
        angle = rand(2 * math.pi)
        offset = np.array([radius * math.sin(angle), radius * math.cos(angle)])
        return tuple(point + offset)

    def has_neighbours(point):
        point_dist = dists[point]
        distances, idxs = tree.query(point,
                                    len(sample_points) + 1,
                                    distance_upper_bound=max_dist)

        if len(distances) == 0:
            return True

        for dist, idx in zip(distances, idxs):
            if np.isinf(dist):
                break

            if dist < point_dist and dist < dists[tuple(tree.data[idx])]:
                return True

        return False

    # Generate first point randomly.
    first_point = (rand(h), rand(w))
    to_process = [first_point]
    sample_points = [first_point]
    tree = KDTree(sample_points)

    while to_process:
        # Pop a random point.
        point = to_process.pop(random.randrange(len(to_process)))

        for _ in range(k):
            new_point = gen_rand_point_around(point)

            if (0 <= new_point[0] < h and 0 <= new_point[1] < w
                    and not has_neighbours(new_point)):
                to_process.append(new_point)
                sample_points.append(new_point)
                tree = KDTree(sample_points)
                if len(sample_points) % 1000 == 0:
                    print("Generated {} points.".format(len(sample_points)))

    print("Generated {} points.".format(len(sample_points)))

    return sample_points
 def _make_edges(self):
     self.current_image = rgb2gray(self.current_image)
     self.current_image = equalize_hist(self.current_image)
     self.current_image = entropy(self.current_image,disk(4))
Example #48
 def _make_feature(self):
     self.current_image = rgb2gray(self.current_image)
     #self.current_image = equalize_hist(self.current_image)
     self.current_image = entropy(self.current_image,disk(5))
Example #49
Image entropy is a quantity which is used to describe the amount of information
coded in an image.

"""
import matplotlib.pyplot as plt

from skimage import data
from skimage.filters.rank import entropy
from skimage.morphology import disk
from skimage.util import img_as_ubyte


image = img_as_ubyte(data.camera())

fig, (ax0, ax1) = plt.subplots(
    ncols=2, figsize=(10, 4), sharex=True, sharey=True, subplot_kw={"adjustable": "box-forced"}
)

img0 = ax0.imshow(image, cmap=plt.cm.gray)
ax0.set_title("Image")
ax0.axis("off")
fig.colorbar(img0, ax=ax0)

img1 = ax1.imshow(entropy(image, disk(5)), cmap=plt.cm.jet)
ax1.set_title("Entropy")
ax1.axis("off")
fig.colorbar(img1, ax=ax1)

plt.show()
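
For comparison with the local filter above, the global Shannon entropy of the whole image can be computed with skimage.measure.shannon_entropy; the rank filter evaluates the same quantity over a small neighbourhood around each pixel (a small add-on sketch, not part of the original gallery example).

from skimage import data
from skimage.measure import shannon_entropy
from skimage.util import img_as_ubyte

image = img_as_ubyte(data.camera())
print(shannon_entropy(image, base=2))  # a single value in bits for the whole image
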
Example #50
import matplotlib.pyplot as plt

from skimage import data
from skimage.filters.rank import entropy
from skimage.morphology import disk
from skimage.util import img_as_ubyte

if __name__ == "__main__":
    
    image = img_as_ubyte(data.camera())
    #image = img_as_ubyte(data.checkerboard())

    #image_entropy = entropy(image,disk(5))    
    image_entropy = entropy(image,disk(10))
    #image_entropy = entropy(image,disk(15))
    
    fig, (ax0, ax1) = plt.subplots(ncols=2,figsize=(10,4))
    
    img0 = ax0.imshow(image, cmap=plt.cm.gray)
    ax0.set_title('image')
    ax0.axis('off')
    fig.colorbar(img0,ax=ax0)
    
    img1 = ax1.imshow(image_entropy, cmap=plt.cm.jet)
    ax1.set_title('entropy')
    ax1.axis('off')
    fig.colorbar(img1,ax=ax1)
    
    plt.show()
    
Example #51
def get_patch_and_texture_features(img, contours):
    """
    Features for each patch candidate. Takes some time (~tens of seconds)

    :param img:
    :param contours:
    :return:
    """
    bw_mask = np.zeros(img.shape[0:2], np.int32)
    for c in range(len(contours)):
        cv2.drawContours(bw_mask, contours, c, c + 1, -1)

    img_y = cv2.cvtColor(img, cv2.COLOR_RGB2YCrCb)
    # 1-3
    features = []
    for k in range(3):
        fts = []
        for c in range(len(contours)):
            avg_ch = np.average(img_y[:, :, k], weights=(bw_mask == (c + 1)))
            fts.append(avg_ch)
        features.append(np.array(fts).reshape((-1, 1)))
    # 4 - 15
    # TODO Slightly relevant BUT slooooow. Replace them with something convolutional like Law energy + some pyramidal operations
    f2_tmp = []
    for c in range(len(contours)):
        fts = []
        for k in range(3):
            for m in [3, 7, 15, 25]:
                se = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (m, m))
                ent_img = skrank.entropy(img_y[:, :, k], se, mask=(bw_mask == (c + 1)))
                f = np.average(ent_img, weights=(bw_mask == (c + 1)))
                fts.append(f)
        f2_tmp.append(np.array(fts).reshape((1, -1)))
    features.append(np.vstack(f2_tmp))

    # TODO Standardize the histogram for the whole image first. Most bins are empty but some are pretty relevant
    # TODO !!!  RERUN THE WHOLE EXPERIMENTS AFTERWARDS to see if there is an improvement or not !!!
    # 16 - 111
    for k in range(3):
        fts = []
        for c in range(len(contours)):
            H = cv2.calcHist([img_y], [k], (bw_mask == (c + 1)).astype(np.uint8), [32], [0, 255])
            fts.append(H.flatten())
        features.append(np.vstack(fts))

    # 112 - 118
    # shape features
    fts = []
    for c in range(len(contours)):
        M = cv2.moments(contours[c])
        Hu = cv2.HuMoments(M)
        fts.append(Hu.T)
    features.append(np.vstack(fts))

    # 119
    # TODO Highly relevant, Maybe extract a histogram or sth?
    fts = []
    for c in range(len(contours)):
        hull = cv2.convexHull(contours[c], returnPoints=False)
        defects = cv2.convexityDefects(contours[c], hull)
        def_avg = np.mean(defects[:, 0, 3]) / 256.0
        fts.append(np.array(def_avg))
    features.append(np.vstack(fts))

    features = np.hstack(features)
    # print features.shape
    return features
Example #52
		g = image[:,:,1]
		b = image[:,:,2]

		# Calculate new features
		# HSV
		hsv_image = rgb2hsv(image)
		h = hsv_image[:,:,0]
		s = hsv_image[:,:,1]
		v = hsv_image[:,:,2]
		# Greyscale - used for some calculations
		grey_image = rgb2grey(image)
		# Equalize
		grey_image_global_equalize = exposure.equalize_hist(grey_image)

		# Entropy
		image_entropy = entropy(grey_image_global_equalize, disk(5))
		# Edges
		image_edges = feature.canny(grey_image_global_equalize)

		# Aggregate the new features, per superpixel
		for superpixel_i in range(num_superpixels):
			# Get a mask for this specific superpixel
			superpixel = (superpixels_mask == superpixel_i)
			pixel_count = np.sum(superpixel)
			# Compute RGB-related features
			r_sp = image[superpixel, 0]
			g_sp = image[superpixel, 1]
			b_sp = image[superpixel, 2]
			r_avg = np.average(r) # R
			g_avg = np.average(g) # G
			b_avg = np.average(b) # B
Example #53
def dwtSlide(filePath,patchRadius,features):

    im = Image.open(filePath)
    im = np.asarray(im.convert('L'))# * (1.0/255.0)
    #im = np.asarray(im)
    print(im.shape)
    #l=lp
    im = rescale(im,0.25)#reduce the size of the image for speed
    print("in the dwt")
    
    #print im2.shape
    if features=='entropy'or features=='combinedEntSob':
        coeffStore = entropy(im,disk(10))
        coeffStore = coeffStore.reshape(-1,1)
        coeffStore11 = entropy(im,disk(8))
        coeffStore11 = coeffStore11.reshape(-1,1)
        coeffStore12 = entropy(im,disk(6))
        coeffStore12 = coeffStore12.reshape(-1,1)
        print("in the dwt done first")
        
        coeffStore2 = entropy(im,disk(5))
        coeffStore2 = coeffStore2.reshape(-1,1)
        coeffStore3 = entropy(im,disk(4))
        coeffStore3 = coeffStore3.reshape(-1,1)
        coeffStore4 = entropy(im,disk(3))
        coeffStore4 = coeffStore4.reshape(-1,1)
        coeffStore5 = entropy(im,disk(2))
        coeffStore5 = coeffStore5.reshape(-1,1)
        coeffStore = np.hstack([coeffStore,coeffStore11,coeffStore12,coeffStore2,coeffStore3,coeffStore4,coeffStore5])
    elif features=='dwt'or features=='combinedDwtSob':
    
        imPad = np.pad(im,patchRadius,"symmetric")
        coeffStore = np.zeros((im.shape[0]*im.shape[1],8))
        for i in range(im.shape[0]):#range(1):#range(im.shape[0]):
            print( i )
            if i % 100 ==0 and i>1:
                print(coeffVector)
                print(cA2)
                print(cH1)
            #if i % 400 == 0 and i>1:
            #    l=lp
            for j in range(im.shape[1]):
                #print (j)
                patch = imPad[i:i+patchRadius*2+1,j:j+patchRadius*2+1]
                coeffs = pywt.wavedec2(patch, 'db1', level=1)
                cA2, (cH1, cV1, cD1) = coeffs
                cAMean = np.mean(cA2)
                cAVar = np.var(cA2)
                cH1Mean = np.mean(cH1)
                cH1Var = np.var(cH1)
                cV1Mean = np.mean(cV1)
                cV1Var = np.var(cV1)
                cD1Mean = np.mean(cD1)
                cD1Var = np.var(cD1)
                coeffVector = np.hstack([cAMean,cAVar,cH1Mean,cH1Var,cV1Mean,cV1Var,cD1Mean,cD1Var])
                #coeffVector = np.hstack([np.array(coeffs[0]).reshape(1,-1),np.array(coeffs[1]).reshape(1,-1)])
                ##coeffVector = np.hstack([coeffVector,np.array(coeffs[2]).reshape(1,-1)])
                #print coeffVector
                #if i == 0 and j ==0:
                #    coeffStore = coeffVector
                #else:
                #print j
                #print im.shape
                coeffStore[(i)*im.shape[1]+(j)]=coeffVector #np.vstack([coeffStore,coeffVector])
                #flatMaskArray = maskArray.reshape(maskArray.shape[0]*maskArray.shape[1])
                #X = np.dstack([foreGroundSamples[foreGroundIndices,...],backGroundSamples[backGroundIndices,...]])
        print(coeffStore.shape)
        print(coeffVector)
        print(np.max(coeffStore[:,0]))
        print(np.max(coeffStore[:,1]))
        print(np.max(coeffStore[:,2]))
        print(np.max(coeffStore[:,3]))
        print(np.mean(coeffStore[:,0]))
        print(np.mean(coeffStore[:,1]))
        print(np.mean(coeffStore[:,2]))
        print(np.mean(coeffStore[:,3]))
        
        coeffStore[:,0] = (coeffStore[:,0]-np.mean(coeffStore[:,0]))/np.var(coeffStore[:,0])
        coeffStore[:,1] = (coeffStore[:,1]-np.mean(coeffStore[:,1]))/np.var(coeffStore[:,1])
        coeffStore[:,2] = (coeffStore[:,2]-np.mean(coeffStore[:,2]))/np.var(coeffStore[:,2])
        coeffStore[:,3] = (coeffStore[:,3]-np.mean(coeffStore[:,3]))/np.var(coeffStore[:,3])
        coeffStore[:,4] = (coeffStore[:,4]-np.mean(coeffStore[:,4]))/np.var(coeffStore[:,4])
        coeffStore[:,5] = (coeffStore[:,5]-np.mean(coeffStore[:,5]))/np.var(coeffStore[:,5])
        coeffStore[:,6] = (coeffStore[:,6]-np.mean(coeffStore[:,6]))/np.var(coeffStore[:,6])
        coeffStore[:,7] = (coeffStore[:,7]-np.mean(coeffStore[:,7]))/np.var(coeffStore[:,7])
        
        print(np.max(coeffStore[:,0]))
        print(np.max(coeffStore[:,1]))
        print(np.max(coeffStore[:,2]))
        print(np.max(coeffStore[:,3]))
        print(np.var(coeffStore[:,0]))
        print(np.mean(coeffStore[:,1]))
        print(np.var(coeffStore[:,2]))
        print(np.mean(coeffStore[:,3]))
        #l=lp    
        #l=lp
        #ab = np.array([[5,4],[2,3]])
        #c = np.pad(ab,2,"symmetric")
        #print(c)
        #print "python2"
        
    
    
    
    #coeffs = pywt.wavedec2(np.ones((8,8)), 'db1', level=2)
    ##cA2, (cH2, cV2, cD2), (cH1, cV1, cD1) = coeffs
    #print(cH1)
    return coeffStore