Example #1
    def average_hough_detections(self, hough_radii, hough_res, num_best=5):
        """
        Smooths `num_best` hough detections with Gaussian and
        computes weighted average across the `num_best` hough
        detections to get more precise center_x, center_y and
        radius of circle
        """

        centers = []
        accums = []
        radii = []

        for radius, h in zip(hough_radii, hough_res):
            # For each radius, smooth the accumulator and extract the strongest peak
            h_smooth = skifilt.gaussian_filter(h, sigma=4)
            num_peaks = 1
            peaks = skif.peak_local_max(h_smooth, min_distance=40, num_peaks=num_peaks)
            centers.extend(peaks)
            accums.extend(h_smooth[peaks[:, 0], peaks[:, 1]])
            radii.extend([radius] * num_peaks)

        h_sum = np.sum([skifilt.gaussian_filter(x, sigma=2)
                        for x in hough_res[np.argsort(accums)[::-1][:num_best]]], axis=0)

        peaks = skif.peak_local_max(h_sum, min_distance=40, num_peaks=num_peaks)

        center_x, center_y = peaks[0]

        max_sel = [np.max(x.ravel()) for x in hough_res[np.argsort(accums)[::-1][:num_best]]]
        radii_sel = [radii[i] for i in np.argsort(accums)[::-1][:num_best]]

        radius = sum([m * r for m, r in zip(max_sel, radii_sel)]) / float(sum(max_sel))

        return center_x, center_y, int(radius)
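A minimal usage sketch for the method above, assuming a precomputed edge map `edges` and an object `detector` exposing average_hough_detections (both hypothetical):

import numpy as np
from skimage.transform import hough_circle

hough_radii = np.arange(20, 60, 2)            # candidate radii, in pixels
hough_res = hough_circle(edges, hough_radii)  # one accumulator image per radius
cx, cy, r = detector.average_hough_detections(hough_radii, hough_res)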
Example #2
    def _preprocess(self, frame,
                    contrast=True, blur=1, denoise=0):
        """
            1. convert frame to grayscale
            2. remove noise from frame. increase denoise value for more noise filtering
            3. stretch contrast
        """

        frm = grayspace(frame) * 255
        frm = frm.astype('uint8')

        self.preprocessed_frame = frame
        # if denoise:
        #     frm = self._denoise(frm, weight=denoise)
        # print 'gray', frm.shape
        if blur:
            frm = gaussian_filter(frm, blur) * 255
            frm = frm.astype('uint8')

            frm1 = gaussian_filter(self.preprocessed_frame, blur,
                                   multichannel=True) * 255
            self.preprocessed_frame = frm1.astype('uint8')

        if contrast:
            frm = self._contrast_equalization(frm)
            self.preprocessed_frame = self._contrast_equalization(
                    self.preprocessed_frame)

        return frm
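Note that skimage.filters.gaussian_filter was later renamed to gaussian. A rough standalone sketch of the same grayscale / blur / contrast steps with the newer name, where rgb2gray and equalize_hist stand in for the snippet's own grayspace and _contrast_equalization helpers (assumptions, not the original code):

from skimage.color import rgb2gray
from skimage.filters import gaussian
from skimage import exposure

def preprocess(frame, blur=1, contrast=True):
    frm = (rgb2gray(frame) * 255).astype('uint8')
    if blur:
        frm = (gaussian(frm, sigma=blur) * 255).astype('uint8')
    if contrast:
        frm = exposure.equalize_hist(frm)  # histogram equalization, returns floats in [0, 1]
    return frm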
Example #3
def face_extract_gauss_2(img, m=2):
    '''
    DESCRIPTION:
    Try to extract the face ellipse from the image.

    INPUT:
    img is a color image.
    '''
    # imm=fftElips(img)
    img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    mm = mean(img)
    img1 = copy(img)
    img2 = copy(img)
    img1[img1 < mm] = 0
    img2[img2 > mm] = 0
    img11 = filter.gaussian_filter(img1, sigma=m)
    img21 = filter.gaussian_filter(img2, sigma=m)
    imgNew = zeros(shape(img))
    for i in range(shape(img)[0]):
        for j in range(shape(img)[1]):
            if (img1[i, j] == 0):
                imgNew[i, j] = img11[i, j]
            if (img2[i, j] == 0):
                imgNew[i, j] = img21[i, j]

    # imm = filter.gaussian_filter(img, sigma=m)
    return(imgNew, img1, img11, img2, img21)  # imm
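The per-pixel loop above can be replaced with boolean indexing; a vectorized sketch that should reproduce the same selection (pixels zeroed in img1 take the blurred img11 value, pixels zeroed in img2 take the blurred img21 value):

imgNew = zeros(shape(img))
imgNew[img1 == 0] = img11[img1 == 0]
imgNew[img2 == 0] = img21[img2 == 0]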
Example #4
    def _preprocess(self, frame, contrast=True, blur=1, denoise=0):
        """
            1. convert frame to grayscale
            2. remove noise from frame. increase denoise value for more noise filtering
            3. stretch contrast
        """

        frm = grayspace(frame) * 255
        frm = frm.astype('uint8')

        self.preprocessed_frame = frame
        # if denoise:
        #     frm = self._denoise(frm, weight=denoise)
        # print 'gray', frm.shape
        if blur:
            frm = gaussian_filter(frm, blur) * 255
            frm = frm.astype('uint8')

            frm1 = gaussian_filter(
                self.preprocessed_frame, blur, multichannel=True) * 255
            self.preprocessed_frame = frm1.astype('uint8')

        if contrast:
            frm = self._contrast_equalization(frm)
            self.preprocessed_frame = self._contrast_equalization(
                self.preprocessed_frame)

        return frm
Example #5
 def func(dframe):
     frame1, frame2 = dframe[0], dframe[1]
     tmp1 = frame1 - gaussian_filter(frame1,sgm1)
     tmp1 = gaussian_filter(tmp1*tmp1,sgm2)
     tmp2 = frame2 - gaussian_filter(frame2,sgm1)
     tmp2 = gaussian_filter(tmp2*tmp2,sgm2)
     ret = (tmp1*frame1 + tmp2*frame2)/(tmp1+tmp2)  # blend the two frames, weighted by their local contrast maps
     ret = ret.astype(frame1.dtype)
     return ret
Example #6
    def difference_of_gaussian(self, imin, bigsize=30.0, smallsize=3.0):
        g1 = filters.gaussian_filter(imin, bigsize)
        g2 = filters.gaussian_filter(imin, smallsize)
        diff = 255 * (g1 - g2)

        diff[diff < 0] = 0.0
        diff[diff > 255.0] = 255.0
        diff = diff.astype(np.uint8)

        return diff
Example #7
 def blur(image):
     new_image = np.zeros(image.shape, dtype=float)
     sigma = 2
     if len(image.shape) == 3:
         # We have an RGB image: blur each channel separately.
         for i in range(image.shape[2]):
             new_image[:, :, i] = gaussian_filter(image[:, :, i], sigma)
     else:
         new_image = gaussian_filter(image, sigma)
     return new_image
Example #8
    def difference_of_gaussian(self, imin, bigsize=30.0, smallsize=3.0):
        g1 = filters.gaussian_filter(imin, bigsize)
        g2 = filters.gaussian_filter(imin, smallsize)
        diff = 255*(g1 - g2)

        diff[diff < 0] = 0.0
        diff[diff > 255.0] = 255.0
        diff = diff.astype(np.uint8) 
               
        return diff
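Newer skimage releases (around 0.18 onwards, if memory serves) ship a band-pass helper covering the same idea as the two difference_of_gaussian snippets above; a minimal sketch for a 2-D float image imin (note it takes the small sigma first):

import numpy as np
from skimage.filters import difference_of_gaussians

dog = difference_of_gaussians(imin, low_sigma=3.0, high_sigma=30.0)
dog = np.clip(255 * dog, 0, 255).astype(np.uint8)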
Example #9
def smoothing_gauss(data, sigma=1, pseudo_3D=True, sliceId=2):
    if data.ndim == 3 and pseudo_3D:
        if sliceId == 2:
            for idx in range(data.shape[2]):
                temp = skifil.gaussian_filter(data[:, :, idx], sigma=sigma)
                data[:, :, idx] = (255 * temp).astype(np.uint8)
        elif sliceId == 0:
            for idx in range(data.shape[0]):
                temp = skifil.gaussian_filter(data[idx, :, :], sigma=sigma)
                data[idx, :, :] = (255 * temp).astype(np.uint8)
    else:
        data = skifil.gaussian_filter(data, sigma=sigma)
        data = (255 * data).astype(np.uint8)
    return data
Example #10
def smoothing_gauss(data, sigma=1, pseudo_3D=True, sliceId=2):
    if data.ndim == 3 and pseudo_3D:
        if sliceId == 2:
            for idx in range(data.shape[2]):
                temp = skifil.gaussian_filter(data[:, :, idx], sigma=sigma)
                data[:, :, idx] = (255 * temp).astype(np.uint8)
        elif sliceId == 0:
            for idx in range(data.shape[0]):
                temp = skifil.gaussian_filter(data[idx, :, :], sigma=sigma)
                data[idx, :, :] = (255 * temp).astype(np.uint8)
    else:
        data = skifil.gaussian_filter(data, sigma=sigma)
        data = (255 * data).astype(np.uint8)
    return data
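A hypothetical call for the function above, smoothing a (rows, cols, slices) stack slice by slice:

import numpy as np

volume = (255 * np.random.rand(64, 64, 10)).astype(np.uint8)  # dummy data
smoothed = smoothing_gauss(volume, sigma=2, pseudo_3D=True, sliceId=2)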
Example #11
def main():

  print 'loading image and preprocessing'
  # image preprocessing 
  image = rescale(io.imread(IMAGE_PATH, as_grey=True), RESCALE_FACTOR)
  image_no_blur = image
  image = gaussian_filter(image, GAUSSIAN_BLUR)
  print 'image of size ' + str(image.shape) + ' after resizing'

  # k sanity check 
  print 'k = %.2f - should be approx %.2f' % (k, np.mean(image.shape))

  # make graphs from image
  print 'constructing graph'
  grid_graph = get_graph_from_image(image)
  segment_graph = nx.Graph()
  segment_graph.add_nodes_from(grid_graph.nodes())
  sorted_edges = sorted(grid_graph.edges(data=True), key=lambda (u,v,d): d['weight'])

  # run algo 
  print 'segmenting image graph'
  segment_graph = segment(segment_graph, sorted_edges)
  
  # visualize
  print 'visualizing'
  visualize(image_no_blur, segment_graph)
Example #12
	def run(self):
		if not os.path.exists(self.output_dir):
			os.makedirs(self.output_dir)
			print 'USER INFO: Creando carpeta de listas de archivos con imagenes en ' + self.output_dir
		
		resultado = []

		try:
			list_jpgs = os.listdir(os.path.join(self.input().path, "jpg"))
					
			for jpg in list_jpgs:
				try:
					# Build the path for each jpg
					ruta_jpg = os.path.join(self.input().path,"jpg",jpg)
					# open each jpg and check its variance; keep it if the variance is high
					pagina = io.imread(ruta_jpg)
					pagina = rgb2gray(pagina)
					pagina = gaussian_filter(pagina,sigma=2) # A filter that gives an average of the image
					if pagina.var() > self.varianza:
						resultado.append(ruta_jpg)
				except:
					pass
		except:
			pass		
		
		resultado = '\n'.join(resultado)
		with self.output().open("w") as f:
			f.write(resultado)
Example #13
def segment_cells(frame, mask=None):
    """
    Compute the initial segmentation based on ridge detection + watershed.
    This works reasonably well, but is not robust enough to use by itself.
    """
    
    blurred = filters.gaussian_filter(frame, 2)
    ridges = enhance_ridges(frame)
    
    # threshold ridge image
    thresh = filters.threshold_otsu(ridges)
    thresh_factor = 0.6
    prominent_ridges = ridges > thresh_factor*thresh
    prominent_ridges = morphology.remove_small_objects(prominent_ridges, min_size=256)
    prominent_ridges = morphology.binary_closing(prominent_ridges)
    prominent_ridges = morphology.binary_dilation(prominent_ridges)
    
    # skeletonize
    ridge_skeleton = morphology.medial_axis(prominent_ridges)
    ridge_skeleton = morphology.binary_dilation(ridge_skeleton)
    ridge_skeleton *= mask
    ridge_skeleton -= mask
    
    # label
    cell_label_im = measure.label(ridge_skeleton)
    
    # morphological closing to fill in the cracks
    for cell_num in range(1, cell_label_im.max()+1):
        cell_mask = cell_label_im==cell_num
        cell_mask = morphology.binary_closing(cell_mask, disk(3))
        cell_label_im[cell_mask] = cell_num
    
    return cell_label_im 
Example #14
def enhance_ridges(frame):
    """A ridge detection filter (larger hessian eigenvalue)"""
    blurred = filters.gaussian_filter(frame, 2)
    sigma = 4.5
    Hxx, Hxy, Hyy = feature.hessian_matrix(blurred, sigma=sigma, mode='nearest')
    ridges = feature.hessian_matrix_eigvals(Hxx, Hxy, Hyy)[0]
    return np.abs(ridges)
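Newer skimage versions expose dedicated ridge filters built on the same Hessian-eigenvalue idea; a rough (not bit-identical) alternative sketch for a 2-D float image frame:

from skimage import filters

ridges = filters.sato(frame, sigmas=[4.5], black_ridges=False)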
Example #15
def process_single(path, called_from_batch=False):
    '''Process a single image. Works best if the image is 3-D (RGB); this may be required.'''
    print('%s~~~ Image file: %s ~~~' % (os.linesep, path))
    print('** Reading Image and preparing it for processing')
    img = imread(path)
    img = normalize(filters.gaussian_filter(img, sigma=0.5, multichannel=True), 255)
    sm_obj_cutoff = .0008*img.shape[0]*img.shape[1]
    im = rgb2gray(img)
    print('** Running square detector')
    seg = square_detector_3d(img, im, ecc=0.8, ext=0.0, mask_size=15, sm_obj_cutoff=sm_obj_cutoff)
    old_seg = seg.copy()
    print('** Finding Standard Spots')
    lab_stds = find_standards(seg)
    standards = (lab_stds == 2).astype(bool).astype(np.uint8)
    samples = (lab_stds == 1).astype(bool).astype(np.uint8)
    # remove false positives
    print('** Removing abnormal samples')
    samples = remove_abnormal_samples(samples)

    # rebuild sample spots to disks
    print('** Rebuilding spots')
    standards = rebuild_spots(standards, scale=0.32)
    samples = rebuild_spots(samples, scale=0.32)
    
    # erode spots a bit just in case they are too big
#    standards = erode_spots(standards, n=5, selem=morphology.disk(3))
#    samples = erode_spots(samples, n=5, selem=morphology.disk(3))

    print('** Plotting results')
    plot_result(img, standards, samples, path=path)

    print('~~Finished Processing Image~~%s%s'%(os.linesep,os.linesep))
Example #16
def split_label(binary):
    '''Split label using watershed algorithm'''

#    blur_radius = np.round(np.sqrt(min_size)/8).astype(int)
#    print blur_radius

    distance = distance_transform_edt(binary)
#    distance_blured = gaussian_filter(distance, blur_radius)
    distance_blured = gaussian_filter(distance, 8)

#    selem = disk(2)

    local_maxi = peak_local_max(distance_blured, indices=False, labels=binary, min_distance = 10, exclude_border = False)
    markers = measure_label(local_maxi)

    labels_ws = watershed(-distance, markers, mask=binary)

#    selem_morph = np.array([0,1,0,1,1,1,0,1,0], dtype=bool).reshape((3,3))

#    for i in (1,2):
#        maxi = binary_dilation(local_maxi, selem_morph)

#    imsave('/home/varnivey/Data/Biophys/Burnazyan/Experiments/fluor_calc/test/distance.jpg', distance)
#    imsave('/home/varnivey/Data/Biophys/Burnazyan/Experiments/fluor_calc/test/maxi.jpg', local_maxi*255)

    return labels_ws
Example #17
def test_RGB():
    img = gaussian_filter(data.text(), 1)
    imgR = np.zeros((img.shape[0], img.shape[1], 3))
    imgG = np.zeros((img.shape[0], img.shape[1], 3))
    imgRGB = np.zeros((img.shape[0], img.shape[1], 3))
    imgR[:, :, 0] = img
    imgG[:, :, 1] = img
    imgRGB[:, :, :] = img[:, :, None]
    x = np.linspace(5, 424, 100)
    y = np.linspace(136, 50, 100)
    init = np.array([x, y]).T
    snake = active_contour(imgR, init, bc='fixed',
            alpha=0.1, beta=1.0, w_line=-5, w_edge=0, gamma=0.1)
    refx = [5, 9, 13, 17, 21, 25, 30, 34, 38, 42]
    refy = [136, 135, 134, 133, 132, 131, 129, 128, 127, 125]
    assert_equal(np.array(snake[:10, 0], dtype=np.int32), refx)
    assert_equal(np.array(snake[:10, 1], dtype=np.int32), refy)
    snake = active_contour(imgG, init, bc='fixed',
            alpha=0.1, beta=1.0, w_line=-5, w_edge=0, gamma=0.1)
    assert_equal(np.array(snake[:10, 0], dtype=np.int32), refx)
    assert_equal(np.array(snake[:10, 1], dtype=np.int32), refy)
    snake = active_contour(imgRGB, init, bc='fixed',
            alpha=0.1, beta=1.0, w_line=-5/3., w_edge=0, gamma=0.1)
    assert_equal(np.array(snake[:10, 0], dtype=np.int32), refx)
    assert_equal(np.array(snake[:10, 1], dtype=np.int32), refy)
Example #18
    def curr_state(self):
        self.game_process.sendline(encode_obj({
            'type': 'state_dict'
        }))
        self.game_process.expect('output>')
        raw_data = self.game_process.readline()
        state_dict_embed = decode_obj(raw_data)
        # create state_matrix from state_dict.
        state_dict = {}
        state_stack = []
        for (key, value) in state_dict_embed.items():
            for rect in value:
                (x, xe, y, ye) = rect
                if key not in state_dict:
                    state_dict[key] = np.zeros((SCREEN_HEIGHT, SCREEN_WIDTH))
                state_dict[key][y:ye, x:xe] = 1.
            # resize the representation to 32x32.
            MAP_SIZE = 32
            filter_sigma = np.sqrt((SCREEN_HEIGHT / MAP_SIZE) ** 2 + (SCREEN_WIDTH / MAP_SIZE) ** 2)
            filtered = gaussian_filter(state_dict[key], sigma=filter_sigma)
            resized = resize(filtered, (32, 32), preserve_range=True)
            # normalize so that each channel has same strength.
            resized = resized / (1e-4 + np.max(resized))
            state_dict[key] = resized
            # add to feature representation.
            state_stack.append(state_dict[key])

        return np.array(state_stack)
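The Gaussian blur before resize above acts as an anti-aliasing step; newer skimage can fold it into the resize call itself. A sketch, assuming a 2-D float array channel of shape (SCREEN_HEIGHT, SCREEN_WIDTH):

from skimage.transform import resize

resized = resize(channel, (32, 32), preserve_range=True, anti_aliasing=True)
resized = resized / (1e-4 + resized.max())  # normalize channel strength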
Example #19
	def run(self):
		if not os.path.exists(self.output_dir):
			os.makedirs(self.output_dir)
			print 'USER INFO: Creando carpeta de listas de archivos con imágenes en ' + self.output_dir
		jpgs_lib = os.listdir(self.input().path)
		resultado = []
		for jpg in jpgs_lib:
			ruta_jpg = os.path.join(self.input().path,jpg)
			print '====================='
			print 'USER INFO: ' + ruta_jpg
			try:
				pagina = misc.imread(ruta_jpg)
				pagina = rgb2gray(pagina)
				pagina = gaussian_filter(pagina,sigma=2) # A filter that gives an average of the image
				if pagina.var() > self.varianza:
					resultado.append(jpg.replace('.jpg',''))
				print 'USER INFO: Var = ', pagina.var()
				print '====================='
				# resultado.append('basura')
			except:
				print 'USER WARNING: No se pudo leer la imagen ' + ruta_jpg

		resultado = '\n'.join(resultado)
		with self.output().open("w") as f:
			f.write(resultado)


# if __name__ == '__main__':
	# luigi.run()
Example #20
 def myfilter(self, data):
     if self.filter == 'sobel':
         return util.img_as_int(filters.sobel(data))
     elif self.filter == 'otsu':
         thresh = filters.threshold_otsu(data)
         return util.img_as_ubyte(data > thresh)
     elif self.filter == '阈值分割':  # '阈值分割' = threshold segmentation
         thresh = self.thresholdvalue * data.max() / 100.0
         return util.img_as_ubyte(data > thresh)
     elif self.filter == 'canny edge':
         temp = util.img_as_ubyte(
             feature.canny(data, low_threshold=30, high_threshold=40))
         return temp
     elif self.filter == 'watershed':
         mask = util.img_as_ubyte(filters.gaussian_filter(data, 0.4))
         markers = feature.canny(data, low_threshold=30, high_threshold=40)
         markers = ndi.label(markers)[0]
         idata = filters.rank.gradient(data, morphology.disk(1))
         temp = morphology.watershed(data, markers, mask=mask)
         # hsv=color.convert_colorspace(temp,'L','RGB')
         # io.imshow(hsv)
         return temp
     elif self.filter == 'test':
         data = util.img_as_ubyte(filters.median(data, morphology.disk(2)))
         return data
Example #21
def blur_img_gauss(img, sigma=1, idKeys=None):
    warnings.warn(
        '''blur_img_gauss is deprecated and
                      will not be supported in future versions.
                      ''', DeprecationWarning)
    blur_img = filters.gaussian_filter(img, sigma=sigma)
    return blur_img
Example #22
def processImage(CID,folder,binarize,blur,padding,size,noise=False,image=None):
    
    if not (CID is None):
        image   = misc.imread(folder+CID+".sdf",flatten=True)
        #misc.imsave("../"+CID+"temp.jpg",image)
    else:
        CID     = "temp"

    #print image.shape, "image read in"
    image   = imStandardize(image)
    #misc.imsave("../"+CID+"temp2.jpg",image)
    #print "image standardized"
    output  = np.zeros((size,size))


    if blur > 0:
        image   = filters.gaussian_filter(image,blur)
        #print "image blurred"
    pad     = 0  # default when no random padding is applied
    if padding == "random":
        image   = removePadding(image)
        pad     = int(np.random.rand()*20)
        image   = myResize(image,size-pad)
        #print "padding added"
    if binarize:
        image   = np.where(image > 0.2,1.0,0.0)
        #print "binarized"
        
        
    d   = int(pad/2)
    output [d:d+image.shape[0],d:d+image.shape[1]]  = image
    if noise:
        output  = 0.10*np.max(image)*np.random.rand(output.shape[0], output.shape[1]) + output        
        #output   = np.where(output == 0., 0.1*np.random.rand(),output)
    return output
Example #23
def correct_illumination(grayscale, sigma=400, pickle_me=False):

    '''
    Applies a Gaussian (low pass) filter with a large sigma (default sigma=400)
    to estimate uneven illumination across a grayscale image and correct it.
    This function overcorrects near objects with large image gradients and is
    optimized for use with 16 bit images recorded using a 12 bit camera.

    Inputs:
    -------
    grayscale: A grayscale image loaded into a NumPy array with type np.uint16

    sigma: The standard deviation used for applying the Gaussian filter,
        sigma > 50 is strongly recommended

    pickle_me: Boolean, dumps NumPy arrays of intermediate images to pickle
        files in the current working directory if True

    Outputs:
    --------
    corrected: An illumination corrected NumPy array

    '''
    # sigma = 350 to 400 looks best
    # 65535 is the max value of np.uint16 data type
    background = (gaussian_filter(grayscale, sigma=sigma)*65535).astype(np.uint16)
    inverted = 4096 - background  # inverts light and dark areas
    corrected = (grayscale + inverted)/2

    if pickle_me:
        background.dump('est_background.p')
        inverted.dump('inverted_back.p')
        corrected.dump('corrected.p')

    return(corrected)
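A hypothetical usage sketch for the function above (the file name is made up), assuming a 12-bit acquisition stored as uint16:

from skimage import io

img = io.imread('example_slide.tif')        # hypothetical path, dtype np.uint16
flat = correct_illumination(img, sigma=400)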
Example #24
def joint_bilateral_filter(sdm, im, spatial_domain, sr):
    wr = int(math.ceil(spatial_domain * 4) + 1)
    padding = (wr - 1) // 2
    padn = ((padding, padding), (padding, padding), (0, 0))
    padded_im = np.pad(im, padn, mode='edge')
    patch_im = rolling_window(padded_im, (wr, wr))

    if sdm.ndim == 2:
        sdm = np.expand_dims(sdm, 2)
    padded_sdm = np.pad(sdm, padn, mode='edge')
    patch_sdm = rolling_window(padded_sdm, (wr, wr))
    patch_sdm = patch_sdm.reshape((patch_sdm.shape[0], patch_sdm.shape[1],
                                   patch_sdm.shape[3], patch_sdm.shape[4]))

    p = ((0, 0), (0, 0), (0, 0), (wr - 1, 0), (wr - 1, 0))
    patch_im_x = np.pad(im[:, :, :, np.newaxis, np.newaxis], p, mode='reflect')

    g_im = np.sum(np.exp(-(patch_im - patch_im_x)**2 / (2 * sr**2)), axis=2)
    g_sdm = np.exp(-patch_sdm) * filters.gaussian_filter(
        np.ones((wr, wr)), spatial_domain)
    f = g_im * g_sdm
    w = np.sum(np.sum(f, axis=-1), axis=-1) + 0.00001
    filtered = np.expand_dims(
        np.sum(np.sum(patch_sdm * f, axis=3), axis=2) / w, -1)

    return filtered
Example #25
def get_boxes(img_filename, probs, step, size, gauss=0, threshold=0.5):

    if gauss != 0:
        probs = filters.gaussian_filter(probs, gauss)

    img = misc.imread(img_filename)
    height, width, channels = img.shape

    boxes = []

    i = 0
    y = 0
    while y + (size) < height:
        x = 0
        while (x + (size) < width):
            left = int(x)
            right = int(x + (size))
            top = int(y)
            bottom = int(y + (size))
            if probs[y // step, x // step] > threshold:
                boxes.append(
                    [left, top, right, bottom, probs[y // step, x // step]])
            i += 1
            x += step
        y += step

    if len(boxes) == 0:
        return np.array([])

    boxes = np.vstack(boxes)
    return boxes
Example #26
def split_label(binary):
    '''Split label using watershed algorithm'''

    #    blur_radius = np.round(np.sqrt(min_size)/8).astype(int)
    #    print blur_radius

    distance = distance_transform_edt(binary)
    #    distance_blured = gaussian_filter(distance, blur_radius)
    distance_blured = gaussian_filter(distance, 8)

    #    selem = disk(2)

    local_maxi = peak_local_max(distance_blured,
                                indices=False,
                                labels=binary,
                                min_distance=10,
                                exclude_border=False)
    markers = measure_label(local_maxi)

    labels_ws = watershed(-distance, markers, mask=binary)

    #    selem_morph = np.array([0,1,0,1,1,1,0,1,0], dtype=bool).reshape((3,3))

    #    for i in (1,2):
    #        maxi = binary_dilation(local_maxi, selem_morph)

    #    imsave('/home/varnivey/Data/Biophys/Burnazyan/Experiments/fluor_calc/test/distance.jpg', distance)
    #    imsave('/home/varnivey/Data/Biophys/Burnazyan/Experiments/fluor_calc/test/maxi.jpg', local_maxi*255)

    return labels_ws
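A minimal usage sketch for split_label on a synthetic mask of two touching disks (assumes skimage.draw.disk is available, i.e. skimage >= 0.16):

import numpy as np
from skimage.draw import disk

binary = np.zeros((200, 200), dtype=bool)
binary[disk((100, 80), 45)] = True
binary[disk((100, 135), 45)] = True
labels_ws = split_label(binary)  # should yield two separate labels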
Example #27
def spatialSmoothing(X, x, y, z, gamma, sigma):
    # Smooth each row of X across voxels that lie within distance `gamma`
    # of one another; x, y and z hold the voxel coordinates.
    newX = X.copy()
    n_rows, n_voxels = X.shape
    for i in range(n_rows):
        for j in range(n_voxels):
            x1, y1, z1 = x[j], y[j], z[j]
            # start with the current voxel itself (distance 0)
            neighbours = [(0.0, X[i, j])]
            for k in range(n_voxels):
                if k == j:
                    continue
                d = ((x1 - x[k])**2 + (y1 - y[k])**2 + (z1 - z[k])**2) ** 0.5
                if d <= gamma:
                    neighbours.append((d, X[i, k]))
            # order the gathered values by their distance to the centre voxel
            neighbours.sort(key=lambda t: t[0])
            trans = np.array([value for _, value in neighbours])
            if len(trans) > 1:
                trans = filters.gaussian_filter(trans, sigma)
            # the first (closest) value is the smoothed estimate for this voxel
            newX[i, j] = trans[0]
    return newX
Example #28
def showAttMap(img, attMaps, tagName, overlap = True, blur = False):
    pylab.rcParams['figure.figsize'] = (12.0, 12.0)
    f, ax = plt.subplots(len(tagName)/2+1, 2)
    if len(ax.shape) == 1:
        ax[0].imshow(img)
    else:
        ax[0, 0].imshow(img)
    
    for i in range(len(tagName)):
        attMap = attMaps[i].copy()
        attMap -= attMap.min()
        if attMap.max() > 0:
            attMap /= attMap.max()
        attMap = transform.resize(attMap, (img.shape[:2]), order = 3, mode = 'nearest')
        if blur:
            attMap = filters.gaussian_filter(attMap, 0.02*max(img.shape[:2]))
            attMap -= attMap.min()
            attMap /= attMap.max()
    
        cmap = plt.get_cmap('jet')
        attMapV = cmap(attMap)
        attMapV = np.delete(attMapV, 3, 2)
        if overlap:
            attMap = 1*(1-attMap**0.8).reshape(attMap.shape + (1,))*img + (attMap**0.8).reshape(attMap.shape+(1,)) * attMapV;
        if len(ax.shape) == 1:
            ax[i+1].imshow(attMap, interpolation = 'bicubic')
            ax[i+1].set_title(tagName[i])
        else:  
            ax[(i+1)/2, (i+1)%2].imshow(attMap, interpolation = 'bicubic')
            ax[(i+1)/2, (i+1)%2].set_title(tagName[i])
Example #29
def correct_illumination(grayscale, sigma=400, pickle_me=False):
    '''
    Applies a Gaussian (low pass) filter with a large sigma (default sigma=400)
    to estimate uneven illumination across a grayscale image and correct it.
    This function overcorrects near objects with large image gradients and is
    optimized for use with 16 bit images recorded using a 12 bit camera.

    Inputs:
    -------
    grayscale: A grayscale image loaded into a NumPy array with type np.uint16

    sigma: The standard deviation used for applying the Gaussian filter,
        sigma > 50 is strongly recommended

    pickle_me: Boolean, dumps NumPy arrays of intermediate images to pickle
        files in the current working directory if True

    Outputs:
    --------
    corrected: An illumination corrected NumPy array

    '''
    # sigma = 350 to 400 looks best
    # 65535 is the max value of np.uint16 data type
    background = (gaussian_filter(grayscale, sigma=sigma) * 65535).astype(
        np.uint16)
    inverted = 4096 - background  # inverts light and dark areas
    corrected = (grayscale + inverted) / 2

    if pickle_me:
        background.dump('est_background.p')
        inverted.dump('inverted_back.p')
        corrected.dump('corrected.p')

    return (corrected)
Example #30
def image_processing(image, **kwargs):

    footprint_dic = {
        # Kernel "Laplacian" of size 3x3+1+1 with values from -1 to 8
        # Forming a output range from -8 to 8 (Zero-Summing)
        'footprint1':
        np.array([[-1, -1, -1], [-1, 8, -1], [-1, -1, -1]]),
        'footprint2':
        np.array([[-1, -1, -1, -1, -1], [-1, -1, -1, -1, -1],
                  [-1, -1, 21, -1, -1], [-1, -1, -1, -1, -1],
                  [-1, -1, -1, -1, -1]]),
        #Kernel "Laplacian" of size 5x5+2+2 with values from -2 to 16
        #Forming a output range from -16 to 16 (Zero-Summing)
        'footprint3':
        np.array([[0, 0, -1, 0, 0], [0, -1, -2, -1, 0], [-1, -2, 16, -2, -1],
                  [0, -1, -2, -1, 0], [0, 0, -1, 0, 0]])
    }

    footprint = kwargs.get('footprint', footprint_dic['footprint1'])

    image_conv = signal.convolve2d(image, footprint)
    image_conv = (image_conv.clip(min=0)).astype(np.uint16)
    image_conv = gaussian_filter(image_conv, sigma=3)

    return image_conv
Example #31
        def update_video(self):
            
            if self.play_video or self.moving_timeLine:
                video_image = np.mean(self.video[self.frame_idx-self.rolling_average:self.frame_idx+self.rolling_average+1],axis=0).T

                #video_image = median_filter(self.video,disk(2))
                if self.smoothing>0:
                    video_image = gaussian_filter(video_image,self.smoothing)

                if self.was_mean_im:
                    self.img.setImage(video_image,
                                      autoLevels=0)
                    self.was_mean_im = 0
                else:
                    self.img.setImage(video_image,
                                      autoLevels=0)


                if self.frame_idx>=self.nFrames-1:
                    self.frame_idx=self.rolling_average
                
                self.frame_idx += 1
                self.frameTxt.setText('Frame Nr: ' + str(self.frame_idx+1) + '/' + str(self.nFrames))
                self.timeLine.setPos(self.frame_idx)
                self.first_mean = 1
            if (self.show_mean_image and not self.play_video):
                if self.first_mean:
                    self.img.setImage(self.mean_image,autoLevels=0)
                    self.first_mean = 0
                else:
                    self.img.setImage(self.mean_image,autoLevels=0)

                self.was_mean_im = 1

            self.moving_timeLine = False
Example #32
    def run4(self):
        """ Cette fonction recadre les images grâce à SURF et RANSAC, fonctionne bien."""
        for x in xrange(len(self.stack)-1):
            print('Traitement image ' + str(x+1))
            im1,im2 = 255.*gaussian_filter(self.stack[x,...], sqrt(self.initial_sigma**2 - 0.25)), 255.*gaussian_filter(self.stack[x+1,...], sqrt(self.initial_sigma**2 - 0.25))
            im1,im2 = enhance_contrast(normaliser(im1), square(5)), enhance_contrast(normaliser(im2), square(5))
            im1, im2 = normaliser(im1), normaliser(im2)
            
            b = cv2.SURF()
            #b.create("Feature2D.BRISK")
            
            k1,d1 = b.detectAndCompute(im1,None)
            k2,d2 = b.detectAndCompute(im2,None)
            
            bf = cv2.BFMatcher()
            matches = bf.knnMatch(d1,d2, k=2)

            # Apply ratio test
            good = []
            for m,n in matches:
                if m.distance < 0.75*n.distance:
                    good.append(m)
            
            g1,g2 = [],[]
            for i in good:
                g1.append(k1[i.queryIdx].pt)
                g2.append(k2[i.trainIdx].pt)

            model, inliers = ransac((np.array(g1), np.array(g2)), AffineTransform, min_samples=3, residual_threshold=self.min_epsilon, max_trials=self.max_trials, stop_residuals_sum=self.min_inlier_ratio)
            
            self.stack[x+1,...] = warp(self.stack[x+1,...], AffineTransform(rotation=model.rotation, translation=model.translation), output_shape=self.stack[x+1].shape)

        self.stack = self.stack.astype(np.uint8)
Example #33
def preprocess_image(img):
    means = [0.485, 0.456, 0.406]
    stds = [0.229, 0.224, 0.225]

    preprocessed_img = img.copy()[:, :, ::-1]
    for i in range(3):
        preprocessed_img[:, :, i] = preprocessed_img[:, :, i] - means[i]
        preprocessed_img[:, :, i] = preprocessed_img[:, :, i] / stds[i]

    # Bala algorithm of data augmentation
    #img = np.array(imgs[index:index+1,:,:,:])[0]
    #img = scipy.misc.imresize(preprocessed_img, (224, 224))
    #print('img shape: {}'.format(img.shape))

    og_img = preprocessed_img.copy()  #skimage.color.rgb2lab(img)[:,:,0]

    sigma = 9

    img = og_img - gaussian_filter(og_img, sigma=sigma, multichannel=True)
    #img = img.transpose((2, 0, 1))
    preprocessed_img = img  #(img/255.) - .5




    preprocessed_img = \
        np.ascontiguousarray(np.transpose(preprocessed_img, (2, 0, 1)))
    preprocessed_img = torch.from_numpy(preprocessed_img)
    preprocessed_img.unsqueeze_(0)
    input = Variable(preprocessed_img, requires_grad=True)
    return input
Example #34
def split_image_into_sudoku_pieces_adaptive_global(image, otsu_local=False, apply_gaussian=False):
    L = image.shape[0]
    d = int(np.ceil(L / 9))
    dd = d // 5
    output = []
    if apply_gaussian:
        image = gaussian_filter(image, sigma=1.0)
    if not otsu_local:
        image = to_binary_adaptive(image)
    for k in range(9):
        this_row = []
        start_row_i = max([k * d - dd, 0])
        stop_row_i = min([(k + 1) * d + dd, L])
        for kk in range(9):
            start_col_i = max([kk * d - dd, 0])
            stop_col_i = min([(kk + 1) * d + dd, L])
            i = image[start_row_i:stop_row_i, start_col_i:stop_col_i].copy()
            if otsu_local:
                i = to_binary_otsu(i)
            i = binary_opening(i)
            i = to_binary_otsu(i)
            if apply_gaussian:
                i = to_binary_otsu(binary_dilation(i))
            this_row.append(i)
        output.append(this_row)
    return output, image
Example #35
def get_boxes(img_filename, probs, step, size, gauss=0,threshold=0.5):

    if gauss != 0:
        probs = filters.gaussian_filter(probs, gauss)


    img = misc.imread(img_filename)
    height, width,channels = img.shape

    boxes=[]

    i = 0
    y = 0
    while y + size < height:
        x = 0
        while x + size < width:
            left = int(x)
            right = int(x + size)
            top = int(y)
            bottom = int(y + size)
            if probs[y // step, x // step] > threshold:
                boxes.append([left, top, right, bottom, probs[y // step, x // step]])
            i += 1
            x += step
        y += step

    if len(boxes) == 0:
        return np.array([])

    boxes =  np.vstack(boxes)
    return boxes
Example #36
def enhance_ridges(frame, mask=None):
    """Detect ridges (larger hessian eigenvalue)"""
    blurred = filters.gaussian_filter(frame, 2)
    Hxx, Hxy, Hyy = feature.hessian_matrix(blurred, sigma=4.5, mode="nearest")
    ridges = feature.hessian_matrix_eigvals(Hxx, Hxy, Hyy)[0]

    return np.abs(ridges)
Example #37
def gaussian_stack(im, n=5):
    s = []
    gauss_im = im
    for i in range(n):
        gauss_im = gaussian_filter(gauss_im, 2**i, mode='reflect')
        s.append(gauss_im)
    return s
Example #38
def scale_2(sigma):
    # Hint for another task:
    # ifft (fft(I) * fft(k, I.shape))
    n = 20
    m = 20
    img = imread('images/modelhouses.png')
    out = np.zeros((n, m))
    for i in range(n):
        for j in range(m):
            out[i][j] = float(1 / (2 * math.pi * sigma**2) * math.exp(
                (-((i - n / 2)**2 + (j - m / 2)**2) / (2.0 * sigma**2))))
    imshow(out, cmap='gray')
    show()

    img = filters.convolve(img, out)

    new_img = imread('images/modelhouses.png')
    #s = 2
    #w = 5
    #t = (((w - 1) / 2) - 0.5) / s
    new_img = filters.gaussian_filter(new_img, sigma)

    fig, ax = plt.subplots(1, 2)
    ax[0].imshow(img, cmap='gray')
    ax[0].set_title('Own implementation, sigma = 5.0')
    ax[0].axis('off')
    ax[1].imshow(new_img, cmap='gray')
    ax[1].set_title('Scale function, sigma = 5.0')
    ax[1].axis('off')
    plt.show()
Example #39
 def unsharp2d(img):
         if len(img.shape) == 2:
                 blur = gaussian_filter(img, 50) 
                 blur = -0.1*blur
                 return blur + img
         else:
                 raise Exception('The image size is not recognized.')
Example #40
def showAttMap(img, attMaps, tagName, overlap=True, blur=False):
    pylab.rcParams['figure.figsize'] = (12.0, 12.0)
    f, ax = plt.subplots(len(tagName) / 2 + 1, 2)
    ax[0, 0].imshow(img)

    for i in range(len(tagName)):
        attMap = attMaps[i].copy()
        attMap -= attMap.min()
        if attMap.max() > 0:
            attMap /= attMap.max()
        attMap = transform.resize(attMap, (img.shape[:2]),
                                  order=3,
                                  mode='nearest')
        if blur:
            attMap = filters.gaussian_filter(attMap, 0.02 * max(img.shape[:2]))
            attMap -= attMap.min()
            attMap /= attMap.max()

        cmap = plt.get_cmap('jet')
        attMapV = cmap(attMap)
        attMapV = np.delete(attMapV, 3, 2)
        if overlap:
            attMap = 1 * (1 - attMap**0.8).reshape(attMap.shape + (
                1, )) * img + (attMap**0.8).reshape(attMap.shape +
                                                    (1, )) * attMapV

        ax[(i + 1) / 2, (i + 1) % 2].imshow(attMap, interpolation='bicubic')
        ax[(i + 1) / 2, (i + 1) % 2].set_title(tagName[i])
Example #41
def ccl_skimage():
    n = 12
    l = 256
    np.random.seed(1)
    im = np.zeros((l, l))
    points = l * np.random.random((2, n**2))
    im[(points[0]).astype(np.int), (points[1]).astype(np.int)] = 1
    im = filters.gaussian_filter(im, sigma=l / (4. * n))
    blobs = im > 0.7 * im.mean()

    all_labels = measure.label(blobs)
    blobs_labels = measure.label(blobs, background=0)

    plt.figure(figsize=(9, 3.5))
    plt.subplot(131)
    plt.imshow(blobs, cmap='gray')
    plt.axis('off')
    plt.subplot(132)
    plt.imshow(all_labels, cmap='spectral')
    plt.axis('off')
    plt.subplot(133)
    plt.imshow(blobs_labels, cmap='spectral')
    plt.axis('off')

    plt.tight_layout()
    plt.show()
Example #42
def blur_predict(model, X, type="median", filter_size=3, sigma=1.0):
  
    if type == "median":
        blured_X = np.array(list(map(lambda x: ndimage.median_filter(x, filter_size), 
                                     X)))
    elif type == "gaussian":
        blured_X = np.array(list(map(lambda x: ndimage.gaussian_filter(x, filter_size),
                                     X)))
    elif type == "f_gaussian":
        blured_X = np.array(list(map(lambda x: filters.gaussian_filter(x.reshape((28, 28)), sigma=sigma).reshape(784),
                                     X))) 
    elif type == "tv_chambolle":
        blured_X = np.array(list(map(lambda x: restoration.denoise_tv_chambolle(x.reshape((28, 28)), weight=0.2).reshape(784),
                                     X)))
    elif type == "tv_bregman":
        blured_X = np.array(list(map(lambda x: restoration.denoise_tv_bregman(x.reshape((28, 28)), weight=5.0).reshape(784),
                                     X)))
    elif type == "bilateral":
        blured_X = np.array(list(map(lambda x: restoration.denoise_bilateral(np.abs(x).reshape((28, 28))).reshape(784),
                                     X)))
    elif type == "nl_means":
        blured_X = np.array(list(map(lambda x: restoration.nl_means_denoising(x.reshape((28, 28))).reshape(784),
                                     X)))
        
    elif type == "none":
        blured_X = X 

    else:
        raise ValueError("unsupported filter type", type)

    return predict(model, blured_X)
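A hypothetical call, assuming a trained model and a (n_samples, 784) MNIST-style matrix X_test:

preds = blur_predict(model, X_test, type="f_gaussian", sigma=1.0)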
Example #43
def dslre(img):
    timestep = 1.0
    mu = 0.2/timestep
    iter_basic = 1000
    iter_refine = 10
    lambdap = 5
    alpha = 1.5 # -3
    epsilon = 1.5
    sigma = 1.5

    smoothed = filters.gaussian_filter(img,sigma)
    dy,dx = np.gradient(smoothed)
    mag = (dx**2)+(dy**2)
    edge = 1.0/(1.0+mag)

    c0 = 2
    initialLSF = c0*np.ones(img.shape)
    initialLSF[10:50,10:50] = -c0

    #initialLSF[10:55,10:75] = -c0
    
    #initialLSF[25:35,20:25] -= c0
    #initialLSF[25:35,40:50] -= c0
    phi = initialLSF
    drlse_edge(phi,edge,lambdap,mu,alpha,epsilon,timestep,iter_basic)
    drlse_edge(phi,edge,lambdap,mu,0,epsilon,timestep,iter_refine)
    return phi
Example #44
    def run3(self):
        """ Cette fonction test des alternatives à SIFT et ORB. Ne fonctionne pas."""
        for x in xrange(len(self.stack)-1):
            print('Traitement image ' + str(x+1))
            im1,im2 = 255.*gaussian_filter(self.stack[x,...], sqrt(self.initial_sigma**2 - 0.25)), 255.*gaussian_filter(self.stack[x+1,...], sqrt(self.initial_sigma**2 - 0.25))
            im1,im2 = enhance_contrast(normaliser(im1), square(3)), enhance_contrast(normaliser(im2), square(3))
            im1, im2 = normaliser(im1), normaliser(im2)
            
            b = cv2.BRISK()
            #b.create("Feature2D.BRISK")
            
            k1,d1 = b.detectAndCompute(im1,None)
            k2,d2 = b.detectAndCompute(im2,None)
            
            bf = cv2.BFMatcher(cv2.NORM_HAMMING)
            matches = bf.match(d1,d2)
            
            g1,g2 = [],[]
            for i in matches:
                g1.append(k1[i.queryIdx].pt)
                g2.append(k2[i.trainIdx].pt)

            model, inliers = ransac((np.array(g1), np.array(g2)), AffineTransform, min_samples=3, residual_threshold=self.min_epsilon, max_trials=self.max_trials, stop_residuals_sum=self.min_inlier_ratio)
            
            self.stack[x+1,...] = warp(self.stack[x+1,...], AffineTransform(rotation=model.rotation, translation=model.translation), output_shape=self.stack[x+1].shape)

        self.stack = self.stack.astype(np.uint8)
Example #45
def enhance_ridges(frame, mask=None):
    """Detect ridges (larger hessian eigenvalue)"""
    blurred = filters.gaussian_filter(frame, 2)
    Hxx, Hxy, Hyy = feature.hessian_matrix(blurred, sigma=4.5, mode='nearest')
    ridges = feature.hessian_matrix_eigvals(Hxx, Hxy, Hyy)[0]

    return np.abs(ridges)
Example #46
def find_pipes(fn):
	n = 250
	l = 256

	im = cv2.imread(fn, 0)
	im = filters.gaussian_filter(im, sigma= l / (4. * n))
	blobs = im > 0.7 * im.mean()
	blobs_labels = measure.label(blobs, background=0)
	labels = np.unique(blobs_labels)

	pipe_candidates = labels[2:]
	region_size = [(np.sum(blobs_labels == label)) for label in pipe_candidates]
	sorted_regions = sorted(list(zip(region_size, pipe_candidates)))
	pipe_regions = [region[1] for region in sorted_regions][-4:]

	# encoded as : (left, right, top, bottom)
	width, height = blobs_labels.shape
	pipe_coords = [(width+1, -1, height+1, -1) for _ in pipe_regions]
	region_to_coord = dict(zip(pipe_regions, pipe_coords))
	for i in range(width):
		for j in range(height):
			for region in region_to_coord:
				new_coords = update_coords(blobs_labels, i, j, 
					region, region_to_coord[region])
				if new_coords is not None:
					region_to_coord[region] = new_coords

	img_with_pipe = cv2.imread(fn, 1)
	for region in region_to_coord:
		left, right, top, bottom = region_to_coord[region]
		cv2.rectangle(img_with_pipe, (top, left), (bottom, right), (0,0,255))
	scipy.misc.imsave("pipe_labels.png", img_with_pipe)
Example #47
    def extract(self, image, voxel_size=10):
        self.logging.info("Processing " + self.img_filepath)
        self.logging.info("Gaussian filtering with kernel size: {}".format(
            self.gaussian_kernel))

        # Gaussian filter
        kernel_shape = [
            self.gaussian_kernel,
            self.gaussian_kernel,
            self.gaussian_kernel_z,
        ]
        image = gaussian_filter(image, kernel_shape)
        self.logging.info("Filtering completed")

        # Thresholding
        if self.threshold_type.lower() == "otsu":
            thresh = threshold_otsu(image)
            self.logging.info("Thresholding with {} threshold type".format(
                self.threshold_type))

        elif (self.threshold_type.lower() == "percentile"
              or self.threshold_type.lower() == "perc"):
            thresh = np.percentile(image.ravel(), self.percentile_threshold)
            self.logging.info("Thresholding with {} threshold type. "
                              "{}th percentile [{}]".format(
                                  self.threshold_type,
                                  self.percentile_threshold, thresh))
        else:
            raise ValueError("Unrecognised thresholding type: " +
                             self.threshold_type)

        binary = image > thresh
        binary = keep_n_largest_objects(binary)

        # Save thresholded image
        if not os.path.isfile(self.thresholded_savepath) or self.overwrite:
            self.logging.info("Saving thresholded image to {}".format(
                self.thresholded_savepath))
            brainio.to_nii(binary.astype(np.int16), self.thresholded_savepath)

        binary = reorient_image(binary,
                                invert_axes=[
                                    2,
                                ],
                                orientation="coronal")

        # apply marching cubes
        self.logging.info("Extracting surface from thresholded image")
        verts, faces, normals, values = measure.marching_cubes_lewiner(
            binary, 0, step_size=1)

        # Scale to atlas spacing
        if voxel_size != 1:
            verts = verts * voxel_size

        # Save image to .obj
        self.logging.info(" Saving .obj at {}".format(self.obj_path))
        faces = faces + 1
        marching_cubes_to_obj((verts, faces, normals, values), self.obj_path)
Example #48
def iter_blob_extremes(image, n=5):
    original_shape = image.shape[::-1]
    if max(original_shape) < 2000:
        size = (500, 500)
        y_scale = original_shape[0] / 500
        x_scale = original_shape[1] / 500
    else:
        size = (1000, 1000)
        y_scale = original_shape[0] / 1000
        x_scale = original_shape[1] / 1000

    img = resize(image, size)
    bimg = gaussian_filter(img, sigma=1.0)
    bimg = threshold_adaptive(bimg, 20, offset=2 / 255)
    bimg = ~bimg  # invert the binary mask
    bimg = ndi.binary_fill_holes(bimg)
    label_image = label(bimg, background=False)
    label_image += 1

    regions = regionprops(label_image)
    regions.sort(key=attrgetter('area'), reverse=True)
    iter_n = 0

    for region in regions:
        try:
            iter_n += 1
            if iter_n > n:
                break

            # Skip small images
            if region.area < int(np.prod(size) * 0.05):
                continue
            coords = get_contours(
                add_border(label_image == region.label,
                           size=label_image.shape,
                           border_size=1,
                           background_value=False))[0]
            coords = np.fliplr(coords)

            top_left = sorted(coords,
                              key=lambda x: np.linalg.norm(np.array(x)))[0]
            top_right = sorted(coords,
                               key=lambda x: np.linalg.norm(
                                   np.array(x) - [img.shape[1], 0]))[0]
            bottom_left = sorted(coords,
                                 key=lambda x: np.linalg.norm(
                                     np.array(x) - [0, img.shape[0]]))[0]
            bottom_right = sorted(
                coords,
                key=lambda x: np.linalg.norm(
                    np.array(x) - [img.shape[1], img.shape[0]]))[0]
            scaled_extremes = [(int(x[0] * y_scale), int(x[1] * x_scale))
                               for x in (top_left, top_right, bottom_left,
                                         bottom_right)]

            yield scaled_extremes
        except Exception:
            pass
    raise SudokuExtractError("No suitable blob could be found.")
Example #49
def test_apply_parallel_wrap():
    def wrapped(arr):
        return gaussian_filter(arr, 1, mode='wrap')
    a = np.arange(144).reshape(12, 12).astype(float)
    expected = gaussian_filter(a, 1, mode='wrap')
    result = apply_parallel(wrapped, a, chunks=(6, 6), depth=5, mode='wrap')

    assert_array_almost_equal(result, expected)
Example #50
def sample_gaussian_shape(pc, xr, yr, groups=None):
    ni = Image.init_blank((xr, yr))
    for pts in pc.points:
        ni.pixels[0, pts[0], pts[1]] = 1
    store_image = Image.init_blank(ni.shape)
    store_image.pixels[0, :, :] = filters.gaussian_filter(
        np.squeeze(ni.pixels), 4)
    return store_image
Example #51
def create_external_edge_force_gradients_from_img(img, sigma=30.):
    """
    Given an image, returns 2 functions, fx & fy, that compute
    the gradient of the external edge force in the x and y directions.

    img: ndarray
        The image.
    """
    # Gaussian smoothing.
    smoothed = filt.gaussian_filter(
        (img - img.min()) / (img.max() - img.min()), sigma)
    # Gradient of the image in x and y directions.
    giy, gix = np.gradient(smoothed)
    # Gradient magnitude of the image.
    gmi = (gix**2 + giy**2)**(0.5)
    # Normalize. This is crucial (empirical observation).
    gmi = (gmi - gmi.min()) / (gmi.max() - gmi.min())

    # Gradient of gradient magnitude of the image in x and y directions.
    ggmiy, ggmix = np.gradient(gmi)

    def fx(x, y):
        """
        Return external edge force in the x direction.

        x: ndarray
            numpy array of floats.
        y: ndarray:
            numpy array of floats.
        """
        # Check bounds.
        x[x < 0] = 0.
        y[y < 0] = 0.

        x[x > img.shape[1] - 1] = img.shape[1] - 1
        y[y > img.shape[0] - 1] = img.shape[0] - 1

        return ggmix[(y.round().astype(int), x.round().astype(int))]

    def fy(x, y):
        """
        Return external edge force in the y direction.

        x: ndarray
            numpy array of floats.
        y: ndarray:
            numpy array of floats.
        """
        # Check bounds.
        x[x < 0] = 0.
        y[y < 0] = 0.

        x[x > img.shape[1] - 1] = img.shape[1] - 1
        y[y > img.shape[0] - 1] = img.shape[0] - 1

        return ggmiy[(y.round().astype(int), x.round().astype(int))]

    return fx, fy
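A usage sketch, assuming a 2-D grayscale array img: build the force functions and evaluate them at a few snake points.

import numpy as np

fx, fy = create_external_edge_force_gradients_from_img(img, sigma=30.)
xs = np.array([10.0, 20.0, 30.0])
ys = np.array([15.0, 25.0, 35.0])
forces = np.stack([fx(xs, ys), fy(xs, ys)], axis=1)  # one (x, y) force per point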
Example #52
def downsample(I):
    """Downsample I by 2 after blurring with Gaussian of sigma=2.

    Blurs across dimensions 0 and 1. Leaves dimension 2 untouched.

    """
    sigma = 2
    I = np.atleast_3d(I)
    return rescale(gaussian_filter(I, sigma, multichannel=True), 0.5)
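skimage ships a comparable blur-then-downsample step as pyramid_reduce; a rough sketch, not guaranteed to be numerically identical to the function above:

from skimage.transform import pyramid_reduce

half = pyramid_reduce(I, downscale=2, multichannel=True)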
Example #53
def test_apply_parallel_wrap():
    def wrapped(arr):
        return gaussian_filter(arr, 1, mode='wrap')

    a = np.arange(144).reshape(12, 12).astype(float)
    expected = gaussian_filter(a, 1, mode='wrap')
    result = apply_parallel(wrapped, a, chunks=(6, 6), depth=5, mode='wrap')

    assert_array_almost_equal(result, expected)
Example #54
def kdeplot(samples, scale=100, window_size=5):
    resolution = 1. / scale
    density_estimation = np.zeros((scale, scale))
    for x, y in samples:
        if 0 < x < 1 and 0 < y < 1:
            density_estimation[int(
                (1 - y) / resolution)][int(x / resolution)] += 1
    density_estimation = filters.gaussian_filter(density_estimation,
                                                 window_size)
    plt.imshow(density_estimation, cmap='Blues')
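A usage sketch with synthetic samples in the unit square (numpy and matplotlib assumed imported as in the snippet above):

import numpy as np

samples = np.random.beta(2, 5, size=(2000, 2))
kdeplot(samples, scale=100, window_size=5)
plt.show()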
Example #55
def _resize(args):
    img, rescale_size, bbox = args
    img = img[bbox[0]:bbox[1], bbox[2]:bbox[3]]
    # Smooth image before resize to avoid moire patterns
    scale = img.shape[0] / float(rescale_size)
    sigma = np.sqrt(scale) / 2.0
    img = filters.gaussian_filter(img, sigma=sigma, multichannel=True)
    img = transform.resize(img, (rescale_size, rescale_size, 3), order=3)
    img = (img * 255).astype(np.uint8)
    return img
Example #56
def detect_tumours(
    image,
    mask,
    min_size=50,
    max_size=150 * 150,
    max_maj_axis=300,
):

    # Blur the image for smoother outlines
    filt = skfilt.gaussian_filter(image, 5)

    # Automatically determine a threshold using Otsu algorithm
    # on just the skull region
    vals = filt[mask]
    thresh = skfilt.threshold_otsu(vals)

    # Determine a second threshold (single pass doesn't work well)
    vals = vals[vals > thresh]
    thresh2 = skfilt.threshold_otsu(vals)
    # Potential tumour regions are above the threshold and within the skull
    candidates = mask & (filt >= thresh2)

    # Remove small noise using binary_opening
    candidates = skmorph.binary_opening(candidates, selem=skmorph.disk(3))
    # Perform connected component labelling to get individual regions
    labels, num_labels = ndi.label(candidates, np.ones((3, 3), dtype=bool))
    final = candidates.copy()

    # Use regionprops to determine additional properties
    for prop in skmeas.regionprops(labels, image):
        numPixels = prop.area
        numPixelsConvex = prop.convex_area

        print(115, numPixels, numPixelsConvex)
        tumorArea = (numPixelsConvex / CANVAS_SIZE) * TOTAL_AREA
        print('TumorArea is', tumorArea)
        if (tumorArea > .05):
            resultsPage.tumorDetected = 1

        labelMask = labels == prop.label
        #------------------------------
        # Manual classifications
        #------------------------------
        # if the number of pixels in the component is sufficiently
        # large, then add it to our mask of "large blobs"
        if numPixels <= min_size:
            final[labelMask] = 0
            continue
        if numPixelsConvex > max_size:
            final[labelMask] = 0
            continue
        if prop.major_axis_length > max_maj_axis:
            final[labelMask] = 0
            continue
    return final
Example #57
def findBeads(im, window, thresh):
    smoothed = gaussian_filter(im,
                               1,
                               output=None,
                               mode='nearest',
                               cval=0,
                               multichannel=None)
    centers = peak_local_max(smoothed,
                             min_distance=3,
                             threshold_rel=thresh,
                             exclude_border=True)
    return centers, smoothed.max(axis=0)
Example #58
def DoG(img):
	
	original_image = img_as_float(img)
	img = color.rgb2gray(original_image)

	k = 1.6

	plt.subplot(1,3,1)
	plt.axis('off')
	plt.imshow(original_image)

	for idx,sigma in enumerate([6.0]):
		s1 = filters.gaussian_filter(img,k*sigma)
		s2 = filters.gaussian_filter(img,sigma)

		# multiply by sigma to get scale invariance
		dog = s1 - s2
		plt.subplot(1,3,idx+2)
		plt.axis('off')
		print dog.min(),dog.max()
		plt.imshow(dog,cmap='RdBu')

	ax = plt.subplot(1,3,3)
	ax.axis('off')

	blobs_dog = [(x[0],x[1],x[2]) for x in feature.blob_dog(img, min_sigma=3, max_sigma=6,threshold=0.35,overlap=0)]
	# skimage has a bug in my version where only maxima were returned by the above
	blobs_dog += [(x[0],x[1],x[2]) for x in feature.blob_dog(-img, min_sigma=3, max_sigma=6,threshold=0.35,overlap=0)]

	#remove duplicates
	blobs_dog = set(blobs_dog)

	img_blobs = color.gray2rgb(img)
	for blob in blobs_dog:
		y, x, r = blob
		c = plt.Circle((x, y), r, color='red', linewidth=1, fill=False)
		ax.add_patch(c)
	plt.imshow(img_blobs)

	plt.show()