def load_scenes(filename):
    zipped_scenes = []
    print 'Working on: ' + filename
    img = data.imread('scenes/' + filename, as_grey=True)
    tmp = img
    tmp = filter.canny(tmp, sigma=2.0)
    tmp = ndimage.binary_fill_holes(tmp)
    #tmp = morphology.dilation(tmp, morphology.disk(2))
    tmp = morphology.remove_small_objects(tmp, 2000)
    contours = measure.find_contours(tmp, 0.8)
    ymin, xmin = contours[0].min(axis=0)
    ymax, xmax = contours[0].max(axis=0)
    if xmax - xmin > ymax - ymin:
        xdest = 1000
        ydest = 670
    else:
        xdest = 670
        ydest = 1000
    src = np.array(((0, 0), (0, ydest), (xdest, ydest), (xdest, 0)))
    dst = np.array(((xmin, ymin), (xmin, ymax), (xmax, ymax), (xmax, ymin)))
    tform3 = tf.ProjectiveTransform()
    tform3.estimate(src, dst)
    warped = tf.warp(img, tform3, output_shape=(ydest, xdest))
    tmp = filter.canny(warped, sigma=2.0)
    tmp = morphology.dilation(tmp, morphology.disk(2))
    descriptor_extractor.detect_and_extract(tmp)
    obj_key = descriptor_extractor.keypoints
    scen_desc = descriptor_extractor.descriptors
    zipped_scenes.append([warped, scen_desc, obj_key, filename])
    return zipped_scenes
def canny(data, sigma=1, sliceId=2):
    edges = np.zeros(data.shape, dtype=np.bool)
    if sliceId == 2:
        for idx in range(data.shape[2]):
            edges[:, :, idx] = skifil.canny(data[:, :, idx], sigma=sigma)
    elif sliceId == 0:
        for idx in range(data.shape[0]):
            edges[idx, :, :] = skifil.canny(data[idx, :, :], sigma=sigma)
    return edges
def _canny_edge_fired(self):
    self.im = self.orig
    r, g, b = np.rollaxis(self.im, axis=-1)
    edge_r = canny(tv_denoise(r, weight=1))
    edge_g = canny(tv_denoise(g, weight=1))
    edge_b = canny(tv_denoise(b, weight=1))
    edges = edge_r + edge_g + edge_b
    self.im = np.dstack((edges, edges, edges))
    self.im[self.im > 0.] = 1.
    try:
        self.axes.imshow(self.im)
        self.figure.canvas.draw()
    except:
        pass
def canny(parameters):
    """Canny edge extraction filter.

    This wraps `skimage.filter.canny`. The `low_threshold`, `high_threshold`
    and `mask` options are not supported.

    The wrapped function returns a boolean array with pixel values True or
    False. Since it is not very convenient to pass such an array to other
    functions, the return value is cast to uint8, thus containing 0 or 1
    values.

    .. warning::

        During testing there have been some issues with the results. Check
        the corresponding test function for details.

    :param parameters['data'][0]: input image
    :type parameters['data'][0]: numpy.array
    :param parameters['sigma']: standard deviation of the gaussian filter,
        defaults to 1.0
    :type parameters['sigma']: float

    :return: numpy.array, with dtype('uint8') containing 0 or 1 values
    """
    img = parameters['data'][0]
    sigma = parameters.get('sigma', 1.0)
    result = filter.canny(img, sigma=sigma)
    return result.astype('uint8')
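# Hedged usage sketch for the wrapper above: the 'parameters' dict layout follows the
# docstring; the synthetic input image is only an assumption for illustration.
def _canny_wrapper_example():
    import numpy as np
    img = np.zeros((64, 64))
    img[16:48, 16:48] = 1.0            # a bright square with sharp edges
    edge_mask = canny({'data': [img], 'sigma': 2.0})
    return edge_mask                   # uint8 array containing 0/1 values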
def getRegions():
    """Geocode address and retrieve image centered around lat/long"""
    address = request.args.get('address')
    results = Geocoder.geocode(address)
    lat, lng = results[0].coordinates
    zip_code = results[0].postal_code
    map_url = 'https://maps.googleapis.com/maps/api/staticmap?center={0},{1}&size=640x640&zoom=19&sensor=false&maptype=roadmap&&style=visibility:simplified|gamma:0.1'
    request_url = map_url.format(lat, lng)
    req = urllib.urlopen(request_url)
    img = io.imread(req.geturl(), flatten=True)
    labels, numobjects = ndimage.label(img)
    image = filter.canny(img, sigma=3)
    thresh = threshold_otsu(image)
    bw = closing(image > thresh, square(3))

    # remove artifacts connected to image border
    cleared = bw.copy()
    clear_border(cleared)

    # label image regions
    label_image = label(cleared)
    borders = np.logical_xor(bw, cleared)
    label_image[borders] = -1
    image_label_overlay = label2rgb(label_image, image=image)

    fig, ax = plt.subplots(ncols=1, nrows=1, figsize=(6, 6))
    ax.imshow(image_label_overlay)
def find_edges(self, sigma=None):
    if sigma is not None:
        self.sigma = sigma
    print 'Identifying edges...'
    self.edges = filter.canny(self.data, sigma=self.sigma)
    return self.edges
def detect_edges(image_array):
    """ Detect edges in a given image

    Takes a numpy.array representing an image, applies filters and edge
    detection, and returns a numpy.array

    Parameters
    ----------
    image_array : ndarray (2D)
        Image data to be processed. Detect edges on this 2D array
        representing the image

    Returns
    -------
    edges : ndarray (2D)
        Edges of an image.
    """
    # Transform image into grayscale
    img = rgb2gray(image_array)
    # Remove some noise from the image
    img = denoise_tv_chambolle(img, weight=0.55)
    # Apply canny
    edges = filter.canny(img, sigma=3.2)
    # Clear the borders
    clear_border(edges, 15)
    # Dilate edges to make them more visible and connected
    edges = binary_dilation(edges, selem=diamond(3))
    return edges
def encode(self):
    data_array = np.array(self.image)  # data).reshape((28,28))
    edges = filt.canny(data_array, sigma=3)

    def linear_mapping(data):
        # using principal components analysis
        pca = decomposition.PCA(n_components=784)
        pca.fit(data)
        mapping = pca.transform(data)
        return mapping

    # encoded = linear_mapping(edges)
    encoded = np.array(edges).reshape(784)
    # encoded = []
    # for d in self.data:
    #     if (d > 45):
    #         encoded.append(1)
    #     else:
    #         encoded.append(0)
    return encoded
def CanNuc(datatype, maxrange, outputfile, outputfiletype):
    h = open(outputfile, outputfiletype)
    TC = 0
    for i in range(0, maxrange):
        A = datatype[i][0]
        T = mahotas.thresholding.otsu(A)
        C = A.copy()
        if T < 1:
            C[C <= T] = 0
            C[C > T] = 1
        else:
            C[C < T] = 0
            C[C >= T] = 1
        filled = scipy.ndimage.morphology.binary_fill_holes(C)
        filled = filled.astype(np.uint8)
        edges1 = filter.canny(filled, sigma=1)
        edges1 = edges1.astype(np.uint8)
        edges1 = np.where(edges1 == 1)
        TC += len(edges1[0])
        XY1 = np.vstack((edges1[0], edges1[1], [i * 5] * len(edges1[0])))
        for p in range(0, len(XY1[0])):
            for yel in range(0, len(XY1)):
                h.write(str(XY1[yel][p]) + '\t')
            h.write('\n')
    h.write(str(TC) + '\n')
    h.write('.' + '\n')
    h.close()
def getConfidenceImage(dist, segcpimg_crop, clr=None):
    num_points = dist.shape[0]
    numpy.asarray(dist, dtype=numpy.float)
    m, n, _ = segcpimg_crop.shape
    bbox = numpy.array([1, 1, n, m])
    clrstr = numpy.array([[0, 1, 0], [0, 0, 1], [1, 0, 0], [1, 0, 1],
                          [0, 1, 1], [1, 1, 0], [0, 0, 0]])
    if clr != None:
        clrstr = numpy.tile(clr, (9, 1))
    uintimg = numpy.asarray(segcpimg_crop, dtype=numpy.uint8)
    grayimg = color.rgb2gray(uintimg)
    edges = filter.canny(grayimg)
    background = numpy.asarray(~edges, dtype=numpy.float)
    bg = background[:, :, numpy.newaxis]
    pdf_img = 0.2 * numpy.tile(bg, (1, 1, 3)) + 0.8 * numpy.ones((bbox[3], bbox[2], 3))

    # Normalise the distributions for visualisation
    for c in numpy.arange(num_points - 1, -1, -1):
        d = dist[c, :, :] / numpy.amax(numpy.amax(dist[c, :, :]))
        dd = d[:, :, numpy.newaxis]
        alpha = numpy.tile(dd, (1, 1, 3))
        t = clrstr[c, :, numpy.newaxis, numpy.newaxis]
        single_joint_pdf = numpy.tile(t, (1, dist.shape[1], dist.shape[2]))
        single_joint_pdf = numpy.transpose(single_joint_pdf, (1, 2, 0))
        pdf_img = alpha * single_joint_pdf + (1 - alpha) * pdf_img
    return pdf_img
def update_image(self, event=None):
    self.imgview.image = canny(self.original_image,
                               sigma=self.sigma,
                               low_threshold=self.low,
                               high_threshold=self.high)
    self.imgview.redraw()
def getVoidBorder(self):
    """Create boolean array where border points are True and all others False.

    Input:
        - none

    Example:
        >>> import pycoresis as pcs
        >>> fid = r'C:\YOUR\FILE\HERE.txt'
        >>> crs = pcs.corescan(fid)
        >>> crs.getVoidBorder()
        Number of border points : 2449
        Number of border points : 3245
        array([[ True,  True,  True, ...,  True,  True,  True],
               [ True, False, False, ..., False, False,  True],
               [ True, False, False, ..., False, False,  True],
               ...,
               [ True, False, False, ..., False, False,  True],
               [ True, False, False, ..., False, False,  True],
               [ True,  True,  True, ...,  True,  True,  True]], dtype=bool)
    """
    self.voidedges = filter.canny(self.data)
    point_num = np.where(self.voidedges == True)
    self.pointnum = np.size(point_num[0])
    print "Number of border points :", self.pointnum
    return self.voidedges
def label_particles_edge(im, sigma=2, closing_size=0, **extra_args):
    """ Segment image using Canny edge-finding filter.

        parameters
        ----------
        im : image in which to find particles
        sigma : size of the Canny filter
        closing_size : size of the closing filter

        returns
        -------
        labels : an image array of uniquely labeled segments
    """
    from skimage.morphology import square, binary_closing, skeletonize
    if skimage_version < StrictVersion('0.11'):
        from skimage.filter import canny
    else:
        from skimage.filters import canny
    edges = canny(im, sigma=sigma)
    if closing_size > 0:
        edges = binary_closing(edges, square(closing_size))
    edges = skeletonize(edges)
    labels = sklabel(edges)
    print "found {} segments".format(labels.max())
    # in ma.array mask, False is True, and vice versa
    labels = np.ma.array(labels, mask=edges == 0)
    return labels
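# The version check above reflects canny moving from skimage.filter (<= 0.10) to
# skimage.filters (>= 0.11); in recent releases it is exposed as skimage.feature.canny.
# A hedged sketch of a try/except shim that avoids comparing version strings:
def _import_canny():
    try:
        from skimage.feature import canny          # current location
    except ImportError:
        try:
            from skimage.filters import canny      # skimage >= 0.11
        except ImportError:
            from skimage.filter import canny       # skimage <= 0.10
    return canny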
def parse(filename):
    image = io.imread(filename, as_grey=True)
    if image.shape[0] != 768:
        print "WARN: Resizing image to old iPad Size. TODO> Move forward to retina images!"
        print image.shape
        image = transform.resize(image, (768, 1024))
        print image.shape
    ff = filter.canny(image)
    # io.imshow(ff)
    pi = 0
    for i in xrange(len(ff)):
        cnt = np.count_nonzero(ff[i])
        if cnt > 800:
            diff = i - pi
            if diff > 50 and diff < 60:
                slit = image[i-52:i]
                yield slit
                # data = slit.flatten()
                # yield data
                # io.imshow(slit)
            print i, cnt, i - pi
            pi = i
def _get_canny_image(self):
    ci = canny(
        self.original_image,
        sigma=self.canny_sigma,
        low_threshold=self.canny_low_threshold,
        high_threshold=self.canny_high_threshold,
    )
    return ci
def filter(data, filtType, par):
    if filtType == "sobel":
        filt_data = sobel(data)
    elif filtType == "roberts":
        filt_data = roberts(data)
    elif filtType == "canny":
        filt_data = canny(data)
    elif filtType == "lowpass_avg":
        from scipy import ndimage
        p = int(par)
        kernel = np.ones((p, p), np.float32) / (p * p)
        filt_data = ndimage.convolve(data, kernel)
    elif filtType == "lowpass_gaussian":
        s = float(par)
        filt_data = gaussian_filter(data, sigma=s)
    elif filtType == "highpass_gaussian":
        s = float(par)
        lp_data = gaussian_filter(data, sigma=s)
        filt_data = data - lp_data
    elif filtType == "highpass_avg":
        from scipy import ndimage
        p = int(par)
        kernel = np.ones((p, p), np.float32) / (p * p)
        lp_data = ndimage.convolve(data, kernel)
        filt_data = data - lp_data
    #elif filtType == "gradient":
    return filt_data
def segment(self, src):
    ndsrc = src.ndarray / 255.
    edges = canny(ndsrc,
                  # low_threshold=0.001,
                  # high_threshold=0.1,
                  # low_threshold=self.canny_low_threshold,
                  # high_threshold=self.canny_high_threshold,
                  sigma=self.canny_sigma)
    filled = ndimage.binary_fill_holes(edges)
    filled = invert(filled) * 255

    # label_objects, _ = ndimage.label(filled)
    # sizes = bincount(label_objects.ravel())
    #
    # mask_sizes = sizes > 1
    # mask_sizes[0] = 0
    # cleaned = mask_sizes[label_objects]
    # cleaned = asarray(cleaned, 'uint8')
    # cleaned = closing(cleaned, square(5))
    # self._locate_helper(invert(cleaned), **kw)
    nsrc = asarray(filled, 'uint8')
    return nsrc
def get_edge(name, sig=8):
    im = ndimage.imread(name, True)
    edge = filter.canny(im, sigma=sig)
    modded = (255.0 / edge.max() * (edge - edge.min())).astype(np.uint8)
    edged = Image.fromarray(modded)
    edged.save("photos/edge.png")
    return edged
def find_iris(image, pupil, **kwargs):
    buffer = 20
    # run canny
    image = filter.canny(image, sigma=1, low_threshold=10, high_threshold=50)
    cx, cy, radius = pupil
    segments = get_segments(400, step=0.01)
    # get ray directions
    directions = zip(map(cos, segments[0]), map(sin, segments[0]))
    shape = image.shape
    points = []
    for d in directions:
        start = (cx + (radius + buffer) * d[0], cy + (radius + buffer) * d[1])
        ray = Ray(image, start, d)
        point = ray.fire()
        if point != None:
            points.append(point)
    for p in points:
        x, y = circle_perimeter(int(p[0]), int(p[1]), 3)
        x = x[x < rgb.shape[0]]
        y = y[y < rgb.shape[1]]
        rgb[x, y] = (220, 40, 40)
    e = Ellipse().fit_with_center(None, points)
    return image, points, e
def main():
    plt.figure(figsize=(25, 24))
    planes = ['samolot00.jpg', 'samolot01.jpg', 'samolot03.jpg', 'samolot04.jpg',
              'samolot05.jpg', 'samolot07.jpg', 'samolot08.jpg', 'samolot09.jpg',
              'samolot10.jpg', 'samolot11.jpg', 'samolot12.jpg', 'samolot13.jpg',
              'samolot14.jpg', 'samolot15.jpg', 'samolot16.jpg', 'samolot17.jpg',
              'samolot18.jpg', 'samolot20.jpg']
    i = 1
    for file in planes:
        img = data.imread(file, as_grey=True)
        img2 = data.imread(file)
        ax = plt.subplot(6, 3, i)
        ax.axis('off')
        img **= 0.4
        img = filter.canny(img, sigma=3.0)
        img = morphology.dilation(img, morphology.disk(4))
        img = ndimage.binary_fill_holes(img)
        img = morphology.remove_small_objects(img, 1000)
        contours = measure.find_contours(img, 0.8)
        ax.imshow(img2, aspect='auto')
        for n, contour in enumerate(contours):
            ax.plot(contour[:, 1], contour[:, 0], linewidth=1.5)
            center = (sum(contour[:, 1]) / len(contour[:, 1]),
                      sum(contour[:, 0]) / len(contour[:, 0]))
            ax.scatter(center[0], center[1], color='white')
        i += 1
    plt.savefig('zad2.pdf')
def watershed_segmentation(image_gray, markers_trace, markers_background):
    '''
    Segments the image into regions with one of two types (i.e. foreground
    and background) using a watershed algorithm.

    Parameters
    -----------
    image_gray : 2-D numpy array
        A grayscale image.
    markers_trace : 2-D Boolean numpy array
        An array with the same shape as image_gray, where the seeds of the
        trace regions are True.
    markers_background : 2-D Boolean numpy array
        An array with the same shape as image_gray, where the seeds of the
        background regions are True.

    Returns
    --------
    image_bin : 2-D Boolean numpy array
        A 2-D array with the same shape as the input image. Foreground
        pixels are True, and background pixels are False.
    '''
    bin_markers = np.zeros_like(image_gray, dtype=int)
    bin_markers = np.where(markers_trace, 2, 0)
    bin_markers = np.where(markers_background, 1, bin_markers)
    #image_sobel = sobel(gaussian_filter(image_gray, 1))
    image_sobel = sobel(image_gray)
    edges = np.maximum(canny(image_gray).astype(float), image_sobel)
    image_bin = watershed(edges, bin_markers)
    image_bin = image_bin == 2
    return image_bin
def auto_canny(array, average=None, gaussian_sigma=1, strongness=2.5):
    if average is None:
        average = array.size ** 0.5 / array.size
    array -= array.min()
    array /= array.max()

    def canny_average(hard_threshold):
        soft_threshold = hard_threshold / strongness
        edges = canny(array, gaussian_sigma, hard_threshold, soft_threshold)
        return edges.mean()

    hard_threshold = 0.4
    epsilon = 0.0001
    bottom, top = 0., 1.
    for iteration in xrange(20):
        current_average = canny_average(hard_threshold)
        print(hard_threshold, current_average)
        if abs(current_average - average) < epsilon:
            break
        elif current_average < average:
            top = hard_threshold
            hard_threshold = (bottom + top) / 2
        else:
            bottom = hard_threshold
            hard_threshold = (bottom + top) / 2
    else:
        print("Out of attempts")
    soft_threshold = hard_threshold / strongness
    return canny(array, gaussian_sigma, hard_threshold, soft_threshold)
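# Hedged usage sketch for auto_canny above: the target edge fraction and the input
# image are assumptions. The normalisation in auto_canny mutates its argument, so
# the astype(float) copy keeps the caller's array intact.
def _auto_canny_example(gray_image):
    edges = auto_canny(gray_image.astype(float), average=0.05)
    return edges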
def findPlantsCanny(stackVar, stackSum, showImages=True):
    edges = canny(stackVar)
    fill_stack = ndimage.binary_fill_holes(edges)
    label_objects, nb_labels = ndimage.label(fill_stack)
    sizes = np.bincount(label_objects.ravel())
    mask_sizes = sizes > 25

    for label in range(len(mask_sizes)):
        '''
        Get rid of lines in addition to the straight size threshold.
        '''
        pts = np.where(label_objects == label)
        xRange = (max(pts[0]) - min(pts[0]))
        yRange = (max(pts[1]) - min(pts[1]))
        areaCovered = float(len(pts[0])) / (xRange * yRange)
        if (areaCovered < .33) or (xRange < 3) or (yRange < 3):
            mask_sizes[label] = False

    mask_sizes[0] = 0
    plants_cleaned = mask_sizes[label_objects]
    labeled_plants, numPlants = ndimage.label(plants_cleaned)
    center = findCenters(labeled_plants, stackSum)

    if showImages:
        fig, axs = plt.subplots(1, 3, figsize=(14, 4), sharey=True)
        axs[0].imshow(stackVar)
        axs[1].imshow(stackVar, cmap=plt.cm.jet, interpolation='nearest')  #@UndefinedVariable
        axs[1].contour(plants_cleaned, [0.5], linewidths=1.2, colors='y')
        axs[2].imshow(labeled_plants, cmap=plt.cm.spectral, interpolation='nearest')  #@UndefinedVariable
        axs[2].scatter(np.array(center.tolist())[:, 1], np.array(center.tolist())[:, 0],
                       color='grey')
        for ax in axs:
            ax.axis('off')
        fig.subplots_adjust(wspace=.01)

    return labeled_plants, center
def get_gravatar_array(email, sz=100, edge=(5, 20, 2, 2), mask=((0, 10), (80, 90)), shrink=0.0): g = hashlib.md5(email.lower()).hexdigest() + ".jpg" fname = "/tmp/" + g url = "http://www.gravatar.com/avatar/" + g os.system("curl -s -o %s %s" % (fname, url)) g = scipy.misc.imread(fname, flatten=True)[5:75, 5:75] g = 255 - scipy.misc.imresize(g, (sz, sz)) if edge: (sigma, hi, low, sigma2) = edge g = 255 * canny(g, sigma, hi, low) g = (ndimage.gaussian_filter(g, sigma=sigma2) > 25) * 255 if mask: ((r1, c1), (r2, c2)) = mask g[r2:, :] = 0 g[:r1, :] = 0 g[:, :c1] = 0 g[:, c2:] = 0 if shrink > 0.0: (nx, ny) = g.shape px = int(nx * shrink / 2) py = int(ny * shrink / 2) g = concatenate((zeros((py, nx), dtype=g.dtype), g, zeros((py, nx), dtype=g.dtype)), axis=0) g = concatenate((zeros((ny + 2 * py, px), dtype=g.dtype), g, zeros((ny + 2 * py, px), dtype=g.dtype)), axis=1) g = scipy.misc.imresize(g, (sz, sz)) g = g.flatten() return g
def path2tab(path, nbI, threshold):
    tab = []
    maxAddr = 0
    # Open the directory
    dirs = os.listdir(path)
    # For each file
    for file in dirs:
        # If the file is a .jpg image
        if file[-4:] == '.jpg':
            # Load the image as an array
            image = io.imread(path + file, as_grey=True)
            edges = canny(image)
            address = 0
            # For each row of the image
            for ligne in edges:
                # For each pixel in the row
                for pix in ligne:
                    # If the pixel is white (it is an edge)
                    if pix == True:
                        # Add the pixel to the table to trigger a spike
                        # from the corresponding neuron
                        tab.append((address, nbI))
                        if address > maxAddr:
                            maxAddr = address
                    # Increment the address
                    address += 1
            # Increment the number of processed images
            nbI += 1
    print maxAddr
    return tab, nbI
def label_particles_edge(im, sigma=2, closing_size=0, **extra_args):
    """ label_particles_edge(image, sigma=3, closing_size=3)

        Returns the labels for an image.
        Segments using Canny edge-finding filter.

        keyword arguments:
        image        -- The image in which to find particles
        sigma        -- The size of the Canny filter
        closing_size -- The size of the closing filter
    """
    from skimage.morphology import square, binary_closing, skeletonize
    if skversion < version('0.11'):
        from skimage.filter import canny
    else:
        from skimage.filters import canny
    edges = canny(im, sigma=sigma)
    if closing_size > 0:
        edges = binary_closing(edges, square(closing_size))
    edges = skeletonize(edges)
    labels = sklabel(edges)
    print "found {} segments".format(labels.max())
    # in ma.array mask, False is True, and vice versa
    labels = np.ma.array(labels, mask=edges == 0)
    return labels
def mapEdge(req):
    """Convert img to bytearray and do edge detection on centered building"""
    img = io.imread(req.geturl(), flatten=True)
    labels, numobjects = ndimage.label(img)
    edges = filter.canny(img, sigma=3)
    return edges
def image_filter(self, image, **kwargs):
    canny_keys = ('sigma', 'low_threshold', 'high_threshold')
    canny_kwargs = dict([(k, kwargs.pop(k)) for k in canny_keys])
    hough_kwargs = kwargs
    edges = canny(image, **canny_kwargs)
    lines = probabilistic_hough(edges, **hough_kwargs)
    self._lines = lines
    return edges
def makeEdges(self):
    image = self.image
    edges = canny(image / 255.)
    ax = self.axs[0, 4]
    ax.imshow(edges, cmap=plt.cm.gray, interpolation='nearest')
    ax.axis('off')
    ax.set_title('Canny detector')
    self.edges = edges
    #fig.canvas.show()
def getArea(address): """Geocode address and retreive image centered around lat/long""" address = address results = Geocoder.geocode(address) lat, lng = results[0].coordinates zip_code = results[0].postal_code map_url = 'https://maps.googleapis.com/maps/api/staticmap?center={0},{1}&size=640x640&zoom=19&sensor=false&maptype=roadmap&&style=visibility:simplified|gamma:0.1' request_url = map_url.format(lat, lng) req = urllib.urlopen(request_url) img = io.imread(req.geturl(),flatten=True) labels, numobjects = ndimage.label(img) image = filter.canny(img, sigma=3) thresh = threshold_otsu(image) bw = closing(image > thresh, square(3)) # remove artifacts connected to image border cleared = bw.copy() clear_border(cleared) # label image regions label_image = label(cleared) borders = np.logical_xor(bw, cleared) label_image[borders] = -1 image_label_overlay = label2rgb(label_image, image=image) fig, ax = plt.subplots(ncols=1, nrows=1, figsize=(6, 6)) ax.imshow(image_label_overlay) dist = [] rp = regionprops(label_image) rp = [x for x in rp if 100 < x.area <= 900] for region in rp: # skip small images #if region.area < 100: # continue dist.append(sqrt( ( 320-region.centroid[0] )**2 + ( 320-region.centroid[1] )**2 )) # draw rectangle around segmented coins #minr, minc, maxr, maxc = region.bbox #rect = mpatches.Rectangle((minc, minr), maxc - minc, maxr - minr, # fill=False, edgecolor='red', linewidth=2) #ax.add_patch(rect) roof_index = dist.index(min(dist)) minr, minc, maxr, maxc = rp[roof_index].bbox rect = mpatches.Rectangle((minc, minr), maxc - minc, maxr - minr, fill=False, edgecolor='red', linewidth=2) ax.add_patch(rect) img = StringIO() fig.savefig(img) img.seek(0) session['roof_area'] = rp[roof_index].area roof_area = (rp[roof_index].area)*12 return(roof_area)
def find_lines(sourcefile=SOURCEFILE, plot=PLOT, shrink=2, threshold=128, scale_first=True, **transformparams): """ This function borrows from http://scikits-image.org/docs/0.3/auto_examples/plot_hough_transform.html """ # Line finding, using the Probabilistic Hough Transform params = {} params.update(TRANSFORMPARAMS) params.update(transformparams) print params image_orig = imread(sourcefile) # scale up if scale_first: image_orig = scale_up(image_orig) # thin lines if shrink > 0: # note that this returns a one-bit image shrunk_image = shrink_lines(image_orig,threshold=threshold, iterations=shrink) else: # in this case shrunk_image may still be grayscale shrunk_image = image_orig # switch the y axis image = shrunk_image[::-1,:] #edge detection edges = canny(image, 2, 1, 25) #actual transform lines = probabilistic_hough(edges, **params) if PLOT: plt.figure(figsize=(12, 4)) plt.subplot(131) plt.imshow(image, cmap=plt.cm.gray) plt.title('Input image') plt.subplot(132) plt.imshow(edges, cmap=plt.cm.gray) plt.title('Sobel edges') plt.subplot(133) plt.imshow(edges * 0) for line in lines: p0, p1 = line plt.plot((p0[0], p1[0]), (p0[1], p1[1])) plt.title('Lines found with PHT') plt.axis('image') plt.show() if scale_first: lines = np.array(lines)/2 return lines
def process_image(request, image): """Creates an edge image and calculates the values needed for the score calculation if necessary. This function is called as soon as an image is requested. :param request: The request object containing the user request. :type request: :class:`django.http.HttpRequest`. :param image: The image to be processed. :type image: :class:`models.Image`. """ # detect edges if not image.edge_image: greyscale_image = io.imread(os.path.join(settings.MEDIA_ROOT, image.image.name), as_grey=True) # resize image height = len(greyscale_image) width = len(greyscale_image[0]) factor = 768.0 / height greyscale_image = transform.resize(greyscale_image, [height * factor, width * factor]) # detect edges edges = filter.canny(greyscale_image, sigma=image.canny_sigma, low_threshold=image.canny_low_threshold, high_threshold=image.canny_high_threshold) # save edge image temp_filename = '/tmp/' + request.session.session_key + '.png' io.imsave(temp_filename, ~edges * 1.) image.edge_image.save(slugify(os.path.splitext(os.path.basename(image.image.name))[0]) + '.png', File(open(temp_filename))) os.remove(temp_filename) if not image.dilated_edge_image: edge_image = io.imread(os.path.join(settings.MEDIA_ROOT, image.edge_image.name), as_grey=True) edge_image = edge_image.astype(np.float64) if edge_image.max() > 1.: edge_image /= 255. # map values greater .5 as edge edge_image = (1. - edge_image) / .5 # save dilated edge image temp_filename = '/tmp/' + request.session.session_key + '.png' io.imsave(temp_filename, ~ndimage.binary_dilation(edge_image, iterations=2) * 1.) image.dilated_edge_image.save(slugify(os.path.splitext(os.path.basename(image.image.name))[0]) + '.png', File(open(temp_filename))) os.remove(temp_filename) # save maximum distance (needed for score calculation) if not image.max_distance: ones = np.ones(image.edge_image.height * image.edge_image.width).reshape((image.edge_image.height, image.edge_image.width)) dilated_edge_image = io.imread(os.path.join(settings.MEDIA_ROOT, image.dilated_edge_image.name), as_grey=True) dilated_edge_image = dilated_edge_image.astype(np.float64) if dilated_edge_image.max() > 1.: dilated_edge_image /= 255. image.max_distance = np.sum(np.absolute(ones - dilated_edge_image)) image.save()
def profiling(self):
    image = resize(self.image, (257, 257))
    edges = ff.canny(image)
    edges = skeletonize(edges)
    edges = edges.astype(int)
    edges = edges[1:256, 1:256]
    lt = self.feature_extraction_profiling(edges, 2, 5)
    profiling = lt.ravel()
    return np.append(profiling, [float(self.ratio) / 10])
def getEdgeMatrix(img, sigpercent=.01, axis=0):
    '''Use the canny filter to produce a boolean matrix with the same
    dimensions as the image where the value True indicates an edge and
    False indicates no edge.

    sig is the tuning parameter for the canny filter: a higher sig means
    fewer edges detected (smoother image)
    '''
    # If the image is not already grayscale, scale it down
    if np.ndim(img) == 3:
        img = img.mean(axis=2)
    edgeMat = filter.canny(img, sigma=sigpercent * np.shape(img)[axis])
    return edgeMat
def edge_change_ratio(frame1, frame2, sigma=3, low_threshold=20, high_threshold=80,
                      distance=24, edge_width=10, float_accuracy=3):
    """
    Calculate Edge Change Ratio for the given 2 frames (n-1, n)

    :param frame1: Frame N-1
    :param frame2: Frame N
    :param sigma: Edge detection level
    :param low_threshold: Dark threshold
    :param high_threshold: Bright threshold
    :param distance: Dilation distance
    :param edge_width: Distance of edges measured
    :param float_accuracy: Floating point precision
    :return: Float
    """
    frame1_grey = desaturate(frame1)
    frame2_grey = desaturate(frame2)

    frame1_edge = canny(frame1_grey, sigma, low_threshold, high_threshold)
    frame2_edge = canny(frame2_grey, sigma, low_threshold, high_threshold)

    frame1_inv_edge = invert(frame1_edge).astype('uint8') * 255
    frame2_inv_edge = invert(frame2_edge).astype('uint8') * 255

    frame1_contours = measure.find_contours(frame1_inv_edge, edge_width)
    frame2_contours = measure.find_contours(frame2_inv_edge, edge_width)

    frame1_dialate = dilation(frame1_edge, square(distance))
    frame2_dialate = dilation(frame2_edge, square(distance))

    frame1_comp = frame1_inv_edge + frame2_dialate
    frame2_comp = frame2_inv_edge + frame1_dialate

    frame1_comp_contours = measure.find_contours(frame1_comp, edge_width)
    frame2_comp_contours = measure.find_contours(frame2_comp, edge_width)

    try:
        return round(
            max(float(len(frame1_comp_contours)) / float(len(frame1_contours)),
                float(len(frame2_comp_contours)) / float(len(frame2_contours))),
            float_accuracy) * 100
    except ZeroDivisionError:
        return 0
def distinct(planes):
    for i, img in enumerate(planes):
        #contours = measure.find_contours(planes[i], 0.8)
        planes[i] = filter.canny(planes[i], sigma=3)
        #planes[i] = filter.sobel(planes[i])
        planes[i] = Image(morphology.dilation(planes[i], square(3)))
        #planes[i] = Image(morphology.erosion(planes[i], square(2)))
        message = "Distincted plane: " + str(i)
        print(message)
def processor(t_sigma, t_level, sigma, low_threshold, high_threshold):
    t_sigma = sigmoid(t_sigma) * 20
    t_level = sigmoid(t_level)
    sigma = sigmoid(sigma) * 50
    phase = p.unwrapped_phase
    local_context = gaussian_filter(phase, t_sigma)
    phase = (phase - local_context) / (1 - t_level)
    phase /= phase.ptp()
    phase -= phase.min()
    cannied = canny(phase, sigma, low_threshold, high_threshold)
    return np.vstack((phase, cannied))
def direction_skeleton(self):
    edges = ff.canny(self.image)
    edges = skeletonize(edges)
    edges = edges.astype(int)
    edges = edges[1:251, 1:251]
    zones = self.zoning(edges)
    for i in zones:
        self.line_segmentation(i)
    direction = self.direction_transfer(self.count_common(zones))
    return np.append(direction, [float(self.ratio) / 10])
def find_center(saas_image, sigma=0.8, num_circles=5):
    """
    Find the center of the image

    :param saas_image: A SAAS image object.
    :param sigma: The amount of gaussian blurring
    :param num_circles: The number of circles to find.
    Returns:
    """
    edges = filter.canny(saas_image.roi_data, sigma=sigma)
    hough_radii = np.arange(10, 70, 1)
    hough_res = hough_circle(edges, hough_radii)

    centers = []
    accums = []
    radii = []
    for radius, h in zip(hough_radii, hough_res):
        # For each radius, extract two circles
        peaks = peak_local_max(h, num_peaks=2)
        if peaks != []:
            centers.extend(peaks)
            accums.extend(h[peaks[:, 0], peaks[:, 1]])
            radii.extend([radius, radius])

    best_centers = []
    best_radii = []
    best_x = []
    best_y = []
    number_of_best_circles = num_circles
    for idx in np.argsort(accums)[::-1][:number_of_best_circles]:
        center_x, center_y = centers[idx]
        best_x.append(center_x)
        best_y.append(center_y)
        best_centers.append(centers[idx])
        best_radii.append(radii[idx])

    fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(8, 6))
    ax1.imshow(edges)
    ax2.imshow(saas_image.roi_data, cmap=cm.gray)
    for center, radius in zip(best_centers, best_radii):
        circle = plt.Circle((center[1], center[0]), radius, color='r', fill=False)
        ax2.add_patch(circle)

    print("Calibrated Center X = %s +/- %s" % (np.average(best_x), np.std(best_x)))
    print("Calibrated Center Y = %s +/- %s" % (np.average(best_y), np.std(best_y)))
    return np.array([[np.average(best_x), np.std(best_x)],
                     [np.average(best_y), np.std(best_y)]])
def processDropboxImage(files): job = get_current_job() job.meta['handled_by'] = socket.gethostname() job.meta['state'] = 'start' job.save() print 'Current job: %s' % (job.id) #print job.meta for file in files: import uuid url_to_grab = file['link'] image_path = '/vagrant/app/static/uploads/%s%s' % ( uuid.uuid4(), os.path.splitext(file['link'])[1]) urllib.urlretrieve(url_to_grab, image_path) job.meta['state'] = 'download complete' job.save() #time.sleep(3) im = ndimage.imread(image_path, True) job.meta['state'] = 'image loaded complete' job.save() # time.sleep(3) edges2 = filter.canny(im, sigma=3) job.meta['state'] = 'filter complete' job.save() #time.sleep(3) misc.imsave(image_path[:-4] + '-canny.jpg', edges2) job.meta['state'] = 'image saved' job.save() #time.sleep(3) return_data = {} return_data['processed'] = image_path[:-4] + '-canny.jpg' return_data['original'] = image_path return_data['src'] = '/static/uploads/%s' % os.path.split( return_data['original'])[1] return_data['srcproc'] = '/static/uploads/%s' % os.path.split( return_data['original'])[1][:-4] + '-canny.jpg' #return_data['callback_id'] = callback_id #print job.meta return json.dumps(return_data)
def filter_function(img_grey, filt='canny'):
    """
    Grayscales and apply edge detectors to image.
    Returns the flattened filtered image array.
    input: raw image 3d tensor
    output: filtered image
    filters: 'sobel', 'roberts', 'scharr'
    default filter = 'canny'
    """
    # grayscale filters:
    if filt == 'sobel':
        return sobel(img_grey)
    elif filt == 'roberts':
        return roberts(img_grey)
    elif filt == 'canny':
        return canny(img_grey)
    elif filt == 'scharr':
        return scharr(img_grey)
    elif filt == ('canny', 'sobel'):
        return canny(sobel(img_grey))
    else:
        raise Exception('No Such Filter!')
def init_pq(imgSize, radiance):
    boundary = np.zeros((imgSize, imgSize))
    # calculate E_x and E_y to initialize
    E_x = np.array(radiance, copy=True)
    E_y = np.array(radiance, copy=True)
    E_x[:, 1:-1] = 0.5 * (E_x[:, 2:] - E_x[:, :-2])
    E_y[1:-1, :] = 0.5 * (-E_y[2:, :] + E_y[:-2, :])
    boundary = filter.canny(radiance, sigma=1.0)
    p_init = np.array(E_x * boundary, copy=True)
    q_init = np.array(E_y * boundary, copy=True)
    return p_init, q_init, boundary
def init_fg(imgSize, radiance):
    boundary = np.zeros((imgSize, imgSize))
    # calculate E_x and E_y to initialize
    E_x = np.array(radiance, copy=True)
    E_y = np.array(radiance, copy=True)
    E_x[:, 1:-1] = 0.5 * (E_x[:, 2:] - E_x[:, :-2])
    E_y[1:-1, :] = 0.5 * (-E_y[2:, :] + E_y[:-2, :])
    f, g = pq2fg(E_x, E_y)
    boundary = filter.canny(radiance, sigma=3.0)
    f_init = np.array(f * boundary, copy=True)
    g_init = np.array(g * boundary, copy=True)
    return f_init, g_init, boundary
def ContourLoss(self, img, label, IDMask): contour = filter.canny(img) selem = disk(5) outlier = dilation(contour, selem) outlier[np.where(np.absolute(IDMask) > 0)] = 0 contour = contour.astype(float) contour *= IDMask Similarity = np.ones_like(IDMask, dtype=np.float) Similarity[np.where(outlier > 0)] = -0.5 if np.amax(IDMask) > 100000: raise Exception("Wrong in IDMask!") LocationsSrc = { index: np.where(np.absolute(contour) == index + 1) for index in range(np.amax(IDMask)) } LocationsRef = { index: np.where(IDMask == -(index + 1)) for index in range(np.amax(IDMask)) } for index in range(np.amax(IDMask)): SrcX = LocationsSrc[index][0] SrcY = LocationsSrc[index][1] RefX = LocationsRef[index][0] RefY = LocationsRef[index][1] ContourSimilarity = 0 if np.size(SrcX) > 0.9 * np.size(RefX) and np.size( RefX) > 10 and np.size(SrcX) < 1.2 * np.size(RefX): if np.size(np.unique(SrcX)) > np.size(np.unique(SrcY)): VecSrc = np.polyfit(SrcX, SrcY, 3)[:3] VecRef = np.polyfit(RefX, RefY, 3)[:3] else: VecSrc = np.polyfit(SrcY, SrcX, 3)[:3] VecRef = np.polyfit(RefY, RefX, 3)[:3] ContourSimilarity = np.absolute(np.inner(VecSrc, VecRef)) / ( LA.norm(VecSrc) + 1e-10) / (LA.norm(VecRef) + 1e-10) Similarity[np.where(np.absolute(IDMask) == index + 1)] = ContourSimilarity return Similarity
def build_r_table(image, origin):
    ''' Build the R-table from the given shape image and a reference point
    '''
    edges = canny(image, low_threshold=MIN_CANNY_THRESHOLD,
                  high_threshold=MAX_CANNY_THRESHOLD)
    gradient = gradient_orientation(edges)

    r_table = defaultdict(list)
    for (i, j), value in np.ndenumerate(edges):
        if value:
            r_table[gradient[i, j]].append((origin[0] - i, origin[1] - j))
    return r_table
def task_2_3():
    I = imread('images/coins.png')
    edges_I = canny(I)
    radi = np.arange(3, 30)
    hough1 = hough_circle(I, radi)
    hough2 = hough_circle(edges_I, radi)
    print hough1.shape
    fig, ax = plt.subplots(1, 2)
    ax[0].imshow(I, cmap='gray')
    ax[1].imshow(hough2[0], cmap='gray')
    ax[0].set_title('Original image')
    ax[0].axis('off')
    ax[1].set_title('Original Image segmented via Hough Circle')
    ax[1].axis('off')
    plt.show()
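# The snippet above computes the Hough accumulator but never extracts circle
# parameters. A hedged sketch of the usual follow-up step, assuming a scikit-image
# version that provides hough_circle_peaks (0.13+) and skimage.feature.canny:
def _detect_coin_circles(image, radii):
    from skimage.feature import canny
    from skimage.transform import hough_circle, hough_circle_peaks
    edges = canny(image, sigma=2)
    hough_res = hough_circle(edges, radii)
    accums, cx, cy, found_radii = hough_circle_peaks(hough_res, radii,
                                                     total_num_peaks=10)
    return list(zip(cx, cy, found_radii))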
def accumulate_gradients(r_table, grayImage):
    ''' Perform a General Hough Transform with the given image and R-table
    '''
    edges = canny(grayImage, low_threshold=MIN_CANNY_THRESHOLD,
                  high_threshold=MAX_CANNY_THRESHOLD)
    gradient = gradient_orientation(edges)

    accumulator = np.zeros(grayImage.shape)
    for (i, j), value in np.ndenumerate(edges):
        if value:
            for r in r_table[gradient[i, j]]:
                accum_i, accum_j = i + r[0], j + r[1]
                if accum_i < accumulator.shape[0] and accum_j < accumulator.shape[1]:
                    accumulator[accum_i, accum_j] += 1
    return accumulator
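# build_r_table and accumulate_gradients above form a generalized Hough transform
# pair. A minimal sketch of how they fit together; gradient_orientation and the
# MIN/MAX_CANNY_THRESHOLD constants are assumed to be defined as in those snippets,
# and the choice of reference point is an assumption.
def _general_hough_example(template_gray, scene_gray):
    import numpy as np
    origin = (template_gray.shape[0] // 2, template_gray.shape[1] // 2)
    r_table = build_r_table(template_gray, origin)
    accumulator = accumulate_gradients(r_table, scene_gray)
    # the brightest accumulator cell is the most likely object location
    best = np.unravel_index(accumulator.argmax(), accumulator.shape)
    return best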
def test_01_02_circle_with_noise(self):
    '''Test that the Canny filter finds the circle outlines in a noisy image'''
    np.random.seed(0)
    i, j = np.mgrid[-200:200, -200:200].astype(float) / 200
    c = np.abs(np.sqrt(i * i + j * j) - .5) < .02
    cf = c.astype(float) * .5 + np.random.uniform(size=c.shape) * .5
    result = F.canny(cf, 4, .1, .2, np.ones(c.shape, bool))
    #
    # erode and dilate the circle to get rings that should contain the
    # outlines
    #
    cd = binary_dilation(c, iterations=4)
    ce = binary_erosion(c, iterations=4)
    cde = np.logical_and(cd, np.logical_not(ce))
    self.assertTrue(np.all(cde[result]))
    point_count = np.sum(result)
    self.assertTrue(point_count > 1200)
    self.assertTrue(point_count < 1600)
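# For reference, a hedged sketch of the noiseless counterpart of the test above:
# build the same synthetic ring and count the edge pixels canny recovers (array
# sizes follow the test; the skimage.feature import path assumes a current release).
def _canny_circle_sketch():
    import numpy as np
    from skimage.feature import canny
    i, j = np.mgrid[-200:200, -200:200].astype(float) / 200
    ring = np.abs(np.sqrt(i * i + j * j) - .5) < .02
    edges = canny(ring.astype(float), sigma=4)
    return edges.sum()   # number of edge pixels found on the ring outline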