def __init__(self, coord, convex_hull=False):
    """Store polygon coordinates, optionally reduced to their convex hull.

    coord: list of (lon, lat[, alt]) tuples.
    convex_hull: when True, replace coord with the convex hull of its
        (lon, lat) projection; altitudes (if present) are carried over
        from the matching original points.
    """
    # Guard the empty case: len(coord[0]) below would raise IndexError.
    if convex_hull and coord:
        if len(coord[0]) == 2:
            coord = ConvexHull.convex_hull(coord)
        else:
            # The hull is computed on the 2-D (lon, lat) projection.  The
            # hull may reorder and drop points, so look each hull vertex's
            # altitude up from the original point with the same (lon, lat)
            # instead of zipping by index -- the old zip paired hull vertex
            # i with original point i, attaching the wrong altitudes.
            # Assumes ConvexHull.convex_hull returns a subset of its input
            # points -- TODO confirm against the ConvexHull module.
            alt_by_xy = {(p[0], p[1]): p[2] for p in coord}
            hull_2d = ConvexHull.convex_hull([p[:2] for p in coord])
            coord = [(k[0], k[1], alt_by_xy[(k[0], k[1])]) for k in hull_2d]
    self.coord = coord
def __init__(self, video_src, debugMode=True): self.do_work = False # To perform or not perform the recognition self.direction = 0 self.debugMode = debugMode self.fingers = 0 self.frame = 0 self.camera = cv2.VideoCapture(video_src) self.camera.set(3, 640) self.camera.set(4, 480) self.posPre = 0 self.Data = { "angles less 90": 0, "cursor": (0, 0), "hulls": 0, "defects": 0, "fingers": 0, "fingers history": [0], "area": 0, } self.lastData = self.Data try: # Open the configuration file to read the settings from config_file = open("config.json", "r") self.config = json.load(config_file) except: print "Configuration file not found." exit() self.convex_hull = ConvexHull() if self.debugMode: cv2.namedWindow("Filters") cv2.createTrackbar("erode", "Filters", self.config[ "erode"], 255, self.erode) cv2.createTrackbar("dilate", "Filters", self.config[ "dilate"], 255, self.dilate) cv2.createTrackbar("smooth", "Filters", self.config[ "smooth"], 255, self.smooth) cv2.createTrackbar("upper", "Filters", self.config[ "upper"], 255, self.onChange_upper) cv2.createTrackbar("lower", "Filters", self.config[ "lower"], 255, self.onChange_lower)
# Strip hydrogens; the hull/slice geometry only needs heavy atoms.
target = loos.selectAtoms(target, "!hydrogen")
chains = target.splitByResidue()
# Normalize the z-range so zmin <= zmax regardless of argument order.
if (args.zmin > args.zmax):
    tmp = args.zmax
    args.zmax = args.zmin
    args.zmin = tmp
# Partition [zmin, zmax] into args.zblocks equal slabs, one z-slice
# selector per slab.
slicers = []
block_size = (args.zmax - args.zmin) / args.zblocks
for i in range(args.zblocks):
    z1 = args.zmin + i * block_size
    z2 = args.zmin + (i + 1) * block_size
    ch = ConvexHull.ZSliceSelector(z1, z2)
    slicers.append(ch)
frame = 0
# presumably maps lipid id -> bound-frame bookkeeping; confirm against the
# (unseen) remainder of this script.
bound_lipids = {}
# Walk the trajectory one frame at a time.
while (traj.readFrame()):
    traj.updateGroupCoords(system)
    # set up the convex hulls for this slice
    hulls = []
    for i in range(args.zblocks):
        current_list = []
        for h in range(len(helices)):
            # Restrict each helix to the atoms inside slab i.
            helix = slicers[i](helices[h])
            # NOTE(review): chunk is truncated here -- the remainder of this
            # loop body lies outside the visible source.
#Based on the means or centroids it creates the clusters clusters = kmeans.FindClusters() vor = Voronoi(means) #fig = voronoi_plot_2d(vor) list = [] # Call for each cluster the Jarvis March Algorithm for i in range(len(clusters)): if len(clusters[i]) <=1: continue else: chtemp = ch.JarvisMarch(clusters[i]) chtemp.ConvHull() #==============Visualization===================== means_x1 = [i[0] for i in means] means_y1 = [i[1] for i in means] cluster_x = [[clusters[i][j][0] for j in range(len(clusters[i]))] for i in range(len(clusters))] cluster_y = [[clusters[i][j][1] for j in range(len(clusters[i]))] for i in range(len(clusters))] for i in range(len (clusters)): plt.scatter(cluster_x[i],cluster_y[i],s=5) for i in range(len(means)): plt.scatter(means[i][0],means[i][1],color='Black')
from PIL import Image, ImageTk, ImageDraw, ImageDraw2
import ConvexHull


class CoordinateTrans:
    """Transforms points from a logical coordinate box to picture (pixel)
    coordinates, flipping the y axis so the logical origin is bottom-left.
    """

    def __init__(self, picture_size, coordinate_size):
        # picture_size: (width, height) in pixels.
        # coordinate_size: (xmin, ymin, xmax, ymax) logical bounding box.
        self.picture_size = picture_size
        self.coordinate_size = coordinate_size
        # Pixels per logical unit along each axis.
        self.xscal = float(self.picture_size[0]) / (self.coordinate_size[2] - self.coordinate_size[0])
        self.yscal = float(self.picture_size[1]) / (self.coordinate_size[3] - self.coordinate_size[1])

    def PointTransCoordinate(self, point):
        """Map a logical (x, y) point to pixel coordinates."""
        xpos = float(point[0] - self.coordinate_size[0]) * self.xscal
        ypos = float(point[1] - self.coordinate_size[1]) * self.yscal
        # Flip y: the picture origin is top-left, the logical origin
        # bottom-left.
        return (xpos, self.picture_size[1] - ypos)


if __name__ == '__main__':
    # Demo: convex hull of a few sample points, rendered with PIL.
    pointBuffer = [(0, 0), (2, 6), (9, 7), (6, 9), (5, 5)]
    li = ConvexHull.sortPoint(pointBuffer)
    res = ConvexHull.stack(li)  # hull vertices
    print res
    # NOTE(review): Tkinter is used below but not imported in this chunk --
    # confirm it is imported earlier in the file.
    top = Tkinter.Tk()
    picture_size = (800, 800)
    coordinate_size = (-2, -2, 10, 10)
    ct = CoordinateTrans(picture_size, coordinate_size)
    # Hull vertices converted to pixel coordinates for drawing.
    res2 = [ct.PointTransCoordinate(point) for point in res]
    imgf = Image.new('RGB', picture_size, (255, 255, 255))
    imgdraw = ImageDraw.Draw(imgf)
    imgdraw.polygon(res2, fill="yellow", outline="red")
    zero_point = ct.PointTransCoordinate((0, 0))
    # Draw the logical y axis if x=0 falls inside the picture.
    if zero_point[0] > 0 and zero_point[0] < picture_size[0]:
        imgdraw.line([(zero_point[0], 0), (zero_point[0], picture_size[1])], fill="black")
    # NOTE(review): chunk appears truncated here -- any further drawing /
    # display code lies outside the visible source.
class HandTracker(object):
    """Tracks a hand in a camera stream: skin-filters each frame, then
    delegates to a ConvexHull helper to count fingers and estimate the
    movement direction of the hand contour.

    NOTE(review): this chunk appears truncated -- run() calls self.exit(),
    which is not visible here.
    """

    def __init__(self, video_src, debugMode=True):
        # video_src: camera index / video path handed to cv2.VideoCapture.
        self.do_work = False  # To perform or not perform the recognition
        self.direction = 0    # last movement direction from draw_convex
        self.debugMode = debugMode
        self.fingers = 0      # last finger count from draw_convex
        self.frame = 0        # frame counter, cycled every 10 frames in run()
        self.camera = cv2.VideoCapture(video_src)
        self.camera.set(3, 640)  # property 3 = frame width
        self.camera.set(4, 480)  # property 4 = frame height
        self.posPre = 0
        # Aggregated per-frame recognition data.
        self.Data = {
            "angles less 90": 0,
            "cursor": (0, 0),
            "hulls": 0,
            "defects": 0,
            "fingers": 0,
            "fingers history": [0],
            "area": 0,
        }
        # NOTE(review): aliases the same dict object (not a copy); mutations
        # to self.Data are visible through self.lastData -- confirm intended.
        self.lastData = self.Data
        try:
            # Open the configuration file to read the settings from.
            # NOTE(review): the bare except also hides JSON syntax errors,
            # and the file handle is never closed.
            config_file = open("config.json", "r")
            self.config = json.load(config_file)
        except:
            print "Configuration file not found."
            exit()
        self.convex_hull = ConvexHull()
        if self.debugMode:
            # One trackbar per tunable filter parameter; each callback
            # persists the new value back to config.json.
            cv2.namedWindow("Filters")
            cv2.createTrackbar("erode", "Filters", self.config["erode"], 255, self.erode)
            cv2.createTrackbar("dilate", "Filters", self.config["dilate"], 255, self.dilate)
            cv2.createTrackbar("smooth", "Filters", self.config["smooth"], 255, self.smooth)
            cv2.createTrackbar("upper", "Filters", self.config["upper"], 255, self.onChange_upper)
            cv2.createTrackbar("lower", "Filters", self.config["lower"], 255, self.onChange_lower)

    def erode(self, value):
        # Trackbar callback: +1 keeps the erode kernel size >= 1 (a size-0
        # structuring element would be invalid).
        self.config["erode"] = value + 1
        json.dump(self.config, open("config.json", "w"), sort_keys=True, indent=4)

    def dilate(self, value):
        # Trackbar callback for the dilate kernel size (see erode).
        self.config["dilate"] = value + 1
        json.dump(self.config, open("config.json", "w"), sort_keys=True, indent=4)

    def smooth(self, value):
        # Trackbar callback for the blur kernel size (see erode).
        self.config["smooth"] = value + 1
        json.dump(self.config, open("config.json", "w"), sort_keys=True, indent=4)

    def onChange_upper(self, value):
        # Trackbar callback: upper hue bound for the skin filter.
        self.config["upper"] = value
        json.dump(self.config, open("config.json", "w"), sort_keys=True, indent=4)

    def onChange_lower(self, value):
        # Trackbar callback: lower hue bound for the skin filter.
        self.config["lower"] = value
        json.dump(self.config, open("config.json", "w"), sort_keys=True, indent=4)

    def filter_skin(self, im):
        """Apply the filter skin."""
        # HSV bounds: hue limits come from the trackbars, S/V limits from
        # the config file.
        UPPER = np.array([self.config["upper"], self.config["filterUpS"], self.config["filterUpV"]], np.uint8)
        LOWER = np.array([self.config["lower"], self.config["filterDownS"], self.config["filterDownV"]], np.uint8)
        hsv_im = cv2.cvtColor(im, cv2.COLOR_BGR2HSV)
        # Remove all the region except the skin
        filter_im = cv2.inRange(hsv_im, LOWER, UPPER)
        return filter_im

    def run(self):
        """Main capture loop: grab frames, run the filter pipeline when
        enabled, draw debug overlays, and exit on Esc."""
        while True:
            if (self.frame % 10 == 0):
                # Do something every 10 frames only
                # window_name = win32gui.GetWindowText(
                #     win32gui.GetForegroundWindow())
                self.frame = 1
                # Check if current window is in the list of allowed windows
                # or not
                if self.debugMode:
                    self.do_work = True
                else:
                    self.do_work = False
                # self.do_work = any(window.get(
                #     'name') in window_name for window in self.config.get('allowed_windows'))
            else:
                self.frame = self.frame + 1
            # print window_name + '=>' + str(self.do_work)
            ret, im = self.camera.read()
            im = cv2.flip(im, 1)  # Flip the image to make it as a mirror
            # print self.do_work
            if self.do_work:
                # Perform image processing on only the window that are allowed
                # print window_name
                self.im_orig = im.copy()
                self.imNoFilters = im.copy()
                # Smoothing to reduce noise.  Uses Normalized Box Filter --
                # simplest of all: each output pixel is the mean of its
                # kernel neighbors (all of them contribute with equal
                # weights).
                im = cv2.blur(im, (self.config["smooth"], self.config["smooth"]))
                im_blur = im.copy()
                filter_ = self.filter_skin(im)
                im_skin = filter_.copy()
                filter_ = cv2.erode(filter_, cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (self.config["erode"], self.config["erode"])))
                im_erode = filter_.copy()
                filter_ = cv2.dilate(filter_, cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (self.config["dilate"], self.config["dilate"])))
                dilated = filter_.copy()
                try:
                    # print 'here'
                    vis, self.fingers, self.direction = self.convex_hull.draw_convex(filter_, self.im_orig, self.frame)
                    ch = 0xFF & cv2.waitKey(5)
                    if ch == ord(' '):
                        # Space bar: dump every pipeline stage to disk.
                        cv2.imwrite('images/im_orig.jpg', self.imNoFilters)
                        cv2.imwrite('images/im_blur.jpg', im_blur)
                        cv2.imwrite('images/im_skin.jpg', im_skin)
                        cv2.imwrite('images/im_erode.jpg', im_erode)
                        cv2.imwrite('images/im_dilated.jpg', dilated)
                        cv2.imwrite('images/im_final.jpg', vis)
                        print 'Images saved to disk.'
                except Exception, e:
                    # dilated = self.im_orig
                    # print 'Error => ', e
                    # Fall back to the unprocessed frame on any hull failure.
                    vis = self.im_orig
            else:
                # NO image processing required
                vis = im
            if self.debugMode:
                common.draw_str(vis, (20, 50), 'No of fingers: ' + str(self.fingers))
                common.draw_str(vis, (20, 80), 'Movement of contour:' + str(self.direction))
                cv2.imshow('dilated', dilated)
            # NOTE(review): indentation reconstructed -- the two statements
            # below are assumed to run every frame, outside the debugMode
            # block.
            cv2.imshow('image', vis)
            if cv2.waitKey(1) == 27:
                # IF Esc key is pressed
                self.exit()
                return
import Point as Pt
import random as r
import Segment as Sg
import ConvexHull


def generate_random_points(n):
    """Return n random Points with x in [50, 950) and y in [50, 670)."""
    return [Pt.Point(r.randrange(50, 950), r.randrange(50, 670)) for _ in range(n)]


points = generate_random_points(100)
CH = ConvexHull.ConvexHull(points)
CH.algorithm()
convex_hull = CH.R

# Leftmost hull vertex, ties broken by the smaller y -- i.e. the
# lexicographic minimum of (x, y); None when the hull is empty.
leftest_vertex = min(convex_hull, key=lambda v: (v.x, v.y)) if convex_hull else None

triangulation_segments = []