def process_node(tup):
    """Render one graph node onto the display buffer.

    tup is (node, color, size, text): color is an (r, g, b) triple,
    size is the marker radius / line thickness, text the point label.
    Uses ``self``, ``t``, ``fun`` and ``cvpoint_of_pt`` from the
    enclosing scope.
    """
    node, color, size, text = tup
    color_scalar = cv.cvScalar(color[0], color[1], color[2])
    node_val = node.val(t)
    if node_val.__class__ != tuple:
        # Point-valued node: draw a labelled, filled circle per point.
        if node_val is not None:  # identity test instead of `!= None`
            # Reuse node_val rather than re-evaluating node.val(t).
            v_cv = self.disp.to_screen(node_val)
            highlight = cv.cvScalar(255, 255, 255)
            shadow = cv.cvScalar(50, 50, 50)
            # Plain for-loop instead of map() used only for side effects
            # (map-for-effect is a Python 2-only idiom).
            for n in fun.points_of_mat(v_cv):
                pt = cv.cvPoint(int(round(n[0, 0])), int(round(n[1, 0])))
                cv.cvCircle(self.disp.buffer, pt, size, color_scalar,
                            cv.CV_FILLED, cv.CV_AA)
                # Offset copy of the label gives a drop-shadow effect.
                pt2 = cv.cvPoint(pt.x + 2, pt.y + 2)
                cv.cvPutText(self.disp.buffer, text, pt, self.disp.font, highlight)
                cv.cvPutText(self.disp.buffer, text, pt2, self.disp.font, shadow)
    else:
        # Segment-valued node: (start_pts, end_pts) column matrices.
        start_pts, end_pts = node_val
        for idx in range(start_pts.shape[1]):
            start_pt = cvpoint_of_pt(self.disp.to_screen(start_pts[:, idx]))[0]
            end_pt = cvpoint_of_pt(self.disp.to_screen(end_pts[:, idx]))[0]
            cv.cvLine(self.disp.buffer, start_pt, end_pt, color_scalar,
                      size, cv.CV_AA)
def clear(self):
    # Reset the drawing buffer: white background, then the optional
    # polar grid (concentric half-meter rings + 30-degree spokes) and
    # the optional center marker at the bottom-middle of the buffer.
    cv.cvRectangle(self.buffer, cv.cvPoint(0,0), cv.cvPoint(self.buffer.width, self.buffer.height), cv.cvScalar(255,255,255), cv.CV_FILLED)
    if self.draw_grid:
        line_color = 230  # light gray
        lc = cv.cvScalar(line_color,line_color,line_color)
        for i in xrange(1, as_int(self.meters_disp)+3):
            # Ring at each half meter ...
            cv.cvCircle(self.buffer, cv.cvPoint(self.w/2,self.h), as_int(self.pixels_per_meter * (i-.5)),
                        #lc, 1)
                        lc, 1, cv.CV_AA)
            # ... and at each whole meter.
            cv.cvCircle(self.buffer, cv.cvPoint(self.w/2,self.h), as_int(self.pixels_per_meter * i),
                        #lc, 1)
                        lc, 1, cv.CV_AA)
        # Radial spokes every 30 degrees, long enough to leave the view.
        for i in xrange(360/30):
            x = (self.w/2) + math.cos(math.radians(i*30)) * self.pixels_per_meter * (self.meters_disp+2)
            y = self.h + math.sin(math.radians(i*30)) * self.pixels_per_meter * (self.meters_disp+2)
            cv.cvLine(self.buffer, cv.cvPoint(self.w/2,self.h), cv.cvPoint(as_int(x),as_int(y)), lc, 1, cv.CV_AA)
    if self.draw_center:
        # Red dot at the grid origin.
        cv.cvCircle(self.buffer, cv.cvPoint(self.w/2,self.h), 3, cv.cvScalar(0,0,200), cv.CV_FILLED, cv.CV_AA)
def __GetCrossDist(self, p1, dx, dy, iPointIndex):
    # Cast a ray from p1 along (dx, dy) and intersect it with every
    # polygon edge of self.keypoints except the two edges adjacent to
    # iPointIndex; return the distance to the closest forward hit
    # (0 when no edge is crossed).  Optionally draws the hit on self.img.
    bFound = 0
    fDist = 0
    bestPoint = cv.cvPoint(0, 0)
    bestLength = 1e10
    bigLength = -1  # NOTE(review): assigned but never used
    nPoints = len(self.keypoints)
    for k in range(nPoints):
        # Skip the two edges that share the vertex being probed.
        if (k == iPointIndex or k == iPointIndex + 1):
            continue
        q1 = self.keypoints[(k - 1 + nPoints) % nPoints]
        q2 = self.keypoints[k]
        du = q2.x - q1.x
        dv = q2.y - q1.y
        dd = (dy * du - dx * dv)  # cross product; 0 => ray parallel to edge
        if (dd == 0):
            continue
        # Edge parameter of the intersection; t in [0,1] means the hit
        # lies on the segment (small epsilon for endpoint hits).
        # NOTE(review): with integer point coordinates this division
        # truncates under Python 2 — confirm whether float coords are used.
        t = (dy * (p1.x - q1.x) - dx * (p1.y - q1.y)) / dd
        if (t >= -0.0001 and t <= 1.0001):
            # found it
            ptt = cv.cvPoint(int(q1.x + t * du), int(q1.y + t * dv))
            l = math.sqrt((ptt.x - p1.x ) * (ptt.x - p1.x ) + (ptt.y - p1.y ) * (ptt.y - p1.y))
            # l2 > 0 keeps only hits in the forward ray direction.
            l2 = ((dv * q1.x - du * q1.y) - (dv * p1.x - du * p1.y)) / ( dv * dx - du * dy)
            bFound = 1
            if (l <= bestLength and l2 > 0):
                bestPoint = ptt
                bestLength = l
                fDist = bestLength
    if (not bFound):
        fDist = 0
    if (self.img):
        # Debug overlay: white line from p1 to the chosen crossing point.
        cv.cvLine(self.img, cv.cvPoint(int(p1.x), int(p1.y)), bestPoint, cv.cvScalar(255, 255, 255, 0))
    return fDist
def draw_weighted_Pose2D(display, max_weight, particles):
    # Draw 2D pose particles; each entry is either a (particle, weight)
    # tuple or a bare particle; particles expose .pos (2x1 matrix) and
    # .angle.  Weighted particles are shaded by weight/max_weight.
    for p in particles:
        if type(p) is types.TupleType:
            part, weight = p
            rpos = part.pos
        else:
            part = p
            rpos = p.pos
        # Offset ~7 cm along the heading to get an orientation endpoint.
        x = mt.cos(part.angle) * .07
        y = mt.sin(part.angle) * .07
        dir = rpos.copy()  # NOTE(review): name shadows the builtin `dir`
        dir[0,0] = dir[0,0] + x
        dir[1,0] = dir[1,0] + y
        pos = display.to_screen(rpos)
        dirp = display.to_screen(dir)
        if type(p) is types.TupleType:
            # Magenta-ish dot whose green channel encodes the weight,
            # ringed by a thick gray outline.
            color = round(255.0 * (weight/max_weight))
            cv.cvCircle(display.buffer, cv.cvPoint((int) (pos[0,0]), (int) (pos[1,0])), 2,
                        cv.cvScalar(255, 255-color, 255), cv.CV_FILLED, cv.CV_AA)
            cv.cvCircle(display.buffer, cv.cvPoint((int) (pos[0,0]), (int) (pos[1,0])), 2,
                        cv.cvScalar(200, 200, 200), 8, cv.CV_AA)
        else:
            # Unweighted particle: plain gray dot.
            cv.cvCircle(display.buffer, cv.cvPoint((int) (pos[0,0]), (int) (pos[1,0])), 2,
                        cv.cvScalar(150, 150, 150), cv.CV_FILLED, cv.CV_AA)
        # Short green line showing the particle's heading.
        cv.cvLine(display.buffer, cv.cvPoint((int) (pos[0,0]), (int) (pos[1,0])),
                  cv.cvPoint((int) (dirp[0,0]), (int) (dirp[1,0])),
                  cv.cvScalar(100,200,100), 1, cv.CV_AA, 0)
def __GetCrossDist(self, p1, dx, dy, iPointIndex):
    # Ray/polygon-edge intersection: shoot a ray from p1 along (dx, dy),
    # skip the two edges adjacent to iPointIndex, and return the distance
    # to the nearest forward crossing (0 when none is found).
    bFound = 0
    fDist = 0
    bestPoint = cv.cvPoint(0, 0)
    bestLength = 1e10
    bigLength = -1  # NOTE(review): never used
    nPoints = len(self.keypoints)
    for k in range(nPoints):
        if (k == iPointIndex or k == iPointIndex + 1):
            continue
        # Edge from the previous keypoint (wrapping) to keypoint k.
        q1 = self.keypoints[(k - 1 + nPoints) % nPoints]
        q2 = self.keypoints[k]
        du = q2.x - q1.x
        dv = q2.y - q1.y
        dd = (dy * du - dx * dv)  # zero when the ray is parallel to the edge
        if (dd == 0):
            continue
        # NOTE(review): integer coordinates would truncate this division
        # under Python 2 — confirm coordinate types.
        t = (dy * (p1.x - q1.x) - dx * (p1.y - q1.y)) / dd
        if (t >= -0.0001 and t <= 1.0001):
            # found it
            ptt = cv.cvPoint(int(q1.x + t * du), int(q1.y + t * dv))
            l = math.sqrt((ptt.x - p1.x) * (ptt.x - p1.x) + (ptt.y - p1.y) * (ptt.y - p1.y))
            # Positive l2 restricts hits to the forward ray direction.
            l2 = ((dv * q1.x - du * q1.y) - (dv * p1.x - du * p1.y)) / (dv * dx - du * dy)
            bFound = 1
            if (l <= bestLength and l2 > 0):
                bestPoint = ptt
                bestLength = l
                fDist = bestLength
    if (not bFound):
        fDist = 0
    if (self.img):
        # Debug overlay of the chosen intersection.
        cv.cvLine(self.img, cv.cvPoint(int(p1.x), int(p1.y)), bestPoint, cv.cvScalar(255, 255, 255, 0))
    return fDist
def draw_weighted_Pose2D(display, max_weight, particles):
    """Draw 2D pose particles onto display.buffer.

    Each element of particles is either a (particle, weight) tuple or a
    bare particle; particles expose .pos (2x1 matrix) and .angle.
    Weighted particles are shaded by weight / max_weight.
    """
    for entry in particles:
        weighted = type(entry) is types.TupleType
        if weighted:
            particle, weight = entry
        else:
            particle = entry
        robot_pos = particle.pos
        # End point of the heading marker, ~7 cm along the orientation.
        heading = robot_pos.copy()
        heading[0, 0] = heading[0, 0] + mt.cos(particle.angle) * 0.07
        heading[1, 0] = heading[1, 0] + mt.sin(particle.angle) * 0.07
        screen_pos = display.to_screen(robot_pos)
        screen_tip = display.to_screen(heading)
        center = cv.cvPoint(int(screen_pos[0, 0]), int(screen_pos[1, 0]))
        if weighted:
            # Dot shaded by relative weight, with a thick gray ring.
            shade = round(255.0 * (weight / max_weight))
            cv.cvCircle(display.buffer, center, 2,
                        cv.cvScalar(255, 255 - shade, 255),
                        cv.CV_FILLED, cv.CV_AA)
            cv.cvCircle(display.buffer, center, 2,
                        cv.cvScalar(200, 200, 200), 8, cv.CV_AA)
        else:
            # Plain gray dot for unweighted particles.
            cv.cvCircle(display.buffer, center, 2,
                        cv.cvScalar(150, 150, 150), cv.CV_FILLED, cv.CV_AA)
        # Green heading line from the particle to its offset point.
        tip = cv.cvPoint(int(screen_tip[0, 0]), int(screen_tip[1, 0]))
        cv.cvLine(display.buffer, center, tip,
                  cv.cvScalar(100, 200, 100), 1, cv.CV_AA, 0)
def drawLines(original, outimage=None, lines=None, color=COL_RED):
    """Draw a list of lines on an image.
    If no outimage is supplied, the original is used.
    If no lines are supplied, default to drawing the golden section.
    If no color is supplied, use red"""
    if not outimage:
        outimage = original
    if not lines:
        # Golden-section lines computed from the image dimensions.
        lines = findGoldenMeans(cv.cvGetSize(original))
    for line in lines:
        cv.cvLine(outimage, line.p1, line.p2, color)
def main():
    """
    Just the test
    This method is a good resource on how to handle the results.
    Save images in this method if you have to.

    Usage: argv[1] = image file, argv[2] = cut index, argv[3] = method.
    """
    filename = sys.argv[1]
    image = highgui.cvLoadImage (filename)
    cutRatios = [lib.PHI]
    #cutRatios = [0.618]
    settings = Settings(cutRatios)
    image = highgui.cvLoadImage (filename)
    thickness = 4
    settings.setMarginPercentage(0.025)
    settings.setMethod(sys.argv[3])
    cut = int(sys.argv[2])
    winname = sys.argv[1]
    #settings.setThresholds(100,150)
    # Set the color for the boxes
    #color = lib.COL_BLACK
    #color = lib.COL_WHITE
    #color = lib.COL_RED
    color = lib.COL_GREEN
    #color = lib.COL_BLUE
    blobImg = blobResult(image, settings, cut)
    boxxImg = boundingBoxResult(image, settings, cut, thickness, color)
    # the cut value, though this should be generalized a bit
    cutt = lib.findMeans(cv.cvGetSize(image), settings.cutRatios[0])[cut]
    # Vertical cut iff both endpoints share the same x coordinate.
    oriantesen = cutt.getPoints()[0].x == cutt.getPoints()[1].x
    if oriantesen:
        cutPixel = cutt.getPoints()[1].x
    else:
        cutPixel = cutt.getPoints()[1].y
    # Draw the cut line in red on the bounding-box image.
    if oriantesen:
        # print 'hej'
        cv.cvLine(boxxImg, cv.cvPoint(cutPixel, cutt.getPoints()[0].y), cv.cvPoint(cutPixel, cutt.getPoints()[1].y), lib.COL_RED)
    else:
        cv.cvLine(boxxImg, cv.cvPoint(cutt.getPoints()[0].x, cutPixel), cv.cvPoint(cutt.getPoints()[1].x, cutPixel), lib.COL_RED)
    # Save images
    highgui.cvSaveImage('flood_cut_%s.png' % cut, boxxImg)
    highgui.cvSaveImage('blobs_cut_%s.png' % cut, blobImg)
    # Show images
    compareImages(blobImg, boxxImg, "blob", winname)
def clear(self):
    """Reset the drawing buffer to a white background.

    When self.draw_grid is set, overlay a polar grid (rings every half
    meter plus spokes every 30 degrees); when self.draw_center is set,
    mark the grid origin with a small red dot.
    """
    cv.cvRectangle(self.buffer, cv.cvPoint(0, 0),
                   cv.cvPoint(self.buffer.width, self.buffer.height),
                   cv.cvScalar(255, 255, 255), cv.CV_FILLED)
    origin = cv.cvPoint(self.w / 2, self.h)
    if self.draw_grid:
        shade = 230  # light gray
        grid_color = cv.cvScalar(shade, shade, shade)
        # Concentric rings at each half meter and whole meter.
        for ring in xrange(1, as_int(self.meters_disp) + 3):
            for radius_m in (ring - .5, ring):
                cv.cvCircle(self.buffer, origin,
                            as_int(self.pixels_per_meter * radius_m),
                            grid_color, 1, cv.CV_AA)
        # Spokes every 30 degrees, extending past the visible range.
        reach = self.pixels_per_meter * (self.meters_disp + 2)
        for spoke in xrange(360 / 30):
            theta = math.radians(spoke * 30)
            end_x = (self.w / 2) + math.cos(theta) * reach
            end_y = self.h + math.sin(theta) * reach
            cv.cvLine(self.buffer, origin,
                      cv.cvPoint(as_int(end_x), as_int(end_y)),
                      grid_color, 1, cv.CV_AA)
    if self.draw_center:
        cv.cvCircle(self.buffer, origin, 3, cv.cvScalar(0, 0, 200),
                    cv.CV_FILLED, cv.CV_AA)
def main():
    """
    Just the test
    This method is a good resource on how to handle the results

    Usage: argv[1] = image file, argv[2] = cut index.
    """
    filename = sys.argv[1]
    image = highgui.cvLoadImage (filename)
    cutRatios = [0.61]
    settings = Settings(cutRatios)
    image = highgui.cvLoadImage (filename)
    thickness = 4
    settings.setMarginPercentage(0.025)
    cutNo = int(sys.argv[2])
    cut = lib.findMeans(cv.cvGetSize(image), settings.cutRatios[0])[cutNo]
    # Get the BW edge image
    edgeImage = naiveMethod.getEdgeImage(image, settings)
    (blobImg, comp) = naiveMethod.analyzeCut(image, edgeImage, cut, settings, 'True')
    # list of lists
    gridPointsList = grid.gridIt(blobImg, comp)
    #print gridPointsList
    points = centerOfMass(gridPointsList)
    # (x,y) — draw a vertical green line at each center-of-mass x value.
    for point in points:
        cv.cvLine(image, cv.cvPoint(point, 0), cv.cvPoint(point,600), COL_GREEN)
    lib.drawBoundingBoxes(image, comp)
    #highgui.cvSaveImage('floodfillbilledet.png', blobImg)
    #highgui.cvSaveImage('boindingboxbilledet.png', boxxImg)
    showImage(image, 'name')
def process_node(tup):
    # Render one node: point-valued nodes become labelled circles,
    # tuple-valued nodes (start_pts, end_pts) become line segments.
    # Uses self, t, fun and cvpoint_of_pt from the enclosing scope.
    node, color, size, text = tup
    color_scalar = cv.cvScalar(color[0], color[1], color[2])
    node_val = node.val(t)
    if node_val.__class__ != tuple:
        if node_val != None:
            v_cv = self.disp.to_screen(node.val(t))
            def circ(n):
                # Draw one point marker plus its drop-shadowed label.
                pt = cv.cvPoint(int(round(n[0,0])),int(round(n[1,0])))
                cv.cvCircle(self.disp.buffer, pt, size, color_scalar, cv.CV_FILLED, cv.CV_AA)
                pt2 = cv.cvPoint(pt.x + 2, pt.y + 2)
                cv.cvPutText(self.disp.buffer, text, pt, self.disp.font, cv.cvScalar(255,255,255))
                cv.cvPutText(self.disp.buffer, text, pt2, self.disp.font, cv.cvScalar(50,50,50))
            # NOTE(review): map() relied on for side effects — Python 2 only.
            map(circ, fun.points_of_mat(v_cv))
    else:
        # Segment-valued node: one line per column of the matrices.
        start_pts, end_pts = node_val
        for idx in range(start_pts.shape[1]):
            start_pt = cvpoint_of_pt(self.disp.to_screen(start_pts[:,idx]))[0]
            end_pt = cvpoint_of_pt(self.disp.to_screen( end_pts[:,idx]))[0]
            cv.cvLine(self.disp.buffer, start_pt, end_pt, color_scalar, size, cv.CV_AA)
def draw_line(image, pnt1, pnt2, color=(255, 0, 0)):
    """Draw a segment between two (x, y) points, rounding coordinates.

    color is an (r, g, b) triple, red by default.
    """
    start = cv.cvPoint(rnd(pnt1[0]), rnd(pnt1[1]))
    end = cv.cvPoint(rnd(pnt2[0]), rnd(pnt2[1]))
    cv.cvLine(image, start, end, cv.CV_RGB(*color))
# create the images we need image = cv.cvCreateImage(cv.cvGetSize(frame), 8, 3) image.origin = frame.origin hsv = cv.cvCreateImage(cv.cvGetSize(frame), 8, 3) hue = cv.cvCreateImage(cv.cvGetSize(frame), 8, 1) mask = cv.cvCreateImage(cv.cvGetSize(frame), 8, 1) backproject = cv.cvCreateImage(cv.cvGetSize(frame), 8, 1) hist = cv.cvCreateHist([hdims], cv.CV_HIST_ARRAY, hranges, 1) # flip the image cv.cvFlip(frame, image, 1) cv.cvCvtColor(image, hsv, cv.CV_BGR2HSV) cv.cvLine(image, cv.cvPoint(0, image.height / 2), cv.cvPoint(image.width, image.height / 2), cv.CV_RGB(0, 255, 0), 2, 8, 0) cv.cvLine(image, cv.cvPoint(image.width / 2, 0), cv.cvPoint(image.width / 2, image.height), cv.CV_RGB(0, 255, 0), 2, 8, 0) if track_object: _vmin = vmin _vmax = vmax cv.cvInRangeS(hsv, cv.cvScalar(0, smin, min(_vmin, _vmax), 0), cv.cvScalar(180, 256, max(_vmin, _vmax), 0), mask) cv.cvSplit(hsv, hue, None, None, None)
# draw all the points cv.cvCircle (image, points [i], 2, cv.cvScalar (0, 0, 255, 0), cv.CV_FILLED, cv.CV_AA, 0) # start the line from the last point pt0 = points [hull [-1]] for point_index in hull: # connect the previous point to the current one # get the current one pt1 = points [point_index] # draw cv.cvLine (image, pt0, pt1, cv.cvScalar (0, 255, 0, 0), 1, cv.CV_AA, 0) # now, current one will be the previous one for the next iteration pt0 = pt1 # display the final image highgui.cvShowImage ('hull', image) # handle events, and wait a key pressed k = highgui.cvWaitKey (0) if k == '\x1b': # user has press the ESC key, so exit break
new_points.append (the_point) # draw the current point cv.cvCircle (image, cv.cvPointFrom32f(the_point), 3, cv.cvScalar (0, 255, 0, 0), -1, 8, 0) # draw the flow vector if status[point_counter]: dx1 = old_points[point_counter].x - the_point.x dy1 = old_points[point_counter].y - the_point.y if dx1 * dx1 + dy1 * dy1 <= 500: cv.cvLine(image, cv.cvPointFrom32f(old_points[point_counter]), cv.cvPointFrom32f(the_point), cv.cvScalar(0,255,0,0), 2) #print 'velocity: ',dx1/(time.time()-t1),dy1/(time.time()-t1) velocities[0].append(dx1/(time.time()-t1)) velocities[1].append(dy1/(time.time()-t1)) # set back the points we keep points [1] = new_points if len(velocities[0]) != 0 and len(velocities[1]) != 0: vavgx = sum(velocities[0])/len(velocities[0]) vavgy = sum(velocities[1])/len(velocities[1]) else: vavgx = 0 vavgy = 0
# We're done with the blurred image now #cv.cvReleaseImage(blurImage) #print points[:0] cut = lines[1] margin = marginCalculator.getPixels(image, cut, 0.024) component_dictionary = featureDetector.ribbonFloodFill(image, edges, out, cut, margin, lo, up) #featureDetector.floodFillLine(image, out, points, cut, lo, up, {}) #flags = cv.CV_FLOODFILL_FIXED_RANGE #flags = 4 color = lib.getRandomColor() comp = cv.CvConnectedComp() #cv.cvFloodFill(out, cv.cvPoint(x,y), color, cv.CV_RGB(lo,lo,lo), cv.CV_RGB(up,up,up),comp ,flags)#, None); #lib.plot(out, cv.cvPoint(x,y), 3, lib.COL_RED) cv.cvLine(out, cut.p1, cut.p2, lib.COL_RED) lib.drawMargin(out, cut, margin) #startpoint = lines[0].getPoints()[0] #points.append(lines[0].getPoints()[1]) #for point in points: # out = floofill.floofill(out, lowerThres, upperThres, startpoint, point, 1) # startpoint = point #for point in points: # lib.plot(out, point, 2) #out = edgeDetector.findEdges(out, 70, 70) nystr = str(str(lo)+'-'+str(up)+'.png') winname = 'hej' highgui.cvNamedWindow (winname, highgui.CV_WINDOW_AUTOSIZE)
# create window and display the original picture in it highgui.cvNamedWindow (window_name, 1) cv.cvSetZero (image) highgui.cvShowImage (window_name, image) # create the random number random = Random () # draw some lines for i in range (number): pt1 = cv.cvPoint (random.randrange (-width, 2 * width), random.randrange (-height, 2 * height)) pt2 = cv.cvPoint (random.randrange (-width, 2 * width), random.randrange (-height, 2 * height)) cv.cvLine (image, pt1, pt2, random_color (random), random.randrange (0, 10), line_type, 0) highgui.cvShowImage (window_name, image) highgui.cvWaitKey (delay) # draw some rectangles for i in range (number): pt1 = cv.cvPoint (random.randrange (-width, 2 * width), random.randrange (-height, 2 * height)) pt2 = cv.cvPoint (random.randrange (-width, 2 * width), random.randrange (-height, 2 * height)) cv.cvRectangle (image, pt1, pt2, random_color (random), random.randrange (-1, 9), line_type, 0)
# NOTE(review): fragment — the leading `global` statement is the tail of
# a trackbar callback (its `def` line is not shown); the rest is script
# level. `v`, `size`, `left`, `right`, `mousecb`, `cb_*`, `variable_*`,
# `depthmatch`, `depthmatrix`, `font`, `test_number` come from enclosing
# code not shown.
global variable_base
variable_base = v/10.0  # trackbar value scaled to a baseline in meters(?) — confirm
xmatch = size.width / 2 + 1
ymatch = size.height / 2 + 1
highgui.cvSetMouseCallback("depthmatch - left", mousecb)
highgui.cvCreateTrackbar("ROI", "depthmatch - left", variable_roi, size.width, cb_roi)
highgui.cvCreateTrackbar("Buffer", "depthmatch - left", variable_buf, size.width, cb_buf)
highgui.cvCreateTrackbar("Focal Length", "depthmatch - left", variable_focal, 1000, cb_focal)
highgui.cvCreateTrackbar("Baseline/10", "depthmatch - left", variable_base, 1000, cb_base)
leftdraw = cv.cvCreateImage(size, 8, 3)
rightdraw = cv.cvCreateImage(size, 8, 3)
while 1:
    # depth is ((x, y, z), left_point, right_point) per usage below.
    depth = depthmatch(xmatch, ymatch, left, right, roi=variable_roi, buf=variable_buf,baseline=variable_base, focal_length=variable_focal)
    cv.cvCopy(left, leftdraw)
    cv.cvCopy(right, rightdraw)
    cv.cvLine(leftdraw, depth[1], depth[2], (0,255,0), 2)
    cv.cvPutText(leftdraw, "%2f(m) at (%2f,%2f)" % (depth[0][2],depth[0][0],depth[0][1]), (xmatch,ymatch), font, (0,0,255))
    # NOTE(review): both endpoints are depth[2] — draws a dot, not a line;
    # possibly intended depth[1] -> depth[2]. Confirm before changing.
    cv.cvLine(rightdraw, depth[2], depth[2], (0,0,255), 5)
    highgui.cvShowImage("depthmatch - left", leftdraw)
    highgui.cvShowImage("depthmatch - right", rightdraw)
    print depth
    highgui.cvWaitKey(10)
if __name__ == "__main__" and test_number == 2:
    left = highgui.cvLoadImage(str(sys.argv[1]))
    right = highgui.cvLoadImage(str(sys.argv[2]))
    highgui.cvNamedWindow("Depth")
    depth = depthmatrix(left, right, 4)
    depth_full = cv.cvCreateImage(cv.cvGetSize(left), 8, 1)
    cv.cvResize(depth, depth_full)
def main():
    # Interactive collinearity demo: read points from stdin, group them
    # by equal slope relative to each anchor point, print each maximal
    # collinear set, and draw it on a 700x700 canvas with axes.
    # NOTE(review): indentation reconstructed from a collapsed source —
    # verify the nesting of the trailing duplicate-report section.
    a_window = highgui.cvNamedWindow('a_window', highgui.CV_WINDOW_AUTOSIZE)
    '''
    image=cv.LoadImage('desktopBlue.jpg', cv.CV_LOAD_IMAGE_COLOR)
    dst = cv.cvCreateImage((600,200), 8, 3)
    cv.cvLine(dst,(100,200),(100,100),(0,0,1))
    cv.cvCircle(dst,(-10,-10),10,(0,0,0))
    highgui.cvShowImage('a_window',dst)
    highgui.cvWaitKey(10000)
    '''
    dst = cv.cvCreateImage((700, 700), 8, 3)
    # White axes: y axis at x=20, x axis at y=680.
    cv.cvLine(dst, (20, 20), (20, 680), (0xFF, 0xFF, 0xFF), 2, 0)
    cv.cvLine(dst, (20, 680), (680, 680), (0xFF, 0xFF, 0xFF), 2, 0)
    no = int(raw_input("Enter total no. of points"))
    a = []
    for i in range(no):
        no1 = int(raw_input("Enter x coord: "))
        no2 = int(raw_input("Enter y coord: "))
        # Each entry is [x, y, reported-flag].
        a.append([no1, no2, 0])
    print a
    slope = []
    for i in range(no):
        a[i][2] = 1
        # Slope of every point relative to anchor a[i], tagged with index.
        j = 0
        slope = [[find_slope(a[i][0], a[i][1], a[j][0], a[j][1]), j]]
        j = j + 1
        while (j < no):
            slope.append([find_slope(a[i][0], a[i][1], a[j][0], a[j][1]), j])
            j = j + 1
        slope.sort()
        # Scan sorted slopes; equal-slope runs are collinear with a[i].
        j = 1
        temp_arr = []
        temp_arr.append(slope[0])
        while (j < no):
            # print "temparr:"+str(temp_arr)
            if (i == slope[j][1]):
                j = j + 1
                continue
            if (slope[j][0] == temp_arr[0][0]):
                if (a[slope[j][1]][2] == 0):
                    temp_arr.append(slope[j])
            else:
                if (len(temp_arr) >= 2):
                    print "Following points are in a line:"
                    for k in range(len(temp_arr)):
                        print "[" + str(a[temp_arr[k][1]][0]) + "," + str(
                            a[temp_arr[k][1]][1]) + "]"
                        a[temp_arr[k][1]][2] = 1
                    print "[" + str(a[i][0]) + "," + str(a[i][1]) + "]"
                    # Draw the run in red, scaled x10 into canvas coords.
                    for k in range(len(temp_arr) - 1):
                        cv.cvLine(dst, (20 + a[temp_arr[k][1]][0] * 10,
                                        680 - a[temp_arr[k][1]][1] * 10),
                                  (20 + a[temp_arr[k + 1][1]][0] * 10,
                                   680 - a[temp_arr[k + 1][1]][1] * 10),
                                  (0, 0, 0xFF), 2, 0)
                        cv.cvLine(dst, (20 + a[i][0] * 10, 680 - a[i][1] * 10),
                                  (20 + a[temp_arr[k + 1][1]][0] * 10,
                                   680 - a[temp_arr[k + 1][1]][1] * 10),
                                  (0, 0, 0xFF), 2, 0)
                temp_arr = []
                temp_arr.append(slope[j])
            j = j + 1
        # Flush the final equal-slope run for this anchor point.
        if (len(temp_arr) >= 2):
            print "Following points are in a line:"
            for k in range(len(temp_arr)):
                print "[" + str(a[temp_arr[k][1]][0]) + "," + str(
                    a[temp_arr[k][1]][1]) + "]"
                a[temp_arr[k][1]][2] = 1
            print "[" + str(a[i][0]) + "," + str(a[i][1]) + "]"
            for k in range(len(temp_arr) - 1):
                cv.cvLine(dst, (20 + a[temp_arr[k][1]][0] * 10,
                                680 - a[temp_arr[k][1]][1] * 10),
                          (20 + a[temp_arr[k + 1][1]][0] * 10,
                           680 - a[temp_arr[k + 1][1]][1] * 10),
                          (0, 0, 0xFF), 2, 0)
                cv.cvLine(dst, (20 + a[i][0] * 10, 680 - a[i][1] * 10),
                          (20 + a[temp_arr[k + 1][1]][0] * 10,
                           680 - a[temp_arr[k + 1][1]][1] * 10),
                          (0, 0, 0xFF), 2, 0)
        slope = []
    # Mark every input point as a blue dot.
    for k in range(no):
        cv.cvLine(dst, (20 + a[k][0] * 10, 680 - a[k][1] * 10),
                  (20 + a[k][0] * 10, 680 - a[k][1] * 10), (0xFF, 0, 0), 4, 0)
    highgui.cvShowImage('a_window', dst)
    highgui.cvWaitKey(90000)
# NOTE(review): fragment of the OpenCV camshift demo loop — `frame`,
# `hdims`, `hranges`, `track_object`, `vmin`, `vmax`, `smin` come from
# enclosing code not shown.
if image is None:
    # create the images we need (lazily, on the first frame)
    image = cv.cvCreateImage (cv.cvGetSize (frame), 8, 3)
    image.origin = frame.origin
    hsv = cv.cvCreateImage( cv.cvGetSize(frame), 8, 3 )
    hue = cv.cvCreateImage( cv.cvGetSize(frame), 8, 1 )
    mask = cv.cvCreateImage( cv.cvGetSize(frame), 8, 1 )
    backproject = cv.cvCreateImage( cv.cvGetSize(frame), 8, 1 )
    hist = cv.cvCreateHist( [hdims], cv.CV_HIST_ARRAY, hranges, 1 )
# flip the image
cv.cvFlip (frame, image, 1)
cv.cvCvtColor( image, hsv, cv.CV_BGR2HSV)
# Green crosshair through the image center.
cv.cvLine(image, cv.cvPoint(0, image.height/2), cv.cvPoint(image.width, image.height/2), cv.CV_RGB(0,255,0), 2, 8, 0 )
cv.cvLine(image, cv.cvPoint(image.width/2, 0), cv.cvPoint(image.width/2, image.height), cv.CV_RGB(0,255,0), 2, 8, 0 )
if track_object:
    _vmin = vmin
    _vmax = vmax
    # Threshold saturation/value, then isolate the hue plane.
    cv.cvInRangeS( hsv, cv.cvScalar( 0, smin,min(_vmin,_vmax),0), cv.cvScalar(180, 256, max(_vmin,_vmax),0), mask );
    cv.cvSplit( hsv, hue, None, None, None)
def draw_line(image, pnt1, pnt2, color=(255,0,0)):
    # Draw a segment between two (x, y) points, rounding coordinates
    # with the module-level rnd() helper; color is (r, g, b), red default.
    cv.cvLine(image, cv.cvPoint(rnd(pnt1[0]), rnd(pnt1[1])), cv.cvPoint(rnd(pnt2[0]), rnd(pnt2[1])), cv.CV_RGB(*color))
def timerEvent(self, ev):
    # Per-tick pipeline: grab a camera frame, run the face/circle/
    # square/line detectors on a grayscale copy, draw every detection
    # onto the frame, then blit it into this Qt widget.
    # Fetch a frame from the video camera
    frame = highgui.cvQueryFrame(self.cap)
    img_orig = cv.cvCreateImage(cv.cvSize(frame.width, frame.height), cv.IPL_DEPTH_8U, frame.nChannels)
    if (frame.origin == cv.IPL_ORIGIN_TL):
        cv.cvCopy(frame, img_orig)
    else:
        # Bottom-left origin: flip vertically to normalize.
        cv.cvFlip(frame, img_orig, 0)
    # Create a grey frame to clarify data
    img_grey = cv.cvCreateImage(cv.cvSize(img_orig.width, img_orig.height), 8, 1)
    cv.cvCvtColor(img_orig, img_grey, cv.CV_BGR2GRAY)
    # Detect objects within the frame
    self.faces_storage = cv.cvCreateMemStorage(0)
    faces = self.detect_faces(img_grey)
    self.circles_storage = cv.cvCreateMemStorage(0)
    circles = self.detect_circles(img_grey)
    self.squares_storage = cv.cvCreateMemStorage(0)
    squares = self.detect_squares(img_grey, img_orig)
    self.lines_storage = cv.cvCreateMemStorage(0)
    lines = self.detect_lines(img_grey, img_orig)
    # Draw faces (red rectangles)
    if faces:
        for face in faces:
            pt1, pt2 = self.face_points(face)
            cv.cvRectangle(img_orig, pt1, pt2, cv.CV_RGB(255, 0, 0), 3, 8, 0)
    # Draw lines (yellow)
    if lines:
        for line in lines:
            cv.cvLine(img_orig, line[0], line[1], cv.CV_RGB(255, 255, 0), 3, 8)
    # Draw circles (blue)
    if circles:
        for circle in circles:
            cv.cvCircle(
                img_orig,
                cv.cvPoint(cv.cvRound(circle[0]), cv.cvRound(circle[1])),
                cv.cvRound(circle[2]), cv.CV_RGB(0, 0, 255), 3, 8, 0)
    # Draw squares (green closed polylines; vertices come 4 at a time)
    if squares:
        i = 0
        while i < squares.total:
            pt = []
            # read 4 vertices
            pt.append(squares[i])
            pt.append(squares[i + 1])
            pt.append(squares[i + 2])
            pt.append(squares[i + 3])
            ## draw the square as a closed polyline
            cv.cvPolyLine(img_orig, [pt], 1, cv.CV_RGB(0, 255, 0), 3, cv.CV_AA, 0)
            i += 4
    # Resize the image to display properly within the window
    # CV_INTER_NN - nearest-neigbor interpolation,
    # CV_INTER_LINEAR - bilinear interpolation (used by default)
    # CV_INTER_AREA - resampling using pixel area relation. (preferred for image decimation)
    # CV_INTER_CUBIC - bicubic interpolation.
    img_display = cv.cvCreateImage(cv.cvSize(self.width(), self.height()), 8, 3)
    cv.cvResize(img_orig, img_display, cv.CV_INTER_NN)
    # IPL -> PIL -> PNG bytes -> QImage, then paint onto the widget.
    img_pil = adaptors.Ipl2PIL(img_display)
    s = StringIO()
    img_pil.save(s, "PNG")
    s.seek(0)
    q_img = QImage()
    q_img.loadFromData(s.read())
    bitBlt(self, 0, 0, q_img)
def timerEvent(self, ev):
    # Grab a camera frame, run face/circle/square/line detection on a
    # grayscale copy, draw the detections, and blit into this Qt widget.
    # Fetch a frame from the video camera
    frame = highgui.cvQueryFrame(self.cap)
    img_orig = cv.cvCreateImage(cv.cvSize(frame.width,frame.height),cv.IPL_DEPTH_8U, frame.nChannels)
    if (frame.origin == cv.IPL_ORIGIN_TL):
        cv.cvCopy(frame, img_orig)
    else:
        # Bottom-left origin: flip vertically to normalize.
        cv.cvFlip(frame, img_orig, 0)
    # Create a grey frame to clarify data
    img_grey = cv.cvCreateImage(cv.cvSize(img_orig.width,img_orig.height), 8, 1)
    cv.cvCvtColor(img_orig, img_grey, cv.CV_BGR2GRAY)
    # Detect objects within the frame
    self.faces_storage = cv.cvCreateMemStorage(0)
    faces = self.detect_faces(img_grey)
    self.circles_storage = cv.cvCreateMemStorage(0)
    circles = self.detect_circles(img_grey)
    self.squares_storage = cv.cvCreateMemStorage(0)
    squares = self.detect_squares(img_grey, img_orig)
    self.lines_storage = cv.cvCreateMemStorage(0)
    lines = self.detect_lines(img_grey, img_orig)
    # Draw faces (red rectangles)
    if faces:
        for face in faces:
            pt1, pt2 = self.face_points(face)
            cv.cvRectangle(img_orig, pt1, pt2, cv.CV_RGB(255,0,0), 3, 8, 0)
    # Draw lines (yellow)
    if lines:
        for line in lines:
            cv.cvLine(img_orig, line[0], line[1], cv.CV_RGB(255,255,0), 3, 8)
    # Draw circles (blue)
    if circles:
        for circle in circles:
            cv.cvCircle(img_orig, cv.cvPoint(cv.cvRound(circle[0]),cv.cvRound(circle[1])),cv.cvRound(circle[2]),cv.CV_RGB(0,0,255),3,8,0)
    # Draw squares (green closed polylines; vertices come 4 at a time)
    if squares:
        i = 0
        while i<squares.total:
            pt = []
            # read 4 vertices
            pt.append(squares[i])
            pt.append(squares[i+1])
            pt.append(squares[i+2])
            pt.append(squares[i+3])
            ## draw the square as a closed polyline
            cv.cvPolyLine(img_orig, [pt], 1, cv.CV_RGB(0,255,0), 3, cv.CV_AA, 0)
            i += 4
    # Resize the image to display properly within the window
    # CV_INTER_NN - nearest-neigbor interpolation,
    # CV_INTER_LINEAR - bilinear interpolation (used by default)
    # CV_INTER_AREA - resampling using pixel area relation. (preferred for image decimation)
    # CV_INTER_CUBIC - bicubic interpolation.
    img_display = cv.cvCreateImage(cv.cvSize(self.width(),self.height()), 8, 3)
    cv.cvResize(img_orig, img_display, cv.CV_INTER_NN)
    # IPL -> PIL -> PNG bytes -> QImage, then paint onto the widget.
    img_pil = adaptors.Ipl2PIL(img_display)
    s = StringIO()
    img_pil.save(s, "PNG")
    s.seek(0)
    q_img = QImage()
    q_img.loadFromData(s.read())
    bitBlt(self, 0, 0, q_img)
def main():
    """
    Just the test
    This method is a good resource on how to handle the results

    Usage: argv[1] = image file, argv[2] = cut index.
    """
    filename = sys.argv[1]
    image = highgui.cvLoadImage (filename)
    cutRatios = [0.75]
    #cutRatios = [lib.PHI]
    settings = Settings(cutRatios)
    image = highgui.cvLoadImage (filename)
    thickness = 4
    settings.setMarginPercentage(0.025)
    cutNo = int(sys.argv[2])
    # extract the requested cut
    cut = lib.findMeans(cv.cvGetSize(image), settings.cutRatios[0])[cutNo]
    # the cut value, though this should be generalized a bit
    # Vertical cut iff both endpoints share the same x coordinate.
    oriantesen = cut.getPoints()[0].x == cut.getPoints()[1].x
    if oriantesen:
        cutPixel = cut.getPoints()[1].x
    else:
        cutPixel = cut.getPoints()[1].y
    #Get the BW edge image
    edgeImage = expandedMethod.getEdgeImage(image, settings)
    (blobImg, comp) = expandedMethod.analyzeCut(image, edgeImage, cut, settings, 'True')
    # list of lists
    # Find the margin
    margin = marginCalculator.getPixels(image, cut, settings.marginPercentage)
    lib.drawMargin(image, cut, margin)
    # compute the grid
    gridPointsList = grid.gridIt(blobImg, comp)
    # how many pixels lie on one side of the cut versus the other, in percent
    pixelRatio = pixelSideCounter(gridPointsList, cutPixel, oriantesen)
    print pixelRatio
    # compute the centers of mass
    points = centerOfMass(gridPointsList, oriantesen)
    #Draw the cut
    #print cut.getPoints()[0].y
    #print cut.getPoints()[1].y
    #print cut.getPoints()[0].x
    #print cut.getPoints()[1].x
    #print cutPixel
    if oriantesen:
        # print 'hej'
        cv.cvLine(image, cv.cvPoint(cutPixel, cut.getPoints()[0].y), cv.cvPoint(cutPixel, cut.getPoints()[1].y), COL_RED)
    else:
        cv.cvLine(image, cv.cvPoint(cut.getPoints()[0].x, cutPixel), cv.cvPoint(cut.getPoints()[1].x, cutPixel), COL_RED)
    #Draw center of mass
    for point in points:
        if oriantesen:
            # print 'hej'
            # print point
            cv.cvLine(image, cv.cvPoint(point, cut.getPoints()[0].y), cv.cvPoint(point, cut.getPoints()[1].y), COL_GREEN)
        else:
            # print point
            cv.cvLine(image, cv.cvPoint(cut.getPoints()[0].x, point), cv.cvPoint(cut.getPoints()[1].x, point), COL_GREEN)
    lib.drawBoundingBoxes(image, comp, 4, COL_GREEN)
    #highgui.cvSaveImage('floodfillbilledet.png', blobImg)
    highgui.cvSaveImage('centerOfMass.png', image)
    showImage(image, 'name')
def main():
    """
    Just the test
    This method is a good resource on how to handle the results.
    Save images in this method if you have to.

    Usage: argv[1] = image file, argv[2] = cut index, argv[3] = output
    window/file base name.  Sweeps a range of cut ratios over cuts 0-3
    and reports component counts, then renders the selected cut.
    """
    filename = sys.argv[1]
    image = highgui.cvLoadImage (filename)
    cutRatios = [lib.PHI]
    #cutRatios = [0.75]
    ratios = (0.51803398874999995, 0.56803398875, 0.61803398875000004, 0.66666000000000003, 0.71803398875000002, 0.76803398874999995, 0.81803398875, 0.86803398875000004 , 0.91803398874999997, 0.96803398875000002)
    settings = Settings(cutRatios)
    image = highgui.cvLoadImage (filename)
    thickness = 4
    settings.setMarginPercentage(0.024)
    cuts = (0,1,2,3)
    cut = int(sys.argv[2])
    winname = sys.argv[3]+".png"
    #settings.setThresholds(100,150)
    # Set the color for the boxes
    #color = lib.COL_BLACK
    #color = lib.COL_WHITE
    #color = lib.COL_RED
    color = lib.COL_GREEN
    #color = lib.COL_BLUE
    # Sweep every ratio over every cut, accumulating component counts.
    # NOTE(review): reconstructed nesting — `cut` from argv is overwritten
    # by this loop; confirm that is intended.
    tmp = 0
    for r in cuts:
        cut = r
        print str(r), '---------------------------------'
        for i in ratios:
            #settings = Settings(cutRatios)
            print i
            cutd = lib.findMeans(cv.cvGetSize(image), i)[cut]
            edgeImage = naiveMethod.getEdgeImage(image, settings)
            components = naiveMethod.analyzeCut(image, edgeImage, cutd, settings)
            print len(components)
            tmp = tmp + len(components)
    print tmp
    blobImg = blobResult(image, settings, cut)
    boxxImg = boundingBoxResult(image, settings, cut, thickness, color)
    # the cut value, though this should be generalized a bit
    cutt = lib.findMeans(cv.cvGetSize(image), settings.cutRatios[0])[cut]
    # Vertical cut iff both endpoints share the same x coordinate.
    oriantesen = cutt.getPoints()[0].x == cutt.getPoints()[1].x
    if oriantesen:
        cutPixel = cutt.getPoints()[1].x
    else:
        cutPixel = cutt.getPoints()[1].y
    # Draw the cut line in red on the bounding-box image.
    if oriantesen:
        # print 'hej'
        cv.cvLine(boxxImg, cv.cvPoint(cutPixel, cutt.getPoints()[0].y), cv.cvPoint(cutPixel, cutt.getPoints()[1].y), lib.COL_RED)
    else:
        cv.cvLine(boxxImg, cv.cvPoint(cutt.getPoints()[0].x, cutPixel), cv.cvPoint(cutt.getPoints()[1].x, cutPixel), lib.COL_RED)
    # Save images
    highgui.cvSaveImage('floodfillbilledet.png', blobImg)
    highgui.cvSaveImage(winname, boxxImg)
    # Show images
    compareImages(blobImg, boxxImg, "blob", winname)
def paint(self, img):
    """Render the stored point chain onto img.

    Each point is marked with a small red circle, and consecutive
    points are joined by thin white lines.
    """
    red = cv.cvScalar(0, 0, 255, 0)
    white = cv.cvScalar(255, 255, 255, 0)
    for marker in self.points:
        cv.cvDrawCircle(img, marker.getCvPoint(), 2, red)
    # Pair each point with its successor instead of indexing by range.
    for first, second in zip(self.points, self.points[1:]):
        cv.cvLine(img, first.getCvPoint(), second.getCvPoint(), white, 1)
cv.cvZero(hist_hue_img) # hue_min,hue_max,min_loc,max_loc = cv.cvGetMinMaxHistValue(h_hue) for h in xrange(h_bins): hue = cv.cvGetReal1D(h_hue.bins, h) color = hsv2rgb(h * h_limit / h_bins) cv.cvRectangle( hist_hue_img, (h * scalewidth, 0), ((h + 1) * scalewidth, (hue / sample_pixels) * scaleheight), color, cv.CV_FILLED, ) cv.cvLine( hist_hue_img, (0, scaleheight * hue_cutoff / sample_pixels), (h_bins * scalewidth, scaleheight * hue_cutoff / sample_pixels), (255, 0, 0), 1, ) highgui.cvShowImage("Histogram - Hue", hist_hue_img) cv.cvZero(hist_val_img) # val_min,val_max,min_loc,max_loc = cv.cvGetMinMaxHistValue(h_val) for v in xrange(v_bins): val = cv.cvGetReal1D(h_val.bins, v) color = cv.cvScalar(180, 255, v * v_limit / v_bins) cv.cvRectangle( hist_val_img, (v * scalewidth, 0), ((v + 1) * scalewidth, (val / sample_pixels) * scaleheight), color,