def score(testfile, correctfile):
    """Compare annotated points in testfile against ground truth in correctfile.

    Returns (avg_error, rel_pts) where avg_error is the mean Euclidean
    distance between corresponding points and rel_pts contains each
    point's offset projected onto the garment-aligned axes from get_axes.
    """
    test_pts = Annotating.read_anno(testfile)
    correct_pts = Annotating.read_anno(correctfile)
    rel_pts = []
    # For shirt-like garments, optionally skip the collar points.
    if IGNORE_COLLAR and (MODE == SWEATER or MODE == TEE):
        check_points = (0, 1, 2, 3, 4, 8, 9, 10, 11, 12)
    else:
        check_points = range(len(test_pts))
    errors = []
    (x_axis, y_axis) = get_axes(correct_pts)
    for i in check_points:
        test_pt = test_pts[i]
        correct_pt = correct_pts[i]
        rel_pt = Vector2D.pt_diff(test_pt, correct_pt)
        # Express the offset in the garment-aligned coordinate frame.
        rel_pts.append((Vector2D.dot_prod(rel_pt, x_axis),
                        Vector2D.dot_prod(rel_pt, y_axis)))
        errors.append(Vector2D.pt_distance(test_pt, correct_pt))
    return (lst_avg(errors), rel_pts)
def get_new_grasp_points_position(points, foldLine):
    """Mirror every grasp point across the fold line.

    Points that are None are reported via show_message and skipped, so
    the returned list may be shorter than the input.
    """
    mirrored_pts = []
    for grasp_pt in points:
        if grasp_pt is None:
            show_message("Some of the grasp points wasn't set.", MsgTypes.exception)
        else:
            fold_axis = Vector2D.make_ln_from_pts(foldLine[0], foldLine[1])
            mirrored_pts.append(Vector2D.mirror_pt(grasp_pt, fold_axis))
    #show_message("Move grasped points to: " + str(mirrored_pts), MsgTypes.info)
    return mirrored_pts
def sampled(line, NUM_SAMPLES):
    """Return NUM_SAMPLES points evenly spaced along line (endpoint excluded)."""
    (start, end) = Vector2D.end_points(line)
    total_len = Vector2D.pt_distance(start, end)
    # Sample at fractions k/NUM_SAMPLES of the segment length.
    return [Vector2D.extrapolate(line, total_len * k / float(NUM_SAMPLES))
            for k in range(NUM_SAMPLES)]
def foldDrawer(self,event,x,y,flags,param):
    """Mouse callback for drawing a fold line on self.background.

    A left-button release adds a clicked point; once two points exist,
    the fold line through them is computed and drawn across the whole
    image.  On other events a rubber-band preview segment from the
    first point to the cursor is shown.
    """
    if event==cv.CV_EVENT_LBUTTONUP:
        # Record the clicked point and mark it with a filled red dot.
        self.foldline_pts.append((x,y))
        print "ADDED PT"
        cv.Circle(self.background,(x,y),3,cv.CV_RGB(255,0,0),-1)
        if len(self.foldline_pts) >= 2:
            # Two points fix the fold line; extend it to the image's
            # top and bottom borders before drawing (cv.Line needs ints).
            self.foldline = Vector2D.make_ln_from_pts(self.foldline_pts[0],self.foldline_pts[1])
            ln_start = Vector2D.intercept(self.foldline,Vector2D.horiz_ln(y=0))
            ln_end = Vector2D.intercept(self.foldline,Vector2D.horiz_ln(y=self.background.height))
            cv.Line(self.background,(int(ln_start[0]),int(ln_start[1])),(int(ln_end[0]),int(ln_end[1])),cv.CV_RGB(0,0,0))
            self.has_foldline = True
    elif len(self.foldline_pts) > 0:
        # Preview: temporary segment from the first point to the cursor.
        self.addTempCVShape(CVLineSegment(cv.CV_RGB(255,255,255),2,Geometry2D.LineSegment(Geometry2D.Point(self.foldline_pts[0][0],self.foldline_pts[0][1]),Geometry2D.Point(x,y))))
def get_axes(pts):
    """Compute garment-aligned unit axes from annotated points.

    Returns (x_axis, y_axis): y_axis follows the garment's main vertical
    direction (mode-dependent point pairs) and x_axis is y_axis rotated
    90 degrees.  (Dead, unreachable code that followed the return —
    referencing an undefined ``test_pts`` — has been removed.)
    """
    if MODE == TOWEL:
        y_axis = Vector2D.normalize(Vector2D.pt_diff(pts[1],pts[0]))
    elif MODE == PANTS:
        # From the waist corner toward the midpoint of the two leg points.
        y_axis = Vector2D.normalize(Vector2D.pt_diff(Vector2D.pt_scale(Vector2D.pt_sum(pts[4],pts[3]),0.5),pts[0]))
    else:
        # Shirts: from the midpoint of first/last points toward point 5.
        y_axis = Vector2D.normalize(Vector2D.pt_diff(pts[5],Vector2D.pt_scale(Vector2D.pt_sum(pts[0],pts[-1]),0.5)))
    # Perpendicular of y_axis (90-degree rotation).
    x_axis = (-1*y_axis[1],y_axis[0])
    print (x_axis,y_axis)
    return (x_axis,y_axis)
def symmPolyDrawer(self, event, x, y, flags, param):
    """Mouse callback for drawing a symmetric polygon.

    Left click appends a vertex.  Right click mirrors the vertices drawn
    so far across self.symmline, closes the polygon, commits it as a
    CVPolygon, and resets.  Otherwise a preview of the in-progress
    outline is drawn.
    """
    if event == cv.CV_EVENT_LBUTTONDOWN:
        self.newPoly.append(Geometry2D.Point(x, y))
    elif event == cv.CV_EVENT_RBUTTONDOWN:
        print "MADE IT"
        # Mirror the clicked vertices in reverse order so the combined
        # outline stays a simple polygon.
        backwards = list(self.newPoly)
        backwards.reverse()
        backwards = [pt.toTuple() for pt in backwards]
        for pt in backwards:
            print "Looking at pt"
            newpt = Vector2D.mirror_pt(pt, self.symmline)
            print newpt
            self.newPoly.append(Geometry2D.Point(newpt[0], newpt[1]))
            print "Added pt"
        print len(self.newPoly)
        poly = Geometry2D.Polygon(*self.newPoly)
        cvPoly = CVPolygon(self.getDrawColor(), self.front(), poly)
        self.addCVShape(cvPoly)
        self.newPoly = []
    elif len(self.newPoly) > 0:
        # Preview: rubber-band segment from the last vertex to the
        # cursor, plus the outline drawn so far.
        startPt = self.newPoly[-1]
        endPt = Geometry2D.Point(x, y)
        line = Geometry2D.LineSegment(startPt, endPt)
        cvLine = CVLineSegment(self.lineColor, self.tempFront(), line)
        self.addTempCVShape(cvLine)
        for i, pt in enumerate(self.newPoly):
            self.highlightPt(pt)
            if i > 0:
                startPt = self.newPoly[i - 1]
                line = Geometry2D.LineSegment(startPt, pt)
                cvLine = CVLineSegment(self.lineColor, self.tempFront(), line)
                self.addTempCVShape(cvLine)
def visualize_errors(rel_pts):
    """Draw the garment model with per-vertex error ellipses; saves comparison.png.

    rel_pts[i] holds the (x, y) offsets of detected point i relative to
    ground truth, expressed in the garment-aligned frame produced by
    get_axes.  The inert triple-quoted blocks are kept as disabled
    alternative visualisations.
    """
    model_file = MODELS[MODE]
    model = pickle.load(open(model_file))
    white_image = cv.CreateImage((750, 500), 8, 3)
    cv.Set(white_image, cv.CV_RGB(255, 255, 255))
    model.translate((50, 0))
    model.draw_to_image(white_image, cv.CV_RGB(0, 0, 255))
    """
    for i,pt in enumerate(model.vertices_full()):
        ctr = (pt[0] + mean_x[i],pt[1] + mean_y[i])
        y_axis = std_y[i]
        x_axis = std_x[i]
        cv.Ellipse(white_image,ctr,(x_axis,y_axis),0,0,360,cv.CV_RGB(255,0,0))
    """
    for i, pt in enumerate(model.vertices_full()):
        print "Drawing model"
        absolute_pts = [Vector2D.pt_sum(pt, rel_pt) for rel_pt in rel_pts[i]]
        #for abs_pt in absolute_pts:
        #    cv.Circle(white_image,abs_pt,2,cv.CV_RGB(0,255,0),-1)
        # Principal direction of this vertex's error scatter.
        angle = get_angle(rel_pts[i])
        x_axis = (cos(angle), -1 * sin(angle))
        y_axis = (sin(angle), cos(angle))
        # Ellipse center: mean offset added to the model vertex position.
        mean_x = lst_avg([x for (x, y) in rel_pts[i]]) + pt[0]
        mean_y = lst_avg([y for (x, y) in rel_pts[i]]) + pt[1]
        # Ellipse radii: std-dev of offsets projected onto each axis.
        std_dev_x = lst_std(
            [Vector2D.dot_prod(rel_pt, x_axis) for rel_pt in rel_pts[i]])
        std_dev_y = lst_std(
            [Vector2D.dot_prod(rel_pt, y_axis) for rel_pt in rel_pts[i]])
        # NOTE(review): center/axes here are floats; confirm the legacy
        # cv.Ellipse wrapper accepts non-integer arguments.
        cv.Ellipse(white_image, (mean_x, mean_y), (std_dev_x, std_dev_y),
                   angle * 360 / (2 * pi), 0, 360, cv.CV_RGB(255, 0, 0), 2)
        """
        newmat = cv.CreateMat(1,len(absolute_pts),cv.CV_32SC2)
        for i in range(len(absolute_pts)):
            newmat[0,i] = absolute_pts[i]
        fit_ellipse = cv.FitEllipse2(newmat)
        cv.EllipseBox(white_image,fit_ellipse,cv.CV_RGB(255,0,0))
        """
    cv.SaveImage("comparison.png", white_image)
def saveModel(self): file = open(self.output_modelpath,'w') displacement = Vector2D.intercept(self.foldline,Vector2D.horiz_ln(y=0))[0] (dx,dy) = Vector2D.line_vector(self.foldline) angle = abs(arctan(dy / dx)) if dx > 0 and dy > 0: angle = angle elif dx < 0 and dy > 0: angle = pi - angle elif dx < 0 and dy < 0: angle = pi + angle else: angle *= -1 model = Models.Point_Model_Folded(self.initial_model,self.foldline_pts[0],self.foldline_pts[1]) model.draw_to_image(self.background,cv.RGB(255,0,0)) if model.illegal() or model.structural_penalty() >= 1.0: print "Model is illegal!" self.clearAll() else: model.set_image(None) pickle.dump(model,file)
def get_axes(pts):
    """Compute garment-aligned unit axes from annotated points.

    Returns (x_axis, y_axis): y_axis follows the garment's main vertical
    direction (mode-dependent point pairs) and x_axis is y_axis rotated
    90 degrees.  (Dead, unreachable code that followed the return —
    referencing an undefined ``test_pts`` — has been removed.)
    """
    if MODE == TOWEL:
        y_axis = Vector2D.normalize(Vector2D.pt_diff(pts[1], pts[0]))
    elif MODE == PANTS:
        # From the waist corner toward the midpoint of the two leg points.
        y_axis = Vector2D.normalize(
            Vector2D.pt_diff(
                Vector2D.pt_scale(Vector2D.pt_sum(pts[4], pts[3]), 0.5),
                pts[0]))
    else:
        # Shirts: from the midpoint of first/last points toward point 5.
        y_axis = Vector2D.normalize(
            Vector2D.pt_diff(
                pts[5],
                Vector2D.pt_scale(Vector2D.pt_sum(pts[0], pts[-1]), 0.5)))
    # Perpendicular of y_axis (90-degree rotation).
    x_axis = (-1 * y_axis[1], y_axis[0])
    print (x_axis, y_axis)
    return (x_axis, y_axis)
def get_folded_model(self,foldLine):
    """Create a folded model from foldLine, with interactive visualisation.

    Returns the new Point_Model_Folded, or None if the fold would make
    the model illegal or exceed the structural penalty threshold.
    """
    #do the fold line
    # Vector2D.intercept does not work when the two fold points share an
    # x or y coordinate, so a small offset ("noise") is added if needed.
    noise = -1;
    difX = (foldLine[0])[0] - (foldLine[1])[0]
    difY = (foldLine[0])[1] - (foldLine[1])[1]
    if ((difX == 0) and (difY == 0)):
        self.foldline_pts.append( ((foldLine[0])[0]+noise,(foldLine[0])[1]+noise) )
    elif(difX == 0):
        self.foldline_pts.append( ((foldLine[0])[0]+noise,(foldLine[0])[1]) )
    elif(difY == 0):
        self.foldline_pts.append( ((foldLine[0])[0],(foldLine[0])[1]+noise) )
    else:
        self.foldline_pts.append(foldLine[0])
    self.foldline_pts.append(foldLine[1])
    # Extend the fold line to the top and bottom image borders.
    self.foldline = Vector2D.make_ln_from_pts(self.foldline_pts[0],self.foldline_pts[1])
    ln_start = Vector2D.intercept(self.foldline,Vector2D.horiz_ln(y=0))
    ln_end = Vector2D.intercept(self.foldline,Vector2D.horiz_ln(y=self.background.height))
    #visualisation: fold line in black, fold points green, intercepts red
    cv.Line(self.background,(int(ln_start[0]),int(ln_start[1])),(int(ln_end[0]),int(ln_end[1])),cv.CV_RGB(0,0,0))
    cv.Circle(self.background,self.foldline_pts[0],4,cv.CV_RGB(0,255,0))
    cv.Circle(self.background,self.foldline_pts[1],4,cv.CV_RGB(0,255,0))
    cv.Circle(self.background,(int(ln_start[0]),int(ln_start[1])),4,cv.CV_RGB(255,0,0))
    cv.Circle(self.background,(int(ln_end[0]),int(ln_end[1])),4,cv.CV_RGB(255,0,0))
    cv.ShowImage("Fold visualisation",self.background )
    cv.WaitKey()
    cv.DestroyWindow("Fold visualisation")
    model = Models.Point_Model_Folded(self.initial_model,self.foldline_pts[0],self.foldline_pts[1])
    model.draw_to_image(self.background,cv.RGB(255,0,0))
    if model.illegal() or model.structural_penalty() >= 1.0:
        print "Model is illegal!"
        return None
    else:
        return model
def visualize_errors(rel_pts):
    """Draw the garment model with per-vertex error ellipses; saves comparison.png.

    rel_pts[i] holds the (x, y) offsets of detected point i relative to
    ground truth, expressed in the garment-aligned frame produced by
    get_axes.  The inert triple-quoted blocks are kept as disabled
    alternative visualisations.
    """
    model_file = MODELS[MODE]
    model = pickle.load(open(model_file))
    white_image = cv.CreateImage((750,500),8,3)
    cv.Set(white_image,cv.CV_RGB(255,255,255))
    model.translate((50,0))
    model.draw_to_image(white_image,cv.CV_RGB(0,0,255))
    """
    for i,pt in enumerate(model.vertices_full()):
        ctr = (pt[0] + mean_x[i],pt[1] + mean_y[i])
        y_axis = std_y[i]
        x_axis = std_x[i]
        cv.Ellipse(white_image,ctr,(x_axis,y_axis),0,0,360,cv.CV_RGB(255,0,0))
    """
    for i,pt in enumerate(model.vertices_full()):
        print "Drawing model"
        absolute_pts = [Vector2D.pt_sum(pt,rel_pt) for rel_pt in rel_pts[i]]
        #for abs_pt in absolute_pts:
        #    cv.Circle(white_image,abs_pt,2,cv.CV_RGB(0,255,0),-1)
        # Principal direction of this vertex's error scatter.
        angle = get_angle(rel_pts[i])
        x_axis = (cos(angle),-1*sin(angle))
        y_axis = (sin(angle),cos(angle))
        # Ellipse center: mean offset added to the model vertex position.
        mean_x = lst_avg([x for (x,y) in rel_pts[i]]) + pt[0]
        mean_y = lst_avg([y for (x,y) in rel_pts[i]]) + pt[1]
        # Ellipse radii: std-dev of offsets projected onto each axis.
        std_dev_x = lst_std([Vector2D.dot_prod(rel_pt,x_axis) for rel_pt in rel_pts[i]])
        std_dev_y = lst_std([Vector2D.dot_prod(rel_pt,y_axis) for rel_pt in rel_pts[i]])
        # NOTE(review): center/axes here are floats; confirm the legacy
        # cv.Ellipse wrapper accepts non-integer arguments.
        cv.Ellipse(white_image,(mean_x,mean_y),(std_dev_x,std_dev_y),angle*360/(2*pi),0,360,cv.CV_RGB(255,0,0),2)
        """
        newmat = cv.CreateMat(1,len(absolute_pts),cv.CV_32SC2)
        for i in range(len(absolute_pts)):
            newmat[0,i] = absolute_pts[i]
        fit_ellipse = cv.FitEllipse2(newmat)
        cv.EllipseBox(white_image,fit_ellipse,cv.CV_RGB(255,0,0))
        """
    cv.SaveImage("comparison.png",white_image)
def score(testfile, correctfile):
    """Compare annotated points in testfile against ground truth in correctfile.

    Returns (avg_error, rel_pts) where avg_error is the mean Euclidean
    distance between corresponding points and rel_pts contains each
    point's offset projected onto the garment-aligned axes from get_axes.
    """
    test_pts = Annotating.read_anno(testfile)
    correct_pts = Annotating.read_anno(correctfile)
    rel_pts = []
    # For shirt-like garments, optionally skip the collar points.
    if IGNORE_COLLAR and (MODE == SWEATER or MODE == TEE):
        check_points = (0, 1, 2, 3, 4, 8, 9, 10, 11, 12)
    else:
        check_points = range(len(test_pts))
    errors = []
    (x_axis, y_axis) = get_axes(correct_pts)
    for i in check_points:
        test_pt = test_pts[i]
        correct_pt = correct_pts[i]
        rel_pt = Vector2D.pt_diff(test_pt, correct_pt)
        # Express the offset in the garment-aligned coordinate frame.
        rel_pts.append((Vector2D.dot_prod(rel_pt, x_axis),
                        Vector2D.dot_prod(rel_pt, y_axis)))
        errors.append(Vector2D.pt_distance(test_pt, correct_pt))
    return (lst_avg(errors), rel_pts)
def draw_line(img, pt1, pt2):
    """Draw the infinite line through pt1 and pt2 across the full image height.

    The line is extended to its intercepts with the top (y=0) and bottom
    (y=img.height) borders and drawn in red.
    """
    line = Vector2D.make_ln_from_pts(pt1, pt2)
    ln_start = Vector2D.intercept(line, Vector2D.horiz_ln(y=0))
    ln_end = Vector2D.intercept(line, Vector2D.horiz_ln(y=img.height))
    # cv.Line needs integer pixel coordinates; the intercepts are floats.
    # Every other cv.Line call site in this file casts the same way.
    start = (int(ln_start[0]), int(ln_start[1]))
    end = (int(ln_end[0]), int(ln_end[1]))
    cv.Line(img, start, end, cv.CV_RGB(255, 0, 0), 2)
def get_fold_line(model,i):
    """Compute fold line number i for the fitted garment model.

    Returns [foldStart, foldEnd] as integer pixel tuples.  Endpoints are
    scaled slightly past the garment contour so the fold fully crosses
    it.  Exits the process for unsupported cloth TYPEs.
    """
    show_message("GET_FOLD_LINE - begin", MsgTypes.debug)
    foldStart = None
    foldEnd = None
    if(TYPE == ASYMM): #towel
        if(i == 1): #Fold in half
            show_message("Model verticies " + str(model.polygon_vertices_int()), MsgTypes.info)
            # First four vertices: bottom-left, top-left, top-right, bottom-right.
            [bl,tl,tr,br] = [pt for pt in model.polygon_vertices_int()][0:4]
            foldStart = Vector2D.pt_center(bl,br)
            foldEnd = Vector2D.pt_center(tl,tr)
            # make foldline a little bit bigger than the contour
            foldLineCenter = Vector2D.pt_center(foldStart,foldEnd)
            foldStart = Vector2D.scale_pt(foldStart,1.3,foldLineCenter)
            foldEnd = Vector2D.scale_pt(foldEnd,1.3,foldLineCenter)
            # convert points to integer pixel coordinates
            foldStart = (int(Vector2D.pt_x(foldStart)),int(Vector2D.pt_y(foldStart)))
            foldEnd = (int(Vector2D.pt_x(foldEnd)),int(Vector2D.pt_y(foldEnd)))
        elif(i == 2): #Fold in half again
            show_message("Model verticies " + str(model.polygon_vertices_int()), MsgTypes.info);
            [bl,tl,tr,br] = ([pt for pt in model.polygon_vertices_int()])[0:4]
            foldStart = Vector2D.pt_center(br,tr)
            foldEnd = Vector2D.pt_center(bl,tl)
            # make foldline a little bit bigger than the contour
            foldLineCenter = Vector2D.pt_center(foldStart,foldEnd)
            foldStart = Vector2D.scale_pt(foldStart,1.2,foldLineCenter)
            foldEnd = Vector2D.scale_pt(foldEnd,1.2,foldLineCenter)
            # convert points to integer pixel coordinates
            foldStart = (int(Vector2D.pt_x(foldStart)),int(Vector2D.pt_y(foldStart)))
            foldEnd = (int(Vector2D.pt_x(foldEnd)),int(Vector2D.pt_y(foldEnd)))
    elif(TYPE == TEE_SKEL):
        if(i == 1):
            # Left sleeve fold: between the left collar/shoulder midpoint
            # and the bottom-left corner shifted by the same offset.
            ls = model.left_shoulder_top()
            lc = model.left_collar()
            lslc = Vector2D.pt_center(ls,lc) # point between ls and lc
            bl = model.bottom_left()
            sbl = Vector2D.translate_pt(bl,Vector2D.pt_diff(lslc,ls)) # shifted bl by vector (ls,lslc)
            foldLineCenter = Vector2D.pt_center(lslc,sbl)
            # make foldline a little bit bigger than the contour
            # NOTE(review): asymmetric factors (1.3 vs 1.4) — the i == 2
            # branch uses 1.5 for both; confirm this is intentional.
            lslc = Vector2D.scale_pt(lslc,1.3,foldLineCenter)
            sbl = Vector2D.scale_pt(sbl,1.4,foldLineCenter)
            # convert points to integer pixel coordinates
            foldEnd = (int(Vector2D.pt_x(lslc)),int(Vector2D.pt_y(lslc)))
            foldStart = (int(Vector2D.pt_x(sbl)),int(Vector2D.pt_y(sbl)))
        if(i == 2):
            # Right sleeve fold (mirror of i == 1).
            rs = model.right_shoulder_top()
            rc = model.right_collar()
            rsrc = Vector2D.pt_center(rs,rc) # point between rs and rc
            br = model.bottom_right()
            sbr = Vector2D.translate_pt(br,Vector2D.pt_diff(rsrc,rs)) # shifted br by vector (rs,rsrc)
            foldLineCenter = Vector2D.pt_center(rsrc,sbr)
            # make foldline a little bit bigger than the contour
            rsrc = Vector2D.scale_pt(rsrc,1.5,foldLineCenter)
            sbr = Vector2D.scale_pt(sbr,1.5,foldLineCenter)
            # convert points to integer pixel coordinates
            foldStart = (int(Vector2D.pt_x(rsrc)),int(Vector2D.pt_y(rsrc)))
            foldEnd = (int(Vector2D.pt_x(sbr)),int(Vector2D.pt_y(sbr)))
        if(i == 3):
            # Fold in half: midline between shoulder and bottom corners.
            ls = model.left_shoulder_top()
            rs = model.right_shoulder_top()
            bl = model.bottom_left()
            br = model.bottom_right()
            foldStart = Vector2D.pt_center(br,rs)
            foldEnd = Vector2D.pt_center(bl,ls)
            foldLineCenter = Vector2D.pt_center(foldStart,foldEnd)
            # make foldline a little bit bigger than the contour
            #foldStart = Vector2D.scale_pt(foldStart,0.9,foldLineCenter)
            #foldEnd = Vector2D.scale_pt(foldEnd,0.9,foldLineCenter)
            # convert points to integer pixel coordinates
            foldStart = (int(Vector2D.pt_x(foldStart)),int(Vector2D.pt_y(foldStart)))
            foldEnd = (int(Vector2D.pt_x(foldEnd)),int(Vector2D.pt_y(foldEnd)))
    else:
        show_message("Not implemented type of cloth",MsgTypes.exception)
        sys.exit()
    foldLine = [foldStart, foldEnd]
    show_message("New fold line: " + str(foldLine),MsgTypes.info)
    show_message("GET_FOLD_LINE - end", MsgTypes.debug)
    return foldLine;
def fit(self,model,contour,img_annotated=None,img=None):
    """Fit a deformable cloth model to an observed contour.

    Phases: optional initialization (translate / rotate / scale to align
    the model with the contour's principal axes), then optional
    orientation, symmetric, asymmetric, and fine-tune black-box
    optimizations.  Returns (nearest_pts, final_model, fitted_model)
    where nearest_pts are the contour points closest to the final model
    vertices.
    """
    assert not model.illegal()
    if not img_annotated:
        # Extent of the observed contour.
        # NOTE(review): no image is created before cv.Set below — looks
        # like a CreateImage call went missing; confirm callers always
        # pass img_annotated.
        xs = [x for (x,y) in contour]
        ys = [y for (x,y) in contour]
        width = max(xs) - min(xs)
        height = max(ys) - min(ys)
        cv.Set(img_annotated,cv.CV_RGB(255,255,255))
    model.set_image(cv.CloneImage(img_annotated))
    shape_contour = contour
    if SHOW_CONTOURS:
        cv.DrawContours(img_annotated,shape_contour,cv.CV_RGB(255,0,0),cv.CV_RGB(255,0,0),0,1,8,(0,0))
    if self.INITIALIZE:
        self.printout("INITIALIZING")
        # Principal-axis summaries of the observed and model contours.
        (real_center,real_top,real_theta,real_scale) = get_principle_info(shape_contour)
        if SHOW_UNSCALED_MODEL:
            model.draw_to_image(img_annotated,cv.CV_RGB(0,0,255))
        model_contour = model.vertices_dense(constant_length=False,density=30)
        (model_center,model_top,model_theta,model_scale) = get_principle_info(model_contour)
        displ = displacement(model_center,real_center)
        #Drawing
        if SHOW_INIT_PTS:
            top_img = cv.CloneImage(img_annotated)
            cv.DrawContours(top_img,shape_contour,cv.CV_RGB(255,0,0),cv.CV_RGB(255,0,0),0,1,8,(0,0))
            model.draw_contour(top_img,cv.CV_RGB(0,0,255),2)
            draw_pt(top_img,real_top,cv.CV_RGB(255,0,0))
            draw_pt(top_img,real_center,cv.CV_RGB(255,0,0))
            draw_pt(top_img,model_top,cv.CV_RGB(0,0,255))
            draw_pt(top_img,model_center,cv.CV_RGB(0,0,255))
            cv.NamedWindow("Top")
            cv.ShowImage("Top",top_img)
            cv.WaitKey()
        angle = model_theta - real_theta
        # When the orientation phase will run, skip the rotation here.
        if self.ORIENT_OPT:
            angle = 0
        scale = real_scale/float(model_scale)
        # Guard against degenerate scale estimates.
        if scale < 0.25:
            scale = 1
        model_trans = translate_poly(model.polygon_vertices(),displ)
        model_rot = rotate_poly(model_trans,-1*angle,real_center)
        model_scaled = scale_poly(model_rot,scale,real_center)
        #(model_center,model_top,model_theta,model_scale) = get_principle_info(model_scaled)
        #Do the same to the actual model
        model.translate(displ)
        if self.ROTATE:
            model.rotate(-1*angle,real_center)
        model.scale(scale,real_center)
        if SHOW_SCALED_MODEL:
            model.draw_to_image(img_annotated,cv.CV_RGB(0,0,255))
    self.printout("Energy is: %f"%model.score(shape_contour))
    self.printout("Shape contour has %d points"%(len(shape_contour)))
    sparse_shape_contour = make_sparse(shape_contour,1000)
    #Optimize
    if self.ORIENT_OPT:
        init_model = Models.Orient_Model(model,pi/2)
        orient_model_finished = self.black_box_opt(model=init_model,contour=shape_contour,num_iters = self.num_iters,delta=init_model.preferred_delta(),epsilon = 0.01,mode="orient",image=img)
        model_oriented = orient_model_finished.transformed_model()
    else:
        model_oriented = model
    if self.SYMM_OPT:
        self.printout("SYMMETRIC OPTIMIZATION")
        new_model_symm = self.black_box_opt(model=model_oriented,contour=shape_contour,num_iters = self.num_iters,delta=model.preferred_delta(),epsilon = 0.01,mode="symm",image=img)
    else:
        new_model_symm = model_oriented
    if SHOW_SYMM_MODEL:
        new_model_symm.draw_to_image(img=img_annotated,color=cv.CV_RGB(0,255,0))
    model=new_model_symm.make_asymm()
    if self.HIGH_EXPLORATION:
        exp_factor = 3.0
    else:
        exp_factor = 1.5
    if self.ASYMM_OPT:
        new_model_asymm = self.black_box_opt(model=model,contour=shape_contour,num_iters=self.num_iters,delta=model.preferred_delta(),exploration_factor=exp_factor,fine_tune=False,mode="asymm",image=img)
    else:
        new_model_asymm = model
    if self.FINE_TUNE:
        #tunable_model = model_oriented.make_tunable()
        tunable_model = new_model_asymm.make_tunable()
        final_model = self.black_box_opt(model=tunable_model,contour=shape_contour,num_iters=self.num_iters,delta=5.0,exploration_factor=1.5,fine_tune=False,image=img)
        final_model = final_model.final()
    else:
        final_model = new_model_asymm
    final_model.draw_to_image(img=img_annotated,color=cv.CV_RGB(255,0,255))
    # Snap each final model vertex to the nearest observed contour point.
    nearest_pts = []
    for vert in final_model.polygon_vertices():
        nearest_pt = min(shape_contour,key=lambda pt: Vector2D.pt_distance(pt,vert))
        cv.Circle(img_annotated,nearest_pt,5,cv.CV_RGB(255,255,255),3)
        nearest_pts.append(nearest_pt)
    fitted_model = Models.Point_Model_Contour_Only_Asymm(*nearest_pts)
    #fitted_model = final_model
    if SHOW_FITTED:
        fitted_model.draw_to_image(img=img_annotated,color=cv.CV_RGB(0,255,255))
    return (nearest_pts,final_model,fitted_model)
def draw_line(img, pt1, pt2):
    """Draw the infinite line through pt1 and pt2 across the full image height.

    The line is extended to its intercepts with the top (y=0) and bottom
    (y=img.height) borders and drawn in red.
    """
    line = Vector2D.make_ln_from_pts(pt1, pt2)
    ln_start = Vector2D.intercept(line, Vector2D.horiz_ln(y=0))
    ln_end = Vector2D.intercept(line, Vector2D.horiz_ln(y=img.height))
    # cv.Line needs integer pixel coordinates; the intercepts are floats.
    # Every other cv.Line call site in this file casts the same way.
    start = (int(ln_start[0]), int(ln_start[1]))
    end = (int(ln_end[0]), int(ln_end[1]))
    cv.Line(img, start, end, cv.CV_RGB(255, 0, 0), 2)
def fit(self,model,contour,img_annotated=None,img=None):
    """Fit a deformable cloth model to an observed contour (debug variant).

    Same pipeline as the other fit(): optional initialization
    (translate / rotate / scale with the extra in-place flag argument),
    then orientation, symmetric, asymmetric and fine-tune black-box
    optimization phases.  Returns (nearest_pts, final_model,
    fitted_model).  The triple-quoted "DEBUG" blocks are deliberately
    inert string expressions; deleting an opening quote re-enables the
    enclosed visualisation code (its trailing #""" then closes as a
    comment-prefixed quote).
    """
    assert not model.illegal()
    if not img_annotated:
        # Extent of the observed contour.
        # NOTE(review): no image is created before cv.Set below — looks
        # like a CreateImage call went missing; confirm callers always
        # pass img_annotated.
        xs = [x for (x,y) in contour]
        ys = [y for (x,y) in contour]
        width = max(xs) - min(xs)
        height = max(ys) - min(ys)
        cv.Set(img_annotated,cv.CV_RGB(255,255,255))
    model.set_image(cv.CloneImage(img_annotated))
    shape_contour = contour
    if SHOW_CONTOURS:
        cv.DrawContours(img_annotated,shape_contour,cv.CV_RGB(255,0,0),cv.CV_RGB(255,0,0),0,1,8,(0,0))
    if self.INITIALIZE:
        self.printout("INITIALIZING")
        # Principal-axis summaries of the observed and model contours.
        (real_center,real_top,real_theta,real_scale) = get_principle_info(shape_contour)
        if SHOW_UNSCALED_MODEL:
            model.draw_to_image(img_annotated,cv.CV_RGB(0,0,255))
        model_contour = model.vertices_dense(constant_length=False,density=30,includeFoldLine = False)
        (model_center,model_top,model_theta,model_scale) = get_principle_info(model_contour)
        displ = displacement(model_center,real_center)
        #Drawing
        if SHOW_INIT_PTS:
            top_img = cv.CloneImage(img_annotated)
            cv.DrawContours(top_img,shape_contour,cv.CV_RGB(255,0,0),cv.CV_RGB(255,0,0),0,1,8,(0,0))
            model.draw_contour(top_img,cv.CV_RGB(0,0,255),2)
            draw_pt(top_img,real_top,cv.CV_RGB(255,0,0))
            draw_pt(top_img,real_center,cv.CV_RGB(255,0,0))
            draw_pt(top_img,model_top,cv.CV_RGB(0,0,255))
            draw_pt(top_img,model_center,cv.CV_RGB(0,0,255))
            cv.NamedWindow("Top")
            cv.ShowImage("Top",top_img)
            cv.WaitKey()
        angle = model_theta - real_theta
        #if self.ORIENT_OPT:
        #    angle = 0
        scale = real_scale/float(model_scale)
        # Guard against degenerate scale estimates.
        if scale < 0.25:
            scale = 1
        model_trans = translate_poly(model.polygon_vertices(),displ)
        model_rot = rotate_poly(model_trans,-1*angle,real_center)
        model_scaled = scale_poly(model_rot,scale,real_center)
        """ DEBUG
        print "/**************Test****************/"
        A = [ (int(pt[0]),int(pt[1])) for pt in model_trans]
        B = [ (int(pt[0]),int(pt[1])) for pt in model_rot]
        C = [ (int(pt[0]),int(pt[1])) for pt in model_scaled]
        cv.NamedWindow("Debug window")
        im = cv.CloneImage(img_annotated)
        model.draw_contour(im,cv.CV_RGB(100,100,100),2)
        cv.DrawContours(im,shape_contour,cv.CV_RGB(255,0,0),cv.CV_RGB(255,0,0),0,1,8,(0,0))
        draw_pt(im,real_top,cv.CV_RGB(255,0,0))
        draw_pt(im,real_center,cv.CV_RGB(255,0,0))
        draw_pt(im,model_top,cv.CV_RGB(100,100,100))
        draw_pt(im,model_center,cv.CV_RGB(100,100,100))
        cv.PolyLine(im,[A],1,cv.CV_RGB(255,0,0),1)
        cv.ShowImage("Debug window",im)
        cv.WaitKey()
        cv.PolyLine(im,[B],1,cv.CV_RGB(0,255,0),1)
        cv.ShowImage("Debug window",im)
        cv.WaitKey()
        cv.PolyLine(im,[C],1,cv.CV_RGB(0,0,255),1)
        cv.ShowImage("Debug window",im)
        cv.WaitKey()
        cv.DestroyWindow("Debug window")
        print "/************EndOfTest*************/"
        self.exitIfKeyPressed("c");
        #"""
        #(model_center,model_top,model_theta,model_scale) = get_principle_info(model_scaled)
        #Do the same to the actual model
        # translate model
        model.translate(displ,True)
        """ DEBUG
        print "/**************Test****************/"
        cv.NamedWindow("Translate model")
        im = cv.CloneImage(img_annotated)
        cv.PolyLine(im,[model.polygon_vertices_int()],1,cv.CV_RGB(0,0,255),1)
        cv.ShowImage("Translate model",im)
        cv.WaitKey()
        cv.DestroyWindow("Translate model")
        print "/************EndOfTest*************/"
        #"""
        #rotate model
        if self.ROTATE:
            model.rotate(-1*angle,real_center,True)
        """ DEBUG
        print "/**************Test****************/"
        cv.NamedWindow("Rotate model")
        im = cv.CloneImage(img_annotated)
        cv.PolyLine(im,[model.polygon_vertices_int()],1,cv.CV_RGB(0,0,255),1)
        cv.ShowImage("Rotate model",im)
        cv.WaitKey()
        cv.DestroyWindow("Rotate model")
        print "/************EndOfTest*************/"
        #"""
        #scale model
        model.scale(scale,real_center,True)
        if SHOW_SCALED_MODEL:
            model.draw_to_image(img_annotated,cv.CV_RGB(0,0,255))
        """ DEBUG
        print "/**************Test****************/"
        cv.NamedWindow("Scale model")
        im = cv.CloneImage(img_annotated)
        cv.PolyLine(im,[model.polygon_vertices_int()],1,cv.CV_RGB(0,0,255),1)
        cv.ShowImage("Scale model",im)
        cv.WaitKey()
        cv.DestroyWindow("Scale model")
        print "/************EndOfTest*************/"
        #"""
    self.printout("Energy is: %f"%model.score(shape_contour))
    self.printout("Shape contour has %d points"%(len(shape_contour)))
    sparse_shape_contour = make_sparse(shape_contour,1000)
    """ DEBUG
    print "/**************Test****************/"
    cv.NamedWindow("Sparse_shape_contour model")
    im = cv.CloneImage(img_annotated)
    cv.PolyLine(im,[sparse_shape_contour],1,cv.CV_RGB(0,0,255),1)
    cv.ShowImage("Sparse_shape_contour model",im)
    cv.WaitKey()
    cv.DestroyWindow("Sparse_shape_contour model")
    print "/************EndOfTest*************/"
    #"""
    #Optimize
    # Orientation phase
    if self.ORIENT_OPT:
        self.printout("ORIENTATION OPTIMIZATION")
        init_model = Models.Orient_Model(model,pi/2)
        orient_model_finished = self.black_box_opt(model=init_model,contour=shape_contour,num_iters = self.num_iters,delta=init_model.preferred_delta(),epsilon = 0.01,mode="orient",image=img)
        """ DEBUG
        print "/**************Test****************/"
        cv.NamedWindow("Orientation phase: final model")
        im = cv.CloneImage(img_annotated)
        cv.PolyLine(im,[orient_model_finished.polygon_vertices_int()],1,cv.CV_RGB(0,0,255),1)
        cv.ShowImage("Orientation phase: final model",im)
        cv.WaitKey()
        cv.DestroyWindow("Orientation phase: final model")
        print "/************EndOfTest*************/"
        #"""
        model_oriented = orient_model_finished.transformed_model()
    else:
        model_oriented = model
    # Symmetric phase
    if self.SYMM_OPT:
        self.printout("SYMMETRIC OPTIMIZATION")
        new_model_symm = self.black_box_opt(model=model_oriented,contour=shape_contour,num_iters = self.num_iters,delta=model.preferred_delta(),epsilon = 0.01,mode="symm",image=img)
        """ DEBUG
        print "/**************Test****************/"
        cv.NamedWindow("Symmetric phase: final model")
        im = cv.CloneImage(img_annotated)
        cv.PolyLine(im,[new_model_symm.polygon_vertices_int()],1,cv.CV_RGB(0,0,255),1)
        cv.ShowImage("Symmetric phase: final model",im)
        cv.WaitKey()
        cv.DestroyWindow("Symmetric phase: final model")
        print "/************EndOfTest*************/"
        #"""
    else:
        new_model_symm = model_oriented
    if SHOW_SYMM_MODEL:
        new_model_symm.draw_to_image(img=img_annotated,color=cv.CV_RGB(0,255,0))
    # Asymmetric phase
    model=new_model_symm.make_asymm()
    if self.HIGH_EXPLORATION:
        exp_factor = 3.0
    else:
        exp_factor = 1.5
    if self.ASYMM_OPT:
        self.printout("ASYMMETRIC OPTIMIZATION")
        new_model_asymm = self.black_box_opt(model=model,contour=shape_contour,num_iters=self.num_iters,delta=model.preferred_delta(),exploration_factor=exp_factor,fine_tune=False,mode="asymm",image=img)
        """ DEBUG
        print "/**************Test****************/"
        cv.NamedWindow("Asymmetric phase: final model")
        im = cv.CloneImage(img_annotated)
        cv.PolyLine(im,[new_model_symm.polygon_vertices_int()],1,cv.CV_RGB(0,0,255),1)
        cv.ShowImage("Asymmetric phase: final model",im)
        cv.WaitKey()
        cv.DestroyWindow("Asymmetric phase: final model")
        print "/************EndOfTest*************/"
        #"""
    else:
        new_model_asymm = model
    #Final tune phase
    if self.FINE_TUNE:
        self.printout("FINAL TUNE")
        #tunable_model = model_oriented.make_tunable()
        tunable_model = new_model_asymm.make_tunable()
        final_model = self.black_box_opt(model=tunable_model,contour=shape_contour,num_iters=self.num_iters,delta=5.0,exploration_factor=1.5,fine_tune=False,image=img)
        final_model = final_model.final()
    else:
        final_model = new_model_asymm
    final_model.draw_to_image(img=img_annotated,color=cv.CV_RGB(255,0,255))
    # Find nearest points
    nearest_pts = []
    for vert in final_model.polygon_vertices():
        nearest_pt = min(shape_contour,key=lambda pt: Vector2D.pt_distance(pt,vert))
        cv.Circle(img_annotated,nearest_pt,5,cv.CV_RGB(255,255,255),3)
        nearest_pts.append(nearest_pt)
    fitted_model = Models.Point_Model_Contour_Only_Asymm(*nearest_pts)
    if SHOW_FITTED:
        fitted_model.draw_to_image(img=img_annotated,color=cv.CV_RGB(0,255,255))
    return (nearest_pts,final_model,fitted_model)
def fit(self, model, contour, img_annotated=None, img=None):
    """Fit a deformable cloth model to an observed contour.

    Phases: optional initialization (translate / rotate / scale to align
    the model with the contour's principal axes), then optional
    orientation, symmetric, asymmetric, and fine-tune black-box
    optimizations.  Returns (nearest_pts, final_model, fitted_model)
    where nearest_pts are the contour points closest to the final model
    vertices.
    """
    assert not model.illegal()
    if not img_annotated:
        # Extent of the observed contour.
        # NOTE(review): no image is created before cv.Set below — looks
        # like a CreateImage call went missing; confirm callers always
        # pass img_annotated.
        xs = [x for (x, y) in contour]
        ys = [y for (x, y) in contour]
        width = max(xs) - min(xs)
        height = max(ys) - min(ys)
        cv.Set(img_annotated, cv.CV_RGB(255, 255, 255))
    model.set_image(cv.CloneImage(img_annotated))
    shape_contour = contour
    if SHOW_CONTOURS:
        cv.DrawContours(img_annotated, shape_contour, cv.CV_RGB(255, 0, 0),
                        cv.CV_RGB(255, 0, 0), 0, 1, 8, (0, 0))
    if self.INITIALIZE:
        self.printout("INITIALIZING")
        # Principal-axis summaries of the observed and model contours.
        (real_center, real_top, real_theta,
         real_scale) = get_principle_info(shape_contour)
        if SHOW_UNSCALED_MODEL:
            model.draw_to_image(img_annotated, cv.CV_RGB(0, 0, 255))
        model_contour = model.vertices_dense(constant_length=False,
                                             density=30)
        (model_center, model_top, model_theta,
         model_scale) = get_principle_info(model_contour)
        displ = displacement(model_center, real_center)
        #Drawing
        if SHOW_INIT_PTS:
            top_img = cv.CloneImage(img_annotated)
            cv.DrawContours(top_img, shape_contour, cv.CV_RGB(255, 0, 0),
                            cv.CV_RGB(255, 0, 0), 0, 1, 8, (0, 0))
            model.draw_contour(top_img, cv.CV_RGB(0, 0, 255), 2)
            draw_pt(top_img, real_top, cv.CV_RGB(255, 0, 0))
            draw_pt(top_img, real_center, cv.CV_RGB(255, 0, 0))
            draw_pt(top_img, model_top, cv.CV_RGB(0, 0, 255))
            draw_pt(top_img, model_center, cv.CV_RGB(0, 0, 255))
            cv.NamedWindow("Top")
            cv.ShowImage("Top", top_img)
            cv.WaitKey()
        angle = model_theta - real_theta
        # When the orientation phase will run, skip the rotation here.
        if self.ORIENT_OPT:
            angle = 0
        scale = real_scale / float(model_scale)
        # Guard against degenerate scale estimates.
        if scale < 0.25:
            scale = 1
        model_trans = translate_poly(model.polygon_vertices(), displ)
        model_rot = rotate_poly(model_trans, -1 * angle, real_center)
        model_scaled = scale_poly(model_rot, scale, real_center)
        #(model_center,model_top,model_theta,model_scale) = get_principle_info(model_scaled)
        #Do the same to the actual model
        model.translate(displ)
        if self.ROTATE:
            model.rotate(-1 * angle, real_center)
        model.scale(scale, real_center)
        if SHOW_SCALED_MODEL:
            model.draw_to_image(img_annotated, cv.CV_RGB(0, 0, 255))
    self.printout("Energy is: %f" % model.score(shape_contour))
    self.printout("Shape contour has %d points" % (len(shape_contour)))
    sparse_shape_contour = make_sparse(shape_contour, 1000)
    #Optimize
    if self.ORIENT_OPT:
        init_model = Models.Orient_Model(model, pi / 2)
        orient_model_finished = self.black_box_opt(
            model=init_model,
            contour=shape_contour,
            num_iters=self.num_iters,
            delta=init_model.preferred_delta(),
            epsilon=0.01,
            mode="orient",
            image=img)
        model_oriented = orient_model_finished.transformed_model()
    else:
        model_oriented = model
    if self.SYMM_OPT:
        self.printout("SYMMETRIC OPTIMIZATION")
        new_model_symm = self.black_box_opt(model=model_oriented,
                                            contour=shape_contour,
                                            num_iters=self.num_iters,
                                            delta=model.preferred_delta(),
                                            epsilon=0.01,
                                            mode="symm",
                                            image=img)
    else:
        new_model_symm = model_oriented
    if SHOW_SYMM_MODEL:
        new_model_symm.draw_to_image(img=img_annotated,
                                     color=cv.CV_RGB(0, 255, 0))
    model = new_model_symm.make_asymm()
    if self.HIGH_EXPLORATION:
        exp_factor = 3.0
    else:
        exp_factor = 1.5
    if self.ASYMM_OPT:
        new_model_asymm = self.black_box_opt(model=model,
                                             contour=shape_contour,
                                             num_iters=self.num_iters,
                                             delta=model.preferred_delta(),
                                             exploration_factor=exp_factor,
                                             fine_tune=False,
                                             mode="asymm",
                                             image=img)
    else:
        new_model_asymm = model
    if self.FINE_TUNE:
        #tunable_model = model_oriented.make_tunable()
        tunable_model = new_model_asymm.make_tunable()
        final_model = self.black_box_opt(model=tunable_model,
                                         contour=shape_contour,
                                         num_iters=self.num_iters,
                                         delta=5.0,
                                         exploration_factor=1.5,
                                         fine_tune=False,
                                         image=img)
        final_model = final_model.final()
    else:
        final_model = new_model_asymm
    final_model.draw_to_image(img=img_annotated, color=cv.CV_RGB(255, 0, 255))
    # Snap each final model vertex to the nearest observed contour point.
    nearest_pts = []
    for vert in final_model.polygon_vertices():
        nearest_pt = min(shape_contour,
                         key=lambda pt: Vector2D.pt_distance(pt, vert))
        cv.Circle(img_annotated, nearest_pt, 5, cv.CV_RGB(255, 255, 255), 3)
        nearest_pts.append(nearest_pt)
    fitted_model = Models.Point_Model_Contour_Only_Asymm(*nearest_pts)
    #fitted_model = final_model
    if SHOW_FITTED:
        fitted_model.draw_to_image(img=img_annotated,
                                   color=cv.CV_RGB(0, 255, 255))
    return (nearest_pts, final_model, fitted_model)
def extract_samples(self,nearest_pts,cv_image,contour):
    """Crop three texture patches (left, right, top) around the fitted points.

    nearest_pts is (center, bottom-left, top-left, top-right,
    bottom-right).  Three axis-aligned rectangles of a common size are
    constructed inside the shape, cropped from cv_image, and saved to
    self.save_dir as l_part.png / r_part.png / t_part.png.  Returns
    (l_line, r_line), the segments from the center to the top corners.
    """
    [center,b_l,t_l,t_r,b_r] = nearest_pts
    # Segments from the center to the top corners, and the outline sides.
    l_line = Vector2D.make_seg(center,t_l)
    r_line = Vector2D.make_seg(center,t_r)
    l_side = Vector2D.make_seg(b_l,t_l)
    bl_side = Vector2D.make_seg(b_l,center)
    br_side = Vector2D.make_seg(b_r,center)
    r_side = Vector2D.make_seg(b_r,t_r)
    t_side = Vector2D.make_seg(t_l,t_r)
    # Left crop rectangle, anchored at the midpoint of the bottom-left side.
    l_crop_br = Vector2D.extrapolate_pct(bl_side,0.5)
    l_crop_bl = Vector2D.intercept(l_side,Vector2D.horiz_ln(l_crop_br[1]))
    l_crop_tr = Vector2D.intercept(Vector2D.vert_ln(l_crop_br[0]),l_line)
    l_crop_tl = Vector2D.pt_sum(l_crop_bl,Vector2D.pt_diff(l_crop_tr,l_crop_br))
    l_rect = (l_crop_bl,l_crop_br,l_crop_tr,l_crop_tl)
    # Right crop rectangle (mirror of the left construction).
    r_crop_bl = Vector2D.extrapolate_pct(br_side,0.5)
    r_crop_br = Vector2D.intercept(r_side,Vector2D.horiz_ln(r_crop_bl[1]))
    r_crop_tl = Vector2D.intercept(Vector2D.vert_ln(r_crop_bl[0]),r_line)
    r_crop_tr = Vector2D.pt_sum(r_crop_br,Vector2D.pt_diff(r_crop_tl,r_crop_bl))
    r_rect = (r_crop_bl,r_crop_br,r_crop_tr,r_crop_tl)
    # Top crop rectangle; extend toward whichever top corner is lower.
    t_crop_bl = Vector2D.extrapolate_pct(l_line,0.5)
    t_crop_br = Vector2D.intercept(Vector2D.horiz_ln(t_crop_bl[1]),r_line)
    if t_l[1] > t_r[1]:
        t_crop_tl = Vector2D.intercept(Vector2D.vert_ln(t_crop_bl[0]),t_side)
        t_crop_tr = Vector2D.pt_sum(t_crop_br,Vector2D.pt_diff(t_crop_tl,t_crop_bl))
    else:
        t_crop_tr = Vector2D.intercept(Vector2D.vert_ln(t_crop_br[0]),t_side)
        t_crop_tl = Vector2D.pt_sum(t_crop_bl,Vector2D.pt_diff(t_crop_tr,t_crop_br))
    """
    t_rect_old = (t_crop_bl,t_crop_br,t_crop_tr,t_crop_tl)
    (orig_t_width,orig_t_height) = Vector2D.rect_size(t_rect_old)
    while cv.PointPolygonTest(contour,Vector2D.pt_scale(Vector2D.pt_sum(t_crop_tl,t_crop_tr),0.5),0) < 0:
        t_crop_tl = (t_crop_tl[0],t_crop_tl[1]+0.05*orig_t_height)
        t_crop_tr = (t_crop_tr[0],t_crop_tr[1]+0.05*orig_t_height)
        print "shrinking t_height"
    """
    t_rect = (t_crop_bl,t_crop_br,t_crop_tr,t_crop_tl)
    (l_width,l_height) = Vector2D.rect_size(l_rect)
    (r_width,r_height) = Vector2D.rect_size(r_rect)
    (t_width,t_height) = Vector2D.rect_size(t_rect)
    #print "Height difference:%f"%(t_height - orig_t_height)
    # Use the smallest patch dimensions (with 10% margin) for all crops.
    width = min(l_width,r_width,t_width) * 0.9
    height = min(l_height,r_height,t_height) * 0.9
    # Clamp to a minimum 5-pixel patch size.
    if width < 5:
        width = 5
        print "Hit min"
    if height < 5:
        height = 5
        print "Hit min"
    l_rect_scaled = Vector2D.scale_rect(l_rect,width,height)
    r_rect_scaled = Vector2D.scale_rect(r_rect,width,height)
    t_rect_scaled = Vector2D.scale_rect(t_rect,width,height)
    filenames = ("l_part.png","r_part.png","t_part.png")
    for i,r in enumerate((l_rect_scaled,r_rect_scaled,t_rect_scaled)):
        # Crop via ROI on a clone so the source image is untouched.
        image_temp = cv.CloneImage(cv_image)
        cv.SetImageROI(image_temp,Vector2D.rect_to_cv(r))
        cv.SaveImage("%s/%s"%(self.save_dir,filenames[i]),image_temp)
    return (l_line,r_line)
def draw_seg(self, img, seg, color):
    """Render the segment seg onto img in the given color (thickness 3)."""
    seg_start, seg_end = Vector2D.end_points(seg)
    cv.Line(img, seg_start, seg_end, color, 3)