Exemplo n.º 1
0
 def adjust_fold(self, req):
     """Service handler: refine a requested fold line using vision.

     Moves the robot into a viewing stance, projects the requested 3-D
     fold endpoints into the left wide-stereo camera image, folds the
     previously saved cloth model along that 2-D line, saves it, and asks
     the fold_finder node to re-fit it against the live image.

     Args:
         req: service request carrying stamped 3-D points ``start`` and
             ``end`` that define the desired fold line.

     Returns:
         AdjustFoldResponse with the re-estimated 3-D fold endpoints.
     """
     #Go to viewing stance
     StanceUtils.call_stance("open_both", 5.0)
     StanceUtils.call_stance("viewing", 5.0)
     # Model persisted by a previous processing step; presumably written
     # by the process() pipeline -- TODO confirm the producer.
     last_model = pickle.load(open("/tmp/last_model.pickle"))
     camera_model = image_geometry.PinholeCameraModel()
     info = RosUtils.get_next_message("wide_stereo/left/camera_info",
                                      CameraInfo)
     cam_frame = info.header.frame_id
     camera_model.fromCameraInfo(info)
     # Stamp both endpoints with the same instant so TF resolves them
     # consistently at a single time.
     now = rospy.Time.now()
     req.start.header.stamp = now
     req.end.header.stamp = now
     self.listener.waitForTransform(cam_frame, req.start.header.frame_id,
                                    now, rospy.Duration(20.0))
     start_cam = self.listener.transformPoint(cam_frame, req.start)
     end_cam = self.listener.transformPoint(cam_frame, req.end)
     # Project the camera-frame 3-D endpoints into pixel coordinates,
     # since the folded model operates in image space.
     start = camera_model.project3dToPixel(
         (start_cam.point.x, start_cam.point.y, start_cam.point.z))
     end = camera_model.project3dToPixel(
         (end_cam.point.x, end_cam.point.y, end_cam.point.z))
     folded_model = Models.Point_Model_Folded(last_model, start, end)
     # Drop image references so the pickle stays small and loadable.
     folded_model.image = None
     folded_model.initial_model.image = None
     pickle.dump(folded_model, open("/tmp/last_model.pickle", 'w'))
     adjust_folded_model = rospy.ServiceProxy(
         "fold_finder_node/process_mono", ProcessMono)
     resp = adjust_folded_model("wide_stereo/left")
     # Assumes the service returns the fold endpoints as the first two
     # 3-D points -- TODO confirm against fold_finder_node.
     new_start = resp.pts3d[0]
     new_end = resp.pts3d[1]
     return AdjustFoldResponse(start=new_start, end=new_end)
Exemplo n.º 2
0
 def getModelSymm(self):
     """Build a symmetric model from the first drawn polygon.

     Returns:
         Models.Model_Symm built from the first half of the polygon's
         vertices, mirrored across ``self.symmline``.
     """
     poly = self.getPolys()[0].getShape()
     #Due to symmetry, we only need half the points)
     # Use floor division so the slice index stays an int under both
     # Python 2 and 3 (plain / would produce a float index in Python 3
     # and raise TypeError in the slice).
     vertices = poly.vertices()[0:len(poly.vertices()) // 2]
     tuple_vertices = [v.toTuple() for v in vertices]
     return Models.Model_Symm(tuple_vertices, self.symmline)
Exemplo n.º 3
0
 def getModelPantsSkel(self):
     """Construct a skeleton-based pants model from the annotated landmarks.

     Parameters passed: mid_center, top_center, mid_left,
     left_leg_center, top_left, plus the leg-opening width.
     """
     # Model width is the full span across the left leg opening.
     leg_span = Geometry2D.distance(self.left_leg_left, self.left_leg_right)
     # NOTE(review): the original comment lists left_leg_left as the last
     # landmark but top_left is what gets passed -- confirm intent.
     args = [self.mid_center.toTuple(),
             self.top_center.toTuple(),
             self.mid_left.toTuple(),
             self.left_leg_center.toTuple(),
             self.top_left.toTuple(),
             leg_span]
     return Models.Model_Pants_Skel_New(True, *args)
Exemplo n.º 4
0
    def getModelSkel(self):
        """Construct a less-restricted shirt skeleton model from landmarks.

        The sleeve width is twice the distance from the sleeve top to the
        sleeve node (the node sits mid-sleeve).
        """
        sleeve_w = 2 * Geometry2D.distance(self.sleeve_top, self.sleeve_node)
        landmarks = [self.spine_bottom, self.spine_top, self.collar,
                     self.shoulder_joint, self.shoulder_top,
                     self.sleeve_node, self.bottom_left]
        args = [pt.toTuple() for pt in landmarks]
        return Models.Model_Shirt_Skel_Less_Restricted(True,
                                                       *(args + [sleeve_w]))
Exemplo n.º 5
0
 def getModelTee(self):
     """Construct a tee-shirt skeleton model (no skew) from landmarks.

     The sleeve width is twice the sleeve-top-to-node distance.
     """
     sleeve_w = 2 * Geometry2D.distance(self.sleeve_top, self.sleeve_node)
     landmarks = [self.spine_bottom, self.spine_top, self.collar,
                  self.shoulder_joint, self.shoulder_top,
                  self.sleeve_node, self.bottom_left]
     args = [pt.toTuple() for pt in landmarks]
     return Models.Model_Tee_Skel_No_Skew(True, *(args + [sleeve_w]))
Exemplo n.º 6
0
 def saveModel(self):
     """Fold the current model along self.foldline and pickle the result.

     If the folded model is illegal (or structurally penalized) the
     workspace is cleared and nothing is written. Previously the output
     file was opened with mode 'w' up front, which truncated any existing
     saved model even on failure and leaked the file handle; the file is
     now opened only on success, inside a context manager.

     NOTE: the former fold-angle/displacement computation was dead code
     (its results were never used, and dy/dx could divide by zero), so it
     has been removed.
     """
     model = Models.Point_Model_Folded(self.initial_model,
                                       self.foldline_pts[0],
                                       self.foldline_pts[1])
     model.draw_to_image(self.background, cv.RGB(255, 0, 0))
     if model.illegal() or model.structural_penalty() >= 1.0:
         print("Model is illegal!")
         self.clearAll()
     else:
         # Drop the image reference so the pickle stays small.
         model.set_image(None)
         with open(self.output_modelpath, 'w') as out_file:
             pickle.dump(model, out_file)
Exemplo n.º 7
0
    def process(self,cv_image,info,image2=None):
        """Fit a clothing model to the garment contour found in cv_image.

        Pipeline: (optionally) warp the image by a saved homography,
        threshold out the garment contour, fit the configured shape model
        to it, annotate the image, (optionally) un-warp the annotations
        and points, persist the fitted model, and return the results.

        Args:
            cv_image: input camera image (legacy `cv` image); modified in
                place when self.transform is set.
            info: camera info passed through to the thresholding module.
            image2: unused here; presumably kept for interface
                compatibility -- TODO confirm.

        Returns:
            (return_pts, params, image_anno): mode-dependent key points,
            a params dict including "score", and the annotated image.
        """
        self.load_model(self.modelpath)
        if self.transform:
            # Rectify the input via the homography saved at matrix_location.
            H = cv.Load(self.matrix_location)
            input_image = cv.CloneImage(cv_image)
            cv.WarpPerspective(input_image,cv_image,H,
                    cv.CV_INTER_LINEAR+cv.CV_WARP_INVERSE_MAP+cv.CV_WARP_FILL_OUTLIERS)
        #Use the thresholding module to get the contour out
        shape_contour = thresholding.get_contour(cv_image,bg_mode=self.bg_mode,filter_pr2=self.filter_pr2
                                                    ,crop_rect=(self.cropx,self.cropy,self.cropwidth,self.cropheight),cam_info=info,listener=self.listener)

        #Use the shape_fitting module to fit the model to the contour

        # Fitter flags vary per garment mode; "folded" skips initialization
        # because the model is already positioned from the previous fit.
        if self.mode=="tee":
            #fitter = shape_fitting.ShapeFitter(SYMM_OPT=True,ORIENT_OPT=False,FINE_TUNE=False)
            fitter = shape_fitting.ShapeFitter(SYMM_OPT=False,ORIENT_OPT=True,FINE_TUNE=False,num_iters=self.num_iters)
        elif self.mode=="sweater":
            fitter = shape_fitting.ShapeFitter(SYMM_OPT=False,ORIENT_OPT=False,FINE_TUNE=False,num_iters=self.num_iters)
        elif self.mode=="folded":
            fitter = shape_fitting.ShapeFitter(SYMM_OPT=False,ORIENT_OPT=False,FINE_TUNE=False,INITIALIZE=False,num_iters=self.num_iters)
        else:
            fitter = shape_fitting.ShapeFitter(SYMM_OPT=False,ORIENT_OPT=False,FINE_TUNE=False,num_iters=self.num_iters)
        image_anno = cv.CloneImage(cv_image)
        (nearest_pts, final_model, fitted_model) = fitter.fit(self.model,shape_contour,image_anno)
        pts = nearest_pts

        params = {}

        # Select which fitted points are returned, per garment mode.
        if self.mode == "triangles":
            return_pts = [pts[1],pts[4],pts[2],pts[3]]
            self.highlight_pt(pts[1],cv.CV_RGB(255,0,0),image_anno)
            font = cv.InitFont(cv.CV_FONT_HERSHEY_COMPLEX,1.0,1.0)
            cv.PutText(image_anno,"(l)eft",(pts[1][0]-20,pts[1][1]-15),font,cv.CV_RGB(255,0,0))
            self.highlight_pt(pts[4],cv.CV_RGB(0,0,255),image_anno)
            cv.PutText(image_anno,"(r)ight",(pts[4][0]-20,pts[4][1]-15),font,cv.CV_RGB(0,0,255))
            params = {"tilt":0.0}
        elif self.mode == "towel":
            return_pts = pts
        elif self.mode == "tee" or self.mode == "sweater":
            # Drop pts[5:8]; presumably redundant sleeve points -- confirm.
            return_pts = pts[0:5]+pts[8:]
            params = {}
        elif self.mode == "folded":
            return_pts = [final_model.fold_bottom(),final_model.fold_top()]
        else:
            return_pts = pts
            params = {}
        if self.transform:
            # Map annotations and points back into the original (unwarped)
            # image frame using the inverse homography.
            H_inv = cv.CloneMat(H)
            cv.Invert(H,H_inv)
            anno_unchanged = cv.CloneImage(image_anno)
            cv.WarpPerspective(anno_unchanged,image_anno,H_inv,
                    cv.CV_INTER_LINEAR+cv.CV_WARP_INVERSE_MAP+cv.CV_WARP_FILL_OUTLIERS)
            new_return_pts = self.transform_pts(return_pts,H_inv)
            for i,pt in enumerate(new_return_pts):
                return_pts[i] = pt
        if self.mode != "triangles":
            for pt in return_pts:
                self.highlight_pt(pt,cv.CV_RGB(255,0,0),image_anno)
        # Persist the fitted model for the next stage (e.g. adjust_fold);
        # images are stripped first so the pickle stays small.
        if self.mode != "folded":
            fitted_model.set_image(None)
            pickle.dump(fitted_model,open("%s/last_model.pickle"%self.save_dir,'w'))
        else:
            model_pts = final_model.vertices_full()
            new_model = Models.Point_Model_Contour_Only_Asymm(*model_pts)
            pickle.dump(new_model,open("%s/last_model.pickle"%self.save_dir,'w'))
        score = final_model.score(shape_contour) #fitter.energy_fxn(final_model,shape_contour)
        params["score"] = score
        return (return_pts,params,image_anno)
Exemplo n.º 8
0
 def getModelSockSkel(self):
     """Construct a sock skeleton model from the annotated landmarks."""
     # Final parameter is half the sock width (kept as / to preserve the
     # original integer-division semantics under Python 2).
     half_width = self.sock_width / 2
     landmarks = (self.ankle_center.toTuple(),
                  self.ankle_joint.toTuple(),
                  self.toe_center.toTuple())
     return Models.Model_Sock_Skel(True,
                                   landmarks[0],
                                   landmarks[1],
                                   landmarks[2],
                                   self.sock_width,
                                   half_width)
Exemplo n.º 9
0
 def getModelAsymm(self):
     """Construct an asymmetric contour-only model from the first polygon."""
     shape = self.getPolys()[0].getShape()
     pts = tuple(v.toTuple() for v in shape.vertices())
     return Models.Point_Model_Contour_Only_Asymm(*pts)
Exemplo n.º 10
0
    def fit(self, model, contour, img_annotated=None, img=None):
        """Fit a deformable clothing model to an extracted garment contour.

        Stages (each gated by the corresponding self.* / module flag):
        initialization (translate/rotate/scale the model onto the contour
        by principal-axis alignment), orientation optimization, symmetric
        optimization, asymmetric optimization, and fine tuning. Finally the
        contour points nearest each model vertex are collected.

        Args:
            model: the deformable model to fit; mutated in place during
                initialization.
            contour: sequence of (x, y) contour points.
            img_annotated: image drawn on during fitting.
            img: optional image handed to the black-box optimizer.

        Returns:
            (nearest_pts, final_model, fitted_model): the contour points
            nearest each fitted vertex, the optimized model, and a
            contour-only model built from the nearest points.
        """
        assert not model.illegal()
        if not img_annotated:
            # NOTE(review): this branch computes the contour's bounding
            # extents but never actually creates an image, yet cv.Set is
            # called on img_annotated (still None/empty) -- this path
            # looks broken; presumably a cv.CreateImage call went missing.
            xs = [x for (x, y) in contour]
            ys = [y for (x, y) in contour]
            width = max(xs) - min(xs)
            height = max(ys) - min(ys)
            cv.Set(img_annotated, cv.CV_RGB(255, 255, 255))
        model.set_image(cv.CloneImage(img_annotated))

        shape_contour = contour

        if SHOW_CONTOURS:
            cv.DrawContours(img_annotated, shape_contour, cv.CV_RGB(255, 0, 0),
                            cv.CV_RGB(255, 0, 0), 0, 1, 8, (0, 0))
        if self.INITIALIZE:
            self.printout("INITIALIZING")
            # Principal-axis summary (center, top point, angle, scale) of
            # both the real contour and the model's dense contour.
            (real_center, real_top, real_theta,
             real_scale) = get_principle_info(shape_contour)
            if SHOW_UNSCALED_MODEL:
                model.draw_to_image(img_annotated, cv.CV_RGB(0, 0, 255))
            model_contour = model.vertices_dense(constant_length=False,
                                                 density=30)
            (model_center, model_top, model_theta,
             model_scale) = get_principle_info(model_contour)
            displ = displacement(model_center, real_center)

            #Drawing
            if SHOW_INIT_PTS:
                top_img = cv.CloneImage(img_annotated)
                cv.DrawContours(top_img, shape_contour, cv.CV_RGB(255, 0, 0),
                                cv.CV_RGB(255, 0, 0), 0, 1, 8, (0, 0))
                model.draw_contour(top_img, cv.CV_RGB(0, 0, 255), 2)
                draw_pt(top_img, real_top, cv.CV_RGB(255, 0, 0))
                draw_pt(top_img, real_center, cv.CV_RGB(255, 0, 0))
                draw_pt(top_img, model_top, cv.CV_RGB(0, 0, 255))
                draw_pt(top_img, model_center, cv.CV_RGB(0, 0, 255))
                cv.NamedWindow("Top")
                cv.ShowImage("Top", top_img)
                cv.WaitKey()

            angle = model_theta - real_theta
            # When orientation is optimized separately, skip rotating here.
            if self.ORIENT_OPT:
                angle = 0
            scale = real_scale / float(model_scale)
            # Guard against a degenerate (too-small) scale estimate.
            if scale < 0.25:
                scale = 1
            model_trans = translate_poly(model.polygon_vertices(), displ)
            model_rot = rotate_poly(model_trans, -1 * angle, real_center)
            model_scaled = scale_poly(model_rot, scale, real_center)

            #(model_center,model_top,model_theta,model_scale) = get_principle_info(model_scaled)

            #Do the same to the actual model

            model.translate(displ)
            if self.ROTATE:
                model.rotate(-1 * angle, real_center)
            model.scale(scale, real_center)

            if SHOW_SCALED_MODEL:
                model.draw_to_image(img_annotated, cv.CV_RGB(0, 0, 255))

        self.printout("Energy is: %f" % model.score(shape_contour))
        self.printout("Shape contour has %d points" % (len(shape_contour)))
        sparse_shape_contour = make_sparse(shape_contour, 1000)

        #Optimize
        # Stage 1: orientation-only optimization (wraps the model in an
        # Orient_Model parameterized by a single angle).
        if self.ORIENT_OPT:
            init_model = Models.Orient_Model(model, pi / 2)
            orient_model_finished = self.black_box_opt(
                model=init_model,
                contour=shape_contour,
                num_iters=self.num_iters,
                delta=init_model.preferred_delta(),
                epsilon=0.01,
                mode="orient",
                image=img)
            model_oriented = orient_model_finished.transformed_model()
        else:
            model_oriented = model

        # Stage 2: symmetric optimization (mirrored parameters move together).
        if self.SYMM_OPT:
            self.printout("SYMMETRIC OPTIMIZATION")
            new_model_symm = self.black_box_opt(model=model_oriented,
                                                contour=shape_contour,
                                                num_iters=self.num_iters,
                                                delta=model.preferred_delta(),
                                                epsilon=0.01,
                                                mode="symm",
                                                image=img)
        else:
            new_model_symm = model_oriented
        if SHOW_SYMM_MODEL:
            new_model_symm.draw_to_image(img=img_annotated,
                                         color=cv.CV_RGB(0, 255, 0))
        # Stage 3: asymmetric optimization on the un-mirrored model, with a
        # wider search when HIGH_EXPLORATION is set.
        model = new_model_symm.make_asymm()
        if self.HIGH_EXPLORATION:
            exp_factor = 3.0
        else:
            exp_factor = 1.5
        if self.ASYMM_OPT:
            new_model_asymm = self.black_box_opt(model=model,
                                                 contour=shape_contour,
                                                 num_iters=self.num_iters,
                                                 delta=model.preferred_delta(),
                                                 exploration_factor=exp_factor,
                                                 fine_tune=False,
                                                 mode="asymm",
                                                 image=img)
        else:
            new_model_asymm = model

        # Stage 4: fine tuning with a small fixed step size.
        if self.FINE_TUNE:
            #tunable_model = model_oriented.make_tunable()
            tunable_model = new_model_asymm.make_tunable()
            final_model = self.black_box_opt(model=tunable_model,
                                             contour=shape_contour,
                                             num_iters=self.num_iters,
                                             delta=5.0,
                                             exploration_factor=1.5,
                                             fine_tune=False,
                                             image=img)
            final_model = final_model.final()
        else:
            final_model = new_model_asymm
        final_model.draw_to_image(img=img_annotated,
                                  color=cv.CV_RGB(255, 0, 255))
        # Snap each fitted vertex to its nearest contour point.
        nearest_pts = []
        for vert in final_model.polygon_vertices():
            nearest_pt = min(shape_contour,
                             key=lambda pt: Vector2D.pt_distance(pt, vert))
            cv.Circle(img_annotated, nearest_pt, 5, cv.CV_RGB(255, 255, 255),
                      3)
            nearest_pts.append(nearest_pt)

        fitted_model = Models.Point_Model_Contour_Only_Asymm(*nearest_pts)
        #fitted_model = final_model
        if SHOW_FITTED:
            fitted_model.draw_to_image(img=img_annotated,
                                       color=cv.CV_RGB(0, 255, 255))
        return (nearest_pts, final_model, fitted_model)