Example #1
def score(testfile, correctfile):
    # Compare a test annotation file against the ground-truth annotation file.
    test_pts = Annotating.read_anno(testfile)
    correct_pts = Annotating.read_anno(correctfile)
    rel_pts = []
    # Optionally skip the collar landmarks for sweaters and tees.
    if IGNORE_COLLAR and (MODE == SWEATER or MODE == TEE):
        check_points = (0, 1, 2, 3, 4, 8, 9, 10, 11, 12)
    else:
        check_points = range(len(test_pts))
    errors = []
    (x_axis, y_axis) = get_axes(correct_pts)
    for i in check_points:
        test_pt = test_pts[i]
        correct_pt = correct_pts[i]
        # Offset of the test point, expressed in the ground-truth (x, y) frame.
        rel_pt = Vector2D.pt_diff(test_pt, correct_pt)
        rel_pts.append((Vector2D.dot_prod(rel_pt, x_axis),
                        Vector2D.dot_prod(rel_pt, y_axis)))
        error = Vector2D.pt_distance(test_pt, correct_pt)
        errors.append(error)
    # Return the mean landmark error and the per-landmark relative offsets.
    return (lst_avg(errors), rel_pts)
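score() relies on two helpers that are not shown in this example: lst_avg, which averages the per-landmark errors, and get_axes, which derives a local (x, y) frame from the ground-truth points. A minimal sketch of what such helpers could look like (the axis construction is an assumption for illustration, not the project's actual get_axes):

import math

def lst_avg(lst):
    # Mean of a non-empty list of numbers.
    return sum(lst) / float(len(lst))

def get_axes_sketch(correct_pts):
    # Hypothetical stand-in for get_axes: take the direction from the first to
    # the last ground-truth point as the x axis, and its perpendicular as y.
    (x0, y0) = correct_pts[0]
    (x1, y1) = correct_pts[-1]
    (dx, dy) = (x1 - x0, y1 - y0)
    norm = math.sqrt(dx * dx + dy * dy) or 1.0
    x_axis = (dx / norm, dy / norm)
    y_axis = (-x_axis[1], x_axis[0])
    return (x_axis, y_axis)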
def sampled(line, NUM_SAMPLES):
    # Place NUM_SAMPLES points evenly along the line, starting at its first endpoint.
    pts = []
    (start, end) = Vector2D.end_points(line)
    dist = Vector2D.pt_distance(start, end)
    for i in range(NUM_SAMPLES):
        pt = Vector2D.extrapolate(line, dist * i / float(NUM_SAMPLES))
        pts.append(pt)
    return pts
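For reference, the same even spacing can be written without the Vector2D helpers. This standalone sketch (hypothetical, assuming a line segment is given as two (x, y) endpoint tuples) produces the same points as sampled(): the start point is included, the end point is not.

def sampled_plain(start, end, num_samples):
    # Evenly place num_samples points along the segment from start to end.
    (x0, y0) = start
    (x1, y1) = end
    pts = []
    for i in range(num_samples):
        t = i / float(num_samples)
        pts.append((x0 + t * (x1 - x0), y0 + t * (y1 - y0)))
    return pts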
Example #3
def score(testfile, correctfile):
    test_pts = Annotating.read_anno(testfile)
    correct_pts = Annotating.read_anno(correctfile)
    net_error = 0.0
    max_error = 0.0
    rel_pts = []
    if IGNORE_COLLAR:
        if MODE == SWEATER or MODE == TEE:
            check_points = (0, 1, 2, 3, 4, 8, 9, 10, 11, 12)
        else:
            check_points = range(len(test_pts))
    else:
        check_points = range(len(test_pts))
    errors = []
    (x_axis, y_axis) = get_axes(correct_pts)
    for i in check_points:
        test_pt = test_pts[i]
        correct_pt = correct_pts[i]
        rel_pt = Vector2D.pt_diff(test_pt, correct_pt)
        rel_pts.append((Vector2D.dot_prod(rel_pt, x_axis),
                        Vector2D.dot_prod(rel_pt, y_axis)))
        error = Vector2D.pt_distance(test_pt, correct_pt)
        errors.append(error)
    return (lst_avg(errors), rel_pts)
Example #4
    def fit(self, model, contour, img_annotated=None, img=None):
        assert not model.illegal()
        if not img_annotated:
            # No annotation image supplied: size a blank white canvas to the contour.
            xs = [x for (x, y) in contour]
            ys = [y for (x, y) in contour]
            width = max(xs) - min(xs)
            height = max(ys) - min(ys)
            # The original snippet cleared img_annotated without allocating it first;
            # presumably an image of this size is meant to be created here.
            img_annotated = cv.CreateImage((width, height), cv.IPL_DEPTH_8U, 3)
            cv.Set(img_annotated, cv.CV_RGB(255, 255, 255))
        model.set_image(cv.CloneImage(img_annotated))
        shape_contour = contour
        
        if SHOW_CONTOURS:
            cv.DrawContours(img_annotated,shape_contour,cv.CV_RGB(255,0,0),cv.CV_RGB(255,0,0),0,1,8,(0,0))
        if self.INITIALIZE:
            self.printout("INITIALIZING")
            (real_center,real_top,real_theta,real_scale) = get_principle_info(shape_contour)
            if SHOW_UNSCALED_MODEL:
                model.draw_to_image(img_annotated,cv.CV_RGB(0,0,255))
            model_contour = model.vertices_dense(constant_length=False,density=30,includeFoldLine = False)
            (model_center,model_top,model_theta,model_scale) = get_principle_info(model_contour)
            displ = displacement(model_center,real_center)
            
            #Drawing
            if SHOW_INIT_PTS:
                top_img = cv.CloneImage(img_annotated)
                cv.DrawContours(top_img,shape_contour,cv.CV_RGB(255,0,0),cv.CV_RGB(255,0,0),0,1,8,(0,0))
                model.draw_contour(top_img,cv.CV_RGB(0,0,255),2)
                draw_pt(top_img,real_top,cv.CV_RGB(255,0,0))
                draw_pt(top_img,real_center,cv.CV_RGB(255,0,0))
                draw_pt(top_img,model_top,cv.CV_RGB(0,0,255))
                draw_pt(top_img,model_center,cv.CV_RGB(0,0,255))
                cv.NamedWindow("Top")
                cv.ShowImage("Top",top_img)
                cv.WaitKey()
            
            angle = model_theta - real_theta
            #if self.ORIENT_OPT:
            #    angle = 0
            scale = real_scale/float(model_scale)
            if scale < 0.25:
                scale = 1
            model_trans = translate_poly(model.polygon_vertices(),displ)
            model_rot = rotate_poly(model_trans,-1*angle,real_center)
            model_scaled = scale_poly(model_rot,scale,real_center)
                      
            """ DEBUG
            print "/**************Test****************/"
            A = [ (int(pt[0]),int(pt[1])) for pt in model_trans]
            B = [ (int(pt[0]),int(pt[1])) for pt in model_rot]
            C = [ (int(pt[0]),int(pt[1])) for pt in model_scaled]
            cv.NamedWindow("Debug window")
            im = cv.CloneImage(img_annotated)
            model.draw_contour(im,cv.CV_RGB(100,100,100),2)
            cv.DrawContours(im,shape_contour,cv.CV_RGB(255,0,0),cv.CV_RGB(255,0,0),0,1,8,(0,0))
            draw_pt(im,real_top,cv.CV_RGB(255,0,0))
            draw_pt(im,real_center,cv.CV_RGB(255,0,0))
            draw_pt(im,model_top,cv.CV_RGB(100,100,100))
            draw_pt(im,model_center,cv.CV_RGB(100,100,100))
            cv.PolyLine(im,[A],1,cv.CV_RGB(255,0,0),1)   
            cv.ShowImage("Debug window",im)
            cv.WaitKey()            
            cv.PolyLine(im,[B],1,cv.CV_RGB(0,255,0),1)  
            cv.ShowImage("Debug window",im)
            cv.WaitKey()             
            cv.PolyLine(im,[C],1,cv.CV_RGB(0,0,255),1)               
            cv.ShowImage("Debug window",im)
            cv.WaitKey()
            cv.DestroyWindow("Debug window")
            print "/************EndOfTest*************/"
            self.exitIfKeyPressed("c");
            #"""
               
            #(model_center,model_top,model_theta,model_scale) = get_principle_info(model_scaled)
        
                
            #Do the same to the actual model
            
            # translate model
            model.translate(displ,True)
            """ DEBUG
            print "/**************Test****************/"
            cv.NamedWindow("Translate model")
            im = cv.CloneImage(img_annotated)
            cv.PolyLine(im,[model.polygon_vertices_int()],1,cv.CV_RGB(0,0,255),1)               
            cv.ShowImage("Translate model",im)
            cv.WaitKey()
            cv.DestroyWindow("Translate model")
            print "/************EndOfTest*************/"
            #"""
            
            #rotate model
            if self.ROTATE:
                model.rotate(-1*angle,real_center,True)
            """ DEBUG
            print "/**************Test****************/"
            cv.NamedWindow("Rotate model")
            im = cv.CloneImage(img_annotated)
            cv.PolyLine(im,[model.polygon_vertices_int()],1,cv.CV_RGB(0,0,255),1)               
            cv.ShowImage("Rotate model",im)
            cv.WaitKey()
            cv.DestroyWindow("Rotate model")
            print "/************EndOfTest*************/"
            #"""
                
            #scale model
            model.scale(scale,real_center,True)       
            if SHOW_SCALED_MODEL:
                model.draw_to_image(img_annotated,cv.CV_RGB(0,0,255))
            """ DEBUG
            print "/**************Test****************/"
            cv.NamedWindow("Scale model")
            im = cv.CloneImage(img_annotated)
            cv.PolyLine(im,[model.polygon_vertices_int()],1,cv.CV_RGB(0,0,255),1)               
            cv.ShowImage("Scale model",im)
            cv.WaitKey()
            cv.DestroyWindow("Scale model")
            print "/************EndOfTest*************/"
            #"""

        self.printout("Energy is: %f"%model.score(shape_contour))
        self.printout("Shape contour has %d points"%(len(shape_contour)))
        sparse_shape_contour = make_sparse(shape_contour,1000)
        """ DEBUG
        print "/**************Test****************/"
        cv.NamedWindow("Sparse_shape_contour model")
        im = cv.CloneImage(img_annotated)
        cv.PolyLine(im,[sparse_shape_contour],1,cv.CV_RGB(0,0,255),1)               
        cv.ShowImage("Sparse_shape_contour model",im)
        cv.WaitKey()
        cv.DestroyWindow("Sparse_shape_contour model")
        print "/************EndOfTest*************/"
        #"""
        
        #Optimize
        # Orientation phase 
        if self.ORIENT_OPT:
            self.printout("ORIENTATION OPTIMIZATION")
            init_model = Models.Orient_Model(model,pi/2)
            orient_model_finished = self.black_box_opt(model=init_model,contour=shape_contour,num_iters = self.num_iters,delta=init_model.preferred_delta(),epsilon = 0.01,mode="orient",image=img) 
            """ DEBUG
            print "/**************Test****************/"
            cv.NamedWindow("Orientation phase: final model")
            im = cv.CloneImage(img_annotated)
            cv.PolyLine(im,[orient_model_finished.polygon_vertices_int()],1,cv.CV_RGB(0,0,255),1)               
            cv.ShowImage("Orientation phase: final model",im)
            cv.WaitKey()
            cv.DestroyWindow("Orientation phase: final model")
            print "/************EndOfTest*************/"
            #"""
            model_oriented = orient_model_finished.transformed_model()
        else:
            model_oriented = model
            
        # Symmetric phase 
        if self.SYMM_OPT:
            self.printout("SYMMETRIC OPTIMIZATION")
            new_model_symm = self.black_box_opt(model=model_oriented,contour=shape_contour,num_iters = self.num_iters,delta=model.preferred_delta(),epsilon = 0.01,mode="symm",image=img)
            """ DEBUG
            print "/**************Test****************/"
            cv.NamedWindow("Symmetric phase: final model")
            im = cv.CloneImage(img_annotated)
            cv.PolyLine(im,[new_model_symm.polygon_vertices_int()],1,cv.CV_RGB(0,0,255),1)               
            cv.ShowImage("Symmetric phase: final model",im)
            cv.WaitKey()
            cv.DestroyWindow("Symmetric phase: final model")
            print "/************EndOfTest*************/"
            #"""
        else:
            new_model_symm = model_oriented    
        if SHOW_SYMM_MODEL:
            new_model_symm.draw_to_image(img=img_annotated,color=cv.CV_RGB(0,255,0))
        
        # Asymmetric phase  
        model=new_model_symm.make_asymm()
        if self.HIGH_EXPLORATION:
            exp_factor = 3.0
        else:
            exp_factor = 1.5
        if self.ASYMM_OPT:
            self.printout("ASYMMETRIC OPTIMIZATION")
            new_model_asymm = self.black_box_opt(model=model,contour=shape_contour,num_iters=self.num_iters,delta=model.preferred_delta(),exploration_factor=exp_factor,fine_tune=False,mode="asymm",image=img)
            """ DEBUG
            print "/**************Test****************/"
            cv.NamedWindow("Asymmetric phase: final model")
            im = cv.CloneImage(img_annotated)
            cv.PolyLine(im,[new_model_symm.polygon_vertices_int()],1,cv.CV_RGB(0,0,255),1)               
            cv.ShowImage("Asymmetric phase: final model",im)
            cv.WaitKey()
            cv.DestroyWindow("Asymmetric phase: final model")
            print "/************EndOfTest*************/"
            #"""
        else:
            new_model_asymm = model
        
        #Final tune phase
        if self.FINE_TUNE:
            self.printout("FINAL TUNE")
            #tunable_model = model_oriented.make_tunable()
            tunable_model = new_model_asymm.make_tunable()
            final_model = self.black_box_opt(model=tunable_model,contour=shape_contour,num_iters=self.num_iters,delta=5.0,exploration_factor=1.5,fine_tune=False,image=img)
            final_model = final_model.final()
        else:
            final_model = new_model_asymm
        final_model.draw_to_image(img=img_annotated,color=cv.CV_RGB(255,0,255))
        
        # Find nearest points 
        nearest_pts = []

        for vert in final_model.polygon_vertices():
            nearest_pt = min(shape_contour,key=lambda pt: Vector2D.pt_distance(pt,vert))
            cv.Circle(img_annotated,nearest_pt,5,cv.CV_RGB(255,255,255),3)
            nearest_pts.append(nearest_pt)
                
        fitted_model = Models.Point_Model_Contour_Only_Asymm(*nearest_pts)

        if SHOW_FITTED:
            fitted_model.draw_to_image(img=img_annotated,color=cv.CV_RGB(0,255,255))
        return (nearest_pts,final_model,fitted_model)
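The last step of fit() simply snaps every fitted vertex to its nearest point on the observed contour before building the Point_Model_Contour_Only_Asymm. The same idea as a standalone sketch (snap_to_contour is a hypothetical helper using plain (x, y) tuples rather than the project's Vector2D module):

def snap_to_contour(vertices, contour):
    # For each model vertex, return the contour point at minimal Euclidean distance.
    def dist2(p, q):
        return (p[0] - q[0]) ** 2 + (p[1] - q[1]) ** 2
    return [min(contour, key=lambda pt: dist2(pt, v)) for v in vertices]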
Example #5
    def fit(self, model, contour, img_annotated=None, img=None):
        assert not model.illegal()
        if not img_annotated:
            xs = [x for (x, y) in contour]
            ys = [y for (x, y) in contour]
            width = max(xs) - min(xs)
            height = max(ys) - min(ys)
            # Presumably the blank canvas must be allocated before it is cleared.
            img_annotated = cv.CreateImage((width, height), cv.IPL_DEPTH_8U, 3)
            cv.Set(img_annotated, cv.CV_RGB(255, 255, 255))
        model.set_image(cv.CloneImage(img_annotated))
        
        shape_contour = contour
        
        if SHOW_CONTOURS:
            cv.DrawContours(img_annotated,shape_contour,cv.CV_RGB(255,0,0),cv.CV_RGB(255,0,0),0,1,8,(0,0))
        if self.INITIALIZE:
            self.printout("INITIALIZING")
            (real_center,real_top,real_theta,real_scale) = get_principle_info(shape_contour)
            if SHOW_UNSCALED_MODEL:
                model.draw_to_image(img_annotated,cv.CV_RGB(0,0,255))
            model_contour = model.vertices_dense(constant_length=False,density=30)
            (model_center,model_top,model_theta,model_scale) = get_principle_info(model_contour)
            displ = displacement(model_center,real_center)
            
            #Drawing
            if SHOW_INIT_PTS:
                top_img = cv.CloneImage(img_annotated)
                cv.DrawContours(top_img,shape_contour,cv.CV_RGB(255,0,0),cv.CV_RGB(255,0,0),0,1,8,(0,0))
                model.draw_contour(top_img,cv.CV_RGB(0,0,255),2)
                draw_pt(top_img,real_top,cv.CV_RGB(255,0,0))
                draw_pt(top_img,real_center,cv.CV_RGB(255,0,0))
                draw_pt(top_img,model_top,cv.CV_RGB(0,0,255))
                draw_pt(top_img,model_center,cv.CV_RGB(0,0,255))
                cv.NamedWindow("Top")
                cv.ShowImage("Top",top_img)
                cv.WaitKey()
            
            angle = model_theta - real_theta
            if self.ORIENT_OPT:
                angle = 0
            scale = real_scale/float(model_scale)
            if scale < 0.25:
                scale = 1
            model_trans = translate_poly(model.polygon_vertices(),displ)
            model_rot = rotate_poly(model_trans,-1*angle,real_center)
            model_scaled = scale_poly(model_rot,scale,real_center)
               
            #(model_center,model_top,model_theta,model_scale) = get_principle_info(model_scaled)
        
                
            #Do the same to the actual model

            model.translate(displ)
            if self.ROTATE:
                model.rotate(-1*angle,real_center)
            model.scale(scale,real_center)
        
            if SHOW_SCALED_MODEL:
                model.draw_to_image(img_annotated,cv.CV_RGB(0,0,255))
    
        self.printout("Energy is: %f"%model.score(shape_contour))
        self.printout("Shape contour has %d points"%(len(shape_contour)))
        sparse_shape_contour = make_sparse(shape_contour,1000)
            
        #Optimize
        if self.ORIENT_OPT:
            init_model = Models.Orient_Model(model,pi/2)
            orient_model_finished = self.black_box_opt(model=init_model,contour=shape_contour,num_iters = self.num_iters,delta=init_model.preferred_delta(),epsilon = 0.01,mode="orient",image=img) 
            model_oriented = orient_model_finished.transformed_model()
        else:
            model_oriented = model
       
        if self.SYMM_OPT:
            self.printout("SYMMETRIC OPTIMIZATION")
            new_model_symm = self.black_box_opt(model=model_oriented, contour=shape_contour, num_iters=self.num_iters, delta=model.preferred_delta(), epsilon=0.01, mode="symm", image=img)
        else:
            new_model_symm = model_oriented
        if SHOW_SYMM_MODEL:
            new_model_symm.draw_to_image(img=img_annotated, color=cv.CV_RGB(0, 255, 0))
        model=new_model_symm.make_asymm()
        if self.HIGH_EXPLORATION:
            exp_factor = 3.0
        else:
            exp_factor = 1.5
        if self.ASYMM_OPT:
            new_model_asymm = self.black_box_opt(model=model,contour=shape_contour,num_iters=self.num_iters,delta=model.preferred_delta(),exploration_factor=exp_factor,fine_tune=False,mode="asymm",image=img)
        else:
            new_model_asymm = model
        
        if self.FINE_TUNE:
            #tunable_model = model_oriented.make_tunable()
            tunable_model = new_model_asymm.make_tunable()
            final_model = self.black_box_opt(model=tunable_model,contour=shape_contour,num_iters=self.num_iters,delta=5.0,exploration_factor=1.5,fine_tune=False,image=img)
            final_model = final_model.final()
        else:
            final_model = new_model_asymm
        final_model.draw_to_image(img=img_annotated,color=cv.CV_RGB(255,0,255))
        nearest_pts = []
        for vert in final_model.polygon_vertices():
            nearest_pt = min(shape_contour,key=lambda pt: Vector2D.pt_distance(pt,vert))
            cv.Circle(img_annotated,nearest_pt,5,cv.CV_RGB(255,255,255),3)
            nearest_pts.append(nearest_pt)
                
        fitted_model = Models.Point_Model_Contour_Only_Asymm(*nearest_pts)
        #fitted_model = final_model
        if SHOW_FITTED:
            fitted_model.draw_to_image(img=img_annotated,color=cv.CV_RGB(0,255,255))
        return (nearest_pts,final_model,fitted_model)
Example #6
    def fit(self, model, contour, img_annotated=None, img=None):
        assert not model.illegal()
        if not img_annotated:
            xs = [x for (x, y) in contour]
            ys = [y for (x, y) in contour]
            width = max(xs) - min(xs)
            height = max(ys) - min(ys)
            # Presumably the blank canvas must be allocated before it is cleared.
            img_annotated = cv.CreateImage((width, height), cv.IPL_DEPTH_8U, 3)
            cv.Set(img_annotated, cv.CV_RGB(255, 255, 255))
        model.set_image(cv.CloneImage(img_annotated))

        shape_contour = contour

        if SHOW_CONTOURS:
            cv.DrawContours(img_annotated, shape_contour, cv.CV_RGB(255, 0, 0),
                            cv.CV_RGB(255, 0, 0), 0, 1, 8, (0, 0))
        if self.INITIALIZE:
            self.printout("INITIALIZING")
            (real_center, real_top, real_theta,
             real_scale) = get_principle_info(shape_contour)
            if SHOW_UNSCALED_MODEL:
                model.draw_to_image(img_annotated, cv.CV_RGB(0, 0, 255))
            model_contour = model.vertices_dense(constant_length=False,
                                                 density=30)
            (model_center, model_top, model_theta,
             model_scale) = get_principle_info(model_contour)
            displ = displacement(model_center, real_center)

            #Drawing
            if SHOW_INIT_PTS:
                top_img = cv.CloneImage(img_annotated)
                cv.DrawContours(top_img, shape_contour, cv.CV_RGB(255, 0, 0),
                                cv.CV_RGB(255, 0, 0), 0, 1, 8, (0, 0))
                model.draw_contour(top_img, cv.CV_RGB(0, 0, 255), 2)
                draw_pt(top_img, real_top, cv.CV_RGB(255, 0, 0))
                draw_pt(top_img, real_center, cv.CV_RGB(255, 0, 0))
                draw_pt(top_img, model_top, cv.CV_RGB(0, 0, 255))
                draw_pt(top_img, model_center, cv.CV_RGB(0, 0, 255))
                cv.NamedWindow("Top")
                cv.ShowImage("Top", top_img)
                cv.WaitKey()

            angle = model_theta - real_theta
            if self.ORIENT_OPT:
                angle = 0
            scale = real_scale / float(model_scale)
            if scale < 0.25:
                scale = 1
            model_trans = translate_poly(model.polygon_vertices(), displ)
            model_rot = rotate_poly(model_trans, -1 * angle, real_center)
            model_scaled = scale_poly(model_rot, scale, real_center)

            #(model_center,model_top,model_theta,model_scale) = get_principle_info(model_scaled)

            #Do the same to the actual model

            model.translate(displ)
            if self.ROTATE:
                model.rotate(-1 * angle, real_center)
            model.scale(scale, real_center)

            if SHOW_SCALED_MODEL:
                model.draw_to_image(img_annotated, cv.CV_RGB(0, 0, 255))

        self.printout("Energy is: %f" % model.score(shape_contour))
        self.printout("Shape contour has %d points" % (len(shape_contour)))
        sparse_shape_contour = make_sparse(shape_contour, 1000)

        #Optimize
        if self.ORIENT_OPT:
            init_model = Models.Orient_Model(model, pi / 2)
            orient_model_finished = self.black_box_opt(
                model=init_model,
                contour=shape_contour,
                num_iters=self.num_iters,
                delta=init_model.preferred_delta(),
                epsilon=0.01,
                mode="orient",
                image=img)
            model_oriented = orient_model_finished.transformed_model()
        else:
            model_oriented = model

        if self.SYMM_OPT:
            self.printout("SYMMETRIC OPTIMIZATION")
            new_model_symm = self.black_box_opt(model=model_oriented,
                                                contour=shape_contour,
                                                num_iters=self.num_iters,
                                                delta=model.preferred_delta(),
                                                epsilon=0.01,
                                                mode="symm",
                                                image=img)
        else:
            new_model_symm = model_oriented
        if SHOW_SYMM_MODEL:
            new_model_symm.draw_to_image(img=img_annotated,
                                         color=cv.CV_RGB(0, 255, 0))
        model = new_model_symm.make_asymm()
        if self.HIGH_EXPLORATION:
            exp_factor = 3.0
        else:
            exp_factor = 1.5
        if self.ASYMM_OPT:
            new_model_asymm = self.black_box_opt(model=model,
                                                 contour=shape_contour,
                                                 num_iters=self.num_iters,
                                                 delta=model.preferred_delta(),
                                                 exploration_factor=exp_factor,
                                                 fine_tune=False,
                                                 mode="asymm",
                                                 image=img)
        else:
            new_model_asymm = model

        if self.FINE_TUNE:
            #tunable_model = model_oriented.make_tunable()
            tunable_model = new_model_asymm.make_tunable()
            final_model = self.black_box_opt(model=tunable_model,
                                             contour=shape_contour,
                                             num_iters=self.num_iters,
                                             delta=5.0,
                                             exploration_factor=1.5,
                                             fine_tune=False,
                                             image=img)
            final_model = final_model.final()
        else:
            final_model = new_model_asymm
        final_model.draw_to_image(img=img_annotated,
                                  color=cv.CV_RGB(255, 0, 255))
        nearest_pts = []
        for vert in final_model.polygon_vertices():
            nearest_pt = min(shape_contour,
                             key=lambda pt: Vector2D.pt_distance(pt, vert))
            cv.Circle(img_annotated, nearest_pt, 5, cv.CV_RGB(255, 255, 255),
                      3)
            nearest_pts.append(nearest_pt)

        fitted_model = Models.Point_Model_Contour_Only_Asymm(*nearest_pts)
        #fitted_model = final_model
        if SHOW_FITTED:
            fitted_model.draw_to_image(img=img_annotated,
                                       color=cv.CV_RGB(0, 255, 255))
        return (nearest_pts, final_model, fitted_model)
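Across all of these fit() variants, the INITIALIZE block amounts to a coarse translate / rotate / scale alignment of the model polygon onto the observed contour. A self-contained sketch of that transform chain (align_polygon is hypothetical and only illustrates the math behind translate_poly, rotate_poly and scale_poly; it is not the project's implementation):

import math

def align_polygon(model_pts, displ, angle, scale, center):
    # Translate by displ, rotate by -angle about center, then scale about center,
    # mirroring the order used in the INITIALIZE block above.
    (dx, dy) = displ
    (cx, cy) = center
    (ca, sa) = (math.cos(-angle), math.sin(-angle))
    out = []
    for (x, y) in model_pts:
        (tx, ty) = (x + dx, y + dy)                  # translate
        rx = cx + ca * (tx - cx) - sa * (ty - cy)    # rotate about center
        ry = cy + sa * (tx - cx) + ca * (ty - cy)
        out.append((cx + scale * (rx - cx),          # scale about center
                    cy + scale * (ry - cy)))
    return out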