def estimate(self, first_set, second_set):
        set_length = len(first_set)
        best_model = None
        best_model_inliers = 0
        inliers_indices = []
        range_set_length = range(set_length)

        for _ in xrange(self.max_iterations):
            if set_length < 3:
                # fewer than 3 point pairs cannot constrain the model;
                # fall through to the direct estimate below
                break
            random_indices = None
            if set_length > self.hypothesis_set_length:
                # This will return a list of hypothesis_set_length numbers selected from the range 0 to set_length, without duplicates.
                random_indices = random.sample(range_set_length, self.hypothesis_set_length)
            else:
                random_indices = range_set_length

            # create random subset
            first_subset = np.array([first_set[i] for i in random_indices], dtype=np.float32)
            second_subset = np.array([second_set[i] for i in random_indices], dtype=np.float32)

            # estimate model on random subset
            current_model = cv2.estimateRigidTransform(first_subset, second_subset, fullAffine=False)

            if current_model is None:
                continue

            current_model_inliers = 0
            current_inliers_indices = []

            # count inliers
            for index in xrange(set_length):
                transformed_point = transform(current_model, first_set[index][0])
                error = math.sqrt(
                    math.pow(transformed_point[0] - second_set[index][0][0], 2)
                    + math.pow(transformed_point[1] - second_set[index][0][1], 2)
                )

                if error < self.max_distance:
                    current_model_inliers += 1
                    if self.remember_inlier_indices:
                        current_inliers_indices.append(index)

            if current_model_inliers > best_model_inliers:
                best_model = current_model
                best_model_inliers = current_model_inliers
                inliers_indices = current_inliers_indices

        if best_model is None or (self.min_inliers is not None and best_model_inliers < self.min_inliers):
            best_model = cv2.estimateRigidTransform(first_set, second_set, fullAffine=False)
            if self.remember_inlier_indices:
                inliers_indices = [i for i in xrange(set_length)]

        return best_model, inliers_indices
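# A minimal sketch of the same RANSAC idea using cv2.estimateAffinePartial2D,
# the replacement API in OpenCV 4.x (synthetic points; the function runs RANSAC
# internally and returns an inlier mask alongside the 2x3 matrix):
import cv2
import numpy as np

first_set = np.random.rand(20, 2).astype(np.float32)
second_set = first_set + np.float32([5.0, -3.0])  # pure translation

M, inliers = cv2.estimateAffinePartial2D(first_set, second_set,
                                         method=cv2.RANSAC,
                                         ransacReprojThreshold=3.0)
print(M)                 # 2x3 matrix: rotation, uniform scale, translation
print(inliers.ravel())   # 1 for inliers, 0 for outliers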
Example #2
def RANSAC_2D(point_set1, point_set2, iteration, tolerance):
    x_mean = np.mean(point_set2[:,0])
    y_mean = np.mean(point_set2[:,1])
    print(x_mean)
    print(y_mean)
    point_count = point_set1.shape[0]
    best_M = None
    best_inlier = 0
    best_model = None
    for i in range(0, iteration):
        sample_index = np.random.choice(range(0, point_count), 3, replace = False)
        first_set = np.array([point_set1[sample_index]], dtype = np.float32)
        second_set = np.array([point_set2[sample_index]], dtype = np.float32)
        #print first_set.shape
        transformation = cv2.estimateRigidTransform(first_set, second_set, fullAffine = False)
        if transformation is None:
            continue
        #print '******'
        #print first_set
        #print second_set
        #print '******'
        #transformation[1,2] *= -1
        new_point_set1 = np.asarray([T2D.Mirror(transformation, x, x_mean, y_mean) for x in point_set1], dtype=np.float32)
        distance_matrix = np.linalg.norm(new_point_set1 - point_set2, axis = 1)
        #print distance_matrix
        inlier = np.sum(distance_matrix <= tolerance)
        #print inlier
        if inlier > best_inlier:
            best_inlier = inlier
            best_M = transformation
            best_model = new_point_set1
    print(best_inlier)
    if best_M is not None:
        print(np.linalg.pinv(best_M[:, 0:2]))
    return best_M, best_model
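# A minimal sketch of applying a 2x3 affine to a whole point set at once with
# cv2.transform, which can replace per-point np.dot loops like the one above
# (synthetic points):
import cv2
import numpy as np

pts = np.array([[[0, 0], [1, 0], [0, 1]]], dtype=np.float32)  # shape (1, 3, 2)
M = np.float32([[1, 0, 5], [0, 1, -3]])                       # translate by (5, -3)
moved = cv2.transform(pts, M)                                 # shape (1, 3, 2)
print(moved)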
Example #3
def homography_transform(rotated_images1, rotated_images2):
    dst1 = rotated_images1
    dst2 = rotated_images2
    h, w = dst1.shape[:2]

    #compute matching features from two image pairs
    image1_pts, image2_pts = extract_and_match(dst1, dst2)


    # #code to output the features matched in each image
    # image = cv2.addWeighted(rotated_images1,0.5,rotated_images2,0.5,0)
    # image = draw_matches(image, image1_pts, image2_pts)
    # cv2.imshow('image', image)
    # cv2.waitKey(0)
    # cv2.destroyAllWindows()
    # sys.exit()


    print(image1_pts)
    print(image2_pts)
    n = len(image1_pts)
    image2_pts_np = np.reshape(np.float32(image2_pts),(1,n,2))
    image1_pts_np = np.reshape(np.float32(image1_pts),(1,n,2))

    #compute homography transform from img1 to img2
    if(len(image1_pts)>=4):
        homography, mask = cv2.findHomography(np.float32(image1_pts), np.float32(image2_pts), cv2.RANSAC, 1.0)
        affine = cv2.estimateRigidTransform(np.float32(image1_pts), np.float32(image2_pts), fullAffine=True)
        print(homography)
        return homography, affine
    else:
        return None, None
Example #4
    def align_5_points_eye_center(self, imgDim, rgbImg, five_points):
        '''
        @brief Align using five points: the two eye centers, the two mouth corners,
               and the nose; otherwise consistent with align_dlib_cpp.

        @param five_points: face landmark coordinates, an array of shape 10.
        @attention Suited to the data provided by CelebA.

        '''
        assert imgDim is not None
        assert rgbImg is not None
        from_points = []
        for i in range(0,10,2):
            from_points.append((five_points[i],five_points[i+1]))

        to_points = []
        five_mean_x = [(self.mean_shape_x[36-17]+self.mean_shape_x[39-17])*0.5,(self.mean_shape_x[42-17]+self.mean_shape_x[45-17])*0.5,self.mean_shape_x[33-17],self.mean_shape_x[48-17],self.mean_shape_x[54-17]]
        five_mean_y = [(self.mean_shape_y[36-17]+self.mean_shape_y[39-17])*0.5,(self.mean_shape_y[42-17]+self.mean_shape_y[45-17])*0.5,self.mean_shape_y[33-17],self.mean_shape_y[48-17],self.mean_shape_y[54-17]]
        for i in range(5):
            new_ref_x = (self.padding+five_mean_x[i])/(2*self.padding+1)
            new_ref_y = (self.padding+five_mean_y[i])/(2*self.padding+1)
            to_points.append((imgDim *new_ref_x,imgDim *new_ref_y))
            
        source = np.array(from_points).astype(np.int)
        target = np.array(to_points,).astype(np.int)
        source = np.reshape(source,(1,5,2))
        target = np.reshape(target,(1,5,2))

        H = cv2.estimateRigidTransform(source,target,False)
        if H is None:
            return
        else:
            aligned_face = cv2.warpAffine(rgbImg, H, (imgDim, imgDim))
            return aligned_face
Example #5
    def align_5_points(self, imgDim, rgbImg, five_points):
        '''
        @brief Align using five points: the two eye corners, the two mouth corners,
               and the nose; otherwise consistent with align_dlib_cpp.

        @param five_points: face landmark coordinates, an array of shape 10.
        @attention This version is suited to the two eye corners, the nose, and the mouth corners.

        '''
        assert imgDim is not None
        assert rgbImg is not None
        from_points = []
        for i in range(0,10,2):
            from_points.append((five_points[i],five_points[i+1]))

        to_points = []
        for i in [36-17,45-17,33-17,48-17,54-17]:
            new_ref_x = (self.padding+self.mean_shape_x[i])/(2*self.padding+1)
            new_ref_y = (self.padding+self.mean_shape_y[i])/(2*self.padding+1)
            to_points.append((imgDim *new_ref_x,imgDim *new_ref_y))
            
        source = np.array(from_points).astype(np.int)
        target = np.array(to_points,).astype(np.int)
        source = np.reshape(source,(1,5,2))
        target = np.reshape(target,(1,5,2))

        H = cv2.estimateRigidTransform(source,target,False)
        if H is None:
            return
        else:
            aligned_face = cv2.warpAffine(rgbImg, H, (imgDim, imgDim))
            return aligned_face
Example #6
 def align_similarity(self, imgDim, rgbImg, bb=None,
           landmarks=None, landmarkIndices=range(0,68)):
     '''
     @brief Similar to align_openface(), but computes the affine transform matrix from more points.
     '''
     assert imgDim is not None
     assert rgbImg is not None
     assert landmarkIndices is not None
     if bb is None:
         bb = self.getLargestFaceBoundingBox(rgbImg)
         if bb is None:
             return
     if landmarks is None:
         landmarks = self.findLandmarks(rgbImg, bb)
     npLandmarkIndices = np.array(landmarkIndices)
     npLandmarks = np.array(landmarks).astype(np.int)
     T = (imgDim * self.new_template).astype(np.int)
     source = np.reshape(npLandmarks[npLandmarkIndices],(1,len(npLandmarkIndices),2))
     target = np.reshape(T[npLandmarkIndices],(1,len(npLandmarkIndices),2))
     H = cv2.estimateRigidTransform(source,target,False)
     if H is None:
         return None
     else:
         aligned_face = cv2.warpAffine(rgbImg, H, (imgDim, imgDim))
         return aligned_face
Example #7
def getTransformationMatrixFromArray(A,B):
	# A and B both have to be numpy arrays containing the same number
	# of points, say n, so the shape of A and B is [1, n, 2].
	# Returns a 3*3 np.matrix and a success flag (1 = success, 0 = failure).
	T = cv2.estimateRigidTransform(A,B,False)
	return twoD_to_ThreeD_Affine(T)
Example #8
def icp(d1, d2, max_iterate = 100):
    src = np.array([d1.T], copy=True).astype(np.float32)
    dst = np.array([d2.T], copy=True).astype(np.float32)
    
    knn = cv2.KNearest()
    responses = np.array(range(len(d2[0]))).astype(np.float32)
    knn.train(src[0], responses)
        
    Tr = np.array([[np.cos(0), -np.sin(0), 0],
                   [np.sin(0), np.cos(0),  0],
                   [0,         0,          1]])

    dst = cv2.transform(dst, Tr[0:2])
    max_dist = sys.maxint
    
    scale_x = np.max(d1[0]) - np.min(d1[0])
    scale_y = np.max(d1[1]) - np.min(d1[1])
    scale = max(scale_x, scale_y)
       
    for i in range(max_iterate):
        ret, results, neighbours, dist = knn.find_nearest(dst[0], 1)
        
        indeces = results.astype(np.int32).T     
        indeces = del_miss(indeces, dist, max_dist)  
        
        T = cv2.estimateRigidTransform(dst[0, indeces], src[0, indeces], True)

        max_dist = np.max(dist)
        dst = cv2.transform(dst, T)
        Tr = np.dot(np.vstack((T,[0,0,1])), Tr)
        
        if (is_converge(T, scale)):
            break
        
    return Tr[0:2]
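# A standalone sketch of the homogeneous accumulation the ICP loop above
# performs with np.vstack((T, [0, 0, 1])): composing two 2x3 affines.
import numpy as np

def compose_affine(T2, T1):
    # returns the 2x3 affine equivalent to applying T1 first, then T2
    H1 = np.vstack((T1, [0, 0, 1]))
    H2 = np.vstack((T2, [0, 0, 1]))
    return np.dot(H2, H1)[:2]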
Example #9
def transform_marks(o_centers, new_centres, markers, shape, size=0.5):
    # bail out before touching new_centres
    if new_centres is None: return markers
    bounds = getArea(new_centres, shape)
    o_centers = centre_to_array(scale_centers(o_centers, size))
    new_centres = centre_to_array(scale_centers(new_centres, size))
    trs = cv2.estimateRigidTransform(o_centers, new_centres,True)

    if trs is None: return markers
    new_markers = np.zeros(markers.shape, dtype='uint8')
    #downsize for computation
    small_new = cv2.resize(new_markers, (0,0),fx=size, fy=size )
    small_markers = cv2.resize(markers, (0,0),fx=size, fy=size)
 
 
    for x in range(int(bounds[2]*size),int(bounds[3]*size)):
        for y in range(int(bounds[0]*size),int(bounds[1]*size)):
            try:
                if small_markers[x][y] <= 0: continue
            except IndexError:
                continue
            c = np.array([[x],[y],[1]])
 
            v = np.dot(trs,c)
            if v[0]>=0 and v[0] < len(small_markers) and v[1]>=0 and v[1] < len(small_markers[0]):
                small_new[int(v[0]),int(v[1])] = small_markers[x][y]
    new_markers = cv2.resize(small_new, (0,0),fx=1/size, fy=1/size, interpolation = cv2.INTER_NEAREST)
    return new_markers
Example #10
def rigid_scaling_fom_points(fp, tp):
    m = mean(fp[:2], axis=1)
    maxstd = max(std(fp[:2], axis=1)) + 1e-9
    C1 = diag([1 / maxstd, 1 / maxstd, 1]) 
    C1[0][2] = -m[0] / maxstd
    C1[1][2] = -m[1] / maxstd
    fp = dot(C1, fp)
    
    m = mean(tp[:2], axis=1)
    maxstd = max(std(tp[:2], axis=1)) + 1e-9
    C2 = diag([1 / maxstd, 1 / maxstd, 1])
    C2[0][2] = -m[0] / maxstd
    C2[1][2] = -m[1] / maxstd
    tp = dot(C2, tp)
    
    fp = float32(fp[:2,:]).T.reshape(1,-1,2)
    tp = float32(tp[:2,:]).T.reshape(1,-1,2)
    H = cv2.estimateRigidTransform(fp, tp, fullAffine=False)
    
    if H is None:
        return None
    H = vstack([H, [0, 0, 1]])
    
    H = dot(linalg.inv(C2), dot(H, C1))
    return H
Example #11
    def stitch_matrix(self, new_image):
        '''stitch_matrix(new_image)
            h -> homogeneous
            i -> image
            k -> keyframe
            root -> root
        '''
        im_to_keyframe = cv2.estimateRigidTransform(new_image, self.keyframe_image, False) 
        if im_to_keyframe is None:
            print('>Failed to map<')
            return False, None

        h_im_to_keyframe = self.make_homogeneous(im_to_keyframe)
        ang_i2k, dx_i2k, dy_i2k, sx, sy = self.motion_from_matrix(im_to_keyframe)

        if (np.fabs(sx - 1.0) > 0.01) or (np.fabs(sy - 1.0) > 0.01):
            # print sx, sy
            return False, h_im_to_keyframe

        h_i2root = np.dot(self.h_k2root, h_im_to_keyframe)
        ang_i2root, dx_i2root, dy_i2root, sxroot, syroot = self.motion_from_matrix(h_i2root)
        rotated = self.rotate(new_image, np.degrees(ang_i2root), scale=sx)
        # self.imshow("Rotated", rotated)

        self.overlay(
            rotated,
            self.keyframe_position[0] + dy_i2root,
            self.keyframe_position[1] + dx_i2root,
            self.full_map
        )

        self.imshow("map", self.full_map)

        return True, h_im_to_keyframe
Example #12
    def getPrevTransform(self):
        ret, prev = self.cap.read()
        prev_gray = cv2.cvtColor(prev, cv2.COLOR_BGR2GRAY)
        
        p0 = cv2.goodFeaturesToTrack(prev_gray, mask = None, **feature_params)
               
        while(True):
            ret, cur = self.cap.read()
            #print cur
            if cur is None:
                break
            
            cur_gray = cv2.cvtColor(cur,cv2.COLOR_BGR2GRAY)          
            
            #print prev_corner          
            
            img0, img1 = prev_gray, cur_gray
            
            # calculate optical flow
            p1, st, err = cv2.calcOpticalFlowPyrLK(img0, img1, p0, None, **lk_params)
                      
            # Select good points
            good_new = p1[st==1]
            good_old = p0[st==1]

            T = cv2.estimateRigidTransform(good_new, good_old, False)

            print("---")

            # advance to the next frame pair so tracking does not go stale
            prev_gray = cur_gray
            p0 = good_new.reshape(-1, 1, 2)
Example #13
def getTransformationMatrixFromArray(A,B):
	# A and B both have to be numpy arrays containing the same number
	# of points, say n, so the shape of A and B is [1, n, 2].
	# Returns a 3*3 np.matrix via twoD_to_ThreeD_Affine, or None on failure.
	T = cv2.estimateRigidTransform(A,B,False)
	if T is None:
		return None
	return twoD_to_ThreeD_Affine(T)
Example #14
def RANSAC_2D(point_set1, point_set2, iteration, tolerance):
    point_count = point_set1.shape[0]
    best_M = None
    best_inlier = 0
    best_model = None
    for i in range(0, iteration):
        sample_index = np.random.choice(range(0, point_count), 3, replace = False)
        first_set = np.array([point_set1[sample_index]], dtype = np.float32)
        second_set = np.array([point_set2[sample_index]], dtype = np.float32)
        #print first_set.shape
        transformation = cv2.estimateRigidTransform(first_set, second_set, fullAffine = False)
        if transformation is None:
            continue
        print('******')
        print(first_set)
        print(second_set)
        print('******')
        #transformation[1,2] *= -1
        new_point_set1 = np.asarray([np.dot(transformation, np.transpose(np.array([x[0], x[1], 1], dtype=np.float32))) for x in point_set1], dtype=np.float32)
        distance_matrix = np.linalg.norm(new_point_set1 - point_set2, axis = 1)
        inlier = np.sum(distance_matrix <= tolerance)
        #print inlier
        if inlier > best_inlier:
            best_inlier = inlier
            best_M = transformation
            best_model = new_point_set1
    print(best_inlier)
    return best_M, best_model
Example #15
def track_using_trajectories( cur, prev ):
    global curr_loc_ 
    global static_features_img_
    p0 = cv2.goodFeaturesToTrack( cur, 200, 0.01, 5 )
    insert_int_corners( p0 )

    draw_point( cur, p0, 1 )

    ellipse, p1 = update_mouse_location( p0 )
    if p1 is not None:
        for p in p1:
            cv2.circle( cur, p, 10, 20, 2 )
    cv2.ellipse( cur, ellipse, 1 )
    cv2.circle( cur, curr_loc_, 10, 255, 3)
    display_frame( cur, 1 )
    # cv2.imshow( 'static features', static_features_img_ )
    return 
    # Find a contour
    prevE = find_edges( prev )
    curE = find_edges( cur )
    img = curE - prevE
    cnts, hier = cv2.findContours( img, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE )
    cnts = filter( ismouse, cnts )
    cv2.drawContours( img, cnts, -1, 255, 3 )
    display_frame( img, 1)
    return 
    p1, status, err = cv2.calcOpticalFlowPyrLK( prev, cur, p0 )
    mat = cv2.estimateRigidTransform( p0, p1, False )
    # print cv2.warpAffine( curr_loc_, mat, dsize=(2,1) )
    if mat is not None:
        dx, dy = mat[:,2]
        da = math.atan2( mat[1,0], mat[0,0] )
        trajectory_.append( (dx, dy, da) )
        print( "Transformation", dx, dy, da )
        curr_loc_ = (curr_loc_[0] - int(dy), curr_loc_[1] - int(dx))
Example #16
 def getNorth(self, img_mat):
     ''' This method tries to estimate an
     angle for the north, given a sample
     image (img_mat, OpenCV matrix format).
     The angle is given in radians, with
     positive angles meaning clockwise
     rotation of the north (counter clock-
     wise rotation of the robot).
     '''
     kp2, des2 = self.sift.detectAndCompute(img_mat, None)
     results = []
     for kp1, des1 in self.ref_sifts:
         matches = self.flann.knnMatch(des1, des2, k=2)
         good = []
         for m, n in matches:
             if m.distance < 0.7*n.distance:
                 good.append(m)
         results.append((kp1, des1, good))
     kp1, des1, good = max(results, key=lambda x: len(x[2]))
     if len(good) > MIN_MATCHES:
         src_pts = np.float32([ kp1[m.queryIdx].pt for m in good ]).reshape(-1,1,2)
         dst_pts = np.float32([ kp2[m.trainIdx].pt for m in good ]).reshape(-1,1,2)
         M = cv2.estimateRigidTransform(src_pts, dst_pts, False)
         if M is None:
             return None
         angle = np.arctan2(M[1,0],M[0,0])
         return angle
     else:
         return None
Example #17
def imRigidTransform(img, srcPts, dstPts):
    srcPts = np.array([srcPts], np.float32)
    dstPts = np.array([dstPts], np.float32)
    M = cv2.estimateRigidTransform(srcPts, dstPts, False)
    if M is not None:
        # warpAffine needs an explicit output size; keep the input size
        return cv2.warpAffine(img, M, (img.shape[1], img.shape[0]))
    else:
        return None
Example #18
 def process(self, frame):
     if self.prev is not None:
         affine= cv2.estimateRigidTransform(frame, self.prev, True)
         self.paff = _compose(affine, self.paff)
     warp = cv2.warpAffine(frame, _translate(self.paff,(self.w/2)-60,(self.h/2)-48), (self.w, self.h))
     self.out[warp>0] = warp[warp>0]
     
     self.prev = frame
Example #19
def stabilize (img_list):
	cv_images = []

	for i in xrange(len(img_list)):
		cv_images.append(pil_to_opencv(img_list[i]))

	prev = cv_images[0]
	prev_gray = cv2.cvtColor(prev, cv2.COLOR_BGR2GRAY)

	transforms = []
	last_T = None

	for i in xrange(1, len(cv_images)):
		current = cv_images[i]
		current_gray = cv2.cvtColor(current, cv2.COLOR_BGR2GRAY)

		prev_corner = cv2.goodFeaturesToTrack(prev_gray, 200, 0.01, 30)
		current_corner, status, err = cv2.calcOpticalFlowPyrLK(prev_gray, current_gray, prev_corner, None)

		# keep only the point pairs that were tracked successfully
		prev_corner2 = prev_corner[status.ravel() == 1]
		current_corner2 = current_corner[status.ravel() == 1]

		T = cv2.estimateRigidTransform(prev_corner2, current_corner2, False)

		# reuse the last valid transform when estimation fails
		if T is None:
			T = last_T
		last_T = T

		dx = T[0, 2]
		dy = T[1, 2]
		da = numpy.arctan2(T[1, 0], T[0, 0])
		transforms.append((dx, dy, da))

		prev = current
		prev_gray = current_gray

	# accumulate transformations to get image trajectory
	x = 0
	y = 0
	a = 0

	trajectory_list = []

	for dx, dy, da in transforms:
		x += dx
		y += dy
		a += da
		trajectory_list.append((x, y, a))

	return img_list
Example #20
def findAffine(src, dst, fullAffine=False):
    #print "src = %s" % str(src)
    #print "dst = %s" % str(dst)
    if len(src) >= affine_minpts:
        affine = cv2.estimateRigidTransform(np.array([src]), np.array([dst]),
                                            fullAffine)
    else:
        affine = None
    #print str(affine)
    return affine
Example #21
def find_homography(four_pairs):
    src, dst = split_src_dst(four_pairs)
    src = np.float32(np.array(src)).reshape(-1, 1, 2)
    dst = np.float32(np.array(dst)).reshape(-1, 1, 2)
    transform = cv2.estimateRigidTransform(src, dst, False)
    if transform is None:
        return None
    transform = np.vstack((transform, np.array([0, 0, 1])))
    print('rigid transform:', transform)

    return transform
Example #22
def reestimate_transform(transform, points):
    src, dst = split_src_dst(points)
    src = np.float32(np.array(src)).reshape(-1, 1, 2)
    dst = np.float32(np.array(dst)).reshape(-1, 1, 2)
    transform = cv2.estimateRigidTransform(src, dst, False)
    if transform is None:
        return None
    transform = np.vstack((transform, np.array([0, 0, 1])))

    return transform
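# A small sketch of applying the 3x3 matrix returned above to 2D points
# (append a homogeneous coordinate, multiply, drop the last row; synthetic data):
import numpy as np

pts = np.array([[10.0, 20.0], [30.0, 40.0]])   # synthetic points
H = np.eye(3)                                  # stand-in for a result of find_homography(...)
ones = np.ones((len(pts), 1))
mapped = np.dot(H, np.hstack([pts, ones]).T).T[:, :2]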
Example #23
def getTransformParams( videoCapture, start=0, end=None ):
    assert(videoCapture.isOpened())
    # ensure that we start at the specified frame
    videoCapture.set(cv2.cv.CV_CAP_PROP_POS_FRAMES, start)
    maximum = int(videoCapture.get(cv2.cv.CV_CAP_PROP_FRAME_COUNT))
    if end is None or end > maximum:
        end = maximum
    assert(end > start)

    curr, currGray = None, None
    prev, prevGray = None, None

    _, prev = videoCapture.read()
    prevGray = cv2.cvtColor(prev, cv2.COLOR_BGR2GRAY)

    transforms = []
    last = None

    for i in range(end - start):
        if not videoCapture.grab(): break
        _, curr = videoCapture.retrieve()
        currGray = cv2.cvtColor(curr, cv2.COLOR_BGR2GRAY)

        prevPt, currPt = [], []
        prevPtFilter, currPtFilter = [], []

        prevPt = cv2.goodFeaturesToTrack(prevGray, mask=None, **FEATURE_PARAMS)
        currPt, stats, _ = cv2.calcOpticalFlowPyrLK(prevGray, currGray, prevPt, None, **LK_PARAMS)

        # get rid of bad matches
        for j, stat in enumerate(stats):
            if stat:
                prevPtFilter.append(prevPt[j])
                currPtFilter.append(currPt[j])

        # estimate translation and rotation only
        m = cv2.estimateRigidTransform(np.array(prevPtFilter), np.array(currPtFilter), False)

        # check for transform not found
        if m is None:
            m = last.copy()
        last = m.copy()

        dx = m[0][2]
        dy = m[1][2]
        da = np.arctan2(m[1][0], m[0][0])
        transforms.append((dx, dy, da))

        prev = curr.copy()
        prevGray = currGray.copy()

    return transforms
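# A sketch of the inverse step: rebuilding a 2x3 matrix from one of the
# (dx, dy, da) triples accumulated above (rotation plus translation, no scale):
import numpy as np

def transform_from_params(dx, dy, da):
    return np.array([[np.cos(da), -np.sin(da), dx],
                     [np.sin(da),  np.cos(da), dy]])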
Example #24
def transform(refkp, framekp):
    #add the extra [] to make 3D array
    
    if refkp is None: return None
    if framekp is None: return None
    #if less than 5 matches then we can't find a transform
    if len(refkp) < 5: return None
    
    refkp = np.array([kp_to_xy_for_transform(refkp)],dtype = 'float32')
    framekp = np.array([kp_to_xy_for_transform(framekp)], dtype = 'float32')
    trs = cv2.estimateRigidTransform(refkp,framekp,False)
    
    return trs
Example #25
def getTransformationMatrixFrom_3_pairs(A,B,startA,middleA,endA,startB,middleB,endB):
	A3 = np.empty([1,3,2],np.int)
	B3 = np.empty([1,3,2],np.int)

	# index with the start/middle/end arguments (slots 0..2 in the 3-point arrays)
	A3[0][0] = A.points[0][startA]
	A3[0][1] = A.points[0][middleA]
	A3[0][2] = A.points[0][endA]

	B3[0][0] = B.points[0][startB]
	B3[0][1] = B.points[0][middleB]
	B3[0][2] = B.points[0][endB]

	return cv2.estimateRigidTransform(A3,B3,False)
Example #26
    def _calculate_transform_matrix(self, other_hand):
        src_points = self.getCentersList()
        dest_points = other_hand.getCentersList()
        min_points = min(len(src_points), len(dest_points))
        src_points = src_points[:min_points]
        dest_points = dest_points[:min_points]

        src = np.float32(src_points).reshape(-1, 1, 2)
        dest = np.float32(dest_points).reshape(-1, 1, 2)

        transform = np.eye(3)
        rigid = cv2.estimateRigidTransform(src, dest, False)
        # keep the identity when estimation fails
        if rigid is not None:
            transform[:2, :] = rigid

        return transform
Example #27
def main():
	shape = (1, 10, 2) # Needs to be a 3D array
	source = np.random.randint(0, 100, shape).astype(np.int)
	target = source + np.array([1, 0]).astype(np.int)
	print(source)
	print(source.shape)
	print(source.dtype)
	print(source.itemsize)
	print(source.ndim)
	
	print(target.shape)
	transformation = cv2.estimateRigidTransform(source, target, False)
	print(transformation)
	return
Example #28
def getTransformationMatrixFromFragment_3pairs(A,B,startA,endA,startB,endB):
	T  = None

	A3 = np.empty([1,3,2],np.int)
	B3 = np.empty([1,3,2],np.int)
	# print A.shape," ",startA," ",endA
	A3[0][0] = A[startA]
	A3[0][2] = A[endA]
	B3[0][0] = B[startB]
	B3[0][2] = B[endB]
	
	# integer midpoint indices
	middleA = (startA + endA)//2
	middleB = (startB + endB)//2
	
	A3[0][1] = A[middleA]
	B3[0][1] = B[middleB]


	T = cv2.estimateRigidTransform(A3,B3,False)
	#print T
	if(T is not None):
		r1 = math.sqrt(T[0,0] * T[0,0] + T[0,1]*T[0,1])
		T[0,0] =T[0,0]/r1
		T[0,1] =T[0,1]/r1
		r2 = math.sqrt(T[1,0] * T[1,0] + T[1,1]*T[1,1])
		T[1,0] =T[1,0]/r2
		T[1,1] =T[1,1]/r2
		return twoD_to_ThreeD_Affine(T) 
	print "WE are in shit !"
	print "*********************"
	# print A3,B3
	# def displayContour(imgname,contour,height,width):
	img = np.zeros((800,800,1),np.uint8)
	contour = np.empty([2,3,2],np.int)
	contour[0] = A3
	contour[1] = B3
	# cv2.drawContours(img,contour,-1,255,1)
	# cv2.imshow("Contours",img)
	# k=cv2.waitKey(0)
	# if(k==27):
	# 	cv2.destroyAllWindows()
	
	# return
	print "*********************"
	# i = (endA-middleA)
	# while(T is None):
		
	# return twoD_to_ThreeD_Affine(T)
	return None
Example #29
def estimateRigidTransform(srcPts, dstPts, inlierThreshold, outlierCoordScale = 1.0, fullAffine = False):
    srcPts = np.float32(srcPts).reshape(-1,1,2)
    dstPts = np.float32(dstPts).reshape(-1,1,2)
    M = cv2.estimateRigidTransform(srcPts, dstPts, fullAffine = fullAffine)
    print(M)
    if M is None:
        inlierMask = np.zeros(len(srcPts))
    else:
        inlierMask = []
        mappedPts = cv2.transform(srcPts, M)
        for mappedPt,dstPt in zip(mappedPts, dstPts):
            dist = np.linalg.norm(mappedPt/outlierCoordScale - dstPt/outlierCoordScale)
            inlierMask.append(int(dist < inlierThreshold))
        inlierMask = np.array(inlierMask)
    return M, inlierMask
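# The inlier loop above can also be vectorized; a sketch with the same
# semantics (synthetic stand-ins for mappedPts and dstPts):
import numpy as np

mapped = np.random.rand(10, 1, 2)
dst = mapped + 0.001
dists = np.linalg.norm((mapped - dst).reshape(-1, 2), axis=1)
inlier_mask = (dists < 0.01).astype(int)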
Example #30
 def findAffine(self, i1, i2, pairs, fullAffine=False):
     src = []
     dst = []
     for pair in pairs:
         c1 = i1.coord_list[pair[0]]
         c2 = i2.coord_list[pair[1]]
         src.append( c1 )
         dst.append( c2 )
     #print "src = %s" % str(src)
     #print "dst = %s" % str(dst)
     affine = cv2.estimateRigidTransform(np.array([src]).astype(np.float32),
                                         np.array([dst]).astype(np.float32),
                                         fullAffine)
     #print str(affine)
     return affine
Example #31
def estimate_partial_transform(matched_keypoints):
    """Wrapper of cv2.estimateRigidTransform for convenience in vidstab process

    :param matched_keypoints: output of match_keypoints util function; tuple of (cur_matched_kp, prev_matched_kp)
    :return: transform as list of [dx, dy, da]
    """
    cur_matched_kp, prev_matched_kp = matched_keypoints

    transform = cv2.estimateRigidTransform(np.array(prev_matched_kp),
                                           np.array(cur_matched_kp), False)
    if transform is not None:
        # translation x
        dx = transform[0, 2]
        # translation y
        dy = transform[1, 2]
        # rotation
        da = np.arctan2(transform[1, 0], transform[0, 0])
    else:
        dx = dy = da = 0

    return [dx, dy, da]
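# A compatibility sketch for the call above: cv2.estimateRigidTransform is
# unavailable in OpenCV 4.x, where estimateAffinePartial2D covers the
# fullAffine=False case for point arrays (hypothetical helper name):
import cv2

def estimate_rigid_compat(src, dst):
    if hasattr(cv2, 'estimateRigidTransform'):
        return cv2.estimateRigidTransform(src, dst, False)
    M, _ = cv2.estimateAffinePartial2D(src, dst)
    return M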
Example #32
def similarityTransform(inPoints, outPoints):
    s60 = math.sin(60 * math.pi / 180)
    c60 = math.cos(60 * math.pi / 180)

    inPts = np.copy(inPoints).tolist()
    outPts = np.copy(outPoints).tolist()

    xin = c60 * (inPts[0][0] - inPts[1][0]) - s60 * (inPts[0][1] -
                                                     inPts[1][1]) + inPts[1][0]
    yin = s60 * (inPts[0][0] - inPts[1][0]) + c60 * (inPts[0][1] -
                                                     inPts[1][1]) + inPts[1][1]
    inPts.append([np.int(xin), np.int(yin)])
    xout = c60 * (outPts[0][0] - outPts[1][0]) - s60 * (
        outPts[0][1] - outPts[1][1]) + outPts[1][0]
    yout = s60 * (outPts[0][0] - outPts[1][0]) + c60 * (
        outPts[0][1] - outPts[1][1]) + outPts[1][1]

    outPts.append([np.int(xout), np.int(yout)])
    tform = cv2.estimateRigidTransform(np.array([inPts]), np.array([outPts]),
                                       False)
    return tform
Example #33
def similarityTransform(inPoints, outPoints):
  s60 = math.sin(60*math.pi/180)
  c60 = math.cos(60*math.pi/180)

  inPts = np.copy(inPoints).tolist()
  outPts = np.copy(outPoints).tolist()

  # The third point is calculated so that the three points make an equilateral triangle
  xin = c60*(inPts[0][0] - inPts[1][0]) - s60*(inPts[0][1] - inPts[1][1]) + inPts[1][0]
  yin = s60*(inPts[0][0] - inPts[1][0]) + c60*(inPts[0][1] - inPts[1][1]) + inPts[1][1]

  inPts.append([np.int(xin), np.int(yin)])

  xout = c60*(outPts[0][0] - outPts[1][0]) - s60*(outPts[0][1] - outPts[1][1]) + outPts[1][0]
  yout = s60*(outPts[0][0] - outPts[1][0]) + c60*(outPts[0][1] - outPts[1][1]) + outPts[1][1]

  outPts.append([np.int(xout), np.int(yout)])

  # Now we can use estimateRigidTransform for calculating the similarity transform.
  tform = cv2.estimateRigidTransform(np.array([inPts]), np.array([outPts]), False)
  return tform
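# Usage sketch with hypothetical eye-center coordinates for a 600x600 output;
# two point pairs suffice because the third (equilateral-triangle) vertex is
# synthesized inside similarityTransform above:
src_eyes = [(180, 200), (320, 210)]
dst_eyes = [(0.3 * 600, 0.33 * 600), (0.7 * 600, 0.33 * 600)]
M = similarityTransform(src_eyes, dst_eyes)   # 2x3 similarity matrix
# aligned = cv2.warpAffine(face_img, M, (600, 600))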
Example #34
def compute_trajectory(filename):
    if 'mp4' in filename:
        pkl = filename.replace('mp4', 'pkl')
    else:
        pkl = filename.replace('avi', 'pkl')

    if os.path.exists(pkl):
        transform = pickle.load(open(pkl, 'rb'))
        return transform

    cap = cv2.VideoCapture(filename)
    ret, frame_prev = cap.read()
    frame_prev = cv2.cvtColor(frame_prev, cv2.COLOR_BGR2GRAY)
    point_prev = cv2.goodFeaturesToTrack(frame_prev,
                                         mask=None,
                                         **feature_params)
    transform = []

    while True:
        ret, frame = cap.read()
        if not ret:
            break
        frame_cur = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        point_cur, status, err = cv2.calcOpticalFlowPyrLK(
            frame_prev, frame_cur, point_prev, None, **lk_params)
        good_cur = point_cur[status == 1]
        good_prev = point_prev[status == 1]
        trans = cv2.estimateRigidTransform(good_prev, good_cur, False)
        if trans is None:
            transform.append(transform[-1])
        else:
            transform.append(trans)
        frame_prev = frame_cur
        point_prev = cv2.goodFeaturesToTrack(frame_prev,
                                             mask=None,
                                             **feature_params)
    cap.release()
    transform = np.array(transform)
    pickle.dump(transform, open(pkl, 'wb'))
    return transform
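# A sketch of the usual next stabilization step: integrating the per-frame
# transforms returned above into a cumulative camera trajectory (dx, dy, da):
import numpy as np

def integrate_trajectory(transforms):
    traj, x, y, a = [], 0.0, 0.0, 0.0
    for t in transforms:
        x += t[0, 2]
        y += t[1, 2]
        a += np.arctan2(t[1, 0], t[0, 0])
        traj.append((x, y, a))
    return np.array(traj)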
Example #35
    def align_dlib_cpp(self, rgbImg, landmarks=None):
        '''
        @brief: Crops and aligns exactly as the dlib C++ implementation does.
        @attention
        '''
        assert rgbImg is not None

        npLandmarks = np.array(landmarks)[:, :2]
        shape_x = [npLandmarks[i][0] for i in range(68)]
        shape_y = [npLandmarks[i][1] for i in range(68)]
        from_points = []
        to_points = []
        for i in range(17, 68):
            # skip the part below the lips
            if i >= 55 and i <= 59:
                continue
            # skip the eyebrows
            if i >= 17 and i <= 26:
                continue
            # apply padding on all four sides
            new_ref_x = (self.padding +
                         self.mean_shape_x[i - 17]) / (2 * self.padding + 1)
            new_ref_y = (self.padding +
                         self.mean_shape_y[i - 17]) / (2 * self.padding + 1)

            from_points.append((shape_x[i], shape_y[i]))
            to_points.append(
                (self.image_size * new_ref_x, self.image_size * new_ref_y))

        source = np.array(from_points).astype(np.int)
        target = np.array(to_points, ).astype(np.int)
        source = np.reshape(source, (1, 36, 2))
        target = np.reshape(target, (1, 36, 2))
        H = cv2.estimateRigidTransform(source, target, False)
        if H is None:
            return None
        else:
            aligned_face = cv2.warpAffine(rgbImg, H,
                                          (self.image_size, self.image_size))
            return aligned_face
Example #36
def RANSAC_2D(point_set1, point_set2, iteration, tolerance):
    point_count = point_set1.shape[0]
    best_M = None
    best_inlier = 0
    best_model = None
    for i in range(0, iteration):
        sample_index = np.random.choice(range(0, point_count),
                                        3,
                                        replace=False)
        first_set = np.array([point_set1[sample_index]], dtype=np.float32)
        second_set = np.array([point_set2[sample_index]], dtype=np.float32)
        #print first_set.shape
        transformation = cv2.estimateRigidTransform(first_set,
                                                    second_set,
                                                    fullAffine=False)
        if transformation is None:
            continue
        #transformation[0, 1] *= -1
        #transformation[1, 0] *= -1

        print('******')
        print(first_set)
        print(second_set)
        print('******')
        #transformation[1,2] *= -1
        new_point_set1 = np.asarray([
            np.dot(transformation,
                   np.transpose(np.array([x[0], x[1], 1], dtype=np.float32)))
            for x in point_set1
        ],
                                    dtype=np.float32)
        distance_matrix = np.linalg.norm(new_point_set1 - point_set2, axis=1)
        inlier = np.sum(distance_matrix <= tolerance)
        #print inlier
        if inlier > best_inlier:
            best_inlier = inlier
            best_M = transformation
            best_model = new_point_set1
    print(best_inlier)
    return best_M, best_model
Example #37
def align(image_stack):
    """
    Align the image stack with rigid affine transforms between frames
    """

    log.info("Aligning images with rigid transform.")

    # calculate the image corners to auto-crop frames;
    # only using the northwest and southeast corners can lead to
    # errors if the other two corners form a smaller bounding box
    (r, c) = image_stack[0].shape[:2]
    corners = np.array([[0., c], [0., r], [1., 1.]], dtype=float)
    nw_corner, se_corner = corners[:, 0], corners[:, 1]

    transform = np.eye(3)
    _stack = [image_stack[0]]

    # iterate over each pair of images and compute the cumulative
    # rigid transform to project the current frame onto the first
    # image frame
    for anchor, img in zip(image_stack[:-1], image_stack[1:]):

        new_t = cv2.estimateRigidTransform(img, anchor, fullAffine=False)
        # compose homogeneous transforms: rotate the accumulated part, then translate
        transform[:2, :2] = new_t[:2, :2].dot(transform[:2, :2])
        transform[:2, 2] = new_t[:2, :2].dot(transform[:2, 2]) + new_t[:, 2]
        new_im = cv2.warpAffine(img, transform[:2, :], (c, r))
        _stack.append(new_im)

        bounds = transform.dot(corners)
        nw_corner = np.max([nw_corner, bounds[:, 0]], axis=0)
        se_corner = np.min([se_corner, bounds[:, 1]], axis=0)

        log.debug("Transformation matrix:\n" + str(transform))

    lx, ly = nw_corner[:2].astype(int)
    rx, ry = se_corner[:2].astype(int)

    _stack = [img[ly:ry, lx:rx] for img in _stack]

    return _stack, _stack[0].shape
Example #38
def callback(image):
    bridge = CvBridge()
    global first
    if first:
        print("firsted")
        global first_img
        first_img = bridge.imgmsg_to_cv2(image, "bgr8")
        global prev
        prev = first_img
        ref_set.append({'img': image, 'tf': [0,0,0,0,0]})
        first = False
    else:
        transformation = Transform()
        global first_img
        global prev
        curr = bridge.imgmsg_to_cv2(image, "bgr8")
        #new = first_img[:, 60:300]
        test1 = deepcopy(first_img)
        test2 = deepcopy(first_img)
        test1 = test1[30:209, 40:279]
        test2 = test2[35:214, 40:279]
        affine = cv2.estimateRigidTransform(test1, test2, False)
        print(affine)
        if affine is not None:
            transform = affineToTransform(affine, summed_transformation)
            # accumulate the per-frame transform
            summed_transformation.translation.x += transform.translation.x
            summed_transformation.translation.y += transform.translation.y
            summed_transformation.translation.z += transform.translation.z
            summed_transformation.rotation.x += transform.rotation.x
            summed_transformation.rotation.y += transform.rotation.y
            summed_transformation.rotation.z += transform.rotation.z
            summed_transformation.rotation.w += transform.rotation.w
            #print(summed_transformation)
            cmdpub.publish(summed_transformation)
            global prev
            prev = curr
        else:
            print("skipped")
Example #39
    def compute_location(self, kp1, des1, kp2, des2):
        """
        compute the global location of center of current image
        :param kp1: captured keyPoints
        :param des1: captured descriptions
        :param kp2: map keyPoints
        :param des2: map descriptions
        :return: global pose
        """

        good = []
        pose = None

        if des1 is not None and des2 is not None:
            matches = self.matcher.knnMatch(des1, des2, k=2)

            for match in matches:
                if len(match) > 1 and match[0].distance < MATCH_RATIO * match[1].distance:
                    good.append(match[0])

            if len(good) > MIN_MATCH_COUNT:
                src_pts = np.float32([kp1[m.queryIdx].pt for m in good]).reshape(-1, 1, 2)
                dst_pts = np.float32([kp2[m.trainIdx].pt for m in good]).reshape(-1, 1, 2)
                transform = cv2.estimateRigidTransform(src_pts, dst_pts, False)
                if transform is not None:
                    transformed_center = cv2.transform(CAMERA_CENTER, transform)  # get global pixel
                    transformed_center = [transformed_center[0][0][0] / METER_TO_PIXEL,  # map to global pose
                                          (MAP_PIXEL_HEIGHT - 1 - transformed_center[0][0][1]) / METER_TO_PIXEL]
                    yaw = np.arctan2(transform[1, 0], transform[0, 0])  # get global heading

                    # correct the pose if the drone is not level
                    z = math.sqrt(self.z ** 2 / (1 + math.tan(self.angle_x) ** 2 + math.tan(self.angle_y) ** 2))
                    offset_x = np.tan(self.angle_x) * z
                    offset_y = np.tan(self.angle_y) * z
                    global_offset_x = math.cos(yaw) * offset_x + math.sin(yaw) * offset_y
                    global_offset_y = math.sin(yaw) * offset_x + math.cos(yaw) * offset_y
                    pose = [transformed_center[0] + global_offset_x, transformed_center[1] + global_offset_y, z, yaw]

        return pose, len(good)
Example #40
def findMinDistFace(landmarks1, landmarks2):
    faceind = np.zeros((len(landmarks2), ))
    for i in range(len(landmarks2)):
        if landmarks2[i] is None:
            continue
        mindist = np.inf
        for j in range(len(landmarks1)):
            if landmarks1[j] is None:
                continue
            T = cv2.estimateRigidTransform(landmarks1[j], landmarks2[i], False)
            if T is None:
                continue
            T_full = np.vstack((T, np.array([0, 0, 1])))
            landmarks1_full = np.vstack(
                (landmarks1[j].T, np.ones((1, landmarks1[j].shape[0]))))
            landmarks1_trans = np.dot(T_full, landmarks1_full)
            landmarks1_trans = landmarks1_trans[0:2, :].T
            dist = calcSimilarity(landmarks1_trans, landmarks2[i])
            if dist < mindist:
                faceind[i] = j
                mindist = dist
    return faceind.astype(int)
Example #41
def get_cov_from_video(video_name, size):
    cap, n_frames, fps, prev = video_open(video_name, size)
    new_size = size
    old = []
    last_affine = None
    cumulative_transform = np.insert(np.array([[1, 0], [0, 1]]), [2], [0], axis=1)
    for i in range(n_frames-1):
        # read frames
        ret2, cur = cap.read()
        cur = cv2.resize(cur, new_size, interpolation=cv2.INTER_CUBIC)
        # get affine transform between frames
        affine = cv2.estimateRigidTransform(prev, cur, False)
        # Sometimes there is no affine transform between frames, so reuse the last one
        if affine is None:
            affine = last_affine
        last_affine = affine
        # Accumulated frame to frame original transform
        #cumulative_transform = sum_2_affine(cumulative_transform, affine)
        # save original affine for comparing with stabilized
        old.append(affine)
        # advance the frame pair
        prev = cur
    cov = covariance(*get_params_from_trajectory(old))
    return cov
Example #42
def tran_similarity(src_img, src_points, dst_img, dst_points):  # used to build the average face
    s60 = math.sin(60 * math.pi / 180)
    c60 = math.cos(60 * math.pi / 180)

    in_pts = np.copy(dst_points).tolist()
    out_pts = np.copy(src_points).tolist()

    x_in = c60 * (in_pts[0][0] - in_pts[1][0]) - s60 * (in_pts[0][1] - in_pts[1][1]) + in_pts[1][0]
    y_in = s60 * (in_pts[0][0] - in_pts[1][0]) + c60 * (in_pts[0][1] - in_pts[1][1]) + in_pts[1][1]

    in_pts.append([np.int(x_in), np.int(y_in)])

    x_out = c60 * (out_pts[0][0] - out_pts[1][0]) - s60 * (out_pts[0][1] - out_pts[1][1]) + out_pts[1][0]
    y_out = s60 * (out_pts[0][0] - out_pts[1][0]) + c60 * (out_pts[0][1] - out_pts[1][1]) + out_pts[1][1]

    out_pts.append([np.int(x_out), np.int(y_out)])

    m = cv2.estimateRigidTransform(np.array([in_pts]), np.array([out_pts]), False)
    if m is None:
        # fall back to the unwarped image when estimation fails
        return dst_img

    output = cv2.warpAffine(dst_img, m, (src_img.shape[1], src_img.shape[0]))

    return output
Example #43
def compute_transform(matcher, kp1, des1, kp2, des2):
    """
    computes the transformation between two sets of keypoints and descriptors
    """
    transform = None

    if des1 is not None and des2 is not None:
        matches = matcher.knnMatch(des1, des2, k=2)

        good = []
        for match in matches:
            if len(match) > 1 and match[0].distance < MATCH_RATIO * match[1].distance:
                good.append(match[0])

        src_pts = np.float32([kp1[m.queryIdx].pt for m in good]).reshape(-1, 1, 2)
        dst_pts = np.float32([kp2[m.trainIdx].pt for m in good]).reshape(-1, 1, 2)

        # estimateRigidTransform needs at least three pairs
        if len(src_pts) >= 3 and len(dst_pts) >= 3:
            transform = cv2.estimateRigidTransform(src_pts, dst_pts, False)

    return transform
Example #44
def align(imgDim, rgbImg, detectedLandmarks, templateLandmarks):
    assert imgDim is not None
    assert rgbImg is not None
    assert templateLandmarks is not None

    detectedLandmarks = np.float32(np.array(detectedLandmarks))
    templateLandmarks = np.float32(np.int32(np.array(templateLandmarks)))

    # if landmarks are not detected, use previous frames' warp matrix
    if (len(detectedLandmarks) > 0):
        H = cv2.estimateRigidTransform(detectedLandmarks, templateLandmarks,
                                       True)
    else:
        H = None

    SMOOTH_NUM = 3

    if (len(pastH) < SMOOTH_NUM):
        if H is None:
            return []

        for ii in range(SMOOTH_NUM):
            pastH.append(H)

    smoothH = np.zeros((2, 3))

    if H is not None:
        pastH.pop()
        pastH.insert(0, H)

    # Take average of last 'SMOOTH_NUM' warp matrices to reduce jitter
    for ii in range(SMOOTH_NUM):
        smoothH = smoothH + pastH[ii]

    smoothH = smoothH / SMOOTH_NUM

    transformed = cv2.warpAffine(rgbImg, smoothH, (imgDim, imgDim))

    return transformed
Example #45
def find_transformation(previous_points, current_points):
    """finds the transformation matrix from previous points to current points.
  
  The transformation matrix is found using estimateRigidTransform 
  (fancier alternatives have been tried, but are not that stable).

  Parameters
  ----------
  previous_points: numpy.ndarray
    Set of 'starting' 2d points 
  current_points: numpy.ndarray
    Set of 'destination' 2d points

  Returns
  -------
  transformation_matrix: numpy.ndarray
    the affine transformation matrix between
    the two sets of points. 
  
  """
    from cv2 import estimateRigidTransform
    return estimateRigidTransform(previous_points, current_points, False)
Example #46
    def findGroupAffine(self, i1, fullAffine=False):
        # find the affine transform matrix representing the best fit
        # against all the placed neighbors.  Builds a cumulative
        # src/dest list with our src points listed once for each image
        # pair.

        src = []
        dst = []
        for i, pairs in enumerate(i1.match_list):
            if len(pairs) < 3:
                # can't compute affine transform on < 3 points
                continue
            i2 = self.image_list[i]
            if not i2.placed:
                # don't consider non-yet-placed neighbors
                continue
            # add coordinate matches for this image pair
            for pair in pairs:
                c1 = i1.coord_list[pair[0]]
                c2 = i2.coord_list[pair[1]]
                src.append( c1 )
                dst.append( c2 )

        if len(src) < 3:
            # not enough points to compute affine transformation
            return np.array( [ [1.0, 0.0, 0.0 ], [0.0, 1.0, 0.0] ] )

        # find the affine matrix on the cumulative set of all
        # matching coordinates for all matching image pairs
        # simultaneously...
        affine = cv2.estimateRigidTransform(np.array([src]).astype(np.float32),
                                            np.array([dst]).astype(np.float32),
                                            fullAffine)
        if affine is None:
            # it's possible given a degenerate point set, the affine
            # estimator will return None, so return the identity
            affine = np.array( [ [1.0, 0.0, 0.0 ], [0.0, 1.0, 0.0] ] )
        return affine
Example #47
def calculateAffinityMatrixAndDraw(bestImage, inliersDataBase, inliersWebCam,
                                   imgout):
    # The affinity mat A
    A = cv2.estimateRigidTransform(inliersDataBase,
                                   inliersWebCam,
                                   fullAffine=True)
    if A is None:
        return
    A = np.vstack((A, [0, 0, 1]))

    # Calculate the points of the rectangle occupied by the recognized object
    a = np.array([0, 0, 1], np.float)
    b = np.array([bestImage.shape[1], 0, 1], np.float)
    c = np.array([bestImage.shape[1], bestImage.shape[0], 1], np.float)
    d = np.array([0, bestImage.shape[0], 1], np.float)
    center = np.array(
        [float(bestImage.shape[0]) / 2,
         float(bestImage.shape[1]) / 2, 1], np.float)

    # Multiply the points of the virtual space, to convert them into
    # real image points
    a = np.dot(A, a)
    b = np.dot(A, b)
    c = np.dot(A, c)
    d = np.dot(A, d)
    center = np.dot(A, center)

    # The points are dehomogenized
    areal = (int(a[0] / a[2]), int(a[1] / a[2]))
    breal = (int(b[0] / b[2]), int(b[1] / b[2]))
    creal = (int(c[0] / c[2]), int(c[1] / c[2]))
    dreal = (int(d[0] / d[2]), int(d[1] / d[2]))
    centerreal = (int(center[0] / center[2]), int(center[1] / center[2]))

    # The polygon and the file name of the image are painted in the middle of the polygon
    points = np.array([areal, breal, creal, dreal], np.int32)
    cv2.polylines(imgout, np.int32([points]), 1, (0, 255, 255), thickness=3)
    #    Obj_utilscv.draw_str(imgout, centerreal, bestImage.nameFile.upper())
    # The detected object is displayed in a separate window
    cv2.imshow('ImageDetector', bestImage.imageBinary)
Example #48
def getdrift2(green, sample_interval=100):
    ''' This works by creating a list of frames averaged by every sample_interval.
    This is a more robust solution than getdrift1'''
    frames = []
    nframes = int(np.ceil(green.shape[0] / sample_interval) + 1)
    #create the list of frames
    for n in np.arange(nframes):
        f = green[n * sample_interval:(n + 1) * sample_interval].mean(0)
        frames.append((f * 256 / green.max()).astype('uint8'))
    allvectors = []

    for i in range(nframes - 1):
        motionVectors = []
        for j in range(nframes - 1):
            flow = cv2.estimateRigidTransform(frames[i], frames[j], False)
            if flow is None:
                # keep the vector list aligned when estimation fails
                motionVectors.append([0.0, 0.0])
            else:
                motionVectors.append([flow[0, 2], flow[1, 2]])
        motionVectors = np.array(motionVectors)
        allvectors.append(motionVectors)

    # take the derivative so all the vectors look similar
    allvectors_d = []
    for v in allvectors:
        motionVectors_d = [np.array([0, 0])]
        for f in np.arange(nframes - 2) + 1:
            motionVectors_d.append(v[f] - v[f - 1])
        motionVectors_d = np.array(motionVectors_d)
        allvectors_d.append(motionVectors_d)
    allvectors_d = np.array(allvectors_d)
    motionVectors = np.median(allvectors_d, 0)

    # take the integral so we have position rather than velocity
    for i in np.arange(motionVectors.shape[0] - 1) + 1:
        motionVectors[i] = motionVectors[i] + motionVectors[
            i -
            1]  #take the integral, so we have position rather than velocity
    motionVectors = np.repeat(motionVectors, sample_interval, axis=0)
    motionVectors2 = smoothMotion(motionVectors, 3)  #fit with polynomial
    return motionVectors2
Example #49
def similarityTransform(inPoints, outPoints):
    """
    求解相似矩阵
    参数:
    ==========
    inPoints:输入点
    outPoints:输出点
    返回值
    ==========
    tform:相似矩阵
    """
    pass

    s60 = math.sin(60*math.pi/180)
    c60 = math.cos(60*math.pi/180)

    inPts= np.copy(inPoints).tolist()
    outPts = np.copy(outPoints).tolist()

    xin= c60*(inPts[1][0]-inPts[0][0])+s60*(inPts[1][1]-inPts[0][1])+inPts[0][0]
    yin = c60*(inPts[1][1]-inPts[0][1])-s60*(inPts[1][0]-inPts[0][0])+inPts[0][1]

    inPts.append([np.int(xin), np.int(yin)])

    xout= c60*(outPts[1][0]-outPts[0][0])+s60*(outPts[1][1]-outPts[0][1])+outPts[0][0]
    yout = c60*(outPts[1][1]-outPts[0][1])-s60*(outPts[1][0]-outPts[0][0])+outPts[0][1]

    outPts.append([np.int(xout), np.int(yout)])

    if int(cv2.__version__.split('.')[0]) >= 4:
        # estimateRigidTransform was removed in OpenCV 4.x; use estimateAffinePartial2D instead
        _tform = cv2.estimateAffinePartial2D(np.array(inPts), np.array(outPts))
        tform = _tform[0]
    else:
        tform = cv2.estimateRigidTransform(np.array(inPts), np.array(outPts), False)
    # tform = cv2.estimateAffinePartial2D(np.array(inPts),np.array(outPts),False)

    return tform
Example #50
def calculateAffinityMatrixAndDraw(bestImage, inliersDataBase, inliersWebCam,
                                   imgout):
    # Compute the affinity matrix A
    A = cv2.estimateRigidTransform(inliersDataBase,
                                   inliersWebCam,
                                   fullAffine=True)
    if A is None:
        return
    A = np.vstack((A, [0, 0, 1]))

    # Compute the corner points of the rectangle occupied by the recognized object
    a = np.array([0, 0, 1], np.float)
    b = np.array([bestImage.shape[1], 0, 1], np.float)
    c = np.array([bestImage.shape[1], bestImage.shape[0], 1], np.float)
    d = np.array([0, bestImage.shape[0], 1], np.float)
    centro = np.array(
        [float(bestImage.shape[0]) / 2,
         float(bestImage.shape[1]) / 2, 1], np.float)

    # Multiply the points of the virtual space to convert them
    # into real image points
    a = np.dot(A, a)
    b = np.dot(A, b)
    c = np.dot(A, c)
    d = np.dot(A, d)
    centro = np.dot(A, centro)

    # Dehomogenize the points
    areal = (int(a[0] / a[2]), int(a[1] / a[2]))
    breal = (int(b[0] / b[2]), int(b[1] / b[2]))
    creal = (int(c[0] / c[2]), int(c[1] / c[2]))
    dreal = (int(d[0] / d[2]), int(d[1] / d[2]))
    centroreal = (int(centro[0] / centro[2]), int(centro[1] / centro[2]))

    # Draw the polygon and the image file name at the center of the polygon
    points = np.array([areal, breal, creal, dreal], np.int32)
    cv2.polylines(imgout, np.int32([points]), 1, (255, 255, 255), thickness=2)
    utilscv.draw_str(imgout, centroreal, bestImage.nameFile.upper())
    # Show the detected object in a separate window
    cv2.imshow('ImageDetector', bestImage.imageBinary)
Example #51
def transform_image_from_points(img: np.array,
                                matches: list,
                                kp_ref: list,
                                kp_img: list,
                                transform: str = "rigid"):
    """
    transforms the image `img` using a list of keypoints from a source and target image.
    uses the optional parameter "transform" which, if set to "homo", will use a
    homography transform instead of a rigid one.

    :param img: image to transform
    :type: np.array
    :param matches: list of matches
    :type: list(cv2.Match)
    :param kp_ref: list of keypoints for the reference points
    :param kp_img: list of keypoints for the target image points
    :param transform: how to transform the image
    :return: transformed image
    :rtype: np.array
    """
    src_pts = np.float32([kp_ref[m.queryIdx].pt
                          for m in matches]).reshape(-1, 1, 2)
    dst_pts = np.float32([kp_img[m.trainIdx].pt
                          for m in matches]).reshape(-1, 1, 2)

    if transform == "h**o":
        # homography transform, better
        M, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, 20.0)
        return cv2.warpPerspective(img,
                                   M, (img.shape[1], img.shape[0]),
                                   flags=cv2.INTER_CUBIC +
                                   cv2.WARP_INVERSE_MAP)
    else:
        # rigid transform,
        M = cv2.estimateRigidTransform(src_pts, dst_pts, False)
        return cv2.warpAffine(img,
                              M, (img.shape[1], img.shape[0]),
                              flags=cv2.INTER_CUBIC + cv2.WARP_INVERSE_MAP)
Example #52
    def feature_transform(self, kp1, des1, kp2, des2):
        transform = None

        if des1 is not None and des2 is not None:
            matches = self.matcher.knnMatch(des1, des2, k=2)

            good = []
            for match in matches:
                if len(match) > 1 and match[0].distance < 0.7 * match[1].distance:
                    good.append(match[0])

            src_pts = np.float32([kp1[m.queryIdx].pt
                                  for m in good]).reshape(-1, 1, 2)
            dst_pts = np.float32([kp2[m.trainIdx].pt
                                  for m in good]).reshape(-1, 1, 2)

            # estimateRigidTransform needs at least three pairs
            if len(src_pts) >= 3 and len(dst_pts) >= 3:
                transform = cv2.estimateRigidTransform(src_pts, dst_pts, False)

        return transform
Example #53
def similarityTransform(inPoints, outPoints):
    """
    Estimate a similarity transform between two point pairs.

    Parameters
    ==========
    inPoints: input points
    outPoints: output points

    Returns
    ==========
    tform: 2x3 similarity transform matrix
    """

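    # estimateRigidTransform needs at least three point pairs, but only two
    # are supplied; synthesise a third pair by rotating the segment between
    # the two points by 60 degrees, so each triple forms an equilateral triangle.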
    s60 = math.sin(60 * math.pi / 180)
    c60 = math.cos(60 * math.pi / 180)

    inPts = np.copy(inPoints).tolist()
    outPts = np.copy(outPoints).tolist()

    xin = c60 * (inPts[1][0] - inPts[0][0]) + s60 * (inPts[1][1] -
                                                     inPts[0][1]) + inPts[0][0]
    yin = c60 * (inPts[1][1] - inPts[0][1]) - s60 * (inPts[1][0] -
                                                     inPts[0][0]) + inPts[0][1]

    inPts.append([int(xin), int(yin)])

    xout = c60 * (outPts[1][0] - outPts[0][0]) + s60 * (
        outPts[1][1] - outPts[0][1]) + outPts[0][0]
    yout = c60 * (outPts[1][1] - outPts[0][1]) - s60 * (
        outPts[1][0] - outPts[0][0]) + outPts[0][1]

    outPts.append([int(xout), int(yout)])

    tform = cv2.estimateRigidTransform(np.array(inPts), np.array(outPts),
                                       False)

    return tform
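
A hypothetical usage sketch: map two detected eye centres onto canonical positions in a 112x112 crop (all coordinates are illustrative):

import cv2

eyes_detected = [[38, 51], [75, 50]]          # assumed eye centres in the source image
eyes_canonical = [[0.3 * 112, 0.35 * 112],    # illustrative canonical positions
                  [0.7 * 112, 0.35 * 112]]

tform = similarityTransform(eyes_detected, eyes_canonical)
# aligned = cv2.warpAffine(face_img, tform, (112, 112))  # face_img is assumed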
Example #54
def getRigidTransform(img1, img2):
    # find the keypoints and descriptors with SIFT
    sift = cv2.xfeatures2d.SIFT_create()
    kp1, des1 = sift.detectAndCompute(img1, None)
    kp2, des2 = sift.detectAndCompute(img2, None)
    # FLANN parameters
    FLANN_INDEX_KDTREE = 0
    index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
    search_params = dict(checks=50)
    flann = cv2.FlannBasedMatcher(index_params, search_params)
    # Find nearest 2 neighbors
    feature_matches = flann.knnMatch(des1, des2, k=2)
    # store all the good matches as per Lowe's ratio test.
    good = []
    for m, n in feature_matches:
        if m.distance < 0.7 * n.distance:
            good.append(m)
    img1_pts = np.float32([kp1[m.queryIdx].pt for m in good]).reshape(-1, 2)
    img2_pts = np.float32([kp2[m.trainIdx].pt for m in good]).reshape(-1, 2)

    # orb = cv2.ORB_create()
    # kp1, des1 = orb.detectAndCompute(img1,None)
    # kp2, des2 = orb.detectAndCompute(img2,None)

    # bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)
    # matches = bf.match(des1,des2)

    # img1_pts = np.float32([ kp1[m.queryIdx].pt for m in matches ]).reshape(-1,2)
    # img2_pts = np.float32([ kp2[m.trainIdx].pt for m in matches ]).reshape(-1,2)

    A = cv2.estimateRigidTransform(img1_pts, img2_pts, False)
    if A is None:
        return 0, 0, 0
    dx = A[0, 2]
    dy = A[1, 2]
    da = np.arctan2(A[1, 0], A[0, 0])
    return dx, dy, da
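
The (dx, dy, da) deltas returned above are typically accumulated into an absolute camera trajectory, e.g. for video stabilisation; a sketch under that assumption (the `frames` list is hypothetical):

# Accumulate per-frame motion into an absolute trajectory.
x = y = a = 0.0
trajectory = []
for prev_frame, cur_frame in zip(frames[:-1], frames[1:]):
    dx, dy, da = getRigidTransform(prev_frame, cur_frame)
    x, y, a = x + dx, y + dy, a + da
    trajectory.append((x, y, a))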
Example #55
def icp(d1, d2, max_iterate=100):
    src = np.array([d1.T], copy=True).astype(np.float32)
    dst = np.array([d2.T], copy=True).astype(np.float32)

    # cv2.ml.KNearest_create() replaces the old cv2.KNearest() API
    knn = cv2.ml.KNearest_create()

    responses = np.array(range(len(d2[0]))).astype(np.float32)
    knn.train(src[0], cv2.ml.ROW_SAMPLE, responses)

    # start from the identity transform
    Tr = np.eye(3)

    dst = cv2.transform(dst, Tr[0:2])
    max_dist = float("inf")  # no distance cut-off on the first iteration

    scale_x = np.max(d1[0]) - np.min(d1[0])
    scale_y = np.max(d1[1]) - np.min(d1[1])
    scale = max(scale_x, scale_y)

    for i in range(max_iterate):
        ret, results, neighbours, dist = knn.findNearest(dst[0], 1)

        indices = results.astype(np.int32).T
        indices = del_miss(indices, dist, max_dist)

        T = cv2.estimateRigidTransform(dst[0, indices], src[0, indices], True)
        if T is None:
            # estimateRigidTransform can fail; stop and keep the estimate so far
            break

        max_dist = np.max(dist)
        dst = cv2.transform(dst, T)
        Tr = np.dot(np.vstack((T, [0, 0, 1])), Tr)

        if (is_converge(T, scale)):
            break

    return Tr[0:2]
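
A small usage sketch for the routine above, assuming the helper functions del_miss and is_converge it references are available; the inputs are 2xN point arrays, and the synthetic rotation is purely illustrative:

import numpy as np

# d2 is d1 rotated by 0.1 rad, so the recovered Tr should map d2 back onto d1.
d1 = np.random.rand(2, 100) * 100
theta = 0.1
R = np.array([[np.cos(theta), -np.sin(theta)],
              [np.sin(theta),  np.cos(theta)]])
d2 = R.dot(d1)

Tr = icp(d1, d2)  # 2x3 rigid transform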
Example #56
def ransac_rigid(source_points, target_points, confidence, threshold):
    try:
        max_iters = 25000
        transform, inliers = cv2.estimateAffinePartial2D(source_points, target_points, 0, ransacReprojThreshold = threshold, maxIters = max_iters, confidence = confidence)
        source_points = np.squeeze(source_points, None)
        target_points = np.squeeze(target_points, None)
        transform = cv2.estimateRigidTransform(
            np.resize(source_points[matlib.repmat(inliers.astype(bool), 1, 2)],
            (np.sum(inliers), 2)),
            np.resize(target_points[matlib.repmat(inliers.astype(bool), 1, 2)],
            (np.sum(inliers), 2)),
            0)
        if transform is not None:
            t_transform = transform
            transform = np.eye(3)
            transform[0:2, 0:3] = t_transform
            failed = False
        else:
            transform = np.eye(3)
            failed = True
    except Exception:
        transform = np.eye(3)
        failed = True
    return transform, failed
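
A usage sketch, assuming Nx1x2 float32 point arrays (as produced by the keypoint examples above) and that numpy.matlib is imported as `matlib`, which the function body requires:

import numpy as np

src = (np.random.rand(50, 1, 2) * 200).astype(np.float32)  # illustrative points
dst = src + np.float32([10, 5])                            # pure translation

M, failed = ransac_rigid(src, dst, confidence=0.99, threshold=3.0)
# M is 3x3; its top two rows hold the estimated rigid transform.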
Example #57
def face_align(_image, _landmark, _target_shape):
    """
    Face alignment.

    Args:
        _image: face image
        _landmark: landmarks on the face image
        _target_shape: target size

    Returns:    the aligned face

    """
    reference_facial_points = np.array(
        [[0.31556875, 0.4615741], [0.6826229, 0.45983392],
         [0.5002625, 0.6405054], [0.3494719, 0.82469195],
         [0.6534365, 0.8232509]],
        dtype=np.float32)
    target_facial_points = reference_facial_points.copy() * _target_shape
    h, w = _image.shape[:2]
    remapped_landmark = _landmark.copy() * [w, h]
    transform_matrix = cv2.estimateRigidTransform(remapped_landmark,
                                                  target_facial_points, True)
    face_img = cv2.warpAffine(_image, transform_matrix, _target_shape)
    return face_img
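
A hypothetical call, with a normalised five-point landmark (eyes, nose tip, mouth corners) in the [0, 1] range, as the reference points above imply; the values are illustrative:

import numpy as np

landmark = np.array([[0.33, 0.45], [0.66, 0.46],
                     [0.50, 0.62], [0.37, 0.80], [0.64, 0.81]],
                    dtype=np.float32)
aligned = face_align(image, landmark, (112, 112))  # `image` is assumed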
Example #58
def icp(a, b, init_pose=(0, 0, 0), no_iterations=13):
    src = np.array([a.T], copy=True).astype(np.float32)
    dst = np.array([b.T], copy=True).astype(np.float32)
    #Initialise with the initial pose estimation
    Tr = np.array([[np.cos(init_pose[2]), -np.sin(init_pose[2]), init_pose[0]],
                   [np.sin(init_pose[2]),
                    np.cos(init_pose[2]), init_pose[1]], [0, 0, 1]])

    src = cv2.transform(src, Tr[0:2])
    for i in range(no_iterations):
        #Find the nearest neighbours between the current source and the
        #destination cloudpoint
        nbrs = NearestNeighbors(n_neighbors=1, algorithm='auto').fit(dst[0])
        distances, indices = nbrs.kneighbors(src[0])
        #Compute the transformation between the current source
        #and destination cloudpoint
        T = cv2.estimateRigidTransform(src, dst[0, indices.T], False)
        if T is None:
            #estimateRigidTransform can fail; keep the accumulated estimate
            break
        #Transform the previous source and update the
        #current source cloudpoint
        src = cv2.transform(src, T)
        #Save the transformation from the actual source cloudpoint
        #to the destination
        Tr = np.dot(Tr, np.vstack((T, [0, 0, 1])))
    return Tr[0:2]
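
Unlike the variant in Example #55, this one accepts an initial pose guess (and relies on sklearn's NearestNeighbors being imported); a sketch of seeding it from rough odometry, with illustrative data:

import numpy as np

# scan_b is scan_a rotated by 0.05 rad and shifted, so the true a->b
# transform is roughly (0.5, -0.2, 0.05).
scan_a = np.random.rand(2, 200) * 10
c, s = np.cos(0.05), np.sin(0.05)
scan_b = np.array([[c, -s], [s, c]]).dot(scan_a) + np.array([[0.5], [-0.2]])

# Seed with a rough (x, y, heading) estimate, e.g. from odometry.
Tr = icp(scan_a, scan_b, init_pose=(0.5, -0.2, 0.05))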
Example #59
def rigid_estimation(mov):
    nbFrames = len(mov)
    out_file = open("outputFiles/outTransform.txt", "w")

    lk_params = dict(winSize  = (15,15),
                     maxLevel = 2,
                     criteria = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0.03))

    # retrieve initial frame
    prev = mov[0]
    gray_prev = cv2.cvtColor(prev, cv2.COLOR_BGR2GRAY)

    last_T = np.array([[1., 0., 0.], [0., 1., 0.]])
    prev_to_cur_transform = []

    # iterate over all frames
    k = 1
    for t in range(1, nbFrames):
        # retrieve current frame
        cur = mov[t]
        gray_cur = cv2.cvtColor(cur, cv2.COLOR_BGR2GRAY)

        # find features to track in previous frames
        prev_corner = cv2.goodFeaturesToTrack(gray_prev, 200, 0.01, 30)
        # calculate new coordinates of features in current frame
        cur_corner, status, err = cv2.calcOpticalFlowPyrLK(gray_prev, gray_cur, 
                                                           prev_corner, None, 
                                                           **lk_params) 

        # remove outliers
        prev_corner1 = []
        cur_corner1 = []
        for s in range(0, len(status)):
            if status[s]:
                prev_corner1.append(prev_corner[s])
                cur_corner1.append(cur_corner[s])

        print "Frame: " + str(k) + "/" + str(nbFrames) + " - good optical flow: " + str(len(prev_corner1))

        # estimate affine transform between the frames
        prev_corner1 = np.asarray(prev_corner1)
        cur_corner1 = np.asarray(cur_corner1)
        T = cv2.estimateRigidTransform(prev_corner1, cur_corner1, False)

        # in the rare case that no transformation was found, use the
        # previously known transform
        if (T is None):
            T = last_T

        last_T = T

        # decompose the transform
        dx = T[0][2]
        dy = T[1][2]
        da = atan2(T[1][0], T[0][0])
        out_file.write(str(k) + " " + str(dx) + " " + str(dy) + " " + str(da) + "\n")
        k += 1

        # store the transform parameters
        prev_to_cur_transform.append(TransformParam(da, dx, dy))

        prev = cur
        gray_prev = gray_cur

    out_file.close()

    print "Done"
    return prev_to_cur_transform
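
The per-frame parameters returned above are usually integrated (and then smoothed) before being applied back to the frames; a sketch, under the assumption that TransformParam exposes its constructor arguments as .da, .dx and .dy attributes (hypothetical, not shown in the source):

# Integrate per-frame motion into an absolute camera trajectory.
x = y = a = 0.0
trajectory = []
for p in prev_to_cur_transform:  # as returned by rigid_estimation(mov)
    x, y, a = x + p.dx, y + p.dy, a + p.da
    trajectory.append((x, y, a))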
Example #60
        tpts1 = pts1[ii][0]
        tpts2 = nextPts[ii][0]
        # if status[ii][0]==1:
        if (status[ii] == 1) and (err[ii] < 20):
            cv2.circle(frm2c, tuple(tpts1), 3, (255, 0, 0))
            cv2.line(frm2c, tuple(tpts1), tuple(tpts2), (255, 255, 0))
            cv2.circle(frm2c, tuple(tpts2), 3, (0, 0, 255))
    # pts1Good=pts1[ ((status==1) & (err<20))]
    pts1Good = pts1[status == 1]
    pts1Good = np.reshape(pts1Good, (pts1Good.shape[0], 1, pts1Good.shape[1]))
    # nextPtsG=nextPts[((status==1) & (err<20))]
    nextPtsG = nextPts[status == 1]
    nextPtsG = np.reshape(nextPtsG, (nextPtsG.shape[0], 1, nextPtsG.shape[1]))
    # print pts1Good.shape
    # print nextPtsG.shape
    # NOTE: estimateRigidTransform can return None if no transform is found
    T = cv2.estimateRigidTransform(pts1Good, nextPtsG, False)
    dx, dy = T[0, 2], T[1, 2]
    dxy = np.array([dx, dy])
    pCenter = np.array([frm2c.shape[1] / 2, frm2c.shape[0] / 2])
    dr = np.sqrt(dx ** 2 + dy ** 2)
    cv2.line(frm2c, (pCenter[0], pCenter[1]), (int(pCenter[0] + dxy[0]), int(pCenter[1] + dxy[1])), (0, 255, 0))
    cv2.circle(frm2c, (pCenter[0], pCenter[1]), 5, (0, 255, 0))
    cv2.circle(frm2c, (pCenter[0], pCenter[1]), int(dr), (0, 255, 0))
    cv2.circle(frm2c, (int(pCenter[0] + dxy[0]), int(pCenter[1] + dxy[1])), 5, (0, 0, 255))

    print T
    print "dxy=(%s, %s)" % (T[0,2], T[1,2])
    cv2.imshow("win2", frm2c)
    while True:
        key = cv2.waitKey(0)
        if key==27: