def OpticalFlow(img0, img1, lk_params, feature_params):
    # convert images to grayscale
    img0_tmp = cv2.cvtColor(img0, cv2.COLOR_BGR2GRAY)
    img1_tmp = cv2.cvtColor(img1, cv2.COLOR_BGR2GRAY)

    p0 = cv2.goodFeaturesToTrack(img0_tmp, mask=None, **feature_params)
    p1 = cv2.goodFeaturesToTrack(img1_tmp, mask=None, **feature_params)
    if p0 is None or p1 is None:
        return -1.0, [], []
    
    # track forward (img0 -> img1) and backward (img1 -> img0);
    # both p0 and p1 are known to be non-None after the early return above
    p0_1, st0_1, err0_1 = cv2.calcOpticalFlowPyrLK(img0_tmp, img1_tmp, p0, None, **lk_params)
    st0_1_tmp = list(np.array(st0_1).T[0])
    p1_0, st1_0, err1_0 = cv2.calcOpticalFlowPyrLK(img1_tmp, img0_tmp, p1, None, **lk_params)
    st1_0_tmp = list(np.array(st1_0).T[0])

    l_move_x = []
    l_move_y = []
    for pts0, pts1, s in zip(p0, p1_0, st1_0):
        if s[0] == 1:
            l_move_x.append(pts1[0][0]-pts0[0][0])
            l_move_y.append(pts1[0][1]-pts0[0][1])

    return np.mean(st0_1_tmp + st1_0_tmp), l_move_x, l_move_y
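# A minimal usage sketch for OpticalFlow above, assuming tutorial-style
# parameter dicts; the parameter values and file names are illustrative
# placeholders, not from the original project.
import cv2

feature_params = dict(maxCorners=100, qualityLevel=0.3, minDistance=7, blockSize=7)
lk_params = dict(winSize=(15, 15), maxLevel=2,
                 criteria=(cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0.03))

img0 = cv2.imread('frame0.png')  # placeholder frames
img1 = cv2.imread('frame1.png')
score, moves_x, moves_y = OpticalFlow(img0, img1, lk_params, feature_params)
print(score, len(moves_x), len(moves_y))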
Example #2
def score_OF(roi, frame1, frame0, lk_params, feature_params):
    img0 = cv2.cvtColor(frame0[roi.ymin:roi.ymax, roi.xmin:roi.xmax], cv2.COLOR_BGR2GRAY)
    img1 = cv2.cvtColor(frame1[roi.ymin:roi.ymax, roi.xmin:roi.xmax], cv2.COLOR_BGR2GRAY)

    p0 = cv2.goodFeaturesToTrack(img0, mask=None, **feature_params)
    p1 = cv2.goodFeaturesToTrack(img1, mask=None, **feature_params)
    if p0 is None or p1 is None:
        return -1.0, 0.0, 0.0, 0.0, 0.0
    
    p1_0, st1_0, err1_0 = cv2.calcOpticalFlowPyrLK(img0, img1, p0, p1, **lk_params)
    p0_1, st0_1, err0_1 = cv2.calcOpticalFlowPyrLK(img1, img0, p1, p0, **lk_params)
    
    nb_c_p0=0.0
    nb_p0=0.0
    nb_c_p1=0.0
    nb_p1=0.0
    move_x = 0.0
    move_y = 0.0

    for pts0, pts1, s in zip(p0, p1_0, st1_0):
        nb_p0+=1
        if s[0] == 1:
            nb_c_p0+=1
            move_x+=(pts1[0][0]-pts0[0][0])
            move_y+=(pts1[0][1]-pts0[0][1])
    if nb_c_p0>0:
        move_x=int(round(move_x/nb_c_p0,0))
        move_y=int(round(move_y/nb_c_p0,0))
    
    for pts0, pts1, s in zip(p1, p0_1, st0_1):
        nb_p1+=1
        if s[0] == 1:
            nb_c_p1+=1

    return (nb_c_p0/nb_p0+nb_c_p1/nb_p1)/2, nb_p0, nb_p1, move_x, move_y
def calcGFTTShift(fimg1, fimg2):
    frm1=cv2.imread(fimg1, 0)
    frm2=cv2.imread(fimg2, 0)
    frm1=cv2.resize(frm1, (int(frm1.shape[1]/kdif), int(frm1.shape[0]/kdif)))
    frm2=cv2.resize(frm2, (int(frm2.shape[1]/kdif), int(frm2.shape[0]/kdif)))
    pts1=cv2.goodFeaturesToTrack(frm1, 1000, 0.01, 30)
    pts2=cv2.goodFeaturesToTrack(frm2, 1000, 0.01, 30)
    nextPts, status, err = cv2.calcOpticalFlowPyrLK(frm1, frm2, pts1, pts2)
    # print status
    pts1Good=pts1[ status==1 ]
    # pts1Good=np.reshape(pts1Good, (pts1Good.shape[0],1,pts1Good.shape[1]))
    nextPtsG=nextPts[ status==1 ]
    # nextPtsG=np.reshape(nextPtsG, (nextPtsG.shape[0],1,nextPtsG.shape[1]))
    # T=cv2.estimateRigidTransform(pts1Good, nextPtsG, True)
    T,msk=cv2.findHomography(pts1Good, nextPtsG, cv2.RANSAC)
    print T
    if T is None:
        dxy=(0,0)
    else:
        dx,dy=T[0,2],T[1,2]
        dxy=(dx,dy)
    tmp=np.zeros((frm1.shape[0], frm1.shape[1], 3), np.uint8)
    frm2_shift=np.roll(frm2, int(math.floor(-dxy[0])), 1)
    frm2_shift=np.roll(frm2_shift, int(math.floor(-dxy[1])), 0)
    tmp[:,:,2]=frm1
    tmp[:,:,1]=frm2_shift
    tmp[:,:,0]=0
    cv2.imshow("frame #2 shift",  cv2.resize(tmp, (tmp.shape[1]/1, tmp.shape[0]/1)))
    return dxy
Example #4
    def test_umat_optical_flow(self):
        img1 = self.get_sample("samples/data/right01.jpg", cv.IMREAD_GRAYSCALE)
        img2 = self.get_sample("samples/data/right02.jpg", cv.IMREAD_GRAYSCALE)
        # Note that to see a performance boost from the OCL implementation you need enough data.
        # For example, you can increase the maxCorners param to 10000 and enlarge img1 and img2 like this:
        # img = np.hstack([np.vstack([img] * 6)] * 6)

        feature_params = dict(maxCorners=239,
                              qualityLevel=0.3,
                              minDistance=7,
                              blockSize=7)

        p0 = cv.goodFeaturesToTrack(img1, mask=None, **feature_params)
        p0_umat = cv.goodFeaturesToTrack(cv.UMat(img1), mask=None, **feature_params)
        self.assertEqual(p0_umat.get().shape, p0.shape)

        p0 = np.array(sorted(p0, key=lambda p: tuple(p[0])))
        p0_umat = cv.UMat(np.array(sorted(p0_umat.get(), key=lambda p: tuple(p[0]))))
        self.assertTrue(np.allclose(p0_umat.get(), p0))

        _p1_mask_err = cv.calcOpticalFlowPyrLK(img1, img2, p0, None)

        _p1_mask_err_umat0 = list(map(lambda umat: umat.get(), cv.calcOpticalFlowPyrLK(img1, img2, p0_umat, None)))
        _p1_mask_err_umat1 = list(map(lambda umat: umat.get(), cv.calcOpticalFlowPyrLK(cv.UMat(img1), img2, p0_umat, None)))
        _p1_mask_err_umat2 = list(map(lambda umat: umat.get(), cv.calcOpticalFlowPyrLK(img1, cv.UMat(img2), p0_umat, None)))

        for _p1_mask_err_umat in [_p1_mask_err_umat0, _p1_mask_err_umat1, _p1_mask_err_umat2]:
            for data, data_umat in zip(_p1_mask_err, _p1_mask_err_umat):
                self.assertEqual(data.shape, data_umat.shape)
                self.assertEqual(data.dtype, data_umat.dtype)
        for _p1_mask_err_umat in [_p1_mask_err_umat1, _p1_mask_err_umat2]:
            for data_umat0, data_umat in zip(_p1_mask_err_umat0[:2], _p1_mask_err_umat[:2]):
                self.assertTrue(np.allclose(data_umat0, data_umat))
Example #5
    def test_goodFeaturesToTrack(self):
        arr = self.get_sample("samples/data/lena.jpg", 0)
        original = arr.copy(True)
        threshes = [x / 100.0 for x in range(1, 10)]
        numPoints = 20000

        results = dict([(t, cv2.goodFeaturesToTrack(arr, numPoints, t, 2, useHarrisDetector=True)) for t in threshes])
        # Check that GoodFeaturesToTrack has not modified input image
        self.assertTrue(arr.tostring() == original.tostring())
        # Check for repeatability
        for i in range(1):
            results2 = dict(
                [(t, cv2.goodFeaturesToTrack(arr, numPoints, t, 2, useHarrisDetector=True)) for t in threshes]
            )
            for t in threshes:
                self.assertTrue(len(results2[t]) == len(results[t]))
                for i in range(len(results[t])):
                    self.assertTrue(cv2.norm(results[t][i][0] - results2[t][i][0]) == 0)

        for t0, t1 in zip(threshes, threshes[1:]):
            r0 = results[t0]
            r1 = results[t1]
            # Increasing thresh should make the result list shorter
            self.assertTrue(len(r0) > len(r1))
            # Increasing thresh should only truncate the result list
            for i in range(len(r1)):
                self.assertTrue(cv2.norm(r1[i][0] - r0[i][0]) == 0)
Example #6
def _get_distance(before, after):

	# Get lists of key points (corners)
	threshold = 50
	# before_kp = corner.detect_corners(before, threshold)
	# after_kp = corner.detect_corners(after, threshold)

	before = cv2.cvtColor(before, cv2.COLOR_BGR2GRAY)
	after = cv2.cvtColor(after, cv2.COLOR_BGR2GRAY)

	before_kp = cv2.goodFeaturesToTrack(before, 4, 0.01, 10)
	after_kp = cv2.goodFeaturesToTrack(after, 4, 0.01, 10)

	kp_len = min(len(before_kp), len(after_kp))

	before_features = drawKeyPoints(before, before_kp)
	after_features = drawKeyPoints(after, after_kp)
	cv2.imwrite('before_features.jpg', before_features)
	cv2.imwrite('after_features.jpg', after_features)

	before_kp_pts = np.asarray([kp.ravel() for kp in before_kp[:kp_len]])
	after_kp_pts = np.asarray([kp.ravel() for kp in after_kp[:kp_len]])

	total_distance = 0
	for pt in before_kp_pts:
		closest_pt = get_closest_pt(pt, after_kp_pts)
		dist = np.sqrt((pt[0] - closest_pt[0])**2 + (pt[1] - closest_pt[1])**2)
		# print pt, closest_pt, dist
		total_distance += dist

	return total_distance/kp_len, kp_len
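# get_closest_pt is used above but not defined in this snippet; a plausible
# sketch (hypothetical helper: return the candidate point nearest to pt):
import numpy as np

def get_closest_pt(pt, candidates):
	# candidates: an (N, 2) array of points
	dists = np.linalg.norm(candidates - np.asarray(pt), axis=1)
	return candidates[np.argmin(dists)]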
    def detectionMethod(self, im, im_copy, frame_gray, net, p, detectedItem, colorScheme):
        scores, boxes = im_detect(net, im)
        NMS_THRESH = 0.3
        detectedItemsInThisFrame = []

        for cls_ind, cls in enumerate(CLASSES[1:]):
            cls_ind += 1 # because we skipped background
            cls_boxes = boxes[:, 4*cls_ind:4*(cls_ind + 1)]
            cls_scores = scores[:, cls_ind]
            dets = np.hstack((cls_boxes,
                              cls_scores[:, np.newaxis])).astype(np.float32) #stacks them together
            keep = nms(dets, NMS_THRESH) #Removes overlapping bounding boxes
            dets = dets[keep, :]

            #print "if cls == {0}: {1}".format(str(detectedItem), (cls == detectedItem))
            if cls == detectedItem:
                print "under else"
                inds = np.where(dets[:, -1] >= 0.5)[0] #Threshold applied to score values here
                im = im[:, :, (2, 1, 0)]

                for i in inds:
                    print "running for loop"
                    bbox = dets[i, :4]
                    detectedBBox = bbox.astype(int)
                    score = dets[i, -1]
                    bboxCentroid = c.mathArrayCentroid(detectedBBox)
                    cv2.circle(im_copy, bboxCentroid, 5, colorScheme, -1) #bbox centroid of detectedItemArray
                    #Calculate bbox centroid. Use it to determine if the item should be added to detectedItemArray

                    #Check if the centroid of the detected box is within the designated traffic intersection area
                    if (str(detectedItem) == "car") or (str(detectedItem) == "bus"):
                        print "if statement, detected car or bus"
                        if p.contains_point(bboxCentroid) == 1:
                            print "within area"
                            #Calculate corners of interest within the bounding box area and add them all to the corner array
                            detectedPixels = frame_gray[bbox[1]:bbox[3], bbox[0]:bbox[2]] #[y1:y2, x1:x2]
                            detectedPixelsColor = im_copy[bbox[1]:bbox[3], bbox[0]:bbox[2]] #for show on colored image
                            # mask must be a binary uint8 mask (or None), not the image itself
                            corners = cv2.goodFeaturesToTrack(detectedPixels, mask=None, **self.feature_params).reshape(-1, 2)

                            # for x, y in np.float32(corners).reshape(-1, 2): #black
                            #     cv2.circle(detectedPixels, (x,y), 5, (0, 0, 0), -1)
                            #     cv2.circle(detectedPixelsColor, (x, y), 5, (0, 0, 0), -1)

                            detectedItemsInThisFrame.append([[detectedBBox, corners]])
                        else:
                            print "car/bus not added. Coordinates: ", bbox
                    
                    else:
                        print "else not car or bus detected"
                        detectedPixels = frame_gray[bbox[1]:bbox[3], bbox[0]:bbox[2]] #[y1:y2, x1:x2]
                        detectedPixelsColor = im_copy[bbox[1]:bbox[3], bbox[0]:bbox[2]] #for show on colored image
                        # mask must be a binary uint8 mask (or None), not the image itself
                        corners = cv2.goodFeaturesToTrack(detectedPixels, mask=None, **self.feature_params).reshape(-1, 2)

                print "detectedItemsInThisFrame len: {0}-------------------------------------".format(len(detectedItemsInThisFrame))
                print "detectedItemsInThisFrame: ", detectedItemsInThisFrame

                return detectedItemsInThisFrame
Example #8
def stabilize (img_list):
	cv_images = []

	for i in xrange(len(img_list)):
		cv_images.append(pil_to_opencv(img_list[i]))

	prev = cv_images[0]
	prev_gray = cv2.cvtColor(prev, cv2.COLOR_BGR2GRAY)

	transforms = []
	last_T = None

	for i in xrange(1, len(cv_images)):
		current = cv_images[i]
		current_gray = cv2.cvtColor(current, cv2.COLOR_BGR2GRAY)

		prev_corner = cv2.goodFeaturesToTrack(prev_gray, 200, 0.01, 30)
		current_corner, status, err = cv2.calcOpticalFlowPyrLK(prev_gray, current_gray, prev_corner, None)

		# keep only the points that were tracked successfully
		prev_corner2 = prev_corner[status == 1]
		current_corner2 = current_corner[status == 1]

		T = cv2.estimateRigidTransform(prev_corner2, current_corner2, False)

		# fall back to the last valid transform if estimation failed
		if T is None:
			T = last_T
		last_T = T
		if T is None:
			continue

		dx = T[0, 2]
		dy = T[1, 2]
		da = numpy.arctan2(T[1, 0], T[0, 0])
		transforms.append(Transform(dx, dy, da))

		prev = current
		prev_gray = current_gray

	# accumulate transformations to get image trajectory
	x = 0
	y = 0
	a = 0

	trajectory_list = []

	for t in transforms:
		x += t.dx
		y += t.dy
		a += t.da
		trajectory_list.append(Trajectory(x, y, a))

	return img_list
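# The raw trajectory is normally smoothed before correction transforms are
# built; a minimal moving-average sketch (illustrative, assuming the
# Trajectory class above exposes x, y, a attributes):
def smooth_trajectory(trajectory_list, radius=5):
	smoothed = []
	for i in xrange(len(trajectory_list)):
		window = trajectory_list[max(0, i - radius):i + radius + 1]
		n = float(len(window))
		smoothed.append(Trajectory(sum(t.x for t in window) / n,
		                           sum(t.y for t in window) / n,
		                           sum(t.a for t in window) / n))
	return smoothed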
def detect(old_frame,faces):
    old_gray = cv2.cvtColor(old_frame, cv2.COLOR_BGR2GRAY)
    # Detect faces in the image
    faces = faceCascade.detectMultiScale(
        old_gray,
        scaleFactor=1.1,
        minNeighbors=5,
        minSize=(20, 20),
        flags = cv2.CASCADE_SCALE_IMAGE 
    )
    if len(faces)==0:
        return [],[],[],[]
    f_temp=faces
    points=[]
    r_gray=[]
    r_color=[]
    coords=[]
    if len(faces)>0:
        ## reduce overlapping faces (drop any face fully contained in another)
        for i in range (len(faces)):
            (x0, y0, w0, h0) = faces[i] 
            for k in range (len(faces)):
                (x1, y1, w1, h1) = faces[k] 
                if x1>x0 and y1>y0 and x1+w1<x0+w0 and y1+h1<y0+h0:                    
                    f_temp = np.delete(f_temp, (k), axis=0)
        faces=f_temp
        
        (x, y, w, h) = faces[0]        
        roi_gray = old_gray[y:y+h, x:x+w]
        roi_color=old_frame[y:y+h, x:x+w]
        points.append( cv2.goodFeaturesToTrack(roi_gray, mask = None, **feature_params))
        r_gray=[roi_gray]
        r_color=[roi_color]
        coords=[ (x, y, w, h) ]

    for (x, y, w, h) in faces:
        cv2.rectangle(old_frame, (x, y), (x+w, y+h), (0, 255, 0), 2)  
        roi_gray=old_gray[y:y+h, x:x+w]
        roi_color=old_frame[y:y+h, x:x+w]

        p=cv2.goodFeaturesToTrack(roi_gray, mask = None, **feature_params)
        if not np.in1d(p,points).all():            
            points.append(p)
            r_gray.append( roi_gray )
            r_color.append( roi_color)
            coords.append( (x, y, w, h) )
        # Create a mask image for drawing purposes
             
    cv2.imshow('old_frame',old_frame)

    return r_gray,points,faces,coords
Example #10
    def test_lk_homography(self):
        self.render = TestSceneRender(self.get_sample('samples/python2/data/graf1.png'),
            self.get_sample('samples/c/box.png'), noise = 0.1, speed = 1.0)

        frame = self.render.getNextFrame()
        frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        self.frame0 = frame.copy()
        self.p0 = cv2.goodFeaturesToTrack(frame_gray, **feature_params)

        isForegroundHomographyFound = False

        if self.p0 is not None:
            self.p1 = self.p0
            self.gray0 = frame_gray
            self.gray1 = frame_gray
            currRect = self.render.getCurrentRect()
            for (x,y) in self.p0[:,0]:
                if isPointInRect((x,y), currRect):
                    self.numFeaturesInRectOnStart += 1

        while self.framesCounter < 200:
            self.framesCounter += 1
            frame = self.render.getNextFrame()
            frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            if self.p0 is not None:
                p2, trace_status = checkedTrace(self.gray1, frame_gray, self.p1)

                self.p1 = p2[trace_status].copy()
                self.p0 = self.p0[trace_status].copy()
                self.gray1 = frame_gray

                if len(self.p0) < 4:
                    self.p0 = None
                    continue
                H, status = cv2.findHomography(self.p0, self.p1, cv2.RANSAC, 5.0)

                goodPointsInRect = 0
                goodPointsOutsideRect = 0
                for (x0, y0), (x1, y1), good in zip(self.p0[:,0], self.p1[:,0], status[:,0]):
                    if good:
                        if isPointInRect((x1,y1), self.render.getCurrentRect()):
                            goodPointsInRect += 1
                        else: goodPointsOutsideRect += 1

                if goodPointsOutsideRect < goodPointsInRect:
                    isForegroundHomographyFound = True
                    self.assertGreater(float(goodPointsInRect) / (self.numFeaturesInRectOnStart + 1), 0.6)
            else:
                p = cv2.goodFeaturesToTrack(frame_gray, **feature_params)

        self.assertEqual(isForegroundHomographyFound, True)
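# checkedTrace is used in this test (and the next example) but not shown; in
# OpenCV's lk_homography.py sample it is a forward-backward consistency check
# along these lines (lk_params is the module-level dict; the default
# back_threshold value is assumed):
def checkedTrace(img0, img1, p0, back_threshold=1.0):
    p1, _st, _err = cv2.calcOpticalFlowPyrLK(img0, img1, p0, None, **lk_params)
    p0r, _st, _err = cv2.calcOpticalFlowPyrLK(img1, img0, p1, None, **lk_params)
    d = abs(p0 - p0r).reshape(-1, 2).max(-1)  # forward-backward error per point
    status = d < back_threshold
    return p1, status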
Example #11
    def run(self):
        while True:
            ret, frame = self.cam.read()
            frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            vis = frame.copy()
            if self.p0 is not None:
                p2, trace_status = checkedTrace(self.gray1, frame_gray, self.p1)

                self.p1 = p2[trace_status].copy()
                self.p0 = self.p0[trace_status].copy()
                self.gray1 = frame_gray

                if len(self.p0) < 4:
                    self.p0 = None
                    continue
                H, status = cv2.findHomography(self.p0, self.p1, (0, cv2.RANSAC)[self.use_ransac], 10.0)
                h, w = frame.shape[:2]
                overlay = cv2.warpPerspective(self.frame0, H, (w, h))
                vis = cv2.addWeighted(vis, 0.5, overlay, 0.5, 0.0)

                for (x0, y0), (x1, y1), good in zip(self.p0[:,0], self.p1[:,0], status[:,0]):
                    if good:
                        cv2.line(vis, (x0, y0), (x1, y1), (0, 128, 0))
                    cv2.circle(vis, (x1, y1), 2, (red, green)[good], -1)
                draw_str(vis, (20, 20), 'track count: %d' % len(self.p1))
                if self.use_ransac:
                    draw_str(vis, (20, 40), 'RANSAC')
            else:
                p = cv2.goodFeaturesToTrack(frame_gray, **feature_params)
                if p is not None:
                    for x, y in p[:,0]:
                        cv2.circle(vis, (x, y), 2, green, -1)
                    draw_str(vis, (20, 20), 'feature count: %d' % len(p))

            cv2.imshow('lk_homography', vis)

            ch = 0xFF & cv2.waitKey(1)
            if ch == 27:
                break
            if ch == ord(' '):
                self.frame0 = frame.copy()
                self.p0 = cv2.goodFeaturesToTrack(frame_gray, **feature_params)
                if self.p0 is not None:
                    self.p1 = self.p0
                    self.gray0 = frame_gray
                    self.gray1 = frame_gray
            if ch == ord('r'):
                self.use_ransac = not self.use_ransac
Example #12
def find_features(image, rectangle, features_num):
	# rectangle (x1, y1, x2, y2)

	dist2B = list()
	features = list()
	global features_number

	x1 = rectangle[0]
	x2 = rectangle[2]
	y1 = rectangle[1]
	y2 = rectangle[3]

	cropped_image = image[y1:y2, x1:x2]
	corners = cv2.goodFeaturesToTrack(cropped_image, mask = None, **feature_params)

	print ("\n")

	for corner in corners:
		#print (corner)
		distance = (y2-y1) - corner[0][1]
		dist2B.append(distance)

		corner[0][0] = rectangle[0] + corner[0][0]
		corner[0][1] = rectangle[1] + corner[0][1]
		features.append(corner)

	for i in range(len(features)):
		d = dist2B.pop(0)
		height_from_floor.append(d)

	return features, height_from_floor
Example #13
def findCornersInMask(frameGray, cx, cy, h, w, featureParams):
    """Find Shi-Tomasi corners in a subpart of a frame.
    The research mask is centered around (cx, cy).
    
    Parameters
    ----------
    frameGray : np.ndarray
        frame to search corners from.    
    
    cx : int
        X coordinate for mask center.
        
    cy : int
        Y coordinate for mask center.  
    
    w : int
        Mask width. Should be odd.
        
    h : int
        Mask height. Should be odd.
    
    featureParams : dict
        See **kwargs in ``cv2.goodFeaturesToTrack()``.

    Returns
    -------
    tuple
        First element is a N x 1 x 2 array containing coordinates of N good 
        corners to track. Second element is the frame containing the mask.
    
    """
    new_mask, pts = createWhiteMask(frameGray, cx, cy, h, w)
    new_p0 = cv2.goodFeaturesToTrack(frameGray, mask=new_mask, **featureParams)
    return new_p0, new_mask
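# createWhiteMask is used above but not defined in this snippet; a plausible
# sketch (hypothetical helper, assuming it returns a white w x h rectangle
# centered on (cx, cy) plus the rectangle's corner points):
import numpy as np

def createWhiteMask(frameGray, cx, cy, h, w):
    mask = np.zeros_like(frameGray)
    x0, y0 = max(0, cx - w // 2), max(0, cy - h // 2)
    x1, y1 = min(frameGray.shape[1], cx + w // 2 + 1), min(frameGray.shape[0], cy + h // 2 + 1)
    mask[y0:y1, x0:x1] = 255  # corners are searched only inside this rectangle
    pts = ((x0, y0), (x1, y1))
    return mask, pts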
Example #14
def shiDetector(img):
    corners = cv2.goodFeaturesToTrack(cv2.cvtColor(img, cv2.COLOR_BGR2GRAY), 25, 0.01, 10)
    corners = np.int0(corners)
    for i in corners:
        x,y = i.ravel()
        cv2.circle(img,(x,y),3,255,-1)
    return img
Example #15
def good_corners():
    img = cv2.imread('Z:/Cartographer/Test.png')
    gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
    blur = cv2.bilateralFilter(gray, 7, 175, 175)
    corners = cv2.goodFeaturesToTrack(blur,20,0.01,10)
    corners = np.int0(corners)

    # make a list to keep track of the detected coordinates
    coordinates = []
    dict_gradient_maps = {}
    
    for i in corners:
        x,y = i.ravel()
        point = Point()
        point.x = x
        point.y = y
        coordinates.append(point)
        cv2.circle(img,(x,y),3,255,-1)

    while len(coordinates) > 2:
        smallest = find_smallest(coordinates)
        # If a point has been part of a gradient check and the gradient
        # coordinate list is a good size, it is removed from the list; it
        # doesn't belong there anymore. We don't want to check the gradient
        # for the point we're checking against.
        coordinates.remove(smallest)
        dict_gradient_maps[smallest] = gradients(coordinates)
        
    dict_gradient_maps = {key: value for key, value in dict_gradient_maps.items() if len(value) > 0}    
    plt.imshow(img),plt.show()
    print dict_gradient_maps
Example #16
def goodFeaturesToTrack_Demo(val):
    maxCorners = max(val, 1)

    # Parameters for Shi-Tomasi algorithm
    qualityLevel = 0.01
    minDistance = 10
    blockSize = 3
    gradientSize = 3
    useHarrisDetector = False
    k = 0.04

    # Copy the source image
    copy = np.copy(src)

    # Apply corner detection
    corners = cv.goodFeaturesToTrack(src_gray, maxCorners, qualityLevel, minDistance, None, \
        blockSize=blockSize, gradientSize=gradientSize, useHarrisDetector=useHarrisDetector, k=k)

    # Draw corners detected
    print('** Number of corners detected:', corners.shape[0])
    radius = 4
    for i in range(corners.shape[0]):
        cv.circle(copy, (int(corners[i,0,0]), int(corners[i,0,1])), radius, (rng.randint(0,256), rng.randint(0,256), rng.randint(0,256)), cv.FILLED)

    # Show what you got
    cv.namedWindow(source_window)
    cv.imshow(source_window, copy)
def findCorners(img):
    corners = cv2.goodFeaturesToTrack(img, 4, 0.01, img.shape[0]/10)
    corners = sortCorners(corners)
    print('Detected corners: '+str(corners))
    for i in range(4):
        cv2.circle(img, (int(corners[i][0]),int(corners[i][1])), 10, (255,0,0), -1)
    return img, corners
Example #18
	def featureDetection(im):
		# Initiate FAST object with default values
		# fast = cv2.FastFeatureDetector_create(20,True)
		# fast = cv2.FastFeatureDetector_create()
		# fast.setNonmaxSuppression(True)
		# fast.setThreshold(20)
		# # find and draw the keypoints
		# keypoints = fast.detect(im)
		# keypoints=np.array([[k.pt] for k in keypoints],dtype='f4')
		# print 'fast keypoints',keypoints.shape

		# orb = cv2.ORB_create()
		# keypoints = orb.detect(im,None)
		# keypoints=np.array([[k.pt] for k in keypoints],dtype='f4')
		# print 'orb shape',keypoints.shape

		# params for ShiTomasi corner detection
		feature_params = dict(maxCorners=500,
			qualityLevel=0.3,
			minDistance=7,
			blockSize=7)
		keypoints = cv2.goodFeaturesToTrack(im, mask=None, **feature_params)
		print('goodFeaturesToTrack shape', keypoints.shape)

		return keypoints
Example #19
 def process(self, img):
     img = super(GoodFeaturesProcessor, self).process(img)
     corners = cv2.goodFeaturesToTrack(img, int(self.num), int(self.distance), float(self.quality))
     if corners is not None:
         for c in corners:
             cv2.ellipse(img, (c[0][0], c[0][1]), (5, 5), 0, 0, 360, 255, -1)
     return img
Example #20
def track_using_trajectories( cur, prev ):
    global curr_loc_ 
    global static_features_img_
    p0 = cv2.goodFeaturesToTrack( cur, 200, 0.01, 5 )
    insert_int_corners( p0 )

    draw_point( cur, p0, 1 )

    ellipse, p1 = update_mouse_location( p0 )
    if p1 is not None:
        for p in p1:
            cv2.circle( cur, p, 10, 20, 2 )
    cv2.ellipse( cur, ellipse, 1 )
    cv2.circle( cur, curr_loc_, 10, 255, 3)
    display_frame( cur, 1 )
    # cv2.imshow( 'static features', static_features_img_ )
    return 
    # Find a contour
    prevE = find_edges( prev )
    curE = find_edges( cur )
    img = curE - prevE
    cnts, hier = cv2.findContours( img, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE )
    cnts = filter( ismouse, cnts )
    cv2.drawContours( img, cnts, -1, 255, 3 )
    display_frame( img, 1)
    return 
    p1, status, err = cv2.calcOpticalFlowPyrLK( prev, cur, p0 )
    mat = cv2.estimateRigidTransform( p0, p1, False )
    # print cv2.warpAffine( curr_loc_, mat, dsize=(2,1) )
    if mat is not None:
        dx, dy = mat[:,2]
        da = math.atan2( mat[1,0], mat[0,0] )
        trajectory_.append( (dx, dy, da) )
        print( "Transformation", dx, dy, da )
        curr_loc_ = (curr_loc_[0] - int(dy), curr_loc_[1] - int(dx))
Example #21
def goodFeaturesToTrack(image, maxCorners, qualityLevel, minDistance):
    maxCorners=int(maxCorners)
    minDistance=int(minDistance)
    
    corners = cv2.goodFeaturesToTrack(image,maxCorners,qualityLevel,minDistance)
    corners = np.int0(corners)
    return corners
Example #22
    def __compensate_shi_tomasi(self, new_frame):
        """
        Measure and compensate for inter-frame motion:
        - get points on both frames
        -- we use Shi & Tomasi here, to be adapted ?
        @rtype : opencv frame
        """
        self.corners = cv2.goodFeaturesToTrack(self.frame_prev, self.n_max_corners, .01, 50)

        # - track points
        [self.corners_next, status, err] = cv2.calcOpticalFlowPyrLK(self.frame_prev, new_frame, self.corners)

        # - track back (more reliable)
        [corners_next_back, status_back, err_back] = cv2.calcOpticalFlowPyrLK(new_frame,
                                                                              self.frame_prev, self.corners_next)

        # - sort out to keep reliable points :
        [self.corners, self.corners_next] = self.__sort_corners(self.corners,
                                                                self.corners_next, status,
                                                                corners_next_back, status_back)

        # - compute the transformation from the tracked pattern
        # -- estimate the rigid transform
        transform, mask = cv2.findHomography(self.corners_next, self.corners, cv2.RANSAC, 5.0)

        # -- see if this transform explains most of the displacements (thresholded..)
        if len(mask[mask > 0]) > 20: # TODO: More robust test here
            print "Enough match for motion compensation"
            acc_frame_aligned = cv2.warpPerspective(self.frame_acc, transform, self.frame_acc.shape[2::-1])
            self.frame_acc = acc_frame_aligned
            return True

        else:
            print "Not finding enough matchs - {}".format(len(mask[mask > 0]))
            return False
Example #23
	def detect_points(self):
		# load the image and create grayscale
		self.filter(track=False)

		# search for good points
		features = cv2.goodFeaturesToTrack(self.gray, **feature_params)

		(cb, gray, cr) = cv2.split(self.img)
		gray = cv2.threshold(gray, 150, 255, cv2.THRESH_TOZERO)[1]
		circles = cv2.HoughCircles(gray,cv2.cv.CV_HOUGH_GRADIENT,3,100,param1=100,param2=30,minRadius=3,maxRadius=20)
		self.circles = circles
		confirmed_features = []
        
		if features is not None:
			if circles is not None:
				for feature in features:
					x1 = int(feature[0][0])
					y1 = int(feature[0][1])

					for circle in circles:
						x2 = circle[0][0]
						y2 = circle[0][1]

						if sqrt( (x2 - x1)**2 + (y2 - y1)**2 ) < circle[0][2] * 1.1:
							confirmed_features.append(feature)
							break

			else:
				confirmed_features = features

			# initialize the tracking
			self.features = confirmed_features
			self.tracks = [[p] for p in features.reshape((-1, 2))]
			self.prev_gray = self.gray
Example #24
def find_keypoints ( gray , quality , ksize , blocksize , max_area = None ) :
  """
  Find keypoints

  return keypoints,oob,oob_corners
  """
  gray32 = np.float32(gray)
  points = cv2.goodFeaturesToTrack(gray32,maxCorners = 100, qualityLevel = quality ,minDistance = ksize , blockSize = blocksize )

  if points is None :
    return None , None , None

  if len(points) < 4 :
    return None , None , None

  oob = cv2.minAreaRect(points) 

  if oob is None :
    return None, None , None

  oob_corners = get_oob_corners ( oob = oob )

  if oob_corners is None :
    return None, None , None

  if max_area is None :
    return points , oob , oob_corners

  area = get_polygon_area ( corners = oob_corners )

  if area > max_area :
    return None, None , None

  return points , oob , oob_corners
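# get_oob_corners and get_polygon_area are used above but not defined in this
# snippet; plausible sketches built on standard OpenCV calls (hypothetical
# helpers, matching the keyword-argument call style above):
import cv2
import numpy as np

def get_oob_corners ( oob ) :
  # corners of the oriented bounding rect returned by cv2.minAreaRect
  return cv2.boxPoints(oob)

def get_polygon_area ( corners ) :
  return cv2.contourArea(np.asarray(corners, dtype=np.float32))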
Example #25
def my_function():
    cap = cv2.VideoCapture(0)
    stop = False
    while(not stop):

        # Capture frame-by-frame

        ret, frame = cap.read()

        # Our operations on the frame come here
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        # blur = cv2.GaussianBlur(gray, (7, 7), 1.5)
        # edges = cv2.Canny(blur, 0, 30)

        corners = cv2.goodFeaturesToTrack(gray, 25, 0.01, 10)
        try:
            corners = np.int0(corners)
            for i in corners:
                x, y = i.ravel()
                cv2.circle(frame, (x, y), 3, 255, -1)
        except Exception:
            print "nope~"
        # Display the resulting frame
        cv2.imshow('frame', frame)
        if cv2.waitKey(1) > 0:
            stop = True
    cv2.destroyAllWindows()
    return
Example #26
    def __motion_estimation_shi_tomasi(self, ref_frame, new_frame, min_matches=20):
        # detect corners
        grey_frame = cv.cvtColor(ref_frame, cv.COLOR_BGR2GRAY)
        corners = cv.goodFeaturesToTrack(grey_frame, self.n_max_corners, .01, 50)    # Better with Fast ?
        corners_next, status, _ = cv.calcOpticalFlowPyrLK(ref_frame, new_frame, corners)    # Track points

        corners_next_back, status_back, _ = cv.calcOpticalFlowPyrLK(new_frame, ref_frame, corners_next)     # Track back

        # - sort out to keep reliable points :
        corners, corners_next = self.__sort_corners(corners, corners_next, status, corners_next_back, status_back, 1.0)

        if len(corners) < 5:
            return None, False

        # Compute the transformation from the tracked pattern
        # -- estimate the rigid transform
        transform, mask = cv.findHomography(corners, corners_next, cv.RANSAC, 5.0)

        # -- see if this transform explains most of the displacements (thresholded..)
        if len(mask[mask > 0]) > min_matches:
            print "Enough match for motion compensation"
            return transform, True

        else:
            print "Not finding enough matchs - {}".format(len(mask[mask > 0]))
            return None, False
Example #27
File: frame.py, Project: FilippoC/pke
    def getOpticalFlow(self, frame2):
        # see http://jayrambhia.wordpress.com/2012/08/09/lucas-kanade-tracker/
        lk_params = dict(
            winSize  = (10, 10), 
            maxLevel = 5, 
            criteria = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0.03)
        )
        feature_params = dict(
            maxCorners = 3000, 
            # originally 0.5
            qualityLevel = 0.1, 
            minDistance = 3, 
            blockSize = 3
        )

        frame1_gray = cv2.cvtColor(self.getCVFrame(), cv2.cv.CV_BGR2GRAY)
        frame2_gray = cv2.cvtColor(frame2.getCVFrame(), cv2.cv.CV_BGR2GRAY)

        pt = cv2.goodFeaturesToTrack(frame1_gray, **feature_params)
        p0 = np.float32(pt).reshape(-1, 1, 2)
        
        p1, st, err = cv2.calcOpticalFlowPyrLK(frame1_gray, frame2_gray, p0, None, **lk_params)

        mean_motion = np.mean(np.absolute(np.subtract(p0, p1)))

        return mean_motion
Example #28
def getCornerList(img):
  gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)

  corners = cv2.goodFeaturesToTrack(gray,50,0.05,0)
  corners = np.int0(corners)

  return corners
Example #29
    def initLK(self):
        self.is_person, self.is_known_person = False, False
        self.person_id, self.confidence = -1, 0
        self.tracks = []

        frame = self._video_source.getFrame()
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        self._face_rect = self._face_finder.findLargestFaceInImage(frame)

        if self._face_rect:
            self.is_person = True
            face_img = util.subimage(frame, self._face_rect)
            self.is_known_person, self.person_id, self.confidence = self._face_identifier.predict(face_img)
            mask = np.zeros_like(gray)
            mask[:] = 255
            for mx, my in [np.int32(tr[-1]) for tr in self.tracks]:
                cv2.circle(mask, (mx, my), 5, 0, -1)
            p = cv2.goodFeaturesToTrack(gray, mask=mask, **self.feature_params)
            if p is not None:
                (point1, point2) = self._face_rect.pt1, self._face_rect.pt2
                ptScaleX = (int(point2.x - point1.x) * 0)
                ptScaleY = (int(point2.y - point1.y) * 0)
                for px, py in np.float32(p).reshape(-1, 2):
                    if (point1.x + ptScaleX <= px <= point2.x - ptScaleX) and (point1.y + ptScaleY <= py <= point2.y - ptScaleY):
                        self.tracks.append([(px, py)])
                self.resetLK = False
        self.prev_gray = gray.copy()
    def getPrevTransform(self):
        ret, prev = self.cap.read()
        prev_gray = cv2.cvtColor(prev, cv2.COLOR_BGR2GRAY)
        
        p0 = cv2.goodFeaturesToTrack(prev_gray, mask = None, **feature_params)
               
        while(True):
            ret, cur = self.cap.read()
            #print cur
            if cur is None:
                break
            
            cur_gray = cv2.cvtColor(cur,cv2.COLOR_BGR2GRAY)          
            
            #print prev_corner          
            
            img0, img1 = prev_gray, cur_gray
            
            # calculate optical flow
            p1, st, err = cv2.calcOpticalFlowPyrLK(img0, img1, p0, None, **lk_params)
                      
            # Select good points
            good_new = p1[st==1]
            good_old = p0[st==1]

            T = cv2.estimateRigidTransform(good_new, good_old, False)
                        
            print "---"
import numpy as np
import cv2

"""
Shi Tomasi - Good Features To Track;
    *Similar to Harris Corner Detector but gives better results.
    *We can provide the no. of corners to detect
"""

image = cv2.imread('../images/pic1.png')
image_gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

# find the strongest corners (as specified by maxCorners)
corners = cv2.goodFeaturesToTrack(image_gray, 50, 0.01, 10)  # 50 = maxCorners: keep the 50 strongest corners

# convert the corners into int64 (int0)
corners = np.int0(corners)

# loop through all the corners
for corner in corners:
    x, y = corner.ravel()  # find the coordinates of the corner
    cv2.circle(image, (x, y), 3, 255, -1)  # draw a dot in the detected corner

cv2.imshow('Result', image)

if cv2.waitKey(0) & 0xFF == 27:
    cv2.destroyAllWindows()
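# For comparison, the same API can run the Harris detector by passing
# useHarrisDetector=True (a minimal sketch; the k value is the usual default
# from the OpenCV docs, not from this example):
harris = cv2.goodFeaturesToTrack(image_gray, 50, 0.01, 10, useHarrisDetector=True, k=0.04)
harris = np.int0(harris)
print('Shi-Tomasi corners:', len(corners), '| Harris corners:', len(harris))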
Example #32
def run():
    parser = argparse.ArgumentParser(description='This sample demonstrates Lucas-Kanade Optical Flow calculation. \
                                                  The example file can be downloaded from: \
                                                  https://www.bogotobogo.com/python/OpenCV_Python/images/mean_shift_tracking/slow_traffic_small.mp4')
    parser.add_argument('image', type=str, help='path to image file')
    parser.add_argument('display', type=bool, help='Display output')  # note: bool() treats any non-empty string as True
    args = parser.parse_args()

    display = bool(args.display)

    # params for ShiTomasi corner detection
    feature_params = dict( maxCorners = 100,
                           qualityLevel = 0.2,
                           minDistance = 7,
                           blockSize = 7 )

    # Parameters for lucas kanade optical flow
    lk_params = dict(winSize=(15, 15),
                     maxLevel=2,
                     criteria=(cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0.03))

    # Create some random colors
    color = np.random.randint(0, 255, (100, 3))

    cap = cv2.VideoCapture(args.image)

    # Start 3 seconds in
    start_frame_number = 70
    cap.set(cv2.CAP_PROP_POS_FRAMES, start_frame_number)

    # Take first frame and find corners in it
    ret, fullFrame = cap.read()
    fencerFrame = fullFrame[fencersTopCrop:fencersBottomCrop, 0:640]
    cameraFrame = fullFrame[cameraTopCrop:cameraBottomCrop, 0:640]

    fshape = fullFrame.shape
    fheight = fshape[0]
    fwidth = fshape[1]
    fourcc = cv2.VideoWriter_fourcc(*'XVID')
    out = cv2.VideoWriter('output.avi', fourcc, 20.0, (fwidth, fheight))

    fencer_old_frame = fencerFrame.copy()
    fencer_old_gray = cv2.cvtColor(fencer_old_frame, cv2.COLOR_BGR2GRAY)

    camera_old_frame = cameraFrame.copy()
    camera_old_gray = cv2.cvtColor(camera_old_frame, cv2.COLOR_BGR2GRAY)

    # Create a mask image for drawing purposes
    mask = np.zeros_like(fullFrame)
    mask2 = np.zeros_like(fullFrame)

    # get fencer points from pose detection
    allPoints = detectPosesInImage(fencerFrame)

    # nest all the points so that calcOpticalFlowPyrLK works
    fencer1p0 = np.zeros(shape=(18, 1, 2), dtype=np.float32)
    fencer2p0 = np.zeros(shape=(18, 1, 2), dtype=np.float32)
    for i in range(len(allPoints[0])):
        fencer1p0[i][0] = allPoints[0][i]
    for i in range(len(allPoints[1])):
        fencer2p0[i][0] = allPoints[1][i]

    # Get camera points from good features to track
    camerap0 = cv2.goodFeaturesToTrack(camera_old_gray, mask=None, **feature_params)

    totalDisplacement = 0
    totalMovement = 0
    frameCount = 0
    cleanAverageX = 0

    fencer1Positions = []
    fencer1PistePositions = []
    fencer2Positions = []
    fencer2PistePositions = []
    cameraPositions = []

    averageFencerXPositionDifference = 0

    while 1:
        ret, fullFrame = cap.read()

        if not ret:
            break

        fencerFrame = fullFrame[fencersTopCrop:fencersBottomCrop, 0:640]
        cameraFrame = fullFrame[cameraTopCrop:cameraBottomCrop, 0:640]

        frameCount = frameCount + 1

        fencer_frame_gray = cv2.cvtColor(fencerFrame, cv2.COLOR_BGR2GRAY)
        camera_frame_gray = cv2.cvtColor(cameraFrame, cv2.COLOR_BGR2GRAY)

        if fencer1p0 is None:
            break
        if fencer2p0 is None:
            break
        if camerap0 is None:
            break

        # calculate optical flow
        fencer1p1, st1, err1 = cv2.calcOpticalFlowPyrLK(fencer_old_gray, fencer_frame_gray, fencer1p0, None, **lk_params)
        fencer2p1, st2, err2 = cv2.calcOpticalFlowPyrLK(fencer_old_gray, fencer_frame_gray, fencer2p0, None, **lk_params)
        camerap1, cameraStatus, err3 = cv2.calcOpticalFlowPyrLK(camera_old_gray, camera_frame_gray, camerap0, None, **lk_params)

        img = np.zeros_like(fullFrame)
        img = cv2.add(img, fullFrame)

        if camerap1 is not None:
            # Select good points
            camera_good_new = camerap1[cameraStatus==1]
            camera_good_old = camerap0[cameraStatus==1]

            # get the average change
            totalX = 0
            pointCount = 0
            for i, (new, old) in enumerate(zip(camera_good_new, camera_good_old)):
                a, b = new.ravel()
                c, d = old.ravel()
                totalX = (c - a) + totalX
                pointCount = pointCount + 1

            averageX = 0
            if pointCount > 0:
                averageX = totalX / pointCount

            cleanTotalX = 0
            cleanPointCount = 0
            for i, (new, old) in enumerate(zip(camera_good_new, camera_good_old)):
                a, b = new.ravel()
                c, d = old.ravel()

                usePoint = False
                if averageX > 0:
                    if (c - a) * (1 + cameraThreshold) > averageX > (c - a) * (1 - cameraThreshold):
                        usePoint = True
                else:
                    if (c - a) * (1 + cameraThreshold) < averageX < (c - a) * (1 - cameraThreshold):
                        usePoint = True

                if usePoint:
                    cleanTotalX = cleanTotalX + (c - a)
                    cleanPointCount = cleanPointCount + 1

            if cleanPointCount > 0:
                cleanAverageX = cleanTotalX / cleanPointCount

            totalDisplacement = totalDisplacement + cleanAverageX
            totalMovement = totalMovement + abs(cleanAverageX)

            cameraPositions.append(int(totalDisplacement))

            cameraMask = np.zeros_like(fullFrame)
            cv2.circle(cameraMask, (int(-totalDisplacement + 320), 30), 8, (0, 255, 255), thickness=-1, lineType=cv2.FILLED)

            img = cv2.add(img, cameraMask)

        else:
            cameraPositions.append(-1)
            camera_good_new = camerap0[cameraStatus == 1]

        if fencer1p1 is None or fencer2p1 is None:
            continue

        # Select good points
        fencer1_good_new = fencer1p1[st1 == 1]

        fencer1_good_old = fencer1p0[st1 == 1]

        fencer2_good_new = fencer2p1[st2 == 1]
        fencer2_good_old = fencer2p0[st2 == 1]

        lungeMask = np.zeros_like(fullFrame)

        # draw the tracks
        fencer1Points = []
        fencer1PistePoints = []
        for i, (new, old) in enumerate(zip(fencer1_good_new, fencer1_good_old)):
            a, b = new.ravel()
            c, d = old.ravel()
            b = np.int(b + fencersTopCrop)
            d = np.int(d + fencersTopCrop)
            fencer1Points.append([int(a), int(b)])
            fencer1PistePoints.append([int(a + totalDisplacement), int(b)])
            mask = cv2.line(mask, (int(a), int(b)), (int(c), int(d)), color[i].tolist(), 2)


        fencer1Positions.append(fencer1Points)
        fencer1PistePositions.append(fencer1PistePoints)

        # draw the tracks
        fencer2Points = []
        fencer2PistePoints = []
        for i, (new, old) in enumerate(zip(fencer2_good_new, fencer2_good_old)):
            a, b = new.ravel()
            c, d = old.ravel()
            b = np.int(b + fencersTopCrop)
            d = np.int(d + fencersTopCrop)
            fencer2Points.append([int(a), int(b)])
            fencer2PistePoints.append([int(a + totalDisplacement), int(b)])
            mask2 = cv2.line(mask2, (int(a), int(b)), (int(c), int(d)), color[i].tolist(), 2)

        fencer2Positions.append(fencer2Points)
        fencer2PistePositions.append(fencer2PistePoints)

        # X position difference of the fencers' necks
        averageFencerXPositionDifference = (averageFencerXPositionDifference + (
                    fencer1Points[1][0] - fencer2Points[1][0])) / 2

        if detectLunge(fencer1Points):
            cv2.circle(lungeMask, (10, 10), 8, (255, 255, 255), thickness=-1, lineType=cv2.FILLED)

        if detectLunge(fencer2Points):
            cv2.circle(lungeMask, (40, 10), 8, (255, 255, 255), thickness=-1, lineType=cv2.FILLED)

        img = cv2.add(img, mask)
        img = cv2.add(img, mask2)
        img = cv2.add(img, lungeMask)

        if display:
            cropMask = np.zeros_like(fullFrame)
            cv2.line(cropMask, (0, cameraTopCrop), (640, cameraTopCrop), (0, 255, 255), 2)
            cv2.line(cropMask, (0, cameraBottomCrop), (640, cameraBottomCrop), (0, 255, 255), 2)
            cv2.line(cropMask, (0, fencersTopCrop), (640, fencersTopCrop), (255, 0, 255), 2)
            cv2.line(cropMask, (0, fencersBottomCrop), (640, fencersBottomCrop), (255, 0, 255), 2)

            cv2.line(cropMask, (320, 0), (320, 360), (255, 255, 0), 2)
            cv2.line(cropMask, (160, 0), (160, 360), (255, 255, 0), 2)
            cv2.line(cropMask, (480, 0), (480, 360), (255, 255, 0), 2)

            img = cv2.add(img, cropMask)

            out.write(img)
            cv2.imshow('frame',img)
            k = cv2.waitKey(30) & 0xff
            if k == 27:
               break

        # Now update the previous frame and previous points
        fencer_old_gray = fencer_frame_gray.copy()
        camera_old_gray = camera_frame_gray.copy()
        fencer1p0 = fencer1_good_new.reshape(-1,1,2)
        fencer2p0 = fencer2_good_new.reshape(-1,1,2)
        camerap0 = camera_good_new.reshape(-1,1,2)

    cap.release()
    out.release()

    if averageFencerXPositionDifference < 0:
        leftFencerPositions = fencer1Positions
        leftFencerPistePositions = fencer1PistePositions
        rightFencerPositions = fencer2Positions
        rightFencerPistePositions = fencer2PistePositions
    else:
        leftFencerPositions = fencer2Positions
        leftFencerPistePositions = fencer2PistePositions
        rightFencerPositions = fencer1Positions
        rightFencerPistePositions = fencer1PistePositions

    outputJson = ''

    outputJson = outputJson + '{'

    outputJson = outputJson + '"leftFencer":'
    outputJson = outputJson + '{'
    # outputJson = outputJson + '"positions":'
    # outputJson = outputJson + json.dumps(leftFencerPositions)
    # outputJson = outputJson + ','
    outputJson = outputJson + '"pistePositions":'
    outputJson = outputJson + json.dumps(leftFencerPistePositions)
    outputJson = outputJson + '}'
    outputJson = outputJson + ','

    outputJson = outputJson + '"rightFencer":'
    outputJson = outputJson + '{'
    # outputJson = outputJson + '"positions":'
    # outputJson = outputJson + json.dumps(rightFencerPositions)
    # outputJson = outputJson + ','
    outputJson = outputJson + '"pistePositions":'
    outputJson = outputJson + json.dumps(rightFencerPistePositions)
    outputJson = outputJson + '}'
    outputJson = outputJson + ','

    outputJson = outputJson + '"camera":'
    outputJson = outputJson + json.dumps(cameraPositions)

    outputJson = outputJson + '}'

    outputJson = outputJson.replace(' ', '')

    print(outputJson)
Example #33
import numpy as np
import cv2

origImg = cv2.imread("SampleImages/PuzzlesAndGames/puzzle4.png")
if origImg is None:
    print("Image not loaded successfully, try again")

grayImg = cv2.cvtColor(origImg, cv2.COLOR_BGR2GRAY)
grayImg = np.float32(grayImg)

goodFeats = cv2.goodFeaturesToTrack(grayImg, 400, 0.05, 3)

# print (goodFeats)
# print (goodFeats[1])
# print (goodFeats[0,0,0])

print(goodFeats.shape)

for feat in goodFeats:
    center = (int(feat[0, 0]), int(feat[0, 1]))
    cv2.circle(origImg, center, 3, (255, 255, 0), 1)

cv2.imshow("Corners detected", origImg)
cv2.waitKey(80000)
Example #34
def CmList(fi, fjList):

    # fi = cv2.cvtColor(fi, cv2.COLOR_BGR2GRAY)
    # fj = cv2.cvtColor(fj, cv2.COLOR_BGR2GRAY)

    d = np.sqrt(fi.shape[0] * fi.shape[0] + fi.shape[1] * fi.shape[1])
    tc = 0.1 * d
    gamma = 0.5 * d

    # params for ShiTomasi corner detection
    feature_params = dict(maxCorners=100,
                          qualityLevel=0.3,
                          minDistance=7,
                          blockSize=7)

    # Parameters for lucas kanade optical flow
    lk_params = dict(winSize=(15, 15),
                     maxLevel=2,
                     criteria=(cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT,
                               10, 0.03))

    p0 = cv2.goodFeaturesToTrack(fi, mask=None, **feature_params)

    motionCostArr = np.zeros(fjList.size)
    it = 0

    for fj in fjList:

        # calculate optical flow and
        # Try to find homography
        try:
            p1, st, err = cv2.calcOpticalFlowPyrLK(fi, fj, p0, None,
                                                   **lk_params)
            good_new = p1[st == 1]
            good_old = p0[st == 1]
            # print good_old
            if len(good_old) > 10:
                h, status = cv2.findHomography(good_old, good_new)

                Cm = 0.0

                pt1 = np.ones([3, 1])
                pt2 = np.ones([3, 1])
                pt1 = np.asmatrix(pt1)
                pt2 = np.asmatrix(pt2)

                for x in xrange(0, good_old.shape[0]):
                    pt1[0, 0] = good_old[x, 0]
                    pt1[1, 0] = good_old[x, 1]
                    pt1[2, 0] = 1

                    pt1 = np.mat(h) * pt1
                    pt1[0, 0] /= pt1[2, 0]
                    pt1[1, 0] /= pt1[2, 0]
                    pt1[2, 0] = 1

                    pt2[0, 0] = good_new[x, 0]
                    pt2[1, 0] = good_new[x, 1]
                    pt2[2, 0] = 1

                    Cm += np.linalg.norm(pt2 - pt1)

                Cm /= good_old.shape[0]

                pt1[0, 0] = fi.shape[1] / 2
                pt1[1, 0] = fi.shape[0] / 2
                pt1[2, 0] = 1

                pt2 = np.mat(h) * pt1
                pt2[0, 0] /= pt2[2, 0]
                pt2[1, 0] /= pt2[2, 0]
                pt2[2, 0] = 1

                C0 = np.linalg.norm(pt2 - pt1)

                # print Cm , C0 , tc , gamma

                if Cm < tc:
                    motionCostArr[it] = C0
                else:
                    motionCostArr[it] = gamma
            else:
                motionCostArr[it] = gamma

            it += 1

        # exception raised when a homography cannot be found
        except Exception, e:
            motionCostArr[it] = gamma
            it += 1

    return motionCostArr
Example #35
    print("Error!! read video failed!")

# parameters for corners detection
featureParams = dict(maxCorners=100,
                     qualityLevel=0.3,
                     minDistance=7,
                     blockSize=7)
# parameters for optical flow estimation
lkParams = dict(winSize=(15, 15),
                maxLevel=2,
                criteria=(cv.TERM_CRITERIA_EPS | cv.TERM_CRITERIA_COUNT, 10,
                          0.03))

ret, prev = cap.read()
prevGray = cv.cvtColor(prev, cv.COLOR_BGR2GRAY)
p0 = cv.goodFeaturesToTrack(prevGray, mask=None, **featureParams)

while True:
    ret, frame = cap.read()  # capture frame by frame
    if not ret:  # ret is True/False, indicating whether the capture succeeded
        break

    gray = cv.cvtColor(frame, cv.COLOR_BGR2GRAY)

    #calculate optical flow
    p1, st, err = cv.calcOpticalFlowPyrLK(prevGray, gray, p0, None, **lkParams)
    '''
    p1 - tracked point positions
    st - status flags (1 if the flow for a point was found, else 0)
    '''
    goodPoints = p1[st >= 0.7]  # st is 0/1, so this keeps points with st == 1
# out = cv2.VideoWriter("D:\Study\Datasets\\Stabledmorecam.avi", fourcc, fps, (w,h))

# Read first frame
_, prev = cap.read()

# Convert frame to grayscale
prev_gray = cv2.cvtColor(prev, cv2.COLOR_BGR2GRAY)

# Pre-define transformation-store array
transforms = np.zeros((n_frames - 1, 3), np.float32)

for i in range(n_frames - 2):
    # Detect feature points in previous frame
    prev_pts = cv2.goodFeaturesToTrack(prev_gray,
                                       maxCorners=200,
                                       qualityLevel=0.5,
                                       minDistance=5,
                                       blockSize=2)

    # Read next frame
    success, curr = cap.read()
    if not success:
        break

    # Convert to grayscale
    curr_gray = cv2.cvtColor(curr, cv2.COLOR_BGR2GRAY)

    # Calculate optical flow (i.e. track feature points)
    curr_pts, status, err = cv2.calcOpticalFlowPyrLK(prev_gray, curr_gray,
                                                     prev_pts, None)
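    # The fragment stops here; transforms has three columns (dx, dy, da). A
    # typical continuation (a sketch, not part of the original fragment) fits
    # a rigid transform to the matched points with cv2.estimateAffinePartial2D,
    # the modern replacement for the deprecated cv2.estimateRigidTransform:
    m, _ = cv2.estimateAffinePartial2D(prev_pts[status == 1], curr_pts[status == 1])
    if m is not None:  # the fit can fail and return None
        dx, dy = m[0, 2], m[1, 2]          # translation
        da = np.arctan2(m[1, 0], m[0, 0])  # rotation angle
        transforms[i] = [dx, dy, da]
    prev_gray = curr_gray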
DISTANCE_THRESH = 20


# compute Euclidean distance (distance formula)
def d2(p, q):
    return np.linalg.norm(np.array(p) - np.array(q))


# load the video and the first frame
video_cap = cv2.VideoCapture('test.mov')
_, frame = video_cap.read()
frame_counter = 1

# convert the frame to grayscale and choose points to track
old_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
prev_pts = cv2.goodFeaturesToTrack(image=old_gray, **feature_params)
"""
for pt in prev_pts:
    x, y = pt.ravel()
    cv2.circle(frame, (x, y), 5, (0,255,0), -1)
cv2.imshow('features', frame)
cv2.waitKey(0)
"""

# mask for drawing lines
mask = np.zeros_like(frame)
#Create a mask for the speed
mask_text = np.zeros_like(frame)

#UI loop
while True:
Example #38
    def image_callback(self, msg):
        self.image = self.bridge.imgmsg_to_cv2(msg, desired_encoding='bgr8')
        self.ROI = self.image[226:256, 0:876]
        left_frame = self.image[160:305, 101:328]
        right_frame = self.image[160:305, 548:775]
        ''' ********** Optical Flow: initial frame *********** '''
        if self.initialize_flag == False:
            # left view
            self.prev_gray_left = cv2.cvtColor(left_frame, cv2.COLOR_BGR2GRAY)
            self.prev_left = cv2.goodFeaturesToTrack(self.prev_gray_left,
                                                     mask=None,
                                                     **self.feature_params)

            # right view
            self.prev_gray_right = cv2.cvtColor(right_frame,
                                                cv2.COLOR_BGR2GRAY)
            self.prev_right = cv2.goodFeaturesToTrack(self.prev_gray_right,
                                                      mask=None,
                                                      **self.feature_params)

            # strip (ROI) view
            self.prev_gray_ROI = cv2.cvtColor(self.ROI, cv2.COLOR_BGR2GRAY)
            self.prev_point = cv2.goodFeaturesToTrack(self.prev_gray_ROI,
                                                      mask=None,
                                                      **self.feature_params)

            self.initialize_flag = True
            '''  *********** Optical Flow: tracking frames ************ '''
        else:
            # left view
            self.prev_gray_left, self.prev_left, self.LY, self.LX = ml.direction_detect(left_frame, \
                self.prev_gray_left, self.prev_left, self.feature_params, self.lk_params, self.color)

            # right view
            self.prev_gray_right, self.prev_right, self.RY, self.RX = ml.direction_detect(right_frame, \
                self.prev_gray_right, self.prev_right, self.feature_params, self.lk_params, self.color)

            # strip (ROI) view
            self.prev_gray_ROI, self.prev_point, temp = ml.GetLength(self.ROI, self.prev_gray_ROI, \
                self.prev_point, self.feature_params, self.lk_params, self.color)

            # compute the optical-flow angles
            if self.LY != 0 and self.LX != 0 and self.RY != 0 and self.RX != 0:

                Lx = np.dot(self.H, self.Left_kf_x.predict())[0]
                self.Left_kf_x.update(self.LX)

                Ly = np.dot(self.H, self.Left_kf_y.predict())[0]
                self.Left_kf_y.update(self.LY)

                Rx = np.dot(self.H, self.Right_kf_x.predict())[0]
                self.Right_kf_x.update(self.RX)

                Ry = np.dot(self.H, self.Right_kf_y.predict())[0]
                self.Right_kf_y.update(self.RY)

                self.Left_Angle = ml.angular([Ly, Lx])
                self.Right_Angle = ml.angular([Ry, Rx])

            # get the direction of travel      self.RotateFlag (1: right, 2: left, 3: forward, 4: backward)
            self.RotateFlag = ml.GetDirectionOfTravel(self.Left_Angle,
                                                      self.Right_Angle)
            '''******************** skip the first 5 frames of turn detection ******************** '''
            if self.dir_flag <= 5:
                self.now_d, self.pre_d, self.pre_d_sub = self.RotateFlag, self.RotateFlag, self.RotateFlag
                self.dir_flag += 1

            else:
                # current decision differs from the previous stage
                if self.pre_d != self.RotateFlag:

                    # current decision differs from the previous frame
                    if self.pre_d_sub != self.RotateFlag:
                        self.count = 0

                    # current decision matches the previous frame
                    else:
                        self.count += 1
                    self.pre_d_sub = self.RotateFlag

                # current decision matches the previous stage
                else:
                    self.count = 0

                if self.now_d == 1 or self.now_d == 2:
                    self.TempOfImg[self.OfImgNum] = self.image
                    self.OfImgNum += 1
                '''********************  filter again: require the same change 5 times in a row ********************'''
                if self.count >= 5:
                    # compute the time interval since the previous node
                    if self.now_d != 3:
                        self.UnionTimeStart = rospy.get_time()
                        self.UnionTimeInterval = self.UnionTimeStart - self.UnionTimeEnd
                        self.UnionTimeEnd = rospy.get_time()
                    self.now_d = self.RotateFlag

                    if self.now_d == 3:
                        self.cornor_direction = self.pre_d
                        self.SaveFlagOf = 1

                    self.count = 0

                # Store this stage's result
                self.pre_d = self.now_d

                # Compute the angle and integrate the direction; the returned self.angle is the camera's rotation angle
                self.OF_sum, self.DirectionNow, self.Direction, self.__angle_flag, self.angle = ml.GetRotateAng(self.now_d, \
                    self.OF_sum, temp, self.__angle_flag, self.angle)

                text = str(self.DirectionNow)
                cv2.putText(self.image, text, (40, 50), cv2.FONT_HERSHEY_PLAIN,
                            2.0, (0, 0, 255), 2)
                cv2.imshow('a', self.image)
                cv2.waitKey(1)
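The Left_kf_x/Right_kf_y filters and the measurement matrix self.H above are defined elsewhere in this class. A minimal sketch of what they might be, assuming a plain 1-D constant-velocity Kalman filter whose predict() returns the state [position, velocity], so that np.dot(H, predict())[0] picks out the smoothed flow component:

import numpy as np

class KF1D:
    """A sketch: constant-velocity Kalman filter over one flow component."""
    def __init__(self, dt=1.0, q=1e-3, r=1e-1):
        self.x = np.zeros(2)                        # state: [position, velocity]
        self.P = np.eye(2)                          # state covariance
        self.F = np.array([[1.0, dt], [0.0, 1.0]])  # constant-velocity transition
        self.H = np.array([[1.0, 0.0]])             # measure position only
        self.Q = q * np.eye(2)                      # process noise
        self.R = np.array([[r]])                    # measurement noise

    def predict(self):
        self.x = self.F @ self.x
        self.P = self.F @ self.P @ self.F.T + self.Q
        return self.x

    def update(self, z):
        y = z - self.H @ self.x                     # innovation
        S = self.H @ self.P @ self.H.T + self.R
        K = self.P @ self.H.T @ np.linalg.inv(S)    # Kalman gain
        self.x = self.x + (K @ y).ravel()
        self.P = (np.eye(2) - K @ self.H) @ self.P

Under that assumption, update(self.LX) folds the raw flow measurement into the state, so brief spikes in LX/LY are damped before the angles are computed.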
Example #39
YELLOW_BUOY_MIN = np.array([25, 250, 250])
YELLOW_BUOY_MAX = np.array([27, 255, 255])

MIN_LENGTH = 15.00

arrows = []

# Process the map image
map_image = cv2.imread(MAP, 1)
hsv_img = cv2.cvtColor(map_image, cv2.COLOR_BGR2HSV)
map_threshed = cv2.cvtColor(map_image, cv2.COLOR_BGR2HSV)
map_mask = cv2.inRange(map_threshed, TRAFFIC_MIN1, TRAFFIC_MAX1)

corners = cv2.goodFeaturesToTrack(map_mask, 500, 0.02, 10)
corners = np.int0(corners)
corners = np.sort(corners, axis=1)  # np.sort returns a sorted copy, so assign it back


def distance(point1, point2):
    d = m.sqrt((point1.x - point2.x)**2 + (point1.y - point2.y)**2)
    return d


# Pair items in an array with their closest neighbour; an existing pairing is kicked out only when a closer claimer appears, so some points may be left without a pair (see the sketch after midpoint below).
def midpoint(x1, x2, y1, y2):
    x = (x1 + x2) / 2  # the midpoint is the average, not half the difference
    y = (y1 + y2) / 2

    return x, y
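A minimal sketch of the pairing rule described in the comment above, reusing distance() and assuming points expose .x/.y: each point claims its nearest neighbour, and a claim is overwritten only when a closer claimer appears, so some points can end up unpaired.

def pair_by_nearest(points):
    if len(points) < 2:
        return []
    best = {}  # claimed index -> (claimer index, distance)
    for i, p in enumerate(points):
        # nearest neighbour of p among the other points
        d, j = min((distance(p, q), k) for k, q in enumerate(points) if k != i)
        # keep the claim only if no closer claimer already exists
        if j not in best or d < best[j][1]:
            best[j] = (i, d)
    return [(claimer, claimed) for claimed, (claimer, _) in best.items()]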
Example #40
def optical_flow_from_camera():
    cap = cv2.VideoCapture(0)

    # Parameters for Shi-Tomasi corner detection
    feature_params = dict(maxCorners=100,
                          qualityLevel=0.3,
                          minDistance=7,
                          blockSize=7)
    # Parameters for Lucas-Kanade optical flow
    # maxLevel is the number of pyramid levels to use
    lk_params = dict(winSize=(15, 15),
                     maxLevel=2,
                     criteria=(cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT,
                               10, 0.03))
    # Generate random colors for drawing
    color = np.random.randint(0, 255, (100, 3))

    # Grab the first frame and find corners in it
    _, old_frame = cap.read()
    old_frame = cv2.flip(old_frame, 1)
    old_gray = cv2.cvtColor(old_frame, cv2.COLOR_BGR2GRAY)
    p0 = cv2.goodFeaturesToTrack(old_gray, mask=None, **feature_params)

    # Create a mask image for drawing the flow tracks later
    mask = np.zeros_like(old_frame)

    while True:
        ret, frame = cap.read()
        if ret:
            frame = cv2.flip(frame, 1)  # flip only after confirming the read succeeded
            frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            # Compute the new positions of the tracked corners
            p1, st, err = cv2.calcOpticalFlowPyrLK(old_gray, frame_gray, p0,
                                                   None, **lk_params)
            # Select good points
            good_new = p1[st == 1]
            good_old = p0[st == 1]
            # Draw the corner tracks
            for i, (new, old) in enumerate(zip(good_new, good_old)):
                a, b = new.ravel()
                c, d = old.ravel()
                mask = cv2.line(mask, (int(a), int(b)), (int(c), int(d)),
                                color[i].tolist(), 2)
                frame = cv2.circle(frame, (int(a), int(b)), 5,
                                   color[i].tolist(), -1)

            img = cv2.add(frame, mask)

            cv2.imshow('frame', img)
            if cv2.waitKey(30) & 0xff == ord("q"):
                break

            # Update the previous frame and the previous point positions
            old_gray = frame_gray.copy()
            p0 = good_new.reshape(-1, 1, 2)
        else:
            break


    cv2.destroyAllWindows()
    cap.release()

Example #41
import numpy as np
import cv2

img = cv2.imread('opencv-template-matching-python-tutorial.jpg')
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
gray = np.float32(gray)

corners = cv2.goodFeaturesToTrack(gray, 10000, 0.01, 10)
corners = np.int0(corners)

for corner in corners:
    x, y = corner.ravel()
    cv2.circle(img, (x, y), 3, 255, -1)

cv2.imshow('Corner', img)
cv2.waitKey(0)
cv2.destroyAllWindows()
Example #42
    t += 1
    print('t=', t)
    imgC = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    imgC = cv2.GaussianBlur(imgC, (5, 5), 0.5)
    #3-1
    if mouse_status == 2:
        x1, y1, x2, y2 = roi
        cv2.rectangle(frame, (x1, y1), (x2, y2), (0, 0, 255), 2)
    #3-2
    if mouse_status == 3:
        print('initialize....')
        mouse_status = 0
        x1, y1, x2, y2 = roi
        roi_mask[:, :] = 0
        roi_mask[y1:y2, x1:x2] = 1
        p1 = cv2.goodFeaturesToTrack(imgC, mask=roi_mask, **params)
        if p1 is not None and len(p1) >= 4:  # goodFeaturesToTrack can return None
            p1 = cv2.cornerSubPix(imgC, p1, (5, 5), (-1, -1), term_crit)
            rect = cv2.minAreaRect(p1)
            box_pts = cv2.boxPoints(rect).reshape(-1, 1, 2)
            tracking_start = True
    #3-3
    if tracking_start:
        p2, st, err = cv2.calcOpticalFlowPyrLK(imgP, imgC, p1, None, **params2)
        p1r, st, err = cv2.calcOpticalFlowPyrLK(imgC, imgP, p2, None,
                                                **params2)
        d = abs(p1 - p1r).reshape(-1, 2).max(-1)
        stat = d < 1.0  # 1.0 is distance threshold
        good_p2 = p2[stat == 1].copy()
        good_p1 = p1[stat == 1].copy()
        for x, y in good_p2.reshape(-1, 2):
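The excerpt above cuts off mid-loop. A self-contained sketch of the same forward-backward consistency check, with hypothetical LK parameters standing in for the original's params/params2:

import cv2
import numpy as np

lk = dict(winSize=(15, 15), maxLevel=2,
          criteria=(cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0.03))

def fb_filtered_tracks(imgP, imgC, max_fb_error=1.0):
    """Track corners from imgP to imgC, keeping only points whose
    backward re-track lands within max_fb_error pixels of the start."""
    p1 = cv2.goodFeaturesToTrack(imgP, maxCorners=100, qualityLevel=0.3,
                                 minDistance=7)
    if p1 is None:
        return None, None
    p2, st, err = cv2.calcOpticalFlowPyrLK(imgP, imgC, p1, None, **lk)
    p1r, st, err = cv2.calcOpticalFlowPyrLK(imgC, imgP, p2, None, **lk)
    d = abs(p1 - p1r).reshape(-1, 2).max(-1)  # forward-backward error per point
    keep = d < max_fb_error
    return p1[keep], p2[keep]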
Example #43
def todo(path):
    import numpy as np
    import cv2
    from pythonFile import click_pct
    import os

    padding = 10  # radius of the feature-detection region around a click

    # Input video settings
    videoName = path[path.rfind('/') + 1:]
    cap = cv2.VideoCapture(path)
    print(videoName)

    # Video properties
    width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
    height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
    fps = cap.get(cv2.CAP_PROP_FPS)
    rot = 0
    if width > height:
        rot = 1
        width, height = height, width

    print(videoName[:-4])

    # Shi-Tomasi corner detection parameters
    feature_params = dict(
        maxCorners=255,  # number of corners to keep, int
        qualityLevel=0.2,  # minimal accepted quality, as a fraction of the best corner score, double
        minDistance=7,  # reject corners closer than this distance, double
        blockSize=7,  # neighbourhood size used for the corner measure, int
        useHarrisDetector=False,  # False selects the Shi-Tomasi measure
        # k=0.04,         # only used by the Harris measure
    )

    # Read the first frame
    ret, first_frame = cap.read()
    if rot == 1:
        first_frame = np.rot90(first_frame, -1)

    # Convert to grayscale
    first_gray = cv2.cvtColor(first_frame, cv2.COLOR_BGR2GRAY)

    # Find feature points in the first frame
    prev_points = cv2.goodFeaturesToTrack(
        image=first_gray,  # input image
        mask=None,  # corners where mask == 0 are ignored
        **feature_params)
    flow_layer = np.zeros_like(first_frame)
    # Initially mark every point as noise
    noise = [0] * len(prev_points)
    hozon = noise[:]  # keep a copy, not an alias, or the convergence check below never fires

    for i in prev_points:
        flow_layer = cv2.circle(
            flow_layer,  # image to draw on
            (int(i[0][0]), int(i[0][1])),  # circle centre
            2,  # radius
            color=(0, 0, 255),  # colour
            thickness=3  # line thickness
        )
    frame = cv2.add(first_frame, flow_layer)
    flow_layer2 = np.zeros_like(first_frame)

    #######################################
    # Treat clicked feature points as valid
    #######################################
    while True:
        # Save the clicked coordinates
        points = np.array(click_pct.give_coorList(frame), dtype='int')
        # Mark feature points near each click as valid
        for p in points:
            area = [
                p[0] - padding, p[0] + padding, p[1] - padding, p[1] + padding
            ]
            for index, prev in enumerate(prev_points):
                if (noise[index] == 0) and (area[0] <= int(prev[0][0])) and (
                        area[1] >= int(prev[0][0])) and (area[2] <= int(
                            prev[0][1])) and (area[3] >= int(prev[0][1])):
                    noise[index] = 1
                    print(10)
                    break

        for index, prev in enumerate(prev_points):
            if noise[index] == 0:
                flow_layer2 = cv2.circle(
                    flow_layer2,  # image to draw on
                    (int(prev[0][0]), int(prev[0][1])),  # circle centre
                    5,  # radius
                    color=(0, 0, 255),  # red: still considered noise
                    thickness=3  # line thickness
                )
            elif noise[index] == 1:
                flow_layer2 = cv2.circle(
                    flow_layer2,  # image to draw on
                    (int(prev[0][0]), int(prev[0][1])),  # circle centre
                    5,  # radius
                    color=(0, 255, 0),  # green: confirmed valid by a click
                    thickness=3  # line thickness
                )
        frame = cv2.add(first_frame, flow_layer2)
        if noise == hozon:
            break
        hozon = noise[:]

    # Display the result image
    cv2.namedWindow("frame", cv2.WINDOW_NORMAL)
    cv2.imshow("frame", frame)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
    cv2.imwrite(
        'D:/opticalflow/evaluation/result/' + str(videoName[:-4]) +
        '_Original.jpg', frame)

    return noise
Example #44
def start_imag_proc():
    global finalScore
    global curr_player

    finalScore = 0
    count = 0
    breaker = 0
    success = 1
    ## threshold important -> make accessible
    x = 1000

    #check which player
    if curr_player == 1:
        print(e1.get())
        score = int(e1.get())
    else:
        print(e2.get())
        score = int(e2.get())

    # remember the old score so it can be restored if the player busts (drops below 2)
    old_score = score

    # Read the first image twice (camera warm-up workaround) to start the loop:
    success, image = cam.read()
    t = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
    # wait for camera
    time.sleep(0.1)
    success, image = cam.read()
    t = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)

    while success:
        # wait for camera
        success, image = cam.read()
        t_plus = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
        dimg = cv2.absdiff(t, t_plus)
        time.sleep(0.1)
        # cv2.imshow(winName, edges(t_minus, t, t_plus))
        blur = cv2.GaussianBlur(dimg, (5, 5), 0)
        blur = cv2.bilateralFilter(blur, 9, 75, 75)
        ret, thresh = cv2.threshold(blur, 60, 255, 0)
        if cv2.countNonZero(thresh) > x and cv2.countNonZero(
                thresh) < 8000:  ## threshold important -> make accessible
            # wait for camera vibrations
            time.sleep(0.2)
            t_plus = cv2.cvtColor(cam.read()[1], cv2.COLOR_RGB2GRAY)
            dimg = cv2.absdiff(t, t_plus)

            ## kernel size important -> make accessible
            # filter noise from image distortions
            kernel = np.ones((8, 8), np.float32) / 40
            blur = cv2.filter2D(dimg, -1, kernel)

            # dilate and erode?

            # number of features to track is a distinctive feature
            # edges = cv2.goodFeaturesToTrack(blur,200,0.01,0,mask=None, blockSize=2, useHarrisDetector=1, k=0.001)
            ## FeaturesToTrack important -> make accessible
            edges = cv2.goodFeaturesToTrack(blur,
                                            640,
                                            0.0008,
                                            1,
                                            mask=None,
                                            blockSize=3,
                                            useHarrisDetector=1,
                                            k=0.06)  # k=0.08
            corners = np.int0(edges)
            testimg = blur.copy()

            # dart outside?
            if corners.size < 40:
                print "### dart not detected"
                continue

            # filter corners
            cornerdata = []
            tt = 0
            mean_corners = np.mean(corners, axis=0)
            for i in corners:
                xl, yl = i.ravel()
                # filter noise to only get dart arrow
                ## threshold important -> make accessible
                if abs(mean_corners[0][0] - xl) > 280:
                    cornerdata.append(tt)
                if abs(mean_corners[0][1] - yl) > 220:
                    cornerdata.append(tt)
                tt += 1

            corners_new = np.delete(corners, [cornerdata],
                                    axis=0)  # delete corners to form new array

            # dart outside?
            if corners_new.size < 30:
                print "### dart not detected"
                continue

            # find left and rightmost corners
            rows, cols = dimg.shape[:2]
            [vx, vy, x, y] = cv2.fitLine(corners_new, cv2.DIST_HUBER, 0, 0.1,
                                         0.1)
            lefty = int((-x * vy / vx) + y)
            righty = int(((cols - x) * vy / vx) + y)

            cornerdata = []
            tt = 0
            for i in corners_new:
                xl, yl = i.ravel()
                # check distance to fitted line, only draw corners within certain range
                distance = dist(0, lefty, cols - 1, righty, xl, yl)
                if distance < 40:  ## threshold important -> make accessible
                    cv2.circle(testimg, (xl, yl), 3, 255, -1)
                else:  # save corners out of range to delete afterwards
                    cornerdata.append(tt)
                tt += 1

            corners_final = np.delete(
                corners_new, [cornerdata],
                axis=0)  # delete corners to form new array

            t_plus_new = cv2.cvtColor(cam.read()[1], cv2.COLOR_RGB2GRAY)
            dimg_new = cv2.absdiff(t_plus, t_plus_new)
            # filter noise from image distortions
            kernel = np.ones((8, 8), np.float32) / 40
            blur_new = cv2.filter2D(dimg_new, -1, kernel)

            ret, thresh_new = cv2.threshold(blur_new, 60, 255, 0)
            ## threshold important -> make accessible
            ### check for bouncer????????
            if cv2.countNonZero(thresh_new) > 400:
                continue

            x, y, w, h = cv2.boundingRect(corners_final)

            cv2.rectangle(testimg, (x, y), (x + w, y + h), (255, 255, 255), 1)

            breaker += 1
            ###########################
            # find maximum x distance to dart tip, if camera is mounted on top

            maxloc = np.argmax(
                corners_final,
                axis=0)  # check max pos!!!, write image with circle??!!!
            ###########################
            locationofdart = corners_final[maxloc]

            try:
                # check if dart location has neighbouring corners (if not -> continue)
                cornerdata = []
                tt = 0
                for i in corners_final:
                    xl, yl = i.ravel()
                    distance = abs(locationofdart.item(0) -
                                   xl) + abs(locationofdart.item(1) - yl)
                    if distance < 40:  ## threshold important -> make accessible
                        tt += 1
                    else:
                        cornerdata.append(tt)

                if tt < 3:
                    corners_temp = cornerdata
                    maxloc = np.argmax(corners_temp, axis=0)
                    locationofdart = corners_temp[maxloc]
                    print "### used different location due to noise!"

                cv2.circle(testimg,
                           (locationofdart.item(0), locationofdart.item(1)),
                           10, (255, 255, 255), 2, 8)
                cv2.circle(testimg,
                           (locationofdart.item(0), locationofdart.item(1)), 2,
                           (0, 255, 0), 2, 8)

                # check for the location of the dart with the calibration

                dartloc = DartLocation(locationofdart.item(0),
                                       locationofdart.item(1))
                dartInfo = DartRegion(dartloc)  #cal_image

            except:
                print "Something went wrong in finding the darts location!"
                breaker -= 1
                continue

            print(dartInfo.base, dartInfo.multiplier)

            if breaker == 1:
                dart1entry.insert(10, str(dartInfo.base * dartInfo.multiplier))
                dart = int(dart1entry.get())
                cv2.imwrite("frame2.jpg", testimg)  # save dart1 frame
            elif breaker == 2:
                dart2entry.insert(10, str(dartInfo.base * dartInfo.multiplier))
                dart = int(dart2entry.get())
                cv2.imwrite("frame3.jpg", testimg)  # save dart2 frame
            elif breaker == 3:
                dart3entry.insert(10, str(dartInfo.base * dartInfo.multiplier))
                dart = int(dart3entry.get())
                cv2.imwrite("frame4.jpg", testimg)  # save dart3 frame

            score -= dart

            if score == 0 and dartInfo.multiplier == 2:
                score = 0
                breaker = 3
            elif score <= 1:
                score = old_score
                breaker = 3

            # save new diff img for next dart
            t = t_plus

            if curr_player == 1:
                e1.delete(0, 'end')
                e1.insert(10, score)
            else:
                e2.delete(0, 'end')
                e2.insert(10, score)

            finalScore += (dartInfo.base * dartInfo.multiplier)

            if breaker == 3:
                break

            #cv2.imshow(winName, tnow)

        # missed dart
        elif cv2.countNonZero(thresh) < 35000:
            continue

        # if player enters zone - break loop
        elif cv2.countNonZero(thresh) > 35000:
            break

        key = cv2.waitKey(10)
        if key == 27:
            cv2.destroyWindow(winName)
            break

        count += 1

    finalentry.insert(10, finalScore)

    print(finalScore)
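dist(), DartLocation, and DartRegion come from elsewhere in this project. Purely as an illustration, a point-to-line distance helper matching the call dist(0, lefty, cols - 1, righty, xl, yl) above could look like:

import math

def dist(x1, y1, x2, y2, px, py):
    """Distance from point (px, py) to the line through (x1, y1) and (x2, y2)."""
    num = abs((y2 - y1) * px - (x2 - x1) * py + x2 * y1 - y2 * x1)
    den = math.hypot(y2 - y1, x2 - x1)
    # fall back to plain point distance if the two line points coincide
    return num / den if den else math.hypot(px - x1, py - y1)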
Example #45
'''
Shi-Tomasi Corner Detector & Good Features to Track
N strongest corners in the image by Shi-Tomasi method (or Harris Corner Detection, if you specify it)
'''

import numpy as np
import cv2
from matplotlib import pyplot as plt

img = cv2.imread('Lena.tiff')  # read the image (BGR colour)
print(img.shape)

gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)  # imread returns BGR, so convert from BGR
print(gray.shape)

# How many points do you want to detect?
NoOfPoints = 25

corners = cv2.goodFeaturesToTrack(gray, NoOfPoints, 0.01, 10)
corners = np.int0(corners)

for i in corners:
    x, y = i.ravel()
    cv2.circle(img, (x, y), 3, 255, -1)

img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
plt.imshow(img)
plt.show()
Example #46
# Corner detection
import cv2

image_color = cv2.imread("IMAGE/stair.jpg", cv2.IMREAD_COLOR)

image_gray = cv2.cvtColor(image_color, cv2.COLOR_BGR2GRAY)  # convert to grayscale (imread returns BGR)
corners = cv2.goodFeaturesToTrack(image_gray,
                                  100,
                                  0.01,
                                  5,
                                  blockSize=3,
                                  useHarrisDetector=True,
                                  k=0.03)  # detect corners

for i in corners:
    x, y = i[0]
    cv2.circle(image_color, (int(x), int(y)), 3, (0, 0, 255), 2)  # mark each corner with a circle

cv2.imshow("image", image_color)

cv2.waitKey(0)
cv2.destroyAllWindows()
Example #47
        # Get fingers from projectIM2021_q1:
        points = find_circles(q1_canny(img))
        if len(points) != 5:
            continue

        # Get the average finger thickness:
        avg_finger_thickness = 2 * np.mean(points[:, 2])

        coordinates_thumb_to_little_finger = points[
            points[:, 1].argsort()]  # Sort points by row
        coordinates_thumb_to_little_finger = np.delete(
            coordinates_thumb_to_little_finger, 2, 1)  # delete third column

        # Get significant points:
        corners = cv2.goodFeaturesToTrack(canny_img, 1000, 0.01, 8)
        corners = np.int0(corners)

        # Draw significant points
        # for i in corners[0:]:
        #     x, y = i.ravel()
        #     cv2.circle(cimg, (x, y), radius=3, color=(255, 0, 0), thickness=4)

        # Find points:
        final_coordinates, approximate_points = find_all_points(
            corners[0:], coordinates_thumb_to_little_finger,
            avg_finger_thickness, sheet)

        # Draw points:
        for c in final_coordinates:
            cv2.circle(cimg, (c[0], c[1]),
Example #48
def Shi_tomasi_anchor(image):
    image_np = np.asarray(image)
    gray = cv2.cvtColor(image_np, cv2.COLOR_BGR2GRAY)
    corners = cv2.goodFeaturesToTrack(gray, 1, 0.01, 10)
    corners = np.int0(corners)
    return corners.ravel()
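Since maxCorners is 1, corners.ravel() flattens the single strongest corner into a plain [x, y] pair. A usage sketch (the file name is a placeholder, and at least one corner is assumed to be found):

import cv2

image = cv2.imread('sample.jpg')  # hypothetical input image
x, y = Shi_tomasi_anchor(image)   # assumes goodFeaturesToTrack found a corner
cv2.circle(image, (int(x), int(y)), 5, (0, 255, 0), -1)
cv2.imshow('strongest corner', image)
cv2.waitKey(0)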
Example #49
import cv2
import numpy as np

img = cv2.imread('../images/input_harris_GFTT_box.jpg')
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

corners = cv2.goodFeaturesToTrack(gray, 7, 0.05, 25)
corners = np.float32(corners)

for item in corners:
    x, y = item[0]
    cv2.circle(img, (int(x), int(y)), 5, 255, -1)

cv2.imshow("Top 'k' features", img)
cv2.waitKey()
Example #50
import numpy as np
import cv2 as cv

# cap is assumed to be an already-opened cv.VideoCapture
# params for ShiTomasi corner detection
feature_params = dict(maxCorners=100,
                      qualityLevel=0.3,
                      minDistance=7,
                      blockSize=7)
# Parameters for lucas kanade optical flow
lk_params = dict(winSize=(15, 15),
                 maxLevel=2,
                 criteria=(cv.TERM_CRITERIA_EPS | cv.TERM_CRITERIA_COUNT, 10,
                           0.03))
# Create some random colors
color = np.random.randint(0, 255, (100, 3))
# Take first frame and find corners in it
ret, old_frame = cap.read()
old_gray = cv.cvtColor(old_frame, cv.COLOR_BGR2GRAY)
p0 = cv.goodFeaturesToTrack(old_gray, mask=None, **feature_params)
# Create a mask image for drawing purposes
mask = np.zeros_like(old_frame)
while True:
    ret, frame = cap.read()
    if not ret:
        break
    frame_gray = cv.cvtColor(frame, cv.COLOR_BGR2GRAY)
    # calculate optical flow
    p1, st, err = cv.calcOpticalFlowPyrLK(old_gray, frame_gray, p0, None,
                                          **lk_params)
    # Select good points
    good_new = p1[st == 1]
    good_old = p0[st == 1]
    # draw the tracks
    for i, (new, old) in enumerate(zip(good_new, good_old)):
        a, b = new.ravel()
        c, d = old.ravel()
Example #51
    def detect_player(self, frame):

        #cvimg = pil2cv(frame)
        #cvimg = cv2.cvtColor(cvimg,cv2.COLOR_BGR2GRAY)
        #cvimg = cvimg - cv2.erode(cvimg,None)

        #match = cv2.matchTemplate(cvimg,self.template,cv2.TM_CCOEFF_NORMED)
        #mn,mx,mnLoc,mxLoc = cv2.minMaxLoc(match)

        #MaxPx,MaxPy = mxLoc

        #trows,tcols = (32,32)
        #return [(MaxPx,MaxPy,tcols,trows)]
        bb = self.bb
        f = frame.crop((bb[0], bb[1], bb[0] + bb[2], bb[1] + bb[3]))
        #ff.show()
        #sys.exit()
        img1 = pil2cv(f)  # cv2pil(rcv1).crop((mxLoc[0]-32,mxLoc[1]-32,mxLoc[0]+32,mxLoc[1]+32))
        g = cv2.cvtColor(img1, cv2.cv.CV_BGR2GRAY)  # get grayscale image
        g = g - cv2.erode(g, None)
        pt = cv2.goodFeaturesToTrack(g, **self.feature_params)
        # pt is for cropped image. add x, y in each point.
        # goodFeaturesToTrack returns None when no corners are found
        if pt is None:

            self.prev_frame = frame

            self.bb = self.predictBB(bb, self.old_pts, self.new_pts, frame)
            self.old_pts = self.new_pts

            return None
        for i in xrange(len(pt)):
            pt[i][0][0] = pt[i][0][0] + bb[0]
            pt[i][0][1] = pt[i][0][1] + bb[1]

        self.p0 = np.float32(pt).reshape(-1, 1, 2)

        cf = frame
        if self.prev_frame:
            pf = self.prev_frame
        else:
            pf = cf

        newg = pil2cv(cf)
        oldg = pil2cv(pf)

        newg = cv2.cvtColor(newg, cv2.cv.CV_BGR2GRAY)
        oldg = cv2.cvtColor(oldg, cv2.cv.CV_BGR2GRAY)

        newg = newg - cv2.erode(newg, None)
        oldg = oldg - cv2.erode(oldg, None)

        #p0 = np.float32(pt).reshape(-1, 1, 2)

        # For Forward-Backward Error method
        # using calcOpticalFlowPyrLK on both image frames
        # with corresponding tracking points

        p1, st, err = cv2.calcOpticalFlowPyrLK(oldg, newg, self.p0, None,
                                               **self.lk_params)
        p0r, st, err = cv2.calcOpticalFlowPyrLK(newg, oldg, p1, None,
                                                **self.lk_params)
        d = abs(self.p0 - p0r).reshape(-1, 2).max(-1)
        good = d < 1.0  # keep only points with a small forward-backward error
        self.new_pts = []
        #self.old_pts = self.def_pts
        #print self.old_pts
        for pts, val in itertools.izip(p1, good):
            if val:
                # points using forward-backward error
                self.new_pts.append([pts[0][0], pts[0][1]])

        self.prev_frame = frame

        self.bb = self.predictBB(bb, self.old_pts, self.new_pts, frame)
        self.old_pts = self.new_pts

        return (self.bb[0], self.bb[1], self.bb[2], self.bb[3])
Example #52
def locate_fridge(img):
	
	max_area = 0.6
	min_area = 0.01
	object_num = 0
	
	# absolute dimensions
	ref_length = 0.297 # [Meters]
	ref_width = 0.210 # [Meters]
	ref_area= ref_length*ref_width # [M**2]
		
	
	# Dimensions of the image file
	img_h, img_w, channels = img.shape 
	img_area = float(img_h*img_w)
	f_height = 0
	f_width  = 0
	indx_1 = 0
	
	top_freezer = True
	bottom_freezer = False
		
	gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
	picName = 'step1_fridge.png'
	cv2.imwrite(os.path.join('media/images/',picName) ,gray) 
	
	edged = cv2.Canny(gray, 50, 100)
	
	picName = 'step2_fridge.png'
	cv2.imwrite(os.path.join('media/images/',picName) ,edged)
	
	kernel = np.ones((5,5),np.uint8)
	edged = cv2.morphologyEx(edged, cv2.MORPH_CLOSE, kernel)
	
	picName = 'step3_fridge.png'
	cv2.imwrite(os.path.join('media/images/',picName) ,edged)
	
	
	# Detect corners, then find and sort the contours by area
	corners = cv2.goodFeaturesToTrack(gray,25,0.01,10)
	corners = np.int0(corners)
	contours,h = cv2.findContours(edged, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
	contours = sorted(contours, key = cv2.contourArea , reverse = True)[:4]
	
	# We Loop over the Contours
	for cnt in contours:
		peri = cv2.arcLength(cnt, True)
		
		approx = cv2.approxPolyDP(cnt, 0.04 * peri, True)
		
		ratio,corners,x,y,w,h = determine_shape(cnt)
		
		box_area = float(w)*float(h)
		area_ratio = (box_area/img_area)
		
		indx_1 = indx_1+1
		
		if corners>=4 and area_ratio<= max_area and area_ratio>= min_area:
			
			# Increase the number of found objects
			object_num=object_num+1
			
			
			# print 'h:',h
			
			if(object_num==1):
				indx=indx_1
				y1=y
			
			if(object_num <=2):
				f_height = f_height+h
				
				if y>y1 and object_num==2:
					# print 'Bottom Freezer'
					bottom_freezer = True
					top_freezer = False
					
				elif  y<y1 and object_num==2:
					# print 'Top Freezer'
					top_freezer = True
					bottom_freezer = False
				
				# Take the max width
				if w > f_width:
					f_width = w
			
			if(object_num==3):
				fh0 = h
				fw0 = w
				
				
			actual_area = cv2.contourArea(cnt)
			cv2.rectangle(img,(x,y),(x+w,y+h),(0,255,0),2)
			# print (box_area/img_area)*100.0
	
	# Compute the area of the little paper
	diff_ratio = min(abs((ref_width/ref_length)- (float(fw0)/float(fh0))),abs((ref_width/ref_length)- (float(fh0)/float(fw0))))
	
	# print 'Difference: ', diff_ratio
	
	
	object_area = float(fh0*fw0)
	
	# print 'Height and Width of Fridge: ', f_height, f_width
	# print 'Height and Width of Sheet: ', fh0, fw0
	
	unit_conversion = np.sqrt(ref_area/object_area)
	

	
	fridge_height_m = unit_conversion*float(f_height)
	fridge_width_m = unit_conversion*float(f_width)
	
	# Top freezer 
	if(top_freezer==True):
		#standard_height = range(1.638,1.740,0.001)
		
		if(1.638 <= fridge_height_m <= 1.740):
			print('true')
		else:
			quench_factor = float(1.638)/float(fridge_height_m)
			fridge_height_m = fridge_height_m*quench_factor
			fridge_width_m = fridge_width_m #*quench_factor
			#print 'FAlSE!'
			
	# In case we have a bottom Freezer		
	elif(bottom_freezer==True):
		if(1.690 <= fridge_height_m <= 1.740):
			print('true')
		else:
			quench_factor = float(1.638)/float(fridge_height_m)
			fridge_height_m = fridge_height_m*quench_factor
			fridge_width_m = fridge_width_m #*quench_factor
			#print 'FAlSE!'
	
	# print '=================================================='
	
	# if(top_freezer==True):
	# 	print 'The Fridge is a Top Freezer'
	
	# if(bottom_freezer==True):
	# 	print 'The Fridge is a Bottom Freezer'
	
	# print 'Fridge Height: [m] ', fridge_height_m*(1.0+diff_ratio)
	# print 'Fridge Width: [m] ', fridge_width_m*(1.0+diff_ratio)
	# print '=================================================='
	
	
	ratio,corners,x0,y0,w0,h0 = determine_shape(contours[indx])
	#cv2.rectangle(img,(x0,y0),(x0+f_width,y0+f_height),(255,0,0),2)
	cv2.imshow('image',img)
	cv2.waitKey(0)
    
	cv2.putText(img, 'HxW: '+"{:.2f} x {:.2f} [m]".format( fridge_height_m ,fridge_width_m),
		(int(10), int(20)), cv2.FONT_HERSHEY_SIMPLEX,
		0.45, (0, 0, 255), 1)
    	
	picName = 'FRIDGE_ID.png'
	cv2.imwrite(os.path.join('media/images/',picName) , img)
Example #53
    def detect_features(self, gray_image, previous_gray_image):
        """
         - This function detects features with the openCV's goodFeaturesToTrack. Then these features are tracked from one
         frame to the next with the openCV's Lucas Kanade optical flow method.
         - Then features are detected again using the goodFeaturesToTrack method, but this time only where there are no
         features already. This is done using a mask.
        :param gray_image: the pre-processed gray image of the arena
        :param previous_gray_image: the pre-processed gray image of the previous frame
        :return: the newest added point from each track
        """

        # Make sure there are features to track
        if len(self.tracks) > 0:
            p0 = np.float32([tr[-1] for tr in self.tracks])  # .reshape(-1, 1, 2)

            # Track all features forward
            p1, st, err = cv2.calcOpticalFlowPyrLK(previous_gray_image, gray_image, p0, None, **self.lk_settings)
            # Track all features backwards to the previous frame.
            p0r, st, err = cv2.calcOpticalFlowPyrLK(gray_image, previous_gray_image, p1, None, **self.lk_settings)

            # Now we can use the forward-backward error to eliminate the bad matches
            d = np.abs(np.subtract(p0, p0r)).reshape(-1, 2).max(-1)
            good = d < 1.0

            # Create a new array of tracks
            new_tracks = []

            # Add tracks to new array if the match is good
            for tr, (x, y), good_flag in zip(self.tracks, p1.reshape(-1, 2), good):
                # Means that forward-backwards error is high, so not a good match, let's skip it
                if not good_flag:
                    continue

                # Else we add the new point
                tr.append((x, y))

                # Prevent the tracks from growing too long
                if len(tr) > self.track_len:
                    del tr[0]

                # Finally we want to collect the good tracks
                new_tracks.append(tr)

            # Let's overwrite the old set of tracks with the new set of tracks
            self.tracks = new_tracks

            cv2.putText(self.visual_image, "Features:%d" % len(self.tracks), (20, 20), cv2.FONT_HERSHEY_SIMPLEX,
                        0.5, (0, 0, 0))

        # Create a mask to prevent the goodFeaturesToTrack algorithm from detecting features that have
        # already been detected.
        mask = np.zeros_like(gray_image)
        mask[:] = 255

        # Fill the mask with black(0) at the features positions, so that it doesn't find any features there.
        for x, y in [np.int32(tr[-1]) for tr in self.tracks]:
            cv2.circle(mask, (x, y), 10, 0, -1)

        if self.detector["ShiTomasi"] == True:
            # Track features with the goodFeaturesToTrack algorithm
            p = cv2.goodFeaturesToTrack(gray_image, mask=mask, **self.ShiTom_settings)

        elif self.detector["FAST"] == True:
            # Track features with the FAST feature detector
            p = self.fast.detect(gray_image, mask=mask)
            # We only need the coordinates(pt)
            p = np.array([point.pt for point in p])
        else:
            print "No feature detector is selected"
            p = None

        # Append new features to the tracks array
        if p is not None:
            for xy in p.reshape(-1, 2):
                self.tracks.append([tuple(xy)])

        # We return only points that have a track history of > 5, this to have stable features.
        return [tuple(list(tr[-1])) for tr in self.tracks if len(tr) > 5], gray_image
Example #54
def getCorners(I):
    # image, maxCorners=4, qualityLevel=0.50, minDistance=50, corners=None, mask=None, blockSize=20
    corns = cv2.goodFeaturesToTrack(I, 4, 0.50, 50, None, None, 20)
    return corns
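With a quality level of 0.50 and a 50-pixel minimum distance, getCorners returns at most four strong, well-separated corners, or None when nothing qualifies. A usage sketch (the file name is a placeholder):

import cv2
import numpy as np

I = cv2.imread('page.jpg', cv2.IMREAD_GRAYSCALE)  # hypothetical input
corns = getCorners(I)
if corns is not None:
    for x, y in np.int0(corns).reshape(-1, 2):
        cv2.circle(I, (int(x), int(y)), 8, 255, -1)
cv2.imshow('corners', I)
cv2.waitKey(0)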
Example #55
import numpy as np
import cv2 as cv
from matplotlib import pyplot as plt
img = cv.imread('blox.jpg')
gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
corners = cv.goodFeaturesToTrack(gray, 25, 0.01, 10)
corners = np.int0(corners)
for i in corners:
    x, y = i.ravel()
    cv.circle(img, (x, y), 3, 255, -1)
plt.imshow(img), plt.show()
Example #56
img2 = cv2.resize(img2, (800, 600))

myAligned = alignImages(img, img2)

height, width, channels = img.shape
print(height)
print(width)

# This time we draw the corners on a white blank image
blank = np.zeros([height, width, 3], dtype=np.uint8)
blank.fill(255)
blank2 = np.zeros([height, width, 3], dtype=np.uint8)
blank2.fill(255)
gray = cv2.cvtColor(img2, cv2.COLOR_BGR2GRAY)

corners = cv2.goodFeaturesToTrack(gray, 225, 0.01, 10)
print(corners)

corners = np.int0(corners)
# print(corners)

# gray2 = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)

corners2 = cv2.goodFeaturesToTrack(myAligned, 225, 0.01, 10)
corners2 = np.int0(corners2)

for i in corners:
    x, y = i.ravel()
    cv2.circle(blank, (x, y), 3, 255, -1)

for i in corners2:
Example #57
import numpy as np
import cv2

img = cv2.imread('cornerR.png')
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
gray = np.float32(gray)

corners = cv2.goodFeaturesToTrack(gray, 800, 0.01, 0.5)
corners = np.int0(corners)

for corner in corners:
    x, y = corner.ravel()
    cv2.circle(img, (x, y), 3, (0, 0, 255), -1)

cv2.imshow('CornerUgao', img)
cv2.waitKey(0)  # needed for the window to actually render
Example #58
    def searchkeypoints(self, skip_keypoints=False):
        vidcap = cv2.VideoCapture(self.mp4)
        success, frame = vidcap.read()
        prev_frame = [[[0]]]
        previous_timestamp = 0
        frameCount = 0
        keypoints = list()

        self.frameWidth = frame.shape[1]
        self.frameHeight = frame.shape[0]
        # if the video opened OK, start this loop
        while success:
            current_timestamp = vidcap.get(0) * 1000 * 1000  # CAP_PROP_POS_MSEC (prop 0) is in ms; convert to ns
            print("Processing frame#%d (%f ns)" % (frameCount, current_timestamp))

            # first frame: just record it and move on
            if prev_frame[0][0][0] == 0:
                self.frameInfo.append(
                    {"keypoints": None, "timestamp": current_timestamp}
                )
                prev_frame = frame
                previous_timestamp = current_timestamp
                frameCount += 1
                success, frame = vidcap.read()  # advance, or this frame is compared against itself
                continue

            # if skip_keypoints == true
            # it'll just store the timestamps of each frame.
            # use this parameter to read a video after you've already
            # calibrated your device and already have
            # the values of the various unknowns.
            if skip_keypoints:
                self.frameInfo.append(
                    {"keypoints": None, "timestamp": current_timestamp}
                )
                frameCount += 1
                previous_timestamp = current_timestamp
                prev_frame = frame
                success, frame = vidcap.read()  # advance, otherwise this loops forever
                continue

            old_gray = cv2.cvtColor(prev_frame, cv2.COLOR_BGR2GRAY)
            #            plt.imshow(old_gray, cmap='gray')
            #            plt.figure()
            #            plt.show()
            new_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            #            plt.figure()
            #            plt.imshow(new_gray, cmap='gray')
            #            plt.show()

            # extract some good features to track
            old_corners = cv2.goodFeaturesToTrack(old_gray, 1000, 0.3, 30)

            if old_corners is not None and len(old_corners) > 0:
                for x, y in np.float32(old_corners).reshape(-1, 2):
                    keypoints.append((x, y))

            if keypoints is not None and len(keypoints) > 0:
                for x, y in keypoints:
                    cv2.circle(prev_frame, (int(x + 200), int(y)), 3, (255, 255, 0))

            # plt.imshow(prev_frame, cmap='gray')
            # plt.show()

            if old_corners is None or len(old_corners) == 0:
                self.frameInfo.append(
                    {"keypoints": None, "timestamp": current_timestamp}
                )
                frameCount += 1
                previous_timestamp = current_timestamp
                prev_frame = frame
                success, frame = vidcap.read()
                continue

            # If we did find keypoints to track, we use optical flow
            # to identify where they are in the new frame:
            # there may be a big defect here!!
            new_corners, status, err = cv2.calcOpticalFlowPyrLK(
                old_gray,
                new_gray,
                old_corners,
                None,
                winSize=(15, 15),
                maxLevel=2,
                criteria=(cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0.03),
            )

            if new_corners is not None and len(new_corners) > 0:
                for x, y in np.float32(new_corners).reshape(-1, 2):
                    keypoints.append((x, y))

            if keypoints is not None and len(keypoints) > 0:
                for x, y in keypoints:
                    cv2.circle(frame, (int(x + 200), int(y)), 3, (255, 255, 0))

            #            plt.imshow(frame, cmap='gray')
            #            plt.show()

            if len(old_corners) > 4:
                homography, mask = cv2.findHomography(
                    old_corners, new_corners, cv2.RANSAC, 5.0
                )
                # convert to one dimension
                mask = mask.ravel()
                new_corners_homography = np.asarray(
                    [new_corners[i] for i in range(len(mask)) if mask[i] == 1]
                )
                old_corners_homography = np.asarray(
                    [old_corners[i] for i in range(len(mask)) if mask[i] == 1]
                )
            else:
                new_corners_homography = new_corners
                old_corners_homography = old_corners

            self.frameInfo.append(
                {
                    "keypoints": (old_corners_homography, new_corners_homography),
                    "timestamp": current_timestamp,
                }
            )

            frameCount += 1
            previous_timestamp = current_timestamp
            prev_frame = frame
            success, frame = vidcap.read()
        self.numFrames = frameCount
        self.duration = current_timestamp
        return
Example #59
#! usr/bin/env python

# importing the Libraries
import numpy as np
import cv2

# Read the image and convert to Grayscale
img  = cv2.imread('IMAGE')
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

# goodFeaturesToTrack is based on the Harris corner detector, differing in how the corner response R is calculated.
# It takes 4 main inputs: the grayscale image, the maximum number of corners, the minimum quality level, and the minimum distance between corners.
shi_corner = cv2.goodFeaturesToTrack(gray, 30, 0.01, 10)
# The detected corners are then converted to integers
shi_corner = np.int0(shi_corner)

# The corners are then marked.  
for c in shi_corner:
    x, y = c.ravel()
    cv2.circle(img, (x,y), 3, 255, -1)

# Display the image
cv2.imshow('Corners', img)

if cv2.waitKey(0) & 0xff == 27:
    cv2.destroyAllWindows()
Example #60
# EPS: the epsilon termination criterion is an upper bound on the error; 0.03 favors speed

lk_params = dict(winSize=(200, 200),
                 maxLevel=2,
                 criteria=(cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10,
                           0.03))

cap = cv2.VideoCapture(0)

ret, prev_frame = cap.read()

prev_gray = cv2.cvtColor(prev_frame, cv2.COLOR_BGR2GRAY)

# Points to track

prevPts = cv2.goodFeaturesToTrack(prev_gray, mask=None, **corner_track_params)

mask = np.zeros_like(prev_frame)

while True:
    ret, frame = cap.read()

    frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

    nextPts, status, err = cv2.calcOpticalFlowPyrLK(prev_gray, frame_gray,
                                                    prevPts, None, **lk_params)

    good_new = nextPts[status == 1]
    good_prev = prevPts[status == 1]

    for i, (new, prev) in enumerate(zip(good_new, good_prev)):