Example No. 1
import cv2
from math import exp, sqrt

def calcNorm(i, j, img, weight_matrix, sigmaI, sigmaS, r):
    # Fill one row of a normalized-cut style weight matrix for pixel (i, j).
    row = img.shape[0]
    col = img.shape[1]
    for x in range(row):
        for y in range(col):
            dist = sqrt(pow(i - x, 2) + pow(j - y, 2))
            if dist < r:
                # dist already equals the L2 norm of (i, j) - (x, y); the
                # division by sigma**2 belongs inside exp() for a Gaussian weight
                temp_s = exp(-pow(dist, 2) / pow(sigmaS, 2))
                temp_i = exp(-pow(cv2.norm(img[i][j], img[x][y], cv2.NORM_L2), 2) / pow(sigmaI, 2))
                # flattened pixel indices: i*col + j, not i + j
                weight_matrix[i * col + j][x * col + y] = temp_s * temp_i
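A quick standalone note on the call this example is built around: cv2.norm computes the magnitude of a single array or the distance between two arrays. A minimal sketch with made-up values (not part of the example above):

import cv2
import numpy as np

a = np.float32([3, 4])
b = np.float32([0, 0])
print(cv2.norm(a, cv2.NORM_L2))     # 5.0 - magnitude of a
print(cv2.norm(a, b, cv2.NORM_L2))  # 5.0 - L2 distance between a and b
print(np.linalg.norm(a - b))        # the same value via NumPy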
Example No. 2
    def findSamples(self, category, binImg, samples, outImg):
        contours = self.findContourAndBound(binImg.copy(), bounded=True,
                                            upperbounded=True,
                                            bound=self.minContourArea,
                                            upperbound=self.maxContourArea)
        contours = sorted(contours, key=cv2.contourArea, reverse=True)
        for contour in contours:
            # Find the center of each contour
            rect = cv2.minAreaRect(contour)
            centroid = rect[0]

            # Find the orientation of each contour
            points = np.int32(cv2.cv.BoxPoints(rect))
            edge1 = points[1] - points[0]
            edge2 = points[2] - points[1]

            if cv2.norm(edge1) > cv2.norm(edge2):
                angle = math.degrees(math.atan2(edge1[1], edge1[0]))
            else:
                angle = math.degrees(math.atan2(edge2[1], edge2[0]))

            if 90 < abs(Utils.normAngle(self.comms.curHeading) -
                        Utils.normAngle(angle)) < 270:
                angle = Utils.invertAngle(angle)

            samples.append({'centroid': centroid, 'angle': angle,
                            'area': cv2.contourArea(contour),
                            'category': category})

            if self.debugMode:
                Vision.drawRect(outImg, points)
Example No. 3
import cv2
import numpy as np

def linesDiff(line1, line2):
    # Distance between two lines given as [x1, y1, x2, y2] arrays,
    # invariant to the order of the endpoints.
    norm1 = cv2.norm(line1 - line2, cv2.NORM_L2)
    # swap the endpoints of line1 (concatenate the halves, not add them)
    line3 = np.concatenate((line1[2:4], line1[0:2]))
    norm2 = cv2.norm(line3 - line2, cv2.NORM_L2)

    return min(norm1, norm2)
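A usage sketch with made-up endpoint coordinates, assuming the [x1, y1, x2, y2] array layout that linesDiff already relies on:

import numpy as np

line_a = np.float32([0, 0, 10, 0])
line_b = np.float32([10, 0, 0, 0])  # same segment, endpoints in reverse order
print(linesDiff(line_a, line_b))    # 0.0 - the endpoint swap catches the match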
Example No. 4
    def test_goodFeaturesToTrack(self):
        arr = self.get_sample("samples/data/lena.jpg", 0)
        original = arr.copy(True)
        threshes = [x / 100.0 for x in range(1, 10)]
        numPoints = 20000

        results = dict([(t, cv2.goodFeaturesToTrack(arr, numPoints, t, 2, useHarrisDetector=True)) for t in threshes])
        # Check that GoodFeaturesToTrack has not modified input image
        self.assertTrue(arr.tostring() == original.tostring())
        # Check for repeatability
        for _ in range(1):
            results2 = dict(
                [(t, cv2.goodFeaturesToTrack(arr, numPoints, t, 2, useHarrisDetector=True)) for t in threshes]
            )
            for t in threshes:
                self.assertTrue(len(results2[t]) == len(results[t]))
                for i in range(len(results[t])):
                    self.assertTrue(cv2.norm(results[t][i][0] - results2[t][i][0]) == 0)

        for t0, t1 in zip(threshes, threshes[1:]):
            r0 = results[t0]
            r1 = results[t1]
            # Increasing thresh should make result list shorter
            self.assertTrue(len(r0) > len(r1))
            # Increasing thresh should only truncate the result list
            for i in range(len(r1)):
                self.assertTrue(cv2.norm(r1[i][0] - r0[i][0]) == 0)
Example No. 5
    def test_gaussian_mix(self):

        np.random.seed(10)
        cluster_n = 5
        img_size = 512

        points, ref_distrs = make_gaussians(cluster_n, img_size)

        em = cv2.ml.EM_create()
        em.setClustersNumber(cluster_n)
        em.setCovarianceMatrixType(cv2.ml.EM_COV_MAT_GENERIC)
        em.trainEM(points)
        means = em.getMeans()
        covs = em.getCovs()  # Known bug: https://github.com/Itseez/opencv/pull/4232
        found_distrs = zip(means, covs)

        matches_count = 0

        meanEps = 0.05
        covEps = 0.1

        for i in range(cluster_n):
            for j in range(cluster_n):
                if (cv2.norm(means[i] - ref_distrs[j][0], cv2.NORM_L2) / cv2.norm(ref_distrs[j][0], cv2.NORM_L2) < meanEps and
                    cv2.norm(covs[i] - ref_distrs[j][1], cv2.NORM_L2) / cv2.norm(ref_distrs[j][1], cv2.NORM_L2) < covEps):
                    matches_count += 1

        self.assertEqual(matches_count, cluster_n)
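The matching criterion above is a relative L2 error: the norm of the difference divided by the norm of the reference. A standalone illustration of that criterion with hypothetical numbers:

import cv2
import numpy as np

ref = np.float32([10.0, 20.0])
est = np.float32([10.3, 19.8])
rel_err = cv2.norm(est - ref, cv2.NORM_L2) / cv2.norm(ref, cv2.NORM_L2)
print(rel_err < 0.05)  # True - within the meanEps tolerance used above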
Example No. 6
    def test_fitline(self):

        noise = 5
        n = 200
        r = 5 / 100.0
        outn = int(n*r)

        p0, p1 = (90, 80), (w-90, h-80)
        line_points = sample_line(p0, p1, n-outn, noise)
        outliers = np.random.rand(outn, 2) * (w, h)
        points = np.vstack([line_points, outliers])

        lines = []

        for name in dist_func_names:
            func = getattr(cv2.cv, name)
            vx, vy, cx, cy = cv2.fitLine(np.float32(points), func, 0, 0.01, 0.01)
            line = [float(vx), float(vy), float(cx), float(cy)]
            lines.append(line)

        eps = 0.05

        refVec = (np.float32(p1) - p0) / cv2.norm(np.float32(p1) - p0)

        for i in range(len(lines)):
            self.assertLessEqual(cv2.norm(refVec - lines[i][0:2], cv2.NORM_L2), eps)
Example No. 7
    def test_dft(self):

        img = self.get_sample('samples/data/rubberwhale1.png', 0)
        eps = 0.001

        #test direct transform
        refDft = np.fft.fft2(img)
        refDftShift = np.fft.fftshift(refDft)
        refMagnitude = np.log(1.0 + np.abs(refDftShift))

        testDft = cv2.dft(np.float32(img),flags = cv2.DFT_COMPLEX_OUTPUT)
        testDftShift = np.fft.fftshift(testDft)
        testMagnitude = np.log(1.0 + cv2.magnitude(testDftShift[:,:,0], testDftShift[:,:,1]))

        refMagnitude = cv2.normalize(refMagnitude, None, 0.0, 1.0, cv2.NORM_MINMAX)
        testMagnitude = cv2.normalize(testMagnitude, None, 0.0, 1.0, cv2.NORM_MINMAX)

        self.assertLess(cv2.norm(refMagnitude - testMagnitude), eps)

        #test inverse transform
        img_back = np.fft.ifft2(refDft)
        img_back = np.abs(img_back)

        img_backTest = cv2.idft(testDft)
        img_backTest = cv2.magnitude(img_backTest[:,:,0], img_backTest[:,:,1])

        img_backTest = cv2.normalize(img_backTest, None, 0.0, 1.0, cv2.NORM_MINMAX)
        img_back = cv2.normalize(img_back, None, 0.0, 1.0, cv2.NORM_MINMAX)

        self.assertLess(cv2.norm(img_back - img_backTest), eps)
Example No. 8
def scale_bound(corners, frac):
    # enlarge the QR bound by factor frac around the mass center
    mass_center = corners.mean(axis=0)
    new_corners = np.zeros(corners.shape)
    for i in range(len(corners)):
        v = corners[i] - mass_center
        u = v / cv2.norm(v)  # unit vector from center to corner
        new_corners[i, :] = mass_center + frac * cv2.norm(v) * u
    return new_corners
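Since u is just v scaled to unit length, frac * cv2.norm(v) * u reduces algebraically to frac * v, so the loop can collapse to a single vectorized expression. A sketch of that equivalent form (an observation on the code above, not the author's version):

import numpy as np

def scale_bound_vectorized(corners, frac):
    mass_center = corners.mean(axis=0)
    # push every corner away from the mass center by the same factor
    return mass_center + frac * (corners - mass_center)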
Example No. 9
def contour_size_ok(contour, distance, camera):
    ''' contour_size_ok - determine if contour size is ok for height '''
    # find enclosing rectangle
    x, y, width, height = cv2.boundingRect(contour)
    # top_left, top_right, bottom_left, bottom_right
    corners = [(x, y), (x+width, y), (x, y+height), (x+width, y+height)]
    # convert to ground offsets in meters (a list comprehension stays indexable on Python 3,
    # and float32 arrays let cv2.norm take the point-to-point distance)
    corners = [np.float32(camera.get_ground_offset(c, distance)) for c in corners]

    # width times height of the ground-projected rectangle
    area = cv2.norm(corners[0], corners[1]) * cv2.norm(corners[0], corners[2])
    return area > 0.8 * TARGET_CIRCLE_AREA
Example No. 10
    def test_calibration(self):

        from glob import glob
        img_names = []
        for i in range(1, 15):
            if i == 10:
                continue  # the sample set has no left10.jpg
            img_names.append('samples/data/left{:02d}.jpg'.format(i))

        square_size = 1.0
        pattern_size = (9, 6)
        pattern_points = np.zeros((np.prod(pattern_size), 3), np.float32)
        pattern_points[:, :2] = np.indices(pattern_size).T.reshape(-1, 2)
        pattern_points *= square_size

        obj_points = []
        img_points = []
        h, w = 0, 0
        img_names_undistort = []
        for fn in img_names:
            img = self.get_sample(fn, 0)
            if img is None:
                continue

            h, w = img.shape[:2]
            found, corners = cv2.findChessboardCorners(img, pattern_size)
            if found:
                term = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_COUNT, 30, 0.1)
                cv2.cornerSubPix(img, corners, (5, 5), (-1, -1), term)

            if not found:
                continue

            img_points.append(corners.reshape(-1, 2))
            obj_points.append(pattern_points)

        # calculate camera distortion
        rms, camera_matrix, dist_coefs, rvecs, tvecs = cv2.calibrateCamera(obj_points, img_points, (w, h), None, None, flags = 0)

        eps = 0.01
        normCamEps = 10.0
        normDistEps = 0.05

        cameraMatrixTest = [[ 532.80992189,    0.,          342.4952186 ],
         [   0.,         532.93346422,  233.8879292 ],
         [   0.,            0.,            1.        ]]

        distCoeffsTest = [ -2.81325576e-01,   2.91130406e-02,
           1.21234330e-03,  -1.40825372e-04, 1.54865844e-01]

        self.assertLess(abs(rms - 0.196334638034), eps)
        self.assertLess(cv2.norm(camera_matrix - cameraMatrixTest, cv2.NORM_L1), normCamEps)
        self.assertLess(cv2.norm(dist_coefs - distCoeffsTest, cv2.NORM_L1), normDistEps)
Example No. 11
    def test_estimateAffine3D(self):
        pattern_size = (11, 8)
        pattern_points = np.zeros((np.prod(pattern_size), 3), np.float32)
        pattern_points[:, :2] = np.indices(pattern_size).T.reshape(-1, 2)
        pattern_points *= 10
        (retval, out, inliers) = cv2.estimateAffine3D(pattern_points, pattern_points)
        self.assertEqual(retval, 1)
        if cv2.norm(out[2, :]) < 1e-3:
            out[2, 2] = 1
        self.assertLess(cv2.norm(out, np.float64([[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0]])), 1e-3)
        self.assertEqual(cv2.countNonZero(inliers), pattern_size[0] * pattern_size[1])
Example No. 12
    def angleFromContour(self, contour):
        points = np.array(cv2.cv.BoxPoints(cv2.minAreaRect(contour)))

        edge1 = points[1] - points[0]
        edge2 = points[2] - points[1]

        #Choose the vertical edge
        if cv2.norm(edge1) > cv2.norm(edge2):
            rectAngle = math.degrees(math.atan2(edge2[1], edge2[0]))
        else:
            rectAngle = math.degrees(math.atan2(edge1[1], edge1[0]))

        return rectAngle
Example No. 13
def get_full_corners(corners):
    # finds the 4th corner point from three given corners
    # compute the triangle side lengths
    dist_matrix = squareform(pdist(corners))
    # the two corners joined by the longest side flank the right-angle corner
    a_sharp, b_sharp = np.unravel_index(dist_matrix.argmax(), dist_matrix.shape)
    c_square = ({0, 1, 2} - {a_sharp} - {b_sharp}).pop()
    # find the 4th point by reflecting the right-angle corner across the diagonal midpoint
    diag_middle = (corners[a_sharp] + corners[b_sharp]) / 2.
    v = corners[c_square] - diag_middle
    u = v / cv2.norm(v)
    d_square = diag_middle - cv2.norm(v) * u
    return np.vstack([corners, d_square])
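A usage sketch with three corners of a unit square as hypothetical input (pdist and squareform come from scipy.spatial.distance):

import numpy as np

corners = np.float32([[0, 0], [1, 0], [0, 1]])
print(get_full_corners(corners))
# the longest side joins (1, 0) and (0, 1), so the right-angle corner (0, 0)
# is reflected across their midpoint and roughly [1, 1] is appended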
Example No. 14
def CSRec_SP(K, Phi, y):
    # y = Phi * x
    # Subspace Pursuit for Compressive Sensing: Closing the
    # Gap Between Performance and Complexity

    (m, N) = Phi.shape
    y_r = y
    _in = 0
    cv = abs(np.dot(y_r.transpose(), Phi))
    cv_sort = np.sort(cv[0,:])[::-1]
    cv_index = np.argsort(cv[0,:])[::-1]
    cv_index = np.sort(cv_index[0:K])
    Phi_x = Phi[:,cv_index]
    Index_save = np.zeros((1,cv_index.shape[0]), dtype=int)
    Index_save[_in,:] = cv_index

    x_p = np.dot(np.dot(np.linalg.inv(np.dot(Phi_x.transpose(),Phi_x)),Phi_x.transpose()),y)
    y_r = y - np.dot(Phi_x,x_p)
    norm_save = np.array(cv2.norm(y_r))
    while 1:
        _in += 1

        #find T^{\prime} and add it to \hat{T}
        cv = abs(np.dot(y_r.transpose(),Phi))
        cv_sort = np.sort(cv[0,:])[::-1]
        cv_index = np.argsort(cv[0,:])[::-1]
        cv_index = np.sort(cv_index[0:K])
        cv_add = np.union1d(Index_save[_in-1,:], cv_index)
        Phi_x = Phi[:,cv_add]

        #find the most significant K indices
        x_p = np.dot(np.dot(np.linalg.inv(np.dot(Phi_x.transpose(),Phi_x)),Phi_x.transpose()),y)
        x_p_sort = np.sort(abs(x_p[:,0]))[::-1]
        i_sort = np.argsort(abs(x_p[:,0]))[::-1]
        cv_index = cv_add[i_sort[0:K]]
        cv_index = np.sort(cv_index)
        Phi_x = Phi[:,cv_index]
        Index_save = np.append(Index_save, cv_index.reshape((1,cv_index.shape[0])), axis = 0)

        #calculate the residue
        x_p = np.dot(np.dot(np.linalg.inv(np.dot(Phi_x.transpose(), Phi_x)), Phi_x.transpose()),y)
        y_r = y - np.dot(Phi_x, x_p)
        norm_save = np.append(norm_save, cv2.norm(y_r))
        if norm_save[_in] == 0 or (norm_save[_in]/norm_save[_in-1] >= 1):
            break

    x_hat = np.zeros((N,1))
    x_hat[Index_save[_in,:]] = np.reshape(x_p,(K,1))
    # return (x_hat, Index_save, norm_save)
    return x_hat
Example No. 15
    def distFromCenter(self, point):
        """
        Returns the distance from the point to the center of mass of the roi

        :param tuple point: The (x, y) point to check
        """
        return norm(self.center, point)
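Example No. 16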
def reprojection_error_ext(objp, imgp, cameraMatrix, distCoeffs, rvecs, tvecs):
    """
    Returns the mean absolute error, and the RMS error of the reprojection
    of 3D points "objp" on the images from a camera
    with intrinsics ("cameraMatrix", "distCoeffs") and poses ("rvecs", "tvecs").
    The original 2D points should be given by "imgp".
    
    See OpenCV's doc about the format of "cameraMatrix", "distCoeffs", "rvec" and "tvec".
    """
    
    mean_error = np.zeros((1, 2))
    square_error = np.zeros((1, 2))
    n_images = len(imgp)

    for i in xrange(n_images):
        imgp_reproj, jacob = cv2.projectPoints(
                objp[i], rvecs[i], tvecs[i], cameraMatrix, distCoeffs )
        error = imgp_reproj.reshape(-1, 2) - imgp[i]
        mean_error += abs(error).sum(axis=0) / len(imgp[i])
        square_error += (error**2).sum(axis=0) / len(imgp[i])

    mean_error = cv2.norm(mean_error / n_images)
    square_error = np.sqrt(square_error.sum() / n_images)
    
    return mean_error, square_error
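Example No. 17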
def normalizeContour(contour):
    moments = cv2.moments(contour)
    number_of_points = len(contour)
    max_distance_from_center = 0
    centroid_x = moments['m10']/moments['m00']
    centroid_y = moments['m01']/moments['m00']
    normalized_contour = np.array(contour).astype('float')
    """ 
    Move the contour to the center of axis
     and get the maximum distance from the center of axis
    """
    for i in range(0, number_of_points): 
        normalized_contour[i][0][0] -= centroid_x
        normalized_contour[i][0][1] -= centroid_y
        distance_from_center = cv2.norm(normalized_contour[i][0])
        max_distance_from_center = max(max_distance_from_center,distance_from_center)
    """
    Normalize the contour making the longest distance
    from the axis equal to 1
    """
    if max_distance_from_center > 0:
        for i in range(0, number_of_points):
            normalized_contour[i][0][0] = normalized_contour[i][0][0] / max_distance_from_center
            normalized_contour[i][0][1] = normalized_contour[i][0][1] / max_distance_from_center
    return normalized_contour
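A quick check of the function on a small contour, using OpenCV's usual (N, 1, 2) contour layout as hypothetical input:

import cv2
import numpy as np

square = np.array([[[0, 0]], [[2, 0]], [[2, 2]], [[0, 2]]], dtype=np.int32)
normalized = normalizeContour(square)
print(cv2.norm(normalized[0][0]))  # ~1.0: the farthest point now sits at unit distance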
Example No. 18
def propose_pairs(descripts_a, keypts_a, descripts_b, keypts_b):
    """Given a set of descriptors and keypoints from two images, propose good keypoint pairs.

    Feature descriptors should encode local image geometry in a way that is invariant to
    small changes in perspective. They should be comparable using an L2 metric.

    For the top N matching descriptors, select and return the top corresponding keypoint pairs.

    Returns:
        pair_pts_a (list of N np.matrix of shape 3x1): List of N corresponding keypoints in
            homogeneous form.
        pair_pts_b (list of N np.matrix of shape 3x1): List of N corresponding keypoints in
            homogeneous form.
    """
    # code here

    pair_pts_a = []
    pair_pts_b = []

    dist = []
    for i in range(len(keypts_a)):
        for j in range(len(keypts_b)):
            dist.append((cv2.norm(descripts_a[i], descripts_b[j],
                cv2.NORM_HAMMING), keypts_a[i], keypts_b[j]))
    sorted_dist = sorted(dist, key=lambda x:x[0])

    #N = len(dist) # can change this value
    N = min(50, len(dist))
    for j in range(N):
        pair_pts_a.append(sorted_dist[j][1])
        pair_pts_b.append(sorted_dist[j][2])

    return pair_pts_a, pair_pts_b
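The double loop above scores the full cross product of descriptors with cv2.norm(..., cv2.NORM_HAMMING). OpenCV's built-in brute-force matcher runs the same Hamming scan but keeps only the best candidate per query descriptor; a hedged sketch, assuming descripts_a and descripts_b are uint8 binary-descriptor matrices such as ORB output:

bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=False)
matches = sorted(bf.match(descripts_a, descripts_b), key=lambda m: m.distance)[:50]
# each DMatch carries .queryIdx / .trainIdx for looking up the paired keypoints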
Example No. 19
    def __init__(self, video_src):
        if os.path.exists(self.IMG_DIR):
            print 'dir {} exists. removing...'.format(self.IMG_DIR)
            shutil.rmtree(self.IMG_DIR)

        self.cam = cv2.VideoCapture(video_src)
        self.cur_img_id=0
        self.prev_image=None
        self.saved_frame_cnt = 0
        while self.cam.isOpened():
            ret, self.frame = self.cam.read()
            self.cur_img_id = self.cur_img_id + 1
            if not ret:
                print 'end of video. saved {} images'.format(self.saved_frame_cnt)
                break
            diff = 0.0
            # "None != array" is ambiguous for NumPy arrays; test identity instead
            if self.prev_image is not None:
                diff = cv2.norm(self.prev_image, self.frame, cv2.NORM_L1)
                if diff < self.IMG_DIFF_THRESHOLD:
                    continue

            print 'saved frame # {}: distance from previous frame {}'.format(self.cur_img_id, diff)
            self.save_img(self.frame)
            self.saved_frame_cnt = self.saved_frame_cnt + 1
            self.prev_image = self.frame
Example No. 20
def computeError(i, j):
    if i == j:
        if method == 'avg':
            return 0.0
        elif method == 'norm':
            return 1.0
        else:
            return float('inf')

    ## reconstructing i with j
    H = computeHomography(j, i) # j -> i
    if H is None:  # "==" on a NumPy array is elementwise
        return float('inf')
    commonPts = intersect(kpts[i].keys(), kpts[j].keys())
    n_commonPts = len(commonPts)
    if n_commonPts == 0:
        return float('inf')
    all_pts = []
    for pt in commonPts:
        all_pts.append(kpts[j][pt])
    all_pts = cv2.perspectiveTransform(
            np.array([all_pts], dtype='float32'),
            H)
    d = 0.0
    for idx in range(n_commonPts):
        dist = cv2.norm(all_pts[0][idx][:], kpts[i][commonPts[idx]], cv2.NORM_L2)
        if method == 'norm':
            if dist < RADIUS:
                d += 1.0
        else:
            d += dist
    return d / n_commonPts
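One shape detail worth noting in the transform step above: cv2.perspectiveTransform expects a float32 array of shape (1, N, 2), which is why the code wraps all_pts in an extra list. A standalone sketch:

import cv2
import numpy as np

pts = np.float32([[[0, 0], [10, 5]]])    # shape (1, N, 2)
H = np.eye(3, dtype=np.float32)          # identity homography
print(cv2.perspectiveTransform(pts, H))  # the points come back unchanged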
Example No. 21
def mouse_call(event, x, y, flags, param):
    global mul, count, number, data_base, pose, shape_len, normBase, passmarks
    if event == cv2.EVENT_LBUTTONDOWN:
        if x < 110:
            if 20 < y < 50:
                print "RESET"
                mul = 0.8
                count = 0
                clearSideBar()
                passmarks = 0
                print mul, count
            if 80 < y < 100:
                print "pause"
                cv2.waitKey(0)
            if 130 < y < 150:
                print "Change shape"
                if number == shape_len - 1:
                    number = 0
                else:
                    number = number + 1
                pose = cv2.imread(data_base[number])
                pose = cv2.cvtColor(pose, cv2.COLOR_BGR2GRAY)
                normBase = cv2.norm(np.zeros((480, 640), np.uint8), pose)
                mul = 0.8
                count = 0
                clearSideBar()
                passmarks = 0
Example No. 22
def sub_im(img):
    global normBase, normValue, flag
    # Finds the overlapping region
    img = deepcopy(img)
    #img = cv2.resize(img,(640,480))
    #ima = cv2.resize(ima,(640,480))
    # Overlapping region in white, non-overlapping in gray
    score = cv2.addWeighted(img, 0.5, pose, 0.5, 0)
    #score = cv2.subtract(ima,img)
    #print cv2.phaseCorrelate(np.float32(img),np.float32(pose))
    #find the area here
    #find_area(score)
    #find_area(img)
    normValue = cv2.norm(img, pose)
    print normValue
    score = cv2.cvtColor(score, cv2.COLOR_GRAY2BGR)

    sensitivity = normBase * 0.1
    flag = 0
    # converting the overlapping region into green
    retval, score[:, :, 0] = cv2.threshold(score[:, :, 0], 240, 0, cv2.THRESH_TOZERO_INV)
    if normValue < normBase - sensitivity:
        flag = 1
        retval, score[:, :, 2] = cv2.threshold(score[:, :, 2], 240, 0, cv2.THRESH_TOZERO_INV)

    #score = cv2.subtract(score,neg_green,dst = score,mask =msk)
    cv2.imshow('score', score)

    return score
Example No. 23
def function4():
    cam = cv2.VideoCapture('http://leafcamera.ddns.net:9000/videostream.cgi?loginuse=admin&loginpas=admin')
    t0 = cv2.cvtColor(cam.read()[1], cv2.COLOR_RGB2GRAY)
    presence,absence = 0,0
    while True:
        temp = cam.read()[1]
        t1 = cv2.cvtColor(temp, cv2.COLOR_RGB2GRAY)
        frame = imutils.resize(temp, width=500)
        count = int(cv2.norm(t0,t1,cv2.NORM_L1))
        # print count/360
        if count > 5000*360:
            cv2.putText(frame, "Room Status: {}".format("Occupied"), (10, 20),cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 1)
            presence += 1
            absence = 0
        else:
            cv2.putText(frame, "Room Status: {}".format("Unoccupied"), (10, 20),cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 1)
            absence += 1
            if absence > 60: presence = 0
        curr_time = datetime.datetime.now().strftime("%A %d %B %Y %I:%M:%S%p")
        filename = str(datetime.datetime.now().strftime("%d %B %I-%M-%S %p"))
        cv2.putText(frame, curr_time,(10, frame.shape[0] - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.35, (0, 0, 255), 1)
        cv2.imshow("Security Feed", frame)
        print presence,absence,count/480
        # break
        if presence > 60:
            # send_push()
            presence = 0
        if (presence-1)%5 == 0: get_face(t1,temp,filename)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    cam.release()
    cv2.destroyAllWindows()
Example No. 24
def function3():
    cam = cv2.VideoCapture('http://leafcamera.ddns.net:9000/videostream.cgi?loginuse=admin&loginpas=admin')
    t0 = cv2.cvtColor(cam.read()[1], cv2.COLOR_RGB2GRAY)
    t1 = cv2.cvtColor(cam.read()[1], cv2.COLOR_RGB2GRAY)
    t2 = cv2.cvtColor(cam.read()[1], cv2.COLOR_RGB2GRAY)
    while True:
        text = 0
        temp = cam.read()[1]
        frame = imutils.resize(temp, width=500)
        tmp = change(t0,t1,t2)
        count = int(cv2.norm(tmp, cv2.NORM_L1))
        # print count/480
        if count > 600*360:
            print "MOTION"
            text = 1
        else:
            print ""
        cv2.imshow('Video',tmp)
        t0 = t1
        t1 = t2
        t2 = cv2.cvtColor(temp, cv2.COLOR_RGB2GRAY)
        if text == 0: cv2.putText(frame, "Room Status: {}".format("No Motion"), (10, 20),cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2)
        if text == 1: cv2.putText(frame, "Room Status: {}".format("Motion"), (10, 20),cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)
        cv2.putText(frame, datetime.datetime.now().strftime("%A %d %B %Y %I:%M:%S%p"),(10, frame.shape[0] - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.35, (0, 0, 255), 1)
        # cv2.imshow("Security Feed", frame)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    cam.release()
    cv2.destroyAllWindows()
Example No. 25
    def test_texture_flow(self):

        img = self.get_sample('samples/cpp/pic6.png')

        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        h, w = img.shape[:2]

        eigen = cv2.cornerEigenValsAndVecs(gray, 15, 3)
        eigen = eigen.reshape(h, w, 3, 2)  # [[e1, e2], v1, v2]
        flow = eigen[:,:,2]

        vis = img.copy()
        vis[:] = (192 + np.uint32(vis)) / 2
        d = 80
        points =  np.dstack( np.mgrid[d/2:w:d, d/2:h:d] ).reshape(-1, 2)

        textureVectors = []

        for x, y in np.int32(points):
            textureVectors.append(np.int32(flow[y, x]*d))

        eps = 0.05

        testTextureVectors = [[0, 0], [0, 0], [0, 0], [0, 0], [0, 0],
        [-38, 70], [-79, 3], [0, 0], [0, 0], [-39, 69], [-79, -1],
        [0, 0], [0, 0], [0, -79], [17, -78], [-48, -63], [65, -46],
        [-69, -39], [-48, -63]]

        for i in range(len(testTextureVectors)):
            self.assertLessEqual(cv2.norm(textureVectors[i] - testTextureVectors[i], cv2.NORM_L2), eps)
Example No. 26
def undistort(mtx, dist, objpoints, imgpoints, rvecs, tvecs, img):
    # read in an image of choice
    # useful for the reading in and segmenting of board to make hough lines easier
    h, w = img.shape[:2]
    newcameramtx, roi = cv2.getOptimalNewCameraMatrix(mtx, dist, (w, h), 1, (w, h))

    # undistort
    dst = cv2.undistort(img, mtx, dist, None, newcameramtx)

    # crop the image
    x, y, w, h = roi
    dst = dst[y:y + h, x:x + w]
    cv2.imwrite('calibresult.png', dst)

    # undistort
    mapx, mapy = cv2.initUndistortRectifyMap(mtx, dist, None, newcameramtx, (w, h), 5)
    dst = cv2.remap(img, mapx, mapy, cv2.INTER_LINEAR)

    # crop the image
    x, y, w, h = roi
    dst = dst[y:y + h, x:x + w]
    cv2.imwrite('calibresult.png', dst)

    mean_error = 0
    for i in xrange(len(objpoints)):
        imgpoints2, _ = cv2.projectPoints(objpoints[i], rvecs[i], tvecs[i], mtx, dist)
        error = cv2.norm(imgpoints[i], imgpoints2, cv2.NORM_L2) / len(imgpoints2)
        mean_error += error

    print "total error: ", mean_error / len(objpoints)
Example No. 27
def simple_detect_fingerTips(hull, img, fixedPoint, edge_thresh, neighbor_thresh):
    dist_from_fixedPoint = []
    img_height, img_width = img.shape[0:2]
    hull_nbPts = hull.shape[0]

    #calculate distance to fixed Point
    for i in range(hull_nbPts):
        dist_from_fixedPoint.append(cv2.norm(fixedPoint - hull[i], cv2.NORM_L2))
    
    #sort index from farthest to nearest
    max_indx = np.argsort(-1*np.array(dist_from_fixedPoint))

    #need to eliminate same points and points at edge
    #results stored in idx_ok, the list of candidate indices of hulls 
    idx_ok = []

    for i in range(hull_nbPts):
        idx = max_indx[i]
        if point_not_at_edge(hull[idx,0,0], hull[idx,0,1], img_height, img_width, edge_thresh):
            if(len(idx_ok) == 0):
                idx_ok.append(idx)
            else:
                not_similar = True
                for idx_neighbor in idx_ok:
                    not_similar = (points_not_similar(hull[idx,0,0], hull[idx,0,1], hull[idx_neighbor,0,0], hull[idx_neighbor,0,1],neighbor_thresh))
                    if not not_similar: #if similar break the loop 
                        break
                       
                if(not_similar):
                    idx_ok.append(idx)                    
    return idx_ok
Example No. 28
def propose_pairs(descripts_a, keypts_a, descripts_b, keypts_b):
    """Given a set of descriptors and keypoints from two images, propose good keypoint pairs.

    Feature descriptors should encode local image geometry in a way that is invariant to
    small changes in perspective. They should be comparable using an L2 metric.

    For the top N matching descriptors, select and return the top corresponding keypoint pairs.

    Returns:
        pair_pts_a (list of N np.matrix of shape 3x1): List of N corresponding keypoints in 
            homogeneous form. 
        pair_pts_b (list of N np.matrix of shape 3x1): List of N corresponding keypoints in 
            homogeneous form. 
    """
    # code here
    pair_pts_a = []
    pair_pts_b = []
    
    for i in range(len(descripts_a)): 
        temp = []
        for j in range(len(descripts_b)):
            ad = descripts_a[i] 
            bd = descripts_b[j]                
            dist = cv2.norm(ad,bd,cv2.NORM_HAMMING)
            temp.append([dist, j])

        temp = sorted(temp)
        pair_pts_a.append(keypts_a[i])
        pair_pts_b.append(keypts_b[temp[0][1]])
    #print "A: \n", pair_pts_a
    #print "B: \n", pair_pts_b

    return pair_pts_a, pair_pts_b
Example No. 29
def propose_pairs(descripts_a, keypts_a, descripts_b, keypts_b):
    """Given a set of descriptors and keypoints from two images, propose good keypoint pairs.

    Feature descriptors should encode local image geometry in a way that is invariant to
    small changes in perspective. They should be comparable using an L2 metric.

    For the top N matching descriptors, select and return the top corresponding keypoint pairs.

    Returns:
        pair_pts_a (list of N np.matrix of shape 3x1): List of N corresponding keypoints in 
            homogeneous form. 
        pair_pts_b (list of N np.matrix of shape 3x1): List of N corresponding keypoints in 
            homogeneous form. 
    """
    # code here
    pair_pts_a = []
    pair_pts_b = []
    distances = []
    zipped_a = zip(descripts_a, keypts_a)
    zipped_b = zip(descripts_b, keypts_b)
    distance_product = [(cv2.norm(a[0],b[0],cv2.NORM_HAMMING), a[1], b[1]) for a in zipped_a for b in zipped_b]
    sorted_list = sorted(distance_product, key=lambda x: x[0])
    N = min(100, len(sorted_list))
    for n in range(N):
        pair_pts_a.append(sorted_list[n][1])
        pair_pts_b.append(sorted_list[n][2])
        distances.append(sorted_list[n][0])
    return pair_pts_a, pair_pts_b
Example No. 30
    def on_frame(self, frame):
        h, w = frame.shape[:2]
        qi = 0
        #print "on_frame %d x %d" % (h, w)
        frame_diff = cv2.absdiff(frame, self.prev_frame)
        gray_diff = cv2.cvtColor(frame_diff, cv2.COLOR_BGR2GRAY)
        ret, motion_mask = cv2.threshold(gray_diff, self._threshold, 1, cv2.THRESH_BINARY)
        timestamp = clock()
        cv2.updateMotionHistory(motion_mask, self.motion_history, timestamp, MHI_DURATION)
        mg_mask, mg_orient = cv2.calcMotionGradient(self.motion_history, MAX_TIME_DELTA, MIN_TIME_DELTA, apertureSize=5 )
        seg_mask, seg_bounds = cv2.segmentMotion(self.motion_history, timestamp, MAX_TIME_DELTA)

        centers = []
        rects = []
        draws = []
        for i, rect in enumerate([(0, 0, w, h)] + list(seg_bounds)):
            x, y, rw, rh = rect
            area = rw*rh
            if area < 64**2:
                continue
            silh_roi   = motion_mask        [y:y+rh,x:x+rw]
            orient_roi = mg_orient          [y:y+rh,x:x+rw]
            mask_roi   = mg_mask            [y:y+rh,x:x+rw]
            mhi_roi    = self.motion_history[y:y+rh,x:x+rw]
            if cv2.norm(silh_roi, cv2.NORM_L1) < area*0.05:
                continue
            angle = cv2.calcGlobalOrientation(orient_roi, mask_roi, mhi_roi, timestamp, MHI_DURATION)
            color = ((255, 0, 0), (0, 0, 255))[i == 0]
            if self._use_cv_gui:
                draws.append(lambda vis, rect=rect, angle=angle, color=color:
                                draw_motion_comp(vis, rect, angle, color))
            centers.append( (x+rw/2, y+rh/2) )
            rects.append(rect)

        self.tracker_group.update_trackers(centers, rects)

        #print 'Active trackers: %d' % len(trackers)
        #print 'Tracker score: %s' % ','.join(['%2d'%len(tracker.hits) for tracker in trackers])
        trackers = self.tracker_group.trackers
        cx, cy = None, None
        #print "#trackers = %d" % len(trackers)
        if len(trackers):
            first_tracker = trackers[0]
            cx, cy = center_after_median_threshold(frame, first_tracker.rect)
            cv2.circle(frame, (cx, cy), 5, (255, 255, 255), 3)
        print str(qi)*5; qi += 1
        print self._on_cx_cy
        self._on_cx_cy(cx, cy) # gives None's for no identified balloon
        print str(qi)*5; qi += 1

        if self._use_cv_gui:
            self.on_frame_cv_gui(frame, draws, (cx, cy))
        else:
            self.frame_vis(frame, draws, (cx, cy))

        #time.sleep(0.5)
        self.prev_frame = frame.copy()
        # TODO - print visualization onto image
        return frame
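Example No. 31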
for fname in images:
    img = cv2.imread(fname)
    img = imutils.resize(img, width=1000)  # increases FPS
    h, w = img.shape[:2]
    newcameramtx, roi = cv2.getOptimalNewCameraMatrix(mtx, dist, (w, h), 1,
                                                      (w, h))

    # undistort
    dst = cv2.undistort(img, mtx, dist, None, newcameramtx)

    # crop the image
    x, y, w, h = roi
    dst = dst[y:y + h, x:x + w]
    fname = fname.split('\\')[1]
    cv2.imwrite('undistorted\\' + fname, dst)

    if cv2.waitKey(500) & 0xFF == ord('q'):
        break

cv2.destroyAllWindows()

mean_error = 0

for i in range(len(objpoints)):
    imgpoints2, _ = cv2.projectPoints(objpoints[i], rvecs[i], tvecs[i], mtx,
                                      dist)
    error = cv2.norm(imgpoints[i], imgpoints2, cv2.NORM_L2) / len(imgpoints2)
    mean_error += error

print("total error: ", mean_error / len(objpoints))
Example No. 32
def find_squares(bgrcap, n):
    """ Find the positions of squares in the webcam picture"""
    global r_mask, color_filter, white_filter, black_filter

    h, s, v = cv2.split(hsv)
    h_sqr = np.square(h)

    sz = height // n
    border = 1 * sz

    varmax_edges = 20  # important
    for y in range(border, height - border, sz):
        for x in range(border, width - border, sz):

            rect_h = h[y:y + sz, x:x + sz]
            rect_h_sqr = h_sqr[y:y + sz, x:x + sz]

            median_h = np.sum(rect_h) / sz / sz  # actually the mean hue of the cell

            # hue variance: E[h^2] - E[h]^2
            sqr_median_hf = median_h * median_h
            median_hf_sqr = np.sum(rect_h_sqr) / sz / sz
            var = median_hf_sqr - sqr_median_hf

            sigma = np.sqrt(var)

            delta = vision_params.delta_C

            if sigma < vision_params.sigma_W:  # sigma runs higher for white, 10-100

                ex_rect = hsv[y - 1 * sz:y + 2 * sz,
                              x - 1 * sz:x + 2 * sz].copy()  # why copy?
                r_mask = cv2.inRange(
                    ex_rect, (0, 0, vision_params.val_W),
                    (255, vision_params.sat_W,
                     255))  # saturation high 30, value low 180
                r_mask = cv2.bitwise_or(
                    r_mask, white_filter[y - 1 * sz:y + 2 * sz,
                                         x - 1 * sz:x + 2 * sz])
                white_filter[y - 1 * sz:y + 2 * sz,
                             x - 1 * sz:x + 2 * sz] = r_mask

            if sigma < vision_params.sigma_C:  # the remaining true colors, 1-3
                ex_rect = h[y - 1 * sz:y + 2 * sz,
                            x - 1 * sz:x + 2 * sz].copy()  # why copy?
                if median_h + delta >= 180:
                    r_mask = cv2.inRange(ex_rect, 0, median_h + delta - 180)
                    r_mask = cv2.bitwise_or(
                        r_mask, cv2.inRange(ex_rect, median_h - delta, 180))
                elif median_h - delta < 0:
                    r_mask = cv2.inRange(ex_rect, median_h - delta + 180, 180)
                    r_mask = cv2.bitwise_or(
                        r_mask, cv2.inRange(ex_rect, 0, median_h + delta))
                else:
                    r_mask = cv2.inRange(ex_rect, median_h - delta,
                                         median_h + delta)

                r_mask = cv2.bitwise_or(
                    r_mask, color_filter[y - 1 * sz:y + 2 * sz,
                                         x - 1 * sz:x + 2 * sz])
                color_filter[y - 1 * sz:y + 2 * sz,
                             x - 1 * sz:x + 2 * sz] = r_mask

            # else:
            #     continue

    black_filter = cv2.inRange(
        bgrcap, (0, 0, 0),
        (vision_params.rgb_L, vision_params.rgb_L, vision_params.rgb_L))
    black_filter = cv2.bitwise_not(black_filter)

    color_filter = cv2.bitwise_and(color_filter, black_filter)
    color_filter = cv2.blur(color_filter, (20, 20))
    color_filter = cv2.inRange(color_filter, 240, 255)

    white_filter = cv2.bitwise_and(white_filter, black_filter)
    white_filter = cv2.blur(white_filter, (20, 20))
    white_filter = cv2.inRange(white_filter, 240, 255)

    itr = iter([white_filter, color_filter])  # apply white filter first!

    for j in itr:
        im2, contours, hierarchy = cv2.findContours(j, cv2.RETR_EXTERNAL,
                                                    cv2.CHAIN_APPROX_SIMPLE)
        for n in range(len(contours)):
            approx = cv2.approxPolyDP(contours[n], sz // 2, True)
            if approx.shape[0] != 4:
                continue
            corners = approx[:, 0]

            edges = np.array([
                cv2.norm(corners[0] - corners[1], cv2.NORM_L2),
                cv2.norm(corners[1] - corners[2], cv2.NORM_L2),
                cv2.norm(corners[2] - corners[3], cv2.NORM_L2),
                cv2.norm(corners[3] - corners[0], cv2.NORM_L2)
            ])
            edges_mean_sq = (np.sum(edges) / 4)**2
            edges_sq_mean = np.sum(np.square(edges)) / 4
            if edges_sq_mean - edges_mean_sq > varmax_edges:
                continue
            #cv2.drawContours(bgrcap, [approx], -1, (0, 0, 255), 8)
            middle = np.sum(corners, axis=0) / 4
            cent.append(np.asarray(middle))
Example No. 33
    def test_digits(self):

        digits, labels = self.load_digits(DIGITS_FN)

        # shuffle digits
        rand = np.random.RandomState(321)
        shuffle = rand.permutation(len(digits))
        digits, labels = digits[shuffle], labels[shuffle]

        digits2 = list(map(deskew, digits))
        samples = preprocess_hog(digits2)

        train_n = int(0.9 * len(samples))
        _digits_train, digits_test = np.split(digits2, [train_n])
        samples_train, samples_test = np.split(samples, [train_n])
        labels_train, labels_test = np.split(labels, [train_n])
        errors = list()
        confusionMatrixes = list()

        model = KNearest(k=4)
        model.train(samples_train, labels_train)
        error, confusion = evaluate_model(model, digits_test, samples_test,
                                          labels_test)
        errors.append(error)
        confusionMatrixes.append(confusion)

        model = SVM(C=2.67, gamma=5.383)
        model.train(samples_train, labels_train)
        error, confusion = evaluate_model(model, digits_test, samples_test,
                                          labels_test)
        errors.append(error)
        confusionMatrixes.append(confusion)

        eps = 0.001
        normEps = len(samples_test) * 0.02

        confusionKNN = [[45, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                        [0, 57, 0, 0, 0, 0, 0, 0, 0, 0],
                        [0, 0, 59, 1, 0, 0, 0, 0, 1, 0],
                        [0, 0, 0, 43, 0, 0, 0, 1, 0, 0],
                        [0, 0, 0, 0, 38, 0, 2, 0, 0, 0],
                        [0, 0, 0, 2, 0, 48, 0, 0, 1, 0],
                        [0, 1, 0, 0, 0, 0, 51, 0, 0, 0],
                        [0, 0, 1, 0, 0, 0, 0, 54, 0, 0],
                        [0, 0, 0, 0, 0, 1, 0, 0, 46, 0],
                        [1, 1, 0, 1, 1, 0, 0, 0, 2, 42]]

        confusionSVM = [[45, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                        [0, 57, 0, 0, 0, 0, 0, 0, 0, 0],
                        [0, 0, 59, 2, 0, 0, 0, 0, 0, 0],
                        [0, 0, 0, 43, 0, 0, 0, 1, 0, 0],
                        [0, 0, 0, 0, 40, 0, 0, 0, 0, 0],
                        [0, 0, 0, 1, 0, 50, 0, 0, 0, 0],
                        [0, 0, 0, 0, 1, 0, 51, 0, 0, 0],
                        [0, 0, 1, 0, 0, 0, 0, 54, 0, 0],
                        [0, 0, 0, 0, 0, 0, 0, 0, 47, 0],
                        [0, 1, 0, 1, 0, 0, 0, 0, 1, 45]]

        self.assertLess(
            cv.norm(confusionMatrixes[0] - confusionKNN, cv.NORM_L1), normEps)
        self.assertLess(
            cv.norm(confusionMatrixes[1] - confusionSVM, cv.NORM_L1), normEps)

        self.assertLess(errors[0] - 0.034, eps)
        self.assertLess(errors[1] - 0.018, eps)
Example No. 34
def ImageProcessing(n_boards, board_w, board_h, board_dim):
    #Initializing variables
    board_n = board_w * board_h
    opts = []
    ipts = []
    npts = np.zeros((n_boards, 1), np.int32)
    intrinsic_matrix = np.zeros((3, 3), np.float32)
    distCoeffs = np.zeros((5, 1), np.float32)
    criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.1)

    # prepare object points based on the actual dimensions of the calibration board
    # like (0,0,0), (25,0,0), (50,0,0) ....,(200,125,0)
    objp = np.zeros((board_h * board_w, 3), np.float32)
    objp[:, :2] = np.mgrid[0:(board_w * board_dim):board_dim,
                           0:(board_h * board_dim):board_dim].T.reshape(-1, 2)

    #Loop through the images.  Find checkerboard corners and save the data to ipts.
    for i in range(1, n_boards + 1):

        #Loading images
        print 'Loading... Calibration_Image' + str(i) + '.png'
        image = cv2.imread('Calibration_Image' + str(i) + '.png')

        #Converting to grayscale
        grey_image = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)

        #Find chessboard corners
        found, corners = cv2.findChessboardCorners(
            grey_image, (board_w, board_h),
            cv2.CALIB_CB_ADAPTIVE_THRESH + cv2.CALIB_CB_NORMALIZE_IMAGE)
        print(found)

        if found == True:

            #Add the "true" checkerboard corners
            opts.append(objp)

            #Improve the accuracy of the checkerboard corners found in the image and save them to the ipts variable.
            cv2.cornerSubPix(grey_image, corners, (20, 20), (-1, -1), criteria)
            ipts.append(corners)

            #Draw chessboard corners
            cv2.drawChessboardCorners(image, (board_w, board_h), corners,
                                      found)

            #Show the image with the chessboard corners overlaid.
            cv2.imshow("Corners", image)

        char = cv2.waitKey(0) & 0xff

    cv2.destroyWindow("Corners")

    print ''
    print 'Finished processing images.'

    #Calibrate the camera
    print 'Running Calibrations...'
    print(' ')
    ret, intrinsic_matrix, distCoeff, rvecs, tvecs = cv2.calibrateCamera(
        opts, ipts, grey_image.shape[::-1], None, None)

    #Save matrices
    print('Intrinsic Matrix: ')
    print(str(intrinsic_matrix))
    print(' ')
    print('Distortion Coefficients: ')
    print(str(distCoeff))
    print(' ')

    #Save data
    print 'Saving data file...'
    np.savez('calibration_data',
             distCoeff=distCoeff,
             intrinsic_matrix=intrinsic_matrix)
    print 'Calibration complete'

    #Calculate the total reprojection error.  The closer to zero the better.
    tot_error = 0
    for i in xrange(len(opts)):
        imgpoints2, _ = cv2.projectPoints(opts[i], rvecs[i], tvecs[i],
                                          intrinsic_matrix, distCoeff)
        error = cv2.norm(ipts[i], imgpoints2, cv2.NORM_L2) / len(imgpoints2)
        tot_error += error

    print "total reprojection error: ", tot_error / len(opts)

    #Undistort Images

    #Scale the images and create a rectification map.
    newMat, ROI = cv2.getOptimalNewCameraMatrix(intrinsic_matrix,
                                                distCoeff,
                                                image_size,
                                                alpha=crop,
                                                centerPrincipalPoint=1)
    mapx, mapy = cv2.initUndistortRectifyMap(intrinsic_matrix,
                                             distCoeff,
                                             None,
                                             newMat,
                                             image_size,
                                             m1type=cv2.CV_32FC1)

    for i in range(1, n_boards + 1):

        #Loading images
        print 'Loading... Calibration_Image' + str(i) + '.png'
        image = cv2.imread('Calibration_Image' + str(i) + '.png')

        # undistort
        dst = cv2.remap(image, mapx, mapy, cv2.INTER_LINEAR)

        cv2.imshow('Undistorted Image', dst)

        char = cv2.waitKey(0) & 0xff

    cv2.destroyAllWindows()
Example No. 35
    def _detect(self):
        corners, ids, rejects = cv2.aruco.detectMarkers(
            self.gray_frame, self.ar_dict)
        self.detects = len(corners)
        if self.detects == 0:
            for i in range(4):
                dt = int(round(time.time() * 1000)) - self.marker_time[i]
                if dt > 1000:
                    self.marker_enable[i] = False
                else:
                    self.marker_enable[i] = True
            return

        detect = [False, False, False, False]
        for i, corner in enumerate(corners):
            points = corner[0].astype(np.int32)
            p0 = points[0]
            p1 = points[1]
            p2 = points[2]
            p3 = points[3]
            id = int(ids[i][0])
            if id < 4 and len(points) == 4:
                # enable marker flag
                detect[id] = True
                self.marker_id[id] = True

                # take a marker corner points
                self.marker_pointss[id] = points

                # take a distance of to marker (method of apparent size)
                # (apparent size is sum of outer 4line length)
                normsum = float(
                    cv2.norm(p0, p1) + cv2.norm(p1, p2) + cv2.norm(p2, p3) +
                    cv2.norm(p3, p0))
                dist = 100.0 / (normsum / PARAM_1)
                self.marker_distances[id] = int(dist)

                # take a center of marker corner points
                cx = int(0)
                cy = int(0)
                for ii in range(4):
                    cx += self.marker_pointss[id][ii][0]
                    cy += self.marker_pointss[id][ii][1]
                cx /= 4
                cy /= 4

                # take a difference from screen center to the center of marker
                dcx = cx - (FRAME_W / 2)
                dcy = cy - (FRAME_H / 2)

                # take a cm/dot as in the marker
                cmpdot = (PARAM_2 * 4) / normsum

                # take a difference [cm] to the marker as space
                dcm_x_f = cmpdot * (float)(dcx)
                dcm_x = int(round(cmpdot * dcx))
                dcm_y = int(round(cmpdot * dcy))
                dcm_y += int(round(cmpdot * PARAM_3))
                self.marker_diff_cm[id][0] = dcm_x
                self.marker_diff_cm[id][1] = dcm_y

                # take a direction degree to the marker
                # (the angle whose tangent is offset/distance, hence atan)
                if dist != 0 and dcm_x != 0:
                    deg = 0
                    if dcm_x > 0:
                        deg = int(
                            round(math.degrees(math.atan(dcm_x_f / dist * -1))))
                        deg *= -1
                    else:
                        dcm_x_f = abs(dcm_x_f)
                        deg = int(
                            round(math.degrees(math.atan(dcm_x_f / dist * -1))))
                    self.marker_degree[id] = deg

                # take a z-tilt of the marker
                # (as square distortion)
                x0 = self.marker_pointss[id][0][0]
                y0 = self.marker_pointss[id][0][1]
                x1 = self.marker_pointss[id][1][0]
                y1 = self.marker_pointss[id][1][1]
                x2 = self.marker_pointss[id][2][0]
                y2 = self.marker_pointss[id][2][1]
                x3 = self.marker_pointss[id][3][0]
                y3 = self.marker_pointss[id][3][1]
                deg1 = self._get_2point_degree(x0, y0, x1, y1)
                deg2 = self._get_2point_degree(x3, y3, x2, y2)
                deg = deg1 + deg2
                self.marker_ztilt[id] = deg

        # renew record a detect time of the marker
        for i in range(4):
            if detect[i]:
                self.marker_time[i] = int(round(time.time() * 1000))

        for i in range(4):
            dt = int(round(time.time() * 1000)) - self.marker_time[i]
            if dt > 1000:
                self.marker_enable[i] = False
            else:
                self.marker_enable[i] = True

        return
Example No. 36
    def circles(self, cv_image):
        cv_image = cv2.resize(cv_image,
                              dsize=(self.screen['width'],
                                     self.screen['height']))
        #if self.blur:
        #    cv_image=cv2.GaussianBlur(cv_image,ksize=[5,5],sigmaX=0)
        #screen = { 'width' : 640, 'height' : 480 }
        grayImg = cv2.cvtColor(cv_image, cv2.cv.CV_BGR2GRAY)
        grayImg = cv2.resize(grayImg,
                             dsize=(self.screen['width'],
                                    self.screen['height']))
        grayImg = cv2.GaussianBlur(grayImg, ksize=(7, 7), sigmaX=0)

        # Calculate adaptive threshold value
        mean = cv2.mean(grayImg)[0]
        lowest = cv2.minMaxLoc(grayImg)[0]
        self.thval = min((mean + lowest) / self.val, self.upperThresh)
        rospy.logdebug(self.thval)

        #Thresholding and noise removal
        grayImg = cv2.threshold(grayImg, self.thval, 255,
                                cv2.THRESH_BINARY_INV)[1]

        #erodeEl = cv2.getStructuringElement(cv2.MORPH_RECT, (3, 3))
        dilateEl = cv2.getStructuringElement(cv2.MORPH_RECT, (7, 7))
        openEl = cv2.getStructuringElement(cv2.MORPH_RECT, (5, 5))

        #grayImg = cv2.erode(grayImg, erodeEl)
        grayImg = cv2.dilate(grayImg, dilateEl)
        grayImg = cv2.morphologyEx(grayImg, cv2.MORPH_OPEN, openEl)

        out = cv2.cvtColor(grayImg, cv2.cv.CV_GRAY2BGR)

        # Find centroid and bounding box
        pImg = grayImg.copy()
        contours, hierachy = cv2.findContours(pImg, cv2.RETR_LIST,
                                              cv2.CHAIN_APPROX_SIMPLE)

        maxArea = 0
        for contour in contours:
            area = cv2.contourArea(contour)
            if area > self.areaThresh and area < self.upperAreaThresh and area > maxArea:
                #Find the center using moments
                mu = cv2.moments(contour, False)
                centroidx = mu['m10'] / mu['m00']
                centroidy = mu['m01'] / mu['m00']
                maxArea = area

                centroid1 = centroidx
                centroid2 = centroidy
                #rectData['rect'] = cv2.minAreaRect(contour)
                rect = cv2.minAreaRect(contour)
        if maxArea > 0:
            #rectData['detected'] = True
            points = np.array(cv2.cv.BoxPoints(rect))

            #Find the blackline heading
            edge1 = points[1] - points[0]
            edge2 = points[2] - points[1]

            print "points1", points[1]
            print "points0", points[0]
            print "edge1", edge1

            #Choose the vertical edge
            if cv2.norm(edge1) > cv2.norm(edge2):
                edge1[1] = edge1[1] if edge1[1] != 0.0 else math.copysign(
                    0.01, edge1[1])
                #rectData['angle'] = math.degrees(math.atan(edge1[0]/edge1[1]))
                angle = math.degrees(math.atan(edge1[0] / edge1[1]))
            else:
                edge2[1] = edge2[1] if edge2[1] != 0.0 else math.copysign(
                    0.01, edge2[1])
                #rectData['angle'] = math.degrees(math.atan(edge2[0]/edge2[1]))
                angle = math.degrees(math.atan(edge2[0] / edge2[1]))

            #Choose angle to turn if horizontal

            if angle == 90:
                if centroid1 > self.screen['width'] / 2:
                    angle = -90
            elif angle == -90:
                if centroid1 < self.screen['width'] / 2:
                    angle = 90

            #Testing: draw the centroid on the output image
            centerx = int(centroid1)
            centery = int(centroid2)
            cv2.circle(out, (centerx, centery), 5, (0, 255, 0))

            for i in range(4):
                pt1 = (int(points[i][0]), int(points[i][1]))
                pt2 = (int(points[(i + 1) % 4][0]),
                       int(points[(i + 1) % 4][1]))
                cv2.line(out, pt1, pt2, (255, 0, 0))

            cv2.putText(out, str(angle), (30, 30), cv2.FONT_HERSHEY_SIMPLEX, 1,
                        (0, 0, 255))

        try:
            self.image_filter_pub.publish(
                self.bridge.cv2_to_imgmsg(out, encoding="bgr8"))
        except CvBridgeError as e:
            rospy.logerr(e)
Example No. 37
    def Update(self, img, nkps, harris_det, enable_clust, tracker_hungarian,
               tracker_orb, tracker_conj_orb_hung, en_roi, roi_rect):
        img2 = img
        if len(img.shape) == 3:
            img2 = cv2.cvtColor(img2, cv2.COLOR_BGR2GRAY)
        if en_roi:
            img2 = img2[roi_rect[1]:roi_rect[1] + roi_rect[3],
                        roi_rect[0]:roi_rect[0] + roi_rect[2]]
        if harris_det:
            orb = cv2.ORB_create(nfeatures=nkps,
                                 edgeThreshold=self.brief_size,
                                 patchSize=self.brief_size)
            corners = cv2.goodFeaturesToTrack(img2,
                                              nkps,
                                              0.01,
                                              self.size_orb_area,
                                              mask=np.array([]),
                                              blockSize=3,
                                              useHarrisDetector=0,
                                              k=0.04)
            kp2 = []
            for i in range(corners.shape[0]):
                kp_1 = cv2.KeyPoint(corners[i][0][0], corners[i][0][1],
                                    self.brief_size)
                kp2.append(kp_1)
            detections_kps = kp2
        else:
            orb = cv2.ORB_create(nfeatures=nkps,
                                 edgeThreshold=self.brief_size,
                                 patchSize=self.brief_size)
            detections_kps = orb.detect(img2, None)

        if enable_clust:
            clust_kps = self.orb.clust_keypoints(detections_kps)
            detections_kps = clust_kps
        self.detections_kps = detections_kps
        if len(self.tracks) != 0 and img is not None:
            kp2 = []
            if detections_kps is not None:
                detections_arr = np.array([detections_kps[0].pt])
                if len(detections_kps) != 0:
                    for i in range(1, len(detections_kps)):
                        detections_arr = np.concatenate(
                            [detections_arr, [detections_kps[i].pt]])
                distance = np.array([])
                for i in range(len(self.tracks)):
                    cur_track = np.tile((self.tracks[i].prediction[0][0],
                                         self.tracks[i].prediction[1][0]),
                                        (len(detections_kps), 1))
                    if len(distance) == 0:
                        distance = [
                            np.sqrt(
                                (detections_arr[:, 0] - cur_track[:, 0])**2 +
                                (detections_arr[:, 1] - cur_track[:, 1])**2)
                        ]
                    else:
                        distance = np.concatenate([
                            distance,
                            [
                                np.sqrt((detections_arr[:, 0] -
                                         cur_track[:, 0])**2 +
                                        (detections_arr[:, 1] -
                                         cur_track[:, 1])**2)
                            ]
                        ])
                euc_distance = distance
                assignment = np.tile(-1, (len(self.tracks), ))
                if tracker_hungarian:
                    cost = 0.5 * np.array(distance)
                    row_ind, col_ind = linear_sum_assignment(cost)
                    for i in range(len(row_ind)):
                        assignment[row_ind[i]] = col_ind[i]
                    for i in range(len(assignment)):
                        if assignment[i] != -1:
                            if cost[i][assignment[i]] > self.dist_thresh:
                                assignment[i] = -1
                if tracker_conj_orb_hung:
                    detections_kps, des_kps = orb.compute(img2, detections_kps)
                    distance_Hamming = np.tile(
                        500, (len(self.tracks), len(detections_kps)))
                    for i in range(len(self.tracks)):
                        if min(self.tracks[i].prediction[0][0],
                               self.tracks[i].prediction[1][0],
                               img2.shape[0] - self.tracks[i].prediction[1][0],
                               img2.shape[1] - self.tracks[i].prediction[0][0]
                               ) < self.brief_size + 1:
                            continue
                        kp_pred = self.tracks[i].key_point
                        [kp_pred], des1 = orb.compute(img2, [kp_pred])
                        if len(self.tracks[i].distance_Hamm) > 20:
                            self.tracks[i].distance_Hamm = []
                        for j in range(len(detections_kps)):
                            distance_Hamming[i][j] = cv2.norm(
                                des_kps[j], des1[0], cv2.NORM_HAMMING)
                    distance = euc_distance * (distance_Hamming)
                    cost = 0.5 * np.array(distance)
                    row_ind, col_ind = linear_sum_assignment(cost)
                    for i in range(len(row_ind)):
                        assignment[row_ind[i]] = col_ind[i]
                    for i in range(len(assignment)):
                        if assignment[i] != -1:
                            if euc_distance[i][
                                    assignment[i]] > self.dist_thresh:
                                assignment[i] = -1
                            else:
                                self.tracks[i].distance_Hamm.append(
                                    distance_Hamming[i][assignment[i]])
                if tracker_orb:
                    kp_matches = []
                    matches_distance = []
                    euc_distance_list = []
                    for i in range(len(self.tracks)):
                        if min(self.tracks[i].prediction[0][0],
                               self.tracks[i].prediction[1][0],
                               img.shape[0] - self.tracks[i].prediction[1][0],
                               img.shape[1] - self.tracks[i].prediction[0][0]
                               ) < self.brief_size + 1:
                            kp_matches.append([])
                            matches_distance.append(500)
                            euc_distance_list.append(500)
                            continue
                        kp_pred = self.tracks[i].key_point
                        [kp_id, match_distance] = self.orb.track_kp_selected(
                            img2, nkps, self.size_orb_area, detections_kps,
                            distance[i], kp_pred, self.brief_size)
                        kp_matches.append(detections_kps[kp_id])
                        matches_distance.append(match_distance)
                        euc_distance_list.append(euc_distance[i][kp_id])
                        assignment[i] = kp_id
                    for i in range(len(assignment)):
                        if assignment[i] != -1:
                            if euc_distance_list[i] > self.dist_thresh:
                                assignment[i] = -1
                for i in range(len(assignment)):
                    self.tracks[i].KF.predict(self.tracks[i].prediction)
                    if (assignment[i] != -1):
                        self.tracks[i].invisible_count = 0
                        self.tracks[i].key_point = detections_kps[
                            assignment[i]]
                        if tracker_hungarian or tracker_conj_orb_hung:
                            self.tracks[i].prediction = self.tracks[
                                i].KF.correct(
                                    np.array([[
                                        detections_kps[assignment[i]].pt[0]
                                    ], [detections_kps[assignment[i]].pt[1]]]),
                                    1)

                        if tracker_orb:
                            self.tracks[i].prediction = self.tracks[
                                i].KF.correct(
                                    np.array([[kp_matches[i].pt[0]],
                                              [kp_matches[i].pt[1]]]), 1)
                    else:
                        self.tracks[i].prediction = self.tracks[i].KF.correct(
                            np.array([[0], [0]]), 0)
                        self.tracks[i].invisible_count += 1
                    self.tracks[i].KF.lastResult = self.tracks[i].prediction
                del_tracks = []
                for i in range(len(self.tracks)):
                    if self.tracks[i].invisible_count > self.max_frames_to_skip:
                        del_tracks.append(i)
                if len(del_tracks) > 0:
                    # delete from the highest index down so earlier indices stay valid
                    for i in sorted(del_tracks, reverse=True):
                        if i < len(self.tracks):
                            del self.tracks[i]
        pass
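
The assignment step above delegates to SciPy's linear_sum_assignment (the Hungarian algorithm): given a tracks-by-detections cost matrix, it returns the row/column pairing with minimal total cost. A minimal standalone sketch of that call, with a made-up cost matrix:

import numpy as np
from scipy.optimize import linear_sum_assignment

# each row is a track, each column a detection; entries are distances
cost = np.array([[4.0, 1.0, 3.0],
                 [2.0, 0.0, 5.0],
                 [3.0, 2.0, 2.0]])

row_ind, col_ind = linear_sum_assignment(cost)
# row_ind[i] is matched with col_ind[i]; the total matched cost is minimal
print(list(zip(row_ind, col_ind)), cost[row_ind, col_ind].sum())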
Esempio n. 38
0
    y = len(contours)
    squares = np.zeros(y)
    template = np.zeros((480, 640), dtype='uint8')
    rectangles = list()
    areas = []
    cnt_list = []
    for cnt in contours:
        perimeter = cv2.arcLength(cnt, True)
        approx = cv2.approxPolyDP(cnt, 0.03 * perimeter, True)
        if len(approx) == 4 and cv2.contourArea(approx) > 10:

            rect = cv2.minAreaRect(approx)
            box = cv2.boxPoints(rect)
            box = np.int0(box)
            a = cv2.norm(box[0], box[1], cv2.NORM_L2)
            b = cv2.norm(box[1], box[2], cv2.NORM_L2)
            c = cv2.norm(box[2], box[3], cv2.NORM_L2)
            d = cv2.norm(box[3], box[0], cv2.NORM_L2)
            if b == 0 or d == 0:
                continue
            r1 = float(a) / float(b)
            r2 = float(c) / float(d)
            r3 = float(a * c) / float(b * d)
            if 0.9 <= r1 <= 1.1 and 0.9 <= r2 <= 1.1:
                rectangles.append(rect)

    boxes_list = []
    z = 0
    waitLength = 1
    rectangles, new_length = calibration.rect_trim(rectangles)
Esempio n. 39
0
    def image_callback(self, img_msg):

        if self.have_cam_info:

            output_msg = ArucoTransformArray()
            output_msg.header.seq = img_msg.header.seq
            output_msg.header.stamp = img_msg.header.stamp
            output_msg.header.frame_id = img_msg.header.frame_id

            cv_img = self.bridge.imgmsg_to_cv2(img_msg,
                                               desired_encoding="passthrough")

            gray_img = cv2.cvtColor(cv_img, cv2.COLOR_BGR2GRAY)

            corners, ids, rejectedImgPoints = aruco.detectMarkers(
                gray_img, ARUCO_DICT, parameters=ARUCO_PARAMETERS)

            rvecs, tvecs, _objPoints = aruco.estimatePoseSingleMarkers(
                corners, ARUCO_SQUARE_SIZE, self.CAMERA_MATRIX,
                self.DISTORTION_COEFFICIENTS)

            # Tvecs: +x is to the right (as seen by camera), +y is down (as seen by camera), +z is further from camera
            # Rvecs: axis-angle (Rodrigues) rotation vectors; converted to quaternions below

            rospy.loginfo("ids: {ids}, rvecs: {rvecs}, tvecs: {tvecs}".format(
                ids=ids, rvecs=rvecs, tvecs=tvecs))

            test_img = cv_img
            if ids is not None:
                for i in range(len(ids)):
                    test_img = aruco.drawAxis(test_img, self.CAMERA_MATRIX,
                                              self.DISTORTION_COEFFICIENTS,
                                              rvecs[i], tvecs[i],
                                              ARUCO_SQUARE_SIZE)
            rosified_test_img = self.bridge.cv2_to_imgmsg(test_img,
                                                          encoding="rgb8")
            self.debug_image_pub.publish(rosified_test_img)

            if ids is not None:
                for i in range(len(ids)):
                    marker_pose = ArucoTransform()
                    marker_pose.id = ids[i][0]

                    marker_pose.transform.translation.x = tvecs[i][0][0]
                    marker_pose.transform.translation.y = tvecs[i][0][1]
                    marker_pose.transform.translation.z = tvecs[i][0][2]

                    #Conversion from axis-angle to quaternion
                    #Source: http://www.euclideanspace.com/maths/geometry/rotations/conversions/angleToQuaternion/

                    #Get total angle of rotation from axis-angle
                    angle = cv2.norm(rvecs[i][0])
                    #Normalize axis-angle vector
                    axis = (rvecs[i][0][0] / angle, rvecs[i][0][1] / angle,
                            rvecs[i][0][2] / angle)

                    x = axis[0] * sin(angle / 2)
                    y = axis[1] * sin(angle / 2)
                    z = axis[2] * sin(angle / 2)
                    w = cos(angle / 2)

                    marker_pose.transform.rotation.x = x
                    marker_pose.transform.rotation.y = y
                    marker_pose.transform.rotation.z = z
                    marker_pose.transform.rotation.w = w

                    output_msg.transforms.append(marker_pose)

            self.markers_pub.publish(output_msg)
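
The axis-angle-to-quaternion math in the callback above is easy to factor out. A minimal sketch of the same conversion as a standalone helper (the function name is illustrative, and unlike the original it guards the zero-rotation case, where normalizing the axis would divide by zero):

import numpy as np
from math import sin, cos

def rvec_to_quaternion(rvec):
    # total rotation angle encoded in the Rodrigues vector
    angle = np.linalg.norm(rvec)
    if angle < 1e-12:
        return (0.0, 0.0, 0.0, 1.0)  # identity quaternion for "no rotation"
    axis = np.asarray(rvec, dtype=float) / angle
    s = sin(angle / 2.0)
    return (axis[0] * s, axis[1] * s, axis[2] * s, cos(angle / 2.0))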
Esempio n. 40
0
def contourInGroup(center, group):
    # True when the contour center lies within GROUP_RADIUS of the group's center
    return cv2.norm(center, calCenter(group)) < GROUP_RADIUS
Esempio n. 41
0
                np.clip(
                    (motion_history -
                     (timestamp - MHI_DURATION)) / MHI_DURATION, 0, 1) * 255)
            vis = cv2.cvtColor(vis, cv2.COLOR_GRAY2BGR)
        elif visual_name == 'grad_orient':
            hsv[:, :, 0] = mg_orient / 2
            hsv[:, :, 2] = mg_mask * 255
            vis = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)

        for i, rect in enumerate([(0, 0, w, h)] + list(seg_bounds)):
            x, y, rw, rh = rect
            area = rw * rh
            if area < 64**2:
                continue
            silh_roi = motion_mask[y:y + rh, x:x + rw]
            orient_roi = mg_orient[y:y + rh, x:x + rw]
            mask_roi = mg_mask[y:y + rh, x:x + rw]
            mhi_roi = motion_history[y:y + rh, x:x + rw]
            if cv2.norm(silh_roi, cv2.NORM_L1) < area * 0.05:
                continue
            angle = cv2.calcGlobalOrientation(orient_roi, mask_roi, mhi_roi,
                                              timestamp, MHI_DURATION)
            color = ((255, 0, 0), (0, 0, 255))[i == 0]
            draw_motion_comp(vis, rect, angle, color)

        draw_str(vis, (20, 20), visual_name)
        cv2.imshow('motempl', vis)

        prev_frame = frame.copy()
        if cv2.waitKey(5) == 27:
            break
Esempio n. 42
0
                            (255, 255, 255), 1)
        source = cv2.circle(source, (int(x), int(y)), 2, (0, 255, 255), 2)

        rect = cv2.minAreaRect(c)  # try to fit a rectangle
        box = cv2.boxPoints(rect)  # find the four vertices of the rectangle
        box = np.int0(box)  # round the coordinates
        center = (int(rect[0][0]), int(rect[0][1]))
        area = int(rect[1][0] * rect[1][1])  # compute the area

        # compute the two vectors that form the sides of the rectangle
        edge1 = np.int0((box[1][0] - box[0][0], box[1][1] - box[0][1]))
        edge2 = np.int0((box[2][0] - box[1][0], box[2][1] - box[1][1]))

        # find out which vector is longer
        usedEdge = edge1
        if cv2.norm(edge2) > cv2.norm(edge1):
            usedEdge = edge2
        reference = (1, 0)  # horizontal vector defining the horizon

        # compute the angle between the longest side of the rectangle and the horizon
        angle = 180.0 / math.pi * math.acos(
            (reference[0] * usedEdge[0] + reference[1] * usedEdge[1]) /
            (cv2.norm(reference) * cv2.norm(usedEdge)))
        if area > 500:
            cv2.drawContours(img, [box], 0, (255, 0, 0),
                             2)  # draw the rectangle
            cv2.circle(img, center, 5, (255, 0, 0),
                       2)  # draw a small circle at the center of the rectangle
            # draw the tilt angle on the frame
            if (center[0] - x > 20 and center[1] - y > 20):
                cv2.putText(img, str(round(180 - angle, 1)),
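
The acos formula above can raise a domain error when rounding pushes the normalized dot product just outside [-1, 1], and it only yields angles in [0, 180]. A minimal alternative sketch using atan2, assuming the same 4x2 box layout returned by cv2.boxPoints (the helper name is illustrative):

import math
import numpy as np

def edge_angle_deg(box):
    # angle between the longer side of the box and the horizontal axis
    edge1 = box[1] - box[0]
    edge2 = box[2] - box[1]
    used = edge1 if np.linalg.norm(edge1) >= np.linalg.norm(edge2) else edge2
    return math.degrees(math.atan2(used[1], used[0]))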
Esempio n. 43
0
"""
Homework 2. Task 3.

Author: Mikhail Kita, group 371
"""

import cv2
from common import *

reduced = [scale(elem, 1.0 / 32.0, 8) for elem in array]
result = [scale(elem, 1.0 / 8.0, 32) for elem in reduced]

draw_map(reduced, "fig4_1.png")
draw_map(result, "fig4_2.png")

temp = [cv2.norm(result[i], array[i]) for i in range(0, 32)]

with open(path + "norm_image.txt", 'w') as f:
    f.write(str(cv2.norm(np.array(temp))))

filtered = [low_pass_filter(elem) for elem in array]
reduced = [scale(elem, 1.0 / 32.0, 8) for elem in filtered]
result = [scale(elem, 1.0 / 8.0, 32) for elem in reduced]

draw_map(reduced, "fig5_1.png")
draw_map(result, "fig5_2.png")

temp = [cv2.norm(result[i], filtered[i]) for i in range(0, 32)]

with open(path + "norm_image_filt.txt", 'w') as f:
    f.write(str(cv2.norm(np.array(temp))))
Esempio n. 44
0
def camera_intrinsic_calibration(req, image_data):
    # termination criteria
    criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001)
    criteria_cal = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 10,
                    0.001)
    # prepare object points, like (0,0,0), (1,0,0), (2,0,0) ....,(6,5,0)
    objp = np.zeros((6 * 7, 3), np.float32)
    objp[:, :2] = np.mgrid[0:7, 0:6].T.reshape(-1, 2)

    # arrays to store 3D object points and 2D image points from all the images
    objpoints = []
    imgpoints = []

    for img in image_data:
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

        # Find the chess board corners
        ret, corners = cv2.findChessboardCorners(gray, (7, 6), None)

        # If found, add object points, image points (after refining them)
        if ret == True:
            objpoints.append(objp)
            # Once we find the corners, we can increase their accuracy
            corners2 = cv2.cornerSubPix(gray, corners, (11, 11), (-1, -1),
                                        criteria)
            imgpoints.append(corners2)

    # index list for imgpoints
    list1 = np.arange(0, len(imgpoints), 1)
    # camera intrinsic matrix
    mtx = np.zeros((3, 3))
    if len(req.params) > 3:
        mtx[0, 0] = req.params[0]
        mtx[1, 1] = req.params[1]
        mtx[0, 2] = req.params[2]
        mtx[1, 2] = req.params[3]
    mtx[2, 2] = 1

    # optimize data step by step based on sampled imgs, get best one
    min_error = 1000
    best_mtx = mtx
    for i in range(10):
        cur_data = list1
        if len(imgpoints) > 20:
            # randomly select 20 keypoints to do calibration
            cur_data = sample(list1, 20)
        cur_obj = list(objpoints[i] for i in cur_data)
        cur_img = list(imgpoints[i] for i in cur_data)
        try:
            ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(
                cur_obj,
                cur_img,
                gray.shape[::-1],
                cameraMatrix=mtx,
                distCoeffs=None,
                rvecs=None,
                tvecs=None,
                flags=0,
                criteria=criteria_cal)
        except:
            gray = cv2.cvtColor(image_data[0], cv2.COLOR_BGR2GRAY)
            ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(
                cur_obj,
                cur_img,
                gray.shape[::-1],
                cameraMatrix=mtx,
                distCoeffs=None,
                rvecs=None,
                tvecs=None,
                flags=0,
                criteria=criteria_cal)

        #evaluate
        tot_error = 0
        mean_error = 0
        for j in range(len(cur_obj)):
            imgpoints2, _ = cv2.projectPoints(cur_obj[j], rvecs[j], tvecs[j],
                                              mtx, dist)
            error = cv2.norm(cur_img[j], imgpoints2,
                             cv2.NORM_L2) / len(imgpoints2)
            tot_error += error
        mean_error = tot_error / len(cur_obj)
        if mean_error < min_error:
            min_error = mean_error
            best_mtx = mtx

        rospy.loginfo(rospy.get_caller_id() + 'I get corners %s',
                      len(imgpoints))
        rospy.loginfo(rospy.get_caller_id() + 'I get parameters %s',
                      best_mtx[0, 0])
    return imgpoints, best_mtx
Esempio n. 45
0
# Hence intrinsic parameters are the same

criteria_stereo = (cv.TERM_CRITERIA_EPS + cv.TERM_CRITERIA_MAX_ITER, 30, 0.001)

# This step finds the transformation between the two cameras and calculates the essential and fundamental matrices
retStereo, newCameraMatrixL, distL, newCameraMatrixR, distR, rot, trans, essentialMatrix, fundamentalMatrix = cv.stereoCalibrate(
    objpoints, imgpointsL, imgpointsR, newCameraMatrixL, distL,
    newCameraMatrixR, distR, grayL.shape[::-1], criteria_stereo, flags)

# Reprojection Error
mean_error = 0

for i in range(len(objpoints)):
    imgpoints2, _ = cv.projectPoints(objpoints[i], rvecsL[i], tvecsL[i],
                                     newCameraMatrixL, distL)
    error = cv.norm(imgpointsL[i], imgpoints2, cv.NORM_L2) / len(imgpoints2)
    mean_error += error

print("total error: {}".format(mean_error / len(objpoints)))

########## Stereo Rectification #################################################

rectifyScale = 1
rectL, rectR, projMatrixL, projMatrixR, Q, roi_L, roi_R = cv.stereoRectify(
    newCameraMatrixL, distL, newCameraMatrixR, distR, grayL.shape[::-1], rot,
    trans, rectifyScale, (0, 0))
print(Q)

stereoMapL = cv.initUndistortRectifyMap(newCameraMatrixL, distL, rectL,
                                        projMatrixL, grayL.shape[::-1],
                                        cv.CV_16SC2)
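
The reprojection-error loop above reappears almost verbatim in several of these examples. A minimal sketch of the same computation as a reusable helper (the name and signature are illustrative, not from the original):

import cv2 as cv

def mean_reprojection_error(objpoints, imgpoints, rvecs, tvecs, mtx, dist):
    # mean per-view L2 error between detected corners and reprojected object points
    total = 0.0
    for objp, imgp, rvec, tvec in zip(objpoints, imgpoints, rvecs, tvecs):
        projected, _ = cv.projectPoints(objp, rvec, tvec, mtx, dist)
        total += cv.norm(imgp, projected, cv.NORM_L2) / len(projected)
    return total / len(objpoints)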
Esempio n. 46
0
print(rvecs)
print("tvecs:")
print(tvecs)
print("rotation_matrixs:")
print(rotation_matrixs)

# Part3: Evaluation

import matplotlib.pyplot as plt
index_list = [str(i) for i in range(IMG_NUM)]
errors_list = []
mean_error = 0
for i in range(len(object_points)):
    recovered_points, _ = cv2.projectPoints(object_points[i], rvecs[i],
                                            tvecs[i], cameraMatrix, distCoeffs)
    error = cv2.norm(image_points[i], recovered_points,
                     cv2.NORM_L2) / len(recovered_points)
    mean_error += error
    errors_list.append(error)

mean_error /= IMG_NUM
print("mean error: {}".format(mean_error))
plt.hlines(mean_error,
           0,
           20,
           colors='r',
           linestyles="dashed",
           label='Mean Error = {:.4f}'.format(mean_error))
plt.bar(index_list, errors_list, fc='blue')
plt.xlabel("Image Index")
plt.ylabel("Mean Error in Pixels")
plt.legend()
Esempio n. 47
0
    def gotFrame(self, img):
        allCentroidList = []

        img = cv2.resize(img, (640, 480))

        hsvImg = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
        hsvImg = np.array(hsvImg, dtype=np.uint8)

        binImg = self.morphology(
            cv2.inRange(hsvImg, self.greenParams['lo'],
                        self.greenParams['hi']))
        kern = cv2.getStructuringElement(cv2.MORPH_RECT, (3, 3))
        binImg = cv2.morphologyEx(binImg, cv2.MORPH_OPEN, kern)

        dilateKern = cv2.getStructuringElement(cv2.MORPH_RECT, (3, 3))
        binImg = cv2.dilate(binImg, dilateKern, iterations=1)

        # Find contours and fill them
        for i in range(4):
            binImgCopy = binImg.copy()
            contours, hierarchy = cv2.findContours(binImgCopy, cv2.RETR_CCOMP,
                                                   cv2.CHAIN_APPROX_SIMPLE)
            cv2.drawContours(image=binImg,
                             contours=contours,
                             contourIdx=-1,
                             color=(255, 255, 255),
                             thickness=-1)
        '''
        Detect the board first
        '''

        # Detecting the board
        # Find the largest contours and make sure its a square
        scratchImgCol = cv2.cvtColor(binImg, cv2.COLOR_GRAY2BGR)
        '''
        Board contour detection
        '''
        binImgCopy = binImg.copy()
        contours, hierarchy = cv2.findContours(binImgCopy, cv2.RETR_CCOMP,
                                               cv2.CHAIN_APPROX_NONE)
        # contours, hierarchy = cv2.findContours(binImgCopy,
        #     cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
        contours = filter(lambda c: cv2.contourArea(c) > self.minContourArea,
                          contours)
        if len(contours) == 0:
            return scratchImgCol

        self.comms.foundSomething = True
        contours = sorted(contours, key=cv2.contourArea, reverse=True)

        largestContour = contours[0]
        rect = cv2.minAreaRect(largestContour)

        ((center_x, center_y), (width, height),
         angle) = cv2.minAreaRect(largestContour)
        box = cv2.cv.BoxPoints(rect)
        boxInt = np.int0(box)
        cv2.drawContours(scratchImgCol, [boxInt], 0, (0, 0, 255), 2)

        # epsilon = cv2.arcLength(largestContour, True)*0.01
        # approxCurve = cv2.approxPolyDP(largestContour, epsilon, False)
        # cv2.drawContours(scratchImgCol, [approxCurve], 0, (255,0,0), 2)
        '''
        Determine skew
        '''
        boxpoints = np.int32(box)
        leftEdge = cv2.norm(boxpoints[2] - boxpoints[1])
        rightEdge = cv2.norm(boxpoints[3] - boxpoints[0])
        '''
        self.comms.skew = rightEdge - leftEdge
        if abs(self.comms.skew) < self.skewLimit:
            self.comms.direction = "NONE"
        if rightEdge > leftEdge:
            self.comms.direction = "RIGHT"
        else:
            self.comms.direction = "LEFT"
        '''

        # Find centroid of rect returned
        if not self.comms.isMovingState:
            mu = cv2.moments(largestContour)
            muArea = mu['m00']
            # tempBoardCentroid = (int(mu['m10']/muArea), int(mu['m01']/muArea))
            tempBoardCentroidx = int(mu['m10'] / muArea)
            tempBoardCentroidy = int(mu['m01'] / muArea)
            tempBoardArea = muArea

            self.comms.boardCentroid = (tempBoardCentroidx, tempBoardCentroidy)
            self.comms.boardArea = tempBoardArea

            # Dist where centroid of board is off
            self.comms.boardDeltaX = float(
                (self.comms.boardCentroid[0] - self.aimingCentroid[0]) * 1.0 /
                vision.screen['width'])
            self.comms.boardDeltaY = float(
                (self.comms.boardCentroid[1] - self.aimingCentroid[1]) * 1.0 /
                vision.screen['height'])
        '''
        Detect black circles
        '''

        labImg = cv2.cvtColor(img, cv2.COLOR_BGR2LAB)
        circleBinImg = self.morphology(
            cv2.inRange(labImg, self.thresParams['lo'],
                        self.thresParams['hi']))
        kern = cv2.getStructuringElement(cv2.MORPH_RECT, (5, 5))
        # circleBinImg = cv2.morphologyEx(circleBinImg, cv2.MORPH_OPEN, kern)
        dilateKern = cv2.getStructuringElement(cv2.MORPH_RECT, (5, 5))
        circleBinImg = cv2.dilate(circleBinImg, kern, iterations=1)

        # return cv2.cvtColor(circleBinImg, cv2.COLOR_GRAY2BGR)

        circleCont, circleHie = cv2.findContours(circleBinImg.copy(),
                                                 cv2.RETR_CCOMP,
                                                 cv2.CHAIN_APPROX_SIMPLE)
        circleCont = filter(lambda c: cv2.contourArea(c) > 1000, circleCont)
        circleCont = sorted(circleCont, key=cv2.contourArea, reverse=True)

        mask = np.zeros_like(binImg, dtype=np.uint8)
        cv2.fillPoly(mask, [np.int32(largestContour)], 255)
        binImg2 = binImg.copy()
        binImg2 = cv2.bitwise_not(binImg2, mask=mask)
        # return cv2.cvtColor(binImg2, cv2.COLOR_GRAY2BGR)

        # Find contours of the binImg2
        binCont, binhie = cv2.findContours(binImg2.copy(), cv2.RETR_CCOMP,
                                           cv2.CHAIN_APPROX_SIMPLE)

        # binCont = []    # Quick hack to remove

        circleBinImg = cv2.cvtColor(binImg2, cv2.COLOR_GRAY2BGR)
        # if len(binCont) > 0 and len(circleCont) > 0:
        # rospy.loginfo("Both ")
        for cont in binCont:
            if cv2.contourArea(cont) > 600:
                (cx, cy), radius = cv2.minEnclosingCircle(cont)

                if len(circleCont) > 0:
                    for contour in circleCont:
                        if cv2.contourArea(contour) < 25000:
                            (circx,
                             circy), circrad = cv2.minEnclosingCircle(contour)
                            # If circle radius inside contour
                            if cx - radius < circx < cx + radius and cy - radius < circy < cy + radius:
                                cv2.circle(circleBinImg,
                                           (int(circx), int(circy)),
                                           int(circrad), (255, 255, 0), 2)
                                cv2.circle(circleBinImg,
                                           (int(circx), int(circy)), 1,
                                           (255, 0, 255), 2)
                                allCentroidList.append((cx, cy, radius))

            # return circleBinImg
            '''
        elif len(binCont) > 0 and len(circleCont) == 0:
            # rospy.loginfo("Binary only")
            for cont in binCont:
                if cv2.contourArea(cont) > 600:
                    (cx, cy), radius = cv2.minEnclosingCircle(cont)
                    cv2.circle(circleBinImg, (int(cx), int(cy)), int(radius), (255,255,0), 2)
                    cv2.circle(circleBinImg, (int(cx), int(cy)), 1, (255, 0, 255), 2)
                    allCentroidList.append((cx, cy, radius))

        elif len(binCont) == 0 and len(circleCont) > 0:
            # rospy.loginfo("Circle only")
            for cont in circleCont:
                (cx, cy), radius = cv2.minEnclosingCircle(cont)
                if 15 < radius < 100:
                    cv2.circle(circleBinImg, (int(cx), int(cy)), int(radius), (255,255,0), 2)
                    cv2.circle(circleBinImg, (int(cx), int(cy)), 1, (255, 0, 255), 2)
                    allCentroidList.append((cx, cy, radius))
        # '''
        # return circleBinImg

        if self.comms.isMovingState:
            scratchImgCol = circleBinImg

        if len(allCentroidList) > 0:
            self.comms.foundCircles = True

        allCentroidList = sorted(allCentroidList,
                                 key=lambda centroid: centroid[2],
                                 reverse=True)
        if len(allCentroidList) > 0:
            self.comms.centroidToShoot = (int(allCentroidList[0][0]),
                                          int(allCentroidList[0][1]))
            self.comms.radius = allCentroidList[0][2]

        cv2.circle(scratchImgCol, self.comms.centroidToShoot, 3, (0, 255, 255),
                   2)
        cv2.circle(circleBinImg, self.comms.centroidToShoot, 3, (0, 255, 255),
                   2)

        # How far the centroid is off the screen center
        self.comms.deltaX = float(
            (self.comms.centroidToShoot[0] - self.aimingCentroid[0]) * 1.0 /
            vision.screen['width'])
        self.comms.deltaY = float(
            (self.comms.centroidToShoot[1] - self.aimingCentroid[1]) * 1.0 /
            vision.screen['height'])
        '''
        Draw Everything
        '''
        if self.comms.centroidToShoot[0] == -1 and self.comms.centroidToShoot[
                1] == -1:
            self.comms.centroidToShoot = self.getAimingCentroid()
        self.comms.angleFromCenter = self.calculateAngle(
            self.comms.centroidToShoot, self.aimingCentroid)
        cv2.line(scratchImgCol, (int(self.comms.centroidToShoot[0]),
                                 int(self.comms.centroidToShoot[1])),
                 (int(self.aimingCentroid[0]), int(self.aimingCentroid[1])),
                 (0, 255, 0), 2)
        centerx = int(
            (self.comms.centroidToShoot[0] + self.aimingCentroid[0]) / 2)
        centery = int(
            (self.comms.centroidToShoot[1] + self.aimingCentroid[1]) / 2)
        cv2.putText(scratchImgCol,
                    "{0:.2f}".format(self.comms.angleFromCenter),
                    (centerx + 20, centery - 20), cv2.FONT_HERSHEY_PLAIN, 1,
                    (255, 150, 50), 1)

        # Board variables
        cv2.putText(scratchImgCol, "Board Area: " + str(self.comms.boardArea),
                    (410, 30), cv2.FONT_HERSHEY_PLAIN, 1, (204, 204, 204))
        cv2.circle(scratchImgCol, self.comms.boardCentroid, 2, (0, 0, 255), 2)
        cv2.putText(scratchImgCol, "Board X: " + str(self.comms.boardDeltaX),
                    (410, 60), cv2.FONT_HERSHEY_PLAIN, 1, (204, 204, 204))
        cv2.putText(scratchImgCol, "Board Y: " + str(self.comms.boardDeltaY),
                    (410, 80), cv2.FONT_HERSHEY_PLAIN, 1, (204, 204, 204))

        # Circle variables
        cv2.putText(scratchImgCol, "X  " + str(self.comms.deltaX), (30, 30),
                    cv2.FONT_HERSHEY_PLAIN, 1, (255, 153, 51))
        cv2.putText(scratchImgCol, "Y  " + str(self.comms.deltaY), (30, 60),
                    cv2.FONT_HERSHEY_PLAIN, 1, (255, 153, 51))

        cv2.putText(scratchImgCol, "Rad " + str(self.comms.radius), (30, 85),
                    cv2.FONT_HERSHEY_PLAIN, 1, (255, 153, 51))

        # Skew variables
        cv2.putText(scratchImgCol, "Skew: " + str(self.comms.direction),
                    (430, 430), cv2.FONT_HERSHEY_PLAIN, 1, (255, 51, 153))
        cv2.putText(scratchImgCol, "Skew angle: " + str(self.comms.skew),
                    (430, 460), cv2.FONT_HERSHEY_PLAIN, 1, (255, 51, 153))

        # Draw stuff in case return things
        # Draw center of screen
        scratchImgCol = self.drawCenterRect(scratchImgCol)
        # scratchImgCol = vision.drawCenterRect(scratchImgCol)
        cv2.putText(scratchImgCol, "TORPEDO", (20, 430),
                    cv2.FONT_HERSHEY_PLAIN, 2, (255, 153, 51))
        cv2.putText(scratchImgCol, str(self.comms.state), (20, 460),
                    cv2.FONT_HERSHEY_DUPLEX, 1, (211, 0, 148))

        # return np.hstack((scratchImgCol, circleBinImg))
        return scratchImgCol
Esempio n. 48
0
                                      t.GREEN, 2)
                        cv2.rectangle(frame, (x, y), (x + w, y + h), t.GREEN,
                                      2)
                    else:
                        cv2.rectangle(frame_lane1, (x, y), (x + w, y + h),
                                      t.PINK, 2)
                        cv2.rectangle(frame, (x, y), (x + w, y + h), t.PINK, 2)

                # ################## TRACKING #################################
                # Look for existing blobs that match this one
                closest_blob = None
                if tracked_blobs:
                    # Sort the blobs we have seen in previous frames by pixel distance from this one
                    closest_blobs = sorted(
                        tracked_blobs,
                        key=lambda b: cv2.norm(b['trail'][0], center))

                    # Starting from the closest blob, make sure the blob in question is in the expected direction
                    distance = 0.0
                    for close_blob in closest_blobs:
                        distance = cv2.norm(center, close_blob['trail'][0])

                        # Check if the distance is close enough to "lock on"
                        if r(BLOB_LOCKON_DIST_PX_MIN) < distance < r(
                                BLOB_LOCKON_DIST_PX_MAX):
                            closest_blob = close_blob
                            continue  # remove later
                            # If it's close enough, make sure the blob was moving in the expected direction
#                            if close_blob['trail'][0][1] < center[1]:  # check if it is moving up
#                                continue
Esempio n. 49
0
def CameraCalibration():
    # Termination criteria for the cornerSubPix function
    criteria = (cv.TERM_CRITERIA_EPS + cv.TERM_CRITERIA_MAX_ITER, 30, 0.001)
    # prepare object points, like (0,0,0), (1,0,0), (2,0,0) ....,(6,5,0)
    ############ to adapt ##########################
    objp = np.zeros((4 * 6, 3), np.float32)
    objp[:, :2] = np.mgrid[0:4, 0:6].T.reshape(-1, 2)
    #
    objp[:, :2] *= 38  # square size is 38 mm
    #################################################
    #
    objpoints = []  # 3D points in real world space
    imgpoints = []  # 2D points in the image plane
    ############ to adapt ##########################
    images = glob.glob('Images/chess2/*.jpeg')
    #################################################

    for fname in images:
        img = cv.pyrDown(cv.imread(fname))
        gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
        # this function finds the corners of a chessboard/checkerboard
        ############ to adapt ##########################
        ret, corners = cv.findChessboardCorners(gray, (4, 6), None)
        #################################################
        print(ret)
        # If corners are found, add them to objpoints and, after applying
        # cornerSubPix to refine their positions, to imgpoints
        if ret == True:
            objpoints.append(objp)
            corners2 = cv.cornerSubPix(gray, corners, (11, 11), (-1, -1),
                                       criteria)
            imgpoints.append(corners2)  # store the refined corner positions
            # Find (and then display) the checkerboard corners
            ############ to adapt ##########################
            cv.drawChessboardCorners(img, (4, 6), corners2, ret)
            #################################################
            cv.namedWindow('img', 0)
            cv.imshow('img', img)
            cv.waitKey(500)
    cv.destroyAllWindows()

    # Now we calibrate the camera with calibrateCamera, which takes the two
    # point arrays and the image size as parameters and returns:
    # mtx: the camera matrix, which defines the focal length and the optical center
    # dist: the distortion coefficients
    # rvecs: the rotation vectors
    # tvecs: the translation vectors
    ret, mtx, dist, rvecs, tvecs = cv.calibrateCamera(objpoints, imgpoints,
                                                      gray.shape[::-1], None,
                                                      None)
    print('camraMatrix\n', mtx)
    print('dist\n', dist)

    ############ to adapt ##########################
    img = cv.pyrDown(cv.imread('Images/chess2/test.jpeg'))
    #################################################
    h, w = img.shape[:2]
    newcameramtx, roi = cv.getOptimalNewCameraMatrix(mtx, dist, (w, h), 1,
                                                     (w, h))
    print('newcameramtx\n', newcameramtx)

    # find a mapping function from the original image and use it
    # to remove the distortion
    mapx, mapy = cv.initUndistortRectifyMap(mtx, dist, None, newcameramtx,
                                            (w, h), 5)
    dst = cv.remap(img, mapx, mapy, cv.INTER_LINEAR)
    # delimit the region of interest and crop it
    x, y, w, h = roi

    dst = dst[y:y + h, x:x + w]
    cv.namedWindow('img', 0)
    cv.imshow('img', dst)
    cv.waitKey(1)
    ############ to adapt ##########################
    cv.imwrite('calibresultM.png', dst)
    #################################################
    mean_error = 0
    for i in range(len(objpoints)):
        imgpoints2, _ = cv.projectPoints(objpoints[i], rvecs[i], tvecs[i], mtx,
                                         dist)
        error = cv.norm(imgpoints[i], imgpoints2, cv.NORM_L2) / len(imgpoints2)
        mean_error += error
    print("total error: {}".format(mean_error / len(objpoints)))

    return newcameramtx
Esempio n. 50
0
class IntrinsicsCalibrationTool:
    def __init__(self, camera):
        cv2.namedWindow("Intrinsics Calibration Tool")
        cv2.namedWindow("Stop image")

        # Chess board dimensions
        if (True):  # A4 board
            self.size = 0.02518  # Size of a checkerboard square in meters
            self.patterncorners = (9, 6)
            self.iterations = 2  # Dilation
            print "Using A4 board with dimension: " + str(self.patterncorners)
        if (False):  # A3 board
            self.size = 0.0375
            #size= 0.033875 # Size of a checkerboard square in meters
            # The corner counts refer to the inner corners
            # In principle (rows, columns)
            self.patterncorners = (9, 6)  # Pieter > Drive
            self.iterations = 4  # Dilation
            print "Using A3 board with dimension: " + str(self.patterncorners)

        self.objp = np.zeros(
            (self.patterncorners[0] * self.patterncorners[1], 3), np.float32)
        self.objp[:, :2] = np.mgrid[0:self.patterncorners[0],
                                    0:self.patterncorners[1]].T.reshape(
                                        -1, 2) * self.size

        self.reset()

        self.image_bridge = CvBridge()
        self.image_subscriber = rospy.Subscriber("/camera/" + camera +
                                                 "/stream",
                                                 Image,
                                                 self.imageCallback,
                                                 queue_size=1)
        self.camera_name = camera
        rospy.loginfo("Intrinsics calibration tool ready.")

        print "Press ENTER to accept the shown image."

        while True:
            k = cv2.waitKey()
            if (k == 99):  # Press 'c' to proceed to calibration.
                break
            elif (k == 27):
                pass

            if (not goodImages.empty()):
                img = goodImages.get()
                res = self.addImage(img)
            else:
                print "GoodImages empty"

        print 'Starting calibration'
        self.doCalibration()

    def reset(self):
        global stopImg
        global nrImgs
        stopImg = None
        nrImgs = 0

    def imageCallback(self, message):
        try:
            cv_image = self.image_bridge.imgmsg_to_cv2(message, "bgr8")
        except CvBridgeError as e:
            print(e)

        self.height, self.width = cv_image.shape[:2]

        if (self.testImage(cv_image)):
            goodImages.put(cv_image)
            print 'Images in queue: ' + str(goodImages.qsize())

    """Returns true if img gives valid chessboard
    """

    def testImage(self, img):
        global stopImg
        global nrImgs
        # Convert to grey
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        self.height, self.width = img.shape[:2]

        # Find the chess board corners
        ret, corners = cv2.findChessboardCorners(
            gray,
            self.patterncorners,
            corners=None,
            flags=cv.CV_CALIB_CB_ADAPTIVE_THRESH
            | cv.CV_CALIB_CB_NORMALIZE_IMAGE | cv.CV_CALIB_CB_FILTER_QUADS)

        return ret

    def addImage(self, img):
        global stopImg
        global nrImgs
        # Convert to grey
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        self.shape = gray.shape
        self.height, self.width = img.shape[:2]

        # Initialise stop Img
        if stopImg is None:
            print "Image dimensions: " + str(self.width) + "-" + str(
                self.height)
            stopImg = np.zeros(
                (self.height / stopScale, self.width / stopScale, 1), np.uint8)

        if not (stopImg is None):
            sih, siw = stopImg.shape[:2]
            calibratieRand = 25  # Percentage
            # We treat the outermost calibratieRand % as a border to ignore
            randh = (self.height / stopScale) * calibratieRand / 100
            randw = (self.width / stopScale) * calibratieRand / 100

        # Find the chess board corners
        ret, corners = cv2.findChessboardCorners(
            gray,
            self.patterncorners,
            corners=None,
            flags=cv.CV_CALIB_CB_ADAPTIVE_THRESH
            | cv.CV_CALIB_CB_NORMALIZE_IMAGE | cv.CV_CALIB_CB_FILTER_QUADS)

        # If found, add object points, image points (after refining them)
        if ret == True:
            #
            cv2.cornerSubPix(gray, corners, (5, 5), (-1, -1), criteria)

            # Draw and display the corners
            img_with_corners = img.copy()
            cv2.drawChessboardCorners(img_with_corners, self.patterncorners,
                                      corners, ret)

            # Show corners for visual inspection
            cv_image_reduced = imutils.resize(img_with_corners,
                                              width=self.width / 4,
                                              height=self.height / 4)
            cv2.imshow("Intrinsics Calibration Tool", cv_image_reduced)
            cv2.waitKey(20)

            k = cv2.waitKey()
            #print k
            if (k == 27):  # Esc skips this image (27=Esc, 10=Enter)
                #print "Drop all"
                #with self.goodImages.mutex:
                #	self.goodImages.queue.clear()
                #return None
                print "Not adding this image"
                return False
            elif (k == 10):
                print "Adding this image corners"
                imgpoints.append(corners)

                # Log corners in stopImg
                pixels, _, _ = corners.shape  # (70, 1, 2)
                up = 35
                for pixel in range(1, pixels):
                    cornerY = int(corners[pixel][0][0] / stopScale)
                    cornerX = int(corners[pixel][0][1] / stopScale)  # Corner y

                    if (stopImg[cornerX, cornerY] + up) < np.iinfo(
                            np.uint8).max:
                        stopImg[cornerX,
                                cornerY] = stopImg[cornerX, cornerY] + up

            # Connect registered corners
                thresholdvalue = 14  # TODO: magic literal
                ret, stopImgEval = cv2.threshold(stopImg, thresholdvalue, 255,
                                                 cv2.THRESH_BINARY)
                stopImgEval = cv2.dilate(stopImgEval,
                                         None,
                                         iterations=self.iterations)
                cv2.rectangle(stopImgEval, (randw - 1, randh - 1),
                              (siw - randw + 1, sih - randh + 1), (127), 1)

                # Show stop condition state
                cv2.imshow("Stop image", stopImgEval)
                cv2.waitKey(20)

                # Add corner points
                nrImgs = nrImgs + 1
                print "Valid calibration image " + str(nrImgs)
                objpoints.append(self.objp)

                return True

    def doCalibration(self):
        print "Calculating Calibration"
        #ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(objpoints, imgpoints, gray.shape[::-1],None,None)
        ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(
            objpoints, imgpoints, self.shape[::-1], None, None)

        print "Camera matrix:"
        print mtx
        print "Distortion coefficients:"
        print dist

        # Convert result to ROS CameraInfo message.
        K = [0.] * 9
        K[0] = mtx[0][0]
        K[2] = mtx[0][2]
        K[4] = mtx[1][1]
        K[5] = mtx[1][2]
        K[8] = 1.
        R = [0.] * 9
        R[0] = 1.
        R[4] = 1.
        R[8] = 1.
        P = [0.] * 12
        P[0] = mtx[0][0]
        P[2] = mtx[0][2]
        P[5] = mtx[1][1]
        P[6] = mtx[1][2]
        P[10] = 1.
        if len(dist[0]) == 4 or len(dist[0]) == 5:
            distortion_model = 'plumb_bob'
            D = [0.] * 5
            for i, val in enumerate(dist[0]):
                D[i] = val
        elif len(dist[0]) == 8:
            distortion_model = 'rational_polynomial'
            D = [0.] * 8
            for i, val in enumerate(dist[0]):
                D[i] = val
        else:
            rospy.logerr("Unknown distortion model!")

        camera_info = CameraInfo(height=self.height,
                                 width=self.width,
                                 distortion_model=distortion_model,
                                 D=D,
                                 K=K,
                                 R=R,
                                 P=P,
                                 binning_x=1,
                                 binning_y=1)
        print camera_info
        # Save CameraInfo message to camera.
        try:
            rospy.loginfo("Trying to save camera calibration...")
            set_camera_info = rospy.ServiceProxy(
                '/camera/' + self.camera_name + '/set_camera_info',
                SetCameraInfo)
            response = set_camera_info(camera_info)
            if response.success:
                rospy.loginfo("Camera calibration saved.")
            else:
                rospy.logwarn("Camera calibration not saved: " +
                              response.status_message)
        except rospy.ServiceException, e:
            rospy.logerr("Service call failed: " + str(e))

        # Stats
        mean_error = 0
        for i in xrange(len(objpoints)):
            imgpoints2, _ = cv2.projectPoints(objpoints[i], rvecs[i], tvecs[i],
                                              mtx, dist)
            error = cv2.norm(imgpoints[i], imgpoints2,
                             cv2.NORM_L2) / len(imgpoints2)
            mean_error += error

        print "total error (should be well below 1): ", mean_error / len(
            objpoints)
Esempio n. 51
0
def checker_calib(images):
    
    # termination criteria
    criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001)
    
    # prepare object points, like (0,0,0), (1,0,0), (2,0,0) ....,(6,5,0)
    
    a = 9  # number of column corners
    b = 7  # number of row corners
    scale = 10  # square size in mm
    objp = np.zeros((b*a,3), np.float32)
    objp[:,:2] = np.mgrid[0:a,0:b].T.reshape(-1,2)*scale
    
    # Arrays to store object points and image points from all the images.
    objpoints = [] # 3d point in real world space (object points)
    imgpoints = [] # 2d points in image plane (image points)
    print(len(images))
    
    counter = 0
    for idx, fname in enumerate(images):
        img = cv2.imread(fname)
        img = np.array(img, dtype='uint8')
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

        # Find the chess board corners
        ret, corners = cv2.findChessboardCorners(gray, (a, b), None)

        # If found, add object points, image points (after refining them)
        if ret == True:
            objpoints.append(objp)

            # find corners more accurately
            corners2 = cv2.cornerSubPix(gray, corners, (11, 11), (-1, -1), criteria)
            imgpoints.append(corners2)

            # Draw and display the corners and pattern
            img = cv2.drawChessboardCorners(img, (a, b), corners2, ret)
            #cv2.imshow('img',img)
            #cv2.waitKey(500)

            counter = counter + 1
            print('calibrated with image ' + str(counter - 1))
            
    
    cv2.destroyAllWindows()
    
    # Calibration
    #ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(objpoints, imgpoints,np.shape(gray_matrix[::-1]), None, None)
    ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(objpoints, imgpoints, gray.shape[::-1], None, None)
    
    ### refine the camera matrix
    ## If the scaling parameter alpha=0, it returns the undistorted image with minimum unwanted pixels.
    img = cv2.imread(images[len(images) - 1])
    h = img.shape[0]
    w = img.shape[1]

    # Returns the new camera matrix based on the free scaling parameter; play around with the alpha parameter
    newcameramtx, roi = cv2.getOptimalNewCameraMatrix(mtx, dist, (w, h), 1, (w, h))
    
    ## undistortion method 1 (same thing as undistort rectify map method)
    dst = cv2.undistort(img, mtx, dist, None, newcameramtx)
        
    
    # Display the original image next to the calibrated image
    cv2.imwrite('imageoriginal.png', img)
    cv2.imwrite('imagecalibrated.png', dst)
    
    # calculate reprojection error
    # estimation of how exact the found parameters are
    # absolute norm between what we got with our transformation and the corner finding algorithm
    # average error = mean of the errors calculated for all the calibration images
    mean_error = 0
    for i in range(len(objpoints)):
        imgpoints2, _ = cv2.projectPoints(objpoints[i], rvecs[i], tvecs[i], mtx, dist)
        error = cv2.norm(imgpoints[i], imgpoints2, cv2.NORM_L2) / len(imgpoints2)
        mean_error = mean_error + error
    
    total_error = mean_error / len(objpoints)
    
    return [newcameramtx, total_error]
    
Esempio n. 52
0
img_2 = cv.imread('img_3_2.jpg', 0)
img_2 = rescaleFrame(img_2)
img_2 = cv.medianBlur(img_2, 5)

th2_i2 = cv.adaptiveThreshold(img_2,255,cv.ADAPTIVE_THRESH_MEAN_C,\
            cv.THRESH_BINARY,11,2)
th3_i2 = cv.adaptiveThreshold(img_2,255,cv.ADAPTIVE_THRESH_GAUSSIAN_C,\
            cv.THRESH_BINARY,11,2)
ll = cv.cvtColor(img_2, cv.COLOR_GRAY2RGB)
cv.imshow('at_gauss_2', th2_i2)
cv.imwrite('at_gauss_2.jpg', th2_i2)

# DISTANCE CALCULATION OF MEAN

threshatm_L1 = cv.norm(img_1 - img_2, cv.NORM_L1)
print("L1 distance before normalization: ", threshatm_L1)
threshatm_L2 = cv.norm(img_1 - img_2, cv.NORM_L2)
print("L2 distance before normalization: ", threshatm_L2)
threshatm_L1 = cv.norm(th2_i1 - th2_i2, cv.NORM_L1)
print("L1 distance after normalization: ", threshatm_L1)
threshatm_L2 = cv.norm(th2_i1 - th2_i2, cv.NORM_L2)
print("L2 distance after normalization: ", threshatm_L2)
"""

# DISTANCE CALCULATION OF GAUSSIAN
threshatg_L1 = cv.norm(img_1 - img_2, cv.NORM_L1)
print("L1 distance before normalization: ", threshatg_L1)
threshatg_L2 = cv.norm(img_1 - img_2, cv.NORM_L2)
print("L2 distance before normalization: ", threshatg_L2)
threshatg_L1 = cv.norm(th3_i1 - th3_i2, cv.NORM_L1)
Esempio n. 53
0
def calibra_camara(tam_cuadros, path_imagenes, path):
    # termination criteria
    criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER,
                tam_cuadros, 0.001)
    # Differences in the calibration pattern
    if imagenes_a_x_grados == 51:
        puntos_verticales = 7
        puntos_horizontales = 8
    else:
        puntos_verticales = 5
        puntos_horizontales = 8
    # prepare object points, like (0,0,0), (1,0,0), (2,0,0) ...., (6,5,0)
    objp = np.zeros((puntos_horizontales * puntos_verticales, 3), np.float32)
    objp[:, :2] = np.mgrid[0:puntos_horizontales,
                           0:puntos_verticales].T.reshape(-1, 2)
    # Arrays to store object points and image points from all the images.
    objpoints = []  # 3D points in world space
    imgpoints = []  # 2D points in the image plane
    images = glob.glob(path_imagenes + '/calibra/*.JPG')
    i = 0
    for fname in images:
        img = cv2.imread(fname)
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        # Find the chessboard corners
        ret, corners = cv2.findChessboardCorners(
            gray, (puntos_horizontales, puntos_verticales), None)
        if ret == True:
            objpoints.append(objp)
            corners2 = cv2.cornerSubPix(gray, corners, (11, 11), (-1, -1),
                                        criteria)
            imgpoints.append(corners2)
            # Draw and display the corners
            img = cv2.drawChessboardCorners(
                img, (puntos_horizontales, puntos_verticales), corners2, ret)
            #cv2.imshow('img',img)
            #cv2.waitKey(50)
            filename = 'tablero_de_calibracion_esquinas' + str(i) + '.png'
            cv2.imwrite(os.path.join(path, filename), img)
            i = i + 1
        else:
            print('Image is not good: ' + fname)
    #cv2.destroyAllWindows()

    # Intrinsic and extrinsic camera parameters
    ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(objpoints, imgpoints,
                                                       gray.shape[::-1], None,
                                                       None)

    i = 0
    for fname in images:
        img = cv2.imread(fname)
        dst = undistorted_images(img, mtx, dist)

        #cv2.imshow("undistorted", dst),cv2.waitKey(50)
        filename = 'tablero_de_calib_sin_distorsion_por_camara' + str(
            i) + '.png'
        cv2.imwrite(os.path.join(path, filename), dst)
        i = i + 1
    #cv2.destroyAllWindows()

    # Reprojection error calculation
    tot_error = 0
    for i in range(len(objpoints)):
        imgpoints2, _ = cv2.projectPoints(objpoints[i], rvecs[i], tvecs[i],
                                          mtx, dist)
        error = cv2.norm(imgpoints[i], imgpoints2,
                         cv2.NORM_L2) / len(imgpoints2)
        tot_error += error
    print("Error de reproyección total: ", tot_error, 'píxeles')
    return mtx, dist
Esempio n. 54
0
    cv2.waitKey(500)

# Calibrate the camera and save the results
ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(objectPointsArray,
                                                   imgPointsArray,
                                                   gray.shape[::-1], None,
                                                   None)
np.savez('../data/calib.npz', mtx=mtx, dist=dist, rvecs=rvecs, tvecs=tvecs)

# Print the camera calibration error
error = 0

for i in range(len(objectPointsArray)):
    imgPoints, _ = cv2.projectPoints(objectPointsArray[i], rvecs[i], tvecs[i],
                                     mtx, dist)
    error += cv2.norm(imgPointsArray[i], imgPoints,
                      cv2.NORM_L2) / len(imgPoints)

print("Total error: ", error / len(objectPointsArray))

# Load one of the test images
img = cv2.imread('../data/left12.jpg')
h, w = img.shape[:2]

# Obtain the new camera matrix and undistort the image
newCameraMtx, roi = cv2.getOptimalNewCameraMatrix(mtx, dist, (w, h), 1, (w, h))
undistortedImg = cv2.undistort(img, mtx, dist, None, newCameraMtx)

# Crop the undistorted image
# x, y, w, h = roi
# undistortedImg = undistortedImg[y:y + h, x:x + w]
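
np.savez above stores the calibration under the keys mtx, dist, rvecs and tvecs, so a later run can undistort without recalibrating. A minimal sketch of loading those results back (path as in the original):

import numpy as np

with np.load('../data/calib.npz') as data:
    mtx, dist = data['mtx'], data['dist']
    # rvecs/tvecs are per-view extrinsics; mtx and dist are what undistort needs
    print(mtx, dist)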
Esempio n. 55
0
 def norm_sqrt(vect1, vect2):
     return cv2.norm((int(vect1[0] - vect2[0]), int(vect1[1] - vect2[1])), cv2.NORM_L2)
Esempio n. 56
0
                                  useExtrinsicGuess  = True
                                  )
        """
        XYZ_azul = calculate_XYZ(cAzul[0], cAzul[1])
        m_azul = calculate_M(cAzul[0], cAzul[1])
        m_verde = calculate_M(cVerde[0], cVerde[1])
        XYZ_verde = calculate_XYZ(cVerde[0], cVerde[1])

        dy = m_verde[2] - m_azul[2]
        dx = m_verde[0] - m_azul[0]
        d = 90
        print("C: " + str(np.sqrt(dy * dy / (d * d - dx * dx))))

        # print(str(np.linalg.norm((XYZ_verde-XYZ_azul))))

        cv2.putText(frame, str(cv2.norm(cAzul, cVerde)), (10, 30),
                    cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 0), 2, cv2.LINE_AA)
    except Exception as e:
        print(e)
    """
    if False:
        img_erosion = cv2.erode(mask, kernel, iterations=2)
        img_dilation = cv2.dilate(img_erosion, kernel, iterations=2)
        img_after = img_dilation
    else:
        img_dilation = cv2.dilate(mask, kernel, iterations=3)
        img_erosion = cv2.erode(img_dilation, kernel, iterations=3)
        img_after = img_erosion
    """

    #result = cv2.bitwise_and(frame, frame, mask = mask_azul)
Esempio n. 57
0
"""
Homework 2. Task 1.

Author: Mikhail Kita, group 371
"""

import cv2
from common import *

reduced = scale(line, 1.0 / 32.0, 8)
result = scale(reduced, 1.0 / 8.0, 32)

draw_plot(reduced, "fig2_1.png")
draw_plot(result, "fig2_2.png")

with open(path + "norm_row.txt", 'w') as f:
    f.write(str(cv2.norm(result, line)))
Esempio n. 58
0
def processFrame(frame):
    frame = frame[frameCroopY[0]:frameCroopY[1], 0:frameWidth]
    liveFrame = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY)

    # liveFrame = cv2.medianBlur(liveFrame, 1)

    # frame, kernel, x, y
    # liveFrame = cv2.GaussianBlur(liveFrame, (9, 9), 0)

    # frame, sigmaColor, sigmaSpace, borderType
    liveFrame = cv2.bilateralFilter(liveFrame, 10, 50, cv2.BORDER_WRAP)

    # _, liveFrame = cv2.threshold(liveFrame, 220, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
    liveFrame = cv2.Canny(liveFrame, 75, 150, 9)

    # cv2.goodFeaturesToTrack(img,maxCorners,qualityLevel, minDistance, corners, mask, blockSize, useHarrisDetector)
    corners = cv2.goodFeaturesToTrack(liveFrame, 2000, 0.01, 10)

    if corners is not None:
        corners = np.int0(corners)

        for i in corners:
            x, y = i.ravel()
            cv2.rectangle(liveFrame, (x - 1, y - 1), (x + 1, y + 1),
                          (255, 255, 255), -100)
        # cv2.circle(liveFrame, (x, y), 3, 255, -1)

    _, cnts, _ = cv2.findContours(liveFrame.copy(), cv2.RETR_EXTERNAL,
                                  cv2.CHAIN_APPROX_SIMPLE)

    for c in cnts:
        # detect approximated contour
        peri = cv2.arcLength(c, True)
        approx = cv2.approxPolyDP(c, 0.04 * peri, True)
        # cv2.drawContours(frame, [approx], 0, (255, 0, 0), 1)

        x, y, w, h = cv2.boundingRect(c)
        # draw a green rectangle to visualize the bounding rect
        cv2.rectangle(frame, (x, y), (x + w, y + h), (255, 0, 0), 2)

        if len(approx) == 4:
            # calculate area
            area = cv2.contourArea(approx)
            cv2.drawContours(frame, [approx], 0, (0, 0, 255), 1)

            if (area >= 1000):
                cv2.drawContours(frame, [approx], 0, (255, 0, 0), 2)
                difference = abs(
                    round(
                        cv2.norm(approx[0], approx[2]) -
                        cv2.norm(approx[1], approx[3])))

                if (difference < 30):
                    # use [c] instead of [approx] for a more precise contour line
                    # c = c.astype("float")
                    # c *= ratio
                    # c = c.astype("int")
                    #  cv2.drawContours(image, [c], 0, (0, 255, 0), 3)
                    # (x, y, w, h) = cv2.boundingRect(approx)
                    # ar = w / float(h)

                    # draw detected object
                    cv2.drawContours(frame, [approx], 0, (0, 255, 0), 3)

                    # draw detected data
                    M = cv2.moments(c)
                    if (M["m00"] != 0):
                        cX = int((M["m10"] / M["m00"]))
                        cY = int((M["m01"] / M["m00"]))

                        # a square will have an aspect ratio that is approximately
                        # equal to one, otherwise, the shape is a rectangle
                        # shape = "square" if ar >= 0.95 and ar <= 1.05 else "rectangle"
                        (x, y, w, h) = cv2.boundingRect(approx)
                        ar = w / float(h)

                        # calculate width and height
                        width = w * mmRatio
                        height = h * mmRatio

                        measurement = '%0.2fmm * %0.2fmm | %s' % (width, height,
                                                                  difference)

                        # draw text
                        cv2.putText(frame, measurement,
                                    (approx[0][0][0], approx[0][0][1]),
                                    cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255),
                                    2)

    liveFrame = cv2.cvtColor(liveFrame, cv2.COLOR_GRAY2BGR)
    combined = np.vstack((liveFrame, frame))

    height, width = combined.shape[:2]
    return cv2.resize(combined, (int(width / scale), int(height / scale)))
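The difference test above exploits a property of squares: both diagonals of a (near-)square quadrilateral have equal length. A standalone check with hypothetical corner points rather than real contour output:

import cv2
import numpy as np

# Corners of a perfect axis-aligned square, in contour order
square = np.array([[0, 0], [100, 0], [100, 100], [0, 100]], np.float32)

d1 = cv2.norm(square[0], square[2])  # one diagonal
d2 = cv2.norm(square[1], square[3])  # the other diagonal
print(abs(round(d1 - d2)))           # 0 for a square; grows as the shape skews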
Example n. 59
0
            if not isFirstFrame:
                # one-time initialization of the tracking state
                isFirstFrame = True
                hull2Prev = np.array(hull2, np.float32)
                img2GrayPrev = np.copy(img2Gray)

            lk_params = dict(winSize=(101, 101),
                             maxLevel=5,
                             criteria=(cv2.TERM_CRITERIA_EPS
                                       | cv2.TERM_CRITERIA_COUNT, 20, 0.001))
            hull2Next, st, err = cv2.calcOpticalFlowPyrLK(
                img2GrayPrev, img2Gray, hull2Prev, np.array(hull2, np.float32),
                **lk_params)

            # Final landmark points are a weighted average of detected landmarks and tracked landmarks
            for k in range(0, len(hull2)):
                d = cv2.norm(np.array(hull2[k]) - hull2Next[k])
                alpha = math.exp(-d * d / sigma)
                hull2[k] = (1 - alpha) * np.array(
                    hull2[k]) + alpha * hull2Next[k]
                hull2[k] = fbc.constrainPoint(hull2[k], img2.shape[1],
                                              img2.shape[0])

            # Update variables for the next pass
            hull2Prev = np.array(hull2, np.float32)
            img2GrayPrev = img2Gray
            ################ End of Optical Flow and Stabilization Code ###############

            # Warp the triangles
            for i in range(0, len(dt)):
                t1 = []
                t2 = []
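The stabilization step above blends detected and tracked landmarks with a Gaussian weight alpha = exp(-d^2 / sigma): when the two estimates agree (small d) the optical-flow track dominates, and when they diverge the fresh detection wins. A small numeric illustration — the sigma value is an assumption, since it is defined outside this excerpt:

import math

sigma = 100.0  # assumed; the original defines sigma elsewhere
for d in (0.5, 2.0, 5.0, 15.0):
    alpha = math.exp(-d * d / sigma)
    print("d=%5.1f  alpha=%.3f" % (d, alpha))
# small d -> alpha near 1 (keep the tracked point);
# large d -> alpha near 0 (keep the detected point)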
Example n. 60
0
import os
import pickle

import cv2
import numpy as np


def calc_calibration(image_location,
                     size,
                     show=False,
                     save=False,
                     savename=None):
    '''
    Calculates the calibration matrix for a given camera.

    Parameters:
        image_location (str): Directory containing images taken by the camera
        size (tuple of ints): (rows, columns) dimensions of the chessboard
        show (bool): If True, display each image with the detected corners
        save (bool): If True, serialize the calibration parameters once calculated
        savename (str): Filename prefix for the serialized calibration parameters

    Returns:
        ret (double): RMS reprojection error reported by cv2.calibrateCamera
        mtx (np.array): Camera matrix with the intrinsic parameters
        dist (np.array): Distortion coefficients
        rvecs (np.array): Rotation vectors
        tvecs (np.array): Translation vectors
        error_term (double): Mean reprojection error
    '''
    criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001)
    # prepare object points, like (0,0,0), (1,0,0), (2,0,0) ....,(6,5,0)
    objp = np.zeros((size[1] * size[0], 3), np.float32)
    objp[:, :2] = np.mgrid[0:size[1], 0:size[0]].T.reshape(-1, 2)

    # Arrays to store object points and image points from all the images.
    objpoints = []  # 3d point in real world space
    imgpoints = []  # 2d points in image plane.

    # The images should live alone in their own directory; set the path accordingly
    for fname in os.listdir(image_location):
        img = cv2.imread(os.path.join(image_location, fname))
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

        # Find the chess board corners
        ret, corners = cv2.findChessboardCorners(gray, (size[1], size[0]),
                                                 None)

        # If found, add object points, image points (after refining them)
        if ret:
            objpoints.append(objp)
            corners2 = cv2.cornerSubPix(gray, corners, (11, 11), (-1, -1),
                                        criteria)
            imgpoints.append(corners2)
            # Draw and display the corners
            img = cv2.drawChessboardCorners(img, (size[1], size[0]), corners2,
                                            ret)
            if show:
                cv2.imshow('img', img)
                cv2.waitKey(500)
    if show:
        cv2.destroyAllWindows()
    ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(objpoints, imgpoints,
                                                       gray.shape[::-1], None,
                                                       None)

    # Estimate the reprojection error: project the object points back onto the
    # image and compare with the detected corners
    tot_error = 0
    for i in range(len(objpoints)):
        imgpoints2, _ = cv2.projectPoints(objpoints[i], rvecs[i], tvecs[i],
                                          mtx, dist)
        error = cv2.norm(imgpoints[i], imgpoints2,
                         cv2.NORM_L2) / len(imgpoints2)
        tot_error += error

    error_term = tot_error / len(objpoints)

    if save:
        calib_dict = {
            'ret': ret,
            'mtx': mtx,
            'dist': dist,
            'rvecs': rvecs,
            'tvecs': tvecs,
            'error_term': error_term
        }
        # Store the calibration dictionary in a pickle file
        with open(savename + '_dict.pickle', 'wb') as handle:
            pickle.dump(calib_dict, handle, protocol=pickle.HIGHEST_PROTOCOL)
    return ret, mtx, dist, rvecs, tvecs, error_term
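A minimal usage sketch; the directory and board size below are assumptions, not values from the original:

ret, mtx, dist, rvecs, tvecs, err = calc_calibration(
    './calib_images/',       # assumed directory of chessboard photos
    (6, 9),                  # assumed (rows, columns) of inner corners
    show=False,
    save=True,
    savename='left_camera')  # writes left_camera_dict.pickle
print('RMS: %.4f  mean reprojection error: %.4f' % (ret, err))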