def on_frame(self, vis):
        """Overlay match lines and a pose-projected AR wireframe onto *vis*.

        Returns early (drawing nothing) when no frame match is available.
        NOTE(review): this method's indentation (def at column 0, body at 8)
        is unusual but preserved as found.
        """
        match = self.match_frames()
        if match is None:
            return
        # Frame size; w is also the x-offset into the right half of the canvas.
        w, h = getsize(self.frame)
        p0, p1, H = match
        # Draw correspondence lines between matched reference/frame points.
        for (x0, y0), (x1, y1) in zip(np.int32(p0), np.int32(p1)):
            cv2.line(vis, (x0+w, y0), (x1, y1), (0, 255, 0))
        x0, y0, x1, y1 = self.ref_rect
        corners0 = np.float32([[x0, y0], [x1, y0], [x1, y1], [x0, y1]])
        # Map the reference rectangle into the live frame via homography H.
        img_corners = cv2.perspectiveTransform(corners0.reshape(1, -1, 2), H)
        cv2.polylines(vis, [np.int32(img_corners)], True, (255, 255, 255), 2)

        # Treat the reference rect as lying in the z = 0 plane for pose estimation.
        corners3d = np.hstack([corners0, np.zeros((4, 1), np.float32)])
        fx = 0.9  # assumed focal length in image-width units -- TODO confirm calibration
        K = np.float64([[fx*w, 0, 0.5*(w-1)],
                        [0, fx*w, 0.5*(h-1)],
                        [0.0,0.0,      1.0]])
        dist_coef = np.zeros(4)
        # ret is unused; rvec/tvec give the rect's pose in camera coordinates.
        ret, rvec, tvec = cv2.solvePnP(corners3d, img_corners, K, dist_coef)
        # Scale unit AR vertices to the rect size; z extent is 30% of the width
        # (negative so the model rises toward the camera), then project to 2D.
        verts = ar_verts * [(x1-x0), (y1-y0), -(x1-x0)*0.3] + (x0, y0, 0)
        verts = cv2.projectPoints(verts, rvec, tvec, K, dist_coef)[0].reshape(-1, 2)
        for i, j in ar_edges:
            (x0, y0), (x1, y1) = verts[i], verts[j]
            cv2.line(vis, (int(x0), int(y0)), (int(x1), int(y1)), (255, 255, 0), 2)
Example #2
0
    def run(self):
        """Capture/annotate loop: show the live frame beside the tracked
        target image and draw match overlays until ESC is pressed.

        SPACE toggles pause; the loop also holds the current frame while a
        selection rectangle is being dragged.
        """
        while True:
            # Only advance the video while not paused and not mid-selection.
            playing = not self.paused and not self.rect_sel.dragging
            if playing or self.frame is None:
                ret, frame = self.cap.read()
                if not ret:
                    break
                self.frame = frame.copy()

            w, h = getsize(self.frame)
            # Side-by-side canvas: live frame on the left, target on the right.
            vis = np.zeros((h, w*2, 3), np.uint8)
            vis[:h,:w] = self.frame
            if len(self.tracker.targets) > 0:
                target = self.tracker.targets[0]
                vis[:,w:] = target.image
                draw_keypoints(vis[:,w:], target.keypoints)
                x0, y0, x1, y1 = target.rect
                # +w shifts the rect into the right-hand (target) half.
                cv2.rectangle(vis, (x0+w, y0), (x1+w, y1), (0, 255, 0), 2)

            if playing:
                tracked = self.tracker.track(self.frame)
                if len(tracked) > 0:
                    tracked = tracked[0]
                    cv2.polylines(vis, [np.int32(tracked.quad)], True, (255, 255, 255), 2)
                    # Join each target keypoint (right half) to its match in the frame.
                    for (x0, y0), (x1, y1) in zip(np.int32(tracked.p0), np.int32(tracked.p1)):
                        cv2.line(vis, (x0+w, y0), (x1, y1), (0, 255, 0))
            draw_keypoints(vis, self.tracker.frame_points)

            self.rect_sel.draw(vis)
            cv2.imshow('plane', vis)
            ch = cv2.waitKey(1)
            if ch == ord(' '):
                self.paused = not self.paused
            if ch == 27:
                break
Example #3
0
    def run(self):
        """Main display loop.

        Shows the live frame on the left half of a double-width canvas and
        the first tracked target's image on the right, drawing match lines
        between them.  SPACE toggles pause, ESC quits.
        """
        while True:
            live = not (self.paused or self.rect_sel.dragging)
            if live or self.frame is None:
                ok, grabbed = self.cap.read()
                if not ok:
                    break
                self.frame = grabbed.copy()

            w, h = getsize(self.frame)
            canvas = np.zeros((h, w * 2, 3), np.uint8)
            canvas[:h, :w] = self.frame
            if len(self.tracker.targets) > 0:
                tgt = self.tracker.targets[0]
                canvas[:, w:] = tgt.image
                draw_keypoints(canvas[:, w:], tgt.keypoints)
                rx0, ry0, rx1, ry1 = tgt.rect
                # Shift the rect into the right-hand (target) half.
                cv2.rectangle(canvas, (rx0 + w, ry0), (rx1 + w, ry1), (0, 255, 0), 2)

            if live:
                hits = self.tracker.track(self.frame)
                if len(hits) > 0:
                    hit = hits[0]
                    cv2.polylines(canvas, [np.int32(hit.quad)], True, (255, 255, 255), 2)
                    # One line per keypoint correspondence, target half -> frame half.
                    for (ax, ay), (bx, by) in zip(np.int32(hit.p0), np.int32(hit.p1)):
                        cv2.line(canvas, (ax + w, ay), (bx, by), (0, 255, 0))
            draw_keypoints(canvas, self.tracker.frame_points)

            self.rect_sel.draw(canvas)
            cv2.imshow('plane', canvas)
            key = cv2.waitKey(1)
            if key == ord(' '):
                self.paused = not self.paused
            if key == 27:
                break
    def run(self):
        """Main loop: mirror the camera frame, detect keypoints, and show the
        live frame beside the reference frame with overlays.

        SPACE toggles pause, ESC quits.  Matching/AR drawing is delegated to
        self.on_frame.
        """
        while True:
            playing = not self.paused and not self.rect_sel.dragging
            if playing or self.frame is None:
                ret, frame = self.cap.read()
                if not ret:
                    break
                # Mirror horizontally so the preview behaves like a mirror.
                self.frame = np.fliplr(frame).copy()
                self.frame_points, self.frame_desc = self.detector.detectAndCompute(self.frame, None)
                if self.frame_desc is None:  # detectAndCompute returns descs=None if not keypoints found
                    self.frame_desc = []

            w, h = getsize(self.frame)
            # Side-by-side canvas: live frame left, reference frame right.
            vis = np.zeros((h, w*2, 3), np.uint8)
            vis[:h,:w] = self.frame
            if self.ref_frame is not None:
                vis[:h,w:] = self.ref_frame
                x0, y0, x1, y1 = self.ref_rect
                cv2.rectangle(vis, (x0+w, y0), (x1+w, y1), (0, 255, 0), 2)
                draw_keypoints(vis[:,w:], self.ref_points)
            draw_keypoints(vis, self.frame_points)

            if playing and self.ref_frame is not None:
                # Match the current frame against the reference and draw results.
                self.on_frame(vis)

            self.rect_sel.draw(vis)
            cv2.imshow('plane', vis)
            ch = cv2.waitKey(1)
            if ch == ord(' '):
                self.paused = not self.paused
            if ch == 27:
                break
    def run_original(self):
        """Show the selected region until a target is detected, then return
        the measured size of that region.

        Returns:
            The value of distance(x0, x1, y0, y1) for the detected target
            rect, or None if the user pressed ESC before any target was
            detected (previously this raised UnboundLocalError).
        """
        width_region = None  # bug fix: was unbound when ESC was hit before detection
        while True:
            # flag used to quit imaging, when the size detected.
            flag = 0
            # size of the frame (image) - used to draw rectangle
            w, h = getsize(self.frame)
            vis = np.zeros((h, w*2, 3), np.uint8)
            # copy to the image (original)
            vis[:h,:w] = self.frame
            if len(self.tracker.targets) > 0:
                target = self.tracker.targets[0]
                vis[:,w:] = target.image
                draw_keypoints(vis[:,w:], target.keypoints)
                x0, y0, x1, y1 = target.rect # rectangle coordinates
                cv.rectangle(vis, (x0+w, y0), (x1+w, y1), (0, 255, 0), 2) # green line drawn
                # finding the width of the region
                width_region = distance(x0, x1, y0, y1)
                flag = 1 # set flag = 1, quit the program!
            self.rect_sel.draw(vis)
            cv.imshow('Selected Region', vis) # show the image
            ch = cv.waitKey(1)
            if ch == 27 or flag == 1:
                print("Quitting")
                return width_region
Example #6
0
def build_pyramid(img, leveln=6, dtype=np.int16):
    """Build a Laplacian pyramid of *img* with *leveln* levels.

    Each of the first leveln-1 levels holds the detail lost by one
    pyrDown/pyrUp round trip; the final level is the low-resolution residual.

    Note: *dtype* is accepted for API compatibility but is not applied here.
    NOTE(review): np.array over differently-sized levels yields a ragged
    object array; recent NumPy requires dtype=object for that -- confirm
    callers' expectations.
    """
    levels = []
    # bug fix: xrange is Python 2 only; this file otherwise uses Python 3 syntax.
    for _ in range(leveln - 1):
        next_img = cv2.pyrDown(img)
        img1 = cv2.pyrUp(next_img, dstsize=getsize(img))
        levels.append(img - img1)
        img = next_img
    levels.append(img)
    return np.array(levels)
Example #7
0
def build_pyramid(img, leveln=6, dtype=np.int16):
    """Build a Laplacian pyramid of *img* with *leveln* levels.

    Levels 0..leveln-2 are difference images (detail lost per downsample
    step); the last entry is the coarsest residual image.  The *dtype*
    parameter is kept for API compatibility but is unused here.
    """
    levels = []
    # bug fix: xrange does not exist in Python 3 -- use range.
    for _ in range(leveln-1):
        next_img = cv2.pyrDown(img)
        img1 = cv2.pyrUp(next_img, dstsize=getsize(img))
        levels.append(img-img1)
        img = next_img
    levels.append(img)
    return np.array(levels)
Example #8
0
File: lappyr.py Project: vfn/opencv
def build_lappyr(img, leveln=6, dtype=np.int16):
    """Build a Laplacian pyramid of *img* as a list of arrays.

    The input is first converted via *dtype* (a NumPy scalar type acting as
    an array constructor) so subtraction does not wrap in uint8.  The first
    leveln-1 entries are per-level detail images; the last is the residual.
    """
    img = dtype(img)
    levels = []
    # bug fix: xrange is Python 2 only; use range for Python 3.
    for _ in range(leveln - 1):
        next_img = cv2.pyrDown(img)
        img1 = cv2.pyrUp(next_img, dstsize=getsize(img))
        levels.append(img - img1)
        img = next_img
    levels.append(img)
    return levels
Example #9
0
def build_lappyr(img, leveln=6, dtype=np.int16):
    """Return a Laplacian pyramid of *img* as a list.

    *img* is converted with *dtype* up front so the per-level differences
    can go negative without wrapping.  Entries 0..leveln-2 hold detail
    images; the final entry is the coarsest downsampled residual.
    """
    img = dtype(img)
    levels = []
    # bug fix: xrange was removed in Python 3 -- use range.
    for _i in range(leveln-1):
        next_img = cv2.pyrDown(img)
        img1 = cv2.pyrUp(next_img, dstsize=getsize(img))
        levels.append(img-img1)
        img = next_img
    levels.append(img)
    return levels
Example #10
0
    def run(self):
        """Tracking loop that also prints a rough (x, y, z) estimate derived
        from the intersection of the tracked quad's diagonals.
        """
        def find_line(p0, p1):
            # Line through p0 and p1 in (k, b) form.
            # NOTE(review): k = dx/dy is the *inverse* slope, yet
            # b = y1 - k*x0 mixes axes -- the intercept formula looks
            # inconsistent; confirm the intended parameterisation.
            # Also divides by zero when y0 == y1.
            x0, y0 = p0
            x1, y1 = p1
            k = (x1 - x0) / (y1 - y0)
            b = y1 - k * x0
            return k, b

        def cross(l0, l1):
            # Intersection of two (k, b) lines; divides by zero when the
            # lines are parallel (k0 == k1).
            k0, b0 = l0
            k1, b1 = l1
            x = (b1 - b0) / (k0 - k1)
            y = k0 * x + b0
            return x, y

        while True:
            playing = not self.paused and not self.rect_sel.dragging
            if playing or self.frame is None:
                ret, frame = self.cap.read()
                if not ret:
                    break
                self.frame = frame.copy()

            w, h = getsize(self.frame)
            vis = np.zeros((h, w * 2, 3), np.uint8)
            vis[:h, :w] = self.frame
            if len(self.tracker.targets) > 0:
                target = self.tracker.targets[0]
                vis[:, w:] = target.image
                draw_keypoints(vis[:, w:], target.keypoints)
                x0, y0, x1, y1 = target.rect
                cv2.rectangle(vis, (x0 + w, y0), (x1 + w, y1), (0, 255, 0), 2)

            if playing:
                tracked = self.tracker.track(self.frame)
                if len(tracked) > 0:
                    tracked = tracked[0]
                    cv2.polylines(vis, [np.int32(tracked.quad)], True,
                                  (255, 255, 255), 2)
                    # print([np.int32(tracked.quad)])
                    p0, p1, p2, p3 = np.int32(tracked.quad)
                    # Intersect the quad's diagonals to approximate its centre.
                    l0 = find_line(p0, p2)
                    l1 = find_line(p1, p3)
                    x, y = cross(l0, l1)
                    # Depth proxy from the quad's left-edge height; the magic
                    # constants (3, 180) look like a calibration -- TODO confirm.
                    z = 3 * 180 / (p3[1] - p0[1]) * 1
                    print(x, y, z)
            draw_keypoints(vis, self.tracker.frame_points)

            self.rect_sel.draw(vis)
            cv2.imshow('plane', vis)
            ch = cv2.waitKey(1)
            if ch == ord(' '):
                self.paused = not self.paused
            if ch == 27:
                break
Example #11
0
File: mover.py Project: Jeff-Yu/nas
    def mv(self, src):
        """Move *src* onto the first volume with room for it, scanning
        volumes from least to most recorded free space (tightest fit first).
        Updates the chosen volume's free-space figure afterwards.
        """
        needed = common.getsize(src)

        # Walk volumes ordered ascending by their recorded free space.
        for name, _free in sorted(self.volumes.items(), key=lambda item: item[1]):
            if self.volumes.get(name, 0) > needed:
                common.mv(src, name)
                self.volumes[name] = common.df(name)  # refresh free space
                return
    def run(self, Cover_Dimensions, Actual_Dimensions):
        '''
        Runs the selection/tracking loop and approximates the dimensions of
        the cover from the selected region.

        Args:
            Cover_Dimensions: cover width used as the numerator of the width
                ratio.  NOTE(review): it is passed through float(), so a
                scalar is expected despite the plural name -- confirm
                against callers.
            Actual_Dimensions: (width, height) of the reference region in
                real-world units; only the width is used.

        Returns:
            None.  The estimated width is printed once the region is
            detected.
        '''
        # bug fix: width_of_book could be unbound at the final print if the
        # tracker fired before a target rect had been measured.
        width_of_book = None
        while True:
            # flag used to quit imaging, when the size detected.
            flag = 0
            playing = not self.rect_sel.dragging

            # get size of the book cover
            w, h = getsize(self.frame)
            vis = np.zeros((h, w*2, 3), np.uint8)
            vis[:h,:w] = self.frame

            if len(self.tracker.targets) > 0:
                target = self.tracker.targets[0]
                vis[:,w:] = target.image
                draw_keypoints(vis[:,w:], target.keypoints)
                x0, y0, x1, y1 = target.rect # get the selected region coordinates
                cv.rectangle(vis, (x0+w, y0), (x1+w, y1), (0, 255, 0), 2)

                Region_Dimensions = [x1 - x0, y1 - y0]
                print("Region_Dimensions:", Region_Dimensions)
                # Ratio of cover width to selected-region width; guard against
                # a degenerate zero-width selection (previously crashed with
                # ZeroDivisionError).
                if Region_Dimensions[0] != 0:
                    ratio_width = float(Cover_Dimensions)/(Region_Dimensions[0])
                    width_of_book = ratio_width * Actual_Dimensions[0]

            # Track the region in the current frame (useful for video sources).
            if playing:
                tracked = self.tracker.track(self.frame)
                if len(tracked) > 0:
                    tracked = tracked[0]
                    cv.polylines(vis, [np.int32(tracked.quad)], True, (255, 255, 255), 2)
                    for (x0, y0), (x1, y1) in zip(np.int32(tracked.p0), np.int32(tracked.p1)):
                        cv.line(vis, (x0+w, y0), (x1, y1), (0, 255, 0))
                        print("Detected")
                        # stop after the first detected correspondence
                        flag = 1
                        break
            draw_keypoints(vis, self.tracker.frame_points)

            self.rect_sel.draw(vis)
            cv.imshow('Selected Region', vis)
            cv.waitKey(1)
            if flag == 1:
                print("Dimensions of the book : ", width_of_book)
                return
Example #13
0
def getdirsize(pathname=None, verbose=False, printfunc=humanPrint):
    """Return the total size in bytes of *pathname*, recursing into
    directories.

    When *verbose* is true, each visited path is reported through
    *printfunc*; a falsy *pathname* is labelled with $CASTOR_HOME instead.
    """
    if isdir(pathname):
        total = 0
        for child in getchildren(pathname):
            full = pathname + "/" + child if pathname else child
            total += getdirsize(full, verbose, printfunc)
    else:
        total = getsize(pathname)
    if verbose:
        printfunc(pathname if pathname else environ["CASTOR_HOME"], total)
    return total
    def run(self):
        """Display loop: show a Luv-converted view of the current frame plus
        the side-by-side tracking canvas, circling the target instead of
        boxing it.

        SPACE toggles pause, ESC quits.
        """
        while True:
            playing = not self.paused and not self.rect_sel.dragging
            if playing or self.frame is None:
                ret, frame = self.cap.read()
                if not ret:
                    break
                self.frame = frame.copy()

            # bug fix: previously converted the local `frame`, which is
            # unbound on the first iteration (or stale) while paused; use
            # the stored current frame instead.
            blue = cv.cvtColor(self.frame, cv.COLOR_BGR2Luv)
            # Display the resulting frame
            cv.imshow('self.frame', blue)

            w, h = getsize(self.frame)
            vis = np.zeros((h, w * 2, 3), np.uint8)
            vis[:h, :w] = self.frame
            if len(self.tracker.targets) > 0:
                target = self.tracker.targets[0]
                vis[:, w:] = target.image
                draw_keypoints(vis[:, w:], target.keypoints)
                x0, y0, x1, y1 = target.rect
                # Circumscribe the target rect (shifted into the right half).
                center_x = int((x0 + x1 + 2 * w) / 2)
                center_y = int((y0 + y1) / 2)
                r_x = int(abs((x0 - x1) / 2))
                # NOTE(review): r_y also uses the x-extent; the y-extent
                # (y0 - y1) may have been intended -- confirm.
                r_y = int(abs((x0 - x1) / 2))
                radius = int(math.sqrt((r_x * r_x) + (r_y * r_y)))

                cv.circle(vis, (center_x, center_y), radius, (0, 0, 255), 2)
            if playing:
                tracked = self.tracker.track(self.frame)
                if len(tracked) > 0:
                    tracked = tracked[0]
                    cv.polylines(vis, [np.int32(tracked.quad)], True,
                                 (255, 0, 255), 2)
                    for (x0, y0), (x1, y1) in zip(np.int32(tracked.p0),
                                                  np.int32(tracked.p1)):
                        cv.line(vis, (x0 + w, y0), (x1, y1), (255, 0, 0))
            draw_keypoints(vis, self.tracker.frame_points)

            self.rect_sel.draw(vis)
            cv.imshow('plane', vis)
            ch = cv.waitKey(1)
            if ch == ord(' '):
                self.paused = not self.paused
            if ch == 27:
                break
Example #15
0
    def run(self):
        """Tracking loop that labels the tracked plane with a text tag and a
        circle at its fourth quad corner.

        SPACE toggles pause, ESC quits.
        """
        # (removed an unused `img = cv2.imread('image.jpg')` local)
        while True:
            playing = not self.paused and not self.rect_sel.dragging
            if playing or self.frame is None:
                ret, frame = self.cap.read()
                if not ret:
                    break
                self.frame = frame.copy()

            w, h = getsize(self.frame)
            vis = np.zeros((h, w * 2, 3), np.uint8)
            vis[:h, :w] = self.frame

            if len(self.tracker.targets) > 0:
                target = self.tracker.targets[0]
                vis[:, w:] = target.image
                draw_keypoints(vis[:, w:], target.keypoints)
                x0, y0, x1, y1 = target.rect
                cv2.rectangle(vis, (x0 + w, y0), (x1 + w, y1), (0, 255, 0), 2)

            if playing:
                tracked = self.tracker.track(self.frame)
                if len(tracked) > 0:
                    tracked = tracked[0]
                    cv2.polylines(vis, [np.int32(tracked.quad)], True,
                                  (255, 0, 0), 2)

                    # bug fix: quad corners are floats; OpenCV drawing
                    # functions require integer point coordinates.
                    xt, yt = tracked.quad[3]
                    anchor = (int(xt), int(yt))
                    font = cv2.FONT_HERSHEY_SIMPLEX
                    cv2.putText(vis, 'Planar track', anchor, font, 1,
                                (0, 0, 255), 2)
                    cv2.circle(vis, anchor, 30, (0, 255, 0), 2)
            draw_keypoints(vis, self.tracker.frame_points)

            self.rect_sel.draw(vis)
            cv2.imshow('plane', vis)
            ch = cv2.waitKey(1)
            if ch == ord(' '):
                self.paused = not self.paused
            if ch == 27:
                break
Example #16
0
    def checkPoster(self, cv_image):
        """Run the tracker over *cv_image* once; on the first hit, record the
        recognised character and stop checking further frames.

        Side effects: sets self.frame, self.character and self.recognised.
        """
        if not self.recognised:
            self.frame = cv_image.copy()
            w, h = getsize(self.frame)
            vis = np.zeros((h, w, 3), np.uint8)

            vis[:h,:w] = self.frame

            tracked = self.tracker.track(self.frame)
            print(len(tracked))
            if len(tracked) > 0:
                rospy.loginfo("3")
                for tracked_ob in tracked:
                    rospy.loginfo ('Found ' + tracked_ob.target.data)
                    self.character = tracked_ob.target.data

                    # Calculate Homography
                    # NOTE(review): `h` here shadows the frame height above,
                    # and the homography is never used -- confirm intended.
                    h, status = cv2.findHomography(tracked_ob.p0, tracked_ob.p1)
                    self.recognised = True
Example #17
0
    def run(self):
        """Two-view structure-from-motion over the castle image sequence.

        For each consecutive image pair: detect/match keypoints, filter with
        a homography RANSAC inlier test, estimate F/K/E, recover pose,
        rectify, triangulate, plot the 3D points, run PnP, and show a
        disparity map.  ESC at the image-pair prompt exits the process.
        """
        idxrange = range(1, 27)
        img0 = cv2.imread(
            os.path.join(thisdir,
                         '../data/castle/castle.%03d.jpg' % (idxrange[0], )))
        w, h = getsize(img0)
        prev_keypoints, prev_descrs = self.detector.detectAndCompute(
            img0.copy(), None)
        self.matcher.add([prev_descrs.astype(np.uint8)])

        for imgidx in idxrange:
            print('-------------------------------------------------------')
            print(
                os.path.join(thisdir, '../data/castle/castle.%03d.jpg' %
                             (imgidx, )) + '\n' +
                os.path.join(thisdir, '../data/castle/castle.%03d.jpg' %
                             (imgidx + 1, )))
            img1 = cv2.imread(
                os.path.join(thisdir,
                             '../data/castle/castle.%03d.jpg' % (imgidx, )))
            img2 = cv2.imread(
                os.path.join(thisdir, '../data/castle/castle.%03d.jpg' %
                             (imgidx + 1, )))
            if img1 is None or img2 is None:
                raise Exception('Fail to open images.')

            # Detect and match keypoints
            prev_keypoints, prev_descrs = self.detector.detectAndCompute(
                img1.copy(), None)
            curr_keypoints, curr_descrs = self.detector.detectAndCompute(
                img2.copy(), None)
            self.matcher.clear()
            self.matcher.add([prev_descrs.astype(np.uint8)])
            matches = self.matcher.knnMatch(curr_descrs, k=2)
            # Lowe-style ratio test (0.75) to keep only distinctive matches.
            matches = [
                m[0] for m in matches
                if len(m) == 2 and m[0].distance < m[1].distance * 0.75
            ]
            print('%d matches.' % len(matches))
            p0 = [prev_keypoints[m.trainIdx].pt for m in matches]
            p1 = [curr_keypoints[m.queryIdx].pt for m in matches]
            p0, p1 = np.float32((p0, p1))

            # Skip first two frames
            # if imgidx<2: continue

            # Estimate homography
            H, status = cv2.findHomography(p0, p1, cv2.RANSAC, 13.0)
            status = status.ravel() != 0
            print('inliner percentage: %.1f %%' %
                  (status.mean().item(0) * 100., ))
            if status.sum() < MIN_MATCH_COUNT:
                continue
            p0, p1 = p0[status], p1[status]

            # Display inliners
            imgpair = cv2.addWeighted(img2, .5, img1, .5, 0)
            draw_matches(imgpair, p0, p1)
            cv2.imshow('keypoint matches', imgpair)

            # Estimate fundamental matrix
            F, status = cv2.findFundamentalMat(p0, p1, cv2.FM_8POINT, 3, .99)

            # Estimate camera matrix
            # NOTE(review): p0 is briefly made homogeneous only for
            # initCameraMatrix2D, then truncated back to 2D.
            p0 = np.hstack((p0, np.ones((p0.shape[0], 1)))).astype(np.float32)
            K = cv2.initCameraMatrix2D([p0], [p1], (w, h))
            p0 = p0[:, :2]

            # Estimate essential matrix
            E, status = cv2.findEssentialMat(p0, p1, cameraMatrix=K)
            ret, R, t, status = cv2.recoverPose(E,
                                                p0,
                                                p1,
                                                cameraMatrix=K,
                                                mask=status)
            rvec, jacobian = cv2.Rodrigues(R)
            print('(R, t)=', rvec.T, '\n', t.T)

            # Dense rectification
            retval, H1, H2 = cv2.stereoRectifyUncalibrated(p0, p1, F, (w, h))

            # Triangulation
            projMat1 = np.hstack((np.eye(3), np.zeros((3, 1))))
            projMat2 = get_P_prime_from_F(F)
            points4D = cv2.triangulatePoints(projMat1, projMat2, p0.T, p1.T)

            # Plot triangulation results
            # NOTE(review): the hard-coded 480 assumes the image height --
            # confirm against the dataset.
            objectPoints = points4D.T[:, :3].astype(np.float32)
            print(objectPoints)
            np.savetxt('pts.txt', objectPoints, fmt='%.5f')
            plt.close()
            fig, axarr = plt.subplots(2, 2)
            axarr[0, 0].plot(p0[:, 0], 480 - p0[:, 1], '.')
            axarr[0, 1].plot(p1[:, 0], 480 - p1[:, 1], '.')
            axarr[1, 0].plot(-objectPoints[:, 2], -objectPoints[:, 1], '.')
            axarr[1, 1].plot(-objectPoints[:, 2], objectPoints[:, 0], '.')
            plt.ion()
            plt.draw()
            plt.waitforbuttonpress(1)

            retval, rvec, tvec, inliners = cv2.solvePnPRansac(
                objectPoints, p1.astype(np.float32), K, np.zeros((1, 4)))
            print('rvec, tvec = ', rvec.T, '\n', tvec.T)

            rectified = np.zeros((h, w, 3), np.uint8)
            disparity = np.zeros((h, w, 3), np.float32)

            warpSize = (int(w * .9), int(h * .9))
            img1_warp = cv2.warpPerspective(img1, H1, warpSize)
            img2_warp = cv2.warpPerspective(img2, H2, warpSize)
            rectified = cv2.addWeighted(img1_warp, .5, img2_warp, .5, 0)

            disparity = self.stereoMatcher.compute(
                cv2.cvtColor(img1_warp, cv2.COLOR_BGR2GRAY),
                cv2.cvtColor(img2_warp, cv2.COLOR_BGR2GRAY))
            disparity, buf = cv2.filterSpeckles(disparity, -self.maxDiff,
                                                pow(20, 2), self.maxDiff)

            rectified = cv2.addWeighted(img1_warp, .5, img2_warp, .5, 0)
            cv2.imshow('rectified', rectified)
            cv2.imshow('disparity', ((disparity + 50) * .5).astype(np.uint8))

            # Any keypress advances; ESC exits the whole process.
            [exit(0) if cv2.waitKey() & 0xff == 27 else None]

            prev_keypoints, prev_descrs = curr_keypoints, curr_descrs
Example #18
0
    def run(self):
        """Main loop: load settings and an optional preset, then track the
        target, draw detection results, and auto-save annotated frames when
        enabled.

        Keys: SPACE pauses, 's' saves a screenshot, ESC quits.
        """
        setting_fn = 'setting.json'
        if myutil.isfile(setting_fn):
            print('setting exists')
            self.load_setting(setting_fn)
        # load preset frame and rect
        # NOTE(review): 'h**o.json' looks like a scrape-censored filename;
        # confirm the real preset file name.
        if True and myutil.isfile('h**o.json'):
            print('preset exists')
            tmp_frame = self.load_config('h**o.json')
            #self.frame = tmp_frame

        # Sanity-check the distance estimator with a known pixel width.
        print('test: {}'.format(
            self.distance_to_camera(self.KNOWN_WIDTH, self.focal_length, 135)))

        while True:
            playing = not self.paused and not self.rect_sel.dragging
            if playing or self.frame is None:
                ret, frame = self.cap.read()
                if not ret:
                    break
                self.frame = frame.copy()
                # Keep an unannotated copy for auto-saving.
                self.auto_output_frame = frame.copy()

            w, h = getsize(self.frame)
            vis = np.zeros((h, w * 2, 3), np.uint8)
            # if tmp_frame:
            #     vis[:,w:] = tmp_frame
            vis[:h, :w] = self.frame
            if len(self.tracker.targets) > 0:
                target = self.tracker.targets[0]
                vis[:, w:] = target.image
                draw_keypoints(vis[:, w:], target.keypoints)
                x0, y0, x1, y1 = target.rect
                cv.rectangle(vis, (x0 + w, y0), (x1 + w, y1), (0, 255, 0), 2)

            is_ok_to_export = False
            if playing:
                tracked = self.tracker.track(self.frame)
                if len(tracked) > 0:
                    tracked = tracked[0]
                    wtf = np.int32(tracked.quad)
                    # Only accept quads that pass the plausibility check.
                    if self.check_wtf(wtf):
                        self.draw_result(vis, wtf)
                        cv.fillPoly(vis, [wtf], (255, 0, 0))
                        for (x0, y0), (x1, y1) in zip(np.int32(tracked.p0),
                                                      np.int32(tracked.p1)):
                            cv.line(vis, (x0 + w, y0), (x1, y1), (0, 255, 0))
                        is_ok_to_export = True
                        if self.auto_save:
                            self.draw_result(self.auto_output_frame, wtf)
                            fn = 'autosave_{:04d}.png'.format(self.auto_serial)
                            self.save_image(fn, self.auto_output_frame)
                            self.auto_serial += 1

            draw_keypoints(vis, self.tracker.frame_points)

            self.rect_sel.draw(vis)
            cv.imshow(WIN_NAME, vis)
            ch = cv.waitKey(1)
            if ch == ord(' '):
                self.paused = not self.paused
            elif ch == 27:
                break
            elif ch == ord('s'):
                fn = 'saved_{:04d}.png'.format(self.serial)
                self.serial += 1
                self.save_image(fn, vis)
Example #19
0
def merge_lappyr(levels):
    """Collapse a Laplacian pyramid back into a uint8 image.

    Starts from the coarsest level and repeatedly upsamples, adding each
    finer detail level; the result is clipped to [0, 255].
    """
    out = levels[-1]
    for detail in reversed(levels[:-1]):
        out = cv2.pyrUp(out, dstsize=getsize(detail))
        out += detail
    return np.uint8(np.clip(out, 0, 255))
Example #20
0
def merge_pyramid(levels):
    """Reconstruct an image from its pyramid by upsampling the coarsest
    level and accumulating each finer detail level in turn.
    """
    out = levels[-1]
    for detail in reversed(levels[:-1]):
        out = cv2.pyrUp(out, dstsize=getsize(detail))
        out += detail
    return out
Example #21
0
    def run(self):
        """ROS tracking loop: steer toward the tracked target by publishing
        velocity commands, or fall back to a slow search turn when the
        target is lost.

        Bug fixes: the original body mixed tabs and spaces (a TabError on
        Python 3) and contained a dead `ret = True; if not ret: break`
        branch left over from a removed cv2 capture.  Comments translated
        from Spanish.

        SPACE toggles pause, ESC quits.
        """
        direccion = None  # last steering command; None until the first detection
        while not rospy.is_shutdown():
            playing = not self.paused and not self.rect_sel.dragging
            if playing or self.frame is None:
                # Frames come from the ROS image callback, not cv2 capture.
                frame = self.cv_image
                self.frame = frame.copy()

            w, h = getsize(self.frame)
            vis = np.zeros((h, w*2, 3), np.uint8)
            vis[:h,:w] = self.frame
            if len(self.tracker.targets) > 0:
                target = self.tracker.targets[0]
                vis[:,w:] = target.image
                draw_keypoints(vis[:,w:], target.keypoints)
                x0, y0, x1, y1 = target.rect
                cv2.rectangle(vis, (x0+w, y0), (x1+w, y1), (0, 255, 0), 2)

            if playing:
                tracked = self.tracker.track(self.frame)
                if len(tracked) > 0:
                    tracked = tracked[0]
                    if not self.reiniciar_exploracion_timer:
                        # Stop exploration and schedule it to restart in 15 s.
                        self.pubNavegacion.publish('STOP')
                        self.reiniciar_exploracion_timer = True
                        rospy.Timer(rospy.Duration(15), self.reiniciar_exploracion)
                    # Midpoint of the tracked quad; steer proportionally to its
                    # horizontal offset from the image centre (assumes 640 px
                    # wide frames -- TODO confirm).
                    ptoMedio = (np.int32(tracked.quad[0]) + np.int32(tracked.quad[1]) + np.int32(tracked.quad[2]) + np.int32(tracked.quad[3]))/4
                    direccion = (ptoMedio[0]-320)/-320.0
                    twist = Twist(Vector3(15,0,0),Vector3(0,0,direccion))
                    self.pubVel.publish(twist)

                    cv2.polylines(vis, [np.int32(tracked.quad)], True, (255, 255, 255), 2)
                    for (x0, y0), (x1, y1) in zip(np.int32(tracked.p0), np.int32(tracked.p1)):
                        cv2.line(vis, (x0+w, y0), (x1, y1), (0, 255, 0))
                else:
                    if self.reiniciar_exploracion_timer:
                        # Target lost: keep turning toward its last known side,
                        # or creep forward if the last command was nearly straight.
                        direccion = direccion or 0.5
                        twist = Twist(Vector3(0 if direccion > 0.2 else 10,0,0),Vector3(0,0, direccion if direccion > 0.2 else 0))
                        self.pubVel.publish(twist)

            draw_keypoints(vis, self.tracker.frame_points)

            self.rect_sel.draw(vis)
            cv2.imshow('plane', vis)
            ch = cv2.waitKey(1)
            if ch == ord(' '):
                self.paused = not self.paused
            if ch == 27:
                break
Example #22
0
def merge_pyramid(levels):
    """Rebuild the full-resolution image from a pyramid.

    The coarsest level is upsampled step by step, adding the stored detail
    at each resolution on the way up.
    """
    acc = levels[-1]
    for lev in levels[-2::-1]:
        acc = cv2.pyrUp(acc, dstsize=getsize(lev))
        acc += lev
    return acc
Example #23
0
File: lappyr.py Project: vfn/opencv
def merge_lappyr(levels):
    """Collapse a Laplacian pyramid into a uint8 image, clipping to
    [0, 255] at the end.
    """
    acc = levels[-1]
    # From coarsest to finest: upsample, then add the detail image.
    for lev in levels[-2::-1]:
        acc = cv2.pyrUp(acc, dstsize=getsize(lev))
        acc += lev
    return np.uint8(np.clip(acc, 0, 255))