Example #1
def main():
    video = VideoCapture(video_sources.video_6)
    work_area = WorkAreaView(video_sources.video_6_work_area_markers)

    vc = VideoController(10, 'pause')
    video_wnd, = Wnd.create('video')
    # h_wnd, s_wnd, v_wnd = Wnd.create('H', 'S', 'V')
    # L_wnd, a_wnd, b_wnd = Wnd.create('L', 'a', 'b')

    frames_iter = work_area.skip_non_area(video.frames())
    for frame, _ in frames_iter:
        # hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
        # h_wnd.imshow(hsv[:, :, 0])
        # s_wnd.imshow(hsv[:, :, 1])
        # v_wnd.imshow(hsv[:, :, 2])

        # lab = cv2.cvtColor(frame, cv2.COLOR_BGR2Lab)
        # L_wnd.imshow(lab[:, :, 0])
        # a_wnd.imshow(lab[:, :, 1])
        # b_wnd.imshow(lab[:, :, 2])

        vis_img = utils.put_frame_pos(frame, video.frame_pos(), xy=(2, 55))
        video_wnd.imshow(vis_img)

        if vc.wait_key() == 27: break

    video.release()
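
The commented-out lines above sketch per-channel color inspection. A self-contained version of that idea, using plain cv2.imshow instead of the Wnd wrapper, might look like:

import cv2

def show_color_planes(frame_bgr):
    # Display each HSV and Lab plane in its own window for inspection.
    hsv = cv2.cvtColor(frame_bgr, cv2.COLOR_BGR2HSV)
    lab = cv2.cvtColor(frame_bgr, cv2.COLOR_BGR2Lab)
    for name, plane in zip(('H', 'S', 'V'), cv2.split(hsv)):
        cv2.imshow(name, plane)
    for name, plane in zip(('L', 'a', 'b'), cv2.split(lab)):
        cv2.imshow(name, plane)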
Example #2
def main():
    video = VideoCapture(video_sources.video_2)

    frame = video.read()
    backSubtractor = BackgroundSubtractorAVG(0.2, denoise(frame))

    for frame in video.frames():
        with utils.timeit_context():
            frame = denoise(frame)
            foreGround = backSubtractor.getForeground(frame)
            # Apply thresholding on the foreground and display the resulting mask
            ret, mask = cv2.threshold(foreGround, 15, 255, cv2.THRESH_BINARY)

        cv2.imshow('input', frame)
        cv2.imshow('foreground', foreGround)
        # Note: the mask is displayed as an RGB image; to get a
        # grayscale mask, convert 'foreGround' to grayscale before
        # applying the threshold.
        cv2.imshow('mask', mask)

        if cv2.waitKey(10) & 0xFF == 27:
            break

    video.release()
    cv2.destroyAllWindows()
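
BackgroundSubtractorAVG is not shown in this example. A minimal running-average sketch, assuming the model is a float accumulator updated with cv2.accumulateWeighted (the names and alpha semantics are guesses from the call site), could be:

import cv2
import numpy as np

class BackgroundSubtractorAVG:
    # Running-average background model (a sketch, not the original class).
    def __init__(self, alpha, first_frame):
        self.alpha = alpha
        self.background = first_frame.astype(np.float32)

    def getForeground(self, frame):
        # Blend the new frame into the model, then diff against it.
        cv2.accumulateWeighted(frame, self.background, self.alpha)
        return cv2.absdiff(frame, cv2.convertScaleAbs(self.background))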
Example #3
def main():
    video = VideoCapture(video_sources.video_2)
    workArea = WorkAreaView(video_sources.video_2_work_area_markers)

    vc = VideoController(10, 'pause')
    (video_wnd, bin_diff_wnd, gray_diff_wnd, colorDiffWnd,
     learned_BG_wnd) = Wnd.create('video', 'binary diff', 'gray diff',
                                  'color diff', 'Learned BG')
    colorAbsDiffWnd = Wnd('color_abs_diff')
    segmentedWnd = Wnd('segmented')

    segmenter = Segmenter()

    frames_iter = workArea.skip_non_area(video.frames())

    motionDetector = MotionDetector(next(frames_iter)[0], 3)
    backgroundModel = BackgroundModel(15)
    prevBackground = None
    for frame, _ in frames_iter:
        motionDetector.detect(frame)

        if motionDetector.motionEnded():
            # calc fgMask
            mask, gray_diff, color_diff, colorAbsDiff = calcForegroundMask(
                prevBackground, frame)
            # bin_diff_wnd.imshow(resize(mask, 0.5))
            bin_diff_wnd.imshow(cv2.cvtColor(mask, cv2.COLOR_GRAY2BGR))

            # gray_diff_wnd.imshow(resize(gray_diff, .5))
            # colorDiffWnd.imshow(resize(color_diff, .5))
            # colorAbsDiffWnd.imshow(resize(colorAbsDiff, .5))
            markers, objectsCount = segmenter.segment(mask)
            segmentedWnd.imshow(
                resize(Segmenter.markersToDisplayImage(markers, objectsCount),
                       .5))

            backgroundModel = BackgroundModel(15)

        if motionDetector.isSilence():
            backgroundModel.learn(frame, foregroundMask=None)
            learned_BG_wnd.imshow(resize(backgroundModel.learned, .5))

        if motionDetector.motionStarted():
            prevBackground = backgroundModel.learned
            backgroundModel = None
            learned_BG_wnd.imshow(resize(prevBackground, .5))

        # VIS

        vis_img = motionDetector.indicateCurrentState(frame.copy())
        vis_img = utils.put_frame_pos(vis_img, video.frame_pos(), xy=(2, 55))
        video_wnd.imshow(vis_img)
        # bin_diff_wnd.imshow(resize(motionDetector.bin_diff, .5))
        # gray_diff_wnd.imshow(resize(motionDetector.gray_diff, .5))
        # VIS END

        if vc.wait_key() == 27: break

    video.release()
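
calcForegroundMask is assumed above. One plausible sketch diffs the learned background against the current frame in color, projects to gray, and thresholds (the threshold value 35 is a placeholder, not taken from the original):

import cv2

def calcForegroundMask(background, frame, thresh=35):
    # Hypothetical helper: color absdiff, gray projection, binary mask,
    # and the color diff restricted to the mask.
    colorAbsDiff = cv2.absdiff(background, frame)
    gray_diff = cv2.cvtColor(colorAbsDiff, cv2.COLOR_BGR2GRAY)
    _, mask = cv2.threshold(gray_diff, thresh, 255, cv2.THRESH_BINARY)
    color_diff = cv2.bitwise_and(colorAbsDiff, colorAbsDiff, mask=mask)
    return mask, gray_diff, color_diff, colorAbsDiff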
Example #4
def main():
    video = VideoCapture(video_sources.video_6)
    work_area = WorkAreaView(video_sources.video_6_work_area_markers)

    vc = VideoController(10, 'pause')
    (video_wnd, bin_diff_wnd, gray_diff_wnd, frame0_diff_wnd,
     learned_BG_wnd) = Wnd.create('video', 'binary diff', 'gray diff',
                                  'diff with frame0', 'Learned BG')

    frames_iter = work_area.skip_non_area(video.frames())

    motion_detector = MotionDetector(next(frames_iter)[0], 3)
    background = BackgroundModel(motion_detector, 15)

    for frame, _ in frames_iter:
        motion_detector.detect(frame)
        if not background.done:
            background.learn()
        else:
            if motion_detector.motion_ended():
                frame0_diff = cv2.absdiff(background.learned, frame)
                gray_of_color_diff = Helper.to_gray(frame0_diff)

                frame0_diff_wnd.imshow(
                    resize(
                        np.hstack(
                            (frame0_diff, Helper.to_bgr(gray_of_color_diff))),
                        .5))

                _, binary = cv2.threshold(gray_of_color_diff, 35, 255,
                                          cv2.THRESH_BINARY)
                cv2.imshow('1 binary', resize(binary, .5))

        # VIS
        if background.done:
            learned_BG_wnd.imshow(resize(background.learned, 1))

        vis_img = motion_detector.put_current_state(frame.copy())
        vis_img = utils.put_frame_pos(vis_img, video.frame_pos(), xy=(2, 55))
        video_wnd.imshow(vis_img)
        # bin_diff_wnd.imshow(resize(motion_detector.bin_diff, .5))
        # gray_diff_wnd.imshow(resize(motion_detector.gray_diff, .5))
        # VIS END

        if vc.wait_key() == 27: break

    video.release()
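
Helper.to_gray and Helper.to_bgr are assumed; most likely they are thin wrappers around cv2.cvtColor, as in this sketch:

import cv2

class Helper:
    @staticmethod
    def to_gray(bgr_img):
        # Collapse a BGR image to a single gray plane.
        return cv2.cvtColor(bgr_img, cv2.COLOR_BGR2GRAY)

    @staticmethod
    def to_bgr(gray_img):
        # Replicate a gray plane into three channels so it can be
        # stacked next to color images.
        return cv2.cvtColor(gray_img, cv2.COLOR_GRAY2BGR)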
Example #5
def main():
    feature_params = dict(maxCorners=100, qualityLevel=0.3, minDistance=7, blockSize=7)
    lk_params = dict(winSize=(15, 15), maxLevel=2, criteria=(cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0.03))

    video = VideoCapture(video_sources.video_2)
    wnd = CvNamedWindow('video')
    vc = VideoController(delay=50)

    prev_gray = None
    p0 = None
    tracking = False

    for frame in video.frames():
        if tracking:
            frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            # calculate optical flow
            p1, st, err = cv2.calcOpticalFlowPyrLK(prev_gray, frame_gray, p0, None, **lk_params)
            p1 = p1[st == 1]
            # draw the tracks
            for pt in p1:
                a, b = pt.ravel()
                # cv2.circle expects integer coordinates
                frame = cv2.circle(frame, (int(a), int(b)), 5, (0, 255, 0), -1)
            prev_gray = frame_gray
            p0 = p1.reshape(-1, 1, 2)

        wnd.imshow(frame)

        key = vc.wait_key()
        if key == 27:
            break
        elif not tracking and key == ord('r'):  # init tracking
            roi = cv2.selectROI('roi', frame)
            cv2.destroyWindow('roi')

            if roi is None or sum(roi) == 0:
                continue

            prev_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            p0 = cv2.goodFeaturesToTrack(prev_gray, mask=roi_mask(prev_gray, roi), **feature_params)
            if p0 is not None and len(p0) > 0:
                tracking = True

    video.release()
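
roi_mask is assumed by the example. A minimal sketch that restricts goodFeaturesToTrack to the selected rectangle:

import numpy as np

def roi_mask(gray, roi):
    # Hypothetical helper: white rectangle over the selected ROI,
    # zeros elsewhere, same dtype/shape as the gray frame.
    x, y, w, h = roi
    mask = np.zeros_like(gray)
    mask[y:y + h, x:x + w] = 255
    return mask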
Example #6
def main():
    video = VideoCapture(video_sources.video_6)

    diff_wnd = CvNamedWindow('diff')
    mask_wnd = CvNamedWindow('mask')
    input_wnd = CvNamedWindow('input')

    # prev_frame = denoise(video.read())
    prev_frame = cv2.cvtColor(denoise(video.read()), cv2.COLOR_BGR2GRAY)

    vm = VideoController(10)
    diff = None
    for frame in video.frames():
        # with utils.timeit_context('frame processing'):
        #     frame = denoise(frame)
        #     diff = cv2.absdiff(prev_frame, frame, dst=diff)
        #     ret, mask = cv2.threshold(diff, 45, 255, cv2.THRESH_BINARY)
        #     binary_mask = cv2.cvtColor(mask, cv2.COLOR_BGR2GRAY)
        #     # cn = cv2.countNonZero(binary_mask)
        #     nonzero = cv2.findNonZero(binary_mask)

        with utils.timeit_context('frame processing'):
            frame = denoise(frame)
            gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            diff = cv2.absdiff(prev_frame, gray, dst=diff)
            ret, mask = cv2.threshold(diff, 45, 255, cv2.THRESH_BINARY)
            # cn = cv2.countNonZero(binary_mask)
            nonzero = cv2.findNonZero(mask)

        diff_wnd.imshow(diff)
        mask_wnd.imshow(mask)
        input_wnd.imshow(utils.put_frame_pos(frame.copy(), video.frame_pos()))

        prev_frame = gray

        if not vm.wait_key():
            break

    video.release()
    cv2.destroyAllWindows()
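
denoise is assumed in Examples #2 and #6. A light Gaussian blur is one plausible implementation (the kernel size is a guess):

import cv2

def denoise(frame, ksize=5):
    # Suppress sensor noise before frame differencing.
    return cv2.GaussianBlur(frame, (ksize, ksize), 0)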
Example #7
class VideoStream:
    def __init__(self, uri, transform=None, queue_size=128, gpu_id=None, quiet=True, read_type='numpy'):
        # initialize the video capture along with the boolean
        # used to indicate if the thread should be stopped or not
        self.cap = VideoCapture(uri, gpu_id=gpu_id, quiet=quiet)
        self.stopped = False
        self.transform = transform
        self.read_type = read_type

        # initialize queue and thread
        self.Q = Queue(maxsize=queue_size)
        self.thread = Thread(target=self.update, args=())
        self.thread.daemon = True

    def start(self):
        # start a thread to read frames from the file video stream
        self.thread.start()
        return self

    def update(self):
        # keep looping infinitely
        while True:
            # if the thread indicator variable is set, stop the thread
            if self.stopped: break

            # otherwise, ensure the queue has room in it
            if not self.Q.full():
                # read the next frame from the file
                frame = self.cap.read(type=self.read_type)

                # if the frame is None, we have reached the end
                # of the video file
                if frame is None:
                    self.stopped = True
                    continue

                # if there are transforms to be done, might as well
                # do them on producer thread before handing back to
                # consumer thread. ie. Usually the producer is so far
                # ahead of consumer that we have time to spare.
                #
                # Python is not parallel but the transform operations
                # are usually OpenCV native so release the GIL.
                #
                # Really just trying to avoid spinning up additional
                # native threads and overheads of additional
                # producer/consumer queues since this one was generally
                # idle grabbing frames.
                if self.transform:
                    frame = self.transform(frame)

                # add the frame to the queue
                self.Q.put(frame)
            else:
                time.sleep(0.1)  # Rest for 100 ms, we have a full queue
        self.cap.release()

    def read(self):
        # return next frame in the queue
        return self.Q.get()

    # A consumer loop on more() alone is insufficient, because it does
    # not account for whether the producer has reached end of stream.
    def running(self):
        return self.more() or not self.stopped

    def more(self):
        # return True if there are still frames in the queue. If stream is not stopped, try to wait a moment
        tries = 0
        while self.Q.qsize() == 0 and not self.stopped and tries < 5:
            time.sleep(0.1)
            tries += 1
        return self.Q.qsize() > 0

    def stop(self):
        # indicate that the thread should be stopped
        self.stopped = True
        # wait until stream resources are released (the producer thread
        # might still be grabbing a frame)
        self.thread.join()
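
A minimal consumer loop for this class (the file name is illustrative; cv2 is used only for display):

import cv2

stream = VideoStream('video.mp4').start()
while stream.running():
    frame = stream.read()
    if frame is None:
        break
    cv2.imshow('frame', frame)
    if cv2.waitKey(1) & 0xFF == 27:
        break
stream.stop()
cv2.destroyAllWindows()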
Example #8
class RealSawyerLift(object):

    def __init__(self, control_freq=20, horizon=1000, camera_height=256,
                 grip_thresh=.25, camera_width=256, use_image=False,
                 use_proprio=False, port1=None, joint_proprio=True,
                 do_sleep=True):

        self.joint_proprio = joint_proprio
        self.ctrl_freq = control_freq
        self.t = horizon
        self.use_image = use_image
        self.use_proprio = use_proprio
        self.do_sleep = do_sleep

        assert self.use_image or self.use_proprio, 'One of use_image and use_proprio must be set'
        # self.controller = controller
        self.cam_dim = (camera_height, camera_width)

        self.control_timestep = 1. / self.ctrl_freq
        self.is_py2 = sys.version_info[0] < 3
        self.gripper_closed = 0  # 0 = open, 1 = closed
        self.timestep = 0
        self.done = False
        self.has_object_obs = False
        self.data_size = 96000

        self.grip_thresh = grip_thresh
        self.sawyer_interface = None
        self.camera = None

        # np.random.seed(int(time.time()))
        self.port1 = port1  # np.random.choice(range(PORT_LOW, PORT_HIGH))
        self.port2 = self.port1 + 1
        self.debounce = 0  # make sure the gripper doesn't toggle too quickly

        self.config = make_config('RealSawyerDemoServerConfig')
        self.config.infer_settings()

        self.controller = self.config.controller.mode

        if self.is_py2:
            self.bridge = PythonBridge(self.port1, self.port2, self.data_size)
            self._setup_robot()
            self._recv_loop()
        else:
            self.bridge = PythonBridge(self.port2, self.port1, self.data_size)
            self._setup_camera()

    def signal_handler(self, signal, frame):
        """
        Exits on ctrl+C
        """
        if self.is_py2:
            self.sawyer_interface.open_gripper()
            if self.controller == 'opspace':
                self.osc_controller.reset_flag.publish(True)

    def send(self, data):
        #self.close()
        #exit()
        self.bridge.send(data)

    def recv(self):
        data = self.bridge.recieve()
        action = array('f')
        action.fromstring(data)  # array.frombytes() on Python 3.9+
        data = np.array(action)

        # print('Received', data)
        if data[0] == RESET and data[3] == RESET and data[-1] == RESET:
            self.reset()
        elif data[0] == SUCCESS and data[3] == SUCCESS and data[-1] == SUCCESS:
            return True
        elif data[0] == GET_POSE and data[3] == GET_POSE and data[-1] == GET_POSE:
            pose = self.sawyer_interface.ee_pose
            pose = self.sawyer_interface.pose_endpoint_transform(pose, des_endpoint='right_hand',
                                                                 curr_endpoint='right_gripper_tip')
            pose.append(self.sawyer_interface._gripper.get_position())
            self.send(array('f', pose))
        elif data[0] == GET_PROPRIO and data[3] == GET_PROPRIO and data[-1] == GET_PROPRIO:
            joint_pos = np.array(self.sawyer_interface.q)
            joint_vel = np.array(self.sawyer_interface.dq)

            gripper_pos = np.array(self.sawyer_interface._gripper.get_position())

            ee_pose = self.sawyer_interface.ee_pose
            ee_pose = np.array(self.sawyer_interface.pose_endpoint_transform(ee_pose, des_endpoint='right_hand',
                                                                    curr_endpoint='right_gripper_tip'))

            print(joint_pos.shape, joint_vel.shape, gripper_pos.shape, ee_pose.shape)
            pose = np.concatenate([
                np.sin(joint_pos), np.cos(joint_pos), joint_vel, [gripper_pos], [-gripper_pos], ee_pose
            ])
            print(pose)
            self.send(array('f', pose))
        elif data[0] == CLOSE and data[3] == CLOSE and data[-1] == CLOSE:
            self.close()
            exit()
        else:
            self._apply_action(data)

    def _recv_loop(self):
        while True:
            self.recv()

    def _setup_camera(self):
        self.camera = VideoCapture(0, self.cam_dim)

    def _setup_robot(self):
        self.osc_robot = make_robot(self.config.robot.type, config=self.config)
        self.osc_controller = make_controller(self.config.controller.type, robot=self.osc_robot, config=self.config)
        self.osc_controller.reset()
        self.osc_controller.sync_state()

        self.sawyer_interface = self.osc_robot.robot_arm

        signal.signal(signal.SIGINT, self.signal_handler)  # Handles ctrl+C

    def reset(self):
        self.timestep = 0

        if self.is_py2:
            self.sawyer_interface.open_gripper()
            self.sawyer_interface.reset()
            time.sleep(1)
            # Successful Reset
            self.send(array('f', np.array([SUCCESS] * self.dof).tolist()))
            return
        else:
            # Request a reset
            self.send(array('f', np.array([RESET] * self.dof).tolist()))
            _ = self.recv()

        return self._get_observation()

    #def _process_image(self, img):
    #    h, w, _ = img.shape
    #    new_w = int( float(w) / float(h) * self.cam_dim[0])
    #    new_shape = (new_w, self.cam_dim[0])
    #
    #    resized = cv2.resize(img, new_shape, cv2.INTER_AREA)
    #
    #    center = new_w // 2
    #    left = center - self.cam_dim[1] // 2 # assume for now that this is multiple of 2
    #    right = center + self.cam_dim[1] // 2
    #
    #    img = resized[:,left:right,:]
    #    img = np.array([img])
    #    img = np.transpose(img, (0, 3, 1, 2))
    #    return img

    def _get_image(self):
        ret, frame = self.camera.read()
        if not ret:
            raise RuntimeError('camera read failed')
        return frame

    def _get_proprio(self):
        if self.joint_proprio:
            self.send(array('f', np.array([GET_PROPRIO]*self.dof).tolist()))
        else:
            self.send(array('f', np.array([GET_POSE]*self.dof).tolist()))
        data = self.bridge.recieve()
        proprio = array('f')
        proprio.fromstring(data)  # array.frombytes() on Python 3.9+
        return np.array(proprio)

    def _get_observation(self):
        start = time.time()
        di = {}

        if self.use_image:
            img = self._get_image()
            di['image'] = img
        if self.use_proprio:
            di['proprio'] = self._get_proprio()
        # print('Observation retrieval time: {}'.format(time.time()-start))
        return di

    def _pre_action(self, action):

        if self.controller == 'velocity':
            # TODO: there is linear interpolation being done between the velocity limits
            #       in sim, do we have something similar to that on the real robot?
            action = np.clip(action, -0.3, 0.3)

        return action

    def _apply_action(self, action):
        if self.is_py2:
            if self.controller == 'velocity':
                self.sawyer_interface.dq = action[:-1]

            elif self.controller == 'opspace':
                self.osc_controller.send_action(action[:-1])

            if self.debounce:
                self.debounce -= 1
            else:
                if self.gripper_closed:
                    if abs(action[-1]) < 1 - self.grip_thresh:
                        self.sawyer_interface.open_gripper()
                        self.gripper_closed = 0
                        self.debounce = 5
                        print('open gripper')
                else:
                    if abs(action[-1]) > self.grip_thresh:
                        self.sawyer_interface.close_gripper()
                        self.gripper_closed = 1
                        self.debounce = 5
                        print('close gripper')

        else:
            # send to py2
            self.send(bytes(array('f', action.tolist())))

    @property
    def action_spec(self):
        low = np.ones(self.dof) * -1.
        high = np.ones(self.dof) * 1.
        return low, high

    @property
    def dof(self):
        return 8

    def step(self, action):

        if self.done:
            raise RuntimeError('Env is terminated')

        start = time.time()

        self.timestep += 1
        action = self._pre_action(action)

        self._apply_action(action)

        if self.do_sleep:
            # Busy-wait so each step holds the requested control frequency.
            end_time = start + self.control_timestep
            while time.time() < end_time:
                pass

        return self._get_observation(), 0., False, {}

    def render(self):
        cv2.imshow('Observation', self._get_image())
        cv2.waitKey(1)

    def observation_spec(self):
        observation = self._get_observation()
        return observation

    def close(self):
        if self.is_py2:
            self.sawyer_interface.open_gripper()
            self.sawyer_interface.reset()
        else:
            self.camera.release()
            self.send(array('f', np.array([CLOSE] * self.dof).tolist()))
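
A hedged driving loop for this environment, run on the Python 3 side (the port number, step count, and action scale are placeholders, not values from the original code):

import numpy as np

env = RealSawyerLift(use_proprio=True, port1=7777)
obs = env.reset()
for _ in range(100):
    # Small random velocity action plus the gripper channel.
    action = np.random.uniform(-0.1, 0.1, size=env.dof)
    obs, reward, done, info = env.step(action)
env.close()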