Example No. 1
 def is_streaming(device_id):
     try:
         c = rs.config()
         c.enable_device(device_id)  # device_id is in fact the serial_number
         p = rs.pipeline()
         p.start(c)
         p.stop()
         return False
     except RuntimeError:
         return True
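A minimal usage sketch (not part of the original example) showing how is_streaming could be combined with device enumeration; rs.context(), query_devices() and rs.camera_info.serial_number are standard pyrealsense2 calls, everything else is illustrative:

import pyrealsense2 as rs

ctx = rs.context()
for dev in ctx.query_devices():
    serial = dev.get_info(rs.camera_info.serial_number)
    # Report whether another process is already streaming from this device
    print(serial, 'busy' if is_streaming(serial) else 'free')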
Example No. 2
def pyreassenseCap():

    # Configure depth and color streams
    pipeline = rs.pipeline()
    config = rs.config()
    config.enable_stream(rs.stream.depth, 640, 480, rs.format.z16, 30)
    config.enable_stream(rs.stream.color, 640, 480, rs.format.bgr8, 30)

    # Start streaming
    pipeline.start(config)

    try:
        while True:

            # Wait for a coherent pair of frames: depth and color
            frames = pipeline.wait_for_frames()
            depth_frame = frames.get_depth_frame()
            color_frame = frames.get_color_frame()
            if not depth_frame or not color_frame:
                continue

            # Convert images to numpy arrays
            depth_image = np.asanyarray(depth_frame.get_data())
            color_image = np.asanyarray(color_frame.get_data())

            # Apply colormap on depth image (image must be converted to 8-bit per pixel first)
            depth_colormap = cv2.applyColorMap(cv2.convertScaleAbs(depth_image, alpha=0.03), cv2.COLORMAP_JET)

            # Stack both images horizontally
            images = np.hstack((color_image, depth_colormap))

            # Show images
            cv2.namedWindow('RealSense', cv2.WINDOW_AUTOSIZE)
            cv2.imshow('RealSense', images)
            cv2.imshow('depth_image',depth_image)

            key = cv2.waitKey(1)
            # Press esc or 'q' to close the image window
            if key & 0xFF == ord('q') or key == 27:
                cv2.destroyAllWindows()
                break
    finally:

        # Stop streaming
        pipeline.stop()
    def enable_imu_device(self, serial_no):
        self.pipeline = rs.pipeline()
        cfg = rs.config()
        cfg.enable_device(serial_no)
        try:
            self.pipeline.start(cfg)
        except Exception as e:
            print('ERROR: ', str(e))
            return False

        # self.sync_imu_by_this_stream = rs.stream.any
        active_imu_profiles = []

        active_profiles = dict()
        self.imu_sensor = None
        for sensor in self.pipeline.get_active_profile().get_device().sensors:
            for pr in sensor.get_stream_profiles():
                if pr.stream_type() == rs.stream.gyro and pr.format() == rs.format.motion_xyz32f:
                    active_profiles[pr.stream_type()] = pr
                    self.imu_sensor = sensor
                if pr.stream_type() == rs.stream.accel and pr.format() == rs.format.motion_xyz32f:
                    active_profiles[pr.stream_type()] = pr
                    self.imu_sensor = sensor
            if self.imu_sensor:
                break
        if not self.imu_sensor:
            print('No IMU sensor found.')
            return False
        print('\n'.join(['FOUND %s with fps=%s' % (str(ap[0]).split('.')[1].upper(), ap[1].fps()) for ap in active_profiles.items()]))
        active_imu_profiles = list(active_profiles.values())
        if len(active_imu_profiles) < 2:
            print('Not all IMU streams found.')
            return False
        self.imu_sensor.stop()
        self.imu_sensor.close()
        self.imu_sensor.open(active_imu_profiles)
        self.imu_start_loop_time = time.time()
        self.imu_sensor.start(self.imu_callback)

        # Make the device use the original IMU values and not already calibrated:
        if self.imu_sensor.supports(rs.option.enable_motion_correction):
            self.imu_sensor.set_option(rs.option.enable_motion_correction, 0)
        return True
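The callback passed to imu_sensor.start() is not shown in this snippet; a minimal sketch of what such a handler might look like (assuming it only logs raw gyro/accel samples) is:

    def imu_callback(self, frame):
        # Hypothetical handler, not from the original source: log raw motion samples.
        motion = frame.as_motion_frame()
        if not motion:
            return
        data = motion.get_motion_data()
        print(motion.get_profile().stream_type(), data.x, data.y, data.z)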
    def enable_device(self, device_serial, enable_ir_emitter):
        """
        Enable an Intel RealSense Device

        Parameters:
        -----------
        device_serial 	 : string
                             Serial number of the realsense device
        enable_ir_emitter : bool
                            Enable/Disable the IR-Emitter of the device

        """
        pipeline = rs.pipeline()

        # Enable the device
        self._config.enable_device(device_serial)
        pipeline_profile = pipeline.start(self._config)

        # Set the acquisition parameters
        sensor = pipeline_profile.get_device().first_depth_sensor()
        sensor.set_option(rs.option.emitter_enabled, 1 if enable_ir_emitter else 0)
        self._enabled_devices[device_serial] = (Device(pipeline, pipeline_profile))
Example No. 5
    def __init__(
        self,
        g_pool,
        device_id=None,
        frame_size=DEFAULT_COLOR_SIZE,
        frame_rate=DEFAULT_COLOR_FPS,
        depth_frame_size=DEFAULT_DEPTH_SIZE,
        depth_frame_rate=DEFAULT_DEPTH_FPS,
        preview_depth=False,
        device_options=(),
        record_depth=True,
    ):
        logger.debug("_init_ started")
        super().__init__(g_pool)
        self._intrinsics = None
        self.color_frame_index = 0
        self.depth_frame_index = 0
        self.context = rs.context()
        self.pipeline = rs.pipeline(self.context)
        self.pipeline_profile = None
        self.preview_depth = preview_depth
        self.record_depth = record_depth
        self.depth_video_writer = None
        self._needs_restart = False
        self.frame_size_backup = DEFAULT_COLOR_SIZE
        self.depth_frame_size_backup = DEFAULT_DEPTH_SIZE
        self.frame_rate_backup = DEFAULT_COLOR_FPS
        self.depth_frame_rate_backup = DEFAULT_DEPTH_FPS

        self._initialize_device(
            device_id,
            frame_size,
            frame_rate,
            depth_frame_size,
            depth_frame_rate,
            device_options,
        )
        logger.debug("_init_ completed")
def main():
    # Load Detection Model
    infer_func, inputs = detector.load_model(model_path, gpu_id)

    # Tracklet Color (Visualization)
    # (Later, generate colormap)
    trk_colors = np.random.rand(32, 3)

    if is_realsense is True:
        # Configure Depth and Color Streams
        pipeline = rs.pipeline()
        config = rs.config()
        config.enable_stream(rs.stream.depth, 640, 480, rs.format.z16, 30)
        config.enable_stream(rs.stream.color, 640, 480, rs.format.bgr8, 30)

        # Start Streaming
        profile = pipeline.start(config)

        # Getting the depth sensor's depth scale (see rs-align example for explanation)
        depth_sensor = profile.get_device().first_depth_sensor()
        depth_scale = depth_sensor.get_depth_scale()
        print("Depth Scale is: ", depth_scale)

        clipping_distance_in_meters = DEPTH_CLIP_DISTANCE  # 1 meter
        clipping_distance = clipping_distance_in_meters / depth_scale

        # Create an align object
        # rs.align allows us to perform alignment of depth frames to others frames
        # The "align_to" is the stream type to which we plan to align depth frames.
        align_to = rs.stream.color
        align = rs.align(align_to)

        # Initialize Tracklet and Tracklet Candidate Object
        trackers = []
        tracker_cands = []

        # Initialize Frame Index
        fidx = 0

        try:
            while True:
                # Increase Frame Index
                fidx += 1

                # Wait for a coherent pair of frames: RGBD
                frames = pipeline.wait_for_frames()

                # Align the depth frame to color frame
                aligned_frames = align.process(frames)

                # DEPTH (Aligned depth frame)
                depth_frame = aligned_frames.get_depth_frame()

                # RGB
                color_frame = aligned_frames.get_color_frame()

                if not depth_frame or not color_frame:
                    continue

                # Convert Images to numpy arrays
                depth_image = np.asanyarray(depth_frame.get_data())
                color_image = np.asanyarray(color_frame.get_data())

                # Clip Depth Image
                clipped_depth_image = np.where(
                    (depth_image > clipping_distance) | (depth_image <= 0),
                    DEPTH_CLIPPED_VALUE, depth_image)

                # Apply ColorMap on DEPTH Image
                # depth_colormap = cv2.applyColorMap(cv2.convertScaleAbs(depth_image, alpha=0.03), cv2.COLORMAP_JET)

                # DETECTION MODULE
                curr_dets, DET_TIME = snu_detector(color_image, infer_func,
                                                   inputs)

                # MultiModal Tracking MODULE
                trackers, tracker_cands, MMMOT_TIME = snu_mmmot(
                    color_image, clipped_depth_image, fidx, curr_dets,
                    motparams, trackers, tracker_cands)

                # Detection Visualization
                if is_vis_detection is True:
                    visualize_detections(color_image, curr_dets, line_width=2)

                # MMMOT Visualization
                if is_vis_tracking is True:
                    visualize_tracklets(color_image,
                                        trackers,
                                        trk_colors,
                                        line_width=3)

                # Detection Speed Visualization
                det_fps = "Detector Speed: " + str(round(1000 / DET_TIME,
                                                         2)) + " (fps)"
                mmmot_fps = "Tracker Speed: " + str(round(
                    1000 / MMMOT_TIME, 2)) + " (fps)"
                cv2.putText(color_image,
                            det_fps, (10, 20),
                            CV_FONT,
                            1.3, (255, 0, 255),
                            thickness=2)
                cv2.putText(color_image,
                            mmmot_fps, (10, 50),
                            CV_FONT,
                            1.3, (255, 0, 255),
                            thickness=2)

                # Visualization Window (using OpenCV-Python)
                cv2.imshow('Tracking', color_image)

                # OpenCV imshow window
                cv2.waitKey(1)
        finally:
            # Stop Streaming
            pipeline.stop()

            # Destroy All opencv windows
            cv2.destroyAllWindows()

    else:
        # Sequence Path
        seq_basepath = '../DATA/'
        sequences = ['ETH-Crossing', 'KITTI-17']

        # For every image sequences
        for seq_num, seq in enumerate(sequences):
            # Sequence Number
            seq_num += 1

            # Current Sequence Path
            seqpath = seq_basepath + seq

            # Current Image Sequence Paths
            seq_imgspath = seqpath + '/imgs/'

            # Load Image Sequence Lists
            imgNameList = os.listdir(seq_imgspath)
            imgList = [seq_imgspath + filename for filename in imgNameList]
            imgList.sort()

            # Generalize Save Path

            # Initialize Tracklet and Tracklet Candidate Object
            trackers = []
            tracker_cands = []

            # For every frames in the image sequence
            for fidx, frameName in enumerate(imgList):
                # Frame Index Starts from 1
                fidx += 1

                # Load Current Frame Image
                # frame = io.imread(frameName)
                frame = cv2.imread(frameName)

                # DETECTION MODULE
                curr_dets, DET_TIME = snu_detector(frame, infer_func, inputs)

                # MultiModal Tracking MODULE
                trackers, tracker_cands, MMMOT_TIME = snu_mmmot(
                    frame, [], fidx, curr_dets, motparams, trackers,
                    tracker_cands)

                # Detection Visualization
                if is_vis_detection is True:
                    visualize_detections(frame, curr_dets, line_width=2)

                # MMMOT Visualization
                if is_vis_tracking is True:
                    visualize_tracklets(frame,
                                        trackers,
                                        trk_colors,
                                        line_width=3)

                # Detection Speed Visualization
                det_fps = "Detector Speed: " + str(round(1000 / DET_TIME,
                                                         2)) + " (fps)"
                mmmot_fps = "Tracker Speed: " + str(round(
                    1000 / MMMOT_TIME, 2)) + " (fps)"
                cv2.putText(frame,
                            det_fps, (10, 20),
                            CV_FONT,
                            1.3, (255, 0, 255),
                            thickness=2)
                cv2.putText(frame,
                            mmmot_fps, (10, 50),
                            CV_FONT,
                            1.3, (255, 0, 255),
                            thickness=2)

                # Visualization Window (using OpenCV-Python)
                cv2.imshow('Tracking', frame)

                # OpenCV imshow window
                cv2.waitKey(1)

        # Destroy openCV window
        cv2.destroyAllWindows()
Example No. 7
def main(_argv):
    pipe = rs.pipeline()

    # Build config object and request pose data
    cfg = rs.config()
    cfg.enable_stream(rs.stream.color)

    # Start streaming with requested config
    pipe.start(cfg)

    os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
    os.environ['CUDA_VISIBLE_DEVICES'] = FLAGS.gpu

    logger = tf.get_logger()
    logger.disabled = True
    logger.setLevel(logging.FATAL)
    set_memory_growth()

    cfg = load_yaml(FLAGS.cfg_path)

    model = ArcFaceModel(size=cfg['input_size'],
                         backbone_type=cfg['backbone_type'],
                         training=False)

    ckpt_path = tf.train.latest_checkpoint('./checkpoints/' + cfg['sub_name'])
    if ckpt_path is not None:
        print("[*] load ckpt from {}".format(ckpt_path))
        model.load_weights(ckpt_path)
    else:
        print("[*] Cannot find ckpt from {}.".format(ckpt_path))
        exit()

    if FLAGS.update:
        print('Face bank updating...')
        targets, names = prepare_facebank(cfg, model)
        print('Face bank updated')
    else:
        targets, names = load_facebank(cfg)
        print('Face bank loaded')

    if FLAGS.save:
        video_writer = cv2.VideoWriter('./recording.avi',
                                       cv2.VideoWriter_fourcc(*'XVID'), 10,
                                       (640, 480))
        # Frame rate of 10 here; use a lower value (e.g. 6) if the machine is slow

    while True:

        frames = pipe.wait_for_frames()
        color_frame = frames.get_color_frame()
        if not color_frame:
            continue

        # Convert the color frame to a numpy array (BGR), then to RGB for detection
        frame = np.asanyarray(color_frame.get_data())
        img = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        bboxes, landmarks, faces = align_multi(
            cfg, img, min_confidence=FLAGS.min_confidence, limits=3)
        bboxes = bboxes.astype(int)
        embs = []
        for face in faces:
            if len(face.shape) == 3:
                face = np.expand_dims(face, 0)
            face = face.astype(np.float32) / 255.
            embs.append(l2_norm(model(face)).numpy())

        list_min_idx = []
        list_score = []
        for emb in embs:
            dist = [euclidean(emb, target) for target in targets]
            min_idx = np.argmin(dist)
            list_min_idx.append(min_idx)
            list_score.append(dist[int(min_idx)])
        list_min_idx = np.array(list_min_idx)
        list_score = np.array(list_score)
        list_min_idx[list_score > FLAGS.threshold] = -1
        for idx, box in enumerate(bboxes):
            frame = utils.draw_box_name(box, landmarks[idx],
                                        names[list_min_idx[idx] + 1], frame)
        frame = cv2.resize(frame, (640, 480))
        cv2.imshow('face Capture', frame)

        key = cv2.waitKey(1) & 0xFF
        if FLAGS.save:
            video_writer.write(frame)

        if key == ord('q'):
            break

    if FLAGS.save:
        video_writer.release()
    pipe.stop()
Example No. 8
#!/usr/bin/python
# -*- coding: UTF-8 -*-
import cv2 as cv
import numpy as np
import math
import pyrealsense2 as rs
import logging

# Configure depth and color streams...
# ...from Camera 1
pipeline_1 = rs.pipeline()
config_1 = rs.config()
# config_1.enable_device('908212070822')  # upper camera (own)
config_1.enable_device('912112073118')  # upper camera (arm)
config_1.enable_stream(rs.stream.depth, 1280, 720, rs.format.z16, 30)
config_1.enable_stream(rs.stream.color, 1280, 720, rs.format.bgr8, 30)

# ...from Camera 2
pipeline_2 = rs.pipeline()
config_2 = rs.config()
config_2.enable_device('939622072919')  # lower camera
config_2.enable_stream(rs.stream.depth, 1280, 720, rs.format.z16, 30)
config_2.enable_stream(rs.stream.color, 1280, 720, rs.format.bgr8, 30)

# Start streaming from both cameras
pipeline_1.start(config_1)
pipeline_2.start(config_2)

# dep
align_to_1 = rs.stream.color
align_1 = rs.align(align_to_1)
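The snippet ends before the capture loop; a minimal sketch of how frames from both pipelines might be read (reusing pipeline_1, pipeline_2 and align_1 defined above; the display part is illustrative) is:

try:
    while True:
        # Camera 1: align depth to color, then convert both frames to numpy arrays
        frames_1 = align_1.process(pipeline_1.wait_for_frames())
        depth_1 = np.asanyarray(frames_1.get_depth_frame().get_data())
        color_1 = np.asanyarray(frames_1.get_color_frame().get_data())

        # Camera 2: color frame only
        frames_2 = pipeline_2.wait_for_frames()
        color_2 = np.asanyarray(frames_2.get_color_frame().get_data())

        cv.imshow('camera 1', color_1)
        cv.imshow('camera 2', color_2)
        if cv.waitKey(1) & 0xFF == ord('q'):
            break
finally:
    pipeline_1.stop()
    pipeline_2.stop()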
Example No. 9
def main():
    # initialize the known distance from the camera to the object,
    # a preset distance taken from sample measurements
    KNOWN_DISTANCE = 390

    # initialize the known object width and height (in this case, a piece of
    # paper), in mm
    KNOWN_WIDTH = 252
    KNOWN_HEIGHT = 201
    window_name = 'capture'
    '''
    image = cv2.imread(fn)
    marker = find_marker(image)
    # focal length in pixels, obtained from camera calibration
    focalLength = (marker[1][0] * KNOWN_DISTANCE) / KNOWN_WIDTH
    print('focalLength = ',focalLength)
    draw_bounding_box(image, marker)
    print('output reference image to focal.png')
    cv2.imwrite('focal.png', image)
    '''

    # Configure depth and color streams
    pipeline = rs.pipeline()
    config = rs.config()
    # 640x480, 640x360
    config.enable_stream(rs.stream.depth, 640, 480, rs.format.z16, 30)
    config.enable_stream(rs.stream.color, 640, 480, rs.format.bgr8, 30)

    # Start streaming
    pipeline.start(config)

    cv2.namedWindow(window_name, cv2.WINDOW_AUTOSIZE)
    cv2.moveWindow(window_name, 0, 0)

    cnt = 0

    try:
        while True:
            # Wait for a coherent pair of frames: depth and color
            frames = pipeline.wait_for_frames()
            depth_frame = frames.get_depth_frame()
            color_frame = frames.get_color_frame()
            if not depth_frame or not color_frame:
                continue

            # Convert images to numpy arrays
            color_image = np.asanyarray(color_frame.get_data())
            #depth_image = np.asanyarray(depth_frame.get_data())

            # Apply colormap on depth image (image must be converted to 8-bit per pixel first)
            #depth_colormap = cv2.applyColorMap(cv2.convertScaleAbs(depth_image, alpha=0.03), cv2.COLORMAP_JET)

            original = color_image.copy()
            marker = find_marker(color_image)
            if marker == 0:
                print(marker)
                continue

            #dist = distance_to_camera(KNOWN_WIDTH, focalLength, marker[1][0])

            ck.draw_bounding_box(color_image, marker)

            depth_intrinsics = rs.video_stream_profile(
                depth_frame.profile).get_intrinsics()

            # u, v is 2D pixel coordinate
            # return p is 3D (x, y, z)
            def get_point(u, v):
                d = depth_frame.get_distance(u, v)
                p = rs.rs2_deproject_pixel_to_point(depth_intrinsics, [u, v],
                                                    d)
                return p

            def dist_3d(p1, p2):
                d = math.sqrt(
                    math.pow(p1[0] - p2[0], 2) + math.pow(p1[1] - p2[1], 2) +
                    math.pow(p1[2] - p2[2], 2))
                return d

            def get_marker_xyz(marker):
                u = int(marker[0][0])
                v = int(marker[0][1])
                p0 = get_point(u, v)
                u = int(marker[1][0])
                v = int(marker[1][1])
                p1 = get_point(u, v)
                d = dist_3d(p0, p1)
                return d

            d = get_marker_xyz(marker) * 1000
            dist_from_rs = depth_frame.get_distance(int(marker[0][0]),
                                                    int(marker[0][1]))
            dist_from_rs *= 1000

            #print('marker:{} dist_rs:{}'.format(marker, dist_from_rs))
            # cv2.putText(color_image, "%4.0fmm" % dist,
            #             (color_image.shape[1] - 500, color_image.shape[0] - 60),
            #             cv2.FONT_HERSHEY_SIMPLEX,
            #             1.2, (0, 255, 0), 3)

            cv2.putText(
                color_image, "rs: %4dmm" % dist_from_rs,
                (color_image.shape[1] - 500, color_image.shape[0] - 60),
                cv2.FONT_HERSHEY_SIMPLEX, 1.2, (0, 255, 255), 3)
            cv2.putText(
                color_image, "wc: %4dmm" % d,
                (color_image.shape[1] - 500, color_image.shape[0] - 20),
                cv2.FONT_HERSHEY_SIMPLEX, 1.2, (0, 255, 127), 3)

            # show a frame
            cv2.imshow(window_name, color_image)
            key = cv2.waitKey(1)
            if key & 0xFF == ord('q') or key == 0x1B:
                break
            elif key & 0xFF == ord('s'):
                fn = 'result-{:02d}.png'.format(cnt)
                print('save result to {}'.format(fn))
                cv2.imwrite(fn, color_image)
                cv2.imwrite('origin-{:02d}.png'.format(cnt), original)
                cnt += 1
    finally:
        cv2.destroyAllWindows()
        # Stop streaming
        pipeline.stop()
Example No. 10
 def __init__(self):
     self.pipeline = rs.pipeline()
     config = rs.config()
     config.enable_stream(rs.stream.color, 640, 480, rs.format.bgr8, 30)
     profile = self.pipeline.start(config)
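A companion method for grabbing a single color frame is not shown; a minimal sketch (matching the snippet's indentation and assuming numpy is imported as np) could be:

 def read(self):
     # Block until a frameset arrives and return the color image as a BGR numpy array
     frames = self.pipeline.wait_for_frames()
     color_frame = frames.get_color_frame()
     if not color_frame:
         return None
     return np.asanyarray(color_frame.get_data())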
Example No. 11
    def __init__(self, name, size):
        self.name = name
        self.markersize = size
        self.cnd = 0
        self.frameId = 0
        self.pipeline = rs.pipeline()
        config = rs.config()
        config.enable_stream(rs.stream.depth, 640, 480, rs.format.z16, 30)
        config.enable_stream(rs.stream.color, 1920, 1080, rs.format.bgr8, 30)

        self.pipeline.start(config)
        # Check for camera calibration data
        # c_x = 643.47548083
        # c_y = 363.67742746
        # f_x = 906.60886808
        # f_y = 909.34831447
        # k_1 = 0.16962942
        # k_2 = -0.5560001
        # p_1 = 0.00116353
        # p_2 = -0.00122694
        # k_3 = 0.52491878
    
        # self.cameraMatrix = np.array([[f_x, 0, c_x],
        #                        [0, f_y, c_y],
        #                        [0, 0, 1]])
        # self.distCoeffs = np.array([k_1, k_2, p_1, p_2, k_3])
        
        self.cameraMatrix = np.array([[1.38726465e+03, 0.00000000e+00, 9.67009977e+02],
                                      [0.00000000e+00, 1.39067726e+03, 5.44111718e+02],
                                      [0.00000000e+00, 0.00000000e+00, 1.00000000e+00]])
        self.distCoeffs = np.array([ 0.1772611,  -0.57056992, -0.0008356,   0.00099024,  0.52153116])

        # dist_coef = np.array([0, 0, 0, 0, 0])
        # self.cameraMatrix = np.array([[906.10541873,   0.0,          643.12531806],
                                    #   [0.0,            904.68643316, 359.79710938],
                                    #   [0.0,            0.0,          1.0         ]])
        # self.distCoeffs = np.array([1.53041876e-01, -4.08438606e-01,  1.53722452e-03, -3.95946669e-04, 2.56666605e-01])
        # if not os.path.exists('/home/iclab/wrs_ws/src/aruco_hand_eye/cfg/calibration.pckl'):
        #     print("You need to calibrate the camera you'll be using. See calibration project directory for details.")
        #     self.cameraMatrix = [[603.00869939,   0.0,          318.46049727],
        #                          [0.0,            601.50770586, 251.87010006],
        #                          [0.0,            0.0,          1.0         ]]
        #     self.distCoeffs = [[7.59282092e-02,  2.21483627e-01,  1.41152268e-03, -4.71388619e-04, -1.18482976e+00]]

        #     #exit()
        # else:
        #     f = open('/home/iclab/wrs_ws/src/aruco_hand_eye/cfg/calibration.pckl', 'rb')
        #     (self.cameraMatrix, self.distCoeffs, _, _) = pickle.load(f)
        #     f.close()
        #     if self.cameraMatrix is None or self.distCoeffs is None:
        #         print("Calibration issue. Remove ./calibration.pckl and recalibrate your camera with CalibrateCamera.py.")
        #         exit()
        #     print(self.cameraMatrix)
        #     print('               ')
        #     print(self.distCoeffs)

        # Create grid board object we're using in our stream
        # board = aruco.GridBoard_create(
        #         markersX=2,
        #         markersY=2,
        #         markerLength=0.09,
        #         markerSeparation=0.01,
        #         dictionary=ARUCO_DICT)

        # Create vectors we'll be using for rotations and translations for postures
        self.rvecs = None 
        self.tvecs = None
        self.rvecs_arr = np.zeros((3, NUMBER))
        self.tvecs_arr = np.zeros((3, NUMBER))
        # cam = cv2.VideoCapture('gridboardiphonetest.mp4')
        # self.cam_left = cv2.VideoCapture(5)
        # self.cam_right = cv2.VideoCapture(10)
        self.cam = None
        self.QueryImg = None
        self.init_server()
        frames = self.pipeline.wait_for_frames()
        color_frame = frames.get_color_frame()
        self.QueryImg = np.asanyarray(color_frame.get_data())
Example No. 12
	def read_detect(self):
		global detect_flag 
		global detections 
		#global classes
		pipeline = rs.pipeline()
		config = rs.config()
		config.enable_stream(rs.stream.color, 640, 480, rs.format.bgr8, 30)
		pipeline.start(config)
		detect_count = 0
		recognize_count = 0
		end_count = 0
		person_exists = False
		recognize_start_flag = False
		recognize_end_flag = False
		img_sequence = []
		count = 0

		try:
			while True:
				read_start = time.time()
				frames = pipeline.wait_for_frames()
				frames.keep()
				color_frame = frames.get_color_frame()
				if not color_frame:
					continue
				orig_image = np.asanyarray(color_frame.get_data())
				detect_size = opt.detect_size
				img, _, _, _ = resize_square(orig_image, height=detect_size, color=(127.5,127.5,127.5)) #img is of shape (416,416,3)
				img = img[:, :, ::-1].transpose(2,0,1)  #bgr->rgb
				img = np.ascontiguousarray(img, dtype=np.float32)
				img /= 255.0
				read_end = time.time()
				#print('read a frame cost:%.3f'%(read_end - read_start))
				detect_start = time.time()
				detections = None
				img = torch.from_numpy(img).unsqueeze(0).to(device)
				if detect_count % self.detect_sample_step  == 0:
					predictions = self.detect_model(img)
					predictions = predictions[predictions[:, :, 4]>opt.conf_thres]
					if len(predictions)>0:
						detections = non_max_suppression(predictions.unsqueeze(0), opt.conf_thres, opt.nms_thres)
						if detections is not None:
							person_exists = True
							#print(detections)
					else:
						person_exists = False
				detect_end = time.time()
				#print('detect a frame cost time:%.3f seconds using cpu'%(detect_end - detect_start))
				detect_count += 1
				detect_size = opt.detect_size
				orig_image_copy = orig_image.copy()   #deep copy
				#Person Detect
				if person_exists:
					image_with_box, box = draw_boxes(orig_image, detect_size, detections, self.detect_classes)
					recognize_start_flag = True
					self.detect_sample_step = 1
					recognize_size = opt.recognize_size
					cv2.imwrite('data/output/copy%s.jpg'%recognize_count, orig_image_copy)
					image_for_recognize = prep_image_for_recognize(orig_image_copy, recognize_size, box)
					if (recognize_count % self.recognize_sample_step) == 0:
						#print(count)
						img_sequence.append(torch.FloatTensor(image_for_recognize))
						if not os.path.exists('data/test/%s'%count):
							os.makedirs('data/test/%s'%count) 
						cv2.imwrite('data/test/%s/%s.jpg'%(count, recognize_count), image_for_recognize)
					recognize_count += 1
					if recognize_count % (self.recognize_sample_step * opt.temporal_sample_length) == 0:
						assert len(img_sequence)==opt.temporal_sample_length, 'got a wrong length of image sequence '
						video_tensor = torch.stack(img_sequence, 0)
						self.Q.put((video_tensor, orig_image))		
						img_sequence = []
						count += 1
					if self.recognition_mark is not None:
						cv2.putText(image_with_box, self.recognition_mark, (10,50), cv2.FONT_HERSHEY_TRIPLEX, 1,(0,0,255),1)
					cv2.imshow('recognition', image_with_box)
					cv2.waitKey(30)
				#No Person Detected
				else:
					if recognize_start_flag == False:
						cv2.putText(orig_image, 'No person', (10,50), cv2.FONT_HERSHEY_TRIPLEX, 1, (0,0,255), 1)
						cv2.imshow('recognition', orig_image)#show Image with mark 'No Person'
						#cv2.imwrite('data/output/img%s.jpg'%Image_count, orig_image)
						cv2.waitKey(30)
						#pass
					else:
						end_count += 1
						if end_count==2:
							recognize_start_flag = False
							self.detect_sample_step = opt.detect_sample_step
							self.recognition_mark = None
							end_count = 0
				
		except Exception as e:
			print(e)
		pass
def extract_from_bag(bag_file, FPS):
    config = rs.config()
    pipeline = rs.pipeline()
    config.enable_stream(rs.stream.depth, 848, 480, rs.format.z16, FPS)
    config.enable_stream(rs.stream.color, 640, 480, rs.format.bgr8, FPS)
    rs.config.enable_device_from_file(config, bag_file, repeat_playback=False)

    # Start
    profile = pipeline.start(config)
    # this makes it so no frames are dropped while writing video
    playback = profile.get_device().as_playback()
    playback.set_real_time(False)

    align_to = rs.stream.color
    align = rs.align(align_to)

    # Get data scale from the device
    depth_sensor = profile.get_device().first_depth_sensor()
    depth_scale = depth_sensor.get_depth_scale()

    count = 0
    depth_set = []
    color_set = []
    frame_index = []
    time_stamps = []
    start_time = 0
    frame_num = -1

    while True:
        try:
            frames = pipeline.wait_for_frames()
            frames.keep()
            if frame_num == frames.get_frame_number():
                continue
            frame_num = frames.get_frame_number()

            # print(frame_num)
            if count == 0:
                time_stamps.append(0)
                start_time = frames.get_timestamp()
            else:
                time_stamps.append(frames.get_timestamp() - start_time)
            frame_index.append(frame_num)
            if frames.size() < 2:
                # Inputs are not ready yet
                continue
        except (RuntimeError):
            print('Total frame count:', count)
            pipeline.stop()
            break

        # align the deph to color frame
        aligned_frames = align.process(frames)
        # Align depth frame according to color frame
        depth_frame = aligned_frames.get_depth_frame()
        color_frame = aligned_frames.get_color_frame()

        # validate that both frames are valid
        if not depth_frame or not color_frame:
            continue

        depth_image = np.asanyarray(depth_frame.get_data())
        # convert to meters
        depth_image = depth_image * depth_scale
        color_image = np.asanyarray(color_frame.get_data())

        depth_set.append(depth_image)
        color_set.append(color_image)

        count += 1

    print("Video processing done!")
    frame_index = np.asarray(frame_index) - frame_index[0]
    time_stamps = np.asarray(time_stamps) / 1000
    return depth_set, color_set, frame_index, time_stamps
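A short usage sketch for the bag reader above (the bag filename is illustrative):

depth_set, color_set, frame_index, time_stamps = extract_from_bag('recording.bag', FPS=30)
print('%d frames over %.2f seconds' % (len(color_set), time_stamps[-1]))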
Example No. 14
    def __init__(self,
                 width=WIDTH,
                 height=HEIGHT,
                 enable_rgb=True,
                 enable_depth=True,
                 device_id=None):

        self.device_id = "829212070352"  # Lab: "828112071102"  Home:"829212070352"
        self.enable_rgb = enable_rgb
        self.enable_depth = enable_depth

        self.width = width
        self.height = height
        self.resize = (width != WIDTH) or (height != HEIGHT)

        self.pipeline = None
        if self.enable_depth or self.enable_rgb:
            self.pipeline = rs.pipeline()
            config = rs.config()

            # if we are provided with a specific device, then enable it
            if self.device_id is not None:
                config.enable_device(self.device_id)

            if enable_depth:
                config.enable_stream(rs.stream.depth, 640, 480, rs.format.z16,
                                     60)  # depth

            if enable_rgb:
                config.enable_stream(rs.stream.color, 640, 480, rs.format.bgr8,
                                     60)  # rgb

            # Start streaming
            profile = self.pipeline.start(config)

            # Getting the depth sensor's depth scale (see rs-align example for explanation)
            if self.enable_depth:
                depth_sensor = profile.get_device().first_depth_sensor()
                depth_scale = depth_sensor.get_depth_scale()
                print("Depth Scale is: ", depth_scale)
                if self.enable_rgb:
                    # Create an align object
                    # rs.align allows us to perform alignment of depth frames to others frames
                    # The "align_to" is the stream type to which we plan to align depth frames.
                    align_to = rs.stream.color
                    self.align = rs.align(align_to)

            time.sleep(1)  # let camera warm up

        # initialize frame state
        # original color image
        self.color_image = None
        self.color_frame = None
        self.depth_image = None
        self.depth_frame = None
        self.frame_count = 0
        self.start_time = time.time()
        self.frame_time = self.start_time
        self.running = True

        #For displaying (including marker detection)
        self.images = None
        #For detected markers
        self.useful_color_image = None

        # Aruco marker part
        # Load the dictionary that was used to generate the markers.
        self.dictionary = cv2.aruco.Dictionary_get(
            cv2.aruco.DICT_ARUCO_ORIGINAL)

        # Initialize the detector parameters using default values
        self.parameters = cv2.aruco.DetectorParameters_create()
def main():
    rsContext = rs.context()

    # folderPath = '/home/sload/Desktop/ShaharSarShalom/VideoStreamSamples/'
    #recordedFile = os.path.join(folderPath, '20200209_163025.bag')
    #recordedFile = os.path.join(folderPath, '20200209_163301.bag')

    recordedFile = os.path.join('/home/sload/Desktop/ShaharSarShalom/forSS/recs_160220_wallAt255cmFromCamera', '20200216_174116_secondRec.bag')
    dstFolder = '/home/sload/Desktop/ShaharSarShalom/VideoStreamSamples/20200209_163301/VideoFrames'

    parser = ArgumentParser(add_help=False)
    args = parser.add_argument_group('Options')
    args.add_argument('-isDebug', '--isDebug', default=True, help='')
    args.add_argument('-debugPath', '--debugPath', default='/home/sload/Desktop/ShaharSarShalom/VideoStreamSamples/20200209_163301/DebugPath', help='')
    args.add_argument('-isStreamColorImg', '--isStreamColorImg',default=True,help='')
    args.add_argument('-isStreamInfraredImg', '--isStreamInfraredImg', default=True, help='')
    args.add_argument('-isStreamDepthImg', '--isStreamDepthImg', default=True, help='')

    args = parser.parse_args()
    config = args

    device = rsContext.load_device(recordedFile)

    device = device.as_playback()

    pipe = rs.pipeline(rsContext)
    pipelineProfile = pipe.start()

    # Declare a point cloud object
    pc = rs.pointcloud()
    depthPostProcessing = DepthPostProcessing()

    try:
        #for i in range(0, 300):
        videoFrameCounter = 0
        while True:

            # Wait for all configured streams to produce a frame
            frames = pipe.wait_for_frames()
            # frames.size()
            # frame = frames.as_frame()

            if config.isStreamColorImg:
                videoFrameCounter += 1
                videoFrame = frames.get_color_frame()

                if videoFrame.is_frame() and CheckFrameValidity(videoFrame):
                    if False:
                        PlotVideoFrame(videoFrame)
                    # WriteVideoFrame(videoFrame = videoFrame, dstFolder = dstFolder, fileIndex= videoFrameCounter)

            if config.isStreamInfraredImg:
                frame = frames.get_infrared_frame()
                if CheckFrameValidity(frame):
                    depth_data = frame.get_data()
                    np_image = np.asanyarray(depth_data)
                    cv2.equalizeHist(np_image, np_image)
                    np_image = cv2.applyColorMap(np_image, cv2.COLORMAP_JET)
                    cv2.imshow("Infrared Image", np_image)
                    cv2.namedWindow("Infrared Image", cv2.WINDOW_AUTOSIZE)
                    cv2.waitKey(30)

            if config.isStreamDepthImg:
                depthFrame = frames.get_depth_frame()
                if CheckFrameValidity(depthFrame):
                    PlotDepthFrame(depthFrame)
                    # Plot the depth map as a point cloud
                    # PlotPointCloud(pc, depthFrame, videoFrame)

                    processedDepthFrame = depthPostProcessing.ApplyPostFiltering(depthFrame)
                    PlotDepthFrame(processedDepthFrame)
                    PlotDepthFrame(depthFrame)

        # print(f.profile)
        device.resume()
    finally:
        pipe.stop()
Example No. 16
def AD_GetWalk(index):
    global INDEX, SUBJECTPATH, STATUSBOOL, STATUSARR
    INDEX = index
    try:
        print('trying1')
        global FINALORIGINALPATH
        print('trying2')
        FINALORIGINALPATH = SUBJECTPATH + '/WALK' + str(INDEX) + "/ORIGINAL"
        print('trying3')
        os.makedirs(FINALORIGINALPATH)
        print('trying4')
    except OSError:
        print('Error in creation of directory')
    else:
        print('Successfully created directory')

    # Capture the images and store them in 'newSubject/WALK(index)/ORIGINAL'

    print("Started")

    getframe = False

    #################################################################################
    # Create a pipeline
    pipeline = rs.pipeline()

    #Create a config and configure the pipeline to stream
    #  different resolutions of color and depth streams
    config = rs.config()
    config.enable_stream(rs.stream.depth, 640, 360, rs.format.z16, 30)
    config.enable_stream(rs.stream.color, 640, 480, rs.format.bgr8, 30)

    # Start streaming
    profile = pipeline.start(config)
    #################################################################################

    # Getting the depth sensor's depth scale (see rs-align example for explanation)
    depth_sensor = profile.get_device().first_depth_sensor()
    depth_scale = depth_sensor.get_depth_scale()
    print("Depth Scale is: ", depth_scale)

    # We will be removing the background of objects more than
    #  clipping_distance_in_meters meters away
    clipping_distance_in_meters = 3  #5.5 meter optimum for gait recognition
    clipping_distance = clipping_distance_in_meters / depth_scale

    # Create an align object
    # rs.align allows us to perform alignment of depth frames to others frames
    # The "align_to" is the stream type to which we plan to align depth frames.
    align_to = rs.stream.color
    align = rs.align(align_to)

    # Streaming loop
    try:
        imnumber = 0
        while True:
            # Get frameset of color and depth
            frames = pipeline.wait_for_frames()
            # frames.get_depth_frame() is a 640x360 depth image

            # Align the depth frame to color frame
            aligned_frames = align.process(frames)

            # Get aligned frames
            aligned_depth_frame = aligned_frames.get_depth_frame(
            )  # aligned_depth_frame is a 640x480 depth image
            color_frame = aligned_frames.get_color_frame()

            # Validate that both frames are valid
            if not aligned_depth_frame or not color_frame:
                continue

            depth_image = np.asanyarray(aligned_depth_frame.get_data())
            color_image = np.asanyarray(color_frame.get_data())

            # Remove background - Set pixels further than clipping_distance to grey
            grey_color = 0
            depth_image_3d = np.dstack(
                (depth_image, depth_image,
                 depth_image))  #depth image is 1 channel, color is 3 channels
            bg_removed = np.where(
                (depth_image_3d > clipping_distance) | (depth_image_3d <= 0),
                grey_color, color_image)

            gray = cv2.cvtColor(
                bg_removed,
                cv2.COLOR_BGR2GRAY)  # We do some colour transformations.
            #depthwhite = detect2(gray, aligned_depth_frame, clipping_distance*depth_scale) # We get the output of our detect function.

            im = Image.fromarray(gray)
            #im = Image.fromarray(depthwhite)
            imnumber += 1
            im.save(FINALORIGINALPATH + '/' + "{}.jpeg".format(imnumber))
            cv2.imshow('Align Example', gray)
            #cv2.imshow('Align Example', depthwhite)
            print('KT', str(KILLTHREAD))
            k = cv2.waitKey(1)
            if k & 0xFF == ord("q") or KILLTHREAD:  # Exit condition
                cv2.destroyAllWindows()
                break
    finally:
        pipeline.stop()
Example No. 17
# coding: utf-8
__author__ = "Nicolas Huynh"
__create_date__ = "07/10/2019"
__version__ = "0.1.0"
__maintainer__ = "Nicolas Huynh"
__email__ = "*****@*****.**"
__status__ = "Development"

import matplotlib.pyplot as plt
import numpy
import pandas
import pyrealsense2
from AcquisitionT265 import AcquisitionT265

# Declare RealSense pipeline, encapsulating the actual device and sensors
pipeline = pyrealsense2.pipeline()
cfg = pyrealsense2.config()
cfg.enable_stream(pyrealsense2.stream.pose)
pipeline.start(cfg)

try:
    dataArray = []
    plt.ion()
    fig, ax = plt.subplots()
    origin = numpy.array([0.0, 0.0, 0.0])
    v0 = numpy.array([0.0, 0.0, 0.0])
    a0 = numpy.array([0.0, 0.0, 0.0])
    for i in range(30):
        data = AcquisitionT265(pipeline, origin, v0, a0)
        dataArray.append(data.get_data_pt())
        df = pandas.DataFrame(dataArray,
def record_15():
    global ix
    # Create a pipeline
    pipeline = rs.pipeline()

    #Create a config and configure the pipeline to stream
    #  different resolutions of color and depth streams
    config = rs.config()
    config.enable_stream(rs.stream.depth, 1280, 720, rs.format.z16, 30)
    config.enable_stream(rs.stream.color, 1280, 720, rs.format.bgr8, 30)

    # Start streaming
    profile = pipeline.start(config)

    # Getting the depth sensor's depth scale (see rs-align example for explanation)
    depth_sensor = profile.get_device().first_depth_sensor()

    #Enabling High Accuracy Preset
    depth_sensor.set_option(rs.option.visual_preset, 3.0)
    preset_name = depth_sensor.get_option_value_description(
        rs.option.visual_preset, 3.0)

    #Enabling Emitter To increase accuracy
    enable_ir_emitter = True
    depth_sensor.set_option(rs.option.emitter_enabled,
                            1 if enable_ir_emitter else 0)

    #GET the depth scale from the SDK
    depth_scale = depth_sensor.get_depth_scale()
    print("Depth Scale is: ", depth_scale)

    # We will be removing the background of objects more than
    #  clipping_distance_in_meters meters away
    clipping_distance_in_meters = 6  # 6 meters
    clipping_distance = clipping_distance_in_meters / depth_scale

    # Create an align object
    # rs.align allows us to perform alignment of depth frames to others frames
    # The "align_to" is the stream type to which we plan to align depth frames.
    align_to = rs.stream.color
    align = rs.align(align_to)

    colorizer = rs.colorizer()  # TO colorize the depth image

    #FILTERS
    dec_filter = rs.decimation_filter(
    )  # Decimation - reduces depth frame density
    dec_filter.set_option(rs.option.filter_magnitude, 4)

    spat_filter = rs.spatial_filter(
    )  # Spatial    - edge-preserving spatial smoothing
    spat_filter.set_option(rs.option.filter_magnitude, 5)
    spat_filter.set_option(rs.option.filter_smooth_alpha, 1)
    spat_filter.set_option(rs.option.filter_smooth_delta, 50)
    spat_filter.set_option(rs.option.holes_fill, 3)

    temp_filter = rs.temporal_filter()  # Temporal   - reduces temporal noise

    hole_filling = rs.hole_filling_filter()  #FIll ALL THE HOLES
    #FILTERS END

    out = cv2.VideoWriter('data/read.avi',
                          cv2.VideoWriter_fourcc('M', 'J', 'P', 'G'), 10,
                          (1280, 720))

    #Some Variables
    count = 0
    answer = {}

    cv2.namedWindow("W")
    cv2.setMouseCallback("W", mouse_callback)
    ix = False

    # Streaming loop
    try:
        while True:
            key = cv2.waitKey(1)
            # Press esc or 'q' to close the image window
            if key & 0xFF == ord('q') or key == 27:
                cv2.destroyAllWindows()
                break

            # Get frameset of color and depth
            frames = pipeline.wait_for_frames()
            # frames.get_depth_frame() is a 1280x720 depth image

            # Align the depth frame to color frame
            aligned_frames = align.process(frames)

            # Get aligned frames
            aligned_depth_frame = aligned_frames.get_depth_frame(
            )  # aligned_depth_frame is a 1280x720 depth image

            #        aligned_depth_frame = dec_filter.process(aligned_depth_frame)
            aligned_depth_frame = spat_filter.process(
                aligned_depth_frame)  #Applying Filter
            aligned_depth_frame = temp_filter.process(
                aligned_depth_frame)  #Applying Filter
            aligned_depth_frame = hole_filling.process(
                aligned_depth_frame)  #Applying Filter

            color_frame = aligned_frames.get_color_frame()  #Getting RGB frame

            # Validate that both frames are valid
            if not aligned_depth_frame or not color_frame:
                continue

            depth_image = np.asanyarray(
                aligned_depth_frame.get_data())  #Getting final depth image

            #Colorize the depth image
            colorized_depth = np.asanyarray(
                colorizer.colorize(aligned_depth_frame).get_data())

            #        #See the depth image
            #        cv2.imshow("colorized depth",rescale(colorized_depth,50))

            depth_image[depth_image > 5000] = 0

            color_image = np.asanyarray(
                color_frame.get_data())  #Getting final RGB frame

            if count == 0:
                answer[count] = [
                    depth_image, color_image
                ]  #0 is the first image. can be used for background subtraction
                count += 1
                continue
            if ix:
                color_image1, check, values = apply_cascade(
                    color_image.copy(),
                    'models/cascades/haarcascade_lefteye_2splits.xml')
                if check:
                    answer[count] = [depth_image, color_image]
                    count += 1
            cv2.imshow('W', rescale(color_image, 90))
            if len(answer) == 15:
                break

    finally:
        pipeline.stop()  #Turn off the camera
        cv2.destroyAllWindows()  #Remove all the windows
        out.release()

    import pickle
    output = open('data/record_15.pkl', 'wb')
    pickle.dump(answer, output)
    output.close()
Example No. 19
    def initialize(self):
        print("Initialize")

        # openpifpaf configuration
        class Args:
            source = 0
            checkpoint = None
            basenet = None
            dilation = None
            dilation_end = None
            headnets = ['pif', 'paf']
            dropout = 0.0
            quad = 1
            pretrained = False
            keypoint_threshold = None
            seed_threshold = 0.2
            force_complete_pose = False
            debug_pif_indices = []
            debug_paf_indices = []
            connection_method = 'max'
            fixed_b = None
            pif_fixed_scale = None
            profile_decoder = None
            instance_threshold = 0.05
            device = torch.device(type="cpu")
            disable_cuda = True
            scale = 1
            key_point_threshold = 0.05
            head_dropout = 0.0
            head_quad = 0
            default_kernel_size = 1
            default_padding = 0
            default_dilation = 1
            head_kernel_size = 1
            head_padding = 0
            head_dilation = 0
            cross_talk = 0.0
            two_scale = False
            multi_scale = False
            multi_scale_hflip = False
            paf_th = 0.1
            pif_th = 0.1
            decoder_workers = None
            experimental_decoder = False
            extra_coupling = 0.0

        self.args = Args()
        model, _ = nets.factory_from_args(self.args)
        model = model.to(self.args.device)
        # model.cuda()
        self.processor = decoder.factory_from_args(self.args, model)

        # realsense configuration
        try:
            config = rs.config()
            config.enable_device(self.params["device_serial"])
            config.enable_stream(rs.stream.depth, self.width, self.height,
                                 rs.format.z16, 30)
            config.enable_stream(rs.stream.color, self.width, self.height,
                                 rs.format.bgr8, 30)

            self.pointcloud = rs.pointcloud()
            self.pipeline = rs.pipeline()
            cfg = self.pipeline.start(config)
#            profile = cfg.get_stream(rs.stream.color) # Fetch stream profile for depth stream
#            intr = profile.as_video_stream_profile().get_intrinsics() # Downcast to video_stream_profile and fetch intrinsics
#            print (intr.fx, intr.fy)
#            depth_scale = cfg.get_device().first_depth_sensor().get_depth_scale()
#            print("Depth Scale is: " , depth_scale)
#            sys.exit(-1)
        except Exception as e:
            print("Error initializing camera")
            print(e)
            sys.exit(-1)
Example No. 20
def main():
    parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('--mirror',
                        help='flip video horizontally',
                        action='store_true')
    parser.add_argument('--model', help='.tflite model path.', required=False)
    parser.add_argument('--res',
                        help='Resolution',
                        default='640x480',
                        choices=['480x360', '640x480', '1280x720'])
    parser.add_argument('--videosrc',
                        help='Which video source to use',
                        default='/dev/video0')
    parser.add_argument('--h264',
                        help='Use video/x-h264 input',
                        action='store_true')
    parser.add_argument('--jpeg',
                        help='Use image/jpeg input',
                        action='store_true')
    args = parser.parse_args()

    default_model = 'models/posenet_mobilenet_v1_075_%d_%d_quant_decoder_edgetpu.tflite'
    if args.res == '480x360':
        src_size = (640, 480)
        appsink_size = (480, 360)
        model = args.model or default_model % (353, 481)
    elif args.res == '640x480':
        src_size = (640, 480)
        appsink_size = (640, 480)
        model = args.model or default_model % (481, 641)
    elif args.res == '1280x720':
        src_size = (1280, 720)
        appsink_size = (1280, 720)
        model = args.model or default_model % (721, 1281)

    print('Loading model: ', model)
    engine = PoseEngine(model, mirror=args.mirror)
    input_shape = engine.get_input_tensor_shape()
    inference_size = (input_shape[2], input_shape[1])

    n = 0
    sum_process_time = 0
    sum_inference_time = 0
    fps_counter = avg_fps_counter(30)

    pipeline = rs.pipeline()
    config = rs.config()
    config.enable_stream(rs.stream.depth, src_size[0], src_size[1],
                         rs.format.z16, 30)
    config.enable_stream(rs.stream.color, src_size[0], src_size[1],
                         rs.format.rgb8, 30)
    pipeline.start(config)

    c = rs.colorizer()
    align_to = rs.stream.color
    align = rs.align(align_to)
    while True:
        frames = pipeline.wait_for_frames()
        # Align the depth frame to color frame
        aligned_frames = align.process(frames)

        color = aligned_frames.get_color_frame()
        depth = aligned_frames.get_depth_frame()
        if not depth: continue

        color_data = color.as_frame().get_data()
        color_image_rgb = np.asanyarray(color_data)
        color_image = cv2.cvtColor(color_image_rgb, cv2.COLOR_RGB2BGR)

        depth_colormap = c.colorize(depth)
        depth_data = depth_colormap.as_frame().get_data()
        depth_image = np.asanyarray(depth_data)
        depth_image = cv2.cvtColor(depth_image, cv2.COLOR_RGB2BGR)

        if (color_image_rgb.shape[0] != appsink_size[1]
                or color_image_rgb.shape[1] != appsink_size[0]):
            color_image_rgb = cv2.resize(color_image_rgb,
                                         dsize=appsink_size,
                                         interpolation=cv2.INTER_NEAREST)

        start_time = time.monotonic()
        outputs, inference_time = engine.DetectPosesInImage(color_image_rgb)
        end_time = time.monotonic()
        n += 1
        sum_process_time += 1000 * (end_time - start_time)
        sum_inference_time += inference_time

        avg_inference_time = sum_inference_time / n
        text_line = 'PoseNet: %.1fms (%.2f fps) TrueFPS: %.2f Nposes %d' % (
            avg_inference_time, 1000 / avg_inference_time, next(fps_counter),
            len(outputs))

        if args.mirror:
            color_image = cv2.flip(color_image, 1)
            depth_image = cv2.flip(depth_image, 1)
        shadow_text(color_image, 10, 20, text_line)
        shadow_text(depth_image, 10, 20, text_line)
        for pose in outputs:
            draw_pose(color_image, pose, src_size, appsink_size)
            draw_pose(depth_image, pose, src_size, appsink_size)

        cv2.imshow('color', color_image)
        cv2.imshow('depth', depth_image)

        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    cv2.destroyAllWindows()
    return
def rip_bag(path_people_bag, path_save_photo):

    if not os.path.exists(path_people_bag):
        print("Файла не найдена")
        return 0
    clear_dir(path_save_photo)

    try:
        pipeline = rs.pipeline()

        config = rs.config()
        config.enable_stream(rs.stream.depth, 640, 480, rs.format.z16, 60)
        config.enable_stream(rs.stream.color, 640, 480, rs.format.rgb8, 60)

        config.enable_device_from_file(path_people_bag, repeat_playback=False)

        profile = pipeline.start(config)

        playback = profile.get_device().as_playback()
        playback.set_real_time(False)
        colorizer = rs.colorizer()

        i = 0
        while True:
            try:
                frames = pipeline.wait_for_frames()
                playback.pause()

            except RuntimeError as e:
                print("error, ", str(e))
                # pipeline.stop()

                print("Файл закрылся")
                #del pipeline
                print("del pipeline")
                # config = None
                print("del config")
                # del frames
                print("del frames")

                print("Переменные удалены")
                break
            else:
                i += 1
                color_frame = frames.get_color_frame()

                depth_frame = frames.get_depth_frame()


                print("Saving frame:", i)
                color_frame = np.asanyarray(color_frame.get_data())
                depth_image = np.asanyarray(depth_frame.get_data())

                img = cv2.cvtColor(color_frame, cv2.COLOR_BGR2RGB)

                f = open(os.path.join(path_save_photo, str(i) + '.csv'), 'w')

                for y in range(480):
                    for x in range(640):
                        dist = depth_frame.get_distance(x, y)
                        f.write(str(dist))
                        f.write(',')
                    f.write('\n')
                f.close()

                cv2.imwrite(os.path.join(path_save_photo, str(i) + '.png'), img)
                cv2.imwrite(os.path.join(path_save_photo, str(i) + '_dep.png'), depth_image)

                if i >= 50:
                    break
                playback.resume()

    except BaseException as e:
        print("Errorrrrr", str(e))
Exemplo n.º 22
0
def get_frames(filename):
    # Configure options and start stream
    pipeline = rs.pipeline()
    config = rs.config()
    config.enable_device_from_file(filename)
    config.enable_all_streams()
    profile = pipeline.start(config)

    depth_sensor = profile.get_device().first_depth_sensor()
    global depth_scale
    depth_scale = depth_sensor.get_depth_scale()

    frame_num = 0
    frames_processed = 0
    frames_calculated = 0
    frames_to_skip = 1

    color_images = []
    depth_images = []
    while True:
        # Get frame from bag
        frames = pipeline.wait_for_frames()

        # Stop if the bag is done playing
        if frames.frame_number < frame_num:
            break
        else:
            frame_num = frames.frame_number

        # Align depth to colour
        align = rs.align(rs.stream.color)
        frames = align.process(frames)

        # Obtain depth and colour frames
        depth_frame = frames.get_depth_frame()
        color_frame = frames.get_color_frame()

        # If there is no depth or color frame, try again
        if not depth_frame or not color_frame:
            continue

        # Decimation Filter
        dec_filter = rs.decimation_filter()
        # Edge-preserving smoothing
        spat_filter = rs.spatial_filter()
        # Apply Filters
        depth_frame = dec_filter.process(depth_frame)
        depth_frame = spat_filter.process(depth_frame)

        if frames_processed % frames_to_skip == 0:
            # Obtain Intrinsics
            depth_intrinsics = depth_frame.profile.as_video_stream_profile().intrinsics
            color_intrinsics = color_frame.profile.as_video_stream_profile().intrinsics

            depth_image = (np.asanyarray(depth_frame.get_data())*depth_scale).astype(np.float32)
            depth_image[depth_image > 10] = 0
            depth_image[depth_image < 0.3] = 0
            color_image = np.asanyarray(color_frame.get_data())
            if color_image.shape[0] != depth_image.shape[0]:
                color_image = cv2.cvtColor(color_image, cv2.COLOR_BGR2RGB)
                # color_image = cv2.cvtColor(color_image, cv2.COLOR_BGR2GRAY)
                color_image = cv2.resize(color_image, (depth_image.shape[1], depth_image.shape[0])).astype(np.uint8)

            depth_images.append(depth_image)
            color_images.append(color_image)

            frames_calculated += 1
            print("", end='')
            print("\rFrame Number: {}".format(frames_calculated), end='')
            if frames_processed >= 500:
                break
        frames_processed += 1

    pipeline.stop()
    print("")
    return color_images, depth_images, color_intrinsics, depth_intrinsics
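In get_frames() the rs.align, rs.decimation_filter and rs.spatial_filter objects are re-created on every loop iteration. A sketch of the same loop skeleton with the processing blocks constructed once, reusing the pipeline defined above (a sketch only, not the author's code):

align = rs.align(rs.stream.color)      # build processing blocks once, outside the loop
dec_filter = rs.decimation_filter()    # decimation filter
spat_filter = rs.spatial_filter()      # edge-preserving smoothing

while True:
    frames = align.process(pipeline.wait_for_frames())
    depth_frame = spat_filter.process(dec_filter.process(frames.get_depth_frame()))
    color_frame = frames.get_color_frame()
    if not depth_frame or not color_frame:
        continue
    # ... per-frame processing identical to the loop above ...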
Exemplo n.º 23
0
 def __init__(self,image_size=(640,480)):
     self.pipeline = rs.pipeline()
     config = rs.config()
     config.enable_stream(rs.stream.depth, image_size[0], image_size[1], rs.format.z16, 30)
     config.enable_stream(rs.stream.color, image_size[0], image_size[1], rs.format.bgr8, 30)
     self.profile = self.pipeline.start(config)
        if min_val < new < max_val:
            return False
    return True


NetworkTables.initialize(server='roborio-166-frc.local')

sd = NetworkTables.getTable('SmartDashboard')
# sd.putNumber('someNumber', 1234)
# otherNumber = sd.getNumber('otherNumber')

WIDTH = 640
HEIGHT = 480
POINT_SAMPLES = 5

pipe = rs2.pipeline(
)  # The camera's API sucks, but at least I can guarantee settings
config = rs2.config()
config.enable_stream(rs2.stream.color, WIDTH, HEIGHT, rs2.format.bgr8, 60)
config.enable_stream(rs2.stream.depth, WIDTH, HEIGHT, rs2.format.z16, 60)
profile = pipe.start(config)
s = profile.get_device().query_sensors()[1]
s.set_option(rs2.option.brightness, 0)
s.set_option(rs2.option.contrast, 100)
s.set_option(rs2.option.exposure, 45)
s.set_option(rs2.option.gain, 75)
s.set_option(rs2.option.gamma, 100)
s.set_option(rs2.option.hue, 0)
s.set_option(rs2.option.saturation, 50)
s.set_option(rs2.option.sharpness, 0)
s.set_option(rs2.option.white_balance, 2800)
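A minimal sketch of how the pipe and SmartDashboard table configured above could be combined: sample the depth at the image centre over POINT_SAMPLES frames and publish the average (the 'targetDistance' key is hypothetical):

samples = []
for _ in range(POINT_SAMPLES):
    frames = pipe.wait_for_frames()
    depth = frames.get_depth_frame()
    if not depth:
        continue
    # get_distance() returns the depth at the given pixel in metres
    samples.append(depth.get_distance(WIDTH // 2, HEIGHT // 2))

if samples:
    sd.putNumber('targetDistance', sum(samples) / len(samples))  # hypothetical key name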
Exemplo n.º 25
0
def main():

    global handin

    #img_rows, img_cols, maxFrames = 32, 32, 100
    img_rows, img_cols, maxFrames = 32, 32, 55
    depthFrame = 0
    cameraHeightR = 480
    cameraWidthR = 848
    #cameraWidthR = 640
    frameRateR = 60
    #frameRateR = 30

    #crop parameter
    xupam = 350
    yupam = 200

    xdpam = 250
    ydpam = 300

    classGest = ['1','11','12','13','4','5','7','8']
    delayGest = 10
    delayBol = False
    backgroundRemove = True
    boxCounter = True



    #load the model and weight
    json_file = open('3dcnnresult/24/3dcnnmodel.json', 'r')
    loaded_model_json = json_file.read()
    json_file.close()
    loaded_model = model_from_json(loaded_model_json)

    # load weights into new model
    loaded_model.load_weights("3dcnnresult/24/3dcnnmodel.hd5")
    print("Loaded model from disk")

    loaded_model.compile(loss=categorical_crossentropy,
                         optimizer='rmsprop', metrics=['accuracy'])

    #setup cv face detection
    face_cascade = cv2.CascadeClassifier('haarcascades/haarcascade_frontalface_alt2.xml')


    # setup Realsense
    # Configure depth and color streams
    pipeline = rs.pipeline()
    config = rs.config()
    config.enable_stream(rs.stream.depth, cameraWidthR, cameraHeightR, rs.format.z16, frameRateR)
    config.enable_stream(rs.stream.color, cameraWidthR, cameraHeightR, rs.format.bgr8, frameRateR)
    '''vc = cv2.VideoCapture(0)
    rval , firstFrame = vc.read()
    heightc, widthc, depthcol = firstFrame.shape
    imgTxt = np.zeros((heightc, 400, 3), np.uint8)
    #print('tryyyyy1')'''
    stat = 0
    # Start streaming
    profile = pipeline.start(config)

    # Getting the depth sensor's depth scale (see rs-align example for explanation)
    depth_sensor = profile.get_device().first_depth_sensor()
    depth_scale = depth_sensor.get_depth_scale()
    # print "Depth Scale is: " , depth_scale

    # We will be removing the background of objects more than
    #  clipping_distance_in_meters meters away
    clipping_distance_in_meters = 2  # 2 meters
    clipping_distance = clipping_distance_in_meters / depth_scale
    #print(clipping_distance)

    # for text purpose
    imgTxt = np.zeros((480, 400, 3), np.uint8)
    txt = "OpenCV"
    txtLoad = "["
    txtDelay = "["
    txtRecord = "Capture"
    txtDel = "Delay"
    txtProbability = "0%"
    font = cv2.FONT_HERSHEY_SIMPLEX


    framearray = []
    ctrDelay = 0
    channel = 1
    gestCatch = False
    gestStart = False
	
    timeStart = 0
    timeEnd = 0


    x,y,w,h = 0,0,0,0

    align_to = rs.stream.color
    align = rs.align(align_to)

    try:
        while True:

            dataImg = []

            # Wait for a coherent pair of frames: depth and color
            frames = pipeline.wait_for_frames()

            # Align the depth frame to color frame
            aligned_frames = align.process(frames)

            # Get aligned frames
            aligned_depth_frame = aligned_frames.get_depth_frame()  # aligned_depth_frame is a 640x480 depth image
            color_frame = aligned_frames.get_color_frame()

            # Validate that both frames are valid
            if not aligned_depth_frame or not color_frame:
                continue

            depth_image = np.asanyarray(aligned_depth_frame.get_data())
            color_image = np.asanyarray(color_frame.get_data())


            if(backgroundRemove == True):
                # Remove background - Set pixels further than clipping_distance to grey
                #grey_color = 153
                grey_color = 0
                depth_image_3d = np.dstack((depth_image, depth_image, depth_image))  # depth image is 1 channel, color is 3 channels
                bg_removed = np.where((depth_image_3d > clipping_distance) | (depth_image_3d <= 0), grey_color, color_image)
                
                color_image = bg_removed
                draw_image = color_image


            else:
                draw_image = color_image

            #cv2.imshow('color_image', color_image)
            #face detection here
            gray = cv2.cvtColor(color_image, cv2.COLOR_BGR2GRAY)
            #cv2.imshow('gray', gray)
            #cv2.waitKey(5000)

            faces = face_cascade.detectMultiScale(gray, 1.1, 2)

            if len(faces) > 0:
                if gestCatch == False and delayBol == False:

                    for f in faces:
                        xf, yf, wf, hf = f
                        farea = wf*hf
                        #print(farea)
                        if farea > 9000 and farea < 12000:
                            x, y, w, h = f
                            #gestStart = True
                            #x, y, w, h = faces[0]
            else:
                # No face detected this frame: keep the previous bounding box
                pass

            fArea = w*h
            #print(fArea)

            #if fArea > 3000:

            # crop the face then pass to resize
            cv2.rectangle(draw_image, (x, y), (x + w, y + h), (255, 0, 0), 2)

            midx = int(x + (w * 0.5))
            midy = int(y + (h * 0.5))

            xUp = (x - (w * 3))
            yUp = (y - (h * 1.5))

            xDn = ((x + w) + (w * 1))
            yDn = ((y + h) + (h * 2))

            if xUp < 1: xUp = 0
            if xDn >= cameraWidthR: xDn = cameraWidthR

            if yUp < 1: yUp = 0
            if yDn >= cameraHeightR: yDn = cameraHeightR

            if handin == False:
                cv2.rectangle(draw_image, (int(xUp), int(yUp)), (int(xDn), int(yDn)), (0, 0, 255), 2)
            else:
                cv2.rectangle(draw_image, (int(xUp), int(yUp)), (int(xDn), int(yDn)), (0, 255, 0), 2)
            cv2.circle(draw_image, (int(midx), int(midy)), 10, (255, 0, 0))

            roi_color = color_image[int(yUp):int(yDn), int(xUp):int(xDn)]



            #find the depth of middle point of face
            if backgroundRemove == True and gestCatch == False:

                depth_imageS = depth_image*depth_scale
                deptRef = depth_imageS.item(midy, midx)
                #print(clipping_distance)

                clipping_distance = (deptRef + 0.2)/depth_scale
                #print(clipping_distance)
                boxColor = color_image.copy()

                handin = checkhandIn(boxCounter, deptRef, midx, midy, w, h, depth_imageS, boxColor, draw_image)

                timeStart = time.time()
				
                if handin == True:
                    gestStart = True
                else:
                    gestStart = False

            #print("gest start = ", gestStart)

            if delayBol == False and gestStart == True:

                if depthFrame < maxFrames:
                    frame = cv2.resize(roi_color, (img_rows, img_cols))
                    framearray.append(cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY))
                    depthFrame = depthFrame+1
                    txtLoad = txtLoad+"["

                    gestCatch = True

                    #print(depthFrame)


                if depthFrame == maxFrames:
                    dataImg.append(framearray)
                    xx = np.array(dataImg).transpose((0, 2, 3, 1))
                    X = xx.reshape((xx.shape[0], img_rows, img_cols, maxFrames, channel))
                    X = X.astype('float32')
                    print('X_shape:{}'.format(X.shape))

                    timeEnd = time.time()

                    numTime = timeEnd - timeStart
                    fps = 55/numTime
					
                    print("fps: ",fps)
					
                    #do prediction
                    resc = loaded_model.predict_classes(X)[0]
                    res = loaded_model.predict_proba(X)[0]

                    resultC = classGest[resc]
                    print("X=%s, Probability=%s" % (resultC, res[resc]*100))


                    for r in range(0,8):
                        print("prob: " + str(res[r]*100))


                    #show text
                    imgTxt = np.zeros((480, 400, 3), np.uint8)
                    txt = "Gesture-" + str(resultC)
                    txtProbability = str(res[resc]*100)+"%"

                    framearray = []
                    #dataImg = []
                    txtLoad = ""
                    depthFrame = 0

                    gestCatch = False
                    handin = False
                    delayBol = True
                    #gestStart = False

                cv2.putText(imgTxt, txtLoad, (10, 20), font, 0.1, (255, 255, 255), 2, cv2.LINE_AA)
                cv2.putText(imgTxt, txtRecord, (10, 50), font, 1, (255, 255, 255), 2, cv2.LINE_AA)
                cv2.putText(imgTxt, txt, (10, 200), font, 2, (255, 255, 255), 2, cv2.LINE_AA)
                cv2.putText(imgTxt, txtProbability, (10, 250), font, 1, (255, 255, 255), 2, cv2.LINE_AA)
            # Apply colormap on depth image (image must be converted to 8-bit per pixel first)
            # depth_colormap = cv2.applyColorMap(cv2.convertScaleAbs(depth_image, alpha=0.03), cv2.COLORMAP_JET)

            #print(delayBol)
            if delayBol == True:
                ctrDelay = ctrDelay+1
                txtDelay = txtDelay + "["
                #txtDel = "Delay"
                cv2.putText(imgTxt, txtDelay, (10, 70), font, 0.1, (255, 255, 255), 2, cv2.LINE_AA)
                cv2.putText(imgTxt, txtDel, (10, 100), font, 1, (255, 255, 255), 2, cv2.LINE_AA)
                if ctrDelay == delayGest:
                    ctrDelay = 0
                    txtDelay = ""
                    delayBol = False

                    gestCatch = False
                    handin = False
                    gestStart = False

            # Stack both images horizontally
            images = np.hstack((draw_image, imgTxt))

            # Show images
            cv2.namedWindow('RealSense', cv2.WINDOW_AUTOSIZE)
            cv2.imshow('RealSense', images)
            cv2.waitKey(1)

    finally:

        # Stop streaming
        pipeline.stop()
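The background-removal step above (replace everything further than clipping_distance with a grey value) can be isolated into a small helper. A sketch under the same assumptions: depth_image is the raw z16 array and clipping_distance is given in depth units:

import numpy as np


def remove_background(depth_image, color_image, clipping_distance, grey_color=0):
    # Replicate the single-channel depth image so it can mask a 3-channel color image
    depth_image_3d = np.dstack((depth_image, depth_image, depth_image))
    # Pixels beyond the clipping distance, or with no depth reading, become grey_color
    return np.where((depth_image_3d > clipping_distance) | (depth_image_3d <= 0),
                    grey_color, color_image)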
Exemplo n.º 26
0
def get_deprojection_position(obj_name='bottle'):
    # Create a pipeline
    pipeline = rs.pipeline()

    #  Create a config and configure the pipeline to stream
    #  different resolutions of color and depth streams
    config = rs.config()
    config.disable_stream(rs.stream.color)
    # Start streaming
    profile = pipeline.start(config)

    # Create an align object
    # rs.align allows us to perform alignment of depth frames to others frames
    # The "align_to" is the stream type to which we plan to align depth frames.
    align_to = rs.stream.color
    align = rs.align(align_to)
    depth_point = [0, 0, 0]
    # Streaming loop
    try:
        while True:
            # Get frameset of color and depth
            frames = pipeline.wait_for_frames()
            # frames.get_depth_frame() is a 640x360 depth image

            # Align the depth frame to color frame
            aligned_frames = align.process(frames)

            # Get aligned frames
            aligned_depth_frame = aligned_frames.get_depth_frame(
            )  # aligned_depth_frame is a 640x480 depth image
            color_frame = aligned_frames.get_color_frame()

            # Validate that both frames are valid
            if not aligned_depth_frame or not color_frame:
                continue

            color_image = np.asanyarray(color_frame.get_data())
            color_image = cv2.cvtColor(color_image, cv2.COLOR_BGR2RGB)

            # objs = get_object(color_image, obj_name)
            objs = []
            res = objectTrack(color_image)  # assumption: track in the current color image ('frame' was undefined)
            objs.append(findPixelLocation(res))  # append in place; list.append() returns None

            if objs:
                obj_depth = aligned_depth_frame.get_distance(
                    objs[-1][0], objs[-1][1])
                print('%s pixel: (%d, %d) depth: %f' %
                      (obj_name, objs[-1][0], objs[-1][1], obj_depth))

                depth_intrin = aligned_depth_frame.profile.as_video_stream_profile(
                ).intrinsics
                # depth_pixel = [depth_intrin.ppx, depth_intrin.ppy]
                depth_point = rs.rs2_deproject_pixel_to_point(
                    depth_intrin, objs[-1], obj_depth)
                # print('xyz:', depth_point)
                print('rotate:', rotate_coordinate(depth_point, rotate_angle))

            cv2.namedWindow('Align Example', cv2.WINDOW_AUTOSIZE)
            cv2.imshow('Align Example', color_image)
            key = cv2.waitKey(1)
            # Press esc or 'q' to close the image window
            if key & 0xFF == ord('q') or key == 27:
                cv2.destroyAllWindows()
                break
    finally:
        pipeline.stop()

    return rotate_coordinate(depth_point, rotate_angle)
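A minimal standalone sketch of the deprojection step used above: given an aligned depth frame and a pixel, rs2_deproject_pixel_to_point returns the 3-D point in the camera coordinate system, in metres:

def deproject(aligned_depth_frame, pixel):
    # pixel is (x, y) in the image the depth frame is aligned to
    depth = aligned_depth_frame.get_distance(pixel[0], pixel[1])
    intrin = aligned_depth_frame.profile.as_video_stream_profile().intrinsics
    return rs.rs2_deproject_pixel_to_point(intrin, [pixel[0], pixel[1]], depth)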
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Mar 21 14:06:40 2021

@author: wouter
"""
import pyrealsense2 as rs
import numpy as np
import cv2
import time
a = 0
k = 0
i = 0
pipeline = rs.pipeline()
config = rs.config()
cx = 0
cy = 0
config.enable_stream(rs.stream.depth, 640, 480, rs.format.z16, 15)
config.enable_stream(rs.stream.color, 640, 480, rs.format.bgr8, 15)
config.enable_stream(rs.stream.infrared, 640, 480, rs.format.y8, 15)

cfg = pipeline.start(config)
dev = cfg.get_device()

colorizer = rs.colorizer()
colorizer.set_option(rs.option.color_scheme, 0)  # color scheme 0; valid values 0-9

depth_sensor = dev.first_depth_sensor()
depth_sensor.set_option(rs.option.visual_preset, 2)  # visual preset 2; valid values 0-5
emitter = depth_sensor.get_option(rs.option.emitter_enabled)
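The script above only reads the current emitter state. A short sketch of toggling the same option on the depth sensor, assuming the device supports it:

if depth_sensor.supports(rs.option.emitter_enabled):
    # 1.0 enables the IR projector, 0.0 disables it
    depth_sensor.set_option(rs.option.emitter_enabled, 0.0 if emitter else 1.0)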
Exemplo n.º 28
0
def main(argv):

    size, file_cnt = read_parameters(argv)

    pipeline = rs.pipeline()
    config = rs.config()

    # Get device product line for setting a supporting resolution
    pipeline_wrapper = rs.pipeline_wrapper(pipeline)
    pipeline_profile = config.resolve(pipeline_wrapper)
    device = pipeline_profile.get_device()

    auto_calibrated_device = rs.auto_calibrated_device(device)

    if not auto_calibrated_device:
        print("The connected device does not support auto calibration")
        return


    config.enable_stream(rs.stream.depth, 256, 144, rs.format.z16, 90)
    conf = pipeline.start(config)
    calib_dev = rs.auto_calibrated_device(conf.get_device())

    def on_chip_calib_cb(progress):
        print(". ")

    while True:
        try:
            operation = input("Please select what the operation you want to do\nc - on chip calibration\nt - tare calibration\ng - get the active calibration\nw - write new calibration\ne - exit\n")

            if operation == 'c':
                print("Starting on chip calibration")
                new_calib, health = calib_dev.run_on_chip_calibration(file_cnt, on_chip_calib_cb, 5000)
                print("Calibration completed")
                print("health factor = ", health)

            if operation == 't':
                print("Starting tare calibration")
                ground_truth = float(input("Please enter ground truth in mm\n"))
                new_calib, health = calib_dev.run_tare_calibration(ground_truth, file_cnt, on_chip_calib_cb, 5000)
                print("Calibration completed")
                print("health factor = ", health)

            if operation == 'g':
                calib = calib_dev.get_calibration_table()
                print("Calibration", calib)

            if operation == 'w':
                print("Writing the new calibration")
                calib_dev.set_calibration_table(new_calib)
                calib_dev.write_calibration()

            if operation == 'e':
                pipeline.stop()
                return

            print("Done\n")
        except Exception as e:
            print(e)
        except:
            print("An unexpected error occurred")
Exemplo n.º 29
0
    corners = np.int0(corners)
    for i in corners:
        x_corner, y_corner = i.ravel()
    return x_corner, y_corner


"""
WRITE SYSTEM PROPERTIES
"""
print("Python Version: " + sys.version)
print("Windows Version: " + platform.platform())
"""
DEFINE STREAM PROPERTIES
"""
#streams
pipeline = rs.pipeline()  #create pipeline
config = rs.config()  #create a configuration
config.enable_stream(rs.stream.color, columns, rows, rs.format.bgr8,
                     f_rate)  #get the color stream
config.enable_stream(rs.stream.depth, columns, rows, rs.format.z16,
                     f_rate)  #get the depth stream
config.enable_stream(rs.stream.infrared, 1, columns, rows, rs.format.y8,
                     f_rate)  #get left IR streams
config.enable_stream(rs.stream.infrared, 2, columns, rows, rs.format.y8,
                     f_rate)  #get right IR streams

# start streaming
profile = pipeline.start(config)

# get the depth sensor scale
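depth_scale = profile.get_device().first_depth_sensor().get_depth_scale()  # assumed: same pattern used in the other examples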
import pyrealsense2 as pyrs
import pathlib


file_path = "./{}/{}".format(data_path, name_path)

#Create directories
if write:
    pathlib.Path(file_path).mkdir(parents=True, exist_ok=True)

fourcc = cv2.VideoWriter_fourcc(*'MJPG')
color_out = cv2.VideoWriter('./{}/{}/color.avi'.format(data_path, name_path), fourcc, 15.0, (640, 480))
depth_out = cv2.VideoWriter('./{}/{}/depth.avi'.format(data_path, name_path), fourcc, 15.0, (640, 480))

#Set up camera
cam = pyrs.pipeline()

config = pyrs.config()
config.enable_stream(pyrs.stream.color, res_col[0], res_col[1], pyrs.format.bgr8, fps)
config.enable_stream(pyrs.stream.depth, res_dep[0], res_dep[1], pyrs.format.z16, fps)

cam.start(config)


index = 0

while True:
    
    #Get frame
    frames = cam.wait_for_frames()
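    # Sketch of how the loop might continue (an assumption, not the author's code):
    # write the color frame and a colour-mapped depth frame to the VideoWriters above
    # (assumes numpy as np and cv2 are imported earlier in the file).
    color_frame = frames.get_color_frame()
    depth_frame = frames.get_depth_frame()
    if not color_frame or not depth_frame:
        continue

    color_image = np.asanyarray(color_frame.get_data())
    depth_image = np.asanyarray(depth_frame.get_data())
    # MJPG cannot store raw 16-bit depth, so scale to 8-bit and apply a colormap first
    depth_vis = cv2.applyColorMap(cv2.convertScaleAbs(depth_image, alpha=0.03), cv2.COLORMAP_JET)

    if write:
        color_out.write(color_image)
        depth_out.write(depth_vis)
    index += 1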
    
Exemplo n.º 31
0
 def start(self):
     # Start streaming
     self.pipeline = rs.pipeline()
     self.pipeline.start(self.config)
     print('pipeline start')
Exemplo n.º 32
0
def main():
    global fx, fy, cx, cy, plane_x, plane_y, plane_z
    # args = build_argparser().parse_args() # wzy change
    parser = ArgumentParser(add_help=False)
    args = parser.add_argument_group('Options')
    args.cpu_extension = False
    args.model = 'frozen_darknet_yolov3_model.xml'
    args.bin = 'frozen_darknet_yolov3_model.bin'
    args.device = 'MYRIAD'
    args.labels = 'frozen_darknet_yolov3_model.mapping'
    args.input = 'cam'
    args.prob_threshold = 0.8
    args.iou_threshold = 0.6
    args.raw_output_message = True
    args.no_show = False

    # ------------- 1. Plugin initialization for specified device and load extensions library if specified -------------
    log.info("Creating Inference Engine...")
    ie = IECore()
    if args.cpu_extension and 'CPU' in args.device:
        ie.add_extension(args.cpu_extension, "CPU")

    # -------------------- 2. Reading the IR generated by the Model Optimizer (.xml and .bin files) --------------------
    log.info("Loading network")
    # wzy change
    #net = ie.read_network(args.model, os.path.splitext(args.model)[0] + ".bin")
    # net = ie.read_network('frozen_darknet_yolov3_model.xml', "frozen_darknet_yolov3_model.bin")
    net = ie.read_network(args.model, args.bin)

    # ---------------------------------- 3. Load CPU extension for support specific layer ------------------------------
    if "CPU" in args.device:
        supported_layers = ie.query_network(net, "CPU")
        not_supported_layers = [
            l for l in net.layers.keys() if l not in supported_layers
        ]
        if len(not_supported_layers) != 0:
            log.error(
                "Following layers are not supported by the plugin for specified device {}:\n {}"
                .format(args.device, ', '.join(not_supported_layers)))
            log.error(
                "Please try to specify cpu extensions library path in sample's command line parameters using -l "
                "or --cpu_extension command line argument")
            sys.exit(1)

    assert len(net.inputs.keys(
    )) == 1, "Sample supports only YOLO V3 based single input topologies"

    # ---------------------------------------------- 4. Preparing inputs -----------------------------------------------

    log.info("Preparing inputs")
    input_blob = next(iter(net.inputs))

    # Default batch_size is 1
    net.batch_size = 1

    # Read and pre-process input images
    n, c, h, w = net.inputs[input_blob].shape

    if args.labels:
        with open(args.labels, 'r') as f:
            labels_map = [x.strip() for x in f]
    else:
        labels_map = None
    # with open('frozen_darknet_yolov3_model.mapping', 'r') as f:
    #     labels_map = [x.strip() for x in f]

    # input_stream = 0 if args.input == "cam" else args.input
    #
    # is_async_mode = True
    # cap = cv2.VideoCapture(input_stream)
    #
    # number_input_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
    # number_input_frames = 1 if number_input_frames != -1 and number_input_frames < 0 else number_input_frames
    #
    # wait_key_code = 1
    #
    # # Number of frames in picture is 1 and this will be read in cycle. Sync mode is default value for this case
    # if number_input_frames != 1:
    #     ret, frame = cap.read()
    # else:
    #     is_async_mode = False
    #     wait_key_code = 0

    # ----------------------------------------- 5. Loading model to the plugin -----------------------------------------
    log.info("Loading model to the plugin")
    exec_net = ie.load_network(network=net,
                               num_requests=2,
                               device_name=args.device)

    cur_request_id = 0
    next_request_id = 1
    render_time = 0
    parsing_time = 0

    # ----------------------------------------------5.1 realsense input video stream -----------------------------------
    # Configure depth and color streams
    pipeline = rs.pipeline()
    # Create the config object:
    config = rs.config()
    config.enable_stream(rs.stream.color, 640, 480, rs.format.bgr8, 30)

    # Start streaming
    pipeline.start(config)
    get_img_flag = False
    count = 0
    while not (get_img_flag and count > 10):  # grab several frames; the first few are not sharp
        # Wait for a coherent pair of frames: depth and color
        frames = pipeline.wait_for_frames()
        print('wait for frames in the first loop')
        get_color_frame = frames.get_color_frame()

        if not get_color_frame:  # if no color image was obtained, keep trying
            continue

        color_frame = np.asanyarray(get_color_frame.get_data())
        get_img_flag = True  # exit the loop
        count += 1
    # ----------------------------------------------- 6. Doing inference -----------------------------------------------
    log.info("Starting inference...")
    print(
        "To close the application, press 'CTRL+C' here or switch to the output window and press ESC key"
    )
    print(
        "To switch between sync/async modes, press TAB key in the output window"
    )
    is_async_mode = True
    try:
        while True:
            # Here is the first asynchronous point: in the Async mode, we capture frame to populate the NEXT infer request
            # in the regular mode, we capture frame to the CURRENT infer request

            # >>>>>>>>>>>  calculate time sum >>>>>>>>>>>>>>>#
            cpu_start = time()
            #    print('--------------------------new loop---------------------------------')
            # if is_async_mode:
            #     ret, next_frame = cap.read()
            # else:
            #     ret, frame = cap.read()
            #
            # if not ret:
            #     break
            # Wait for a coherent pair of frames: depth and color
            next_frames = pipeline.wait_for_frames()

            get_next_color_frame = next_frames.get_color_frame()
            next_color_frame = np.asanyarray(get_next_color_frame.get_data())
            # cv2.imshow("color", next_color_frame)

            if is_async_mode:
                request_id = next_request_id
                in_frame = cv2.resize(color_frame, (w, h))
            else:
                request_id = cur_request_id
                in_frame = cv2.resize(color_frame, (w, h))

            # resize input_frame to network size
            in_frame = in_frame.transpose(
                (2, 0, 1))  # Change data layout from HWC to CHW
            in_frame = in_frame.reshape((n, c, h, w))

            # Start inference
            start_time = time()
            exec_net.start_async(request_id=request_id,
                                 inputs={input_blob: in_frame})
            det_time = time() - start_time

            # Collecting object detection results
            objects = list()
            if exec_net.requests[cur_request_id].wait(-1) == 0:
                output = exec_net.requests[cur_request_id].outputs
                start_time = time()
                for layer_name, out_blob in output.items():
                    out_blob = out_blob.reshape(
                        net.layers[net.layers[layer_name].parents[0]].shape)
                    layer_params = YoloParams(net.layers[layer_name].params,
                                              out_blob.shape[2])
                    # log.info("Layer {} parameters: ".format(layer_name))
                    layer_params.log_params()
                    objects += parse_yolo_region(out_blob, in_frame.shape[2:],
                                                 next_color_frame.shape[:-1],
                                                 layer_params,
                                                 args.prob_threshold)
                parsing_time = time() - start_time

            # Filtering overlapping boxes with respect to the --iou_threshold CLI parameter
            objects = sorted(objects,
                             key=lambda obj: obj['confidence'],
                             reverse=True)
            for i in range(len(objects)):
                if objects[i]['confidence'] == 0:
                    continue
                for j in range(i + 1, len(objects)):
                    if intersection_over_union(
                            objects[i], objects[j]) > args.iou_threshold:
                        objects[j]['confidence'] = 0

            # Drawing objects with respect to the --prob_threshold CLI parameter
            objects = [
                obj for obj in objects
                if obj['confidence'] >= args.prob_threshold
            ]

            #if len(objects) and args.raw_output_message:
            # log.info("\nDetected boxes for batch {}:".format(1))
            # log.info(" Class ID | Confidence | XMIN | YMIN | XMAX | YMAX | COLOR ")

            origin_im_size = color_frame.shape[:-1]

            count = 1

            for obj in objects:
                #print('for obj count:')
                #print(count)
                count = count + 1
                # Validation bbox of detected object
                if obj['xmax'] > origin_im_size[1] or obj[
                        'ymax'] > origin_im_size[0] or obj['xmin'] < 0 or obj[
                            'ymin'] < 0:
                    continue
                # color = (int(min(obj['class_id'] * 12.5, 255)),
                #          min(obj['class_id'] * 7, 255), min(obj['class_id'] * 5, 255))
                color = (100, 100, 150)
                det_label = labels_map[obj['class_id']] if labels_map and len(labels_map) >= obj['class_id'] else \
                    str(obj['class_id'])

                # if args.raw_output_message:
                #  log.info(
                #      "{:^9} | {:10f} | {:4} | {:4} | {:4} | {:4} | {} ".format(det_label, obj['confidence'], obj['xmin'],
                #                                                             obj['ymin'], obj['xmax'], obj['ymax'],
                #                                                           color))

                cv2.rectangle(color_frame, (obj['xmin'], obj['ymin']),
                              (obj['xmax'], obj['ymax']), color, 2)
                cv2.putText(
                    color_frame, "#" + str(obj['class_id']) + ' ' +
                    str(round(obj['confidence'] * 100, 1)) + ' %',
                    (obj['xmin'], obj['ymin'] - 7), cv2.FONT_HERSHEY_COMPLEX,
                    0.6, color, 1)

            # print("obj['class_id']: ", obj['class_id'])

            send_data_byte = bytes(0)
            if len(objects) == 0:
                pickup_leftup_rightdown_corner = [
                    -1, 0, 0, 0, 0, cpu_start, -1
                ]
                for i in range(len(pickup_leftup_rightdown_corner)):
                    #print(pickup_leftup_rightdown_corner[i])
                    pickup_senddata = str(
                        pickup_leftup_rightdown_corner[i]) + ','
                    # print(pickup_senddata.encode())
                    send_data_byte += pickup_senddata.encode()
                    # print(send_data_byte)
                conn.send(send_data_byte)
            else:
                objects = sorted(
                    objects, key=lambda obj: obj['class_id'],
                    reverse=False)  # sort by class_id in ascending order
                obj = objects[0]
                target_leftup_rightdown_corner = [
                    1, obj['xmin'], obj['ymin'], obj['xmax'], obj['ymax'],
                    cpu_start, obj['class_id']
                ]
                for i in range(len(target_leftup_rightdown_corner)):
                    target_senddata = str(
                        target_leftup_rightdown_corner[i]) + ','
                    send_data_byte += target_senddata.encode()
                conn.send(send_data_byte)

            # Draw performance stats over frame
            inf_time_message = "Inference time: N\A for async mode" if is_async_mode else \
                "Inference time: {:.3f} ms".format(det_time * 1e3)
            render_time_message = "OpenCV rendering time: {:.3f} ms".format(
                render_time * 1e3)
            async_mode_message = "Async mode is on. Processing request {}".format(cur_request_id) if is_async_mode else \
                "Async mode is off. Processing request {}".format(cur_request_id)
            parsing_message = "YOLO parsing time is {:.3f} ms".format(
                parsing_time * 1e3)

            cv2.putText(color_frame, inf_time_message, (15, 15),
                        cv2.FONT_HERSHEY_COMPLEX, 0.5, (200, 10, 10), 1)
            cv2.putText(color_frame, render_time_message, (15, 45),
                        cv2.FONT_HERSHEY_COMPLEX, 0.5, (10, 10, 200), 1)
            cv2.putText(color_frame, async_mode_message,
                        (10, int(origin_im_size[0] - 20)),
                        cv2.FONT_HERSHEY_COMPLEX, 0.5, (10, 10, 200), 1)
            cv2.putText(color_frame, parsing_message, (15, 30),
                        cv2.FONT_HERSHEY_COMPLEX, 0.5, (10, 10, 200), 1)

            start_time = time()
            if not args.no_show:
                cv2.imshow("DetectionResults", color_frame)

            render_time = time() - start_time

            if is_async_mode:
                cur_request_id, next_request_id = next_request_id, cur_request_id
                color_frame = next_color_frame

            if not args.no_show:
                #key = cv2.waitKey(wait_key_code)
                key = cv2.waitKey(1)

                # ESC key
                if key == 27:
                    break
                # Tab key
                if key == 9:
                    exec_net.requests[cur_request_id].wait()
                    is_async_mode = not is_async_mode
                #  log.info("Switched to {} mode".format("async" if is_async_mode else "sync"))

            cpu_end = time()
            #print('>>>>>>>>>>>>>>>>>>>>>>>>>>cpu time :  ', cpu_end-cpu_start)
    finally:
        pipeline.stop()
    cv2.destroyAllWindows()
Exemplo n.º 33
0
def process_frames(rgb_dir, dpt_dir, dpt_raw_dir, rgb_itc_dir, dpt_itc_dir,
                   lbl_dir):
    """
    reads color and depth frame
    aligns the frames to the color frame
    writes depth frame, color frame and intrinsics to files
    :param lbl_dir: directory to save label file to
    :param dpt_raw_dir: directory to save unaligned depth frames to
    :param rgb_dir: directory to save color frames to
    :param dpt_dir: directory to save depth frames to
    :param rgb_itc_dir: directory to save color intrinsics to
    :param dpt_itc_dir: directory to save depth intrinsics to
    """

    prefix = args.bag_file.split(".")[0]  # name of file without extension
    index_file = open(args.directory + "/" + "all.txt", "a")

    try:
        config = rs.config()
        print(args.bag_directory + args.bag_file)
        rs.config.enable_device_from_file(config,
                                          args.bag_directory + args.bag_file,
                                          repeat_playback=False)
        pipeline = rs.pipeline()
        config.enable_stream(rs.stream.depth)
        config.enable_stream(rs.stream.color)
        profile = pipeline.start(config)
        align_to = rs.stream.color
        align = rs.align(align_to)

        rgb_profile = profile.get_stream(rs.stream.color)
        dpt_profile = profile.get_stream(rs.stream.depth)
        rgb_intrinsics = rgb_profile.as_video_stream_profile().get_intrinsics()
        dpt_intrinsics = dpt_profile.as_video_stream_profile().get_intrinsics()

        i = 0
        while True:
            frames = pipeline.wait_for_frames()
            dpt_raw_frame = frames.get_depth_frame()
            aligned_frames = align.process(frames)

            bgr_frame = aligned_frames.get_color_frame()
            dpt_frame = aligned_frames.get_depth_frame()

            bgr_image = np.asanyarray(bgr_frame.get_data())
            rgb_image = cv2.cvtColor(bgr_image, cv2.COLOR_RGB2BGR)
            dpt_image = np.asanyarray(dpt_frame.get_data())
            dpt_raw_image = np.asanyarray(dpt_raw_frame.get_data())

            print("Saving frame:", i)

            rgb_file = rgb_dir + "/" + prefix + "_" + str(i).zfill(6) + ".jpg"
            dpt_file = dpt_dir + "/" + prefix + "_" + str(i).zfill(6) + ".png"
            dpt_raw_file = dpt_raw_dir + "/" + prefix + "_" + str(i).zfill(
                6) + ".png"
            rgb_itc_file = rgb_itc_dir + "/" + prefix + "_" + str(i).zfill(
                6) + ".json"
            dpt_itc_file = dpt_itc_dir + "/" + prefix + "_" + str(i).zfill(
                6) + ".json"
            lbl_file = lbl_dir + "/" + prefix + "_" + str(i).zfill(6) + ".json"
            cv2.imwrite(rgb_file, rgb_image)
            cv2.imwrite(dpt_file, dpt_image)
            cv2.imwrite(dpt_raw_file, dpt_raw_image)

            write_intrinsics(rgb_intrinsics, rgb_itc_file)
            write_intrinsics(dpt_intrinsics, dpt_itc_file)
            index_file.write(
                strip(rgb_file) + " " + strip(dpt_file) + " " +
                strip(rgb_itc_file) + " " + strip(dpt_itc_file) + " " +
                strip(lbl_file) + " " + strip(dpt_raw_file) + "\n")

            time.sleep(1.000)  # don't use all frames
            i += 1
    finally:
        index_file.close()
        pass
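write_intrinsics is referenced above but not shown in this example. A sketch of what such a helper might look like (a hypothetical implementation, serialising the public fields of a pyrealsense2 intrinsics object to JSON):

import json


def write_intrinsics(intrinsics, path):
    # Dump the rs.intrinsics fields to a JSON file
    data = {
        "width": intrinsics.width,
        "height": intrinsics.height,
        "ppx": intrinsics.ppx,
        "ppy": intrinsics.ppy,
        "fx": intrinsics.fx,
        "fy": intrinsics.fy,
        "model": str(intrinsics.model),
        "coeffs": list(intrinsics.coeffs),
    }
    with open(path, "w") as f:
        json.dump(data, f, indent=2)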
Exemplo n.º 34
0
    def run(self):
        try:
            machien = {'id': 1, 'ip': 'http://192.168.0.10'}
            response_machien = requests.put(SERVER_URL + "/myfarm/register/ip",
                                            data=machien)

            print(f" machien ip set server : {response_machien.status_code}")

            # GUI code
            while True:
                response_user = requests.get(SERVER_URL + "/user/info/3")

                print(f" user data : {response_user.status_code}")

                if response_user.status_code == 200:
                    break
                time.sleep(5)

            # while True :
            # time.sleep(1)
            # print("exist check ...")
            # pipeline.start()
            # frames = pipeline.wait_for_frames()
            # color_frame = frames.get_color_frame()
            # color_image = np.asarray(color_frame.get_data())

            ## take only the location of the mushroom pot -> 1/3 * width, 1/2 * height
            # recent_image = color_image[100:350, 290:550]
            # check_image = cv2.imread('./check.jpg')[100:350, 290:550]
            # cv2.imwrite('./rec.jpg',check_image)
            # cv2.imwrite('./recent.jpg',recent_image)
            # hist_recent = cv2.calcHist(recent_image, [0,1], None, [180,256], [0,180,0,256])
            # hist_check = cv2.calcHist(check_image, [0,1], None, [180,256], [0,180,0,256])
            # number = cv2.compareHist(hist_recent, hist_check, cv2.HISTCMP_CORREL)

            # print(number)
            # pipeline.stop()
            # if number > 0.4:
            # print('Not exist')

            # else:
            #            check that the substrate has been loaded
            # print("Exist !!")
            # break

            while self.isRun:
                params = {'pin': PIN}
                response = requests.get(SERVER_URL + '/farm/exist',
                                        params=params)

                if response.status_code == 200:
                    prg_id = response.text
                    break
                else:
                    print("Not prg")
                time.sleep(5)  # avoid HTTP 429 errors

            params = {'id': prg_id, 'type': 'custom'}
            response = requests.get(SERVER_URL + '/farm/data', params=params)
            result = json.loads(response.text)
            water_num = result['water']
            temp_array = result['temperature']
            hum_array = result['humidity']

            params_status = {'id': "1", "status": "true"}
            response_status = requests.put(SERVER_URL + '/myfarm/status',
                                           params=params_status)

            print(f"machien status : {response_status.status_code}")

            data_len = len(temp_array)

            data = [None] * data_len

            for i in range(0, data_len):
                temp_array[i] = str(temp_array[i]['setting_value'])
                hum_array[i] = str(hum_array[i]['setting_value'])
                data[i] = R + C + temp_array[i] + S + hum_array[i]

            print(f"water_num : {water_num}")
            print(f"temp_array : {temp_array}")
            print(f"hum_array : {hum_array}")
            print(f"total_data : {data}")

            # return True if data else False

            # HOUR * 24 / water_num
            WATER_TIME = HOUR * 24 / water_num

            serial_send_len = 0
            hour = 0  # elapsed time, converted to seconds
            # WATER_TIME
            water_time = 10000  # watering interval, recomputed once the value arrives
            # MOTER_TIME
            moter_time = MOTER_TIME
            picTime = 100000
            pipeline_check = False  # guard so two pipelines are never started at once

            now = datetime.datetime.now()
            Arduino = serial.Serial(port='COM5', baudrate=9600)
            print(f"data success : {data}")

            seconds = 0
            dt2 = None
            loadTime = 0
            hum = 0
            temp = 0

            while self.isRun:
                dt1 = datetime.datetime.now()
                result = dt1 - now
                seconds = int(result.total_seconds()) - loadTime

                print(
                    f"Python hour : {hour}, water_time : {water_time} moter_time : {moter_time} image : {picTime}"
                )
                print(f"Python seconds : {seconds}")

                if Arduino.readable():
                    LINE = Arduino.readline()
                    code = str(LINE.decode().replace('\n', ''))
                    print(code)
                    hum = code[18:20]
                    temp = code[38:40]
                    # socket_data(temp, hum)
                    self.signal.emit(str(temp), str(hum))
                if seconds - 2 <= hour <= seconds + 2:

                    if len(data) - 1 < serial_send_len:
                        response_status_prg = requests.put(
                            SERVER_URL + f'/farm/end?id={prg_id}')
                        print(
                            f"prg status : {response_status_prg.status_code}")

                        params_status = {'id': "1", "status": "false"}
                        response_status = requests.put(SERVER_URL +
                                                       '/myfarm/status',
                                                       params=params_status)
                        print(
                            f"machien status : {response_status.status_code}")
                        break

                    Arduino.write(data[serial_send_len].encode('utf-8'))
                    req_data_humi_temp = {
                        'prgId': prg_id,
                        'tempValue': temp,
                        'humiValue': hum
                    }
                    humi_temp_res = requests.post(SERVER_URL + "/data/add",
                                                  data=req_data_humi_temp)
                    print(f"Python res_temp : {humi_temp_res.status_code}")
                    serial_send_len += 1
                    hour += DAY

                if seconds - 2 <= water_time <= seconds + 2:
                    Arduino.write(WATER_ORDER.encode('utf-8'))
                    dt2 = datetime.datetime.now()

                    while Arduino.readable():
                        LINE = Arduino.readline()
                        code = str(LINE.decode().replace('\n', ''))
                        print(code)
                        if code[0:3] == 'end':
                            loadTime += int((datetime.datetime.now() -
                                             dt2).total_seconds())
                            break

                    water_time += WATER_TIME

                if seconds - 2 <= moter_time - HOUR / 3 <= seconds + 2:

                    if pipeline_check == False:
                        pipeline_check = True
                        Arduino.write(MOTER_ORDER.encode('utf-8'))
                        dt2 = datetime.datetime.now()

                        # 3d config
                        device = rs.context().query_devices()[0]
                        advnc_mode = rs.rs400_advanced_mode(device)
                        depth_table_control_group = advnc_mode.get_depth_table(
                        )
                        depth_table_control_group.disparityShift = 60
                        depth_table_control_group.depthClampMax = 4000
                        depth_table_control_group.depthClampMin = 0
                        advnc_mode.set_depth_table(depth_table_control_group)

                        pipeline1 = rs.pipeline()
                        config1 = rs.config()

                        config1.enable_stream(rs.stream.depth, rs.format.z16,
                                              30)
                        config1.enable_stream(rs.stream.color, rs.format.bgr8,
                                              30)

                        # Start streaming
                        profile1 = pipeline1.start(config1)
                        # Get stream profile and camera intrinsics
                        profile = pipeline1.get_active_profile()
                        depth_profile = rs.video_stream_profile(
                            profile1.get_stream(rs.stream.depth))
                        pc = rs.pointcloud()
                        decimate = rs.decimation_filter()
                        decimate.set_option(rs.option.filter_magnitude, 1)

                        file_name_3d = [0, 90, 180, 270]
                        file_names_3d = [
                            './3dScan{}.ply'.format(i) for i in file_name_3d
                        ]

                        i = 0

                        while Arduino.readable():
                            LINE = Arduino.readline()
                            code = str(LINE.decode().replace('\n', ''))
                            print(code)
                            print(f"i : {i}")
                            if code[0:3] == 'end':
                                loadTime += int((datetime.datetime.now() -
                                                 dt2).total_seconds())
                                break
                            take_3Dpicture(file_names_3d[i], pipeline1,
                                           decimate, pc)
                            i += 0 if i >= 3 else 1

                        files = {'ply': open(file_names_3d[0], 'rb')}
                        req_data_3d = [("machineid", 1)]
                        res_3d = requests.post(SERVER_URL + "/upload/ply",
                                               files=files,
                                               data=req_data_3d)
                        print(f"Python res_3d : {res_3d.status_code}")

                        pipeline1.stop()
                        moter_time += MOTER_TIME
                        pipeline_check = False
                    else:
                        moter_time += 50

                if seconds - 2 <= picTime + 30 <= seconds + 2:

                    if pipeline_check == False:
                        pipeline_check = True
                        profile = pipeline.start(config)

                        depth_sensor = profile.get_device().first_depth_sensor(
                        )

                        frames = pipeline.wait_for_frames()
                        color_frame = frames.get_color_frame()

                        # Convert images to numpy arrays
                        color_image = np.asanyarray(color_frame.get_data())
                        cv2.imwrite('./color_sensor.jpg', color_image)

                        files = {
                            'compost':
                            ('compost.jpg', open('./color_sensor.jpg', 'rb'))
                        }
                        data1 = [('machineid', 1)]
                        response_image = requests.post(SERVER_URL +
                                                       "/upload/compost",
                                                       files=files,
                                                       data=data1)
                        print(
                            f"Python res_image : {response_image.status_code}")
                        pipeline.stop()
                        picTime += D2_TIME

                        # Turning moter

                        #  MUSHROOM DETECTION , requeset
                        frames = pipeline.wait_for_frames()
                        color_frame = frames.get_color_frame()
                        color_image = np.asanyarray(color_frame.get_data())
                        # detections = detection(color_image)
                        # boxes = get_shiitake_location(detections['detection_boxes'], detections['detection_classes'],
                        #                               0.5)
                        # print(boxes)
                        pipeline_check = False
                        pipeline.stop()

                        # for box in boxes:
                        #     size = get_size(box)
                        #     res = make_data(52, box, size)
                        #     print(res)
                    else:
                        picTime += 50

        except Exception:
            self.errors.emit(100)
            self.isRun = False
Exemplo n.º 35
0
#####################################################
##                  Export to PLY                  ##
#####################################################

# First import the library
import pyrealsense2 as rs


# Declare pointcloud object, for calculating pointclouds and texture mappings
pc = rs.pointcloud()
# We want the points object to be persistent so we can display the last cloud when a frame drops
points = rs.points()

# Declare RealSense pipeline, encapsulating the actual device and sensors
pipe = rs.pipeline()
#Start streaming with default recommended configuration
pipe.start()

try:
    # Wait for the next set of frames from the camera
    frames = pipe.wait_for_frames()

    # Fetch color and depth frames
    depth = frames.get_depth_frame()
    color = frames.get_color_frame()

    # Tell pointcloud object to map to this color frame
    pc.map_to(color)

    # Generate the pointcloud and texture mappings
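    # Sketch of the typical continuation (an assumption based on the standard export-to-ply example):
    points = pc.calculate(depth)           # generate the point cloud from the depth frame
    print("Saving to 1.ply...")
    points.export_to_ply("1.ply", color)   # write vertices and texture mapped to the color frame
    print("Done")
finally:
    # Stop the pipeline whether or not the export succeeded
    pipe.stop()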
Exemplo n.º 36
0
def start_pipeline(gui: bool):
    save_path = "./out/cameraparams/"
    pipeline = rs.pipeline()
    config = rs.config()

    config.enable_stream(rs.stream.depth, 640, 480, rs.format.z16, 30)
    config.enable_stream(rs.stream.color, 640, 480, rs.format.rgb8, 30)
    pipeline.start(config)
    print('[INFO] Starting pipeline')

    profile = pipeline.get_active_profile()
    # sensor = profile.get_device().first_roi_sensor()
    # roi = sensor.get_region_of_interest()
    # sensor.set_region_of_interest(roi)
    # profile = pipeline.start(config)
    depth_profile = rs.video_stream_profile(profile.get_stream(
        rs.stream.depth))
    color_profile = rs.video_stream_profile(profile.get_stream(
        rs.stream.color))

    depth_intrinsics = depth_profile.get_intrinsics()
    color_intrinsics = color_profile.get_intrinsics()

    depth_extrinsics = depth_profile.get_extrinsics_to(color_profile)
    color_extrinsics = color_profile.get_extrinsics_to(depth_profile)
    print("- depth extrinsics:", depth_extrinsics)
    print("- color extrinsics:", color_extrinsics)

    color_profile = rs.video_stream_profile(profile.get_stream(
        rs.stream.color))
    color_intrinsics = color_profile.get_intrinsics()
    color_extrinsics = color_profile.get_extrinsics_to(depth_profile)

    # Identifying depth scaling factor
    depth_sensor = profile.get_device().first_depth_sensor()
    depth_scale = depth_sensor.get_depth_scale()

    clipping_distance_in_meters = 1
    clipping_distance = clipping_distance_in_meters / depth_scale

    pc = rs.pointcloud()
    colorizer = rs.colorizer()

    align_to = rs.stream.color
    align = rs.align(align_to)
    frame_counter = 0
    try:
        print("[INFO] Starting pipeline stream with frame {:d}".format(
            frame_counter))
        time.sleep(
            1
        )  # Giving a second to auto exposure, important when gui isn't used
        while True:
            frames = pipeline.wait_for_frames()
            # depth_frame = frames.get_depth_frame()
            non_aligned_color_frame = frames.get_color_frame()

            # Aligning color and depth frames, aligned frames are
            # getting re-sampled from the original one, so they are
            # not exactly the same points.
            aligned_frames = align.process(frames)
            depth_frame = aligned_frames.get_depth_frame()
            color_frame = aligned_frames.get_color_frame()

            depth_colormap = np.asanyarray(
                colorizer.colorize(depth_frame).get_data())
            color_image = np.asanyarray(non_aligned_color_frame.get_data())
            color_image_from_depth = np.asanyarray(color_frame.get_data())

            pts = pc.calculate(depth_frame)
            pc.map_to(color_frame)
            if not depth_frame or not color_frame:
                print("No depth or color frame, try again...")
                continue

            depth_image = np.asanyarray(depth_frame.get_data())
            # np.savetxt("out/depth.csv", depth_image, delimiter=",")
            if gui:
                images = np.hstack(
                    (cv2.cvtColor(color_image,
                                  cv2.COLOR_BGR2RGB), depth_colormap))
                # cv2.cvtColor(color_image, cv2.COLOR_BGR2RGBA)
                # cv2.imwrite(os.path.join(save_path, "{:d}.color.png".format(frame_counter)),
                #             cv2.cvtColor(color_image, cv2.COLOR_RGB2BGR))

                cv2.imshow(str(frame_counter), images)
                key = cv2.waitKey(1)
                if key == ord("d"):
                    cv2.destroyAllWindows()
                    print(
                        "[rs_pipeline] Running landmark detection on a color image..."
                    )
                    landmarks2d = landmark_detector(color_image)
                    landmarks3d = pixels_to_landmarks(color_intrinsics,
                                                      color_frame, depth_frame,
                                                      depth_scale)
                    print(
                        "[rs_pipeline] Computing point cloud mesh elements...")
                    vert, col, face = create_pc_mesh(
                        intrinsics=color_intrinsics,
                        points=pts,
                        color_frame=color_frame,
                        depth_frame=depth_frame,
                        depth_scale=depth_scale)

                    print(
                        "[INFO] Everything's ready, passing back to Thrift...")
                    return color_image, landmarks2d, landmarks3d, [
                        vert, col, face
                    ]
                if key == ord("q"):
                    break
            else:

                print(
                    "[rs_pipeline] Running landmark detection on a color image..."
                )
                landmarks2d = landmark_detector(color_image)
                landmarks3d = pixels_to_landmarks(color_intrinsics,
                                                  color_frame, depth_frame,
                                                  depth_scale)
                print("[rs_pipeline] Computing point cloud mesh elements...")
                vert, col, face = create_pc_mesh(intrinsics=color_intrinsics,
                                                 points=pts,
                                                 color_frame=color_frame,
                                                 depth_frame=depth_frame,
                                                 depth_scale=depth_scale)

                print("[INFO] Everything's ready, passing back to Thrift...")
                return color_image, landmarks2d, landmarks3d, [vert, col, face]
    except Exception as ex:
        print(ex)
    finally:
        pipeline.stop()