Example #1
import pyrealsense2 as rs

pipeline = rs.pipeline()
config = rs.config()
config.enable_stream(rs.stream.depth, 640, 480, rs.format.z16, 30)
config.enable_stream(rs.stream.color, 640, 480, rs.format.bgr8, 30)

# Start streaming
pipeline.start(config)

# Get stream profile and camera intrinsics
profile = pipeline.get_active_profile()
depth_profile = rs.video_stream_profile(profile.get_stream(rs.stream.depth))
depth_intrinsics = depth_profile.get_intrinsics()

color_pro = rs.video_stream_profile(profile.get_stream(rs.stream.color))
color_int = color_pro.get_intrinsics()

print(depth_intrinsics)
print(color_int)
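
The intrinsics printed above can be passed straight to librealsense's deprojection helper. A minimal sketch, assuming the pipeline above is still streaming, that converts the centre depth pixel into a 3-D point in camera coordinates:

# Hedged sketch: deproject the centre pixel of the depth image to a 3-D point.
frames = pipeline.wait_for_frames()
depth_frame = frames.get_depth_frame()
if depth_frame:
    u, v = depth_intrinsics.width // 2, depth_intrinsics.height // 2
    dist = depth_frame.get_distance(u, v)  # metres
    point = rs.rs2_deproject_pixel_to_point(depth_intrinsics, [u, v], dist)
    print("3-D point at image centre:", point)

pipeline.stop()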
Example #2
# Configuring streams at different rates
# Accelerometer available FPS: {63, 250}Hz
config.enable_stream(rs.stream.accel, rs.format.motion_xyz32f, 250)
# Gyroscope available FPS: {200,400}Hz
config.enable_stream(rs.stream.gyro, rs.format.motion_xyz32f, 200)
# enabling depth stream
config.enable_stream(rs.stream.depth, 1280, 720, rs.format.z16, 30)
# enabling color stream
config.enable_stream(rs.stream.color, 1280, 720, rs.format.bgr8, 30)

# Start streaming
pipeline.start(config)

# Get stream profile and camera intrinsics
profile = pipeline.get_active_profile()
depth_profile = rs.video_stream_profile(profile.get_stream(rs.stream.depth))
depth_intrinsics = depth_profile.get_intrinsics()
w, h = depth_intrinsics.width, depth_intrinsics.height

# Processing blocks
pc = rs.pointcloud()
decimate = rs.decimation_filter()
decimate.set_option(rs.option.filter_magnitude, 2**state.decimate)
colorizer = rs.colorizer()

# used to prevent false camera rotation
first = True


def mouse_cb(event, x, y, flags, param):
Example #3
    def capture_data(self):
        """
        """
        pipeline = rs2.pipeline()

        # Start streaming
        pipeline.start(config)
        profile = pipeline.get_active_profile()

        depth_sensor = profile.get_device().first_depth_sensor()
        depth_scale = depth_sensor.get_depth_scale()

        depth_profile = rs2.video_stream_profile(profile.get_stream(rs2.stream.depth))
        depth_intrinsics = depth_profile.get_intrinsics()
        w, h = depth_intrinsics.width, depth_intrinsics.height

        # Processing blocks
        pc = rs2.pointcloud()
        decimate = rs2.decimation_filter()
        colorizer = rs2.colorizer()
        filters = [rs2.disparity_transform(),
                rs2.spatial_filter(),
                rs2.temporal_filter(),
                rs2.disparity_transform(False)]
        # Create a context object. This object owns the handles to all connected realsense devices
        success, frames = pipeline.try_wait_for_frames(timeout_ms=100)
        if not success:
            print("exiting")
            return
        depth_frame = frames.get_depth_frame()
        other_frame = frames.first(other_stream)

        depth_frame = decimate.process(depth_frame)

        # Grab new intrinsics (may be changed by decimation)
        depth_intrinsics = rs2.video_stream_profile(
            depth_frame.profile).get_intrinsics()
        w, h = depth_intrinsics.width, depth_intrinsics.height

        depth_frame = decimate.process(depth_frame)

        for f in filters:
            depth_frame = f.process(depth_frame)

        # Create opencv window to render image in
        cv2.namedWindow("Depth Stream", cv2.WINDOW_AUTOSIZE)

        # Streaming loop
        while True:
            # Get frameset of depth
            frames = pipeline.wait_for_frames()

            # Get depth frame
            depth_frame = frames.get_depth_frame()
            depth_data = frames.get_data()
            #distance in meters
            distance1 = depth_frame.get_distance(200,500)
            distance2 = depth_frame.get_distance(600,500)

            depth_intrinsics = rs2.video_stream_profile(
                depth_frame.profile).get_intrinsics()
            
            deproject1 = rs2.rs2_deproject_pixel_to_point(depth_intrinsics, [200, 500], distance1)
            deproject2 = rs2.rs2_deproject_pixel_to_point(depth_intrinsics, [600, 500], distance2)


            #print(deproject1)
            #print(deproject2)
            #depr = rs2.rs2_deproject_pixel_to_point((500,500), distance)
            #print(depr)
            #print(distance1)
            #print(distance2)

            dist = self.euclid_dist(deproject1, deproject2)
            print(dist)
            # Colorize depth frame to jet colormap
            depth_color_frame = rs2.colorizer().colorize(depth_frame)

            # Convert depth_frame to numpy array to render image in opencv
            depth_color_image = np.asanyarray(depth_color_frame.get_data())

            # Render image in opencv window
            cv2.imshow("Depth Stream", depth_color_image)
            key = cv2.waitKey(1)
            # if pressed escape exit program
            if key == 27:
                cv2.destroyAllWindows()
                break
        print("goodbye")
Example #4
def main():
    try:
        #set the output serial property for create
        ser_port = serial.Serial("/dev/ttyUSB0", baudrate=57600, timeout=0.5)
        print("Serial port set!")
    except:
        print("Serial port not connected")
        quit()

    # configure TCP port
    TCP_IP = get_ip()
    print('Raspberry Pi address: ' + TCP_IP)
    TCP_PORT = 8865
    BUFFER_SIZE = 32
    debug = False
    serialLock = _thread.allocate_lock()

    # start the socket
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    s.bind((TCP_IP, TCP_PORT))
    s.listen(1)
    print("Waiting for connection with Host...")

    conn, addr = s.accept()
    Host_IP = addr[0]
    print('Host Computer address: ' + Host_IP)

    #configure UDP ports
    Dist_UDP_Port = 8833
    Tag_UDP_Port = 8844

    s_Dist = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    s_Tag = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)

    # Configure depth and color streams
    print("waiting for camera...")
    pipeline = rs.pipeline()
    config = rs.config()
    config.enable_stream(rs.stream.color, 640, 480, rs.format.bgr8, 30)
    config.enable_stream(rs.stream.depth, 640, 480, rs.format.z16, 30)

    # Start camera streaming
    try:
        pipeline.start(config)
        print("Camera started and streaming!")
        camera = True
    except:
        print("Camera not connected!")
        camera = False

    #align depth and color images
    align_to = rs.stream.color
    align = rs.align(align_to)

    #Get the camera parameters from the RealSense
    if camera == True:
        profile = pipeline.get_active_profile()
        rgb_profile = rs.video_stream_profile(
            profile.get_stream(rs.stream.color))
        rgb_intrinsics = rgb_profile.get_intrinsics()
        params = [rgb_intrinsics.fx, rgb_intrinsics.fy, 0, 0]

    start = time.time()

    try:

        print("Ready for Commands!")
        while 1:

            print(time.time() - start)

            #calculate and broadcast distance and tag
            if camera == True:

                #print("Waiting for frames from camera...")
                frames = pipeline.wait_for_frames()
                #align frames
                aligned_frames = align.process(frames)
                color_frame = aligned_frames.get_color_frame()
                depth_frame = aligned_frames.get_depth_frame()

                if not color_frame or not depth_frame:
                    continue

                num_points = 9  #(data[4])
                height = 20  #(data[5:])

                dist_data = get_distance(depth_frame, int(num_points),
                                         int(height))
                s_Dist.sendto(str.encode(dist_data), (Host_IP, Dist_UDP_Port))

                tag_data = get_tag(color_frame, depth_frame, params)
                s_Tag.sendto(str.encode(tag_data), (Host_IP, Tag_UDP_Port))

            else:
                s_Dist.sendto(bytes([99]), (Host_IP, Dist_UDP_Port))
                s_Tag.sendto(bytes([99]), (Host_IP, Tag_UDP_Port))

            # check for command from the host
            command = b''
            dataAvail = select.select([conn], [], [], 0)[0]
            if dataAvail:
                command = (conn.recv(BUFFER_SIZE))

                if command == b'color':
                    # Case where the host computer asks the current image from the camera
                    if camera == False:
                        print("No camera connected, cannot call this function")
                        conn.send(bytes([99]))
                        continue

                    color_image = np.asanyarray(color_frame.get_data())
                    pil_image = Image.fromarray(color_image)
                    #Convert to grayscale image before sending to host computer
                    gray = np.array(pil_image.convert('L'))

                    for i in range(480):
                        for j in range(640):
                            conn.send(gray[i, j])

                elif command == b'depth':
                    # Case where the host computer asks the depth image from the camera
                    if camera == False:
                        print("No camera connected, cannot call this function")
                        conn.send(bytes([0]))
                        conn.send(bytes([99]))
                        continue

                    depth_image = np.asanyarray(depth_frame.get_data())
                    for i in range(480):
                        for j in range(640):
                            conn.send(depth_image[i, j])

                elif command == b'stop':
                    # Case where the host computer asks to stop the script
                    quit()

                else:
                    # Case where the host computer command is meant for the iRobot
                    #write data to robot
                    send_message(command, ser_port, serialLock)
                    time.sleep(0.01)
                    BytesToRead = ser_port.inWaiting()
                    if BytesToRead:
                        x = ser_port.read(BytesToRead)
                        conn.send(x)

            #no command
            else:
                #Read data from robot and send to Host
                BytesToRead = ser_port.inWaiting()
                if BytesToRead:
                    x = ser_port.read(BytesToRead)
                    conn.send(x)

    finally:
        print("I was told to Stop, or something went wrong")
        #close TCP connection
        conn.shutdown(1)
        conn.close()
        s.close()
        #close UDP ports
        s_Dist.close()
        s_Tag.close()
        #close the serial connection
        ser_port.close()
        #disconnect from camera
        if camera == True:
            pipeline.stop()
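
get_ip() is used above but not defined in this excerpt; a common sketch, assuming the goal is the Raspberry Pi's LAN address, opens a throwaway UDP socket to discover the outgoing interface:

def get_ip():
    # Hypothetical helper: find the local IP address used to reach the network.
    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    try:
        s.connect(("8.8.8.8", 80))  # UDP connect sends no traffic
        return s.getsockname()[0]
    finally:
        s.close()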
Example #5
def main():
    global colour_image
    global depth_image

    # open3d visualiser
    visualiser = open3d.visualization.Visualizer()
    point_cloud = open3d.geometry.PointCloud()
    point_cloud.points = open3d.utility.Vector3dVector(
        np.array([[i, i, i] for i in range(-5, 5)]))
    visualiser.create_window()
    visualiser.add_geometry(point_cloud)

    # Create windows
    cv2.namedWindow("Colour")
    cv2.namedWindow("Depth")
    cv2.namedWindow("Filtered")
    #cv2.setMouseCallback("Colour", get_colour_threshold)

    # Camera config
    config = rs.config()
    config.enable_stream(rs.stream.depth, 640, 360, rs.format.z16, 30)
    config.enable_stream(rs.stream.color, 640, 480, rs.format.bgr8, 30)

    # Start camera pipeline
    pipeline = rs.pipeline()
    pipeline.start(config)

    # Depth and colour alignment
    align_to = rs.stream.color
    align = rs.align(align_to)

    # Spatial filter
    spatial = rs.spatial_filter()

    # Get stream profile and camera intrinsics
    profile = pipeline.get_active_profile()
    depth_profile = rs.video_stream_profile(profile.get_stream(
        rs.stream.depth))
    intrinsics = depth_profile.get_intrinsics()

    # Create camera intrinsics for open3d
    intrinsic = open3d.camera.PinholeCameraIntrinsic(
        intrinsics.width, intrinsics.height, intrinsics.fx, intrinsics.fy,
        intrinsics.width // 2, intrinsics.height // 2)

    while True:
        # Obtain and align frames
        current_frame = pipeline.wait_for_frames()
        current_frame = align.process(current_frame)

        # Get colour and depth frames
        depth_image = np.asanyarray(
            spatial.process(current_frame.get_depth_frame()).get_data())
        colour_image = np.asanyarray(
            current_frame.get_color_frame().get_data())

        # Create rgbd image
        rgbd = open3d.geometry.create_rgbd_image_from_color_and_depth(
            open3d.geometry.Image(cv2.cvtColor(colour_image,
                                               cv2.COLOR_BGR2RGB)),
            open3d.geometry.Image(depth_image),
            convert_rgb_to_intensity=False)

        # Create point cloud
        pcd = open3d.geometry.create_point_cloud_from_rgbd_image(
            rgbd, intrinsic)

        # Update point cloud for visualiser
        point_cloud.points = pcd.points
        point_cloud.colors = pcd.colors

        # Update visualiser
        visualiser.update_geometry()
        visualiser.poll_events()
        visualiser.update_renderer()

        # Segment the image
        if (lower_threshold is not None) and (upper_threshold is not None):
            # Gaussian blur
            smoothed = cv2.GaussianBlur(colour_image, (5, 5), 3)

            # Segment image and filter noise from mask
            mask = cv2.inRange(cv2.cvtColor(smoothed, cv2.COLOR_BGR2HSV),
                               lower_threshold, upper_threshold)
            mask = cv2.morphologyEx(mask, cv2.MORPH_OPEN, (11, 11))
            mask = cv2.morphologyEx(mask, cv2.MORPH_CLOSE, (11, 11))
            im2, contours, hierarchy = cv2.findContours(
                mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)

            # Iterate over contours
            for contour in contours:

                # remove small contours
                if cv2.contourArea(contour) > 200:

                    # Get centroid
                    centre = np.mean(contour, axis=0)[0].astype(int)

                    # Display contours
                    cv2.putText(colour_image,
                                str(depth_image[centre[1], centre[0]]) + "mm",
                                (centre[0], centre[1]),
                                cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0))
                    cv2.drawContours(colour_image, np.array([contour]), -1,
                                     [0, 255, 0])

            cv2.imshow("Filtered", mask)

        depth_image = cv2.applyColorMap(
            cv2.convertScaleAbs(depth_image, alpha=0.03), cv2.COLORMAP_JET)

        cv2.imshow("Depth", depth_image)
        cv2.imshow("Colour", colour_image)
        cv2.waitKey(1)
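
create_rgbd_image_from_color_and_depth and create_point_cloud_from_rgbd_image are the pre-0.10 Open3D factory functions; on newer releases the equivalent calls are roughly the following (a version-dependent sketch, not verified against every release):

rgbd = open3d.geometry.RGBDImage.create_from_color_and_depth(
    open3d.geometry.Image(cv2.cvtColor(colour_image, cv2.COLOR_BGR2RGB)),
    open3d.geometry.Image(depth_image),
    convert_rgb_to_intensity=False)
pcd = open3d.geometry.PointCloud.create_from_rgbd_image(rgbd, intrinsic)
visualiser.update_geometry(point_cloud)  # newer versions take the geometry as an argument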
Example #6
    config.enable_device(sys.argv[1])

config.enable_stream(rs.stream.depth, 848, 480, rs.format.z16, 30)
config.enable_stream(rs.stream.color, 848, 480, rs.format.bgr8, 30)
#config.enable_stream(rs.stream.accel)
#config.enable_stream(rs.stream.gyro)

#config.enable_device_from_file(sys.argv[1], repeat_playback=False)

profile = pipeline.start(config)

depth_sensor = profile.get_device().first_depth_sensor()
depth_scale = depth_sensor.get_depth_scale()

rgb_stream = profile.get_stream(rs.stream.color)
rgb_stream_profile = rs.video_stream_profile(rgb_stream)
rgb_intrinsics = rgb_stream_profile.get_intrinsics()

w_minus_1 = rgb_intrinsics.width - 1
h_minus_1 = rgb_intrinsics.height - 1

align = rs.align(rs.stream.color)


class Track(collections.deque):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.realWorldPointIdx = None  # index of real-world 3-D point
        self.point_3d = None  # current 3-D point in camera coordinates
        self.color = None  # real color of the 3-D point
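
Track is just a deque with a few extra attributes; a short usage sketch (the maxlen and the appended payload are assumptions):

track = Track(maxlen=30)            # keep only the most recent 2-D observations
track.append((320, 240))            # e.g. the latest tracked pixel coordinate
track.point_3d = [0.0, 0.0, 1.0]    # current 3-D point in camera coordinates
track.color = (255, 0, 0)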
Example #7
print("Depth Scale is: ", depth_scale)

color_stream = profile.get_stream(rs.stream.color)
depth_stream = profile.get_stream(rs.stream.depth)
ext = depth_stream.get_extrinsics_to(color_stream)
print(ext)

cnt = 0
while cnt < 1000:
    frames = pipeline.wait_for_frames()

    depth_frame = frames.get_depth_frame()
    color_frame = frames.get_color_frame()

    # Grab new intrinsics (may be changed by decimation)
    depth_intrinsics = rs.video_stream_profile(
        depth_frame.profile).get_intrinsics()
    w, h = depth_intrinsics.width, depth_intrinsics.height

    # u, v is 2D pixel coordinate
    # return p is 3D (x, y, z)
    def get_point(u, v):
        d = depth_frame.get_distance(u, v)
        p = rs.rs2_deproject_pixel_to_point(depth_intrinsics, [u, v], d)
        return p

    pt = get_point(320, 240)
    print("pt = {}".format(pt))
    cnt += 1

pipeline.stop()
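
The extrinsics ext printed at the top of this example map depth-frame coordinates into the color camera's frame; a minimal sketch using rs.rs2_transform_point_to_point (shown standalone here, it would belong inside the loop):

# Hedged sketch: express a depth-space 3-D point in color-camera coordinates.
pt_depth = [0.1, 0.0, 1.0]  # e.g. a point returned by get_point()
pt_color = rs.rs2_transform_point_to_point(ext, pt_depth)
print("point in color camera coordinates:", pt_color)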
Example #8
def get_video_stream_profile(profile):
    video_stream_profile = rs.video_stream_profile(profile)
    return video_stream_profile

state = AppState()

# Configure depth and color streams
pipeline = rs.pipeline()
config = rs.config()
config.enable_stream(rs.stream.depth, 640, 480, rs.format.z16, 30)
config.enable_stream(rs.stream.color, 640, 480, rs.format.bgr8, 30)

# Start streaming
pipeline.start(config)

# Get stream profile and camera intrinsics
profile = pipeline.get_active_profile()
depth_profile = rs.video_stream_profile(profile.get_stream(rs.stream.depth))
depth_intrinsics = depth_profile.get_intrinsics()
w, h = depth_intrinsics.width, depth_intrinsics.height

# Processing blocks
pc = rs.pointcloud()
decimate = rs.decimation_filter()
decimate.set_option(rs.option.filter_magnitude, 2 ** state.decimate)
colorizer = rs.colorizer()


def mouse_cb(event, x, y, flags, param):

    if event == cv2.EVENT_LBUTTONDOWN:
        state.mouse_btns[0] = True
Example #10
def gen_frames():  # generate frame by frame from camera

    while True:
        # Grab camera data
        if not state.paused:
            # Wait for a coherent pair of frames: depth and color
            frames = pipeline.wait_for_frames()

            depth_frame = frames.get_depth_frame()
            color_frame = frames.get_color_frame()

            depth_frame = decimate.process(depth_frame)

            # Grab new intrinsics (may be changed by decimation)
            depth_intrinsics = rs.video_stream_profile(
                depth_frame.profile).get_intrinsics()
            w, h = depth_intrinsics.width, depth_intrinsics.height

            depth_image = np.asanyarray(depth_frame.get_data())
            color_image = np.asanyarray(color_frame.get_data())

            depth_colormap = np.asanyarray(
                colorizer.colorize(depth_frame).get_data())

            if state.color:
                mapped_frame, color_source = color_frame, color_image
            else:
                mapped_frame, color_source = depth_frame, depth_colormap

            points = pc.calculate(depth_frame)
            pc.map_to(mapped_frame)

            # Pointcloud data to arrays
            v, t = points.get_vertices(), points.get_texture_coordinates()
            verts = np.asanyarray(v).view(np.float32).reshape(-1, 3)  # xyz
            texcoords = np.asanyarray(t).view(np.float32).reshape(-1, 2)  # uv

        # Render
        now = time.time()

        out.fill(0)

        grid(out, (0, 0.5, 1), size=1, n=10)
        frustum(out, depth_intrinsics)
        axes(out, view([0, 0, 0]), state.rotation, size=0.1, thickness=1)

        if not state.scale or out.shape[:2] == (h, w):
            pointcloud(out, verts, texcoords, color_source)
        else:
            tmp = np.zeros((h, w, 3), dtype=np.uint8)
            pointcloud(tmp, verts, texcoords, color_source)
            tmp = cv2.resize(
                tmp, out.shape[:2][::-1], interpolation=cv2.INTER_NEAREST)
            np.putmask(out, tmp > 0, tmp)
        
        dt = time.time() - now
        
        

        ret, buffer = cv2.imencode('.jpg', out)
        frame = buffer.tobytes()
        yield (b'--frame\r\n'
               b'Content-Type: image/jpeg\r\n\r\n' + frame + b'\r\n')  # concat frame one by one and show result
    
    # Stop streaming
    pipeline.stop()
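
gen_frames() yields an MJPEG multipart stream, which is the shape a Flask video endpoint expects; a hedged usage sketch (Flask is an assumption, it does not appear in the example itself):

from flask import Flask, Response

app = Flask(__name__)

@app.route('/video_feed')
def video_feed():
    # Stream the rendered point-cloud frames to the browser as MJPEG.
    return Response(gen_frames(),
                    mimetype='multipart/x-mixed-replace; boundary=frame')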
Example #11
#Deproject(depth, 100, 100, intrinsics);

# Create a pipeline
pipeline = rs.pipeline()

#Create a config and configure the pipeline to stream
#  different resolutions of color and depth streams
config = rs.config()
config.enable_stream(rs.stream.depth, 640, 480, rs.format.z16, 30)
config.enable_stream(rs.stream.color, 640, 480, rs.format.bgr8, 30)

# Start streaming
profile = pipeline.start(config)

depth_stream = rs.video_stream_profile(profile.get_stream(rs.stream.depth))
intrinsics = depth_stream.get_intrinsics()

# Getting the depth sensor's depth scale (see rs-align example for explanation)
depth_sensor = profile.get_device().first_depth_sensor()
depth_scale = depth_sensor.get_depth_scale()

# Create an align object
# rs.align allows us to perform alignment of depth frames to others frames
# The "align_to" is the stream type to which we plan to align depth frames.
align_to = rs.stream.color
align = rs.align(align_to)

img_counter = 0

# Streaming loop
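
The excerpt stops at the streaming-loop comment; a minimal sketch of one way such a loop could continue with the align object created above (the pixel choice and loop structure are assumptions):

try:
    while True:
        frames = pipeline.wait_for_frames()
        aligned = align.process(frames)
        depth_frame = aligned.get_depth_frame()
        color_frame = aligned.get_color_frame()
        if not depth_frame or not color_frame:
            continue
        # Deproject one pixel of the aligned depth frame to a 3-D point
        d = depth_frame.get_distance(100, 100)
        point = rs.rs2_deproject_pixel_to_point(
            rs.video_stream_profile(depth_frame.profile).get_intrinsics(),
            [100, 100], d)
        img_counter += 1
finally:
    pipeline.stop()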
Example #12
def main():
    # if not os.path.exists(args.directory):
    #     os.mkdir(args.directory)
    try:
        for s in range(15,16):
            read_path ="/home/user/python_projects/utils-GJ/realsense_bag/kist_scene/"+str(s)+".bag"
            save_path = "/home/user/python_projects/DenseFusion_yolact_base/living_lab_video/"+str(s)+"/"
            begin = time.time()

            index = 0
            config = rs.config()
            config.enable_stream(rs.stream.color)
            config.enable_stream(rs.stream.depth)
            pipeline = rs.pipeline()
            rs.config.enable_device_from_file(config, read_path)
            profile = pipeline.start(config)

            align_to = rs.stream.color
            align = rs.align(align_to)

            depth_sensor = profile.get_device().first_depth_sensor()
            depth_scale = depth_sensor.get_depth_scale()
            print("scale : " + str(depth_scale))

            profile = pipeline.get_active_profile()

            color_stream = profile.get_stream(rs.stream.color)
            color_profile = rs.video_stream_profile(color_stream)
            color_intrinsics = color_profile.get_intrinsics()

            depth_profile = rs.video_stream_profile(profile.get_stream(rs.stream.depth))
            depth_intrinsics = depth_profile.get_intrinsics()

            print("*** color intrinsics ***")
            print(color_intrinsics)

            print("*** depth intrinsics ***")
            print(depth_intrinsics)

            while True:
                if time.time() - begin > 22:
                    break
                time.sleep(0.02)
                frames = pipeline.wait_for_frames()
                # align the deph to color frame
                aligned_frames = align.process(frames)
                # get aligned frames
                aligned_depth_frame = aligned_frames.get_depth_frame()
                aligned_depth_image = np.asanyarray(aligned_depth_frame.get_data())
                # scaled_depth_image = depth_image * depth_scale

                # convert color image to BGR for OpenCV
                color_frame = frames.get_color_frame()
                color_image = np.asanyarray(color_frame.get_data())
                # r, g, b = cv2.split(color_image)
                # color_image = cv2.merge((b, g, r))

                # cv2.imshow("color", color_image)
                # cv2.imshow("aligned_depth_image", aligned_depth_image)
                # cv2.waitKey(0)
                print("index : " + str(index))

                if not os.path.exists(save_path):
                    os.mkdir(save_path)

                cv2.imwrite(save_path+"depth_image" + str(index) + ".png", aligned_depth_image)
                cv2.imwrite(save_path+"color_image" + str(index) + ".png", color_image)
                # cv2.imwrite("/home/user/kist_scene/scene" + str(i) + "/depth_image" + str(index) + ".png", aligned_depth_image)
                # cv2.imwrite("/home/user/kist_scene/scene" + str(i) + "/color_image" + str(index) + ".png", color_image)
                index += 1
    finally:
        pass
Example #13
def start_pipeline():
    save_path = "./out"
    pipeline = rs.pipeline()
    config = rs.config()

    config.enable_stream(rs.stream.depth, 1280, 720, rs.format.z16, 30)
    config.enable_stream(rs.stream.color, 1280, 720, rs.format.rgb8, 30)
    pipeline.start(config)
    print('[INFO] Starting pipeline')

    profile = pipeline.get_active_profile()
    # profile = pipeline.start(config)
    depth_profile = rs.video_stream_profile(profile.get_stream(
        rs.stream.depth))

    depth_intrinsics = depth_profile.get_intrinsics()
    w, h = depth_intrinsics.width, depth_intrinsics.height
    print("[INFO] Width: ", w, "Height: ", h)

    fx, fy = depth_intrinsics.fx, depth_intrinsics.fy
    print("[INFO] Focal length X: ", fx)
    print("[INFO] Focal length Y: ", fy)

    ppx, ppy = depth_intrinsics.ppx, depth_intrinsics.ppy
    print("[INFO] Principal point X: ", ppx)
    print("[INFO] Principal point Y: ", ppy)

    # Identifying depth scaling factor
    depth_sensor = profile.get_device().first_depth_sensor()
    depth_scale = depth_sensor.get_depth_scale()

    pc = rs.pointcloud()
    decimate = rs.decimation_filter()
    spatial = rs.spatial_filter()
    hole = rs.hole_filling_filter()
    temporal = rs.temporal_filter()
    colorizer = rs.colorizer()

    align_to = rs.stream.color
    align = rs.align(align_to)
    frame_counter = 0
    try:
        print("[INFO] Starting pipeline stream with frame {:d}".format(
            frame_counter))
        while True:
            frames = pipeline.wait_for_frames()
            aligned_frames = align.process(frames)

            aligned_depth_frame = aligned_frames.get_depth_frame()
            aligned_color_frame = aligned_frames.get_color_frame()

            depth_colormap = np.asanyarray(
                colorizer.colorize(aligned_depth_frame).get_data())
            color_image = np.asanyarray(aligned_color_frame.get_data())

            pts = pc.calculate(aligned_depth_frame)
            pc.map_to(aligned_color_frame)

            if not aligned_depth_frame or not aligned_color_frame:
                print("No depth or color frame, try again...")
                continue

            depth_image = np.asanyarray(aligned_depth_frame.get_data())

            images = np.hstack(
                (cv2.cvtColor(color_image, cv2.COLOR_BGR2RGB), depth_colormap))
            cv2.imshow(str(frame_counter), images)
            key = cv2.waitKey(1)
            if key == ord("d"):
                landmarks = landmark_detector(color_image)
                for n, px in landmarks.items():
                    z = aligned_depth_frame.get_distance(px[0], px[1])
                    # Test Landmarks px = [width, height]
                    pt = rs.rs2_deproject_pixel_to_point(
                        depth_intrinsics, [px[0], px[1]], z)
                    w, h = rs.rs2_project_point_to_pixel(depth_intrinsics, pt)
                    print("Original Pixel:", px)
                    print("Deprojected  Pixel:", [w, h])
                    return px, w, h
    except Exception as ex:
        print(ex)
    finally:
        pipeline.stop()
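
depth_scale is read in this example but never used; inside the streaming loop it would convert the raw z16 values to metres, for instance (a hedged sketch):

# Raw 16-bit depth units multiplied by depth_scale give metres.
depth_in_meters = depth_image.astype(np.float32) * depth_scale
print("[INFO] Depth at image centre: {:.3f} m".format(depth_in_meters[h // 2, w // 2]))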
Example #14
 def get_profile_intrinsics(self, profile):
     return rs.video_stream_profile(profile).get_intrinsics()
def main():

    red_lower = (0, 104, 118)
    red_upper = (23, 255, 255)

    x_off = [0]
    y_off = [0]

    level = 10
    search_num = level**2

    for i in range(-10, 11):
        for j in range(-10, 11):
            x_off.append(i)
            y_off.append(j)

    # Configure depth and color streams
    pipeline = rs.pipeline()
    config = rs.config()
    config.enable_stream(rs.stream.depth, Width, Height, rs.format.z16, 30)
    config.enable_stream(rs.stream.color, Width, Height, rs.format.bgr8, 30)

    # Start streaming
    profile = pipeline.start(config)

    # Intrinsic Matrix
    depth_profile = rs.video_stream_profile(profile.get_stream(
        rs.stream.depth))
    depth_intrinsics = depth_profile.get_intrinsics()

    # Getting the depth sensor's depth scale (see rs-align example for explanation)
    depth_sensor = profile.get_device().first_depth_sensor()
    depth_scale = depth_sensor.get_depth_scale()
    # print("Depth Scale is: " , depth_scale)

    # align depth stream to GRB stream
    align_to = rs.stream.color
    align = rs.align(align_to)

    # create ball tracker to filter the color
    redtracker = Tracker(Width, Height, red_lower, red_upper)

    # create camera node
    rospy.init_node('position_pub', anonymous=True)

    depth_pub = rospy.Publisher("depth", Float32, queue_size=1)
    depth_state_pub = rospy.Publisher("depth_error", Float32, queue_size=1)
    rate_pub = rospy.Publisher("loop_rate", Float32, queue_size=1)

    camera_pub = rospy.Publisher('camera_view', Vector3, queue_size=10)
    ##### robot_pub = rospy.Publisher('robot_view',Vector3,queue_size = 10)

    # loop rate
    loop_rate = 30
    dt = 1 / loop_rate
    rate = rospy.Rate(loop_rate)

    while not rospy.is_shutdown():

        # Wait for a coherent pair of frames: depth and color
        frames = pipeline.wait_for_frames()

        # Align the depth frame to color frame
        aligned_frames = align.process(frames)

        # Get aligned frames
        aligned_depth_frame = aligned_frames.get_depth_frame(
        )  # aligned_depth_frame is a 640x480 depth image
        color_frame = aligned_frames.get_color_frame()

        # Validate that both frames are valid
        if not aligned_depth_frame or not color_frame:
            continue

        # depth_frame_ = frames.get_depth_frame()
        color_frame = frames.get_color_frame()
        # if not depth_frame_ or not color_frame:
        #    continue

        # Convert images to numpy arrays
        depth_image = np.asanyarray(aligned_depth_frame.get_data())
        color_image = np.asanyarray(color_frame.get_data())

        depth_intrinsics = rs.video_stream_profile(
            aligned_frames.profile).get_intrinsics()

        # X-480 Y-640
        X, Y = redtracker.track(color_image)
        color_image = redtracker.draw_arrows(color_image)

        #X = 720
        #Y = 550

        Depth = rs.depth_frame.get_distance(aligned_depth_frame, X, Y)

        X_ = X
        Y_ = Y

        TT = 0
        jj = 0
        while (Depth == 0):

            X = X_ + x_off[jj]
            Y = Y_ + y_off[jj]

            Depth = rs.depth_frame.get_distance(aligned_depth_frame, X, Y)

            if (Depth != 0):
                depth_pub.publish(Depth)

            jj = jj + 1

            if (Depth != 0) or (jj == search_num):
                if (jj == search_num) and (Depth == 0):
                    depth_state_pub.publish(0)
                break
                #
        # Depth = depth_image[X,Y]
        # print('X =  %.10f, Y = %.10f, Z = %.10f' % (X, Y, Depth))

        # print(Depth)
        # Y = 240
        # X = 320
        X, Y, Z = rs.rs2_deproject_pixel_to_point(depth_intrinsics, [X, Y],
                                                  Depth)
        if Depth != 0:
            camera_pub.publish(Vector3(X, Y, Z))
        rate_pub.publish(0)
        ## X, Y, Z, trash = rotate(np.array([pi/2, 0,0,1,2,3]),np.array([X,Y,Z,1]))

        ## robot_pub.publish(Vector3(X,Y,Z))
        # print('real_depth: ',(Y, X, Depth))
        # Apply colormap on depth image (image must be converted to 8-bit per pixel first)
        depth_colormap = cv2.applyColorMap(
            cv2.convertScaleAbs(depth_image, alpha=0.03), cv2.COLORMAP_JET)

        # Depth = depth_image[X, Y]

        # Stack both images horizontally
        images = np.hstack((color_image, depth_colormap))

        # images = color_image
        # Show images
        font = cv2.FONT_HERSHEY_SIMPLEX
        cv2.putText(images, 'Y: %.4f, X: %.4f Depth: %.4f' % (Y, X, Z),
                    (10, 450), font, 0.8, (0, 255, 0), 2, cv2.LINE_AA)
        cv2.namedWindow('RealSense', cv2.WINDOW_AUTOSIZE)
        cv2.imshow('RealSense', images)

        key = cv2.waitKey(30)
        if key == ord('q') or key == 27:
            cv2.destroyAllWindows()
            break
        rate.sleep()
        # cv2.waitKey(1)

        # Stop streaming
    pipeline.stop()
def run(dt):
    global w, h
    window.set_caption("RealSense (%dx%d) %dFPS (%.2fms) %s" %
                       (w, h, 0 if dt == 0 else 1.0 / dt, dt * 1000,
                        "PAUSED" if state.paused else ""))

    if state.paused:
        return

    success, frames = pipeline.try_wait_for_frames(timeout_ms=0)
    if not success:
        return

    depth_frame = frames.get_depth_frame()
    other_frame = frames.first(other_stream)

    depth_frame = decimate.process(depth_frame)

    if state.postprocessing:
        for f in filters:
            depth_frame = f.process(depth_frame)

    # Grab new intrinsics (may be changed by decimation)
    depth_intrinsics = rs.video_stream_profile(
        depth_frame.profile).get_intrinsics()
    w, h = depth_intrinsics.width, depth_intrinsics.height

    color_image = np.asanyarray(other_frame.get_data())

    colorized_depth = colorizer.colorize(depth_frame)
    depth_colormap = np.asanyarray(colorized_depth.get_data())

    if state.color:
        mapped_frame, color_source = other_frame, color_image
    else:
        mapped_frame, color_source = colorized_depth, depth_colormap

    points = pc.calculate(depth_frame)
    pc.map_to(mapped_frame)

    # handle color source or size change
    fmt = convert_fmt(mapped_frame.profile.format())
    global image_data
    if (image_data.format, image_data.pitch) != (fmt, color_source.strides[0]):
        empty = (gl.GLubyte * (w * h * 3))()
        image_data = pyglet.image.ImageData(w, h, fmt, empty)
    # copy image data to pyglet
    image_data.set_data(fmt, color_source.strides[0], color_source.ctypes.data)

    verts = np.asarray(points.get_vertices(2)).reshape(h, w, 3)
    texcoords = np.asarray(points.get_texture_coordinates(2))

    if len(vertex_list.vertices) != verts.size:
        vertex_list.resize(verts.size // 3)
        # need to reassign after resizing
        vertex_list.vertices = verts.ravel()
        vertex_list.tex_coords = texcoords.ravel()

    # copy our data to pre-allocated buffers, this is faster than assigning...
    # pyglet will take care of uploading to GPU
    def copy(dst, src):
        """copy numpy array to pyglet array"""
        # timeit was mostly inconclusive, favoring slice assignment for safety
        np.array(dst, copy=False)[:] = src.ravel()
        # ctypes.memmove(dst, src.ctypes.data, src.nbytes)

    copy(vertex_list.vertices, verts)
    copy(vertex_list.tex_coords, texcoords)

    if state.lighting:
        # compute normals
        dy, dx = np.gradient(verts, axis=(0, 1))
        n = np.cross(dx, dy)

        # can use this, np.linalg.norm or similar to normalize, but OpenGL can do this for us, see GL_NORMALIZE above
        # norm = np.sqrt((n*n).sum(axis=2, keepdims=True))
        # np.divide(n, norm, out=n, where=norm != 0)

        # import cv2
        # n = cv2.bilateralFilter(n, 5, 1, 1)

        copy(vertex_list.normals, n)

    if keys[pyglet.window.key.E]:
        points.export_to_ply('./out.ply', mapped_frame)
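
convert_fmt is referenced in run() but not defined in this excerpt; a sketch, assuming it maps librealsense pixel formats to the pyglet ImageData format strings:

def convert_fmt(fmt):
    # Hypothetical helper: librealsense format -> pyglet ImageData format string.
    return {
        rs.format.rgb8: 'RGB',
        rs.format.bgr8: 'BGR',
        rs.format.rgba8: 'RGBA',
        rs.format.bgra8: 'BGRA',
        rs.format.y8: 'L',
    }[fmt]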
Example #17
def detecting():
    global medium_goal
    global detect
    # Init realsense
    pipeline = rs.pipeline()  # create pipeline
    config = rs.config()  # create realsense configuration
    config.enable_stream(rs.stream.depth, CAMERA_WIDTH, CAMERA_HEIGHT, rs.format.z16, 30)  # set resolution and depth type
    config.enable_stream(rs.stream.color, CAMERA_WIDTH, CAMERA_HEIGHT, rs.format.bgr8, 30)
    profile = pipeline.start(config)  # start pipeline
    for x in range(5):
        pipeline.wait_for_frames()  # skip 5 frames (avoid startup errors and let the depth sensor settle)

    profile = pipeline.get_active_profile()
    depth_profile = rs.video_stream_profile(profile.get_stream(rs.stream.depth))

    depth_sensor = profile.get_device().first_depth_sensor()  # (after 5 frame skip) get depth sensor
    depth_scale = depth_sensor.get_depth_scale()

    align_to = rs.stream.color
    align = rs.align(align_to)
    final_map = np.zeros((CAMERA_HEIGHT, CAMERA_WIDTH)).astype('uint8')

    def pixel_to_3d(pixel_x, pixel_y, depth_image, depth_intrinsics):  # Convert pixel (x, y), depth to (x, y, z) coord
        coord = [pixel_y, pixel_x]
        z, x, y = rs.rs2_deproject_pixel_to_point(depth_intrinsics, coord, depth_image[pixel_y][pixel_x] * depth_scale)
        z = -z
        return [x, y, z]

    def depth_filter(depth_data):
        depth_data = depth_to_disparity.process(depth_data)
        depth_data = spatial.process(depth_data)
        depth_data = disparity_to_depth.process(depth_data)
        depth_data = hole_filling.process(depth_data)
        return depth_data

    while True:

        # Get frames
        frames = pipeline.wait_for_frames()
        aligned_frames = align.process(frames)

        depth_frame = aligned_frames.get_depth_frame()
        depth_frame = depth_filter(depth_frame)

        depth_intrinsics = rs.video_stream_profile(depth_frame.profile).get_intrinsics()

        # Validate that both frames are valid
        if not depth_frame:
            continue
        depth_image = np.asanyarray(depth_frame.get_data())

        #depth_colormap = cv2.applyColorMap(cv2.convertScaleAbs(depth_image, alpha=0.005), cv2.COLORMAP_BONE)[:, :, 0]  # TODO:
        depth_colormap = cv2.applyColorMap(cv2.convertScaleAbs(depth_image, alpha=0.03), cv2.COLORMAP_BONE)[:, :, 1]  # TODO:
        #depth_colormap = np.dstack((depth_colormap, depth_colormap, depth_colormap))

        disparity_map = disparity(depth_colormap)


        bg_removed_mask = np.where(((5 < depth_colormap - disparity_map) & (depth_colormap - disparity_map < 20)), 0,
                           255).astype('uint8')  # remove floor
        bg_removed = cv2.bitwise_and(depth_colormap, depth_colormap, mask=bg_removed_mask)

        bg_removed2_mask = np.where(depth_colormap > 20, 0, 255).astype('uint8')  # remove far
        bg_removed2 = cv2.bitwise_and(bg_removed, bg_removed, mask=bg_removed2_mask)


        test = np.dstack((bg_removed2, bg_removed2, bg_removed2))

        test_yuv = cv2.cvtColor(test, cv2.COLOR_BGR2YUV)

        test_yuv[:, :, 0] = cv2.equalizeHist(test_yuv[:, :, 0])

        # 1. Histogram Equalization
        test_Hist = cv2.cvtColor(test_yuv, cv2.COLOR_YUV2RGB)

        test_gray = cv2.cvtColor(test_Hist, cv2.COLOR_RGB2GRAY)

        # 2. Binarization
        test_Bi = cv2.threshold(test_gray, 1, 255, cv2.THRESH_BINARY)[1]

        # 3. Noise Reduction (Median Filtering)
        floor_removed_bw = cv2.medianBlur(test_Bi, 5)  # Kernel Size 5

        if (floor_removed_bw == 255).any():

            # gray = cv2.cvtColor(floor_removed, cv2.COLOR_RGB2GRAY)  # TODO:
            # threshold = cv2.threshold(gray, 1, 255, 0)[1]

            # 2) detect obstacles
            contours = cv2.findContours(floor_removed_bw, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)[1]

            floor_removed = cv2.cvtColor(floor_removed_bw, cv2.COLOR_GRAY2RGB)

            # 3) calculate medium goal
            medium_points = np.empty(shape=[0, 3])  # (x, y, r)
            for cnt in contours:
                x, y, w, h = cv2.boundingRect(cnt)  # find obstacle in camera view point
                if (80 < h or 120 < w) and 10 < w and 10 < h:  # here!!!!!!!!!!!!!!!!!!!!!!!! h < 200 is to ignore the wall

                    cv2.drawContours(floor_removed, [cnt], 0, (0, 255, 0), 2)

                    cv2.rectangle(floor_removed, (x, y), (x + w, y + h), (0, 0, 255), 2)

                    # cv2.putText(test_color, "w = " + str(w) + ", h = " + str(h), (300, 300), cv2.FONT_HERSHEY_SCRIPT_SIMPLEX, 0.5, (255, 127, 0), 1)

                    cx = int(x + w / 2)
                    cy = int(y + h / 2)

                    cv2.circle(floor_removed, (cx, cy), 5, (255, 0, 255), -1)

                    # convert camera view point obstacle (x, y) to bird view (x, y) coordinate
                    points = np.empty(shape=[0, 2])
                    for i in range(w // 10):

                        for j in range(h // 20):
                            points = np.empty(shape=[0, 2])  # (x, y)
                            # obstacle (x,y) in the same z of depth camera
                            point = pixel_to_3d(x + 10 * i, y + 20 * j, depth_image, depth_intrinsics)[0:2]
                            # if calculate_distance(point, [0, 0]) < DIST_TO_OBSTACLE:  # unit : meter      1.5
                            if floor_removed_bw[y + 20 * j][x + 10 * i] == 255:  # unit : meter      1.5
                                # (camera coord) offset tuning
                                point[0] -= 0.1
                                point[1] += 0.1

                                #if (abs(point[0] > 0.5)) and (abs(point[1] ) > 0.5):  # 50cm far from XY_COORD_M
                                points = np.append(points, [point], 0)

                    cv2.putText(floor_removed, str(points.shape), (cx, cy + 20), cv2.FONT_HERSHEY_SCRIPT_SIMPLEX, 1,(255, 255, 0), 2)

                    # calculate medium points
                    if points.any():

                        if (abs(points[:, 0]) < ROI_X).any():  # ROI_X : 0.4
                            cv2.putText(floor_removed,
                                        str(round(calculate_distance(np.mean(points, 0), [0, 0]), 2)) + 'm',
                                        (cx, cy - 20), cv2.FONT_HERSHEY_SCRIPT_SIMPLEX, 1, (255, 255, 0), 2)

                            if np.mean(points, 0)[0] > 0:
                                #print("obs in right")
                                medium_point = np.min(points, 0)
                                # medium_point[0] -= 0.65 # offset
                                # medium_point[1] = 0.5
                            else:
                                #print("obs in left")
                                medium_point = np.max(points, 0)
                                # medium_point[0] += 0.65 # offset
                                # medium_point[1] = 0.5

                            medium_point[0] = (np.min(points, 0)[0] + np.max(points, 0)[0]) / 2
                            medium_point[1] = (np.min(points, 0)[1] + np.max(points, 0)[1]) / 2
                            medium_point = np.append(medium_point, calculate_distance([0, 0], medium_point))  # append r : distance from (0, 0) to obstacle
                            medium_points = np.append(medium_points, [medium_point], 0)
                if medium_points.any():
                    print('detect')
                    detect = True
                    medium_goal = medium_points[np.argmin(medium_points[:, 2])][:2]  # (x, y) about minimum of r
                else:
                    detect = False
Example #18
def run(dt):
    global w, h
    window.set_caption("RealSense (%dx%d) %dFPS (%.2fms) %s" %
                       (w, h, 0 if dt == 0 else 1.0 / dt, dt * 1000,
                        "PAUSED" if state.paused else ""))

    if state.paused:
        return

    success, frames = pipeline.try_wait_for_frames(timeout_ms=0)
    if not success:
        return

    depth_frame = frames.get_depth_frame().as_video_frame()
    other_frame = frames.first(other_stream).as_video_frame()

    depth_frame = decimate.process(depth_frame)

    if state.postprocessing:
        for f in filters:
            depth_frame = f.process(depth_frame)

    # Grab new intrinsics (may be changed by decimation)
    depth_intrinsics = rs.video_stream_profile(
        depth_frame.profile).get_intrinsics()
    w, h = depth_intrinsics.width, depth_intrinsics.height

    color_image = np.asanyarray(other_frame.get_data())

    # facial landmarks code

    p_file = "models/shape_predictor_68_face_landmarks.dat"
    detector = dlib.get_frontal_face_detector()
    predictor = dlib.shape_predictor(p_file)

    gray = cv2.cvtColor(color_image, cv2.COLOR_BGR2GRAY)
    rects = detector(gray, 0)

    for (i, rect) in enumerate(rects):
        # Make the prediction and transfom it to numpy array
        shape = predictor(gray, rect)
        shape = face_utils.shape_to_np(shape)

        # Draw all the found coordinate points (x, y) on our image
        for (x, y) in shape:
            cv2.circle(color_image, (x, y), 2, (0, 255, 0), -1)

        # (x, y, w_r, h_r) = face_utils.rect_to_bb(rect)
        # cv2.rectangle(color_image, (x, y), (x + w_r, y + h_r), (0, 255, 0), 3)

    # facial landmark END

    colorized_depth = colorizer.colorize(depth_frame)
    depth_colormap = np.asanyarray(colorized_depth.get_data())

    if state.color:
        mapped_frame, color_source = other_frame, color_image
    else:
        mapped_frame, color_source = colorized_depth, depth_colormap

    points = pc.calculate(depth_frame)
    pc.map_to(mapped_frame)

    # handle color source or size change
    fmt = convert_fmt(mapped_frame.profile.format())
    global image_data
    if (image_data.format, image_data.pitch) != (fmt, color_source.strides[0]):
        empty = (gl.GLubyte * (w * h * 3))()
        image_data = pyglet.image.ImageData(w, h, fmt, empty)
    # copy image data to pyglet
    image_data.set_data(fmt, color_source.strides[0], color_source.ctypes.data)

    verts = np.asarray(points.get_vertices(2)).reshape(h, w, 3)
    texcoords = np.asarray(points.get_texture_coordinates(2))

    if len(vertex_list.vertices) != verts.size:
        vertex_list.resize(verts.size // 3)
        # need to reassign after resizing
        vertex_list.vertices = verts.ravel()
        vertex_list.tex_coords = texcoords.ravel()

    # copy our data to pre-allocated buffers, this is faster than assigning...
    # pyglet will take care of uploading to GPU
    def copy(dst, src):
        """copy numpy array to pyglet array"""
        # timeit was mostly inconclusive, favoring slice assignment for safety
        np.array(dst, copy=False)[:] = src.ravel()
        # ctypes.memmove(dst, src.ctypes.data, src.nbytes)

    copy(vertex_list.vertices, verts)
    copy(vertex_list.tex_coords, texcoords)

    if state.lighting:
        # compute normals
        dy, dx = np.gradient(verts, axis=(0, 1))
        n = np.cross(dx, dy)

        # can use this, np.linalg.norm or similar to normalize, but OpenGL can do this for us, see GL_NORMALIZE above
        # norm = np.sqrt((n*n).sum(axis=2, keepdims=True))
        # np.divide(n, norm, out=n, where=norm != 0)

        # import cv2
        # n = cv2.bilateralFilter(n, 5, 1, 1)

        copy(vertex_list.normals, n)

    if keys[pyglet.window.key.E]:
        points.export_to_ply('./out.ply', mapped_frame)
Example #19
config3.enable_device("938422075202")  # left realsense id 937422070270
config3.enable_stream(rs.stream.depth, 640, 480, rs.format.z16, 30)
config3.enable_stream(rs.stream.color, 640, 480, rs.format.bgr8, 30)

# Start streaming
profile = pipeline.start(config)  # front
profile2 = pipeline2.start(config2)  # left
profile3 = pipeline3.start(config3)  # right

profile = pipeline.get_active_profile()
profile2 = pipeline2.get_active_profile()
profile3 = pipeline3.get_active_profile()
#print(profile)

# Get video stream
color_profile_f = rs.video_stream_profile(profile.get_stream(rs.stream.color))
depth_profile_f = rs.video_stream_profile(profile.get_stream(rs.stream.depth))

color_profile_l = rs.video_stream_profile(profile2.get_stream(rs.stream.color))
depth_profile_l = rs.video_stream_profile(profile2.get_stream(rs.stream.depth))

color_profile_r = rs.video_stream_profile(profile3.get_stream(rs.stream.color))
depth_profile_r = rs.video_stream_profile(profile3.get_stream(rs.stream.depth))

# Get front camera intrinsics
depth_intrinsics_front = depth_profile_f.get_intrinsics()
color_intrinsics_front = color_profile_f.get_intrinsics()
print("front color instrinscics")
print(color_intrinsics_front)
print("--------------------------------------------")
print("front depth instrinscics")
Example #20
    def run(self):
        try:
            machien = {'id': 1, 'ip': 'http://192.168.0.10'}
            response_machien = requests.put(SERVER_URL + "/myfarm/register/ip",
                                            data=machien)

            print(f" machien ip set server : {response_machien.status_code}")

            # GUI code
            while True:
                response_user = requests.get(SERVER_URL + "/user/info/3")

                print(f" user data : {response_user.status_code}")

                if response_user.status_code == 200:
                    break
                time.sleep(5)

            # while True :
            # time.sleep(1)
            # print("exist check ...")
            # pipeline.start()
            # frames = pipeline.wait_for_frames()
            # color_frame = frames.get_color_frame()
            # color_image = np.asarray(color_frame.get_data())

            ## take the only location of mushroom pot -> 1/3 * width,1/2*height
            # recent_image = color_image[100:350, 290:550]
            # check_image = cv2.imread('./check.jpg')[100:350, 290:550]
            # cv2.imwrite('./rec.jpg',check_image)
            # cv2.imwrite('./recent.jpg',recent_image)
            # hist_recent = cv2.calcHist(recent_image, [0,1], None, [180,256], [0,180,0,256])
            # hist_check = cv2.calcHist(check_image, [0,1], None, [180,256], [0,180,0,256])
            # number = cv2.compareHist(hist_recent, hist_check, cv2.HISTCMP_CORREL)

            # print(number)
            # pipeline.stop()
            # if number > 0.4:
            # print('Not exist')

            # else:
            #            check that the growing medium (substrate) has been inserted
            # print("Exist !!")
            # break

            while self.isRun:
                params = {'pin': PIN}
                response = requests.get(SERVER_URL + '/farm/exist',
                                        params=params)

                if response.status_code == 200:
                    prg_id = response.text
                    break
                else:
                    print("Not prg")
                time.sleep(5)  # avoid HTTP 429 (too many requests) errors

            params = {'id': prg_id, 'type': 'custom'}
            response = requests.get(SERVER_URL + '/farm/data', params=params)
            result = json.loads(response.text)
            water_num = result['water']
            temp_array = result['temperature']
            hum_array = result['humidity']

            params_status = {'id': "1", "status": "true"}
            response_status = requests.put(SERVER_URL + '/myfarm/status',
                                           params=params_status)

            print(f"machien status : {response_status.status_code}")

            data_len = len(temp_array)

            data = [None] * data_len

            for i in range(0, data_len):
                temp_array[i] = str(temp_array[i]['setting_value'])
                hum_array[i] = str(hum_array[i]['setting_value'])
                data[i] = R + C + temp_array[i] + S + hum_array[i]

            print(f"water_num : {water_num}")
            print(f"temp_array : {temp_array}")
            print(f"hum_array : {hum_array}")
            print(f"total_data : {data}")

            # return True if data else False

            # HOUR * 24 / water_num
            WATER_TIME = HOUR * 24 / water_num

            serial_send_len = 0
            hour = 0  # hours, converted to seconds
            # WATER_TIME
            water_time = 10000  # watering interval to compute once the value is received
            # MOTER_TIME
            moter_time = MOTER_TIME
            picTime = 100000

            now = datetime.datetime.now()
            Arduino = serial.Serial(port='COM5', baudrate=9600)
            print(f"data success : {data}")

            seconds = 0
            dt2 = None
            loadTime = 0
            hum = 0
            temp = 0

            while self.isRun:
                dt1 = datetime.datetime.now()
                result = dt1 - now
                seconds = int(result.total_seconds()) - loadTime

                print(
                    f"Python hour : {hour}, water_time : {water_time} moter_time : {moter_time} image : {picTime}"
                )
                print(f"Python seconds : {seconds}")

                if Arduino.readable():
                    LINE = Arduino.readline()
                    code = str(LINE.decode().replace('\n', ''))
                    print(code)
                    hum = code[18:20]
                    temp = code[38:40]
                    socket_data(temp, hum)
                    self.signal.emit(str(temp), str(hum))
                if seconds - 2 <= hour <= seconds + 2:

                    if len(data) - 1 < serial_send_len:
                        response_status_prg = requests.put(
                            SERVER_URL + f'/farm/end?id={prg_id}')
                        print(
                            f"prg status : {response_status_prg.status_code}")

                        params_status = {'id': "1", "status": "false"}
                        response_status = requests.put(SERVER_URL +
                                                       '/myfarm/status',
                                                       params=params_status)
                        print(
                            f"machien status : {response_status.status_code}")
                        break

                    Arduino.write(data[serial_send_len].encode('utf-8'))
                    req_data_humi_temp = {
                        'prgId': prg_id,
                        'tempValue': temp,
                        'humiValue': hum
                    }
                    humi_temp_res = requests.post(SERVER_URL + "/data/add",
                                                  data=req_data_humi_temp)
                    print(f"Python res_temp : {humi_temp_res.status_code}")
                    serial_send_len += 1
                    hour += DAY

                if seconds - 2 <= water_time <= seconds + 2:
                    Arduino.write(WATER_ORDER.encode('utf-8'))
                    dt2 = datetime.datetime.now()

                    while Arduino.readable():
                        LINE = Arduino.readline()
                        code = str(LINE.decode().replace('\n', ''))
                        print(code)
                        if code[0:3] == 'end':
                            loadTime += int((datetime.datetime.now() -
                                             dt2).total_seconds())
                            break

                    water_time += WATER_TIME

                if seconds - 2 <= moter_time - HOUR / 3 <= seconds + 2:

                    if pipeline_check == False:
                        pipeline_check = True
                        Arduino.write(MOTER_ORDER.encode('utf-8'))
                        dt2 = datetime.datetime.now()

                        # 3d config
                        device = rs.context().query_devices()[0]
                        advnc_mode = rs.rs400_advanced_mode(device)
                        depth_table_control_group = advnc_mode.get_depth_table(
                        )
                        depth_table_control_group.disparityShift = 60
                        depth_table_control_group.depthClampMax = 4000
                        depth_table_control_group.depthClampMin = 0
                        advnc_mode.set_depth_table(depth_table_control_group)

                        pipeline1 = rs.pipeline()
                        config1 = rs.config()

                        config1.enable_stream(rs.stream.depth, rs.format.z16,
                                              30)
                        config1.enable_stream(rs.stream.color, rs.format.bgr8,
                                              30)

                        # Start streaming
                        profile1 = pipeline1.start(config1)
                        # Get stream profile and camera intrinsics
                        profile = pipeline1.get_active_profile()
                        depth_profile = rs.video_stream_profile(
                            profile1.get_stream(rs.stream.depth))
                        pc = rs.pointcloud()
                        decimate = rs.decimation_filter()
                        decimate.set_option(rs.option.filter_magnitude, 1)

                        file_name_3d = [0, 90, 180, 270]
                        file_names_3d = [
                            './3dScan{}.ply'.format(i) for i in file_name_3d
                        ]

                        i = 0

                        while Arduino.readable():
                            LINE = Arduino.readline()
                            code = str(LINE.decode().replace('\n', ''))
                            print(code)
                            print(f"i : {i}")
                            if code[0:3] == 'end':
                                loadTime += int((datetime.datetime.now() -
                                                 dt2).total_seconds())
                                break
                            take_3Dpicture(file_names_3d[i], pipeline1,
                                           decimate, pc)
                            i += 0 if i >= 3 else 1

                        files = {'ply': open(file_names_3d[0], 'rb')}
                        req_data_3d = [("machineid", 1)]
                        res_3d = requests.post(SERVER_URL + "/upload/ply",
                                               files=files,
                                               data=req_data_3d)
                        print(f"Python res_3d : {res_3d.status_code}")

                        pipeline1.stop()
                        moter_time += MOTER_TIME
                        pipeline_check = False
                    else:
                        moter_time += 50

                if seconds - 2 <= picTime + 30 <= seconds + 2:

                    if not pipeline_check:
                        pipeline_check = True
                        profile = pipeline.start(config)

                        depth_sensor = profile.get_device().first_depth_sensor(
                        )

                        frames = pipeline.wait_for_frames()
                        color_frame = frames.get_color_frame()

                        # Convert images to numpy arrays
                        color_image = np.asanyarray(color_frame.get_data())
                        cv2.imwrite('./color_sensor.jpg', color_image)

                        files = {
                            'compost':
                            ('compost.jpg', open('./color_sensor.jpg', 'rb'))
                        }
                        data1 = [('machineid', 1)]
                        response_image = requests.post(SERVER_URL +
                                                       "/upload/compost",
                                                       files=files,
                                                       data=data1)
                        print(
                            f"Python res_image : {response_image.status_code}")
                        picTime += D2_TIME

                        # Turning motor

                        # MUSHROOM DETECTION: request one more frame before stopping
                        frames = pipeline.wait_for_frames()
                        color_frame = frames.get_color_frame()
                        color_image = np.asanyarray(color_frame.get_data())
                        detections = detection(color_image)
                        boxes = get_shiitake_location(
                            detections['detection_boxes'],
                            detections['detection_classes'], 0.5)
                        print(boxes)
                        pipeline.stop()
                        pipeline_check = False

                        for box in boxes:
                            size = get_size(box)
                            res = make_data(52, box, size)
                            print(res)
                    else:
                        picTime += 50

        except Exception:
            self.errors.emit(100)
            self.isRun = False
Example #21
0
def start_after():
    global water_num
    global data
    global D2_TIME
    global pipeline
    global config
    global pipeline_check
    global prg_id
    global DAY

    # HOUR * 24 / water_num
    WATER_TIME = HOUR * 24 / water_num

    serial_send_len = 0
    hour = 0  # time, converted to seconds
    # WATER_TIME
    water_time = 10000  # watering interval to be computed once the value is received
    # MOTER_TIME
    moter_time = MOTER_TIME
    picTime = 100000

    now = datetime.datetime.now()
    Arduino = serial.Serial(port='COM5', baudrate=9600)
    print(f"data success : {data}")

    seconds = 0
    dt2 = None
    loadTime = 0
    hum = 0
    temp = 0

    while True:
        dt1 = datetime.datetime.now()
        result = dt1 - now
        seconds = int(result.total_seconds()) - loadTime

        print(
            f"Python hour : {hour}, water_time : {water_time} moter_time : {moter_time} image : {picTime}"
        )
        print(f"Python seconds : {seconds}")

        if Arduino.readable():
            LINE = Arduino.readline()
            code = str(LINE.decode().replace('\n', ''))
            print(code)
            hum = code[18:20]
            temp = code[38:40]
            socket_data(temp, hum)

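        # Each scheduled task below fires when the elapsed time falls within a +/-2 second window around its target.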
        if seconds - 2 <= hour <= seconds + 2:

            if len(data) - 1 < serial_send_len:
                response_status_prg = requests.put(SERVER_URL +
                                                   f'/farm/end?id={prg_id}')
                print(f"prg status : {response_status_prg.status_code}")

                params_status = {'id': "1", "status": "false"}
                response_status = requests.put(SERVER_URL + '/myfarm/status',
                                               params=params_status)
                print(f"machien status : {response_status.status_code}")
                break

            Arduino.write(data[serial_send_len].encode('utf-8'))
            req_data_humi_temp = {
                'prgId': prg_id,
                'tempValue': temp,
                'humiValue': hum
            }
            humi_temp_res = requests.post(SERVER_URL + "/data/add",
                                          data=req_data_humi_temp)
            print(f"Python res_temp : {humi_temp_res.status_code}")
            serial_send_len += 1
            hour += DAY

        if seconds - 2 <= water_time <= seconds + 2:
            Arduino.write(WATER_ORDER.encode('utf-8'))
            dt2 = datetime.datetime.now()

            while Arduino.readable():
                LINE = Arduino.readline()
                code = str(LINE.decode().replace('\n', ''))
                print(code)
                if code[0:3] == 'end':
                    loadTime += int(
                        (datetime.datetime.now() - dt2).total_seconds())
                    break

            water_time += WATER_TIME

        if seconds - 2 <= moter_time - HOUR / 3 <= seconds + 2:

            if not pipeline_check:
                pipeline_check = True
                Arduino.write(MOTER_ORDER.encode('utf-8'))
                dt2 = datetime.datetime.now()

                # 3d config
                device = rs.context().query_devices()[0]
                advnc_mode = rs.rs400_advanced_mode(device)
                depth_table_control_group = advnc_mode.get_depth_table()
                depth_table_control_group.disparityShift = 60
                depth_table_control_group.depthClampMax = 4000
                depth_table_control_group.depthClampMin = 0
                advnc_mode.set_depth_table(depth_table_control_group)

                pipeline1 = rs.pipeline()
                config1 = rs.config()

                config1.enable_stream(rs.stream.depth, rs.format.z16, 30)
                config1.enable_stream(rs.stream.color, rs.format.bgr8, 30)

                # Start streaming
                profile1 = pipeline1.start(config1)
                # Get stream profile and camera intrinsics
                profile = pipeline1.get_active_profile()
                depth_profile = rs.video_stream_profile(
                    profile1.get_stream(rs.stream.depth))
                pc = rs.pointcloud()
                decimate = rs.decimation_filter()
                decimate.set_option(rs.option.filter_magnitude, 1)

                file_name_3d = [0, 90, 180, 270]
                file_names_3d = [
                    './3dScan{}.ply'.format(i) for i in file_name_3d
                ]

                i = 0

                while Arduino.readable():
                    LINE = Arduino.readline()
                    code = str(LINE.decode().replace('\n', ''))
                    print(code)
                    print(f"i : {i}")
                    if code[0:3] == 'end':
                        loadTime += int(
                            (datetime.datetime.now() - dt2).total_seconds())
                        break
                    take_3Dpicture(file_names_3d[i], pipeline1, decimate, pc)
                    i += 0 if i >= 3 else 1

                files = {'ply': open(file_names_3d[0], 'rb')}
                req_data_3d = [("machineid", 1)]
                res_3d = requests.post(SERVER_URL + "/upload/ply",
                                       files=files,
                                       data=req_data_3d)
                print(f"Python res_3d : {res_3d.status_code}")

                pipeline1.stop()
                moter_time += MOTER_TIME
                pipeline_check = False
            else:
                moter_time += 50

        if seconds - 2 <= picTime + 30 <= seconds + 2:

            if not pipeline_check:
                pipeline_check = True
                profile = pipeline.start(config)

                depth_sensor = profile.get_device().first_depth_sensor()

                frames = pipeline.wait_for_frames()
                color_frame = frames.get_color_frame()

                # Convert images to numpy arrays
                color_image = np.asanyarray(color_frame.get_data())
                cv2.imwrite('./color_sensor.jpg', color_image)

                files = {
                    'compost': ('compost.jpg', open('./color_sensor.jpg',
                                                    'rb'))
                }
                data1 = [('machineid', 1)]
                response_image = requests.post(SERVER_URL + "/upload/compost",
                                               files=files,
                                               data=data1)
                print(f"Python res_image : {response_image.status_code}")
                pipeline.stop()
                picTime += D2_TIME
                pipeline_check = False
            else:
                picTime += 50
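
Both scheduling loops above call a take_3Dpicture helper that is not included in this listing. A minimal sketch of what it might look like, assuming it simply grabs one frameset and exports a color-textured point cloud to the given .ply path:

# Hedged sketch only: the real take_3Dpicture is not shown here; the signature
# is inferred from the call sites take_3Dpicture(file_name, pipeline, decimate, pc).
def take_3Dpicture(file_name, pipeline, decimate, pc):
    """Grab one frameset, decimate the depth frame and export it as a textured .ply."""
    frames = pipeline.wait_for_frames()
    depth_frame = frames.get_depth_frame()
    color_frame = frames.get_color_frame()
    if not depth_frame or not color_frame:
        return
    depth_frame = decimate.process(depth_frame)
    points = pc.calculate(depth_frame)  # point cloud from the depth frame
    pc.map_to(color_frame)              # texture it with the color frame
    points.export_to_ply(file_name, color_frame)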
Example #22
0
def get_frames(pipeline, t265_pipeline, process_modules, filters, config):
    """Extracts frames from intel real sense pipline
    Applies filters and extracts point cloud
    Arguments:
        pipeline {rs::pipeline} -- RealSense Pipeline
        t265_pipeline {rs::pipeline} -- Optional T265 Pipeline, can be None
        process_modules {tuple} -- align, depth_to_disparity, disparity_to_depth, decimate
        filters {list[rs::filters]} -- List of filters to apply

    Returns:
        (rgb_image, depth_image, ndarray, meta) -- RGB Image, Depth Image (colorized), numpy points cloud, meta information
    """
    if config['playback']['enabled']:
        frames = pipeline.wait_for_frames(timeout_ms=30)
        ts_depth = frames.get_timestamp()
        if t265_pipeline is not None:
            ts_t265 = cycle_pose_frames(t265_pipeline, ts_depth)

    else:
        success, frames = pipeline.try_wait_for_frames(timeout_ms=5)
        if not success:
            return None, None, None
    # Get all the standard process modules
    (align, depth_to_disparity, disparity_to_depth, decimate) = process_modules

    # Align the depth frame with the color frame for later overlay
    frames = align.process(frames)

    depth_frame = frames.get_depth_frame()
    color_frame = frames.get_color_frame()

    ts_domain = depth_frame.get_frame_timestamp_domain()
    ts = depth_frame.get_timestamp()

    # Decimate, subsample image
    if decimate:
        depth_frame = decimate.process(depth_frame)
    # Depth to disparity
    depth_frame = depth_to_disparity.process(depth_frame)
    # Apply any filters we requested
    for f in filters:
        depth_frame = f.process(depth_frame)
    # Disparity back to depth
    depth_frame = disparity_to_depth.process(depth_frame)

    # Grab new intrinsics (may be changed by decimation)
    depth_intrinsics = rs.video_stream_profile(
        depth_frame.profile).get_intrinsics()
    w, h = depth_intrinsics.width, depth_intrinsics.height
    d_intrinsics_matrix = np.array(
        [[depth_intrinsics.fx, 0, depth_intrinsics.ppx],
         [0, depth_intrinsics.fy, depth_intrinsics.ppy], [0, 0, 1]])
    # convert to numpy array
    # dropped color frames will be reused, causing DOUBLE writes of polygons on the same
    # image buffer. Create a copy so this doesn't occur
    color_image = np.copy(np.asanyarray(color_frame.get_data()))
    depth_image = np.asanyarray(depth_frame.get_data())
    depth_ts = depth_frame.get_timestamp()

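    # Convert the threshold from meters to raw depth units (divide by depth_scale) and zero out pixels beyond it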
    threshold = config['filters'].get('threshold')
    if threshold is not None and threshold['active']:
        mask = depth_image[:, :] > int(
            threshold['distance'] * (1 / config['sensor_meta']['depth_scale']))
        depth_image[mask] = 0

    meta = dict(h=h, w=w, intrinsics=d_intrinsics_matrix, ts=depth_ts)

    return color_image, depth_image, meta
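
A minimal usage sketch for get_frames(), assuming an already-started rs.pipeline and the same config dictionary keys ('playback', 'filters', 'sensor_meta') that the function reads; the stream settings and values below are illustrative only:

import pyrealsense2 as rs

pipeline = rs.pipeline()
cfg = rs.config()
cfg.enable_stream(rs.stream.depth, 640, 480, rs.format.z16, 30)
cfg.enable_stream(rs.stream.color, 640, 480, rs.format.rgb8, 30)
pipeline.start(cfg)

# Standard processing modules expected by get_frames()
align = rs.align(rs.stream.color)
depth_to_disparity = rs.disparity_transform(True)
disparity_to_depth = rs.disparity_transform(False)
decimate = rs.decimation_filter()
process_modules = (align, depth_to_disparity, disparity_to_depth, decimate)
filters = [rs.spatial_filter(), rs.temporal_filter()]

# Illustrative configuration; depth_scale would normally be read from the sensor
config = {
    'playback': {'enabled': False},
    'filters': {'threshold': {'active': True, 'distance': 1.0}},
    'sensor_meta': {'depth_scale': 0.001},
}

color_image, depth_image, meta = get_frames(pipeline, None, process_modules,
                                            filters, config)
if color_image is None:
    print("No frameset arrived within the timeout")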
Example #23
0
    config = rs.config()
    config.enable_stream(rs.stream.color, 1280, 720, rs.format.rgb8, 30)
    config.enable_stream(rs.stream.depth, 1280, 720, rs.format.z16, 30)

    #Start streaming with default recommended configuration

    profile = pipe.start(config)

    depth_sensor = profile.get_device().first_depth_sensor()
    depth_scale = depth_sensor.get_depth_scale()
    print("Depth Scale is: ", depth_scale)
    depthScale = depth_scale

    profile2 = pipe.get_active_profile()
    depth_profile = rs.video_stream_profile(
        profile2.get_stream(rs.stream.depth))
    depth_intrinsics = depth_profile.get_intrinsics()
    w, h = depth_intrinsics.width, depth_intrinsics.height

    fx, fy = depth_intrinsics.fx, depth_intrinsics.fy

    print(w)
    print(h)
    print(depth_intrinsics)

    clipping_distance_in_meters = 1.1  # 1.1 meters
    inner_clipping_distance_in_meters = 0.5
    clipping_distance = clipping_distance_in_meters / depth_scale
    inner_clipping_distance = inner_clipping_distance_in_meters / depth_scale

    align_to = rs.stream.color
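
    # A hedged sketch of how the function likely continues, assuming the usual
    # background-removal pattern from the RealSense align examples; pipe is the
    # already-started pipeline from this snippet and numpy is imported as np.
    align = rs.align(align_to)
    frames = pipe.wait_for_frames()
    aligned = align.process(frames)
    depth_image = np.asanyarray(aligned.get_depth_frame().get_data())
    color_image = np.asanyarray(aligned.get_color_frame().get_data())

    # Replicate the depth map across three channels so it can mask the color image
    depth_3c = np.dstack((depth_image, depth_image, depth_image))
    grey = 153
    bg_removed = np.where(
        (depth_3c > clipping_distance) | (depth_3c <= inner_clipping_distance),
        grey, color_image)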
Example #24
0
def start_pipeline():
    save_path = "./out"
    pipeline = rs.pipeline()
    config = rs.config()

    config.enable_stream(rs.stream.depth, 640, 480, rs.format.z16, 30)
    config.enable_stream(rs.stream.color, 640, 480, rs.format.rgb8, 30)
    pipeline.start(config)
    print('[INFO] Starting pipeline')

    profile = pipeline.get_active_profile()
    # profile = pipeline.start(config)
    depth_profile = rs.video_stream_profile(profile.get_stream(
        rs.stream.depth))

    depth_intrinsics = depth_profile.get_intrinsics()
    w, h = depth_intrinsics.width, depth_intrinsics.height
    print("[INFO] Width: ", w, "Height: ", h)

    fx, fy = depth_intrinsics.fx, depth_intrinsics.fy
    print("[INFO] Focal length X: ", fx)
    print("[INFO] Focal length Y: ", fy)

    ppx, ppy = depth_intrinsics.ppx, depth_intrinsics.ppy
    print("[INFO] Principal point X: ", ppx)
    print("[INFO] Principal point Y: ", ppy)

    # Identifying depth scaling factor
    depth_sensor = profile.get_device().first_depth_sensor()
    depth_scale = depth_sensor.get_depth_scale()

    pc = rs.pointcloud()
    colorizer = rs.colorizer()

    align_to = rs.stream.depth
    align = rs.align(align_to)
    frame_counter = 0
    try:
        print("[INFO] Starting pipeline stream with frame {:d}".format(
            frame_counter))
        while True:
            frames = pipeline.wait_for_frames()
            aligned_frames = align.process(frames)
            depth_frame = aligned_frames.get_depth_frame()
            color_frame = aligned_frames.get_color_frame()

            # Validate the frames before using them
            if not depth_frame or not color_frame:
                print("No depth or color frame, try again...")
                continue

            depth_colormap = np.asanyarray(
                colorizer.colorize(depth_frame).get_data())
            color_image = np.asanyarray(color_frame.get_data())

            pts = pc.calculate(depth_frame)
            pc.map_to(color_frame)

            images = np.hstack(
                (cv2.cvtColor(color_image, cv2.COLOR_BGR2RGB), depth_colormap))
            cv2.imshow(str(frame_counter), images)

            key = cv2.waitKey(1)

            if key == ord("d"):
                min_distance = 1e-6
                v = pts.get_vertices()
                c = color_frame.get_data()
                vertices = np.asanyarray(v).view(np.float32).reshape(-1,
                                                                     3)  # xyz
                color_data = np.asanyarray(c).reshape(-1, 3)  # Direct RGB
                h, w, _ = color_image.shape
                new_verts = []
                projected_pts = []
                counter = 0
                for x in range(h):
                    for y in range(w):
                        z = depth_frame.get_distance(y, x)
                        if z:
                            point = rs.rs2_deproject_pixel_to_point(
                                depth_intrinsics, [y, x], z)
                            if (np.math.fabs(point[0]) >= min_distance
                                    or np.math.fabs(point[1]) >= min_distance
                                    or np.math.fabs(point[2]) >= min_distance):
                                projected_pts.append([
                                    round(point[0], 8),
                                    round(point[1], 8),
                                    round(point[2], 8)
                                ])
                        else:
                            # print("no info at:", [y, x], z)
                            counter += 1  # ignore pixels that have no depth value
                for i in range(pts.size()):
                    if (np.math.fabs(vertices[i][0]) >= min_distance
                            or np.math.fabs(vertices[i][1]) >= min_distance
                            or np.math.fabs(vertices[i][2]) >= min_distance):
                        new_verts.append(vertices[i])

                print("Number of pixels ignored:", counter)
                return projected_pts, new_verts

            if key == ord("q"):
                break

    except Exception as ex:
        print(ex)
    finally:
        pipeline.stop()
Example #25
0
config.enable_record_to_file('test1.bag')

# Align depth to color 
align_to = rs.stream.color
align = rs.align(align_to)

# Node init and publisher definition
rospy.init_node('realsense_rgb_align_depth', anonymous = True)
pub_color = rospy.Publisher("rgb_image", Image, queue_size=2)
pub_align = rospy.Publisher("align_depth", Image, queue_size=2)
pub_camera_info = rospy.Publisher("camera_info", CameraInfo, queue_size=2)
rate = rospy.Rate(30) # 30hz

# get color camera data
profile = pipeline.get_active_profile()
color_profile = rs.video_stream_profile(profile.get_stream(rs.stream.color))
color_intrinsics = color_profile.get_intrinsics()

camera_info = CameraInfo()
camera_info.width = color_intrinsics.width
camera_info.height = color_intrinsics.height
camera_info.distortion_model = 'plumb_bob'
cx = color_intrinsics.ppx
cy = color_intrinsics.ppy
fx = color_intrinsics.fx
fy = color_intrinsics.fy
camera_info.K = [fx, 0, cx, 0, fy, cy, 0, 0, 1]
camera_info.D = [0, 0, 0, 0, 0]
camera_info.R = [1.0, 0, 0, 0, 1.0, 0, 0, 0, 1.0]
camera_info.P = [fx, 0, cx, 0, 0, fy, cy, 0, 0, 0, 1.0, 0]
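
An assumed sketch of the publishing loop that would plausibly follow this setup (it is not part of the original snippet); it presumes cv_bridge is available, numpy is imported as np, and the color stream was configured as bgr8:

from cv_bridge import CvBridge

bridge = CvBridge()
while not rospy.is_shutdown():
    frames = pipeline.wait_for_frames()
    aligned_frames = align.process(frames)
    color_frame = aligned_frames.get_color_frame()
    depth_frame = aligned_frames.get_depth_frame()
    if not color_frame or not depth_frame:
        continue

    color_image = np.asanyarray(color_frame.get_data())
    depth_image = np.asanyarray(depth_frame.get_data())

    # Publish the RGB image, the aligned depth image and the camera intrinsics
    pub_color.publish(bridge.cv2_to_imgmsg(color_image, encoding='bgr8'))
    pub_align.publish(bridge.cv2_to_imgmsg(depth_image, encoding='16UC1'))
    pub_camera_info.publish(camera_info)
    rate.sleep()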
Example #26
0
def run(dt):
    points = rs.points()
    global w, h
    window.set_caption("RealSense (%dx%d) %dFPS (%.2fms) %s" %
                       (w, h, 0 if dt == 0 else 1.0 / dt, dt * 1000,
                        "PAUSED" if state.paused else ""))

    if state.paused:
        return

    success, frames = pipeline.try_wait_for_frames(timeout_ms=0)
    if not success:
        return

    depth_frame = frames.get_depth_frame()
    other_frame = frames.first(other_stream)
    color = frames.get_color_frame()
    
    depth_frame = decimate.process(depth_frame)

    if state.postprocessing:
        for f in filters:
            depth_frame = f.process(depth_frame)

    # Grab new intrinsics (may be changed by decimation)
    depth_intrinsics = rs.video_stream_profile(
        depth_frame.profile).get_intrinsics()
    w, h = depth_intrinsics.width, depth_intrinsics.height

    color_image = np.asanyarray(other_frame.get_data())

    colorized_depth = colorizer.colorize(depth_frame)
    depth_colormap = np.asanyarray(colorized_depth.get_data())

    if state.color:
        mapped_frame, color_source = other_frame, color_image
    else:
        mapped_frame, color_source = colorized_depth, depth_colormap

    points = pc.calculate(depth_frame)

    pc.map_to(mapped_frame)

    # handle color source or size change
    fmt = convert_fmt(mapped_frame.profile.format())
    global image_data
    if (image_data.format, image_data.pitch) != (fmt, color_source.strides[0]):
        empty = (gl.GLubyte * (w * h * 3))()
        image_data = pyglet.image.ImageData(w, h, fmt, empty)
    # copy image data to pyglet
    image_data.set_data(fmt, color_source.strides[0], color_source.ctypes.data)

    verts = np.asarray(points.get_vertices(2)).reshape(h, w, 3)
    texcoords = np.asarray(points.get_texture_coordinates(2))

    if len(vertex_list.vertices) != verts.size:
        vertex_list.resize(verts.size // 3)
        # need to reassign after resizing
        vertex_list.vertices = verts.ravel()
        vertex_list.tex_coords = texcoords.ravel()

    copy(vertex_list.vertices, verts)
    copy(vertex_list.tex_coords, texcoords)
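
The convert_fmt helper called above is not part of this snippet; a plausible version, modeled on the librealsense pyglet point-cloud viewer this example appears to come from, simply maps RealSense pixel formats to pyglet format strings:

# Assumed helper, not shown in the original snippet
def convert_fmt(fmt):
    """rs.format to pyglet format string"""
    return {
        rs.format.rgb8: 'RGB',
        rs.format.bgr8: 'BGR',
        rs.format.rgba8: 'RGBA',
        rs.format.bgra8: 'BGRA',
        rs.format.y8: 'L',
    }[fmt]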
Example #27
0
def main():

    pipeline = rs.pipeline()
    config = rs.config()

    config.enable_stream(rs.stream.depth, 1280, 720, rs.format.z16, 30)
    config.enable_stream(rs.stream.color, 1280, 720, rs.format.bgr8, 30)
    pipeline.start(config)

    profile = pipeline.get_active_profile()
    depth_profile = rs.video_stream_profile(profile.get_stream(
        rs.stream.depth))
    intr = depth_profile.get_intrinsics()

    detector = pyyolo.YOLO("./models/" + MODEL + ".cfg",
                           "./models/" + MODEL + ".weights",
                           "./models/" + DATA + ".data",
                           detection_threshold=0.5,
                           hier_threshold=0.5,
                           nms_threshold=0.45)

    while True:
        # Get RealSense frame first so we can guarantee we have one
        frames = pipeline.wait_for_frames()

        # Frames 1280 width x 720 height
        #get_distance(x: int, y: int) -> float
        depth_frame = frames.get_depth_frame()
        color_frame = frames.get_color_frame()

        if not depth_frame or not color_frame:
            continue

        # Convert images to numpy arrays
        depth_image = np.asanyarray(depth_frame.get_data())
        color_image = np.asanyarray(color_frame.get_data())

        dets = detector.detect(color_image, rgb=False)

        for i, det in enumerate(dets):

            if det.name != TARGET_OBJECT:
                continue
            # TODO: Select the target object based on how many frames it shows
            # up in within one second with high enough confidence. From there,
            # take the last frame in which it was found and calculate depth
            # based on that frame.

            xmin, ymin, xmax, ymax = det.to_xyxy()
            cv2.rectangle(color_image, (xmin, ymin), (xmax, ymax), (0, 0, 255))
            cv2.putText(color_image, det.name + "," + str(det.prob),
                        (xmin, ymin - 10), cv2.FONT_HERSHEY_SIMPLEX, 1,
                        (107, 168, 50), 1)

            found = False
            xcenter = int((xmin + xmax) / 2)
            ycenter = int((ymin + ymax) / 2)
            checkNorth = checkSouth = ycenter
            checkEast = checkWest = xcenter
            float_distance = 0

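            # Walk outward from the box center in the four cardinal directions until a nonzero depth reading is found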
            while checkNorth >= ymin and checkEast <= xmax and checkSouth <= ymax and checkWest >= xmin:
                float_distance = depth_frame.get_distance(xcenter, checkNorth)
                if float_distance != 0:
                    found = True
                    break
                else:
                    checkNorth -= 10

                float_distance = depth_frame.get_distance(checkEast, ycenter)
                if float_distance != 0:
                    found = True
                    break
                else:
                    checkEast += 10

                float_distance = depth_frame.get_distance(xcenter, checkSouth)
                if float_distance != 0:
                    found = True
                    break
                else:
                    checkSouth += 10

                float_distance = depth_frame.get_distance(checkWest, ycenter)
                if float_distance != 0:
                    found = True
                    break
                else:
                    checkWest -= 10

            cv2.circle(color_image, (xcenter, ycenter), 10, (87, 134, 255), 3)
            cv2.putText(color_image, (str(float_distance) +
                                      "m") if found else "Not Available",
                        (xcenter - 20, ycenter - 20), cv2.FONT_HERSHEY_SIMPLEX,
                        1, (87, 134, 255), 1)
            point3D = rs.rs2_deproject_pixel_to_point(intr, [xcenter, ycenter],
                                                      float_distance)
            print("Body Frame: " + str(point3D))
            print("Inertial Frame: " + str(bodyToInertialFrame(point3D)))

        cv2.imshow("color_image preview", color_image)
        if cv2.waitKey(1) == 27:
            break

    pipeline.stop()
Example #28
0
def main():
    # set the static IP address of this Python server
    TCP_IP = socket.gethostbyname(socket.gethostname())
    TCP_PORT = 8865
    BUFFER_SIZE = 128
    debug = False
    serialLock = _thread.allocate_lock()

    #set the output serial property for create
    ser_port = serial.Serial("/dev/ttyUSB0", baudrate=57600, timeout=0.5)
    print("Serial port set!")

    #start the socket
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    s.bind((TCP_IP, TCP_PORT))
    s.listen(1)
    print("Waiting for connection...")

    conn, addr = s.accept()
    print('Connected to address:')
    print(addr)

    # Configure depth and color streams
    print("waiting for camera...")
    pipeline = rs.pipeline()
    config = rs.config()
    config.enable_stream(rs.stream.color, 640, 480, rs.format.bgr8, 30)
    config.enable_stream(rs.stream.depth, 640, 480, rs.format.z16, 30)

    # Start streaming
    pipeline.start(config)
    print("Camera started and streaming!")

    #align depth and color images
    align_to = rs.stream.color
    align = rs.align(align_to)

    try:
        while 1:

            print("Waiting for command...")
            dataAvail = select.select([conn], [], [], 0.25)[0]
            while not dataAvail:
                bytesToRead = ser_port.inWaiting()
                if bytesToRead:
                    x = ser_port.read(bytesToRead)
                    print("data received from robot:")
                    print(x)
                    conn.send(x)
                dataAvail = select.select([conn], [], [], 0.25)[0]
            data = (conn.recv(BUFFER_SIZE))
            print("received data:")
            print(data)
            # Command received from the host computer, matches with each of the
            # following cases to handle accordingly.
            if data[:4] == b'dist':
                # Case where the host computer asks for distance readings
                print("Waiting for frames from camera...")
                frames = pipeline.wait_for_frames()
                #align frames
                aligned_frames = align.process(frames)
                color_frame = aligned_frames.get_color_frame()
                depth_frame = aligned_frames.get_depth_frame()

                if not color_frame or not depth_frame:
                    print("waiting...")
                    continue
                print("Got frames!")
                data = data.decode('utf-8')
                chars = len(data) - 4
                num_points = (data[4])
                height = (data[5:])
                print(height)
                dist_data = get_distance(depth_frame, int(num_points),
                                         int(height))
                print("SENDING TO COMPUTER")
                conn.send(str.encode(dist_data))
                print("DATA SENT TO COMPUTER")

            elif data == b'tag':
                # Case where the host computer asks for tag information in the RGB stream
                frames = pipeline.wait_for_frames()
                #align frames
                aligned_frames = align.process(frames)
                color_frame = aligned_frames.get_color_frame()
                depth_frame = aligned_frames.get_depth_frame()

                #Get the camera parameters from the RealSense
                profile = pipeline.get_active_profile()
                rgb_profile = rs.video_stream_profile(
                    profile.get_stream(rs.stream.color))
                rgb_intrinsics = rgb_profile.get_intrinsics()

                params = [rgb_intrinsics.fx, rgb_intrinsics.fy, 0, 0]
                if not color_frame or not depth_frame:
                    print("waiting...")
                    continue
                print("Got frames!")

                #Call get_tag function to get the tag information from the camera
                tag_data = get_tag(color_frame, depth_frame, params)
                print("Sending data to computer...")
                conn.send(str.encode(tag_data))
                print("Data sent!")

            elif data == b'color':
                # Case where the host computer asks the current image from the camera
                frames = pipeline.wait_for_frames()
                aligned_frames = align.process(frames)
                color_frame = aligned_frames.get_color_frame()
                color_image = np.asanyarray(color_frame.get_data())
                pil_image = Image.fromarray(color_image)
                #Convert to grayscale image before sending to host computer
                gray = np.array(pil_image.convert('L'))

                for i in range(480):
                    for j in range(640):
                        conn.send(gray[i, j])

            else:
                # Case where the host computer command is meant for the iRobot
                #write data to port
                send_message(data, ser_port, serialLock)
                print("data sent to the robot")
                time.sleep(1)

                bytesToRead = ser_port.inWaiting()
                x = ser_port.read(bytesToRead)
                print("data received from robot:")
                print(x)
                conn.send(x)

    finally:
        print("Something went wrong")
        #close TCP connection
        conn.shutdown(1)
        conn.close()
        #close the serial connection
        ser_port.close()
        pipeline.stop()
Example #29
0
config.enable_stream(rs.stream.depth, 640, 480, rs.format.z16, 30)
profile = pipeline.start(config)

# Get the first connected device
depth_sensor = profile.get_device().first_depth_sensor()

# Get the scale for converting depth pixel values to real-world distance
depth_scale = depth_sensor.get_depth_scale()
print("Depth Scale is: ", depth_scale)

frames = pipeline.wait_for_frames()
color = frames.get_color_frame()

# Get the color frame intrinsics
color_profile = color.get_profile()
cvsprofile = rs.video_stream_profile(color_profile)
color_intrin = cvsprofile.get_intrinsics()
color_intrin_part = [
    color_intrin.ppx, color_intrin.ppy, color_intrin.fx, color_intrin.fy
]
frames = pipeline.wait_for_frames()
color = frames.get_color_frame()

# 1 meter
clipping_distance_in_meters = 1
clipping_distance = clipping_distance_in_meters / depth_scale

# d2c: align the depth image to the color image
align_to = rs.stream.color
align = rs.align(align_to)
colorizer = rs.colorizer()
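
An assumed usage of color_intrin_part: deprojecting a single pixel by hand with the pinhole model, which matches what rs.rs2_deproject_pixel_to_point does when lens distortion is ignored (the pixel coordinates and depth below are illustrative only):

ppx, ppy, fx, fy = color_intrin_part
u, v = 320, 240        # illustrative pixel coordinates
depth_m = 0.75         # illustrative depth in meters

x = (u - ppx) * depth_m / fx
y = (v - ppy) * depth_m / fy
z = depth_m
print("3D point in the color camera frame:", [x, y, z])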
Example #30
0
def run(dt):
    global w, h
    window.set_caption("RealSense (%dx%d) %dFPS (%.2fms) %s" %
                       (w, h, 0 if dt == 0 else 1.0 / dt, dt * 1000,
                        "PAUSED" if state.paused else ""))

    if state.paused:
        return

    success, frames = pipeline.try_wait_for_frames(timeout_ms=0)
    if not success:
        return

    depth_frame = frames.get_depth_frame()
    color_frame = frames.get_color_frame()
    other_frame = frames.first(other_stream)

    depth_frame = decimate.process(depth_frame)

    if state.postprocessing:
        for f in filters:
            depth_frame = f.process(depth_frame)

    # Grab new intrinsics (may be changed by decimation)
    depth_intrinsics = rs.video_stream_profile(
        depth_frame.profile).get_intrinsics()
    w, h = depth_intrinsics.width, depth_intrinsics.height

    color_image = np.asanyarray(other_frame.get_data())

    color_image2 = np.asanyarray(color_frame.get_data())

    colorized_depth = colorizer.colorize(depth_frame)
    depth_colormap = np.asanyarray(colorized_depth.get_data())

    if state.color:
        mapped_frame, color_source = color_frame, color_image
    else:
        mapped_frame, color_source = colorized_depth, depth_colormap

    points = pc.calculate(depth_frame)
    pc.map_to(mapped_frame)

    # handle color source or size change
    fmt = convert_fmt(mapped_frame.profile.format())
    global image_data
    if (image_data.format, image_data.pitch) != (fmt, color_source.strides[0]):
        empty = (gl.GLubyte * (w * h * 3))()
        image_data = pyglet.image.ImageData(w, h, fmt, empty)
    # copy image data to pyglet
    image_data.set_data(fmt, color_source.strides[0], color_source.ctypes.data)

    verts = np.asarray(points.get_vertices(2)).reshape(h, w, 3)
    texcoords = np.asarray(points.get_texture_coordinates(2))

    if len(vertex_list.vertices) != verts.size:
        vertex_list.resize(verts.size // 3)
        # need to reassign after resizing
        vertex_list.vertices = verts.ravel()
        vertex_list.tex_coords = texcoords.ravel()

    # copy our data to pre-allocated buffers, this is faster than assigning...
    # pyglet will take care of uploading to GPU
    def copy(dst, src):
        """copy numpy array to pyglet array"""
        # timeit was mostly inconclusive, favoring slice assignment for safety
        np.array(dst, copy=False)[:] = src.ravel()
        # ctypes.memmove(dst, src.ctypes.data, src.nbytes)

    copy(vertex_list.vertices, verts)
    copy(vertex_list.tex_coords, texcoords)

    if state.lighting:
        # compute normals
        dy, dx = np.gradient(verts, axis=(0, 1))
        n = np.cross(dx, dy)

        # can use this, np.linalg.norm or similar to normalize, but OpenGL can do this for us, see GL_NORMALIZE above
        # norm = np.sqrt((n*n).sum(axis=2, keepdims=True))
        # np.divide(n, norm, out=n, where=norm != 0)

        # import cv2
        # n = cv2.bilateralFilter(n, 5, 1, 1)

        copy(vertex_list.normals, n)

    if keys[pyglet.window.key.E]:
        print(points)
        state.save_index += 1
        save_path = "./out_" + str(state.save_index)
        print(depth_intrinsics)

        #write intrinsics file
        
        intrinsic_file = open((save_path + "_intrinsics.txt"), "w")
        intrinsic_file.write(str(depth_intrinsics))
        intrinsic_file.close()
        #export ply
        points.export_to_ply((save_path + ".ply"), mapped_frame)
Example #31
0
    def visualizer(self, draw=None, path_planner=None, Car=None, blinde=False):
        """
        OpenCV and Numpy Point cloud Software Renderer

        This sample is mostly for demonstration and educational purposes.
        It really doesn't offer the quality or performance that can be
        achieved with hardware acceleration.

        Usage:
        ------
        Mouse:
            Drag with left button to rotate around pivot (thick small axes),
            with right button to translate and the wheel to zoom.

        Keyboard:
            [p]     Pause
            [r]     Reset View
            [d]     Cycle through decimation values
            [z]     Toggle point scaling
            [c]     Toggle color source
            [s]     Save PNG (./out.png)
            [e]     Export points to ply (./out.ply)
            [q/ESC] Quit
        """
        self.generate_window()
        while True:
            # Grab camera data
            if not self.state.paused:
                # Wait for a coherent pair of frames: depth and color
                frames = self.pipe.wait_for_frames()

                depth_frame = frames.get_depth_frame()
                color_frame = frames.get_color_frame()

                depth_frame = self.decimate.process(depth_frame)

                # Grab new intrinsics (may be changed by decimation)
                self.depth_intrinsics = rs.video_stream_profile(
                    depth_frame.profile).get_intrinsics()
                self.w, self.h = self.depth_intrinsics.width, self.depth_intrinsics.height

                depth_image = np.asanyarray(depth_frame.get_data())
                color_image = np.asanyarray(color_frame.get_data())

                depth_colormap = np.asanyarray(
                    self.colorizer.colorize(depth_frame).get_data())

                if self.state.color:
                    mapped_frame, color_source = color_frame, color_image
                else:
                    mapped_frame, color_source = depth_frame, depth_colormap

                points = self.pc.calculate(depth_frame)
                self.pc.map_to(mapped_frame)

                # Pointcloud data to arrays
                v, t = points.get_vertices(), points.get_texture_coordinates()
                verts = np.asanyarray(v).view(np.float32).reshape(-1, 3)  # xyz
                texcoords = np.asanyarray(t).view(np.float32).reshape(-1,
                                                                      2)  # uv

            # Render
            now = time.time()

            out.fill(0)

            if 'axes' in draw:
                axes(out,
                     view(np.zeros((1, 3))),
                     self.state.rotation,
                     size=0.1,
                     thickness=1)

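            # Render the point cloud directly into the output buffer, or into a temporary buffer that is then resized when point scaling is enabled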
            if not self.state.scale or out.shape[:2] == (self.h, self.w):
                pointcloud(out, verts, texcoords, color_source)
            else:
                tmp = np.zeros((self.h, self.w, 3), dtype=np.uint8)
                pointcloud(tmp, verts, texcoords, color_source)
                tmp = cv2.resize(tmp,
                                 out.shape[:2][::-1],
                                 interpolation=cv2.INTER_NEAREST)
                np.putmask(out, tmp > 0, tmp)

            # todo: get grid to show est "driving plane"
            if 'grid' in draw:
                if path_planner:
                    ground_pos = path_planner.ground_plane[
                        0]  # Get new est center of plane
                    ground_rot = path_planner.ground_plane[1]
                    # axes(out, ground_pos, ground_rot, thickness=2)
                    # line3d(out,ground_pos, path_planner.ground_plane[2],color=[0, 0, 255], thickness=3)
                    grid(out,
                         pos=ground_pos,
                         rotation=ground_rot,
                         size=1,
                         n=15)
                else:
                    ground_pos = [
                        self.state.offset[0],
                        self.state.offset[1] + Car.size[1] / 2 + Car.size[3],
                        self.state.offset[2] + 2
                    ]
                    # ground_rot is undefined without a path planner; assume a
                    # flat, unrotated ground plane
                    ground_rot = np.eye(3)
                    grid(out,
                         pos=ground_pos,
                         rotation=ground_rot,
                         size=1,
                         n=15)

            # fixme get np.dot to work and hardcoded +2 offset
            if 'car' in draw and Car:
                car_model(out,
                          pos=[
                              self.state.offset[0], self.state.offset[1],
                              self.state.offset[2] + 2
                          ],
                          car_size=Car.size)

            frustum(out, self.depth_intrinsics)

            if any(self.state.mouse_btns):
                axes(out,
                     view(self.state.pivot),
                     self.state.rotation,
                     thickness=4)

            dt = time.time() - now

            if blinde:
                cv2.setWindowTitle(self.state.WIN_NAME,
                                   "FolkraceCar | Blind mode")
            else:
                try:
                    cv2.setWindowTitle(
                        self.state.WIN_NAME,
                        "FolkraceCar | (%dx%d) %dFPS (%.2fms) %s" %
                        (self.w, self.h, 1.0 / dt, dt * 1000,
                         "PAUSED" if self.state.paused else ""))
                    cv2.putText(
                        out, 'Key: [p] Pause '
                        '[r] Reset View '
                        '[d] Decimation '
                        '[z] Scaling '
                        '[c] Color '
                        '[s] Save PNG '
                        '[e] Export cloud', (5, 10), cv2.FONT_HERSHEY_COMPLEX,
                        0.37, (110, 250, 110), 1)
                except:
                    pass

            cv2.imshow(self.state.WIN_NAME, out)
            key = cv2.waitKey(1)

            if key == ord("r"):
                self.state.reset()

            if key == ord("p"):
                self.state.paused ^= True

            if key == ord("d"):
                self.state.decimate = (self.state.decimate + 1) % 3
                self.decimate.set_option(rs.option.filter_magnitude,
                                         2**self.state.decimate)

            if key == ord("z"):
                self.state.scale ^= True

            if key == ord("c"):
                self.state.color ^= True

            if key == ord("s"):
                cv2.imwrite('./out.png', out)

            if key == ord("e"):
                points.export_to_ply('./out.ply', mapped_frame)

            if key in (27, ord("q")) or cv2.getWindowProperty(
                    self.state.WIN_NAME, cv2.WND_PROP_AUTOSIZE) < 0:
                break
Example #32
0
# Configure streams
pipeline = rs.pipeline()
config = rs.config()
config.enable_stream(rs.stream.depth, 640, 480, rs.format.z16, 30)
# other_stream, other_format = rs.stream.infrared, rs.format.y8
other_stream, other_format = rs.stream.color, rs.format.rgb8
config.enable_stream(other_stream, 640, 480, other_format, 30)

# Start streaming
pipeline.start(config)
profile = pipeline.get_active_profile()

depth_sensor = profile.get_device().first_depth_sensor()
depth_scale = depth_sensor.get_depth_scale()

depth_profile = rs.video_stream_profile(profile.get_stream(rs.stream.depth))
depth_intrinsics = depth_profile.get_intrinsics()
w, h = depth_intrinsics.width, depth_intrinsics.height

# Processing blocks
pc = rs.pointcloud()
decimate = rs.decimation_filter()
decimate.set_option(rs.option.filter_magnitude, 2**state.decimate)
colorizer = rs.colorizer()
filters = [
    rs.disparity_transform(),
    rs.spatial_filter(),
    rs.temporal_filter(),
    rs.disparity_transform(False)
]
# Configure streams
pipeline = rs.pipeline()
config = rs.config()
config.enable_stream(rs.stream.depth, 640, 480, rs.format.z16, 30)
# other_stream, other_format = rs.stream.infrared, rs.format.y8
other_stream, other_format = rs.stream.color, rs.format.rgb8
config.enable_stream(other_stream, 640, 480, other_format, 30)

# Start streaming
pipeline.start(config)
profile = pipeline.get_active_profile()

depth_sensor = profile.get_device().first_depth_sensor()
depth_scale = depth_sensor.get_depth_scale()

depth_profile = rs.video_stream_profile(profile.get_stream(rs.stream.depth))
depth_intrinsics = depth_profile.get_intrinsics()
w, h = depth_intrinsics.width, depth_intrinsics.height

# Processing blocks
pc = rs.pointcloud()
decimate = rs.decimation_filter()
decimate.set_option(rs.option.filter_magnitude, 2 ** state.decimate)
colorizer = rs.colorizer()
filters = [rs.disparity_transform(),
           rs.spatial_filter(),
           rs.temporal_filter(),
           rs.disparity_transform(False)]


# pyglet