def main():
    """List connected RealSense devices, then run block pose estimation,
    printing each estimated block translation.

    Exits with status 1 when no device is attached.

    FIX: the original guard was `if len(rs.context().devices):` which printed
    the "could not find" message and exited precisely when devices WERE
    present; the error path requires an EMPTY device list.
    """
    if not len(rs.context().devices):
        print('Could not find any Realsense Devices.')
        sys.exit(1)
    print('Listing available realsense devices...')
    for i, device in enumerate(rs.context().devices):
        serial_number = device.get_info(rs.camera_info.serial_number)
        print(f'{i+1}. {serial_number}')

    def print_callback(block_id, X_CO):
        # Log only the translation component of each estimated block pose.
        R_CO, T_CO = pose_matrix_to_Rt(X_CO)
        print(f'{block_id} at {T_CO}')

    bpe = BlockPoseEst(print_callback, vis=True)
    bpe.spin()
def config_devices():
    """Create an RSCamera for every connected RealSense device.

    Known serials are read from etc/camera_serials.json (name -> serial);
    a connected device not listed there is registered under "default".

    Returns:
        list: RSCamera instances, one per successfully opened device.

    FIX: the two bare `except:` clauses (which also swallow SystemExit and
    KeyboardInterrupt) are narrowed to the exceptions actually expected.
    """
    devices = []
    try:
        with open(os.path.join(parent_folder, "etc", "camera_serials.json"), "r") as f:
            cams = json.load(f)
    except (OSError, ValueError):
        # Missing/unreadable file or invalid JSON: start with an empty map.
        cams = {}
    # Normalise: name -> {"serial": ..., "available": False}.
    for k, v in cams.items():
        cams[k] = {"serial": v, "available": False}
    context = rs.context()
    for dev in context.devices:
        serial_number = int(dev.get_info(rs.camera_info.serial_number))
        key = _search(cams, serial_number)
        if key is not None:
            cams[key]["available"] = True
        else:
            # Unlisted device: register it under a catch-all entry.
            cams["default"] = {"serial": serial_number, "available": True}
    print("Cams: ", cams)
    for k, v in cams.items():
        if v["available"]:
            try:
                new_cam = RSCamera(name=k, serial=v["serial"])
                devices.append(new_cam)
            except Exception:
                # Best-effort: report and keep going with the other cameras.
                print("there are problems creating the camera pipeline")
                cams = {}
    return devices
def createMultiDevice():
    """Build a DeviceContext streaming depth at 1024x768@30 (z16) and colour
    at 1920x1080@15 (bgr8) for all connected devices."""
    stream_config = rs.config()
    stream_config.enable_stream(rs.stream.depth, 1024, 768, rs.format.z16, 30)
    stream_config.enable_stream(rs.stream.color, 1920, 1080, rs.format.bgr8, 15)
    return DeviceContext(rs.context(), stream_config)
def SetupCamera(camera="-l"):
    """Select the left ("-l") or right ("-r") RealSense stereo module and its
    1280x720@30 y8 'Infrared 1' profile, matching the device by the serial
    numbers in EnvSetupVar; disables the laser emitter before returning.

    Returns:
        (sensor, profile) for the requested camera.

    Raises:
        RuntimeError: if no matching sensor/profile is found (the original
        would fail with UnboundLocalError instead).

    FIX: converted Python 2 `print` statements to Python 3 calls for
    consistency with the rest of the file.
    """
    ctx = rs.context()
    if len(ctx.devices) == 0:
        print("No Realsense connect, exit.")
        exit(-1)
    sensor = None
    profile = None
    for d in ctx.devices:
        sensors = d.query_sensors()
        for se in sensors:
            if se.get_info(rs.camera_info.name) != "Stereo Module":
                continue
            for p in se.profiles:
                p = p.as_video_stream_profile()
                # Only the first IR imager at exactly 1280x720@30 y8 qualifies.
                if p.width() == 1280 and p.height() == 720 and p.fps() == 30 \
                        and p.format() == rs.format.y8 and p.stream_name() == "Infrared 1":
                    if camera == "-l" and d.get_info(rs.camera_info.serial_number) == EnvSetupVar.RS_SerialNumber_L:
                        profile = p.as_stream_profile()
                        print(p.width(), p.height(), p.fps(), p.format())
                        sensor = se
                    elif camera == "-r" and d.get_info(rs.camera_info.serial_number) == EnvSetupVar.RS_SerialNumber_R:
                        profile = p.as_stream_profile()
                        print("Right:", p.width(), p.height(), p.fps(), p.format())
                        sensor = se
    if sensor is None:
        raise RuntimeError("Requested stereo sensor/profile not found")
    # Turn the projector off so plain IR images are captured.
    sensor.set_option(rs.option.laser_power, 0)
    return sensor, profile
def __init__(self):
    """Set up a colour-only RealSense pipeline (1280x720 bgr8 @ 30 fps),
    pre-allocate image buffers, and list every connected device."""
    # Pre-allocated buffers. NOTE(review): shapes look like (width, height, 3)
    # rather than the usual (rows, cols, 3) — confirm against downstream use.
    self.image = np.zeros((640, 480, 3))
    self.color_image = np.zeros((1280, 720, 3))
    #cwd = os.getcwd()
    #self.image_path = cwd + '//images'
    # Configure depth and color streams
    self.pipeline = rs.pipeline()
    self.config = rs.config()
    #self.config.enable_stream(rs.stream.depth, 640, 480, rs.format.z16, 30)
    self.config.enable_stream(rs.stream.color, 1280, 720, rs.format.bgr8, 30)
    ctx = rs.context()
    self.cam_name = []  # names of all detected RealSense devices
    if len(ctx.devices) > 0:
        for d in ctx.devices:
            self.cam_name.append(d.get_info(rs.camera_info.name))
            print ('Found device: ', \
                   d.get_info(rs.camera_info.name), ' ', \
                   d.get_info(rs.camera_info.serial_number))
    else:
        print("No Intel Device connected")
    self.k_streaming_stable = 30  # manually adjusted, make sure streaming stable
    # Output frames are downscaled to this target size.
    width_resized = 640
    height_resized = 480
    self.dim_resized = (width_resized, height_resized)
    # Start streaming
    self.pipeline.start(self.config)
    # Crop rectangle (x, y, w, h) applied to the 1280x720 colour frame.
    (self.x, self.y, self.w, self.h) = (275, 165, 640, 480)
def rs_discover_cams():
    """Returns a list of ids of all realsense cameras connected via USB. """
    return [device.get_info(rs.camera_info.serial_number)
            for device in rs.context().query_devices()]
def main(args, item_metadata):
    """Detect target items in two RealSense camera views with a detectron2
    predictor and command a grasp at the averaged pose."""
    # input('>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
    # Load model
    cfg = setup(args)
    predictor = DefaultPredictor(cfg)
    # Collect serial numbers of all connected cameras.
    ctx = rs.context()
    serials = []
    for i in range(len(ctx.devices)):
        serial_num = ctx.devices[i].get_info(rs.camera_info.serial_number)
        serials.append(serial_num)
        print(f"serial{i}:{serial_num}")
    target_items = ['baozi', 'detergent']
    for item in target_items:
        # NOTE(review): detection runs once per target item even though the
        # result dicts do not depend on `item` — possibly intentional to get
        # a fresh frame per grasp; confirm.
        items_dicts = camera_detect(predictor, serials, item_metadata)
        items_cam1, items_cam2 = items_dicts[0], items_dicts[1]
        if item in items_cam1.keys() and item in items_cam2.keys():
            # Average the two cameras' position estimates.
            medium_point = np.mean([items_cam1[item][0], items_cam2[item][0]], axis=0)
            # Average the three feature axes component-wise.
            feature_vector = []
            for i in range(3):
                feature_vector.append(
                    np.mean([items_cam1[item][1][i], items_cam2[item][1][i]], axis=0))
            print(medium_point, feature_vector)
            # Reorder axes (2,1,0) and transpose to form a rotation matrix,
            # then convert to a rotation vector for the TCP pose.
            rotation_matrix = np.array(feature_vector)[[2, 1, 0], :].T
            rotation_vector = cv2.Rodrigues(rotation_matrix)[0]
            tcp = np.hstack((medium_point, rotation_vector.T[0]))
            # print(tcp)
            item_grasp(tcp, feature_vector[0])
def get_pipes():
    """Start one pipeline per connected RealSense device.

    Returns:
        (pipe_list, depth_scale): list of (pipeline, type_val) tuples where
        type_val is 'd' for the first depth-capable device found and 'p'
        otherwise, plus that first depth sensor's scale (0.0 if none).
    """
    context = rs.context()
    pipe_list = []
    depth_scale = 0.0
    for dev in context.query_devices():
        print("device: ", dev)
        cfg = rs.config()
        cam_inf = rs.camera_info
        serial = dev.get_info(cam_inf.serial_number)
        cfg.enable_device(serial)  # bind this pipeline to one specific device
        pipe = rs.pipeline(context)
        pipe.start(cfg)
        type_val = 'p'  #An additional value that allows the cameras to be differentiated
        #get scaling value from depth camera
        dev_sensors = dev.query_sensors()
        for sens in dev_sensors:
            # Only the first depth sensor seen sets the scale / 'd' tag.
            if depth_scale == 0.0 and sens.is_depth_sensor():
                type_val = 'd'
                depth_scale = sens.as_depth_sensor().get_depth_scale()
                print("depth scale found! ", depth_scale)
                break
        pipe_list.append((pipe, type_val))
    return pipe_list, depth_scale
def main():
    """Detect a croissant with Mask R-CNN in two RealSense views and display
    the merged coloured point cloud."""
    # input('>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
    # Load model
    model = MaskRCNN()
    ctx = rs.context()
    # Query the serial numbers of the connected cameras programmatically.
    serial1 = ctx.devices[0].get_info(rs.camera_info.serial_number)
    serial2 = ctx.devices[1].get_info(rs.camera_info.serial_number)
    print(serial1, serial2)
    item = 'croissant'
    # Per-camera crop windows [rows, cols]; the full frame is [[0, 720], [0, 1280]].
    cam1_cut = [[200, 720], [400, 1120]]
    cam2_cut = [[200, 720], [200, 1180]]
    items_cam1, items_cam2 = camera_detect(model, [serial1, serial2],
                                           [cam1_cut, cam2_cut])
    # Merge the two point clouds (and matching colours) by concatenation.
    # item_pc_add = pc_add(items_cam1[item][0], items_cam2[item][0])
    item_pc_add = np.append(items_cam1[item][0], items_cam2[item][0], axis=0)
    item_color_add = np.append(items_cam1[item][1], items_cam2[item][1], axis=0)
    if item_pc_add.any():
        pc_show(item_pc_add, item_color_add)
def realsense_environment() -> tuple:
    """ Getting config and context to find devices and enable them """
    return prs2.config(), prs2.context()
def record(file_name):
    """Record the first infrared stream (848x480 y8 @ 90 fps) to `file_name`,
    with fixed exposure/gain and the emitter disabled on every depth sensor.
    Recording runs between two interactive ENTER prompts."""
    input(f"About to record to {file_name}. Press ENTER to start")
    # Pin the depth sensors' settings so the recorded IR stream is consistent.
    for device in rs.context().query_devices():
        for sensor in device.query_sensors():
            if not sensor.is_depth_sensor():
                continue
            sensor.set_option(rs.option.exposure, 200.0)
            sensor.set_option(rs.option.gain, 200.0)
            sensor.set_option(rs.option.emitter_enabled, 0)
            sensor.set_option(rs.option.enable_auto_exposure, 0)
    config = rs.config()
    config.disable_all_streams()
    config.enable_stream(rs.stream.infrared, 1, 848, 480, rs.format.y8, 90)
    config.enable_record_to_file(file_name)
    pipeline = rs.pipeline()
    pipeline.start(config)
    try:
        input(f"Recording... Press ENTER to stop")
    finally:
        pipeline.stop()
def detectDevice(self):
    """Detect the first supported RealSense device and set the stream-enable
    flags accordingly.

    Raises:
        Exception: if no supported device (D435/D415/D410) is attached.

    FIX: the original compared with `self.productName in "0B07"`, a reversed
    substring test (e.g. "0B0" would match); an exact product-id lookup is
    intended.  The three near-identical branches are collapsed into a table.
    """
    ctx = rs.context()
    # product_id -> whether the device has a colour imager.
    supported = {
        "0B07": True,   # D435: colour + infrared + depth
        "0AD3": True,   # D415: colour + infrared + depth
        "0AD2": False,  # D410: infrared + depth only
    }
    for dev in ctx.query_devices():
        self.productName = str(dev.get_info(rs.camera_info.product_id))
        if self.productName in supported:
            self.EnableColor = supported[self.productName]
            self.EnableInfrared = True
            self.EnableDepth = True
            self.deviceDetect = True
            break
    if self.deviceDetect is not True:
        raise Exception("No supported device was found")
def get_debug_device(serial_no):
    """Locate a device by serial number (any device when `serial_no` is
    empty), switch it to advanced mode, print its identity and return an
    rs.debug_protocol handle.  Returns 0 when nothing matches."""
    dev = None
    for candidate in rs.context().query_devices():
        if len(serial_no) == 0 or serial_no == candidate.get_info(
                rs.camera_info.serial_number):
            dev = candidate
            break
    if dev is None:
        print('No RealSense device found' + str('.' if len(serial_no) == 0 else ' with serial number: ' + serial_no))
        return 0
    # set to advance mode:
    advanced = rs.rs400_advanced_mode(dev)
    if not advanced.is_enabled():
        advanced.toggle_advanced_mode(True)
    # print(a few basic information about the device)
    print('  Device PID: ', dev.get_info(rs.camera_info.product_id))
    print('  Device name: ', dev.get_info(rs.camera_info.name))
    print('  Serial number: ', dev.get_info(rs.camera_info.serial_number))
    print('  Firmware version: ', dev.get_info(rs.camera_info.firmware_version))
    return rs.debug_protocol(dev)
def realsense_init(self):
    """Pick a connected RealSense camera (interactively when several are
    attached), start the highest colour resolution the USB link supports,
    and apply manual exposure/gain."""
    # Configure depth and color streams
    self.pipeline = rs.pipeline()
    config = rs.config()
    realsense_ctx = rs.context()
    connected_devices = []
    for i in range(len(realsense_ctx.devices)):
        detected_camera = realsense_ctx.devices[i].get_info(
            rs.camera_info.serial_number)
        connected_devices.append(detected_camera)
    # NOTE(review): an empty device list would raise IndexError below.
    # choose device if more than 1 connected:
    if len(connected_devices) > 1:
        print("choose device by pressing the number:")
        for i in range(len(realsense_ctx.devices)):
            print("[%s]: %s @ %s" % (i, realsense_ctx.devices[i].get_info(
                rs.camera_info.name), realsense_ctx.devices[i].get_info(
                rs.camera_info.physical_port)))
        idx = int(input(">>> "))
        # NOTE(review): despite the name, this holds a serial number.
        device_product_line = connected_devices[idx]
        self.UDP_PORT = 5000 + idx  # one UDP port per camera index
        print("sending at UDP %s:%s" % (self.UDP_IP, self.UDP_PORT))
    else:
        device_product_line = connected_devices[0]
        self.UDP_PORT = 5000
    config.enable_device(device_product_line)
    # Get device product line for setting a supporting resolution
    pipeline_wrapper = rs.pipeline_wrapper(self.pipeline)
    pipeline_profile = config.resolve(pipeline_wrapper)
    # Start streaming: try decreasing resolutions until one is accepted.
    try:
        # setup for USB 3
        try:
            config.enable_stream(rs.stream.color, 1920, 1080, rs.format.bgr8, 30)
            print("trying to stream 1920x1080...", end=" ")
            self.pipeline.start(config)
        except Exception:
            print("no success.")
            config.enable_stream(rs.stream.color, 1280, 720, rs.format.bgr8, 30)
            print("trying to stream 1280x720...", end=" ")
            self.pipeline.start(config)
    except Exception:
        # setup for USB 2
        print("no success.")
        config.enable_stream(rs.stream.color, 640, 480, rs.format.bgr8, 30)
        print("streaming in 640x480...", end=" ")
        self.pipeline.start(config)
    # set sensitivity parameters:
    self.device = pipeline_profile.get_device().first_color_sensor()
    self.device.set_option(rs.option.exposure, self.exposure)
    self.device.set_option(rs.option.gain, self.gain)
    print("success!")
    print("Realsense initialization complete.")
def wait_for_rs_device(serial_no):
    """Poll for up to 5 seconds for a RealSense device with the given serial
    number (any device when `serial_no` is empty).

    Returns the matching rs.device after printing its identity; raises
    Exception on timeout.
    """
    ctx = rs.context()
    start = int(round(time.time() * 1000))
    now = int(round(time.time() * 1000))
    while now - start < 5000:
        devices = ctx.query_devices()
        for dev in devices:
            pid = str(dev.get_info(rs.camera_info.product_id))  # NOTE(review): unused
            if len(serial_no) == 0 or serial_no == dev.get_info(
                    rs.camera_info.serial_number):
                # print(a few basic information about the device)
                print('  Device PID: ', dev.get_info(rs.camera_info.product_id))
                print('  Device name: ', dev.get_info(rs.camera_info.name))
                print('  Serial number: ', dev.get_info(rs.camera_info.serial_number))
                print('  Product Line: ', dev.get_info(rs.camera_info.product_line))
                print('  Firmware version: ', dev.get_info(rs.camera_info.firmware_version))
                return dev
        # Retry every 5 s until the overall deadline elapses.
        time.sleep(5)
        now = int(round(time.time() * 1000))
    raise Exception('No RealSense device' + str(
        '.' if len(serial_no) == 0 else ' with serial number: ' + serial_no))
def __init__(self):
    """Discover all connected RealSense cameras and start a 640x480@30
    depth+colour pipeline on each, then warm up auto-exposure by discarding
    the first 60 framesets.

    FIX: device serials are read via the named enum
    rs.camera_info.serial_number instead of the opaque rs.camera_info(1)
    (same underlying value, self-documenting and consistent with the rest
    of the file).
    """
    # Identify devices
    self.device_ls = []
    for c in rs.context().query_devices():
        self.device_ls.append(c.get_info(rs.camera_info.serial_number))
    # Start stream
    print(f"Connecting to RealSense cameras ({len(self.device_ls)} found) ...")
    self.pipes = []
    self.profiles = []
    for i, device_id in enumerate(self.device_ls):
        pipe = rs.pipeline()
        config = rs.config()
        config.enable_device(device_id)
        config.enable_stream(rs.stream.depth, 640, 480, rs.format.z16, 30)
        config.enable_stream(rs.stream.color, 640, 480, rs.format.bgr8, 30)
        self.pipes.append(pipe)
        self.profiles.append(pipe.start(config))
        print(f"Connected to camera {i+1} ({device_id}).")
    time.sleep(1)
    # Warm start camera (realsense automatically adjusts brightness during initial frames)
    for _ in range(60):
        self._get_frames()
def main():
    """Grab ten framesets from the first RealSense camera and report the
    range of frame timestamps; prints an error when no device is attached."""
    pipe = None
    ctx = rs.context()
    print(len(ctx.devices), "devices connected.")
    # getting frames frames and printing timestamp range
    timestamps = []
    if not len(ctx.devices):
        print("can't get frames!")
        return
    # printing devices' serial numbers
    serial_numbers = [serial_number(device) for device in ctx.devices]
    print("devices S/N:", ", ".join(serial_numbers))
    try:
        pipe = rs.pipeline()
        # profile is needed for distinguishing between cameras
        profile = pipe.start()
        for _ in range(10):
            frameset = pipe.wait_for_frames(timeout_ms=10000)
            timestamps.extend(frame.get_timestamp() for frame in frameset)
        print("picture were taken between", min(timestamps), "and", max(timestamps))
    finally:
        pipe.stop()
def get_profiles():
    """Enumerate every connected device's colour and depth video profiles.

    Returns:
        (color_profiles, depth_profiles): lists of (width, height, fps,
        format) tuples, printed along the way for inspection.
    """
    ctx = rs.context()
    devices = ctx.query_devices()
    color_profiles = []
    depth_profiles = []
    for device in devices:
        name = device.get_info(rs.camera_info.name)
        serial = device.get_info(rs.camera_info.serial_number)
        print('Sensor: {}, {}'.format(name, serial))
        print('Supported video formats:')
        for sensor in device.query_sensors():
            for stream_profile in sensor.get_stream_profiles():
                stream_type = str(stream_profile.stream_type())
                # Only colour and depth streams are of interest here.
                if stream_type in ['stream.color', 'stream.depth']:
                    v_profile = stream_profile.as_video_stream_profile()
                    fmt = stream_profile.format()
                    w, h = v_profile.width(), v_profile.height()
                    fps = v_profile.fps()
                    video_type = stream_type.split('.')[-1]  # 'color' or 'depth'
                    print('  {}: width={}, height={}, fps={}, fmt={}'.format(
                        video_type, w, h, fps, fmt))
                    if video_type == 'color':
                        color_profiles.append((w, h, fps, fmt))
                    else:
                        depth_profiles.append((w, h, fps, fmt))
    return color_profiles, depth_profiles
def query( monitor_changes = True ):
    """
    Start a new LRS context, and collect all devices
    :param monitor_changes: If True, devices will update dynamically as they are removed/added
    """
    global rs
    if not rs:
        # librealsense bindings unavailable; nothing to discover.
        return
    #
    # Before we can start a context and query devices, we need to enable all the ports
    # on the acroname, if any:
    if acroname:
        acroname.connect()  # MAY THROW!
        acroname.enable_ports( sleep_on_change = 5 )  # make sure all connected!
    #
    # Get all devices, and store by serial-number
    global _device_by_sn, _context, _port_to_sn
    _context = rs.context()
    _device_by_sn = dict()
    try:
        log.d( 'discovering devices ...' )
        log.debug_indent()
        for dev in _context.query_devices():
            # Devices in recovery/update mode expose a firmware-update id
            # instead of a regular serial number.
            if dev.is_update_device():
                sn = dev.get_info( rs.camera_info.firmware_update_id )
            else:
                sn = dev.get_info( rs.camera_info.serial_number )
            _device_by_sn[sn] = Device( sn, dev )
            log.d( '...', dev )
    finally:
        log.debug_unindent()
    #
    if monitor_changes:
        # Keep the device map current as cameras come and go.
        _context.set_devices_changed_callback( _device_change_callback )
def __init__(self):
    """Prepare a per-device stream config and post-processing filter bank for
    every connected RealSense camera (depth + colour at IMG_WIDTH x
    IMG_HEIGHT @ 30 fps).

    FIX: the spatial filter's hole-fill option was set through
    `ts.option.holes_fill` — `ts` is an undefined name and raised NameError
    at runtime; corrected to `rs.option.holes_fill`.
    """
    ctx = rs.context()
    self.devices = ctx.query_devices()
    self.configs = list()
    self.filters = list()
    for device in self.devices:
        config = rs.config()
        config.enable_device(device.get_info(rs.camera_info.serial_number))
        config.enable_stream(rs.stream.depth, IMG_WIDTH, IMG_HEIGHT, rs.format.z16, 30)
        config.enable_stream(rs.stream.color, IMG_WIDTH, IMG_HEIGHT, rs.format.bgr8, 30)
        self.configs.append(config)
        # Post-processing chain: align to colour, spatial + temporal
        # smoothing, hole filling, disparity round-trip, decimation.
        align = rs.align(rs.stream.color)
        spatial = rs.spatial_filter()
        spatial.set_option(rs.option.filter_magnitude, 5)
        spatial.set_option(rs.option.filter_smooth_alpha, 1)
        spatial.set_option(rs.option.filter_smooth_delta, 50)
        spatial.set_option(rs.option.holes_fill, 3)  # was: ts.option.holes_fill (NameError)
        temporal = rs.temporal_filter()
        hole_filling = rs.hole_filling_filter()
        depth_to_disparity = rs.disparity_transform(True)
        disparity_to_depth = rs.disparity_transform(False)
        decimate = rs.decimation_filter()
        self.filters.append({
            'align': align,
            'spatial': spatial,
            'temporal': temporal,
            'hole': hole_filling,
            'disparity': depth_to_disparity,
            'depth': disparity_to_depth,
            'decimate': decimate
        })
def rs_get_devices(show=False):
    """Return the serial numbers of connected RealSense devices, skipping
    'platform camera' entries; optionally print each kept device object."""
    serials = []  # available device serial numbers
    for device in rs.context().devices:
        if device.get_info(rs.camera_info.name).lower() == 'platform camera':
            continue
        serials.append(device.get_info(rs.camera_info.serial_number))
        if show:
            print(device)  # device: type & S/N
    return serials  # list: serial number
def __init__(self, context, D400_pipeline_configuration, L500_pipeline_configuration=rs.config()):
    """
    Class to manage the Intel RealSense devices

    Parameters:
    -----------
    context : rs.context()
        The context created for using the realsense library
    D400_pipeline_configuration : rs.config()
        The realsense library configuration to be used for the application when a D400 product is attached.
    L500_pipeline_configuration : rs.config()
        The realsense library configuration to be used for the application when an L500 product is attached.
    """
    # NOTE(review): the default rs.config() is evaluated once at import time
    # and shared across all calls that omit the argument — confirm intended.
    assert isinstance(context, type(rs.context()))
    assert isinstance(D400_pipeline_configuration, type(rs.config()))
    assert isinstance(L500_pipeline_configuration, type(rs.config()))
    self._context = context
    self._available_devices = enumerate_connected_devices(context)
    self._enabled_devices = {}  #serial numbers of the enabled devices
    self.D400_config = D400_pipeline_configuration
    self.L500_config = L500_pipeline_configuration
    self._frame_counter = 0
def __init__(self, disp_parent=None, filename: str = ""):
    """Initialise the RealSense capture backend with a fresh pipeline/config
    and a 640x480 @ 30 fps default mode."""
    super().__init__(disp_parent, filename)
    context = rs.context()
    self.pipeline = rs.pipeline(context)
    self.config = rs.config()
    self.framerate = 30
    self.resolution = (640, 480)
class Grabber(object):
    """Thin wrapper around one RealSense camera: configures depth/colour
    streams by serial number and returns frames as numpy arrays."""
    # realsense "context", shared by all Grabber instances
    realsense_ctx = rs.context()

    def __init__(self, cam_serial, name, width, height, framerate):
        # save parameters
        self.cam_serial, self.name, self.width, self.height, self.framerate = cam_serial, name, width, height, framerate
        # create pipeline
        self.pipeline = rs.pipeline(Grabber.realsense_ctx)
        # create a config object
        self.config = rs.config()
        # enable device (bind this pipeline to the camera with this serial)
        self.config.enable_device(cam_serial)
        # to mark which streams are to be grabbed
        self.depth, self.color = False, False

    def start(self, depth=False, color=False):
        """Enable the requested streams and start the pipeline."""
        # remember what we're streaming
        self.depth, self.color = depth, color
        if depth:
            self.config.enable_stream(rs.stream.depth, self.width, self.height, rs.format.z16, self.framerate)
        if color:
            self.config.enable_stream(rs.stream.color, self.width, self.height, rs.format.bgr8, self.framerate)
        # Start streaming from the configured device
        self.pipeline.start(self.config)

    def grab(self):
        """ return depth and color """
        # to hold result: keys "depth"/"color" -> numpy array
        result = dict()
        # Get frames (blocks until a coherent frameset is available)
        frames = self.pipeline.wait_for_frames()
        # Get depth frame
        if self.depth:
            depth_frame = frames.get_depth_frame()
            result["depth"] = np.asanyarray(depth_frame.get_data())
        # Get color frame
        if self.color:
            color_frame = frames.get_color_frame()
            result["color"] = np.asanyarray(color_frame.get_data())
        return result

    def stop(self):
        """Stop the pipeline."""
        self.pipeline.stop()
def find_device_that_supports_advanced_mode() :
    """Return the first connected device whose product id is listed in
    DS5_product_ids (i.e. a DS5-family, advanced-mode-capable camera)."""
    for dev in rs.context().query_devices():
        has_pid = dev.supports(rs.camera_info.product_id)
        if has_pid and str(dev.get_info(rs.camera_info.product_id)) in DS5_product_ids:
            return dev
    raise Exception("No device that supports advanced mode was found")
def discover_cams():
    """Returns a list of the ids of all cameras connected via USB.

    FIX: the original iterated `range(ctx.devices.size())` while indexing a
    separately materialised `query_devices()` list — enumerating the device
    set twice and relying on both snapshots agreeing; now a single snapshot
    is iterated directly.
    """
    ctx = rs.context()
    ctx_devs = list(ctx.query_devices())
    return [dev.get_info(rs.camera_info.serial_number) for dev in ctx_devs]
def main():
    """Inspect the first sensor as a depth sensor: read its depth scale and
    pick one specific stream profile."""
    ctx = rs.context()
    sensors = ctx.query_all_sensors()
    # assumes the first enumerated sensor is the depth sensor — TODO confirm
    depth_sensor = sensors[0]
    depth_sensor = depth_sensor.as_depth_sensor()
    depth_scale = depth_sensor.get_depth_scale()
    # Hard-coded stereo baseline; NOTE(review): not read from the device,
    # units presumed millimetres — verify against the camera model.
    depth_baseline = 50.0021
    # Magic profile index — device/firmware specific; TODO select by
    # resolution/format instead of position.
    profile = depth_sensor.profiles[77]
def find_devices_by_product_line_or_exit(product_line):
    """Query all devices of `product_line`; when none are attached, print a
    skip message and terminate the test process with exit code 0."""
    devices_list = rs.context().query_devices(product_line)
    if devices_list.size() == 0:
        print("No device of the", product_line, "product line was found; skipping test")
        exit(0)
    return devices_list
def set_adv():
    """Apply a hand-tuned depth/disparity configuration to the first
    connected RealSense device via advanced mode, then disable auto-exposure
    and fix the exposure on the stereo module.

    FIX: removed the original's bare `curr.disparityShift` expression
    statement, which read the attribute and discarded the value (a no-op).
    """
    print('Setting camera parameters')
    context = rs.context()
    devs = context.query_devices()
    d = devs.front()
    adv = rs.rs400_advanced_mode(d)

    # Depth table: units / clamp range / disparity shift for close range.
    curr = adv.get_depth_table()
    curr.disparityShift = 35
    curr.depthUnits = 100
    curr.depthClampMax = 9000
    curr.depthClampMin = 2000
    adv.set_depth_table(curr)

    curr = adv.get_depth_control()
    curr.textureDifferenceThreshold = 500
    curr.textureCountThreshold = 0
    adv.set_depth_control(curr)

    curr = adv.get_rau_support_vector_control()
    curr.minEast = 6
    curr.minWest = 4
    curr.minSouth = 1
    curr.minNorth = 1
    curr.minWEsum = 8
    curr.minNSsum = 1
    curr.uShrink = 4
    curr.vShrink = 1
    adv.set_rau_support_vector_control(curr)

    curr = adv.get_hdad()
    curr.lambdaCensus = 39
    curr.lambdaAD = 2000
    adv.set_hdad(curr)

    curr = adv.get_census()
    curr.uDiameter = 9
    curr.vDiameter = 3
    adv.set_census(curr)

    print('Setting exposure, gain...')
    time.sleep(1)  # let the advanced-mode writes settle before touching sensors
    for s in d.sensors:
        time.sleep(0.2)
        # rs.camera_info(0) is rs.camera_info.name
        if (s.get_info(rs.camera_info(0)) == 'Stereo Module'):
            time.sleep(0.2)
            print(s.get_option(rs.option.enable_auto_exposure))
            time.sleep(0.2)
            s.set_option(rs.option.enable_auto_exposure, 0)
            time.sleep(0.2)
            print(s.get_option(rs.option.enable_auto_exposure))
            time.sleep(0.2)
            print(s.get_option(rs.option.exposure))
            s.set_option(rs.option.exposure, 66000.0)
            time.sleep(0.2)
            print(s.get_option(rs.option.exposure))
def Photo(e, tred, token, flag):
    """Depth-capture worker: grabs bursts of depth frames from the RealSense
    camera whose IP matches tred["IP"], saving either colourised (flag == 1)
    or raw 16-bit images, synchronised with other workers via `synchronizer`.

    Errors are reported through tred["error"] / tred["status"] and stop the
    worker (clearing event `e`).
    """
    try:
        pipe = rs.pipeline()
        color = rs.colorizer()
        ctx = rs.context()
        dev = ctx.query_devices()
        i = 0
        # Search for the device with the requested IP address.
        # NOTE(review): when no device matches, dev[i] raises IndexError
        # before the `i == len(dev)` guard runs, so the intended
        # 'not available' exception is unreachable — verify and fix.
        while True:
            if dev[i].get_info(rs.camera_info.ip_address) == tred["IP"]:
                break
            i = i + 1
            if i == len(dev):
                raise Exception('This camera is not avaiable')
        cfg = rs.config()
        cfg.enable_stream(rs.stream.depth, 640, 480, rs.format.z16, 30)
        cfg.enable_device(dev[i].get_info(rs.camera_info.serial_number))
        print("3D cam started.")
        #Getting the depth sensor's depth scale (see rs-align example for explanation)
        while e.is_set():
            if (synchronizer.Synchro(token) == 1):
                try:
                    pipe.start(cfg)
                    # This call waits until a new coherent set of frames is available on a device
                    # Calls to get_frame_data(...) and get_frame_timestamp(...) on a device will return stable values until wait_for_frames(...) is called
                    for j in range(0, 15):
                        frames = pipe.wait_for_frames()
                        depth_frame = frames.get_depth_frame()
                    # Save the last depth frame of the burst, named by epoch second.
                    filename = "3D_%d" % (int(time.time()))
                    if (flag == 1):
                        # Colourised depth image.
                        depth_col = color.colorize(depth_frame)
                        depth_data = np.asanyarray(depth_col.get_data(), dtype=np.uint16)
                        Save.Save(filename, depth_data)
                    else:
                        # Raw 16-bit depth image.
                        depth_data = np.asanyarray(depth_frame.get_data(), dtype=np.uint16)
                        Save.Savebw(filename, depth_data)
                    print("immagine 3d")
                    tred["acquiredImages"] = tred["acquiredImages"] + 1
                    tred["timestamp"] = str(time.gmtime().tm_year) + "-" + str(
                        time.gmtime().tm_mon) + "-" + str(
                        time.gmtime().tm_mday) + "T" + str(
                        time.gmtime().tm_hour) + ":" + str(
                        time.gmtime().tm_min) + ":" + str(
                        time.gmtime().tm_sec)
                    pipe.stop()
                    synchronizer.Update()
                except Exception as exc:
                    tred["error"] = str(exc)
                    synchronizer.Update()
                    break
    except Exception as err:
        tred["error"] = str(err)
        tred["status"] = "error"
        synchronizer.removefromQueue(token)
        e.clear()
def find_device_that_supports_advanced_mode() :
    """Return the first attached DS5-family device (advanced-mode capable),
    printing its name when available; raise if none is connected."""
    ds5_dev = rs.device()
    for dev in rs.context().query_devices():
        if not dev.supports(rs.camera_info.product_id):
            continue
        if str(dev.get_info(rs.camera_info.product_id)) not in DS5_product_ids:
            continue
        if dev.supports(rs.camera_info.name):
            print("Found device that supports advanced mode:", dev.get_info(rs.camera_info.name))
        return dev
    raise Exception("No device that supports advanced mode was found")
def dev_selection_list():
    """Build (values, labels) columns for a device-selection dropdown,
    starting with a placeholder entry; falls back to just the placeholder
    when device enumeration fails."""
    default = (None, "Select to activate")
    try:
        ctx = rs.context()  # FIXME cannot use "with rs.context() as ctx:"
        # got "AttributeError: __enter__"
        # see https://stackoverflow.com/questions/5093382/object-becomes-none-when-using-a-context-manager
        dev_pairs = [default] + [get_device_info(d) for d in ctx.devices]
    except Exception:  # FIXME: narrow once the failure modes are known
        dev_pairs = [default]
    # Transpose pairs into two parallel sequences (ids, labels).
    return zip(*dev_pairs)
def __init__(self, context, pipeline_configuration):
    """
    Manage the connected Intel RealSense devices.

    Parameters:
    -----------
    context : rs.context()
        The realsense library context used for device discovery
    pipeline_configuration : rs.config()
        The stream configuration applied when devices are enabled
    """
    context_type = type(rs.context())
    config_type = type(rs.config())
    assert isinstance(context, context_type)
    assert isinstance(pipeline_configuration, config_type)
    self._context = context
    self._config = pipeline_configuration
    self._frame_counter = 0
    self._enabled_devices = {}  # keyed by serial number
    self._available_devices = enumerate_connected_devices(context)
def __init__(
    self,
    g_pool,
    device_id=None,
    frame_size=DEFAULT_COLOR_SIZE,
    frame_rate=DEFAULT_COLOR_FPS,
    depth_frame_size=DEFAULT_DEPTH_SIZE,
    depth_frame_rate=DEFAULT_DEPTH_FPS,
    preview_depth=False,
    device_options=(),
    record_depth=True,
):
    """Backend init: create the RealSense context/pipeline, stash default
    modes as restart fallbacks, then open the requested device."""
    logger.debug("_init_ started")
    super().__init__(g_pool)
    self._intrinsics = None
    self.color_frame_index = 0
    self.depth_frame_index = 0
    self.context = rs.context()
    self.pipeline = rs.pipeline(self.context)
    self.pipeline_profile = None
    self.preview_depth = preview_depth
    self.record_depth = record_depth
    self.depth_video_writer = None
    self._needs_restart = False
    # Fallback modes used when a requested mode fails and the device restarts.
    self.frame_size_backup = DEFAULT_COLOR_SIZE
    self.depth_frame_size_backup = DEFAULT_DEPTH_SIZE
    self.frame_rate_backup = DEFAULT_COLOR_FPS
    self.depth_frame_rate_backup = DEFAULT_DEPTH_FPS
    self._initialize_device(
        device_id,
        frame_size,
        frame_rate,
        depth_frame_size,
        depth_frame_rate,
        device_options,
    )
    logger.debug("_init_ completed")
def get_debug_device(serial_no):
    """Find a device matching `serial_no` (the first device when the serial
    is empty), enable advanced mode on it, print basic identity information
    and return its rs.debug_protocol.  Returns 0 when no device matches."""
    ctx = rs.context()
    accept_any = len(serial_no) == 0
    found_dev = False
    for dev in ctx.query_devices():
        if accept_any or serial_no == dev.get_info(rs.camera_info.serial_number):
            found_dev = True
            break
    if not found_dev:
        print('No RealSense device found' + str('.' if len(serial_no) == 0 else ' with serial number: '+serial_no))
        return 0
    # set to advance mode:
    advanced = rs.rs400_advanced_mode(dev)
    if not advanced.is_enabled():
        advanced.toggle_advanced_mode(True)
    # print(a few basic information about the device)
    for label, info in (
        ('  Device PID: ', rs.camera_info.product_id),
        ('  Device name: ', rs.camera_info.name),
        ('  Serial number: ', rs.camera_info.serial_number),
        ('  Firmware version: ', rs.camera_info.firmware_version),
    ):
        print(label, dev.get_info(info))
    debug = rs.debug_protocol(dev)
    return debug
def run_demo():
    """Multi-camera box-measurement demo: calibrate every connected RealSense
    device against a chessboard (Kabsch method), then continuously measure
    and display the bounding box of an object visible to all cameras."""
    # Define some constants
    resolution_width = 1280  # pixels
    resolution_height = 720  # pixels
    frame_rate = 15  # fps
    dispose_frames_for_stablisation = 30  # frames
    chessboard_width = 6  # squares
    chessboard_height = 9  # squares
    square_size = 0.0253  # meters

    try:
        # Enable the streams from all the intel realsense devices
        rs_config = rs.config()
        rs_config.enable_stream(rs.stream.depth, resolution_width, resolution_height, rs.format.z16, frame_rate)
        rs_config.enable_stream(rs.stream.infrared, 1, resolution_width, resolution_height, rs.format.y8, frame_rate)
        rs_config.enable_stream(rs.stream.color, resolution_width, resolution_height, rs.format.bgr8, frame_rate)

        # Use the device manager class to enable the devices and get the frames
        device_manager = DeviceManager(rs.context(), rs_config)
        device_manager.enable_all_devices()

        # Allow some frames for the auto-exposure controller to stablise
        for frame in range(dispose_frames_for_stablisation):
            frames = device_manager.poll_frames()

        assert( len(device_manager._available_devices) > 0 )
        """
        1: Calibration
        Calibrate all the available devices to the world co-ordinates.
        For this purpose, a chessboard printout for use with opencv based
        calibration process is needed.
        """
        # Get the intrinsics of the realsense device
        intrinsics_devices = device_manager.get_device_intrinsics(frames)

        # Set the chessboard parameters for calibration
        chessboard_params = [chessboard_height, chessboard_width, square_size]

        # Estimate the pose of the chessboard in the world coordinate using the Kabsch Method
        # (loop until every device has seen the chessboard at once).
        calibrated_device_count = 0
        while calibrated_device_count < len(device_manager._available_devices):
            frames = device_manager.poll_frames()
            pose_estimator = PoseEstimation(frames, intrinsics_devices, chessboard_params)
            transformation_result_kabsch = pose_estimator.perform_pose_estimation()
            object_point = pose_estimator.get_chessboard_corners_in3d()
            calibrated_device_count = 0
            for device in device_manager._available_devices:
                if not transformation_result_kabsch[device][0]:
                    print("Place the chessboard on the plane where the object needs to be detected..")
                else:
                    calibrated_device_count += 1

        # Save the transformation object for all devices in an array to use for measurements
        transformation_devices={}
        chessboard_points_cumulative_3d = np.array([-1,-1,-1]).transpose()
        for device in device_manager._available_devices:
            transformation_devices[device] = transformation_result_kabsch[device][1].inverse()
            points3D = object_point[device][2][:,object_point[device][3]]
            points3D = transformation_devices[device].apply_transformation(points3D)
            chessboard_points_cumulative_3d = np.column_stack( (chessboard_points_cumulative_3d,points3D) )

        # Extract the bounds between which the object's dimensions are needed
        # It is necessary for this demo that the object's length and breath is smaller than that of the chessboard
        # (first column is the [-1,-1,-1] seed and is dropped here)
        chessboard_points_cumulative_3d = np.delete(chessboard_points_cumulative_3d, 0, 1)
        roi_2D = get_boundary_corners_2D(chessboard_points_cumulative_3d)

        print("Calibration completed... \nPlace the box in the field of view of the devices...")

        """
        2: Measurement and display
        Measure the dimension of the object using depth maps from multiple RealSense devices
        The information from Phase 1 will be used here
        """
        # Enable the emitter of the devices
        device_manager.enable_emitter(True)

        # Load the JSON settings file in order to enable High Accuracy preset for the realsense
        device_manager.load_settings_json("./HighResHighAccuracyPreset.json")

        # Get the extrinsics of the device to be used later
        extrinsics_devices = device_manager.get_depth_to_color_extrinsics(frames)

        # Get the calibration info as a dictionary to help with display of the measurements onto the color image instead of infra red image
        calibration_info_devices = defaultdict(list)
        for calibration_info in (transformation_devices, intrinsics_devices, extrinsics_devices):
            for key, value in calibration_info.items():
                calibration_info_devices[key].append(value)

        # Continue acquisition until terminated with Ctrl+C by the user
        while 1:
            # Get the frames from all the devices
            frames_devices = device_manager.poll_frames()

            # Calculate the pointcloud using the depth frames from all the devices
            point_cloud = calculate_cumulative_pointcloud(frames_devices, calibration_info_devices, roi_2D)

            # Get the bounding box for the pointcloud in image coordinates of the color imager
            bounding_box_points_color_image, length, width, height = calculate_boundingbox_points(point_cloud, calibration_info_devices )

            # Draw the bounding box points on the color image and visualise the results
            visualise_measurements(frames_devices, bounding_box_points_color_image, length, width, height)

    except KeyboardInterrupt:
        print("The program was interupted by the user. Closing the program...")

    finally:
        device_manager.disable_streams()
        cv2.destroyAllWindows()
# NOTE(review): this chunk begins mid-class — the `return` below is the tail
# of a DeviceManager method (get_depth_to_color_extrinsics) whose `def` line
# lies outside this view.
        return device_extrinsics

    def disable_streams(self):
        # Disable every stream previously enabled on the shared config.
        self._config.disable_all_streams()


"""
  _____  ___  ___ | |_ (_) _ __   __ _
   | |  / _ \/ __|| __|| || '_ \ / _` |
   | | | __/ \__ \| |_ | || | | || (_| |
   |_|  \___||___/ \__||_||_| |_| \__, |
                                  |___/
"""
if __name__ == "__main__":
    try:
        # Enable depth, both infrared imagers and colour at 1280x720 @ 6 fps.
        c = rs.config()
        c.enable_stream(rs.stream.depth, 1280, 720, rs.format.z16, 6)
        c.enable_stream(rs.stream.infrared, 1, 1280, 720, rs.format.y8, 6)
        c.enable_stream(rs.stream.infrared, 2, 1280, 720, rs.format.y8, 6)
        c.enable_stream(rs.stream.color, 1280, 720, rs.format.rgb8, 6)
        device_manager = DeviceManager(rs.context(), c)
        device_manager.enable_all_devices()
        # Let the streams run for a while before reading extrinsics once.
        for k in range(150):
            frames = device_manager.poll_frames()
        device_manager.enable_emitter(True)
        device_extrinsics = device_manager.get_depth_to_color_extrinsics(frames)
    finally:
        device_manager.disable_streams()
tm2.connect_controller(addr) # expecting tm2 = The device as tm2 except: print ('Failed to connect to controller ', mac_str) except: print ('Serialized data is not in JSON format (', \ n.serialized_data, ')') # Ignore frames arriving from the sensor (just to showcase controller usage) def on_frame(f): pass try: # Create a context ctx = rs.context() # Wait until a TM2 device connects # We have to wait here, even if the device is already connected (since the library loads the device as usb device) print ('Waiting for all devices to connect...') found = False while not found: if len(ctx.devices) > 0: for d in ctx.devices: if d.get_info(rs.camera_info.product_id) == '2803': tm2 = d.as_tm2() print ('Found TM2 device: ', \ d.get_info(rs.camera_info.name), ' ', \ d.get_info(rs.camera_info.serial_number)) found = True