def __init__(self):
    """Initialize the Kinect handler, creating the shared freenect context
    on first use."""
    # Create the class-level context only once; all instances share it.
    if not KinGeo.ctx:
        KinGeo.ctx = fn.init()
    # BUG FIX: the original called fn.init() a second time here, creating a
    # throwaway context and defeating the class-level cache. Reuse it instead.
    self.ctx = KinGeo.ctx
    # self.device = fn.open_device(KinGeo.ctx, 1)
    # assert self.device is not None
    self.last_access = 0.  # time of last kinect depth map access
    self._frame_time = 1. / 30.  # float of min time in seconds per frame
    self._depth_arr = None  # holds numpy array of
    self._pc_timestamp = 0.
def __init__(self):
    """
    Initializes Kinect handler
    """
    # Create the class-level context only once; all instances share it.
    if not KinGeo.ctx:
        KinGeo.ctx = fn.init()
    # BUG FIX: the original called fn.init() again here, creating a second
    # context and defeating the class-level cache. Reuse the shared one.
    self.ctx = KinGeo.ctx
    # self.device = fn.open_device(KinGeo.ctx, 1)
    # assert self.device is not None
    self.last_access = 0.  # time of last kinect depth map access
    self._frame_time = 1. / KinGeo.default_max_frq  # min time in s per frame
    self._depth_arr = None  # holds numpy array of
    self._pc_timestamp = 0.
    self._points_arr_timestamp = 0.
def __init__(self, dummy=False, mirror=True):
    """Initialize a Kinect wrapper.

    :param dummy: if True, skip hardware setup; get_frame() returns a
        synthetic depth frame and other functions may not work.
    :param mirror: whether frames should be mirrored.
    """
    self.__class__._instances.append(weakref.proxy(self))
    self.id = next(self._ids)
    self.resolution = (640, 480)
    self.dummy = dummy
    self.mirror = mirror
    # Idiom fix: test truthiness instead of comparing "== False".
    if not self.dummy:
        print("looking for kinect...")
        self.ctx = freenect.init()
        self.dev = freenect.open_device(self.ctx, self.id)
        print(self.id)
        freenect.close_device(self.dev)  # TODO Test if this has to be done!
        self.angle = None
        # Grab the first depth frame now (the first one takes much longer
        # than the following ones).
        self.depth = freenect.sync_get_depth(
            index=self.id, format=freenect.DEPTH_MM
        )[0]
        self.filtered_depth = None
        print("kinect initialized")
    else:
        print(
            "dummy mode. get_frame() will return a synthetic depth frame, other functions may not work"
        )
def loop(processimg):
    # Display loop: grab frames (from disk if imgpath is set, otherwise via
    # getimg()), run `processimg` over them and show the result until ESC.
    # NOTE: Python 2 code (print statement, dict.iteritems).
    if not use_webcam:
        # Briefly open the Kinect just to set its tilt, then release it.
        ctx = freenect.init()
        dev = freenect.open_device(ctx, 0)
        freenect.set_tilt_degs(dev, 10)
        freenect.close_device(dev)
    cv2.namedWindow('processing')
    # One trackbar per tunable parameter; onchange(k) builds the callback.
    for k, v in params.iteritems():
        cv2.createTrackbar(k, 'processing', v, 255, onchange(k))
    runonce = True
    while runonce:
        #runonce = False
        if imgpath != "":
            img = cv2.imread(imgpath)
        else:
            img = getimg()
        cv2.imshow('processing', cv2.resize(processimg(img), (width, height)))
        char = cv2.waitKey(10)
        if char == 27:  # ESC quits
            break
        elif char == ord('p'):  # 'p' dumps the current parameter values
            for k, v in params.iteritems():
                print "%s: %d" % (k, v)
    #freenect.set_tilt_degs(dev, pid)
    cv2.destroyAllWindows()
def start():
    # Main robot loop: tilt the Kinect, warm up the depth stream, then steer
    # (door detection vs. regular movement) until any key is pressed.
    # NOTE: Python 2 code (xrange).
    global XAXIS
    global YAXIS
    global TEST_CASES
    global DIRECTION
    global GLOBAL_DEPTH_MAP
    plt.ion()
    plt.figure()
    # Briefly open the Kinect just to set its tilt, then release it.
    ctx = freenect.init()
    dev = freenect.open_device(ctx, freenect.num_devices(ctx) - 1)
    freenect.set_tilt_degs(dev, 20)
    freenect.close_device(dev)
    DIRECTION = 0
    # Warm-up: discard a few frames so the sensor settles.
    for i in xrange(5):
        initial_map = get_depth()
    while True:
        GLOBAL_DEPTH_MAP = get_depth()  #returns the depth frame
        back_movement(GLOBAL_DEPTH_MAP)
        contoursright = contours_return(GLOBAL_DEPTH_MAP, -10)
        contoursleft = contours_return(GLOBAL_DEPTH_MAP, 10)
        door_detection(contoursright, contoursleft)
        if DOOR_FLAG:
            door_movement()
        else:
            regular_movement()
        cv2.imshow('final', GLOBAL_DEPTH_MAP)
        if cv2.waitKey(1) != -1:
            # Any keypress: send the stop byte and close the serial link.
            SERIALDATA.write('\x35')
            SERIALDATA.close()
            break
    cv2.destroyAllWindows()
def start():
    """Main robot loop: open the serial link and the Kinect, then steer the
    robot from depth frames until a key is pressed.

    BUG FIX: `ser` was assigned before the `global` statement naming it,
    which raises "name 'ser' is assigned to before global declaration"
    (SyntaxError) in Python 3; the global declaration now comes first.
    """
    global test_cases, ser, global_depth_map, DOOR_FLAG, DOOR_COUNT
    ser = serial.Serial('/dev/ttyUSB0')  #initialization of serial communication
    # Briefly open the Kinect just to set its tilt, then release it.
    ctx = freenect.init()
    dev = freenect.open_device(ctx, freenect.num_devices(ctx) - 1)
    freenect.set_tilt_degs(dev, 20)
    freenect.close_device(dev)
    test_cases = [True, True, True]
    while True:
        global_depth_map = get_depth()  #returns the depth frame
        contoursright = contours_return(global_depth_map, -10)
        contoursleft = contours_return(global_depth_map, 10)
        door_detection(contoursright, contoursleft, test_cases)
        if DOOR_FLAG:
            door_movement(global_depth_map)
        else:
            regular_movement(global_depth_map)
        cv2.imshow('final', global_depth_map)
        if cv2.waitKey(1) != -1:
            # Any keypress: send the stop byte and close the serial link.
            ser.write('\x35')
            ser.close()
            break
    cv2.destroyAllWindows()
def start():
    """Main robot loop: open the serial link and the Kinect, then steer the
    robot from depth frames until a key is pressed.

    BUG FIX: `ser` was assigned before the `global` statement naming it,
    which raises "name 'ser' is assigned to before global declaration"
    (SyntaxError) in Python 3; the global declaration now comes first.
    """
    global test_cases, ser, global_depth_map, DOOR_FLAG, DOOR_COUNT
    ser = serial.Serial(
        '/dev/ttyUSB0')  #initialization of serial communication
    # Briefly open the Kinect just to set its tilt, then release it.
    ctx = freenect.init()
    dev = freenect.open_device(ctx, freenect.num_devices(ctx) - 1)
    freenect.set_tilt_degs(dev, 20)
    freenect.close_device(dev)
    test_cases = [True, True, True]
    while True:
        global_depth_map = get_depth()  #returns the depth frame
        contoursright = contours_return(global_depth_map, -10)
        contoursleft = contours_return(global_depth_map, 10)
        door_detection(contoursright, contoursleft, test_cases)
        if DOOR_FLAG:
            door_movement(global_depth_map)
        else:
            regular_movement(global_depth_map)
        cv2.imshow('final', global_depth_map)
        if cv2.waitKey(1) != -1:
            # Any keypress: send the stop byte and close the serial link.
            ser.write('\x35')
            ser.close()
            break
    cv2.destroyAllWindows()
def _identifyDevice(self):
    """Probe for an attached Kinect1 (libfreenect) or Kinect2
    (pylibfreenect2) and record which one in self.device.

    Calls self._initError for ambiguous or empty configurations.
    """
    # --- Kinect1 via libfreenect ---
    try:
        global freenect
        import freenect
        self.a = freenect.init()
        n_k1 = freenect.num_devices(self.a)
        if n_k1 > 1:
            self._initError('Multiple Kinect1s attached. Unsure how to handle')
        else:
            kinect = n_k1 == 1
    except ImportError:
        kinect = False
    # --- Kinect2 via pylibfreenect2 ---
    try:
        global FN2
        import pylibfreenect2 as FN2
        n_k2 = FN2.Freenect2().enumerateDevices()
        if n_k2 > 1:
            self._initError('Multiple Kinect2s attached. Unsure how to handle')
        else:
            kinect2 = n_k2 == 1
    except ImportError:
        kinect2 = False
    # --- Decide ---
    if kinect and kinect2:
        self._initError('Kinect1 and Kinect2 attached. Unsure how to handle')
    elif not (kinect or kinect2):
        self._initError('No depth sensor attached')
    else:
        self.device = 'kinect' if kinect else 'kinect2'
def run(self):
    # Thread body: open every attached Kinect, create one KinectProducer per
    # (device, stream) pair, then service the command queue / freenect run
    # loop until told to stop. NOTE: Python 2 code (xrange, itervalues).
    try:
        ctx = freenect.init()
        for index in xrange(freenect.num_devices(ctx)):
            self.devs.append(freenect.open_device(ctx, index))
        for device_num, dev in enumerate(self.devs):
            for stream_type in ('video', 'depth'):
                self.producers[device_num, stream_type] = \
                    KinectProducer(dev, device_num, stream_type, self)
        self.initialized.set()
        while self.keep_running:
            # Drain pending commands before (re)entering the run loop.
            while not self.command_q.empty():
                self.command_q.get()()
            if self._should_runloop():
                freenect.base_runloop(ctx, self._body)
            else:
                self.command_q.get()()  # block until the next command arrives
    finally:
        with self.lock:
            # Stop all producers and release the freenect context, then set
            # the initialized event so waiters are unblocked even on failure.
            self.keep_running = False
            for producer in self.producers.itervalues():
                producer.stop()
            self.producers = {}
            freenect.shutdown(ctx)
            self.devs = []
            self.initialized.set()
def __init__(self):
    """Set up the KinectV1 sensor handle and grab one warm-up depth frame.

    Uses hard-coded attributes for KinectV1's native depth (320x240) and
    color (640x480) resolutions.
    """
    # hard coded class attributes for KinectV1's native resolution
    self.name = 'kinect_v1'
    self.depth_width = 320
    self.depth_height = 240
    self.color_width = 640
    self.color_height = 480
    self.id = 0
    self.device = None
    self.depth = None
    self.color = None
    logger.warning(
        'Two kernels cannot access the Kinect at the same time. '
        'This will lead to a sudden death of the kernel. '
        'Be sure no other kernel is running before you initialize a KinectV1 object.'
    )
    logger.info("looking for kinect...")
    ctx = freenect.init()
    self.device = freenect.open_device(ctx, self.id)
    # Consistency fix: use the module logger instead of a stray debug print.
    logger.info("opened kinect device id %s", self.id)
    freenect.close_device(self.device)  # TODO Test if this has to be done!
    # get the first Depth frame already (the first one takes much longer
    # than the following)
    self.depth = self.get_frame()
    logger.info("KinectV1 initialized.")
def run(self):
    # Streamer thread: open the Kinect, register depth/video callbacks, and
    # alternate between servicing LED/stream updates (under the lock) and
    # running the freenect loop, until keep_running is cleared.
    # NOTE: Python 2 code — `dict.keys() + dict.keys()` concatenates lists.
    try:
        self.ctx = freenect.init()
        self.dev = freenect.open_device(self.ctx, 0)
        freenect.set_depth_mode(self.dev, freenect.RESOLUTION_MEDIUM,
                                freenect.DEPTH_11BIT)
        freenect.set_depth_callback(self.dev, self._depth_cb)
        freenect.set_video_mode(self.dev, freenect.RESOLUTION_MEDIUM,
                                freenect.VIDEO_RGB)
        freenect.set_video_callback(self.dev, self._video_cb)
        self.video_started = False
        self.depth_started = False
        while self.keep_running:
            with self.lock:
                # Apply a pending LED change while holding the lock.
                if self.led_update is not None:
                    freenect.set_led(self.dev, self.led_update)
                    self.led_update = None
                self.update_streams()
                if not self.video_started and not self.depth_started:
                    # No consumers: sleep until someone subscribes.
                    self.update_cond.wait()
                    continue
                self.update.clear()
                if not self.keep_running:
                    break
            freenect.base_runloop(self.ctx, self._body)
    finally:
        with self.lock:
            # Tell every consumer the streamer is gone, then shut down.
            for k in self.depth_consumers.keys() + self.video_consumers.keys():
                k.put(StreamerDied("The Kinect streamer died"))
            self.depth_consumers = {}
            self.video_consumers = {}
            self.update_streams()
            freenect.close_device(self.dev)
            freenect.shutdown(self.ctx)
def initkinect():
    """Open the last attached Kinect and return [ctx, dev].

    BUG FIX: the completion message was placed after the return statement
    and therefore never executed; it now prints before returning.
    """
    #set up kinect camera
    ctx = freenect.init()
    dev = freenect.open_device(ctx, freenect.num_devices(ctx) - 1)
    if not dev:
        freenect.error_open_device()
    print('kinect setup complete')
    return [ctx, dev]
def __init__(self, kinectid=1):
    # Open Kinect `kinectid` and switch its depth stream to registered
    # (depth-aligned-to-RGB) mode.
    self.kinectid = kinectid
    self.rgb = None  # latest RGB frame (filled in elsewhere)
    self.depth = None  # latest depth frame (filled in elsewhere)
    self.keep_running = True
    # init Kenect
    self.context = freenect.init()
    self.device = freenect.open_device(self.context, self.kinectid)
    self.init = freenect.set_depth_mode(self.device,
                                        freenect.RESOLUTION_MEDIUM,
                                        freenect.DEPTH_REGISTERED)
def start(self, degs=10):
    """Open the Kinect and wiggle the tilt motor.

    :param degs: tilt angle in degrees; the head is tilted to -degs and
        then to +degs.

    BUG FIX: the original ended with `self.intialised == True`, a no-op
    comparison instead of an assignment, so the flag was never set. (The
    attribute's original spelling is kept for caller compatibility.)
    """
    self.ctx = freenect.init()
    if not self.ctx:
        freenect.error_open_device()
    self.dev = freenect.open_device(self.ctx, 0)
    if not self.dev:
        freenect.error_open_device()
    freenect.set_tilt_degs(self.dev, -degs)
    freenect.set_tilt_degs(self.dev, degs)
    self.intialised = True  # TODO: fix spelling once callers are audited
    print('kinect Started')
def __init__(self, calibration_file='config/calibration.frame', debug=False): try: self.device = kinect.open_device(kinect.init(), 0) kinect.set_tilt_degs(self.device, -90) kinect.close_device(self.device) except Exception as e: logging.error('No Kinect detected') self.device = None with open(calibration_file) as calibration_file: self.calibration_frame = np.load(calibration_file) '''
def __init__(self, cam=-1):
    # Briefly open the Kinect just to switch its LED off, then release it.
    # The `cam` argument is currently unused here.
    # NOTE: Python 2 code (print statements).
    # Initialize freenect and get the context
    print 'Initalize kinect'
    context = freenect.init()
    # Open the device and get the device
    print 'Open device'
    self.kinect = freenect.open_device(context, 0)
    # Turn the led off
    print 'Turning the led off'
    freenect.set_led(self.kinect, freenect.LED_OFF)
    # Close the device
    print 'Closing the device'
    freenect.close_device(self.kinect)
def depth_view():
    """Live debug view: show the raw depth frame, its per-value histogram
    (black = raw bins, red = cleaned/rescaled bins) and the isolated-depth
    image, until 'q' is pressed.

    Idiom fix: the local previously named `bin` shadowed the builtin; it is
    now named `bins`.
    """
    import matplotlib.pyplot as plt
    # Briefly open the Kinect just to level the tilt, then release it.
    fn_ctx = fn.init()
    fn_dev = fn.open_device(fn_ctx, fn.num_devices(fn_ctx) - 1)
    fn.set_tilt_degs(fn_dev, 0)
    fn.close_device(fn_dev)
    x = np.arange(0, 256, 1)
    zeros = np.zeros_like(x)
    fig = plt.figure()
    ax = fig.add_subplot(111)
    view1, = ax.plot(x, zeros, '-k', label='black')
    view2, = ax.plot(x, zeros, '-r', label='red')
    # Bind hot-loop lookups to locals.
    np_array = np.array
    np_max = np.max
    view1_sety = view1.set_ydata
    view2_sety = view2.set_ydata
    ax_relim = ax.relim
    ax_autoscale_view = ax.autoscale_view
    while True:
        dep = get_depth(False)
        cv2.imshow('raw', dep)
        dep = cv2.medianBlur(dep, 3)
        bins = __get_bins(dep)
        clean = copy(bins)
        clean = __pipeline(clean)
        bins[:2] = 0  # drop the two lowest (noise) buckets
        clean *= np_max(bins)  # rescale the cleaned curve to the raw peak
        view1_sety(bins)
        view2_sety(clean)
        ax_relim()
        ax_autoscale_view()
        im = fig2img(fig)
        graph = np_array(im)
        # dep = remove_background(dep, 100)
        dep = isolate_depths(dep)
        # dep = convert_to_bw(dep)
        cv2.imshow('depth', dep)
        cv2.imshow('graph', graph)
        # show_image('all', frame, masked_frame, depth, video)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            cv2.destroyAllWindows()
            break
def temp_test():
    # Debug loop: show brightness-boosted depth frames and print their
    # value range until 'q' is pressed.
    fn_ctx = fn.init()
    fn_dev = fn.open_device(fn_ctx, fn.num_devices(fn_ctx) - 1)
    fn.set_tilt_degs(fn_dev, 0)
    fn.close_device(fn_dev)
    while True:
        dep = get_depth()
        # NOTE(review): `*=` multiplies dep by (dep * 1.3), i.e. squares the
        # signal rather than boosting it; a plain brightness boost would be
        # `dep = (dep * 1.3).astype(np.uint8)`. Confirm which was intended.
        dep *= (dep * 1.3).astype(np.uint8)
        print("{}\t,\t{}".format(np.min(dep), np.max(dep)))
        cv2.imshow('depth', dep)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            cv2.destroyAllWindows()
            fn.sync_stop()
            break
def set_tilt():
    # Step the Kinect tilt motor from TILT_START up to TILT_MAX in
    # TILT_STEP increments (setting LED state 6 each step), pausing 3 s per
    # step, then return the head to level.
    # NOTE: Python 2 code (print statement, xrange).
    TILT_MAX = 30
    TILT_STEP = 10
    TILT_START = 0
    ctx = freenect.init()
    dev = freenect.open_device(ctx, freenect.num_devices(ctx) - 1)
    if not dev:
        freenect.error_open_device()
    print "Starting TILT Cycle"
    for tilt in xrange(TILT_START, TILT_MAX + TILT_STEP, TILT_STEP):
        print "Setting TILT: ", tilt
        freenect.set_led(dev, 6)
        freenect.set_tilt_degs(dev, tilt)
        time.sleep(3)
    freenect.set_tilt_degs(dev, 0)
def keyPressEvent(self, ev):
    """Qt key handler: SPACE toggles pausing of the capture timer `t`;
    'S' saves the current depth frame asynchronously.

    FIX: the original re-initialized freenect and opened a device on every
    keypress without ever using or closing the handles, leaking a context
    per event; the unused handles have been removed.
    """
    global paused, depth_arr
    # SPACEBAR to 'pause' and 'unpause'
    if (ev.key() == 32 and paused):
        t.start(10)
        paused = False
    elif (ev.key() == 32):
        t.stop()
        paused = True
    # 'S' key to save curr frame to .pcd
    elif (ev.key() == 83):
        background = AsyncWrite(depth_arr)
        background.start()
def density_plot():
    # Animated histogram of depth values: repeatedly grab a depth frame,
    # median-blur it, and plot its 256-bin value counts with matplotlib's
    # FuncAnimation until the plot window is closed.
    fn_ctx = fn.init()
    fn_dev = fn.open_device(fn_ctx, fn.num_devices(fn_ctx) - 1)
    fn.set_tilt_degs(fn_dev, 0)
    fn.close_device(fn_dev)
    # Local bindings for hot-loop lookups.
    show_image = cv2.imshow
    waitkey = cv2.waitKey
    ravel = np.ravel
    countbin = np.bincount
    length = 256
    nums = np.arange(0, length, 1)
    zero = np.zeros_like(nums)
    import matplotlib.pyplot as plt
    import matplotlib.animation as animation
    fig, ax = plt.subplots()
    line, = ax.plot(nums, zero)
    ax.set_ylim(0, 10000)
    ax.set_xlim(0, 256)
    set_y_data = line.set_ydata

    def update(data):
        # FuncAnimation callback: push the new bin counts into the line.
        set_y_data(data)
        return line,

    def get_dep():
        # One flattened, median-blurred depth frame.
        dep = get_depth()
        dep = cv2.medianBlur(dep, 3, dep)
        dep = ravel(dep)
        # dep = medfilt(dep, 21).astype(np.uint8)
        return dep

    def data_gen():
        # Infinite generator of per-frame histograms.
        while True:
            yield countbin(get_dep(), minlength=length)

    ani = animation.FuncAnimation(fig, update, data_gen)
    plt.show()  # blocks until the window is closed
    cv2.destroyAllWindows()
    fn.sync_stop()
def loop(processimg):
    """Tilt the Kinect head to 10 degrees, then display processimg(frame)
    for each IR frame until ESC (key 27) is pressed."""
    context = freenect.init()
    device = freenect.open_device(context, 0)
    freenect.set_tilt_degs(device, 10)
    freenect.close_device(device)
    while True:
        frame = getimg_irkinect()
        cv2.imshow('processing', processimg(frame))
        key = cv2.waitKey(10)
        if key == 27:  # ESC
            break
    cv2.destroyAllWindows()
def set_kinect_angle(angle, device_index=0): # Clamp angle to [-30, 30] angle = min(angle, max(angle, -30), 30) print "Setting Kinect angle to", angle # We have to stop the synchronous runloop to interact with the device. freenect.sync_stop() # Open the device ctx = freenect.init() dev = freenect.open_device(ctx, device_index) # Set angle freenect.set_tilt_degs(dev, angle) # Shutdown context, allowing synchronous runloop to start freenect.shutdown(ctx) return angle
def __init__(self, devno=0, resolution=RESOLUTION_MEDIUM,
             depth_mode=DEPTH_11BIT, video_mode=VIDEO_RGB):
    """Open Kinect device `devno` and record the requested stream modes.

    :raises KinectError: if the freenect context cannot be created, or the
        device cannot be opened (the context is shut down in that case).
    """
    self._ctx = freenect.init()
    if not self._ctx:
        raise KinectError("Cannot connect to device.")
    self.devno = devno
    self._dev = freenect.open_device(self._ctx, self.devno)
    if not self._dev:
        freenect.shutdown(self._ctx)  # don't leak the context on failure
        raise KinectError("Cannot open device.")
    self._depth_callback = None
    self._video_callback = None
    self._resolution = resolution
    self.depth_mode = depth_mode
    self.video_mode = video_mode
def _identifyDevice(self):
    """Detect an attached Kinect1 (libfreenect) or RealSense (pyrealsense2)
    sensor and record which one in self.device.

    BUG FIX: in the RealSense branch the zero-device case fell through to
    the `else` of an independent `if`, setting realsense = True when no
    device was attached; the chain is now if/elif/else, matching the
    Kinect2 variant of this method elsewhere in the codebase.
    """
    try:
        global freenect
        import freenect
        self.a = freenect.init()
        if freenect.num_devices(self.a) == 0:
            kinect = False
        elif freenect.num_devices(self.a) > 1:
            self._initError(
                'Multiple Kinect1s attached. Unsure how to handle')
        else:
            kinect = True
    except ImportError:
        kinect = False
    try:
        global rs
        import pyrealsense2 as rs
        ctx = rs.context()
        if len(ctx.devices) == 0:
            realsense = False
        elif len(ctx.devices) > 1:
            self._initError(
                'Multiple RealSense devices attached. Unsure how to handle'
            )
        else:
            realsense = True
    except ImportError:
        realsense = False
    if kinect and realsense:
        self._initError(
            'Kinect1 and RealSense devices attached. Unsure how to handle')
    elif not kinect and not realsense:
        self._initError('No depth sensor attached')
    elif kinect:
        self.device = 'kinect'
    else:
        self.device = 'realsense'
def run(self):
    # Streamer thread: open the Kinect, register depth/video callbacks, and
    # alternate between servicing LED/stream updates (under the lock) and
    # running the freenect loop, until keep_running is cleared.
    # NOTE: Python 2 code — `dict.keys() + dict.keys()` concatenates lists.
    try:
        self.ctx = freenect.init()
        self.dev = freenect.open_device(self.ctx, 0)
        freenect.set_depth_mode(self.dev, freenect.RESOLUTION_MEDIUM,
                                freenect.DEPTH_11BIT)
        freenect.set_depth_callback(self.dev, self._depth_cb)
        freenect.set_video_mode(self.dev, freenect.RESOLUTION_MEDIUM,
                                freenect.VIDEO_RGB)
        freenect.set_video_callback(self.dev, self._video_cb)
        self.video_started = False
        self.depth_started = False
        while self.keep_running:
            with self.lock:
                # Apply a pending LED change while holding the lock.
                if self.led_update is not None:
                    freenect.set_led(self.dev, self.led_update)
                    self.led_update = None
                self.update_streams()
                if not self.video_started and not self.depth_started:
                    # No consumers: sleep until someone subscribes.
                    self.update_cond.wait()
                    continue
                self.update.clear()
                if not self.keep_running:
                    break
            freenect.base_runloop(self.ctx, self._body)
    finally:
        with self.lock:
            # Tell every consumer the streamer is gone, then shut down.
            for k in self.depth_consumers.keys(
            ) + self.video_consumers.keys():
                k.put(StreamerDied("The Kinect streamer died"))
            self.depth_consumers = {}
            self.video_consumers = {}
            self.update_streams()
            freenect.close_device(self.dev)
            freenect.shutdown(self.ctx)
#!/usr/bin/env python import freenect import cv2 import frame_convert2 import numpy as np cv2.namedWindow('Depth') cv2.namedWindow('RGB') keep_running = True mdev = freenect.open_device(freenect.init(),0) freenect.set_depth_mode(mdev,freenect.RESOLUTION_MEDIUM,freenect.DEPTH_REGISTERED) lower = 0 upper = 100 def display_depth(dev, data, timestamp): global keep_running,lower,upper data = 200 * np.logical_and(data > lower, data < upper) data = data.astype(np.uint8) canny = cv2.Canny(data, 100, 255) cv2.imshow('Depth',data) cv2.imshow('RGB',canny) lower+=10 upper+=10 if upper>1900: lower = 0 upper = 100 if cv2.waitKey(10) == 27: keep_running = False
# Kinect capture setup (Python 2, legacy `opencv` bindings): initialize
# the camera, grab one depth and one RGB frame to learn the stream sizes,
# and compute full- and half-size frame dimensions for the processing chain.
import sys
import math
import os
from cv2 import imwrite
from opencv.cv import *
from opencv import adaptors
from opencv.highgui import *
from time import time
from freenect import sync_get_depth as get_depth, sync_get_video as get_video, init
import numpy as np
from rx_config import *
from timing_stats import *

#initialize the camera
ctx = init()

# Grab an initial frame to get the video size
global depth, rgb
(depth, _), (rgb, _) = get_depth(), get_video()
rgbFrameSize = cvGetSize(rgb)
depthSize = cvGetSize(depth)
# Half-size dimensions for the downsampled processing buffers.
dwnFrameSize = cvSize(rgbFrameSize.width / 2, rgbFrameSize.height / 2)
dwnDepthSize = cvSize(depthSize.width / 2, depthSize.height / 2)
print 'rgbSize = %d %d' % (rgbFrameSize.width, rgbFrameSize.height)
print 'depthSize = %d %d' % (depthSize.width, depthSize.height)
# Allocate processing chain image buffers the same size as
#% ipython /home/m/Dropbox/maze/libfreenect-goodies/demo_freenect.py #% import freenect k=freenect.init()
def main_vision(load):
    # Capture/recognition loop: grab depth+RGB frames, mask the RGB image by
    # depth, extract key points, and either predict the object class
    # (load=True) or collect labeled descriptors into the data set
    # (load=False). Recurses once with load=True after building the brain.
    # NOTE: Python 2 code (raw_input).
    # inits
    fn_ctx = fn.init()
    fn_dev = fn.open_device(fn_ctx, fn.num_devices(fn_ctx) - 1)
    fn.set_tilt_degs(fn_dev, 0)
    fn.close_device(fn_dev)
    key_point = KeyPoints(150)
    predictor = prediction(ModelPath)
    preds = []
    # optimization: bind frequently used attributes/functions to locals
    t0 = 0.0
    t1 = 0.0
    fps = 0.0
    total_fps = 0.0
    frames = 0
    kp_speed = key_point._get_kp_speedup()
    draw_speed = key_point._get_draw_speedup()
    proc_speed = key_point._get_process_speedup()
    cvtColor = cv2.cvtColor
    BGR2RGB = cv2.COLOR_BGR2RGB
    get_kp = key_point.get_key_points
    draw_kp = key_point.draw_key_points
    # NOTE(review): this attribute access happens outside a class body, so
    # no name mangling applies; it only resolves if the attribute is
    # literally named "__process_image" — confirm against KeyPoints.
    process_image = key_point.__process_image
    show_image = cv2.imshow
    wait_for_key = cv2.waitKey
    copy_thing = copy.copy
    num_features = key_point.get_num_features()
    arr_shape = np.shape
    shape_check = (num_features, 32)
    ravel = np.ravel
    append_pred = preds.append
    get_time = time.time
    current_class = 0
    if load:
        brain = predictor.load_brain()
        pred_speed = predictor.get_pred_speed()
        predict = predictor.predict
    else:
        add_speed = predictor.get_add_speed()
        add_data = predictor.add_data
        get_length = predictor.get_data_length
    if load:
        net = Neural_Net(predictor.brain.getPoint(),
                         np.vstack(predictor.brain.getData()), 4800 * 2,
                         num_features)
        nl_predict = net.predict
        nl_speed = net.get_neural_speed()
    # mainLoop
    while True:
        t0 = get_time()
        # Get a fresh frame
        depth = get_depth()
        frame = get_video()
        show_image('Raw Image', cvtColor(frame, BGR2RGB))
        # Process Depth Image
        # depth = remove_background(depth, 25)
        depth = remove_background_percent(depth, .5, 50)
        depth = convert_to_bw(depth)
        mask = make_mask(depth)
        # Process Image
        frame = cvtColor(frame, BGR2RGB)
        video = copy_thing(frame)
        frame = process_image(frame, proc_speed)
        # Make Masked Frame
        masked_frame = copy_thing(frame)
        masked_frame[mask] = 0
        # Process Key Points
        kp, des = get_kp(masked_frame, kp_speed)
        video = draw_kp(video, kp, True, speedup=draw_speed)
        # Predict current
        if (load) and (des is not None) and (arr_shape(des) == shape_check):
            pred = predict(ravel(des), pred_speed)
            append_pred(pred)
            print(pred)
            print(nl_predict([ravel(des)], nl_speed))
        # Add object description to data set
        if (not load) and (des is not None) and (arr_shape(des) == shape_check):
            add_data(add_speed, np.ravel(des), current_class)
            print('Current Class and Length:\t%i\t%i' %
                  (get_length(), current_class))
        t1 = get_time()
        fps = (1 / (t1 - t0))
        total_fps += fps
        frames += 1
        print('%.2f FPS' % fps)
        show_image('masked image', masked_frame)
        show_image('depth', depth)
        show_image('key points', video)
        # show_image('all', frame, masked_frame, depth, video)
        if wait_for_key(1) & 0xFF == ord('q'):
            cv2.destroyAllWindows()
            if load:
                break
            # In collection mode, ask whether to advance the class or quit.
            print('Current Class: %i\nn : Next Class\nr : Continue Current Class\nq : Quit' % (current_class))
            inp = raw_input()
            if inp == 'n':
                current_class += 1
            elif inp == 'q':
                break
    # print(np.mean(preds))
    cv2.destroyAllWindows()
    print('Average FPS: %.2f' % (total_fps / frames))
    fn.sync_stop()
    if not load:
        predictor.create_brain()
        main_vision(True)
print(".") global i, working if not working: working = True depths[i] = depth filtered_depth = np.median(depths, axis=0) pointclouds.append(point_cloud(filtered_depth)) timestamps.append(timestamp) i += 1 i = i % n working = False if __name__ == "__main__": parser = OptionParser() parser.add_option("-o", "--output", dest="output", help="Output data in .npz format to OUTPUTFILE", metavar="OUTPUTFILE") (options, args) = parser.parse_args() context = freenect.init() device = freenect.open_device(context, 0) freenect.set_depth_mode(device, freenect.RESOLUTION_MEDIUM, freenect.DEPTH_MM) try: print("Capturing...") freenect.runloop(dev=device, depth=depth_callback) except KeyboardInterrupt: if options.output: print("Dumping...") np.savez_compressed(options.output, tangoPointclouds=pointclouds, tangoPointcloudTimestamps=timestamps)
def get_ctx():
    """Lazily create and return the shared module-level freenect context."""
    global _ctx
    _ctx = _ctx or fn.init()
    return _ctx
import cv2 import frame_convert2 import numpy as np from apriltag import apriltag cv2.namedWindow('Video') print('Press ESC in window to stop') def get_video(): return frame_convert2.video_cv( freenect.sync_get_video_with_res( resolution=freenect.RESOLUTION_HIGH)[0]) freenect.init() freenect.sync_get_video_with_res(resolution=freenect.RESOLUTION_HIGH) freenect.sync_set_autoexposure(False) freenect.sync_set_whitebalance(False) detector = apriltag("tag36h11", threads=4, decimate=2.0) object_points = np.array( [[-0.1, -0.1, 0.0], [0.1, -0.1, 0.0], [0.1, 0.1, 0.0], [-0.1, 0.1, 0.0]], dtype="double") camera_matrix = np.array([[1.09194704e+03, 0.00000000e+00, 6.79986322e+02], [0.00000000e+00, 1.09300427e+03, 5.09471427e+02], [0.00000000e+00, 0.00000000e+00, 1.00000000e+00]], dtype="double") dist_coeffs = np.array(
def main():
    # ROS camera node: set the Kinect tilt, then publish video, depth, raw
    # image and CameraInfo messages at PUBLISH_RATE until shutdown.
    rospy.init_node("camera")
    rospy.loginfo("Starting Camera Node")
    rospy.on_shutdown(shutdown_hook)
    # Set initial camera angle
    ctx = freenect.init()
    dev = freenect.open_device(ctx, freenect.num_devices(ctx) - 1)
    freenect.set_tilt_degs(dev, 20)
    freenect.close_device(dev)
    r = rospy.Rate(PUBLISH_RATE)
    cv_bridge = CvBridge()
    video_pub = rospy.Publisher('/camera/video', Image, queue_size=10)
    depth_pub = rospy.Publisher('/camera/depth', Image, queue_size=10)
    # Uses a different encoding for april tags
    camera_image_pub = rospy.Publisher('/camera/image_raw', Image,
                                       queue_size=10)
    camera_info_pub = rospy.Publisher('/camera/camera_info', CameraInfo,
                                      queue_size=10)
    # Kinect manually calibrated using this
    # http://people.csail.mit.edu/peterkty/teaching/Lab4Handout_Fall_2016.pdf
    camera_info = CameraInfo()
    hd = Header()  # NOTE(review): unused — presumably leftover; confirm
    camera_info.height = 480
    camera_info.width = 640
    camera_info.distortion_model = "plumb_bob"
    camera_info.D = [
        0.16966890693679473, -0.32564392755677646, 0.0014273722857157428,
        -0.0007780067287402459, 0.0
    ]
    camera_info.K = [
        522.7790149706918, 0.0, 317.5941836796907, 0.0, 523.5195539902463,
        255.18973237498545, 0.0, 0.0, 1.0
    ]
    camera_info.P = [
        532.5130615234375, 0.0, 317.01325625466416, 0.0, 0.0,
        535.176025390625, 255.7121671461573, 0.0, 0.0, 0.0, 1.0, 0.0
    ]
    camera_info.R = [1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0]
    while not rospy.is_shutdown():
        video = get_video()
        depth = get_depth()
        video_msg = cv_bridge.cv2_to_imgmsg(video, encoding="passthrough")
        depth_msg = cv_bridge.cv2_to_imgmsg(depth, encoding="passthrough")
        cam_img_msg = cv_bridge.cv2_to_imgmsg(video, encoding="bgr8")
        # CameraInfo must share the image header so consumers can pair them.
        camera_info.header = video_msg.header
        video_pub.publish(video_msg)
        depth_pub.publish(depth_msg)
        camera_image_pub.publish(cam_img_msg)
        camera_info_pub.publish(camera_info)
        r.sleep()
def return_mean(a): """ * Function Name: return_mean * Input: Depth Frame or any other matrix. * Output: Returns mean of the frame * Logic: It reduces the noise and calculates the mean of the pixel values in the frame using mean() function * Example Call: return_mean(a) """ median = cv2.medianBlur(a,5) mediane = cv2.medianBlur(a,5) rect = mediane[0:479, 0:639] mean = rect.mean() return mean ctx = freenect.init() #initialize freenect dev = freenect.open_device(ctx, freenect.num_devices(ctx) - 1) #open Kinect Device freenect.set_tilt_degs(dev, 30) #Tilts kinect to 30 degrees time.sleep(1) freenect.set_tilt_degs(dev, 0) #Tilts kinect to 0 degree time.sleep(1) freenect.close_device(dev) #closes Kinect while(True): a = get_depth() #gets the depth data from Kinect mean = return_mean(a) #returns the mean of the depth data if mean > 240: ser.write("\x38") #if the front area has more depth than the threshold than the robot will move forward else:
# Quick 3-D scatter visualization of one Kinect depth frame (Python 2).
# Samples every 5th pixel and plots (row, col, depth) with matplotlib.
import numpy as np
import time
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
import freenect

ctx = freenect.init()
time.sleep(5)  # give the device time to settle before the sync grab
z = freenect.sync_get_depth()[0]
print z
x = []
y = []
zi = []
# Subsample the 480x640 frame on a 5-pixel grid.
for i in range(0, 479, 5):
    for j in range(0, 639, 5):
        x.append(i)
        y.append(j)
        zi.append(z[i][j])
#print z[480][640]
#x, y, z = zip(*z)
print len(x)
print len(y)
print len(zi)
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.scatter(x, y, zi)
plt.show()
def body(dev, ctx):
    """Per-iteration callback: step through LED states and tilt angles once
    per second, printing the accelerometer reading each step.

    Raises freenect.Kill to stop the runloop once keep_running is cleared.
    """
    global last_time
    global led
    global tilt
    if not keep_running:
        raise freenect.Kill
    if led > 6 and tilt > 30:
        return  # full cycle done
    freenect.set_led(dev, led)
    freenect.set_tilt_degs(dev, tilt)
    print('led[%d] tilt[%d] accel[%s]' % (led, tilt, freenect.get_accel(dev)))
    time.sleep(1)
    led = led + 1
    tilt = tilt + 5


def handler(signum, frame):
    """SIGINT handler: ask the loop to stop."""
    global keep_running
    keep_running = False


print('Press Ctrl-C to stop')
dev = freenect.open_device(freenect.init(), 0)
# BUG FIX: the SIGINT handler was registered *after* the infinite loop, so
# it never took effect (Ctrl-C killed the process instead of clearing
# keep_running). Register it before entering the loop.
signal.signal(signal.SIGINT, handler)
while True:
    body(dev, 0)
#freenect.runloop(body=body)
def is_kinect_available():
    """Report whether at least one Kinect device is currently attached."""
    ctx = freenect.init()
    device_count = freenect.num_devices(ctx)
    return device_count > 0
# Quick 3-D scatter visualization of one Kinect depth frame (Python 2).
# Samples every 5th pixel and plots (row, col, depth) with matplotlib.
import numpy as np
import time
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
import freenect

ctx = freenect.init()
time.sleep(5)  # give the device time to settle before the sync grab
z = freenect.sync_get_depth()[0]
print z
x = []
y = []
zi = []
# Subsample the 480x640 frame on a 5-pixel grid.
for i in range(0, 479, 5):
    for j in range(0, 639, 5):
        x.append(i)
        y.append(j)
        zi.append(z[i][j])
#print z[480][640]
#x, y, z = zip(*z)
print len(x)
print len(y)
print len(zi)
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.scatter(x, y, zi)
plt.show()
# Pygame + Kinect controller setup: colors, the main window, a one-shot
# Kinect LED shutdown, and controller geometry/threshold constants.
GREEN = (0, 255, 0)
RED = (255, 0, 0)
YELLOW = (255, 255, 0)
pygame.init()  #Initiates pygame
xSize, ySize = 640, 480  #Sets size of window
screen = pygame.display.set_mode((xSize, ySize), pygame.RESIZABLE)  #creates main surface
#KINECT
depth_raw = None
num_to_track = 1000  #Number of points to use to determine where the closest 'thing' is
x_grid, y_grid = np.ogrid[0:xSize, 0:ySize]
ctx = freenect.init()  #Start up the kinect
# NOTE(review): freenect.LED_OFF is passed as the *device index* here; it
# presumably equals 0 so this opens device 0, but an explicit index would
# be clearer — confirm.
dev = freenect.open_device(ctx, freenect.LED_OFF)  #Pointer to the device itself used for led handling
freenect.set_led(dev, 0)  #Turn led off
freenect.close_device(dev)  #Release the kinect
#CONTROLLER
r_small = 15
r_medium = 75
r_large = 125
r_central_button = 50
z_main_action = 50
z_switch_controller = 50
huge_motion_limit = 40