示例#1
0
	def run(self):
		"""Streamer thread main loop.

		Opens Kinect device 0, registers depth/video callbacks, and pumps
		freenect's runloop until ``keep_running`` is cleared. On any exit
		(normal or exception) every registered consumer queue receives a
		``StreamerDied`` sentinel and the device/context are torn down.
		"""
		try:
			self.ctx = freenect.init()
			self.dev = freenect.open_device(self.ctx, 0)

			# Medium-resolution 11-bit depth and RGB video, delivered via
			# the private callback methods.
			freenect.set_depth_mode(self.dev, freenect.RESOLUTION_MEDIUM, freenect.DEPTH_11BIT)
			freenect.set_depth_callback(self.dev, self._depth_cb)
			freenect.set_video_mode(self.dev, freenect.RESOLUTION_MEDIUM, freenect.VIDEO_RGB)
			freenect.set_video_callback(self.dev, self._video_cb)

			self.video_started = False
			self.depth_started = False

			while self.keep_running:
				with self.lock:
					# Apply an LED change requested from another thread.
					if self.led_update is not None:
						freenect.set_led(self.dev, self.led_update)
						self.led_update = None
					self.update_streams()
					if not self.video_started and not self.depth_started:
						# No stream active: block until another thread
						# signals a change (update_cond presumably wraps
						# self.lock — TODO confirm), then re-check.
						self.update_cond.wait()
						continue
					self.update.clear()
					if not self.keep_running:
						break
				# Pump freenect's event loop outside the lock; _body is
				# expected to break out when state changes.
				freenect.base_runloop(self.ctx, self._body)
		finally:
			with self.lock:
				# Python 2 idiom: dict.keys() returns lists, so '+'
				# concatenates both consumer sets.
				for k in self.depth_consumers.keys() + self.video_consumers.keys():
					k.put(StreamerDied("The Kinect streamer died"))
				self.depth_consumers = {}
				self.video_consumers = {}
				self.update_streams()
			freenect.close_device(self.dev)
			freenect.shutdown(self.ctx)
示例#2
0
    def run(self):
        """Worker loop: open every attached Kinect and serve producers.

        Builds one KinectProducer per (device, stream-type) pair, then
        alternates between draining the command queue and pumping
        freenect's runloop until ``keep_running`` is cleared. The
        ``finally`` clause stops all producers, shuts freenect down, and
        re-sets ``initialized`` so any thread still waiting on startup is
        released.
        """
        try:
            ctx = freenect.init()

            for index in xrange(freenect.num_devices(ctx)):
                self.devs.append(freenect.open_device(ctx, index))

            # One producer per device per stream type.
            for device_num, dev in enumerate(self.devs):
                for stream_type in ('video', 'depth'):
                    self.producers[device_num, stream_type] = \
                        KinectProducer(dev, device_num, stream_type, self)

            # Signal waiters that devices and producers are ready.
            self.initialized.set()

            while self.keep_running:
                # Each queue entry is a callable command; run them all.
                while not self.command_q.empty():
                    self.command_q.get()()

                if self._should_runloop():
                    freenect.base_runloop(ctx, self._body)
                else:
                    # Nothing to stream: block until the next command.
                    self.command_q.get()()
        finally:
            with self.lock:
                self.keep_running = False
                # Python 2 dict iteration.
                for producer in self.producers.itervalues():
                    producer.stop()
                self.producers = {}
            freenect.shutdown(ctx)
            self.devs = []
            # Wake anyone still blocked waiting for initialization.
            self.initialized.set()
def start():
    """Tilt the Kinect, warm up the depth stream, then run the
    door-detection/steering loop until any key is pressed."""
    global XAXIS, YAXIS, TEST_CASES, DIRECTION, GLOBAL_DEPTH_MAP

    # Interactive matplotlib figure for plots made by the helpers.
    plt.ion()
    plt.figure()

    # Point the last attached Kinect 20 degrees up, then release it.
    context = freenect.init()
    device = freenect.open_device(context, freenect.num_devices(context) - 1)
    freenect.set_tilt_degs(device, 20)
    freenect.close_device(device)

    DIRECTION = 0

    # Discard a few frames so the sensor settles before acting on data.
    for _ in xrange(5):
        initial_map = get_depth()

    while True:
        GLOBAL_DEPTH_MAP = get_depth()  # latest depth frame
        back_movement(GLOBAL_DEPTH_MAP)
        right_edges = contours_return(GLOBAL_DEPTH_MAP, -10)
        left_edges = contours_return(GLOBAL_DEPTH_MAP, 10)
        door_detection(right_edges, left_edges)
        if DOOR_FLAG:
            door_movement()
        else:
            regular_movement()
        cv2.imshow('final', GLOBAL_DEPTH_MAP)
        if cv2.waitKey(1) == -1:
            continue
        # Any keypress: command the robot to halt and close the serial link.
        SERIALDATA.write('\x35')
        SERIALDATA.close()
        break

    cv2.destroyAllWindows()
示例#4
0
def loop(processimg):
	if not use_webcam:
		ctx = freenect.init()
		dev = freenect.open_device(ctx, 0)
		freenect.set_tilt_degs(dev, 10)
		freenect.close_device(dev)
		
	
	cv2.namedWindow('processing')
	for k, v in params.iteritems():
		cv2.createTrackbar(k, 'processing', v, 255, onchange(k))
	
	runonce = True
	while runonce:
		#runonce = False
		if imgpath != "":
			img = cv2.imread(imgpath)
		else:
			img = getimg()

		cv2.imshow('processing', cv2.resize(processimg(img), (width, height)))
		char = cv2.waitKey(10)
		if char == 27:
			break
		elif char == ord('p'):
			for k, v in params.iteritems():
				print "%s: %d" % (k, v)
		#freenect.set_tilt_degs(dev, pid)
	cv2.destroyAllWindows()
def start():
    """Tilt the Kinect to 20 degrees, then run the door-detection loop.

    Streams depth frames, looks for door contours on both sides, and
    drives the robot accordingly. Any keypress stops the robot (0x35
    over serial) and closes the port.
    """
    # The global declaration must precede any assignment to these names:
    # the original assigned ``ser`` before declaring it global, which is
    # a SyntaxWarning in Python 2 and a SyntaxError in Python 3.
    global test_cases, ser, global_depth_map, DOOR_FLAG, DOOR_COUNT
    ser = serial.Serial('/dev/ttyUSB0')	#initialization of serial communication
    ctx = freenect.init()
    dev = freenect.open_device(ctx, freenect.num_devices(ctx) - 1)

    freenect.set_tilt_degs(dev, 20)
    freenect.close_device(dev)
    test_cases = [True, True, True]

    while True:
        global_depth_map = get_depth()	#returns the depth frame
        contoursright = contours_return(global_depth_map, -10)
        contoursleft = contours_return(global_depth_map, 10)
        door_detection(contoursright, contoursleft, test_cases)
        if DOOR_FLAG:
            door_movement(global_depth_map)
        else:
            regular_movement(global_depth_map)
        cv2.imshow('final', global_depth_map)
        if cv2.waitKey(1) != -1:
            # Any key: command stop (0x35) and release the serial port.
            ser.write('\x35')
            ser.close()
            break

    cv2.destroyAllWindows()
示例#6
0
File: bbg.py  Project: spawnedc/bbg
 def __init__(self, cam=-1):
     # Initialize freenect and get the context
     print 'Initalize kinect'
     context = freenect.init()
     # Open the device and get the device
     print 'Open device'
     self.kinect = freenect.open_device(context, 0)
     # Turn the led off
     print 'Turning the led off'
     freenect.set_led(self.kinect, freenect.LED_OFF)
     # Close the device
     print 'Closing the device'
     freenect.close_device(self.kinect)
示例#7
0
def depth_view():
    """Live debug view: raw depth, two histogram curves, isolated depths.

    Opens the last attached Kinect only to level its tilt, then
    repeatedly grabs depth frames via get_depth(), plots each frame's
    value histogram (raw in black, pipeline-cleaned in red) through
    matplotlib, and shows the frames with OpenCV. Press 'q' to quit.
    """
    import matplotlib.pyplot as plt

    fn_ctx = fn.init()
    fn_dev = fn.open_device(fn_ctx, fn.num_devices(fn_ctx) - 1)
    fn.set_tilt_degs(fn_dev, 0)
    fn.close_device(fn_dev)

    # 256 bins — assumes get_depth() yields 8-bit values; TODO confirm.
    x = np.arange(0, 256, 1)
    zeros = np.zeros_like(x)
    fig = plt.figure()
    ax = fig.add_subplot(111)
    view1, = ax.plot(x, zeros, '-k', label='black')
    view2, = ax.plot(x, zeros, '-r', label='red')

    # Bind methods to locals to avoid attribute lookups in the hot loop.
    np_array = np.array
    np_max = np.max
    view1_sety = view1.set_ydata
    view2_sety = view2.set_ydata
    ax_relim = ax.relim
    ax_autoscale_view = ax.autoscale_view
    while True:
        dep = get_depth(False)
        cv2.imshow('raw', dep)

        dep = cv2.medianBlur(dep, 3)
        bin = __get_bins(dep)  # NOTE: 'bin' shadows the builtin
        clean = copy(bin)
        clean = __pipeline(clean)
        # Zero the two nearest bins — presumably sensor noise; TODO confirm.
        bin[:2] = 0
        # Scale the cleaned curve to the raw histogram's peak for overlay.
        clean *= np_max(bin)
        view1_sety(bin)
        view2_sety(clean)
        ax_relim()
        ax_autoscale_view()
        # Render the matplotlib figure to an image so cv2 can display it.
        im = fig2img(fig)
        graph = np_array(im)


        # dep = remove_background(dep, 100)
        dep = isolate_depths(dep)
        # dep = convert_to_bw(dep)

        cv2.imshow('depth', dep)
        cv2.imshow('graph', graph)
        # show_image('all', frame, masked_frame, depth, video)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            cv2.destroyAllWindows()
            break
示例#8
0
def temp_test():
    """Quick manual test: show scaled depth frames and print their range.

    Press 'q' to close the window and stop the freenect sync runloop.
    """
    fn_ctx = fn.init()
    fn_dev = fn.open_device(fn_ctx, fn.num_devices(fn_ctx) - 1)
    fn.set_tilt_degs(fn_dev, 0)
    fn.close_device(fn_dev)

    while True:
        dep = get_depth()
        # NOTE(review): this multiplies the frame by 1.3x *itself* (with
        # uint8 wraparound), not a plain 1.3x brightness scale — possibly
        # ``dep = (dep * 1.3).astype(np.uint8)`` was intended; confirm.
        dep *= (dep * 1.3).astype(np.uint8)
        print("{}\t,\t{}".format(np.min(dep), np.max(dep)))

        cv2.imshow('depth', dep)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            cv2.destroyAllWindows()
            fn.sync_stop()
            break
示例#9
0
def density_plot():
    """Animated histogram of depth values streaming from the Kinect.

    Levels the Kinect's tilt, then uses matplotlib's FuncAnimation to
    continuously plot the bincount of each (median-blurred) depth
    frame. Runs until the matplotlib window is closed.
    """
    fn_ctx = fn.init()
    fn_dev = fn.open_device(fn_ctx, fn.num_devices(fn_ctx) - 1)
    fn.set_tilt_degs(fn_dev, 0)
    fn.close_device(fn_dev)

    # Locals are cheaper than attribute lookups in the frame loop.
    # NOTE(review): show_image and waitkey are bound but never used here.
    show_image = cv2.imshow
    waitkey = cv2.waitKey
    ravel = np.ravel
    countbin = np.bincount

    # One histogram bin per possible 8-bit depth value — assumes
    # get_depth() yields uint8 frames; TODO confirm.
    length = 256
    nums = np.arange(0, length, 1)
    zero = np.zeros_like(nums)

    import matplotlib.pyplot as plt
    import matplotlib.animation as animation

    fig, ax = plt.subplots()
    line, = ax.plot(nums, zero)
    ax.set_ylim(0, 10000)
    ax.set_xlim(0, 256)
    set_y_data = line.set_ydata

    def update(data):
        # Animation callback: swap in the newest histogram.
        set_y_data(data)
        return line,

    def get_dep():
        # Fetch one frame, denoise in place, and flatten for bincount.
        dep = get_depth()
        dep = cv2.medianBlur(dep, 3, dep)
        dep = ravel(dep)
        # dep = medfilt(dep, 21).astype(np.uint8)
        return dep

    def data_gen():
        # Endless stream of per-frame histograms for FuncAnimation.
        while True:
            yield countbin(get_dep(), minlength=length)

    ani = animation.FuncAnimation(fig, update, data_gen)
    plt.show()

    cv2.destroyAllWindows()

    fn.sync_stop()
示例#10
0
def loop(processimg):
	"""Continuously display processimg() applied to Kinect IR frames.

	Tilts the Kinect to 10 degrees once at startup; ESC closes the
	window and returns.
	"""
	kctx = freenect.init()
	kdev = freenect.open_device(kctx, 0)
	freenect.set_tilt_degs(kdev, 10)
	freenect.close_device(kdev)
	while True:
		frame = getimg_irkinect()
		cv2.imshow('processing', processimg(frame))
		if cv2.waitKey(10) == 27:  # ESC
			break
	cv2.destroyAllWindows()
示例#11
0
def set_kinect_angle(angle, device_index=0):
    # Clamp angle to [-30, 30]
    angle = min(angle, max(angle, -30), 30)
    print "Setting Kinect angle to", angle

    # We have to stop the synchronous runloop to interact with the device.
    freenect.sync_stop()

    # Open the device
    ctx = freenect.init()
    dev = freenect.open_device(ctx, device_index)

    # Set angle
    freenect.set_tilt_degs(dev, angle)

    # Shutdown context, allowing synchronous runloop to start
    freenect.shutdown(ctx)

    return angle
示例#12
0
File: kinect.py  Project: wgwoods/wigl
    def __init__(self,
                 devno=0,
                 resolution=RESOLUTION_MEDIUM,
                 depth_mode=DEPTH_11BIT,
                 video_mode=VIDEO_RGB):
        """Open Kinect ``devno`` and record the requested stream modes.

        Raises:
            KinectError: if the freenect context cannot be created, or
                the device cannot be opened (the context is shut down
                first in that case, so no handle leaks).
        """
        self._ctx = freenect.init()
        if not self._ctx:
            raise KinectError("Cannot connect to device.")
        self.devno = devno
        self._dev = freenect.open_device(self._ctx, self.devno)
        if not self._dev:
            # Opening failed: release the context before bailing out.
            freenect.shutdown(self._ctx)
            raise KinectError("Cannot open device.")

        # Callbacks are attached later by the caller.
        self._depth_callback = None
        self._video_callback = None

        self._resolution = resolution
        self.depth_mode = depth_mode
        self.video_mode = video_mode
def return_mean(a):
    """
    * Function Name:	return_mean
    * Input:		Depth Frame or any other matrix.
    * Output:		Returns mean of the frame
    * Logic:		Reduces noise with a 5x5 median blur, then averages
    *			the pixel values of the cropped region.
    * Example Call:	return_mean(a)
    """
    # The original computed the same median blur twice and never used the
    # first result ('median'); one pass is sufficient.
    mediane = cv2.medianBlur(a, 5)
    # NOTE(review): [0:479, 0:639] drops the last row/column of a 480x640
    # frame — possibly an off-by-one; kept as-is to preserve behavior.
    rect = mediane[0:479, 0:639]
    mean = rect.mean()
    return mean

ctx = freenect.init()	#initialize freenect
dev = freenect.open_device(ctx, freenect.num_devices(ctx) - 1)	#open Kinect Device

# Wiggle the tilt motor once (30 degrees up, then level) as a visible
# power-on self-test before the streaming loop starts.
freenect.set_tilt_degs(dev, 30)	#Tilts kinect to 30 degrees
time.sleep(1)	

freenect.set_tilt_degs(dev, 0)	#Tilts kinect to 0 degree
time.sleep(1)

freenect.close_device(dev)	#closes Kinect
while(True):
    a = get_depth()	#gets the depth data from Kinect
    mean = return_mean(a)	#returns the mean of the depth data
    if mean > 240:	
            ser.write("\x38")	#if the front area has more depth than the threshold than the robot will move forward
    else:
        while(return_mean(get_depth())<242):
示例#14
0
#!/usr/bin/env python
import freenect
import cv2
import frame_convert2
import numpy as np

cv2.namedWindow('Depth')
cv2.namedWindow('RGB')
# Flipped to False by display_depth() when the user presses ESC.
keep_running = True

# DEPTH_REGISTERED aligns depth pixels with the RGB camera's viewpoint.
mdev = freenect.open_device(freenect.init(),0)
freenect.set_depth_mode(mdev,freenect.RESOLUTION_MEDIUM,freenect.DEPTH_REGISTERED)

# Sliding depth window (in registered-depth units) swept by display_depth.
lower = 0
upper  = 100

def display_depth(dev, data, timestamp):
    """Freenect depth callback: show a band-pass depth slice and its edges.

    Pixels with depth inside (lower, upper) are shown at intensity 200;
    the band then slides 10 units deeper each frame and wraps back to
    the start once ``upper`` exceeds 1900. ESC clears ``keep_running``.
    """
    global keep_running, lower, upper
    # Binary mask of the current depth band, scaled for display.
    data = 200 * np.logical_and(data > lower, data < upper)
    data = data.astype(np.uint8)
    canny = cv2.Canny(data, 100, 255)
    cv2.imshow('Depth', data)
    cv2.imshow('RGB', canny)
    lower += 10
    upper += 10
    if upper > 1900:
        # These two lines were tab-indented in a space-indented function
        # (a TabError under Python 3); re-indented with spaces.
        lower = 0
        upper = 100
    if cv2.waitKey(10) == 27:
        keep_running = False
示例#15
0
def main_vision(load):
    """Key-point vision loop over Kinect depth + video frames.

    With ``load`` True, loads a trained predictor and classifies the
    key-point descriptors of each frame; with ``load`` False, collects
    descriptors into the predictor's data set under keyboard-driven
    class labels, then trains and re-runs itself with load=True.

    Press 'q' to stop; in collection mode you are then prompted to
    advance the class label, continue, or quit.
    """
    # inits
    fn_ctx = fn.init()
    fn_dev = fn.open_device(fn_ctx, fn.num_devices(fn_ctx) - 1)
    fn.set_tilt_degs(fn_dev, 0)
    fn.close_device(fn_dev)
    key_point = KeyPoints(150)
    predictor = prediction(ModelPath)
    preds = []

    # optimization: bind frequently-used attributes/methods to locals so
    # the per-frame loop avoids repeated attribute lookups.
    t0 = 0.0
    t1 = 0.0
    fps = 0.0
    total_fps = 0.0
    frames = 0
    kp_speed = key_point._get_kp_speedup()
    draw_speed = key_point._get_draw_speedup()
    proc_speed = key_point._get_process_speedup()
    cvtColor = cv2.cvtColor
    BGR2RGB = cv2.COLOR_BGR2RGB
    get_kp = key_point.get_key_points
    draw_kp = key_point.draw_key_points
    # NOTE(review): double-underscore attribute accessed from outside the
    # class — this relies on KeyPoints exposing '__process_image'
    # unmangled; verify against the KeyPoints definition.
    process_image = key_point.__process_image
    show_image = cv2.imshow
    wait_for_key = cv2.waitKey
    copy_thing = copy.copy
    num_features = key_point.get_num_features()
    arr_shape = np.shape
    # Descriptors are accepted only when exactly (num_features, 32).
    shape_check = (num_features, 32)
    ravel = np.ravel
    append_pred = preds.append
    get_time = time.time

    current_class = 0
    if load:
        brain = predictor.load_brain()
        pred_speed = predictor.get_pred_speed()
        predict = predictor.predict
    else:
        add_speed = predictor.get_add_speed()
        add_data = predictor.add_data
        get_length = predictor.get_data_length
    if load:
        net = Neural_Net(predictor.brain.getPoint(), np.vstack(predictor.brain.getData()), 4800 * 2, num_features)
        nl_predict = net.predict
        nl_speed = net.get_neural_speed()

    # mainLoop
    while True:
        t0 = get_time()

        # Get a fresh frame
        depth = get_depth()
        frame = get_video()
        show_image('Raw Image', cvtColor(frame, BGR2RGB))

        # Process Depth Image
        # depth = remove_background(depth, 25)
        depth = remove_background_percent(depth, .5, 50)
        depth = convert_to_bw(depth)
        mask = make_mask(depth)

        # Process Image
        frame = cvtColor(frame, BGR2RGB)
        video = copy_thing(frame)
        frame = process_image(frame, proc_speed)
        # Make Masked Frame: zero out pixels the depth mask rejects.
        masked_frame = copy_thing(frame)
        masked_frame[mask] = 0

        # Process Key Points
        kp, des = get_kp(masked_frame, kp_speed)
        video = draw_kp(video, kp, True, speedup=draw_speed)

        # Predict current frame's class (recognition mode only)
        if (load) and (des is not None) and (arr_shape(des) == shape_check):
            pred = predict(ravel(des), pred_speed)
            append_pred(pred)
            print(pred)
            print(nl_predict([ravel(des)], nl_speed))
        # Add object description to data set (collection mode only)
        if (not load) and (des is not None) and (arr_shape(des) == shape_check):
            add_data(add_speed, np.ravel(des), current_class)
            print('Current Class and Length:\t%i\t%i' % (get_length(), current_class))

        t1 = get_time()
        fps = (1 / (t1 - t0))
        total_fps += fps
        frames += 1
        print('%.2f FPS' % fps)
        show_image('masked image', masked_frame)
        show_image('depth', depth)
        show_image('key points', video)
        # show_image('all', frame, masked_frame, depth, video)
        if wait_for_key(1) & 0xFF == ord('q'):
            cv2.destroyAllWindows()
            if load:
                break
            # Collection mode: decide what to do with the class label.
            print('Current Class: %i\nn : Next Class\nr : Continue Current Class\nq : Quit' % (current_class))
            inp = raw_input()  # Python 2 input
            if inp == 'n':
                current_class += 1
            elif inp == 'q':
                break

    # print(np.mean(preds))
    cv2.destroyAllWindows()
    print('Average FPS: %.2f' % (total_fps / frames))
    fn.sync_stop()
    if not load:
        # Train on the collected data, then re-enter in recognition mode.
        predictor.create_brain()
        main_vision(True)
示例#16
0
 
TILT_MAX = 20
TILT_STEP = 02
TILT_START = -35 
 
if len(sys.argv) >= 2:
	if sys.argv[1]: TILT_MAX = int(sys.argv[1])
	if sys.argv[2]: TILT_STEP = int(sys.argv[2])
	if sys.argv[2]: TILT_START = int(sys.argv[3])
 
ctx = freenect.init()
print "Number of kinects: %d\n" % freenect.num_devices(ctx)
dev = []
for devIdx in range(0, freenect.num_devices(ctx)):
    print "opening device %d" % devIdx
    dev.append(freenect.open_device(ctx, devIdx))
    if not dev:
        freenect.error_open_device()
 
print "Starting TILT Cycle"
for tilt in xrange(TILT_START, TILT_MAX+TILT_STEP, TILT_STEP):
    for sensor in dev:
        print "Setting TILT: ", tilt
        freenect.set_tilt_degs(sensor, tilt)
    time.sleep(1)
 
print "Starting TILT Cycle"
for tilt in xrange(TILT_MAX+TILT_STEP, TILT_START, (TILT_STEP * -1)):
    for sensor in dev:
        print "Setting TILT: ", tilt
        freenect.set_tilt_degs(sensor, tilt)
示例#17
0
        keep_running = False

def display_rgb(dev, data, timestamp):
    """Freenect video callback: show the frame; ESC stops the loop."""
    global keep_running
    bgr = data[:, :, ::-1]  # OpenCV expects BGR; freenect delivers RGB
    cv2.imshow('RGB', bgr)
    if cv2.waitKey(10) == 27:
        keep_running = False


def body(*args):
    """Freenect runloop body: kill the loop once keep_running is cleared.

    Raises:
        freenect.Kill: to make freenect.runloop() return.
    """
    if not keep_running:
        # The original called cv2.destroyAllWindows() *after* this raise,
        # which was unreachable dead code; it has been removed. Windows
        # are closed by the ESC handlers instead.
        raise freenect.Kill


print('Press ESC in window to stop')

ctx = freenect.init()
devPtr = freenect.open_device(ctx, 0)
# Left over from experiments with the IR stream:
#freenect.set_video_mode(devPtr, freenect.VIDEO_IR_8BIT, freenect.RESOLUTION_MEDIUM)
#ir, data = freenect.sync_get_video(format=freenect.VIDEO_IR_8BIT)
#print ir
#print data

# Blocks here, dispatching frames to the depth/video callbacks until
# body() raises freenect.Kill (when keep_running goes False).
freenect.runloop(depth=display_depth,
                 video=display_rgb,
                 body=body,
                 dev=devPtr
                 )
示例#18
0
import freenect
import sys

ctx = freenect.init()
dev = freenect.open_device(ctx, 0)
# Tilt angle comes from the command line, e.g. ``python tilt.py 15``.
freenect.set_tilt_degs(dev, float(sys.argv[1]))
# Release the device handle and context instead of leaking them at exit.
freenect.close_device(dev)
freenect.shutdown(ctx)