Example #1
    def __init__(self, dummy=False, mirror=True):
        self.__class__._instances.append(weakref.proxy(self))
        self.id = next(self._ids)
        self.resolution = (640, 480)
        self.dummy = dummy
        self.mirror = mirror

        if not self.dummy:
            print("looking for kinect...")
            self.ctx = freenect.init()
            self.dev = freenect.open_device(self.ctx, self.id)
            print(self.id)
            freenect.close_device(
                self.dev)  # TODO Test if this has to be done!

            self.angle = None
            self.depth = freenect.sync_get_depth(
                index=self.id, format=freenect.DEPTH_MM
            )[0]  # get the first Depth frame already (the first one takes much longer than the following)
            self.filtered_depth = None
            print("kinect initialized")
        else:
            print(
                "dummy mode. get_frame() will return a synthetic depth frame, other functions may not work"
            )
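Nearly every snippet on this page repeats the same handshake: create a context, open a device by index, poke the motor or LED, then close the device before using the synchronous API. A minimal standalone sketch of that pattern (device index 0 and the LED value are assumptions, not part of any example above):

import freenect

ctx = freenect.init()                      # create a libfreenect context
dev = freenect.open_device(ctx, 0)         # open the first Kinect (assumed index)
freenect.set_led(dev, freenect.LED_GREEN)  # optional: show that the device is claimed
freenect.close_device(dev)                 # release the device...
freenect.shutdown(ctx)                     # ...and the context, so the sync API can claim them
depth, _ = freenect.sync_get_depth()       # the first frame is the slow one
freenect.sync_stop()                       # stop the sync runloop when done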
Example #2
def start():
    global test_cases, ser, global_depth_map, DOOR_FLAG, DOOR_COUNT
    ser = serial.Serial('/dev/ttyUSB0')  # initialization of serial communication
    ctx = freenect.init()
    dev = freenect.open_device(ctx, freenect.num_devices(ctx) - 1)

    freenect.set_tilt_degs(dev, 20)
    freenect.close_device(dev)
    test_cases = [True, True, True]

    while True:
        global_depth_map = get_depth()	#returns the depth frame
        contoursright = contours_return(global_depth_map, -10)
        contoursleft = contours_return(global_depth_map, 10)
        door_detection(contoursright, contoursleft, test_cases)
        if DOOR_FLAG:
            door_movement(global_depth_map)
        else: regular_movement(global_depth_map)
        cv2.imshow('final', global_depth_map)
        if cv2.waitKey(1) != -1:
            ser.write('\x35')
            ser.close()
            break

    cv2.destroyAllWindows()
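get_depth() is not defined in this snippet; a plausible implementation in the usual libfreenect demo style (the 10-bit clip and the 2-bit shift are assumptions) would be:

import freenect
import numpy as np

def get_depth():
    depth, _ = freenect.sync_get_depth()  # (480, 640) uint16 depth frame
    np.clip(depth, 0, 2**10 - 1, depth)   # keep the usable 10 bits
    depth >>= 2                           # rescale to 0..255 for cv2.imshow
    return depth.astype(np.uint8)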
Example #3
def start():
    global XAXIS
    global YAXIS
    global TEST_CASES
    global DIRECTION
    global GLOBAL_DEPTH_MAP
    plt.ion()
    plt.figure()
    ctx = freenect.init()
    dev = freenect.open_device(ctx, freenect.num_devices(ctx) - 1)
    freenect.set_tilt_degs(dev, 20)
    freenect.close_device(dev)
    DIRECTION = 0
    for i in xrange(5):
        initial_map = get_depth()

    while True:
        GLOBAL_DEPTH_MAP = get_depth()	#returns the depth frame
        back_movement(GLOBAL_DEPTH_MAP)
        contoursright = contours_return(GLOBAL_DEPTH_MAP, -10)
        contoursleft = contours_return(GLOBAL_DEPTH_MAP, 10)
        door_detection(contoursright, contoursleft)
        if DOOR_FLAG:
            door_movement()
        else: regular_movement()
        cv2.imshow('final', GLOBAL_DEPTH_MAP)
        if cv2.waitKey(1) != -1:
            SERIALDATA.write('\x35')
            SERIALDATA.close()
            break

    cv2.destroyAllWindows()
Example #4
	def run(self):
		try:
			self.ctx = freenect.init()
			self.dev = freenect.open_device(self.ctx, 0)

			freenect.set_depth_mode(self.dev, freenect.RESOLUTION_MEDIUM, freenect.DEPTH_11BIT)
			freenect.set_depth_callback(self.dev, self._depth_cb)
			freenect.set_video_mode(self.dev, freenect.RESOLUTION_MEDIUM, freenect.VIDEO_RGB)
			freenect.set_video_callback(self.dev, self._video_cb)

			self.video_started = False
			self.depth_started = False

			while self.keep_running:
				with self.lock:
					if self.led_update is not None:
						freenect.set_led(self.dev, self.led_update)
						self.led_update = None
					self.update_streams()
					if not self.video_started and not self.depth_started:
						self.update_cond.wait()
						continue
					self.update.clear()
					if not self.keep_running:
						break
				freenect.base_runloop(self.ctx, self._body)
		finally:
			with self.lock:
				for k in self.depth_consumers.keys() + self.video_consumers.keys():
					k.put(StreamerDied("The Kinect streamer died"))
				self.depth_consumers = {}
				self.video_consumers = {}
				self.update_streams()
			freenect.close_device(self.dev)
			freenect.shutdown(self.ctx)
Example #7
    def __init__(self):
        # hard coded class attributes for KinectV1's native resolution
        self.name = 'kinect_v1'
        self.depth_width = 320
        self.depth_height = 240
        self.color_width = 640
        self.color_height = 480

        self.id = 0
        self.device = None
        self.depth = None
        self.color = None
        logger.warning(
            'Two kernels cannot access the Kinect at the same time. '
            'This will lead to a sudden death of the kernel. '
            'Be sure no other kernel is running before you initialize a KinectV1 object.'
        )

        logger.info("looking for kinect...")
        ctx = freenect.init()
        self.device = freenect.open_device(ctx, self.id)
        print(self.id)
        freenect.close_device(self.device)  # TODO Test if this has to be done!
        # get the first Depth frame already (the first one takes much longer than the following)
        self.depth = self.get_frame()
        logger.info("KinectV1 initialized.")
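get_frame() is defined elsewhere in this class; a minimal version consistent with the constructor (the millimeter format is an assumption, mirroring Example #1) could look like:

    def get_frame(self):
        # assumed wrapper over the synchronous API, returning depth in mm
        depth, _ = freenect.sync_get_depth(index=self.id, format=freenect.DEPTH_MM)
        return depth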
Example #8
def loop(processimg):
	if not use_webcam:
		ctx = freenect.init()
		dev = freenect.open_device(ctx, 0)
		freenect.set_tilt_degs(dev, 10)
		freenect.close_device(dev)
	cv2.namedWindow('processing')
	for k, v in params.iteritems():
		cv2.createTrackbar(k, 'processing', v, 255, onchange(k))
	
	runonce = True
	while runonce:
		#runonce = False
		if imgpath != "":
			img = cv2.imread(imgpath)
		else:
			img = getimg()

		cv2.imshow('processing', cv2.resize(processimg(img), (width, height)))
		char = cv2.waitKey(10)
		if char == 27:
			break
		elif char == ord('p'):
			for k, v in params.iteritems():
				print "%s: %d" % (k, v)
		#freenect.set_tilt_degs(dev, pid)
	cv2.destroyAllWindows()
Example #10
def kill():
    [ctx, dev] = initkinect()
    cv2.destroyAllWindows()
    freenect.sync_stop()
    freenect.stop_video(dev)
    freenect.stop_depth(dev)
    freenect.close_device(dev)
    freenect.shutdown(ctx)
    quit()
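initkinect() is not shown; a plausible counterpart to kill(), following the open pattern used by the other examples on this page:

def initkinect():
    # hypothetical helper: open the last-enumerated Kinect and return the
    # context/device pair that kill() unpacks
    ctx = freenect.init()
    dev = freenect.open_device(ctx, freenect.num_devices(ctx) - 1)
    return [ctx, dev]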
Example #11
    def __init__(self, calibration_file='config/calibration.frame', debug=False):
        try:
            self.device = kinect.open_device(kinect.init(), 0)
            kinect.set_tilt_degs(self.device, -90)
            kinect.close_device(self.device)
        except Exception as e:
            logging.error('No Kinect detected')
            self.device = None

        with open(calibration_file, 'rb') as f:
            self.calibration_frame = np.load(f)
Example #12
    def __init__(self, cam=-1):
        # Initialize freenect and get the context
        print 'Initialize kinect'
        context = freenect.init()
        # Open the device and get the device
        print 'Open device'
        self.kinect = freenect.open_device(context, 0)
        # Turn the led off
        print 'Turning the led off'
        freenect.set_led(self.kinect, freenect.LED_OFF)
        # Close the device
        print 'Closing the device'
        freenect.close_device(self.kinect)
Example #13
def depth_view():
    import matplotlib.pyplot as plt

    fn_ctx = fn.init()
    fn_dev = fn.open_device(fn_ctx, fn.num_devices(fn_ctx) - 1)
    fn.set_tilt_degs(fn_dev, 0)
    fn.close_device(fn_dev)

    x = np.arange(0, 256, 1)
    zeros = np.zeros_like(x)
    fig = plt.figure()
    ax = fig.add_subplot(111)
    view1, = ax.plot(x, zeros, '-k', label='black')
    view2, = ax.plot(x, zeros, '-r', label='red')

    np_array = np.array
    np_max = np.max
    view1_sety = view1.set_ydata
    view2_sety = view2.set_ydata
    ax_relim = ax.relim
    ax_autoscale_view = ax.autoscale_view
    while True:
        dep = get_depth(False)
        cv2.imshow('raw', dep)

        dep = cv2.medianBlur(dep, 3)
        bin = __get_bins(dep)
        clean = copy(bin)
        clean = __pipeline(clean)
        bin[:2] = 0
        clean *= np_max(bin)
        view1_sety(bin)
        view2_sety(clean)
        ax_relim()
        ax_autoscale_view()
        im = fig2img(fig)
        graph = np_array(im)


        # dep = remove_background(dep, 100)
        dep = isolate_depths(dep)
        # dep = convert_to_bw(dep)

        cv2.imshow('depth', dep)
        cv2.imshow('graph', graph)
        # show_image('all', frame, masked_frame, depth, video)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            cv2.destroyAllWindows()
            break
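fig2img() is an external helper here; a plausible sketch that renders the matplotlib figure into a PIL image (the Agg canvas methods are the assumption; numpy is imported as np, as in the snippet):

from PIL import Image

def fig2img(fig):
    fig.canvas.draw()                     # rasterize the current figure
    w, h = fig.canvas.get_width_height()
    buf = np.frombuffer(fig.canvas.tostring_rgb(), dtype=np.uint8)
    return Image.frombytes('RGB', (w, h), buf.tobytes())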
Example #14
def temp_test():
    fn_ctx = fn.init()
    fn_dev = fn.open_device(fn_ctx, fn.num_devices(fn_ctx) - 1)
    fn.set_tilt_degs(fn_dev, 0)
    fn.close_device(fn_dev)

    while True:
        dep = get_depth()
        dep *= (dep * 1.3).astype(np.uint8)
        print("{}\t,\t{}".format(np.min(dep), np.max(dep)))

        cv2.imshow('depth', dep)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            cv2.destroyAllWindows()
            fn.sync_stop()
            break
Example #15
def density_plot():
    fn_ctx = fn.init()
    fn_dev = fn.open_device(fn_ctx, fn.num_devices(fn_ctx) - 1)
    fn.set_tilt_degs(fn_dev, 0)
    fn.close_device(fn_dev)

    show_image = cv2.imshow
    waitkey = cv2.waitKey
    ravel = np.ravel
    countbin = np.bincount

    length = 256
    nums = np.arange(0, length, 1)
    zero = np.zeros_like(nums)

    import matplotlib.pyplot as plt
    import matplotlib.animation as animation

    fig, ax = plt.subplots()
    line, = ax.plot(nums, zero)
    ax.set_ylim(0, 10000)
    ax.set_xlim(0, 256)
    set_y_data = line.set_ydata

    def update(data):
        set_y_data(data)
        return line,

    def get_dep():
        dep = get_depth()
        dep = cv2.medianBlur(dep, 3, dep)
        dep = ravel(dep)
        # dep = medfilt(dep, 21).astype(np.uint8)
        return dep

    def data_gen():
        while True:
            yield countbin(get_dep(), minlength=length)

    ani = animation.FuncAnimation(fig, update, data_gen)
    plt.show()

    cv2.destroyAllWindows()

    fn.sync_stop()
Example #16
def loop(processimg):
	ctx = freenect.init()
	dev = freenect.open_device(ctx, 0)
	freenect.set_tilt_degs(dev, 10)
	freenect.close_device(dev)
	while True:
		#img = processimg(np.array(getimg_webcam()))
		#gotimg, img = c.read()
		img = getimg_irkinect()
		#gotimg = True
		#if not gotimg:
		#	print 'stopped'
		#	break

		cv2.imshow('processing', processimg(img))
		if cv2.waitKey(10) == 27:
			break
	cv2.destroyAllWindows()
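getimg_irkinect() is not part of the snippet; a plausible one-liner over the synchronous API (the 8-bit IR format is an assumption):

def getimg_irkinect():
    # hypothetical helper: grab one IR frame synchronously
    img, _ = freenect.sync_get_video(format=freenect.VIDEO_IR_8BIT)
    return img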
Example #18
    cv.Resize(cv.fromarray(raw_image), cv.fromarray(image), cv.CV_INTER_AREA)
    cv.CvtColor(cv.fromarray(image), cv.fromarray(image), cv.CV_RGB2HSV)
    
    # Downsample the depth frame using nearest-neighbor to make sure 
    # invalid pixels are handled properly.
    cv.Resize(cv.fromarray(raw_depth), cv.fromarray(depth), cv.CV_INTER_NN)
    
    # Do the object recognition
    color.identify(image, constants, colors)
    global balls, yellow_walls, green_walls
    balls = blobs.find_blobs(colors, depth, color=0)
    yellow_walls = blobs.find_blobs(colors, depth, color=1, min_size=100)
    green_walls = blobs.find_blobs(colors, depth, color=2, min_size=10)

# Work around an initialization bug for synchronous image
try:
    ctx = freenect.init()
    dev = freenect.open_device(ctx, 0)
    if not dev:
        raise Exception
    freenect.start_video(dev) # sync_get_video hangs if we don't do this
    freenect.start_depth(dev) # sync_get_depth hangs if we don't do this
    freenect.stop_depth(dev)
    freenect.stop_video(dev)
    freenect.close_device(dev) # close the device so that c_sync can open it
    freenect.shutdown(ctx)
    process_frame()
    initialized = True
except:
    pass
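Once the warm-up above has run, the synchronous calls should no longer hang; the follow-up usage would look like this sketch (not part of the original snippet):

depth, _ = freenect.sync_get_depth()  # returns promptly after the warm-up
rgb, _ = freenect.sync_get_video()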
Example #19
pygame.init() #Initiates pygame
xSize,ySize = 640,480 #Sets size of window
screen = pygame.display.set_mode((xSize,ySize),pygame.RESIZABLE) #creates main surface

#KINECT

depth_raw=None

num_to_track=1000 #Number of points to use to determine where the closest 'thing' is
x_grid,y_grid=np.ogrid[0:xSize,0:ySize]

ctx = freenect.init() #Start up the kinect
dev = freenect.open_device(ctx, 0) #Open the first device; the handle is only used for led control
freenect.set_led(dev, 0) #Turn led off
freenect.close_device(dev) #Release the kinect

#CONTROLLER

r_small=15
r_medium=75
r_large=125

r_central_button=50
z_main_action=50
z_switch_controller=50

huge_motion_limit=40
active_area_limit=50
exclusion_zone_limit=50
Example #20
def main():
    rospy.init_node("camera")
    rospy.loginfo("Starting Camera Node")
    rospy.on_shutdown(shutdown_hook)

    # Set initial camera angle
    ctx = freenect.init()
    dev = freenect.open_device(ctx, freenect.num_devices(ctx) - 1)
    freenect.set_tilt_degs(dev, 20)
    freenect.close_device(dev)

    r = rospy.Rate(PUBLISH_RATE)
    cv_bridge = CvBridge()

    video_pub = rospy.Publisher('/camera/video', Image, queue_size=10)
    depth_pub = rospy.Publisher('/camera/depth', Image, queue_size=10)

    # Uses a different encoding for april tags
    camera_image_pub = rospy.Publisher('/camera/image_raw',
                                       Image,
                                       queue_size=10)
    camera_info_pub = rospy.Publisher('/camera/camera_info',
                                      CameraInfo,
                                      queue_size=10)

    # Kinect manually calibrated using this http://people.csail.mit.edu/peterkty/teaching/Lab4Handout_Fall_2016.pdf
    camera_info = CameraInfo()
    hd = Header()
    camera_info.height = 480
    camera_info.width = 640
    camera_info.distortion_model = "plumb_bob"
    camera_info.D = [
        0.16966890693679473, -0.32564392755677646, 0.0014273722857157428,
        -0.0007780067287402459, 0.0
    ]
    camera_info.K = [
        522.7790149706918, 0.0, 317.5941836796907, 0.0, 523.5195539902463,
        255.18973237498545, 0.0, 0.0, 1.0
    ]
    camera_info.P = [
        532.5130615234375, 0.0, 317.01325625466416, 0.0, 0.0, 535.176025390625,
        255.7121671461573, 0.0, 0.0, 0.0, 1.0, 0.0
    ]
    camera_info.R = [1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0]

    while not rospy.is_shutdown():
        video = get_video()
        depth = get_depth()

        video_msg = cv_bridge.cv2_to_imgmsg(video, encoding="passthrough")
        depth_msg = cv_bridge.cv2_to_imgmsg(depth, encoding="passthrough")
        cam_img_msg = cv_bridge.cv2_to_imgmsg(video, encoding="bgr8")

        camera_info.header = video_msg.header

        video_pub.publish(video_msg)
        depth_pub.publish(depth_msg)
        camera_image_pub.publish(cam_img_msg)
        camera_info_pub.publish(camera_info)

        r.sleep()
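shutdown_hook() is registered with rospy.on_shutdown() but not shown; a plausible cleanup, assuming the frames come from the synchronous freenect API:

def shutdown_hook():
    freenect.sync_stop()     # stop the sync runloop and release the device
    cv2.destroyAllWindows()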
Example #21
def back_movement(z_back):
    if (z_back[0:479, 200:439].mean() > 200
            or z_back[0:479, 0:199].mean() > 200
            or z_back[0:479, 440:639].mean() > 200):
        ser.write('\x50')
        time.sleep(3)


plt.ion()
plt.figure()
a = []
b = []
ctx = freenect.init()
dev = freenect.open_device(ctx, freenect.num_devices(ctx) - 1)

freenect.set_tilt_degs(dev, 20)
freenect.close_device(dev)
test_cases = [True, True, True]
#for i in xrange(5):
#    z = get_depth()
#left_mean = z[0:479,0:319].mean()
#right_mean = z[0:479,320:639].mean()
#if left_mean > right_mean:
#    wall = 1
#else: wall = 0
for i in xrange(5):
    zp = get_depth()
wall = 0
while True:
    z = get_depth()	 # returns the depth frame
    back_movement(z)
    contours_right = contours_return(z, -10)
Example #22
    def stop(self):
        for consumer in set(self.consumers):
            consumer.queue.put(StreamerDied("Stream stopped"))
            consumer.stop()
        freenect.close_device(self.dev)
Example #23
def main_vision(load):
    # inits
    fn_ctx = fn.init()
    fn_dev = fn.open_device(fn_ctx, fn.num_devices(fn_ctx) - 1)
    fn.set_tilt_degs(fn_dev, 0)
    fn.close_device(fn_dev)
    key_point = KeyPoints(150)
    predictor = prediction(ModelPath)
    preds = []

    # optimization
    t0 = 0.0
    t1 = 0.0
    fps = 0.0
    total_fps = 0.0
    frames = 0
    kp_speed = key_point._get_kp_speedup()
    draw_speed = key_point._get_draw_speedup()
    proc_speed = key_point._get_process_speedup()
    cvtColor = cv2.cvtColor
    BGR2RGB = cv2.COLOR_BGR2RGB
    get_kp = key_point.get_key_points
    draw_kp = key_point.draw_key_points
    process_image = key_point.__process_image
    show_image = cv2.imshow
    wait_for_key = cv2.waitKey
    copy_thing = copy.copy
    num_features = key_point.get_num_features()
    arr_shape = np.shape
    shape_check = (num_features, 32)
    ravel = np.ravel
    append_pred = preds.append
    get_time = time.time

    current_class = 0
    if load:
        brain = predictor.load_brain()
        pred_speed = predictor.get_pred_speed()
        predict = predictor.predict
    else:
        add_speed = predictor.get_add_speed()
        add_data = predictor.add_data
        get_length = predictor.get_data_length
    if load:
        net = Neural_Net(predictor.brain.getPoint(), np.vstack(predictor.brain.getData()), 4800 * 2, num_features)
        nl_predict = net.predict
        nl_speed = net.get_neural_speed()

    # mainLoop
    while True:
        t0 = get_time()

        # Get a fresh frame
        depth = get_depth()
        frame = get_video()
        show_image('Raw Image', cvtColor(frame, BGR2RGB))

        # Process Depth Image
        # depth = remove_background(depth, 25)
        depth = remove_background_percent(depth, .5, 50)
        depth = convert_to_bw(depth)
        mask = make_mask(depth)

        # Process Image
        frame = cvtColor(frame, BGR2RGB)
        video = copy_thing(frame)
        frame = process_image(frame, proc_speed)
        # Make Masked Frame
        masked_frame = copy_thing(frame)
        masked_frame[mask] = 0

        # Process Key Points
        kp, des = get_kp(masked_frame, kp_speed)
        video = draw_kp(video, kp, True, speedup=draw_speed)

        # Predict current
        if (load) and (des is not None) and (arr_shape(des) == shape_check):
            pred = predict(ravel(des), pred_speed)
            append_pred(pred)
            print(pred)
            print(nl_predict([ravel(des)], nl_speed))
        # Add object description to data set
        if (not load) and (des is not None) and (arr_shape(des) == shape_check):
            add_data(add_speed, np.ravel(des), current_class)
            print('Current Class and Length:\t%i\t%i' % (get_length(), current_class))

        t1 = get_time()
        fps = (1 / (t1 - t0))
        total_fps += fps
        frames += 1
        print('%.2f FPS' % fps)
        show_image('masked image', masked_frame)
        show_image('depth', depth)
        show_image('key points', video)
        # show_image('all', frame, masked_frame, depth, video)
        if wait_for_key(1) & 0xFF == ord('q'):
            cv2.destroyAllWindows()
            if load:
                break
            print('Current Class: %i\nn : Next Class\nr : Continue Current Class\nq : Quit' % (current_class))
            inp = raw_input()
            if inp == 'n':
                current_class += 1
            elif inp == 'q':
                break

    # print(np.mean(preds))
    cv2.destroyAllWindows()
    print('Average FPS: %.2f' % (total_fps / frames))
    fn.sync_stop()
    if not load:
        predictor.create_brain()
        main_vision(True)
Example #24
def doloop():

    #Series of commands to do pointer operations on the kinect (motor, led, accelerometer)
    ctx = init()  #Initiates device
    mdev = open_device(ctx, 0)  #Opens the device for commands
    set_led(mdev, 1)  #Sets LED to green
    close_device(
        mdev)  #Closes device. Device must be closed immediately after usage

    #Mean filter caches
    yList = [0, 0, 0, 0, 0, 0]
    xList = [0, 0, 0, 0, 0, 0]

    #Sets color tuples
    RED = (255, 0, 0)
    BLUE = (0, 0, 255)
    TEAL = (0, 200, 100)
    BLACK = (0, 0, 0)

    #Sets the size of the screen
    xSize = 640
    ySize = 480

    done = False  #Main while loop bool counter
    pygame.init()  #Initiates pygame
    screen = pygame.display.set_mode(
        (xSize, ySize), pygame.RESIZABLE)  #Creates the pygame window
    screen.fill(BLACK)  #Fills the window black

    #Initiates the xTempPos and yTempPos values so that the point will remain stationary
    #if the minimum value is larger than 600
    xTempPos = xSize / 2
    yTempPos = ySize / 2

    global depth, rgb  #Makes the depth and rgb variables global

    while not done:
        screen.fill(BLACK)  #Makes the pygame window black after each iteration

        # Get a fresh frame
        (depth, _) = get_depth()
        (rgb, _) = get_video()

        minVal = np.min(depth)  #This is the minimum value from the depth image
        minPos = np.argmin(
            depth)  #This is the raw index of the minimum value above
        xPos = np.mod(minPos, xSize)  #This is the x component of the raw index
        yPos = minPos // xSize  #This is the y component of the raw index

        #This is the mean filter process
        """
        A mean filter collects values in a cache list and takes their mean to
        determine the final value. Here it damps the volatility of the minimum
        position, yielding a smoother display and a more consistent value. My
        computer works smoothly with the 6-element cache used below, whereas a
        faster computer may need a larger cache and a slower computer a smaller one
        """
        xList.append(xPos)
        del xList[0]
        xPos = int(mean(xList))
        yList.append(yPos)
        del yList[0]
        yPos = int(mean(yList))
        """
        This if statement stores the minimum position in xTempPos and yTempPos and
        makes the dot color red whenever the minimum value is below 600. If the
        minimum value is 600 or larger, xPos and yPos revert to the last stored
        temporary positions and the color changes to purple
        """
        if minVal < 600:
            xTempPos = xPos
            yTempPos = yPos
            COLOR = cv.RGB(255, 0, 0)
        else:
            xPos = xTempPos
            yPos = yTempPos
            COLOR = cv.RGB(100, 0, 100)

        #cv.Circle(rgb, (xPos, yPos), 2, COLOR, 40) #draws a circle of a certain color at minimum position

        #cv.ShowImage('Image',rgb) #Shows the image
        cv.WaitKey(5)  #Keyboard interrupt
        """
        The if statement below sets up the virtual joystick by basically breaking the pygame
        window into four parts. A dot representing the minimum position is drawn on the window
        and the corresponding button based on the position is "pressed". The quarter of the
        window in which the button "pressed" corresponds to turns teal after being "pressed"

        Top Right   : A
        Bottom Right: B
        Bottom Left : Y
        Top Left    : X
        """
        if xPos <= xSize / 2 and yPos <= ySize / 2:
            command = 'A'
            rect1 = pygame.Rect((xSize / 2, 0), (xSize / 2, ySize / 2))
            pygame.draw.rect(screen, TEAL, rect1)
        elif xPos <= xSize / 2 and yPos > ySize / 2:
            command = 'B'
            rect1 = pygame.Rect((xSize / 2, ySize / 2), (xSize / 2, ySize / 2))
            pygame.draw.rect(screen, TEAL, rect1)
        elif xPos > xSize / 2 and yPos <= ySize / 2:
            command = 'X'
            rect1 = pygame.Rect((0, 0), (xSize / 2, ySize / 2))
            pygame.draw.rect(screen, TEAL, rect1)
        else:
            command = 'Y'
            rect1 = pygame.Rect((0, ySize / 2), (xSize / 2, ySize / 2))
            pygame.draw.rect(screen, TEAL, rect1)
        pygame.draw.line(
            screen, BLUE, (xSize / 2, ySize / 2),
            (xSize - xPos,
             yPos))  #Draws a line from the middle to the minimum position
        pygame.draw.circle(screen, RED, (xSize - xPos, yPos),
                           10)  #Draws the circle on pygame window
        pygame.display.flip()  #Displays the processed pygame window
        print command, minVal  #Prints the "pressed" button and the minimum value
        for e in pygame.event.get():  #Itertates through current events
            if e.type == pygame.QUIT:  #If the close button is pressed, the while loop ends
                done = True
Example #25
    def shutdown(self):
        if self._dev:
            freenect.close_device(self._dev)
        if self._ctx:
            freenect.shutdown(self._ctx)
Example #27
    def stop(self):
        freenect.close_device(self.dev)
        freenect.shutdown(self.ctx)
        print('kinect Stopped')
Example #29
def init():
    ctx = freenect.init()
    dev = freenect.open_device(ctx, freenect.num_devices(ctx) - 1)
    freenect.set_tilt_degs(dev, 0)
    freenect.close_device(dev)
Example #30
def killkinect(ctx, dev):
    freenect.close_device(dev)
    freenect.shutdown(ctx)
Example #31
def return_mean(a):
    median = cv2.medianBlur(a,5)
    mediane = cv2.medianBlur(a,5)
    rect = mediane[0:479, 0:639]
    mean = rect.mean()
    return mean

ctx = freenect.init()	#initialize freenect
dev = freenect.open_device(ctx, freenect.num_devices(ctx) - 1)	#open Kinect Device

freenect.set_tilt_degs(dev, 30)	#Tilts kinect to 30 degrees
time.sleep(1)	

freenect.set_tilt_degs(dev, 0)	#Tilts kinect to 0 degree
time.sleep(1)

freenect.close_device(dev)	#closes Kinect
while(True):
    a = get_depth()	#gets the depth data from Kinect
    mean = return_mean(a)	#returns the mean of the depth data
    if mean > 240:
        ser.write("\x38")	#if the front area has more depth than the threshold, the robot moves forward
    else:
        while return_mean(get_depth()) < 242:
            ser.write("\x36")	#rotate till the threshold is crossed
    th3 = cv2.adaptiveThreshold(a,255,cv2.ADAPTIVE_THRESH_GAUSSIAN_C,cv2.THRESH_BINARY,3,2)	#Binary threshold
    contours, hierarchy = cv2.findContours(th3,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)	#find contours
    img = cv2.drawContours(a, contours, -1, (0,255,0), 3)	#draw contours in the frame
    cv2.imshow('gray',a)	#displays the image with contours
    if cv2.waitKey(1)!=-1:	#if key is pressed terminate the program
        ser.write('\x35')
        ser.close()
Example #32
def main():
    grip_dir = os.environ['GRIP_DATA']
    global g_model
    g_model = IO.load(os.path.join(grip_dir, 'aam.new.io'))[1]

    global g_predictor, reference_3d, geo_vs, geo_vts, rect
    rect = None
    pred_fn = os.path.join(grip_dir, 'pred.new.io')
    g_predictor = Face.load_predictor(pred_fn)  #, cutOff=15)
    reference_shape = g_predictor['ref_shape']
    size = reference_shape.shape[0]
    geo_vs = np.zeros((size, 3), dtype=np.float32)
    geo_vs[:size, :2] = reference_shape
    geo_vts = np.zeros((size, 2), dtype=np.float32)
    geo_vts[:size] = reference_shape + 0.5
    geo_ts = np.array([[1, 0, 0, 0], [0, 1, 0, 1000], [0, 0, 1, 0]],
                      dtype=np.float32)
    geo_fs = Face.triangulate_2D(reference_shape)
    geo_bs = []
    for p0, p1, p2 in geo_fs:
        geo_bs.append((p0, p1))
        geo_bs.append((p1, p2))
        geo_bs.append((p2, p0))
    reference_3d = np.zeros((reference_shape.shape[0], 3), dtype=np.float32)
    reference_3d[:, :2] = reference_shape * [100, 100]

    img_vs = np.array([[0, 0, 0], [640, 0, 0], [640, 480, 0], [0, 480, 0]],
                      dtype=np.float32)
    img_vts = np.array([[0, 1], [1, 1], [1, 0], [0, 0]], dtype=np.float32)
    img_fs = np.array([[0, 1, 2, 3]], dtype=np.int32)
    img_ts = np.array([[1, 0, 0, 0], [0, 1, 0, 1000], [0, 0, 1, 0]],
                      dtype=np.float32)

    geo_mesh = GLMeshes(names=['geo_mesh'],
                        verts=[geo_vs],
                        faces=[geo_fs],
                        transforms=[geo_ts],
                        bones=[geo_bs],
                        vts=[geo_vts])
    img_mesh = GLMeshes(names=['img_mesh'],
                        verts=[img_vs],
                        faces=[img_fs],
                        transforms=[img_ts],
                        bones=[None],
                        vts=[img_vts])
    kinect = freenect.init()
    tilt, roll = 0, 0

    if 1:
        kdev = freenect.open_device(kinect, 0)
        freenect.set_led(kdev, 0)  # turn off LED
        freenect.set_tilt_degs(kdev, 25)
        kstate = freenect.get_tilt_state(kdev)
        freenect.update_tilt_state(kdev)
        tilt_angle, tilt_status = kstate.tilt_angle, kstate.tilt_status
        ax, ay, az = kstate.accelerometer_x, kstate.accelerometer_y, kstate.accelerometer_z
        #bottom facing down: (85, 743, 369, 52, 0)
        #right side down: (916, 71, 96, 112, 0)
        #front side down: (52, 63, -863, -128, 0)
        freenect.close_device(kdev)
        y_axis = np.array((ax, ay, az), dtype=np.float32)
        y_axis = y_axis / np.linalg.norm(y_axis)
        roll = np.degrees(np.arctan2(ax, ay))
        tilt = -np.degrees(np.arctan2(az, (ax**2 + ay**2)**0.5))

    fovX = 62.0
    pan_tilt_roll = (0, tilt, roll)
    tx_ty_tz = (0, 1000, 6000)
    P = Calibrate.composeP_fromData((fovX, ), (pan_tilt_roll), (tx_ty_tz), 0)

    global g_camera_rays, g_camera_mat
    h, w = 480 // 2, 640 // 2
    coord, pix_coord = make_coords(h, w)
    #P = np.eye(3,4,dtype=np.float32)
    #P[0,0] = P[1,1] = 2.0
    k1, k2 = 0, 0
    g_camera_mat = Calibrate.makeMat(P, (k1, k2), [w, h])
    K, RT, P, ks, T, wh = g_camera_mat
    coord_undist = coord.copy()
    Calibrate.undistort_points_mat(coord.reshape(-1, 2), g_camera_mat,
                                   coord_undist.reshape(-1, 2))
    g_camera_rays = np.dot(coord_undist,
                           RT[:2, :3])  # ray directions (unnormalized)
    g_camera_rays -= np.dot([-K[0, 2], -K[1, 2], K[0, 0]], RT[:3, :3])
    g_camera_rays /= (np.sum(g_camera_rays**2, axis=-1)**0.5).reshape(
        h, w, 1)  # normalized ray directions
    names = ['kinect']
    vs = [np.zeros((h * w, 3), dtype=np.float32)]
    ts = [np.eye(3, 4, dtype=np.float32)]
    vts = [pix_coord * (1.0 / w, 1.0 / h)]
    faces = [make_faces(h, w)]
    mats = None
    geom_mesh = GLMeshes(names=names,
                         verts=vs,
                         faces=faces,
                         transforms=ts,
                         vts=vts)
    layers = {
        'geom_mesh': geom_mesh,
        'geo_mesh': geo_mesh,
        'img_mesh': img_mesh
    }
    QGLViewer.makeViewer(layers=layers,
                         mats=mats,
                         callback=cb,
                         timeRange=(0, 10000))