Example #1
def mainSnapshot(options=None):
    pipeline = CudaKdePacketPipeline()
    fn = Freenect2()
    device, listener, registration = initCamera(fn, pipeline)
    warmUp(listener)

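    # 4-bytes-per-pixel scratch frames that registration.apply() fills on each iteration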
    undistorted = Frame(FRAME_WIDTH, FRAME_HEIGHT, 4)
    registered = Frame(FRAME_WIDTH, FRAME_HEIGHT, 4)
    result = None
    while True:
        frames = listener.waitForNewFrame()
        color = frames["color"]
        depth = frames["depth"]
        registration.apply(color, depth, undistorted, registered)

        # kinect flips X axis
        regArray = registered.asarray(np.uint8)
        regArray = np.flip(regArray, 1)
        imgRGB = cv2.cvtColor(regArray, cv2.COLOR_RGBA2RGB)
        ret, buf = cv2.imencode(".jpg", imgRGB)
        # release the frame buffers before any break, so the listener
        # queue does not leak on the success path
        listener.release(frames)
        if ret:
            data = base64.b64encode(buf)
            result = {
                'width': FRAME_WIDTH,
                'height': FRAME_HEIGHT,
                'data': data
            }
            break
        else:
            print('fail')
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    shutdownCamera(device)
    return result
Example #2
    def stream_thread(self):
        undistorted = Frame(RES_DEPTH[0], RES_DEPTH[1], 4)
        registered = Frame(RES_DEPTH[0], RES_DEPTH[1], 4)

        while self.run_loop:
            frames = self.listener.waitForNewFrame()

            color = frames["color"]
            ir = frames["ir"]
            depth = frames["depth"]

            self.registration.apply(color, depth, undistorted, registered)

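            # IR is 16-bit (0..65535); depth is in millimetres, with 4500 mm
            # used as the practical maximum range for display scaling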
            img_ir = ir.asarray() / 65535.
            self.lock.acquire()
            self.img_depth = (depth.asarray() / 4500. * 255).astype(np.uint8)
            self.img_color = np.copy(color.asarray()[:, ::-1, :3])
            self.img_buffer[:, :, :, self.idx_buffer] = self.img_color
            self.img_registered_color = np.copy(registered.asarray(np.uint8))
            self.img_registered_depth = np.copy(undistorted.asarray(np.uint8))
            self.lock.release()
            self.idx_buffer += 1
            if self.idx_buffer >= SIZE_AVG_FILTER:
                self.idx_buffer = 0

            cv2.imshow("Kinect Depth", self.img_depth)
            cv2.imshow(
                "Kinect Color",
                cv2.resize(self.img_color, (int(round(
                    0.3 * RES_COLOR[0])), int(round(0.3 * RES_COLOR[1])))))
            cv2.imshow("Kinect R Depth", self.img_registered_depth)
            cv2.imshow("Kinect R Color", self.img_registered_color)

            key = cv2.waitKey(1) & 0xFF
            if key == ord('q') or key == ord('x'):
                self.run_loop = False
                break
            elif key == ord(' '):
                # 'frame' was undefined here; save the latest color image instead
                cv2.imwrite("frame.png", self.img_color)

            self.listener.release(frames)

        self.device.stop()
        self.device.close()
Example #3
def captureKinect(device, serial, listener, timestamp, dID):
    ts = timestamp
    dataID = dID
    camera="Kinect"
    filename = dataID+"_"+ts+"_"+camera
    firmware = device.getFirmwareVersion()
    firmware = str(firmware).split("'")[1]
    # NOTE: must be called after device.start()
    registration = Registration(device.getIrCameraParams(), device.getColorCameraParams())
    undistorted = Frame(512, 424, 4)
    registered = Frame(512, 424, 4)
    frames = listener.waitForNewFrame()
    color = frames["color"]
    ir = frames["ir"]
    depth = frames["depth"]
    registration.apply(color, depth, undistorted, registered, bigdepth=None, color_depth_map=None)
    colorExposure = color.exposure
    depthExposure = depth.exposure
    irExposure = ir.exposure
    colorGain = color.gain
    depthGain = depth.gain
    irGain = ir.gain

    #key = cv2.waitKey(5000)
    cv2.imwrite('DataSet/'+filename+"_color.jpg", color.asarray())
    cv2.imwrite('DataSet/'+filename+"_depth.jpg", depth.asarray()%256)
    cv2.imwrite('DataSet/'+filename+"_RGBD.jpg", registered.asarray(np.uint8))
    listener.release(frames)
    with open("KinectDeviceID.txt") as file:
      deviceID = file.read() 
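    # look up the USB descriptor line for the Kinect via lsusb (vendor:product ID)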
    process = subprocess.Popen("lsusb -v -d "+deviceID+"|grep \""+deviceID+"\"", stdout=subprocess.PIPE, shell=True)
    usb = process.stdout.readline()
    usb = str(usb).split("'")[1]
    usb = usb.split(':')[0]
    #print(usb)
    process.communicate()
    
    SensorDetails=[]
    SensorDetails.append(str(serial).split("'")[1])
    SensorDetails.append(firmware)
    SensorDetails.append("NA")
    SensorDetails.append(usb)
    SensorDetails.append("Color: "+str(colorExposure)+" Depth: "+str(depthExposure)+" IR: "+str(irExposure))
    SensorDetails.append("Color: "+str(colorGain)+" Depth: "+str(depthGain)+" IR: "+str(irGain))
    with open("kinect.txt",'w') as file:
      for item in SensorDetails:
         file.write(item+"\n")
Example #4
def mainSegment(options=None):
    print('--------------------------------------------')
    print(options)
    counter = 0
    pipeline = CudaKdePacketPipeline()
    fn = Freenect2()
    device, listener, registration = initCamera(fn, pipeline)
    warmUp(listener)
    minVal = computeBackground(listener, registration)
    undistorted = Frame(FRAME_WIDTH, FRAME_HEIGHT, 4)
    net = bp.newNet()
    t0 = time.perf_counter()  # time.clock() was removed in Python 3.8
    counterOld = counter
    while True:
        frames = listener.waitForNewFrame()
        depth = frames["depth"]
        registration.undistortDepth(depth, undistorted)
        # kinect flips X axis
        flipDepthFrame(undistorted)

        d = np.copy(undistorted.asarray(np.float32))
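        # alpha: per-pixel foreground estimate from subtracting the current
        # depth from the background model (minVal)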
        alpha = subtraction(minVal, d)
        if (options and options.get('display')):
            cv2.imshow('Contour', alpha)

        result = bp.process(options, net, scale(d), alpha, registration,
                            undistorted, device)

        if (options and options.get('display')):
            if cv2.waitKey(1) & 0xFF == ord('q'):
                break
        listener.release(frames)
        counter = counter + 1
        if counter % 10 == 0:
            t1 = time.perf_counter()
            print('{:.3f} images/sec'.format((counter - counterOld) / (t1 - t0)))
            t0 = t1
            counterOld = counter
        #print(result)
        yield json.dumps(result)
    shutdownCamera(device)
Example #5
def computeBackground(listener, registration):
    count = 0
    undistorted = Frame(FRAME_WIDTH, FRAME_HEIGHT, 4)
    minVal = np.full((FRAME_HEIGHT, FRAME_WIDTH), MAX_FLOAT32, dtype=np.float32)
    while count < 60:
        frames = listener.waitForNewFrame()
        depth = frames["depth"]
        registration.undistortDepth(depth, undistorted)
        # kinect flips X axis
        flipDepthFrame(undistorted)

        d = undistorted.asarray(np.float32)
        zeros = d.size - np.count_nonzero(d)
        #print('Input:zeros:' + str(zeros) + ' total:' + str(d.size))
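        # mark dropped (zero) depth pixels as "far" so they never win the minimum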
        d[d == 0.] = MAX_FLOAT32
        minVal = np.minimum(minVal, d)
        #print ('Minval: zeros:' + str(minVal[minVal == MAX_FLOAT32].size) +
        #       ' total:' + str(minVal.size))
        count = count + 1
        listener.release(frames)
    return inpaint(minVal)
Example #6
        depth = frames["depth"]

    if enable_rgb and enable_depth:
        registration.apply(color, depth, undistorted, registered)
    elif enable_depth:
        registration.undistortDepth(depth, undistorted)

    # if enable_depth:
    # cv2.imshow("ir", ir.asarray() / 65535.)
    # d_array = (depth.asarray() / 4500.)
    # print(d_array.shape)
    # depth_out.write(d_array)
    # cv2.imshow("depth", depth.asarray() / 4500.)
    # cv2.imshow("undistorted", undistorted.asarray(np.float32) / 4500.)
    if enable_rgb:
        color_array = color.asarray()
        color_out.write(color_array)
        cv2.imshow("color", color_array)
    # if enable_rgb and enable_depth:
    #     cv2.imshow("registered", registered.asarray(np.uint8))

    listener.release(frames)

    key = cv2.waitKey(delay=1)
    if key == ord('q'):
        break

device.stop()
device.close()
# depth_out.release()
color_out.release()
Example #7
def KinectStream(frame_count, BreakKinect, enable_rgb=True, enable_depth=True):
    # create folders for the color and depth images, respectively
    last_path, _ = os.path.split(os.getcwd())
    path_color = os.path.join(last_path, "color")
    path_depth = os.path.join(last_path, "depth")
    CreateRefrashFolder(path_color)
    CreateRefrashFolder(path_depth)
    '''
	# if the depth images are needed, you must use OpenGLPacketPipeline, enabling the GPU
	# to render the depth map so that real-time capture can be achieved.
	try:
		from pylibfreenect2 import OpenGLPacketPipeline
		pipeline = OpenGLPacketPipeline()
	except:
		try:
			from pylibfreenect2 import OpenCLPacketPipeline
			pipeline = OpenCLPacketPipeline()
		except:
			from pylibfreenect2 import CpuPacketPipeline
			pipeline = CpuPacketPipeline()
	'''

    if enable_depth:
        from pylibfreenect2 import OpenGLPacketPipeline
        pipeline = OpenGLPacketPipeline()
    else:
        from pylibfreenect2 import CpuPacketPipeline
        pipeline = CpuPacketPipeline()
    print("Packet pipeline:", type(pipeline).__name__)

    fn = Freenect2()
    num_devices = fn.enumerateDevices()
    if num_devices == 0:
        print("No device connected!")
        sys.exit(1)

    serial = fn.getDeviceSerialNumber(0)
    device = fn.openDevice(serial, pipeline=pipeline)

    types = 0
    if enable_rgb:
        types |= FrameType.Color
    if enable_depth:
        types |= (FrameType.Ir | FrameType.Depth)
    listener = SyncMultiFrameListener(types)

    # Register listeners
    device.setColorFrameListener(listener)
    device.setIrAndDepthFrameListener(listener)

    if enable_rgb and enable_depth:
        device.start()
    else:
        device.startStreams(rgb=enable_rgb, depth=enable_depth)
    # NOTE: must be called after device.start()
    if enable_depth:
        registration = Registration(device.getIrCameraParams(),
                                    device.getColorCameraParams())

    undistorted = Frame(512, 424, 4)
    registered = Frame(512, 424, 4)
    # the target size of the resize function
    SetSize = (540, 360)
    # SetSize = (1080, 720)

    while not BreakKinect.value:
        frame_count.value += 1
        file_name = 'image' + str(int(frame_count.value)) + '.jpg'
        im_path_color = os.path.join(path_color, file_name)
        im_path_depth = os.path.join(path_depth, file_name)
        frames = listener.waitForNewFrame()

        if enable_rgb:
            color = frames["color"]
        if enable_depth:
            ir = frames["ir"]
            depth = frames["depth"]

        if enable_rgb and enable_depth:
            registration.apply(color, depth, undistorted, registered, enable_filter=False)
        elif enable_depth:
            registration.undistortDepth(depth, undistorted)

        if enable_rgb:
            start = time.time()
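            # NOTE: the resized color frame computed below is immediately
            # overwritten; the registered (depth-aligned) image is what gets saved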
            new_frame = cv2.resize(color.asarray(), SetSize)
            # new_frame = new_frame[:,:,:-1]
            # new_frame = cv2.cvtColor(new_frame[:,:,:-1], cv2.COLOR_RGB2BGR)
            new_frame = registered.asarray(np.uint8)
            # new_frame = cv2.cvtColor(new_frame[:,:,[0,1,2]], cv2.COLOR_RGB2BGR)
            cv2.imwrite(im_path_color, new_frame[:, :, :-1])
        # print("Kinect color:", new_frame.shape)

        if enable_depth:
            # depth_frame = cv2.resize(depth.asarray()/ 4500., SetSize)
            depth = depth.asarray() / 4500.
            # cv2.imshow("color", new_frame)
            undist = undistorted.asarray(np.float32) / 4500 * 255
            cv2.imwrite(im_path_depth, undist.astype(np.uint8))
            print("Kinect depth:", depth.shape)

        listener.release(frames)

    device.stop()
    device.close()
    # WriteVideo(path, im_pre = 'image', video_name = 'test.avi', fps = 15, size = SetSize)
    sys.exit(0)
Example #8
FOVX = 1.232202  #horizontal FOV in radians
focal_x = device.getIrCameraParams().fx  #focal length x
focal_y = device.getIrCameraParams().fy  #focal length y
principal_x = device.getIrCameraParams().cx  #principal point x
principal_y = device.getIrCameraParams().cy  #principal point y
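# NOTE: Frame() takes (width, height, bytes_per_pixel); passing (h, w) here
# only works if h and w are already in that order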
undistorted = Frame(h, w, 4)
registered = Frame(h, w, 4)

while True:

    frames = listener.waitForNewFrame()
    depth_frame = frames["depth"]
    color = frames["color"]
    registration.apply(color, depth_frame, undistorted, registered)
    #convert image
    color = registered.asarray(np.uint8)
    color = cv2.flip(color, 1)
    img = depth_frame.asarray(np.float32) / 4500.
    imgray = np.uint8(depth_frame.asarray(np.float32) / 255.0)
    #flip images
    img = cv2.flip(img, 1)
    imgray = cv2.flip(imgray, 1)
    if len(sys.argv) > 1:
        if sys.argv[1] == "test" and frame_i < num_frames:
            np.save("test_frames/" + str(frame_i) + ".npy",
                    depth_frame.asarray(np.float32))
            frame_i += 1

    #begin edge detection

    ret, thresh = cv2.threshold(imgray, 0, 255, cv2.THRESH_BINARY)
Example #9
    registration.apply(color,
                       depth,
                       undistorted,
                       registered,
                       bigdepth=bigdepth,
                       color_depth_map=color_depth_map)
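    # bigdepth: depth mapped onto the full 1920x1082 color image;
    # color_depth_map: for each depth pixel, the index of its color pixel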

    # NOTE for visualization:
    # cv2.imshow without OpenGL backend seems to be quite slow to draw all
    # things below. Try commenting out some imshow if you don't have a fast
    # visualization backend.
    cv2.imshow("ir", ir.asarray() / 65535.)
    cv2.imshow("depth", depth.asarray() / 4500.)
    cv2.imshow("color",
               cv2.resize(color.asarray(), (int(1920 / 3), int(1080 / 3))))
    cv2.imshow("registered", registered.asarray(np.uint8))

    if need_bigdepth:
        cv2.imshow(
            "bigdepth",
            cv2.resize(bigdepth.asarray(np.float32),
                       (int(1920 / 3), int(1082 / 3))))
    if need_color_depth_map:
        cv2.imshow("color_depth_map", color_depth_map.reshape(424, 512))

    listener.release(frames)

    key = cv2.waitKey(delay=1)
    if key == ord('q'):
        break
Example #10
class Kinect():
    ''' Kinect object, it uses pylibfreenect2 as interface to get the frames.
    The original example was taken from the pylibfreenect2 github repository, at:
    https://github.com/r9y9/pylibfreenect2/blob/master/examples/selective_streams.py  '''
    def __init__(self,
                 enable_rgb,
                 enable_depth,
                 need_bigdepth,
                 need_color_depth_map,
                 K=None,
                 D=None):
        ''' Init method called upon creation of Kinect object '''

        # according to the system, it loads the correct pipeline
        # and prints a log for the user
        try:
            from pylibfreenect2 import OpenGLPacketPipeline
            self.pipeline = OpenGLPacketPipeline()
        except ImportError:
            try:
                from pylibfreenect2 import OpenCLPacketPipeline
                self.pipeline = OpenCLPacketPipeline()
            except ImportError:
                from pylibfreenect2 import CpuPacketPipeline
                self.pipeline = CpuPacketPipeline()

        rospy.loginfo(color.BOLD + color.YELLOW + '-- PACKET PIPELINE: ' +
                      str(type(self.pipeline).__name__) + ' --' + color.END)

        self.enable_rgb = enable_rgb
        self.enable_depth = enable_depth
        self.K = K
        self.D = D

        # creates the freenect2 device
        self.fn = Freenect2()
        # if no kinects are plugged in the system, it quits
        self.num_devices = self.fn.enumerateDevices()
        if self.num_devices == 0:
            rospy.loginfo(color.BOLD + color.RED +
                          '-- ERROR: NO DEVICE CONNECTED!! --' + color.END)
            sys.exit(1)

        # otherwise it gets the first one available
        self.serial = self.fn.getDeviceSerialNumber(0)
        self.device = self.fn.openDevice(self.serial, pipeline=self.pipeline)

        # defines the streams to be acquired according to what the user wants
        types = 0
        if self.enable_rgb:
            types |= FrameType.Color
        if self.enable_depth:
            types |= (FrameType.Ir | FrameType.Depth)
        self.listener = SyncMultiFrameListener(types)

        # Register listeners
        if self.enable_rgb:
            self.device.setColorFrameListener(self.listener)
        if self.enable_depth:
            self.device.setIrAndDepthFrameListener(self.listener)

        if self.enable_rgb and self.enable_depth:
            self.device.start()
        else:
            self.device.startStreams(rgb=self.enable_rgb,
                                     depth=self.enable_depth)

        # NOTE: must be called after device.start()
        if self.enable_depth:
            self.registration = Registration(
                self.device.getIrCameraParams(),
                self.device.getColorCameraParams())

        # last number is bytes per pixel
        self.undistorted = Frame(512, 424, 4)
        self.registered = Frame(512, 424, 4)

        # Optional parameters for registration
        self.need_bigdepth = need_bigdepth
        self.need_color_depth_map = need_color_depth_map

        if self.need_bigdepth:
            self.bigdepth = Frame(1920, 1082, 4)
        else:
            self.bigdepth = None

        if self.need_color_depth_map:
            self.color_depth_map = np.zeros((424, 512), np.int32).ravel()
        else:
            self.color_depth_map = None

    def acquire(self):
        ''' Acquisition method to trigger the Kinect to acquire new frames. '''

        # acquires a frame only if it's new
        frames = self.listener.waitForNewFrame()

        if self.enable_rgb:
            self.color = frames["color"]
            self.color_new = cv2.resize(self.color.asarray(),
                                        (int(1920 / 1), int(1080 / 1)))
            # The image has a fourth channel (alpha), so keep only
            # the first three channels
            self.color_new = self.color_new[:, :, 0:3]
            # correct distortion of camera
            self.correct_distortion()
        if self.enable_depth:
            # these only have one dimension, we just need to convert them to arrays
            # if we want to perform detection on them
            self.depth = frames["depth"]
            self.depth_new = cv2.resize(self.depth.asarray() / 4500.,
                                        (int(512 / 1), int(424 / 1)))

            self.registration.undistortDepth(self.depth, self.undistorted)
            self.undistorted_new = cv2.resize(
                self.undistorted.asarray(np.float32) / 4500.,
                (int(512 / 1), int(424 / 1)))

            self.ir = frames["ir"]
            self.ir_new = cv2.resize(self.ir.asarray() / 65535.,
                                     (int(512 / 1), int(424 / 1)))

        if self.enable_rgb and self.enable_depth:
            self.registration.apply(self.color,
                                    self.depth,
                                    self.undistorted,
                                    self.registered,
                                    bigdepth=self.bigdepth,
                                    color_depth_map=self.color_depth_map)
            # RGB + D
            self.registered_new = self.registered.asarray(np.uint8)

            if self.need_bigdepth:
                self.bigdepth_new = cv2.resize(
                    self.bigdepth.asarray(np.float32),
                    (int(1920 / 1), int(1082 / 1)))
                #cv2.imshow("bigdepth", cv2.resize(self.bigdepth.asarray(np.float32), (int(1920 / 1), int(1082 / 1))))
            if self.need_color_depth_map:
                #cv2.imshow("color_depth_map", self.color_depth_map.reshape(424, 512))
                self.color_depth_map_new = self.color_depth_map.reshape(
                    424, 512)

        # do this anyway to release every acquired frame
        self.listener.release(frames)

    def stop(self):
        ''' Stop method to close device upon exiting the program '''

        rospy.loginfo(color.BOLD + color.RED + '\n -- CLOSING DEVICE... --' +
                      color.END)
        self.device.stop()
        self.device.close()

    def correct_distortion(self):
        ''' Method to correct distortion using camera calibration parameters '''

        if self.K is not None and self.D is not None:
            h, w = self.color_new.shape[:2]
            newcameramtx, roi = cv2.getOptimalNewCameraMatrix(
                self.K, self.D, (w, h), 1, (w, h))
            # undistort
            self.RGBundistorted = cv2.undistort(self.color_new, self.K, self.D,
                                                None, newcameramtx)
            # crop the image
            x, y, w, h = roi
            self.RGBundistorted = self.RGBundistorted[y:y + h, x:x + w]
            self.RGBundistorted = cv2.flip(self.RGBundistorted, 0)
            self.RGBundistorted = self.RGBundistorted[0:900, 300:1800]
            #self.RGBundistorted = self.RGBundistorted[0:900, 520:1650]
        else:
            rospy.loginfo(color.BOLD + color.RED +
                          '-- ERROR: NO CALIBRATION LOADED!! --' + color.END)
            self.RGBundistorted = self.color_new
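
A minimal driver sketch for the Kinect class above (hypothetical: it assumes
rospy is already initialized and only uses the constructor and methods defined
in the class; frame-rate control and error handling are omitted):

kinect = Kinect(enable_rgb=True, enable_depth=True,
                need_bigdepth=False, need_color_depth_map=False)
try:
    while not rospy.is_shutdown():
        kinect.acquire()  # fills color_new, depth_new, registered_new, ...
finally:
    kinect.stop()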
Example #11
frame_i = 0  # current frame used for saving data
frame_limit = -1  # number of frames to process, -1 will run indefinitely
prev_frame = None  # previous frame used for optical flow

setup_obstacle_node()

# uncomment to send global obstacle positions based on localization data
#update_position()

while not rospy.is_shutdown():
	frames = listener.waitForNewFrame()
	depth_frame = frames["depth"]
	color = frames["color"]
	registration.apply(color, depth_frame, undistorted, registered)
	color_frame = registered.asarray(np.uint8)

	if save_test_data:
		np.save(frames_dir + str(frame_i) + ".npy", depth_frame.asarray(np.float32))

	frame_i += 1
	if frame_i == frame_limit:
		break

	img = depth_frame.asarray(np.float32)
	img = cv2.flip(img, 1)
	output, obstacle_id = get_obstacles_with_plane(img,
									  prev_frame,
									  color_frame,
									  obstacle_list,
									  thetas,
Example #12
    def find_and_track_kinect(self, name, tracker="CSRT",
            face_target_box=DEFAULT_FACE_TARGET_BOX,
            track_scaling=DEFAULT_SCALING,
            res=(RGB_W, RGB_H), video_out=True):
        print("Starting Tracking")

        target = name

        fn = Freenect2()
        num_devices = fn.enumerateDevices()

        if num_devices == 0:
            print("No device connected!")
            return

        serial = fn.getDeviceSerialNumber(0)
        device = fn.openDevice(serial, pipeline = self.pipeline)

        listener = SyncMultiFrameListener(FrameType.Color | FrameType.Depth)

        device.setColorFrameListener(listener)
        device.setIrAndDepthFrameListener(listener)

        device.start()

        registration = Registration(device.getIrCameraParams(),
                device.getColorCameraParams())

        undistorted = Frame(512, 424, 4)
        registered = Frame(512, 424, 4)
        bigdepth = Frame(1920, 1082, 4)

        trackerObj = None
        face_process_frame = True

        bbox = None
        track_bbox = None

        head_h = 0
        body_left_w = 0
        body_right_w = 0
        center_w = 0

        globalState = ""

        # Following line creates an avi video stream of daisy's tracking
        # out = cv2.VideoWriter('daisy_eye.avi', cv2.VideoWriter_fourcc('M','J','P','G'), 10, (960, 540))
        # out.write(c)
        while True:
            timer = cv2.getTickCount()

            frames = listener.waitForNewFrame()

            color = frames["color"]
            depth = frames["depth"]

            registration.apply(color, depth, undistorted, registered, bigdepth=bigdepth)

            bd = np.resize(bigdepth.asarray(np.float32), (1080, 1920))
            c = cv2.cvtColor(color.asarray(), cv2.COLOR_RGB2BGR)

            if self.connected:
                newTarget = self.alexa_neuron.get('name')
                if newTarget != target:
                    target = newTarget
                    listener.release(frames)
                    trackerObj = None
                    face_process_frame = True

                    bbox = None
                    track_bbox = None
                    continue
                if target is not None and target not in self.known_faces:
                    target = None

            if target is None:
                if self.connected:
                    c = self.__scale_frame(c, scale_factor = 0.5)
                    image = cv2.imencode('.jpg', c)[1].tobytes()
                    self.web_neuron.update([('image', image)])
                listener.release(frames)
                trackerObj = None
                face_process_frame = True

                bbox = None
                track_bbox = None
                self.__update_individual_position("WAITING", None, None, None, res)
                continue

            face_bbox = None
            new_track_bbox = None

            if face_process_frame:
                small_c = self.__crop_frame(c, face_target_box)
                face_locations = face_recognition.face_locations(small_c, number_of_times_to_upsample=0, model="cnn")
                face_encodings = face_recognition.face_encodings(small_c, face_locations)
                for face_encoding in face_encodings:
                    matches = face_recognition.compare_faces(
                            [self.known_faces[target]], face_encoding, 0.6)
                    if len(matches) > 0 and matches[0]:
                        (top, right, bottom, left) = face_locations[0]

                        left += face_target_box[0]
                        top += face_target_box[1]
                        right += face_target_box[0]
                        bottom += face_target_box[1]

                        face_bbox = (left, top, right, bottom)
                        mid_w = int((left + right) / 2)
                        mid_h = int((top + bottom) / 2)
                        new_track_bbox = self.__body_bbox(bd, mid_w, mid_h, res)

                        break
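            # run the (expensive) face recognition only on every other frame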
            face_process_frame = not face_process_frame

            overlap_pct = 0
            track_area = self.__bbox_area(track_bbox)
            if track_area > 0 and new_track_bbox:
                overlap_area = self.__bbox_overlap(new_track_bbox, track_bbox)
                overlap_pct = min(overlap_area / self.__bbox_area(new_track_bbox),
                        overlap_area / self.__bbox_area(track_bbox))
            small_c = self.__scale_frame(c, track_scaling)
            if new_track_bbox is not None and overlap_pct < CORRECTION_THRESHOLD:
                bbox = (new_track_bbox[0],
                        new_track_bbox[1],
                        new_track_bbox[2] - new_track_bbox[0],
                        new_track_bbox[3] - new_track_bbox[1])
                bbox = self.__scale_bbox(bbox, track_scaling)
                trackerObj = self.__init_tracker(small_c, bbox, tracker)
                self.alexa_neuron.update([('tracking', True)])

            if trackerObj is None:
                self.__update_individual_position("WAITING", None, None, None, res)

            status = False

            if trackerObj is not None:
                status, trackerBBox = trackerObj.update(small_c)
                bbox = (int(trackerBBox[0]),
                        int(trackerBBox[1]),
                        int(trackerBBox[0] + trackerBBox[2]),
                        int(trackerBBox[1] + trackerBBox[3]))

            if bbox is not None:
                track_bbox = (bbox[0], bbox[1], bbox[2], bbox[3])
                track_bbox = self.__scale_bbox(bbox, 1/track_scaling)

            w = 0
            h = 0

            if status:
                w = track_bbox[0] + int((track_bbox[2] - track_bbox[0])/2)
                h = track_bbox[1] + int((track_bbox[3] - track_bbox[1])/2)

                if 0 <= w < res[0] and 0 <= h < res[1]:
                    distanceAtCenter = bd[h][w]
                    center = (w, h)
                    globalState = self.__update_individual_position(status, track_bbox, center, distanceAtCenter, res)

            if globalState == "Fail":
                break

            fps = cv2.getTickFrequency() / (cv2.getTickCount() - timer)

            if video_out or self.connected:
                cv2.line(c, (w, 0), (w, res[1]), (0,255,0), 1)
                cv2.line(c, (0, h), (res[0], h), (0,255,0), 1)
                cv2.line(c, (0, head_h), (res[0], head_h), (0,0,0), 1)
                cv2.line(c, (body_left_w, 0), (body_left_w, res[1]), (0,0,255), 1)
                cv2.line(c, (body_right_w, 0), (body_right_w, res[1]), (0,0,255), 1)
                cv2.line(c, (center_w, 0), (center_w, res[1]), (0,0,255), 1)

                self.__draw_bbox(True, c, face_target_box, (255, 0, 0), "FACE_TARGET")
                self.__draw_bbox(status, c, track_bbox, (0, 255, 0), tracker)
                self.__draw_bbox(face_bbox is not None, c, face_bbox, (0, 0, 255), target)
                self.__draw_bbox(face_bbox is not None, c, new_track_bbox, (0, 255, 255), "BODY")

                c = self.__scale_frame(c, scale_factor = 0.5)

                cv2.putText(c, "FPS : " + str(int(fps)), (100,50),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255,0,255), 1)
                if not status:
                    failedTrackers = "FAILED: "
                    failedTrackers += tracker + " "
                    cv2.putText(c, failedTrackers, (100, 80),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0,0,142), 1)
                if self.connected:
                    image = cv2.imencode('.jpg', c)[1].tobytes()
                    self.web_neuron.update([('image', image)])
                if video_out:
                    cv2.imshow("color", c)

            listener.release(frames)

            key = cv2.waitKey(1) & 0xff
            if key == ord('q'):
                self.__update_individual_position("STOP", None, None, None, res)
                break

        self.so.close()
        cv2.destroyAllWindows()
        device.stop()
        device.close()
Example #13
def main():
    try:
        from pylibfreenect2 import OpenCLPacketPipeline
        pipeline = OpenCLPacketPipeline()
    except ImportError:
        from pylibfreenect2 import CpuPacketPipeline
        pipeline = CpuPacketPipeline()

    # Create and set logger
    logger = createConsoleLogger(LoggerLevel.Debug)
    setGlobalLogger(logger)

    fn = Freenect2()
    num_devices = fn.enumerateDevices()
    if num_devices == 0:
        print("No device connected!")
        sys.exit(1)

    serial = fn.getDeviceSerialNumber(0)
    device = fn.openDevice(serial, pipeline=pipeline)

    listener = SyncMultiFrameListener(FrameType.Color | FrameType.Ir
                                      | FrameType.Depth)

    # Register listeners
    device.setColorFrameListener(listener)
    device.setIrAndDepthFrameListener(listener)

    device.start()

    # NOTE: must be called after device.start()
    registration = Registration(device.getIrCameraParams(),
                                device.getColorCameraParams())

    undistorted = Frame(512, 424, 4)
    registered = Frame(512, 424, 4)

    # Optional parameters for registration
    # set to True if you need them
    need_bigdepth = False
    need_color_depth_map = False

    bigdepth = Frame(1920, 1082, 4) if need_bigdepth else None
    color_depth_map = np.zeros((424, 512),  np.int32).ravel() \
        if need_color_depth_map else None

    point = pcl.PointCloud()
    visual = pcl.pcl_visualization.CloudViewing()
    # NOTE: 'cloud' was undefined here; the cloud is built inside the loop below

    while True:
        frames = listener.waitForNewFrame()

        color = frames["color"]
        ir = frames["ir"]
        depth = frames["depth"]

        registration.apply(color,
                           depth,
                           undistorted,
                           registered,
                           bigdepth=bigdepth,
                           color_depth_map=color_depth_map)

        # NOTE for visualization:
        # cv2.imshow without OpenGL backend seems to be quite slow to draw all
        # things below. Try commenting out some imshow if you don't have a fast
        # visualization backend.
        # cv2.imshow("ir", ir.asarray() / 65535.)
        # cv2.imshow("depth", depth.asarray() / 4500.)
        # cv2.imshow("color", cv2.resize(color.asarray(), (int(1920 / 3), int(1080 / 3))))
        # cv2.imshow("registered", registered.asarray(np.uint8))

        # if need_bigdepth:
        #     cv2.imshow("bigdepth", cv2.resize(bigdepth.asarray(np.float32),
        #                                       (int(1920 / 3), int(1082 / 3))))
        # if need_color_depth_map:
        #     cv2.imshow("color_depth_map", color_depth_map.reshape(424, 512))

        # Frame.asarray() takes just a dtype; there is no 'ndim' keyword
        undistorted_array = undistorted.asarray(np.float32)
        # registered_array = registered.asarray(np.uint8)
        # NOTE: pcl.PointCloud expects an (N, 3) float32 array of XYZ points;
        # a raw (424, 512) depth image needs back-projection (e.g. via
        # registration.getPointXYZ) before it forms a valid cloud
        point = pcl.PointCloud(undistorted_array)
        # visual.ShowColorCloud(cloud)

        listener.release(frames)

        # key = cv2.waitKey(delay=1)
        # if key == ord('q'):
        #     break
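        # block until the PCL viewer window reports it was closed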
        v = True
        while v:
            v = not (visual.WasStopped())

    device.stop()
    device.close()

    sys.exit(0)
Example #14
class Tracker:
    # X_POSITION = -0.11387496
    # Z_POSITION = 1.3
    X_POSITION = -0.185
    Z_POSITION = 1.5


    # earlier HSV bounds, superseded by the values below
    # greenLower = (29, 86, 6)
    # greenUpper = (64, 255, 255)

    greenLower = (27, 100, 50)
    greenUpper = (60, 255, 255)

    def __init__(self, use_kinect=False, basket=False, gui=True):
        self.connected = False
        self.max_radius = 100000
        self.running = False
        self.killed = False
        self.gui = gui
        self.basket = basket
        self.use_kinect = use_kinect
        # self.fig, self.ax = plt.subplots()

        if self.use_kinect:
            self.px = []
            self.py = []
            logger = createConsoleLogger(LoggerLevel.Error)
            setGlobalLogger(logger)

        while not self.connected:
            # Bad, but needed
            if self.use_kinect:
                self.connect_kinect()
            else:
                self.connect_camera()

        self.thread = threading.Thread(target=self.video_thread)
        self.thread.start()

    def connect_camera(self):
        # Bad, but needed
        self.camera = cv2.VideoCapture(1)
        if not self.camera.isOpened():
            print("Unable to connect to the camera, check again")
            time.sleep(5)
        else:
            self.connected = True

    def connect_kinect(self):
        # Bad, but needed
        self.fn = Freenect2()
        num_devices = self.fn.enumerateDevices()
        if num_devices == 0:
            print("Kinect not found, check again")
            self.connected = False
            return

        try:
            self.pipeline = OpenGLPacketPipeline()
        except Exception:
            self.pipeline = CpuPacketPipeline()

        serial = self.fn.getDeviceSerialNumber(0)
        self.device = self.fn.openDevice(serial, pipeline=self.pipeline)

        self.listener = SyncMultiFrameListener(
            FrameType.Color | FrameType.Ir | FrameType.Depth)

        # Register listeners
        self.device.setColorFrameListener(self.listener)
        self.device.setIrAndDepthFrameListener(self.listener)

        self.device.start()
        print("started")

        # NOTE: must be called after device.start()
        self.registration = Registration(self.device.getIrCameraParams(),
                                         self.device.getColorCameraParams())
        print("registration")
        self.undistorted = Frame(512, 424, 4)
        self.registered = Frame(512, 424, 4)
        self.connected = True

    def video_thread(self):
        print('Camera thread started')
        need_bigdepth = False
        need_color_depth_map = False

        bigdepth = Frame(1920, 1082, 4) if need_bigdepth else None
        color_depth_map = np.zeros((424, 512), np.int32).ravel() \
            if need_color_depth_map else None

        while not self.killed:
            while self.running:
                # (grabbed, frame) = self.camera.read()
                frames = self.listener.waitForNewFrame()
                frame = frames["color"]
                ir = frames["ir"]
                depth = frames["depth"]
                self.registration.apply(frame, depth, self.undistorted, self.registered,
                                        bigdepth=bigdepth,
                                        color_depth_map=color_depth_map)

                # print(frame.width, frame.height, frame.bytes_per_pixel)
                # print(ir.width, ir.height, ir.bytes_per_pixel)
                # print(depth.width, depth.height, depth.bytes_per_pixel)
                # print(frame.asarray().shape, ir.asarray().shape, depth.asarray().shape)

                # color_image_array = frame.asarray()
                color_image_array = self.registered.asarray(np.uint8)
                hsv = cv2.cvtColor(color_image_array, cv2.COLOR_BGR2HSV)

                mask = cv2.inRange(hsv, self.greenLower, self.greenUpper)
                mask = cv2.erode(mask, None, iterations=2)
                mask = cv2.dilate(mask, None, iterations=2)

                cnts = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL,
                                        cv2.CHAIN_APPROX_SIMPLE)[-2]
                center = None

                if len(cnts) > 0:
                    c = max(cnts, key=cv2.contourArea)
                    ((x, y), radius) = cv2.minEnclosingCircle(c)
                    M = cv2.moments(c)
                    center = (int(M["m10"] / M["m00"]),
                              int(M["m01"] / M["m00"]))

                    if radius > 10:
                        cv2.circle(color_image_array, (int(x), int(y)), int(radius),
                                   (0, 255, 255), 2)
                        cv2.circle(color_image_array, center, 5, (0, 0, 255), -1)
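                        # map (row, col) in the undistorted depth frame to a 3D point in metres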
                        x, y, z = self.registration.getPointXYZ(self.undistorted, y, x)
                        # print(x, y, z, end='\r')
                        if not math.isnan(y) and not math.isnan(z):
                            self.px.append(y)
                            self.py.append(z)
                        row = tuple(c[c[:, :, 1].argmin()][0])[1]
                        self.max_radius = min(self.max_radius, row)

                if self.gui:
                    cv2.imshow("Frame", color_image_array)
                    cv2.imshow("Masked", mask)
                self.listener.release(frames)
                cv2.waitKey(1) & 0xFF
                # self.ax.plot(self.px)
                # self.ax.plot(self.py)
                # plt.pause(0.01)

        cv2.destroyAllWindows()
        print("exit looping")

    def start(self):
        self.max_radius = 100000
        if self.use_kinect:
            self.px = []
            self.py = []
        self.running = True

    def stop(self):
        self.running = False

    def kill(self):
        self.killed = True
        if self.use_kinect:
            self.device.stop()
            self.device.close()

    def get_reward(self):
        if self.use_kinect:
            if not self.basket:
                try:
                    peaks = peakutils.indexes(self.px)
                except Exception as e:
                    print(e)
                    peaks = np.array([])

                if peaks.any():
                    # for xx in peaks:
                    #     t.ax.plot(xx, self.px[xx], 'ro')
                    return self.py[peaks[0]] * 100
                else:
                    return 0
            else:
                print(np.mean(self.px), np.mean(self.py))
                dist = distance(np.array([self.px, self.py]).T, self.X_POSITION, self.Z_POSITION)
                print("distance", dist, len(self.px))
                return dist * 100
        else:
            return self.max_radius
Example #15
while True:
    frames = listener.waitForNewFrame()
    color = frames["color"]
    ir = frames["ir"]
    depth = frames["depth"]
    registration.apply(color, depth, undistorted, registered)
    depth = depth.asarray()
    if len(maxDepth) == 0:
        maxDepth = depth
        listener.release(frames)
        continue

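    # per-pixel running maximum approximates the static background depth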
    maxDepth = numpy.maximum(depth, maxDepth)

    foreground = registered.asarray(numpy.uint8).copy()

    ImageProc.depthFilter(maxDepth, depth, foreground)

    cv2.imshow("registered", registered.asarray(numpy.uint8))
    #cv2.imshow("depth", depth / 4500)
    #cv2.imshow("max depth", maxDepth / 4500)
    #cv2.imshow("color", color.asarray())

    cv2.imshow("greenscreen", foreground)

    listener.release(frames)
    key = cv2.waitKey(delay=1)
    if key == ord('q'):
        break
Example #16
def projection():

    pipeline = CpuPacketPipeline()
    print("Packet pipeline:", type(pipeline).__name__)

    # Create and set logger
    logger = createConsoleLogger(LoggerLevel.Debug)
    setGlobalLogger(logger)
    fn = Freenect2()
    num_devices = fn.enumerateDevices()
    if num_devices == 0:
        print("No device connected!")
        sys.exit(1)

    serial = fn.getDeviceSerialNumber(0)
    device = fn.openDevice(serial, pipeline=pipeline)

    listener = SyncMultiFrameListener(FrameType.Color | FrameType.Ir
                                      | FrameType.Depth)

    # Register listeners
    device.setColorFrameListener(listener)
    device.setIrAndDepthFrameListener(listener)

    device.start()

    # NOTE: must be called after device.start()
    registration = Registration(device.getIrCameraParams(),
                                device.getColorCameraParams())

    undistorted = Frame(512, 424, 4)
    registered = Frame(512, 424, 4)

    # Optional parameters for registration
    # set to True if you need them
    need_bigdepth = True
    need_color_depth_map = True

    bigdepth = Frame(1920, 1082, 4) if need_bigdepth else None
    color_depth_map = np.zeros((424, 512),  np.int32).ravel() \
        if need_color_depth_map else None

    # initialize the node before creating publishers
    rospy.init_node('projection', anonymous=True)

    color_pub = rospy.Publisher('color', Image, queue_size=10)
    depth_pub = rospy.Publisher('depth', Image, queue_size=10)
    ir_pub = rospy.Publisher('ir', Image, queue_size=10)
    small_depth_pub = rospy.Publisher('small_depth', Image, queue_size=10)

    rate = rospy.Rate(10)  # 10 Hz

    while not rospy.is_shutdown():
        frames = listener.waitForNewFrame()

        color = frames["color"]
        ir = frames["ir"]
        depth = frames["depth"]

        registration.apply(color,
                           depth,
                           undistorted,
                           registered,
                           bigdepth=bigdepth,
                           color_depth_map=color_depth_map)

        color_image = CvBridge().cv2_to_imgmsg(color.asarray())
        color_pub.publish(color_image)

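        # bigdepth is the depth image aligned to the full-resolution color frame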
        depth_image = CvBridge().cv2_to_imgmsg(bigdepth.asarray(np.float32))
        depth_pub.publish(depth_image)

        ir_image = CvBridge().cv2_to_imgmsg(ir.asarray())
        ir_pub.publish(ir_image)

        small_depth_image = CvBridge().cv2_to_imgmsg(depth.asarray())
        small_depth_pub.publish(small_depth_image)

        rate.sleep()

        listener.release(frames)

        key = cv2.waitKey(delay=1)
        if key == ord('q'):
            break

    device.stop()
    device.close()
    sys.exit(0)
Example #17
class Kinect:

    def __init__(self):
        self.averageSpineX = 0

        self.fn = Freenect2()
        if self.fn.enumerateDevices() == 0:
            sys.exit(47)


        self.serial = self.fn.getDeviceSerialNumber(0)

        types = 0
        types |= FrameType.Color
        types |= (FrameType.Ir | FrameType.Depth)
        self.listener = SyncMultiFrameListener(types)

        self.logger = createConsoleLogger(LoggerLevel.Debug)

        self.device = self.fn.openDevice(self.serial)

        self.device.setColorFrameListener(self.listener)
        self.device.setIrAndDepthFrameListener(self.listener)

        self.undistorted = Frame(512, 424, 4)
        self.registered = Frame(512, 424, 4)


    def valueBounded(self, checkValue, absoluteValue):
        # true when checkValue lies within [-absoluteValue, absoluteValue]
        return -absoluteValue <= checkValue <= absoluteValue


    def valueUnbounded(self, checkValue, absoluteValue):
        # true when checkValue lies outside [-absoluteValue, absoluteValue]
        return checkValue > absoluteValue or checkValue < -absoluteValue

    def valuePlusMinusDifferential(self, checkValue, testValue, Differential):
        # true when checkValue is within +/- Differential of testValue
        return testValue - Differential < checkValue < testValue + Differential


    def update(self, registration):
        # listeners were already registered in __init__, no need to re-set them
        frames = self.listener.waitForNewFrame()
        depth = frames["depth"]
        color = frames["color"]

        registration.apply(color, depth, self.undistorted, self.registered)

        # read the array only after apply(), so it reflects the current frame
        d = self.undistorted.asarray(dtype=np.float32)

        self.listener.release(frames)

        return d


    # calculate the average and skeleton points of a depth array, those
    # values plus the depth array



    # calculate mean depth of depth array, used to find skeleton points.
    def getMeanDepth(self, depth, rows, cols):
        total = 0
        sumDepth = 0
        for row in range(rows):
            for col in range(cols):
                offset = row + col * rows
                #if depth[offset]['depth'] < DANGER_THRESHOLD:
                try:
                    # an IndexError can only occur on the lookup itself
                    sumDepth += depth[offset]['depth']
                    total += 1
                except IndexError as e:
                    print(e)

        return sumDepth / total


    # calculate the skeleton points of the depth array
    def getSkeleton(self, depthArray, average, rows, cols):
        topY = 0
        leftX = rows
        bottomY = cols
        rightX = 0

        for d in depthArray:
            #print depthArray[offset]
            if (d['x'] > rightX):
                rightX = d['x']
            if (d['x'] < leftX):
                leftX = d['x']
            if (d['y'] < bottomY):
                bottomY = d['y']
            if (d['y'] > topY):
                topY = d['y']

        averageX = (leftX + rightX) / 2
        returnValues = {'averageX': averageX,
                        'topY': topY,
                        'bottomY': bottomY,
                        'rightX': rightX,
                        'leftX': leftX}

        return returnValues


    def changeInX(self, spine):
        print ("changeInX")
        if self.averageSpineX == 0:
            self.averageSpineX = spine['averageX']
        elif (self.valueBounded((self.averageSpineX - spine['averageX']), X_CHANGE_THRESHOLD)):
            self.averageSpineX = ((self.averageSpineX + spine['averageX']) / 2)
        else:
            self.checkDrowning()


    # Check to see if the difference between the averageSpineX and the last
    # analyzed averageX is less below a threshold. If it is, the loop
    # continues. If it runs for 20 seconds, it will set the drowning flag to
    # true. If falsePositive gets to be too high, DORi will no longer
    # assume the victim is a drowning risk
    def checkDrowning(self):
        print ("checkDrowning")
        drowningRisk = True
        drowning = False
        falsePositive = 0
        # 20 seconds from start of def #
        timeLimit = time.time() + DROWN_TIME
        self.device.start()
        while drowningRisk:
            print ('checking...')
            if time.time() > timeLimit:
                drowningRisk = False
                drowning = True

            data = self.fullDataLoop()
            if (self.valueUnbounded((self.averageSpineX - data['spine']['averageX']), X_CHANGE_THRESHOLD)):
                falsePositive += 1
                if falsePositive > 100:
                    drowningRisk = False
            else:
                continue
        if drowning:
            self.device.stop()
            print ("This guy is for sure drowning")
            sys.exit(47)

        self.device.stop()

    def depthMatrixToPointCloudPos2(self, depthArray):
        # per-pixel row/column index grids (the original unpacked the shape
        # into two scalars, which cannot be back-projected)
        R, C = np.indices(depthArray.shape, dtype=np.float32)

        # pinhole back-projection: (pixel - principal point) * depth / focal length
        R -= KINECT_SPECS['cy']
        R *= depthArray
        R /= KINECT_SPECS['fy']

        C -= KINECT_SPECS['cx']
        C *= depthArray
        C /= KINECT_SPECS['fx']

        return np.column_stack((depthArray.ravel(), R.ravel(), C.ravel()))

    def getBody(self, depthArray, average, rows, cols):
        body = []

        for d in depthArray:
            if self.valuePlusMinusDifferential(d['depth'], average, BODY_DEPTH_THRESHOLD):
                body.append(d)

        return body

    def pointRavel(self, unraveledArray, rows, cols):
        raveledArray = []
        d = unraveledArray.ravel()
        for row in range(rows):
            for col in range(cols):
                try:
                    offset = row + col * (rows)
                    raveledArray.append({'x': row, 'y': col, 'depth': d[offset]})
                except IndexError as e:
                    print (e)

        return raveledArray

    def fullDataLoop(self):


        registration = Registration(self.device.getIrCameraParams(),
                                    self.device.getColorCameraParams())

        deptharraytest = [{'x': 152, 'y': 100, 'depth': 400}, {'x': 300, 'y': 200, 'depth': 400},
                          {'x': 350, 'y': 150, 'depth': 400}, {'x': 400, 'y': 300, 'depth': 400},
                          {'x': 22, 'y': 10, 'depth': 400}, {'x': 144, 'y': 12, 'depth': 400}]

        depthArray = self.update(registration)
        rows, cols = depthArray.shape
        d = self.pointRavel(depthArray, rows, cols)
        m = self.getMeanDepth(d, rows, cols)
        b = self.getBody(d, m, rows, cols)
        s = self.getSkeleton(deptharraytest, m, rows, cols)


        return {'spine' : s, 'meanDepth' : m}

    def run(self, duration):
        end = time.time() + duration
        self.device.start()

        registration = Registration(self.device.getIrCameraParams(),
                                    self.device.getColorCameraParams())


        deptharraytest = [{'x':152, 'y':100, 'depth':400}, {'x':300, 'y':200, 'depth':400},
                          {'x': 350, 'y': 150, 'depth': 400},{'x':400, 'y':300, 'depth':400},
                          {'x': 22, 'y': 10, 'depth': 400},{'x':144, 'y':12, 'depth':400}]
        while time.time() < end:
            depthArray = self.update(registration)
            rows, cols = depthArray.shape
            d = self.pointRavel(depthArray, rows, cols)
            m = self.getMeanDepth(d, rows, cols)
            b = self.getBody(d, m, rows, cols)
            s = self.getSkeleton(deptharraytest, m, rows, cols)
            self.changeInX(s)


        self.device.stop()

    def execute(self):
        self.device.start()

        registration = Registration(self.device.getIrCameraParams(),
                                    self.device.getColorCameraParams())

        depthArray = self.update(registration)
        rows, cols = depthArray.shape
        d = self.pointRavel(depthArray, rows, cols)
        m = self.getMeanDepth(d, rows, cols)
        b = self.getBody(d, m, rows, cols)
        s = self.getSkeleton(d, m, rows, cols)
        self.changeInX(s)

        print('depthArray ' + str(len(d)))
        print('body ' + str(len(b)))
        for body in b:
            print("(" + str(body['x']) + "," + str(body['y']) + "): " + str(body['depth']))

        print(m)

        self.device.stop()

    def exit(self):
        self.device.stop()
        self.device.close()
Example #18
        #            for i in range(8):
        #                newDepthImg[row][col+i] = 0.

        #rowStr += str(newDepthImg[row][col]) + ':' + str(depthArray[row][col]) + '\t'
        #f.write(rowStr + '\n')
        #rowStr = ''
        #cv2.imshow("depth", newDepthImg)
        #cv2.imshow("depth", depthArray)

        #with open("depthArray.txt", mode="wt", encoding="utf-8") as f:
        #    for row in newDepthImg:
        #        f.write('\t'.join(str(elm) for elm in row))
        #        f.write('\n')

        #cv2.imshow("undistorted", undistorted.asarray(np.float32) / 4500.)
        newBigDepthArr = cv2.resize(bigdepth.asarray(np.float32),
                                    (int(1920), int(1082)))
        newBigDcrop = newBigDepthArr[200:620, 700:1120]
        cv2.imshow("bigdepth", newBigDepthArr / 4500)
    if enable_rgb:
        newRGBarr[:, 50:1970] = cv2.resize(color.asarray(),
                                           (int(1920), int(1080)))
    if enable_rgb and enable_depth:
        fake = True
        #newRGBarr = cv2.resize(color.asarray(),
        #                               (int(1920 / 2.547), int(1080 / 2.547)))
        #print(str(newRGBarr[:,120:632].shape))
        newRGBimg = newRGBarr[200:620, 700 + cal:1120 + cal]

        #print(str(newRGBimg.shape))
        #print(str(newBigDcrop.shape))
Example #19
class KivyCamera(Image):
    def __init__(self, **kwargs):
        super(KivyCamera, self).__init__(**kwargs)

    def setup(self, fn, pipeline, **kwargs):
        super(KivyCamera, self).__init__(**kwargs)
        self.bg_image = None
        self.current_img = None
        self.current_final_img = None
        self.current_mask = None

        serial = fn.getDeviceSerialNumber(0)
        self.device = fn.openDevice(serial, pipeline=pipeline)

        self.listener = SyncMultiFrameListener(
                 FrameType.Color | FrameType.Ir | FrameType.Depth)

        # Register listeners
        self.device.setColorFrameListener(self.listener)
        self.device.setIrAndDepthFrameListener(self.listener)

        self.device.start()

        # NOTE: must be called after device.start()
        self.registration = Registration(self.device.getIrCameraParams(),
                                           self.device.getColorCameraParams())

        self.undistorted = Frame(512, 424, 4)
        self.registered = Frame(512, 424, 4)

        # Optional parameters for registration
        # set to True if you need them
        need_bigdepth = True
        need_color_depth_map = False

        self.bigdepth = Frame(1920, 1082, 4) if need_bigdepth else None
        self.color_depth_map = np.zeros((424, 512),  np.int32).ravel() \
            if need_color_depth_map else None

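        # pixels farther than this (as a fraction of the 4500 mm range, so
        # roughly 2.25 m) are replaced by the background image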
        self.clipping_distance = 0.5


    def update(self, dt):
        frames = self.listener.waitForNewFrame()
        color = frames["color"]
        depth = frames["depth"]

        self.registration.apply(color, depth, self.undistorted, self.registered,
                               bigdepth=self.bigdepth,
                               color_depth_map=self.color_depth_map)

        self.current_img = color.asarray()[:,:,:3]
        depth_img = self.bigdepth.asarray(np.float32) / 4500. # scale to interval [0,1]

        if(self.bg_image is not None):
            if(self.bg_image.shape[2] == 4): # put frame around picture
                fgMask = self.bg_image[:,:,2]>0
            else: # modify background
                # if needed: denoise filter
                depth_img = cv2.medianBlur(depth_img, 5)
                depth_img = cv2.GaussianBlur(depth_img, (9,9), 0)
                fgMask = ((depth_img > self.clipping_distance) & (depth_img > 0))
                fgMask = cv2.resize(fgMask.astype(np.uint8), (1920, 1080))
                
                # if needed: morphological operations
                #kernel = np.ones((10,10), np.uint8)
                #fgMask = cv2.morphologyEx(fgMask, cv2.MORPH_CLOSE, kernel)
                #fgMask = cv2.morphologyEx(fgMask, cv2.MORPH_OPEN, kernel)
            
            self.current_mask = np.dstack((fgMask, fgMask, fgMask)).astype(bool) # mask is 1 channel, color is 3 channels (np.bool was removed from NumPy)
            self.current_final_img = np.where(self.current_mask, self.bg_image[:,:,:3], self.current_img)
        else:
            self.current_mask = None
            self.current_final_img = self.current_img

        self.listener.release(frames)
        
        self.display_img(self.current_final_img)

    def display_img(self, img):
        buf1 = cv2.flip(img, 0)
        buf = buf1.tobytes()
        image_texture = Texture.create(size=(img.shape[1], img.shape[0]), colorfmt='rgb') #(480,640)
        image_texture.blit_buffer(buf, colorfmt='bgr', bufferfmt='ubyte')
        # display image from the texture
        self.texture = image_texture
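The widget above never schedules its own update() calls. A minimal sketch of the missing wiring (the app class, frame rate, and pipeline choice are assumptions, not part of the original snippet):

from kivy.app import App
from kivy.clock import Clock
from pylibfreenect2 import Freenect2, CpuPacketPipeline

class CameraApp(App):
    def build(self):
        cam = KivyCamera()
        cam.setup(Freenect2(), CpuPacketPipeline())  # assumes a Kinect is attached
        Clock.schedule_interval(cam.update, 1.0 / 30.0)  # drive update() at ~30 fps
        return cam

if __name__ == '__main__':
    CameraApp().run()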
    cv2.imwrite(os.path.join(outputDir, "depth" + frameNum + ".png"), depth.asarray() / 4500.)
    
    # Convert from RGBX or BGRX to RGB or BGR
    colorArray = color.asarray()
    print(colorArray.shape)
    trimColor = np.zeros( (np.size(colorArray,0), np.size(colorArray,1), 3) )
    print(trimColor.shape)
    trimColor[:,:,0] = colorArray[:,:,0]
    trimColor[:,:,1] = colorArray[:,:,1]
    trimColor[:,:,2] = colorArray[:,:,2]
    
    cv2.imwrite(os.path.join(colorDir, "img_" + frameNum + ".png"), cv2.resize(trimColor,
                                   (int(1920 / 3), int(1080 / 3))))
    cv2.imwrite(os.path.join(outputDir, "color" + frameNum + ".png"), cv2.resize(trimColor,
                                   (int(1920 / 3), int(1080 / 3))))
    cv2.imwrite(os.path.join(outputDir, "registered" + frameNum + ".png"), registered.asarray(np.uint8))

    if need_bigdepth:
        bigdepthArray = bigdepth.asarray(np.float32) / 4500. # normalize by the 4500 mm max depth
        print(bigdepthArray.shape)
        print(bigdepthArray)
        
        #bigdepthArray = bigdepthArray.astype(np.uint16)
        cv2.imshow("bigdepth", cv2.resize(bigdepthArray,
                                          (int(1920 / 3), int(1082 / 3))))
        key = cv2.waitKey(delay=1000)
        
        #bigdepthNorm = np.zeros( (np.size(bigdepthArray)) )
        #cv2.normalize(bigdepthArray, bigdepthNorm)
        bigdepthArray[bigdepthArray == np.inf] = 1
        print(bigdepthArray)
    if enable_rgb:
        color = frames["color"]
    if enable_depth:
        ir = frames["ir"]
        depth = frames["depth"]

    if enable_rgb and enable_depth:
        registration.apply(color, depth, undistorted, registered)
    elif enable_depth:
        registration.undistortDepth(depth, undistorted)

    if enable_depth:
        cv2.imshow("ir", ir.asarray() / 65535.)
        cv2.imshow("depth", depth.asarray() / 4500.)
        cv2.imshow("undistorted", undistorted.asarray(np.float32) / 4500.)
    if enable_rgb:
        cv2.imshow("color", cv2.resize(color.asarray(),
                                       (int(1920 / 3), int(1080 / 3))))
    if enable_rgb and enable_depth:
        cv2.imshow("registered", registered.asarray(np.uint8))

    listener.release(frames)

    key = cv2.waitKey(delay=1)
    if key == ord('q'):
        break

device.stop()
device.close()
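The inf-masking above can be folded into a small display step; a sketch reusing the fragment's names (the 8-bit window name is ours):

# Sketch: render bigdepth (float32 mm, +inf where unmapped) as 8-bit,
# using the same 4500 mm scaling as the snippets above.
bd = bigdepth.asarray(np.float32)
bd[~np.isfinite(bd)] = 0
bd8 = np.clip(bd / 4500.0 * 255.0, 0, 255).astype(np.uint8)
cv2.imshow("bigdepth8", cv2.resize(bd8, (int(1920 / 3), int(1082 / 3))))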
Example #22
while True:
    frames = listener.waitForNewFrame()

    color = frames["color"]
    ir = frames["ir"]
    depth = frames["depth"]

    registration.apply(color, depth, undistorted, registered, bigdepth=bigdepth, color_depth_map=color_depth_map)

    # cv2.imshow("ir", ir.asarray() / 65535.)
    depth_array = depth.asarray()
    # cv2.imshow("depth", depth_array)
    color_array = cv2.resize(color.asarray(), (int(1920 / 3), int(1080 / 3)))
    cv2.imshow("color", color_array)
    
    reg_array = registered.asarray(np.uint8)
    cv2.imshow("registered", reg_array)

    # if need_bigdepth:
    #     cv2.imshow("bigdepth", cv2.resize(bigdepth.asarray(np.float32),
    #                                       (int(1920 / 3), int(1082 / 3))))
    
    # Abandoned attempt: "registered" was used instead of the color_depth_map.
    # if need_color_depth_map:
    #     img = color_depth_map
    #     print("img1", img.shape)
    #     img2 = (img + 2**16).astype(np.uint32)
    #     print("img2", img2.shape)
    #     img3 = img2.view(np.uint8).reshape(img2.shape + (4,))[:, :3]
    #     print("img3", img3.shape)
    #     img4 = img3.reshape(424, 512, 3)
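A working version of that visualization (assuming color_depth_map is the flat int32 index buffer handed to registration.apply, with a negative sentinel for unmapped pixels) might be:

cdm = color_depth_map.reshape(424, 512).astype(np.float32)
cdm[cdm < 0] = 0                         # assumed sentinel for unmapped pixels
cv2.imshow("color_depth_map", cdm / max(cdm.max(), 1.0))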
Example #23
    def run(self):
        pipeline = CpuPacketPipeline()
        print("Packet pipeline:", type(pipeline).__name__)

        enable_rgb = True
        enable_depth = True

        fn = Freenect2()
        num_devices = fn.enumerateDevices()
        if num_devices == 0:
            print("No device connected!")
            sys.exit(1)

        serial = fn.getDeviceSerialNumber(0)
        device = fn.openDevice(serial, pipeline=pipeline)

        types = 0
        if enable_rgb:
            types |= FrameType.Color
        if enable_depth:
            types |= (FrameType.Ir | FrameType.Depth)
        listener = SyncMultiFrameListener(types)

        # Register listeners
        device.setColorFrameListener(listener)
        device.setIrAndDepthFrameListener(listener)

        if enable_rgb and enable_depth:
            device.start()
        else:
            device.startStreams(rgb=enable_rgb, depth=enable_depth)

        # NOTE: must be called after device.start()
        if enable_depth:
            registration = Registration(device.getIrCameraParams(),
                                        device.getColorCameraParams())

        undistorted = Frame(512, 424, 4)
        registered = Frame(512, 424, 4)

        count = 0
        mutex = Lock()  # create once; a new Lock per iteration would guard nothing
        while True:
            mutex.acquire()
            frames = listener.waitForNewFrame()
            frames = listener.waitForNewFrame()

            if enable_rgb:
                color = frames["color"]
            if enable_depth:
                ir = frames["ir"]
                depth = frames["depth"]

            if enable_rgb and enable_depth:
                registration.apply(color, depth, undistorted, registered)
            elif enable_depth:
                registration.undistortDepth(depth, undistorted)

            if enable_depth:
                cv2.imshow("ir", ir.asarray() / 65535.)
                nameir = self.makename("ir", count)
                #cv2.imwrite(nameir, ir.asarray() / 65535.)

                cv2.imshow("depth", depth.asarray() / 4500.)
                named = self.makename("depth", count)
                #cv2.imwrite(named, depth.asarray() / 4500.)

                cv2.imshow("undistorted",
                           undistorted.asarray(np.float32) / 4500.)
                nameu = self.makename("undistorted", count)
                #cv2.imwrite(nameu, undistorted.asarray(np.float32) / 4500.)
            if enable_rgb:
                cv2.imshow(
                    "color",
                    cv2.resize(color.asarray(),
                               (int(1920 / 3), int(1080 / 3))))
                namec = self.makename("color", count)
                #cv2.imwrite(namec,cv2.resize(color.asarray(),(int(1920 / 3), int(1080 / 3))))
            if enable_rgb and enable_depth:
                cv2.imshow("registered", registered.asarray(np.uint8))
                namer = self.makename("registered", count)
                #cv2.imwrite(namer,registered.asarray(np.uint8))
            mutex.release()

            count = count + 1

            listener.release(frames)

            key = cv2.waitKey(delay=1)
            if key == ord('q'):
                break

        device.stop()
        device.close()
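run() calls self.makename(...), which this snippet does not include; a plausible sketch (the directory and naming scheme are assumptions):

    def makename(self, prefix, count):
        # hypothetical helper: zero-padded per-stream filenames,
        # e.g. "captures/depth_000042.png"
        import os
        return os.path.join("captures", "%s_%06d.png" % (prefix, count))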
Example #24
class Kinect:
    def __init__(self, kinect_num=0):
        self.fn = Freenect2()
        self.serial = None
        self.device = None
        self.listener = None
        self.registration = None

        self._frames = None  # frames cache so that the user can use them before we free them
        self._bigdepth = Frame(1920, 1082, 4)  # malloc'd
        self._undistorted = Frame(512, 424, 4)
        self._registered = Frame(512, 424, 4)
        self._color_dump = Frame(1920, 1080, 4)

        num_devices = self.fn.enumerateDevices()
        if num_devices <= kinect_num:
            raise ConnectionError("No Kinect device at index %d" % kinect_num)

        self.serial = self.fn.getDeviceSerialNumber(kinect_num)
        self.device = self.fn.openDevice(self.serial, pipeline=pipeline)

        self.listener = SyncMultiFrameListener(FrameType.Color
                                               | FrameType.Depth)

        # Register listeners
        self.device.setColorFrameListener(self.listener)
        self.device.setIrAndDepthFrameListener(self.listener)

    def close(self):
        if self.device:
            self.device.close()

    def start(self):
        if self.device:
            self.device.start()
            self.registration = Registration(
                self.device.getIrCameraParams(),
                self.device.getColorCameraParams())
            return self  # for convenience
        else:
            raise ConnectionError("Connection to Kinect wasn't established")

    def stop(self):
        if self.device:
            self.device.stop()

    ### If copy is False for these functions, make sure to call release_frames
    ### when you're done with the returned frame ###

    def get_current_color_frame(self, copy=True):
        self._frames = self.listener.waitForNewFrame()
        ret = self._convert_color_frame(self._frames["color"], copy=copy)
        if copy:
            self.release_frames()
        return ret

    def get_current_depth_frame(self, copy=True):
        self._frames = self.listener.waitForNewFrame()
        if copy:
            ret = self._frames["depth"].asarray().copy()
            self.release_frames()
        else:
            ret = self._frames["depth"].asarray()
        return ret

    def get_current_rgbd_frame(self, copy=True):
        self._frames = self.listener.waitForNewFrame()

        self.registration.apply(self._frames["color"],
                                self._frames["depth"],
                                self._undistorted,
                                self._registered,
                                bigdepth=self._bigdepth)

        color = self._convert_color_frame(self._frames["color"], copy=copy)
        depth = self._convert_bigdepth_frame(self._bigdepth, copy=copy)
        if copy:
            self.release_frames()
        return color, depth

    def get_current_bigdepth_frame(self, copy=True):
        self._frames = self.listener.waitForNewFrame()

        self.registration.apply(self._frames["color"],
                                self._frames["depth"],
                                self._undistorted,
                                self._registered,
                                bigdepth=self._bigdepth)

        depth = self._convert_bigdepth_frame(self._bigdepth, copy=copy)
        if copy:
            self.release_frames()
        return depth

    def release_frames(self):
        self.listener.release(self._frames)

    # single frame calls

    def get_single_color_frame(self):
        self.start()
        ret = self.get_current_color_frame()
        self.stop()
        return ret

    def get_single_depth_frame(self):
        self.start()
        ret = self.get_current_depth_frame()
        self.stop()
        return ret

    def _convert_bigdepth_frame(self, frame, copy=True):
        d = frame.asarray(np.float32)
        if copy:
            d = d.copy()
        # drop the top/bottom padding rows and un-mirror horizontally
        return d[1:-1, ::-1]

    def _convert_color_frame(self, frame, copy=True):
        if copy:
            img = frame.asarray().copy()
        else:
            img = frame.asarray()
        img = img[:, ::-1]
        img[..., :3] = img[..., 2::-1]  # bgrx -> rgbx
        return img
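A minimal usage sketch for this wrapper (note that __init__ reads a module-level `pipeline`, so one has to exist; the CPU pipeline here is an assumption):

from pylibfreenect2 import CpuPacketPipeline
pipeline = CpuPacketPipeline()

kin = Kinect(kinect_num=0).start()
try:
    rgb = kin.get_current_color_frame()          # mirrored, RGBX channel order
    color, depth = kin.get_current_rgbd_frame()  # color plus 1080x1920 bigdepth
finally:
    kin.stop()
    kin.close()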
Example #25
    registration.apply(color,
                       depth,
                       undistorted,
                       registered,
                       bigdepth=bigdepth,
                       color_depth_map=color_depth_map)

    # NOTE for visualization:
    # cv2.imshow without OpenGL backend seems to be quite slow to draw all
    # things below. Try commenting out some imshow if you don't have a fast
    # visualization backend.
    #cv2.imshow("ir", ir.asarray() / 65535.)
    #cv2.imshow("depth", depth.asarray() / 4500.)
    cv2.imshow("color",
               cv2.resize(color.asarray(), (int(1920 / 3), int(1080 / 3))))

    reg = registered.asarray(np.uint8)

    reg = cv2.medianBlur(reg, 5)

    #print rgbd.shape

    # print rgbd

    cv2.imshow("registered", reg)

    color_array = reg[:, :, 0:3]

    #if need_bigdepth:
    #   cv2.imshow("bigdepth", cv2.resize(bigdepth.asarray(np.float32),
    #                                     (int(1920 / 3), int(1082 / 3))))
    #if need_color_depth_map:
Example #26
    # How to see where we're getting info at:
    #test_get_dist_center = cv2.circle(test_get_dist_center,(w/2,h/2), 2, (0,0,255), -1)

    # note: cv2.circle takes an (x, y) point, so for this array the center
    # ends up passed as (h/2, w/2)
    test_get_dist_center = cv2.circle(test_get_dist_center, (h // 2, w // 2),
                                      2, (0, 0, 255), -1)

    cv2.imshow("depth", test_get_dist_center / 4500.)
    #cv2.imshow("object detection filter Gauss 11", imgFilterGauss11)
    cv2.imshow("object detection filter", imgFilter)
    #cv2.imshow("color", cv2.resize(color.asarray(), (int(1920 / 3), int(1080 / 3))))
    #cv2.imshow("registered", registered.asarray(np.uint8))

    if need_bigdepth:
        cv2.imshow(
            "bigdepth",
            cv2.resize(bigdepth.asarray(np.float32),
                       (int(1920 / 3), int(1082 / 3))))
    if need_color_depth_map:
        cv2.imshow("color_depth_map", color_depth_map.reshape(424, 512))

    listener.release(frames)

    key = cv2.waitKey(delay=1)
    if key == ord('q'):
        break

device.stop()
device.close()

sys.exit(0)
    ir = frames["ir"]
    depth = frames["depth"]

    registration.apply(color, depth, undistorted, registered,
                       bigdepth=bigdepth,
                       color_depth_map=color_depth_map)

    # NOTE for visualization:
    # cv2.imshow without OpenGL backend seems to be quite slow to draw all
    # things below. Try commenting out some imshow if you don't have a fast
    # visualization backend.
    cv2.imshow("ir", ir.asarray() / 65535.)
    cv2.imshow("depth", depth.asarray() / 4500.)
    cv2.imshow("color", cv2.resize(color.asarray(),
                                   (int(1920 / 3), int(1080 / 3))))
    cv2.imshow("registered", registered.asarray(np.uint8))

    if need_bigdepth:
        cv2.imshow("bigdepth", cv2.resize(bigdepth.asarray(np.float32),
                                          (int(1920 / 3), int(1082 / 3))))
    if need_color_depth_map:
        cv2.imshow("color_depth_map", color_depth_map.reshape(424, 512))

    listener.release(frames)

    key = cv2.waitKey(delay=1)
    if key == ord('q'):
        break

device.stop()
device.close()
while True:
    timer = cv2.getTickCount()

    frames = listener.waitForNewFrame()

    color = frames["color"]
    depth = frames["depth"]

    registration.apply(color,
                       depth,
                       undistorted,
                       registered,
                       bigdepth=bigdepth)

    bd = bigdepth.asarray(np.float32)[1:-1, :]  # drop the padding rows -> 1080x1920
    c = cv2.cvtColor(color.asarray(), cv2.COLOR_RGB2BGR)

    face_bbox = None
    new_track_bbox = None
    face_locations = face_recognition.face_locations(
        c, number_of_times_to_upsample=0, model="cnn")
    face_encodings = face_recognition.face_encodings(c, face_locations)
    for face_encoding in face_encodings:
        matches = face_recognition.compare_faces([known_faces[target]],
                                                 face_encoding, 0.6)
        if len(matches) > 0 and matches[0]:
            (top, right, bottom, left) = face_locations[0]

            face_bbox = (left, top, right, bottom)
            mid_w = int((left + right) / 2)
    registration.apply(color,
                       depth,
                       undistorted,
                       registered,
                       bigdepth=bigdepth,
                       color_depth_map=color_depth_map)

    # NOTE for visualization:
    # cv2.imshow without OpenGL backend seems to be quite slow to draw all
    # things below. Try commenting out some imshow if you don't have a fast
    # visualization backend.
    cv2.imshow("ir", ir.asarray() / 65535.)
    cv2.imshow("depth", depth.asarray() / 4500.)
    cv2.imshow("color",
               cv2.resize(color.asarray(), (int(1920 / 3), int(1080 / 3))))
    cv2.imshow("registered", registered.asarray(np.uint8))

    # Convert from RGBX or BGRX to RGB or BGR
    colorArray = color.asarray()
    #print(colorArray.shape)
    trimColor = np.zeros((np.size(colorArray, 0), np.size(colorArray, 1), 3))
    #print(trimColor.shape)
    trimColor[:, :, 0] = colorArray[:, :, 0]
    trimColor[:, :, 1] = colorArray[:, :, 1]
    trimColor[:, :, 2] = colorArray[:, :, 2]
    #skio.imsave(os.path.join(colorDir, "img_" + frameNum + ".png"), trimColor)

    cv2.imwrite(os.path.join(colorDir, "img_" + frameNum + ".png"), trimColor)

    cv2.imshow("undistored", undistorted.asarray(np.float32) / 4500.)
    #print(undistorted.asarray(np.float32) / 4500.)
Example #30
def find_and_track_kinect(name, tracker = "CSRT",
        min_range = 0, max_range = 1700,
        face_target_box = DEFAULT_FACE_TARGET_BOX,
        res = (RGB_W, RGB_H),
        video_out = True, debug = True):

    known_faces = {}

    for person in faces:
        image = face_recognition.load_image_file(faces[person])
        print(person)
        face_encoding_list = face_recognition.face_encodings(image)
        if len(face_encoding_list) > 0:
            known_faces[person] = face_encoding_list[0]
        else:
            print("\t Could not find face for person...")

    fn = Freenect2()
    num_devices = fn.enumerateDevices()

    if num_devices == 0:
        print("No device connected!")
        sys.exit(1)

    serial = fn.getDeviceSerialNumber(0)
    device = fn.openDevice(serial, pipeline = pipeline)

    listener = SyncMultiFrameListener(FrameType.Color | FrameType.Depth)

    device.setColorFrameListener(listener)
    device.setIrAndDepthFrameListener(listener)

    device.start()

    registration = Registration(device.getIrCameraParams(),
            device.getColorCameraParams())

    undistorted = Frame(512, 424, 4)
    registered = Frame(512, 424, 4)
    bigdepth = Frame(1920, 1082, 4)


    trackerObj = None
    face_count = 5
    face_process_frame = True

    bbox = None
    face_bbox = None
    track_bbox = None

    while True:
        timer = cv2.getTickCount()

        person_found = False

        frames = listener.waitForNewFrame()

        color = frames["color"]
        depth = frames["depth"]

        registration.apply(color, depth, undistorted, registered, bigdepth=bigdepth)

        bd = bigdepth.asarray(np.float32)[1:-1, :]  # drop the padding rows -> 1080x1920
        c = cv2.cvtColor(color.asarray(), cv2.COLOR_RGB2BGR)

        __clean_color(c, bd, min_range, max_range)

        if face_process_frame:
            small_c = __crop_frame(c, face_target_box)
            face_locations = face_recognition.face_locations(small_c, model="cnn")
            face_encodings = face_recognition.face_encodings(small_c, face_locations)
            for face_encoding in face_encodings:
                matches = face_recognition.compare_faces(
                        [known_faces[name]], face_encoding, 0.6)
                if len(matches) > 0 and matches[0]:
                    person_found = True
                    face_count += 1
                    (top, right, bottom, left) = face_locations[0]

                    left += face_target_box[0]
                    top += face_target_box[1]
                    right += face_target_box[0]
                    bottom += face_target_box[1]

                    face_bbox = (left, top, right, bottom)

                    break
        face_process_frame = not face_process_frame

        overlap_pct = 0
        track_area = __bbox_area(track_bbox)
        if track_area > 0 and face_bbox:
            overlap_area = __bbox_overlap(face_bbox, track_bbox)
            overlap_pct = min(overlap_area / __bbox_area(face_bbox),
                    overlap_area / __bbox_area(track_bbox))

        if person_found and face_count >= FACE_COUNT and overlap_pct < CORRECTION_THRESHOLD:
            bbox = (face_bbox[0],
                    face_bbox[1],
                    face_bbox[2] - face_bbox[0],
                    face_bbox[3] - face_bbox[1])
            trackerObj = __init_tracker(c, bbox, tracker)
            face_count = 0

        status = False

        if trackerObj is not None:
            status, trackerBBox = trackerObj.update(c)
            bbox = (int(trackerBBox[0]),
                int(trackerBBox[1]),
                int(trackerBBox[0] + trackerBBox[2]),
                int(trackerBBox[1] + trackerBBox[3]))

        if bbox is not None:
            track_bbox = bbox

        fps = cv2.getTickFrequency() / (cv2.getTickCount() - timer)

        w = 0
        h = 0

        if status:
            w = track_bbox[0] + int((track_bbox[2] - track_bbox[0])/2)
            h = track_bbox[1] + int((track_bbox[3] - track_bbox[1])/2)
            if (w < res[0] and w >= 0 and h < res[1] and h >= 0):
                distanceAtCenter = bd[h][w]
                __update_individual_position("NONE", track_bbox, distanceAtCenter, res)

        if video_out:
            cv2.line(c, (w, 0),
                    (w, int(res[1])), (0,255,0), 1)
            cv2.line(c, (0, h),
                    (int(res[0]), h), (0,255,0), 1)

            __draw_bbox(True, c, face_target_box, (255, 0, 0), "FACE_TARGET")
            __draw_bbox(status, c, track_bbox, (0, 255, 0), tracker)
            __draw_bbox(person_found, c, face_bbox, (0, 0, 255), name)

            c = __scale_frame(c, scale_factor = 0.5)

            cv2.putText(c, "FPS : " + str(int(fps)), (100,50),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255,0,255), 1)
            if not status:
                failedTrackers = "FAILED: "
                failedTrackers += tracker + " "
                cv2.putText(c, failedTrackers, (100, 80),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0,0,142), 1)

            cv2.imshow("color", c)

        listener.release(frames)

        key = cv2.waitKey(1) & 0xff
        if key == ord('q'):
            break
    device.stop()
    device.close()
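The loop above leans on several module-level helpers the snippet omits; sketches of the two geometric ones (bboxes are (left, top, right, bottom) tuples, matching the code above):

def __bbox_area(bbox):
    # area of a (left, top, right, bottom) box; 0 for a missing box
    if bbox is None:
        return 0
    return max(0, bbox[2] - bbox[0]) * max(0, bbox[3] - bbox[1])

def __bbox_overlap(a, b):
    # area of the intersection of two (left, top, right, bottom) boxes
    left, top = max(a[0], b[0]), max(a[1], b[1])
    right, bottom = min(a[2], b[2]), min(a[3], b[3])
    return max(0, right - left) * max(0, bottom - top)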
Example #31
def main(hrnet_c, hrnet_j, hrnet_weights, hrnet_joints_set, single_person,
         max_batch_size, disable_vidgear, device):
    if device is not None:
        device = torch.device(device)
    else:
        if torch.cuda.is_available():
            torch.backends.cudnn.deterministic = True
            device = torch.device('cuda:0')
        else:
            device = torch.device('cpu')

    print(device)

    has_display = 'DISPLAY' in os.environ.keys() or sys.platform == 'win32'

    model = SimpleHRNet(
        hrnet_c,
        hrnet_j,
        hrnet_weights,
        multiperson=not single_person,
        max_batch_size=max_batch_size,
        device=device
    )

    print("Packet pipeline:", type(pipeline).__name__)

    enable_rgb = True
    enable_depth = True

    fn = Freenect2()
    num_devices = fn.enumerateDevices()
    if num_devices == 0:
        print("No device connected!")
        sys.exit(1)

    serial = fn.getDeviceSerialNumber(0)
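    # `device` is reused below as the Kinect handle; the torch device chosen
    # above has already been captured by the SimpleHRNet model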
    device = fn.openDevice(serial, pipeline=pipeline)

    types = 0
    if enable_rgb:
        types |= FrameType.Color
    if enable_depth:
        types |= (FrameType.Ir | FrameType.Depth)
    listener = SyncMultiFrameListener(types)

    # Register listeners
    device.setColorFrameListener(listener)
    device.setIrAndDepthFrameListener(listener)

    if enable_rgb and enable_depth:
        device.start()
    else:
        device.startStreams(rgb=enable_rgb, depth=enable_depth)

    # NOTE: must be called after device.start()
    if enable_depth:
        registration = Registration(device.getIrCameraParams(),
                                    device.getColorCameraParams())

    undistorted = Frame(512, 424, 4)
    registered = Frame(512, 424, 4)

    while True:
        frames = listener.waitForNewFrame()

        if enable_rgb:
            color = frames["color"]
        if enable_depth:
            ir = frames["ir"]
            depth = frames["depth"]

        if enable_rgb and enable_depth:
            registration.apply(color, depth, undistorted, registered)
        elif enable_depth:
            registration.undistortDepth(depth, undistorted)

        if enable_depth:
            cv2.imshow("ir", ir.asarray() / 65535.)
            cv2.imshow("depth", depth.asarray() / 4500.)
            # cv2.imshow("undistorted", undistorted.asarray(np.float32) / 4500.)

        if enable_rgb and enable_depth:
            cv2.imshow("registered", registered.asarray(np.uint8))

        # color = cv2.resize(color.asarray()[:, :, :3], (int(1920 / 3), int(1080 / 3)))
        color = registered.asarray(np.uint8)[:, :, :3]
        color = np.ascontiguousarray(color, dtype=np.uint8)
        pts = model.predict(color)

        undistorted_nor = undistorted.asarray(np.float32) / 4500.
        undistorted_image = cv2.cvtColor(undistorted_nor, cv2.COLOR_GRAY2BGR)
        for i, pt in enumerate(pts):
            color = draw_points_and_skeleton(color, pt, joints_dict()[hrnet_joints_set]['skeleton'], person_index=i,
                                             joints_color_palette='gist_rainbow', skeleton_color_palette='jet',
                                             joints_palette_samples=10)
            for j, point in enumerate(pt):
                if point[2]>0.5 and point[0] < undistorted_image.shape[0] and point[1] < undistorted_image.shape[1]:
                    if j == 9:
                        left_wrist_depth = undistorted_nor[int(point[0]), int(point[1])]
                        print('left_wrist_depth',left_wrist_depth)
                        undistorted_image = cv2.circle(undistorted_image, (int(point[1]), int(point[0])), 30, (255, 255, 0),-1)

                    if j == 10:
                        right_wrist_depth = undistorted_nor[int(point[0]), int(point[1])]
                        print('right_wrist_depth', right_wrist_depth)
                        undistorted_image = cv2.circle(undistorted_image, (int(point[1]), int(point[0])), 30, (255, 0, 255),-1)

        if enable_rgb:
            cv2.imshow("color", color)
            cv2.imshow("undistorted", undistorted_image)

        listener.release(frames)

        key = cv2.waitKey(delay=1)
        if key == ord('q'):
            break

    device.stop()
    device.close()

    sys.exit(0)
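Since undistorted_nor is scaled by 1/4500, a sampled wrist value can be turned back into millimetres; a one-line sketch using the loop's own names:

left_wrist_mm = left_wrist_depth * 4500.0  # undo the normalization above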
Example #32
class Kinect:

    def __init__(self):
        self.averageSpineX = 0

        self.fn = Freenect2()
        if self.fn.enumerateDevices() == 0:
            sys.exit(47)


        self.serial = self.fn.getDeviceSerialNumber(0)

        types = 0
        types |= FrameType.Color
        types |= (FrameType.Ir | FrameType.Depth)
        self.listener = SyncMultiFrameListener(types)

        self.logger = createConsoleLogger(LoggerLevel.Debug)

        self.device = self.fn.openDevice(self.serial)

        self.device.setColorFrameListener(self.listener)
        self.device.setIrAndDepthFrameListener(self.listener)

        self.undistorted = Frame(512, 424, 4)
        self.registered = Frame(512, 424, 4)


    def valueBounded(self, checkValue, absoluteValue):
        # true when checkValue lies within [-absoluteValue, +absoluteValue]
        return -absoluteValue <= checkValue <= absoluteValue


    def valueUnbounded(self, checkValue, absoluteValue):
        # true when checkValue lies outside [-absoluteValue, +absoluteValue]
        return checkValue > absoluteValue or checkValue < -absoluteValue

    def valuePlusMinusDifferential(self, checkValue, testValue, Differential):
        # true when checkValue is within +/-Differential of testValue
        return testValue - Differential < checkValue < testValue + Differential


    def update(self, registration):
        frames = self.listener.waitForNewFrame()
        depth = frames["depth"]
        color = frames["color"]

        registration.apply(color, depth, self.undistorted, self.registered)

        # read the undistorted depth only after registration has filled it in
        d = self.undistorted.asarray(np.float32)

        self.listener.release(frames)

        return d


    # The helpers below compute the average depth of a frame and derive
    # coarse skeleton extents from it.



    # calculate mean depth of depth array, used to find skeleton points.
    def getMeanDepth(self, depth, rows, cols):
        total = 0
        sumDepth = 0
        for row in range(rows):
            for col in range(cols):
                offset = row + col * rows
                #if depth[offset]['depth'] < DANGER_THRESHOLD:
                try:
                    sumDepth += depth[offset]['depth']
                    total += 1
                except IndexError as e:
                    print(e)

        return sumDepth / total
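
    # Equivalent vectorized sketch (our addition): if the original 2-D depth
    # array is still at hand, the nested loops above reduce to a mean.
    def getMeanDepthFast(self, depthArray2d):
        return float(np.mean(depthArray2d))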


    # calculate the skeleton points of the depth array
    def getSkeleton(self, depthArray, average, rows, cols):
        topY = 0
        leftX = rows
        bottomY = cols
        rightX = 0

        for d in depthArray:
            #print depthArray[offset]
            if (d['x'] > rightX):
                rightX = d['x']
            if (d['x'] < leftX):
                leftX = d['x']
            if (d['y'] < bottomY):
                bottomY = d['y']
            if (d['y'] > topY):
                topY = d['y']

        averageX = (leftX + rightX) / 2
        returnValues = {'averageX': averageX,
                        'topY': topY,
                        'bottomY': bottomY,
                        'rightX': rightX,
                        'leftX': leftX}

        return returnValues


    def changeInX(self, spine):
        print "changeInX"
        if self.averageSpineX == 0:
            self.averageSpineX = spine['averageX']
        elif (self.valueBounded((self.averageSpineX - spine['averageX']), X_CHANGE_THRESHOLD)):
            self.averageSpineX = ((self.averageSpineX + spine['averageX']) / 2)
        else:
            self.checkDrowning()


    # Check whether the difference between averageSpineX and the last
    # analyzed averageX stays below a threshold. If it does, the loop
    # continues; once it has run for DROWN_TIME seconds, the drowning flag
    # is set to true. If falsePositive grows too large, DORi will no longer
    # assume the victim is a drowning risk.
    def checkDrowning(self):
        print "checkDrowning"
        drowningRisk = True
        drowning = False
        falsePositive = 0
        # allow DROWN_TIME seconds from the start of this check
        timeLimit = time.time() + DROWN_TIME
        self.device.start()
        while drowningRisk:
            print('checking...')
            if time.time() > timeLimit:
                drowningRisk = False
                drowning = True

            data = self.fullDataLoop()
            if (self.valueUnbounded((self.averageSpineX - data['spine']['averageX']), X_CHANGE_THRESHOLD)):
                falsePositive += 1
                if falsePositive > 100:
                    drowningRisk = False
            else:
                continue
        if drowning:
            self.device.stop()
            print "This guy is for sure drowning"
            sys.exit(47)

        self.device.stop()

    def depthMatrixToPointCloudPos2(self, depthArray):
        # per-pixel row/column index grids, back-projected through the
        # pinhole model: coord = (index - principal point) * depth / focal
        R, C = np.indices(depthArray.shape)

        R = (R - KINECT_SPECS['cx']) * depthArray / KINECT_SPECS['fx']
        C = (C - KINECT_SPECS['cy']) * depthArray / KINECT_SPECS['fy']

        return np.column_stack((depthArray.ravel(), R.ravel(), C.ravel()))

    def getBody(self, depthArray, average, rows, cols):
        body = []

        for d in depthArray:
            if self.valuePlusMinusDifferential(d['depth'], average, BODY_DEPTH_THRESHOLD):
                body.append(d)

        return body

    def pointRavel(self, unraveledArray, rows, cols):
        raveledArray = []
        d = unraveledArray.ravel()
        for row in range(rows):
            for col in range(cols):
                try:
                    offset = row + col * (rows)
                    raveledArray.append({'x': row, 'y': col, 'depth': d[offset]})
                except IndexError as e:
                    print(e)

        return raveledArray

    def fullDataLoop(self):


        registration = Registration(self.device.getIrCameraParams(),
                                    self.device.getColorCameraParams())

        deptharraytest = [{'x': 152, 'y': 100, 'depth': 400}, {'x': 300, 'y': 200, 'depth': 400},
                          {'x': 350, 'y': 150, 'depth': 400}, {'x': 400, 'y': 300, 'depth': 400},
                          {'x': 22, 'y': 10, 'depth': 400}, {'x': 144, 'y': 12, 'depth': 400}]

        depthArray = self.update(registration)
        rows, cols = depthArray.shape
        d = self.pointRavel(depthArray, rows, cols)
        m = self.getMeanDepth(d, rows, cols)
        b = self.getBody(d, m, rows, cols)
        s = self.getSkeleton(deptharraytest, m, cols, rows)


        return {'spine' : s, 'meanDepth' : m}

    def run(self, duration):
        end = time.time() + duration
        self.device.start()

        registration = Registration(self.device.getIrCameraParams(),
                                    self.device.getColorCameraParams())


        deptharraytest = [{'x':152, 'y':100, 'depth':400}, {'x':300, 'y':200, 'depth':400},
                          {'x': 350, 'y': 150, 'depth': 400},{'x':400, 'y':300, 'depth':400},
                          {'x': 22, 'y': 10, 'depth': 400},{'x':144, 'y':12, 'depth':400}]
        while time.time() < end:
            depthArray = self.update(registration)
            rows, cols = depthArray.shape
            d = self.pointRavel(depthArray, rows, cols)
            m =self.getMeanDepth(d, rows, cols)
            b = self.getBody(d, m, rows, cols)
            s = self.getSkeleton(deptharraytest, m, cols, rows)
            self.changeInX(s)


        self.device.stop()

    def execute(self):
        self.device.start()

        registration = Registration(self.device.getIrCameraParams(),
                                    self.device.getColorCameraParams())

        depthArray = self.update(registration)
        rows, cols = depthArray.shape
        d = self.pointRavel(depthArray, rows, cols)
        m = self.getMeanDepth(d, rows, cols)
        b = self.getBody(d, m, rows, cols)
        s = self.getSkeleton(d, m, rows, cols)
        self.changeInX(s)

        print('depthArray ' + str(len(d)))
        print('body ' + str(len(b)))
        for body in b:
            print("(" + str(body['x']) + "," + str(body['y']) + "): " + str(body['depth']))

        print(m)

        self.device.stop()




    def exit(self):
        self.device.stop()
        self.device.close()
Example #33
    def run(self):
        self._isrunning = True

        try:
            from pylibfreenect2 import OpenGLPacketPipeline
            pipeline = OpenGLPacketPipeline()
        except ImportError:
            try:
                from pylibfreenect2 import OpenCLPacketPipeline
                pipeline = OpenCLPacketPipeline()
            except ImportError:
                from pylibfreenect2 import CpuPacketPipeline
                pipeline = CpuPacketPipeline()
        if self._debug:
            print("Packet pipeline:", type(pipeline).__name__)

        self.fn = Freenect2()
        num_devices = self.fn.enumerateDevices()
        if self._debug:
            print("Number of devices: {}".format(num_devices))

        serial = self.fn.getDeviceSerialNumber(0)
        device = self.fn.openDevice(serial, pipeline=pipeline)
        listener = SyncMultiFrameListener(FrameType.Color | FrameType.Ir
                                          | FrameType.Depth)

        device.setColorFrameListener(listener)
        device.setIrAndDepthFrameListener(listener)

        if self._debug:
            print("Init done")

        device.start()

        registration = Registration(device.getIrCameraParams(),
                                    device.getColorCameraParams())

        params = device.getIrCameraParams()
        self.cx = params.cx
        self.cy = params.cy
        self.fx = params.fx
        self.fy = params.fy

        undistorted = Frame(512, 424, 4)
        registered = Frame(512, 424, 4)

        while self._isrunning:
            frames = listener.waitForNewFrame()

            color = frames["color"]
            ir = frames["ir"]
            depth = frames["depth"]

            registration.apply(color,
                               depth,
                               undistorted,
                               registered,
                               bigdepth=None,
                               color_depth_map=None)

            if self._save_color:
                self._set_color_image(
                    cv2.cvtColor(color.asarray(), cv2.COLOR_BGR2RGB))
            if self._save_depth or self._save_pointcloud:
                depth_arr = depth.asarray()
                if self._save_depth:
                    self._set_depth_image(depth_arr)
            if self._save_ir:
                self._set_ir_image(ir.asarray())
            if self._save_registered or self._save_pointcloud:
                reg = cv2.cvtColor(registered.asarray(np.uint8),
                                   cv2.COLOR_BGR2RGB)
                if self._save_registered:
                    self._set_registered_image(reg)
            if self._save_undistorted:
                und = undistorted.asarray(np.uint8)
                self._set_undistorted_image(
                    cv2.cvtColor(und, cv2.COLOR_BGR2RGB))
            if self._save_pointcloud:
                self.compute_pointcloud(reg, depth_arr)

            listener.release(frames)

        if self._debug:
            print("Stopping device")
        device.stop()
        if self._debug:
            print("Closing device")
        device.close()
        if self._debug:
            print("Device stopped and closed")