def show(video_path):
    """
    Shows depth map
    @param video_path: contains the ONI file path
    """
    dev = None
    try:
        openni2.initialize()
        dev = openni2.Device.open_file(video_path)
        print(dev.get_sensor_info(openni2.SENSOR_DEPTH))
    except (RuntimeError, TypeError, NameError) as e:
        print(e)
        return

    depth_stream = dev.create_depth_stream()
    depth_stream.start()
    while True:
        frame_depth = depth_stream.read_frame()
        frame_depth_data = frame_depth.get_buffer_as_uint16()
        depth_array = np.ndarray(
            (frame_depth.height, frame_depth.width),
            dtype=np.uint16,
            buffer=frame_depth_data) / 2300.  # scale raw depth (mm) to roughly 0.-1. for display
        cv2.imshow('Depth', depth_array)

        ch = 0xFF & cv2.waitKey(1)
        if ch == 27:
            break

    depth_stream.stop()
    openni2.unload()
    cv2.destroyAllWindows()
def kinectDeactivate():
    '''
    turn off kinect
    '''
    openni2.unload()
    log("kinect stopped")
    return True
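Every snippet in this listing follows the same OpenNI2 lifecycle: initialize the runtime, open a device (or an .oni recording), create and start a stream, read frames, then stop the stream and call openni2.unload(). A minimal sketch of that pattern for grabbing a single live depth frame, using only calls that appear in the examples around it (the helper name is an assumption, and it presumes the primesense bindings plus a connected OpenNI2-compatible camera):

import numpy as np
from primesense import openni2

def grab_one_depth_frame():
    """Minimal OpenNI2 lifecycle sketch: initialize, read one depth frame, unload."""
    openni2.initialize()  # can also be given the path of the OpenNI2 Redist folder
    dev = openni2.Device.open_any()
    depth_stream = dev.create_depth_stream()
    depth_stream.start()
    try:
        frame = depth_stream.read_frame()
        depth = np.ndarray((frame.height, frame.width),
                           dtype=np.uint16,
                           buffer=frame.get_buffer_as_uint16())
        return depth.copy()  # copy out of the frame buffer before the stream is torn down
    finally:
        depth_stream.stop()
        openni2.unload()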
Example #3
def show_rgb_viewer():
    """Shows an rgb viewer inside a new window

    Returns as soon as the stream has been terminated.
    """
    if not IS_INITIALIZED:
        print "Device not initialized"
        return

    device = openni2.Device.open_any()

    rgb_stream = _rgb_stream_from_device(device)
    rgb_stream.start()

    done = False
    while not done:
        key = cv2.waitKey(1) & 255
        if key == 27:
            print "ESC pressed"
            done = True

        bgr = np.fromstring(rgb_stream.read_frame().get_buffer_as_uint8(),
                            dtype=np.uint8).reshape(240, 320, 3)
        rgb = cv2.cvtColor(bgr, cv2.COLOR_BGR2RGB)

        cv2.imshow("rgb", rgb)

    cv2.destroyAllWindows()
    rgb_stream.stop()
    openni2.unload()
    print "Terminated"
def show(video_path):
    """
    Shows depth map
    @param video_path: contains the ONI file path
    """
    dev = None
    try:
        openni2.initialize()
        dev = openni2.Device.open_file(video_path)
        print(dev.get_sensor_info(openni2.SENSOR_DEPTH))
    except (RuntimeError, TypeError, NameError) as e:
        print(e)
        return

    depth_stream = dev.create_depth_stream()
    depth_stream.start()
    while True:
        frame_depth = depth_stream.read_frame()
        frame_depth_data = frame_depth.get_buffer_as_uint16()
        depth_array = np.ndarray((frame_depth.height, frame_depth.width),
                                 dtype=np.uint16,
                                 buffer=frame_depth_data) / 2300.  # scale raw depth (mm) to roughly 0.-1. for display
        cv2.imshow('Depth', depth_array)

        ch = 0xFF & cv2.waitKey(1)
        if ch == 27:
            break

    depth_stream.stop()
    openni2.unload()
    cv2.destroyAllWindows()
Example #5
def signal_handler(signal, frame):
    print 'You pressed Ctrl+C!'
    rec.stop()
    depth_stream.stop()
    color_stream.stop()
    openni2.unload()
    sys.exit(0)
Example #6
def capture_chessboards(camera_type, save_dir):
    dev = openni2.Device.open_any()

    if camera_type == 'ir':
        stream = ir_stream_init(dev)
        get_frame = get_ir
    elif camera_type == 'rgb':
        stream = color_stream_init(dev)
        get_frame = get_color
    else:
        raise ValueError()

    stream.start()

    index = 1
    while True:
        frame = get_frame(stream)
        cv2.imshow("frame", frame)

        key = cv2.waitKey(34) & 0xFF
        if key in [ord('s'), 13]:
            cv2.imwrite('{}/{:03d}.png'.format(save_dir, index), frame)
            print('{:03d}.png'.format(index))
            index += 1
        elif key in [ord('q'), 27]:
            break
    cv2.destroyAllWindows()
    stream.stop()
    openni2.unload()
Example #7
def signal_handler(signal, frame):
	print 'You pressed Ctrl+C!'
	rec.stop()
	depth_stream.stop()
	color_stream.stop()
	openni2.unload()
	sys.exit(0)
Example #8
    def test_openni2_bindings(self):
        if os.path.exists(self.GEN_ONI):
            os.unlink(self.GEN_ONI)
        subprocess.check_call(["python", "../bin/build_openni.py"])
        self.assertTrue(os.path.exists(self.GEN_ONI))

        from primesense import _openni2
        from primesense import openni2

        openni2.initialize()
        ver = openni2.get_version()
        openni2.unload()

        self.assertEqual(ver.major, openni2.c_api.ONI_VERSION_MAJOR)
        self.assertEqual(ver.minor, openni2.c_api.ONI_VERSION_MINOR)

        h_file = os.path.join(config.get("headers", "openni_include_dir"),
                              "OpenNI.h")

        self.check_unexposed_functions(openni2, _openni2, h_file,
                                       ["oniGetExtendedError"])
        self.check_missing_names_by_prefix(openni2, h_file, "DEVICE_PROPERTY_",
                                           "ONI_")
        self.check_missing_names_by_prefix(openni2, h_file, "STREAM_PROPERTY_",
                                           "ONI_")
Example #9
def show_depth_viewer():
    """Shows a depth viewer inside a new window

    Returns as soon as the stream has been terminated.
    """
    if not IS_INITIALIZED:
        print "Device not initialized"
        return

    device = openni2.Device.open_any()

    depth_stream = _depth_stream_from_device(device)
    depth_stream.start()

    done = False
    while not done:
        key = cv2.waitKey(1) & 255
        if key == 27:
            print "ESC pressed"
            done = True

        _, d4d = _get_depth_from_stream(depth_stream)
        cv2.imshow("depth", d4d)

    cv2.destroyAllWindows()
    depth_stream.stop()
    openni2.unload()
    print "Terminated"
Example #10
def saveDepth(dev):
    depth_stream = dev.create_depth_stream()
    depth_stream.start()
    avi_depth = cv2.VideoWriter('depth.avi', cv2.cv.CV_FOURCC(*'XVID'),
                                depth_stream.get_video_mode().fps,
                                (depth_stream.get_video_mode().resolutionX,
                                 depth_stream.get_video_mode().resolutionY))
    depth_scale_factor = 255.0 / depth_stream.get_max_pixel_value()
    frame_depth = depth_stream.read_frame()

    while frame_depth.frameIndex < depth_stream.get_number_of_frames():
        frame_depth = depth_stream.read_frame()
        frame_depth_data = frame_depth.get_buffer_as_uint16()
        depth_array = np.ndarray((frame_depth.height, frame_depth.width),
                                 dtype=np.uint16,
                                 buffer=frame_depth_data)
        depth_uint8 = cv2.convertScaleAbs(depth_array,
                                          alpha=depth_scale_factor)
        depth_colored = cv2.applyColorMap(depth_uint8, cv2.COLORMAP_HSV)

        avi_depth.write(depth_colored)

    depth_stream.stop()
    openni2.unload()
    cv2.destroyAllWindows()
Example #11
def closeKinectStream():
    if config.depth_stream is not None:
        config.depth_stream.stop()
    openni2.unload()
    config.openKinectFailureLogged = False
    config.dev = None
    config.kinectReady = False
    config.log(f"kinect depth stream stopped and openni unloaded")
Example #12
    def end(self):
        if self._started:
            #Stop Stream
            self.depth_stream.stop()
            self.color_stream.stop()
            openni2.unload()

        self._started = False
def cmd(onifile, yamlfile):
    openni2.initialize()

    device = NiDevice(onifile, yamlfile)
    device.initialize()
    device.update()

    openni2.unload()
Example #14
    def stop(self):
        print "Closing Kinect interfaces"
        #self.hand_listener.close()
        #self.user_listener.close()
        #nite2.c_api.niteShutdown()
        #openni2.c_api.oniShutdown()
        nite2.unload()
        openni2.unload()
        print "Kinect interfaces closed"
def main(args):
    rp.init_node('faster_rcnn_client_xtion', anonymous=True)
    RCNNClient()

    try:
        rp.spin()
    except KeyboardInterrupt:
        openni2.unload()
        print "Shutting down ROS Image feature detector module"
    cv2.destroyAllWindows()
Example #16
    def run(self):
        self.initRun()
        self.initDepth(640, 480, 30)
        done = False
        print("Server is online")
        while not done:
            self.getDepth(640, 480)
            self.send(self.prepareData(self.data))
        self.depth_stream.stop()
        openni2.unload()
        self.s.close()
        print("Terminated")
    def stop(self):
        """ Stop the sensor """
        # check that everything is running
        if not self.running or self.device is None:
            return False

        # stop streams
        if self.depth_stream:
            self.depth_stream.stop()
        if self.color_stream:
            self.color_stream.stop()
        self.running = False

        openni2.unload()
        return True
Example #18
def main():
    """The entry point"""
    try:
        openni2.initialize()  # can also accept the path of the OpenNI redistribution
    except:
        print("Device not initialized")
        return

    try:
        dev = openni2.Device.open_any()
        write_files(dev)
    except:
        print("Unable to open the device")

    openni2.unload()
Example #19
    def stop(self):
        """ Stop the sensor """
        # check that everything is running
        if not self._running or self._device is None:
            logging.warning('Primesense not running. Aborting stop')
            return False

        # stop streams
        if self._depth_stream:
            self._depth_stream.stop()
        if self._color_stream:
            self._color_stream.stop()
        self._running = False

        # Unload openni2
        openni2.unload()
        return True
Example #20
def main():
    """The entry point"""
    try:
        openni2.initialize()  # can also accept the path of the OpenNI redistribution
    except:
        print("Device not initialized")
        return
    try:
        dev = openni2.Device.open_any()
        write_files(dev)
    except:
        print("Unable to open the device")
    try:
        openni2.unload()
    except:
        print("Device not unloaded")
Example #21
def split(video_path):
    """
    Split the ONI file into RGB and depth maps and show them in two separate windows
    @param video_path: contains the ONI file path
    """
    openni2.initialize()
    dev = openni2.Device.open_file(video_path)
    print(dev.get_sensor_info(openni2.SENSOR_DEPTH))
    depth_stream = dev.create_depth_stream()
    color_stream = dev.create_color_stream()
    depth_stream.start()
    color_stream.start()
    while True:
        frame_depth = depth_stream.read_frame()
        frame_color = color_stream.read_frame()

        frame_depth_data = frame_depth.get_buffer_as_uint16()
        frame_color_data = frame_color.get_buffer_as_uint8()

        depth_array = np.ndarray((frame_depth.height, frame_depth.width),
                                 dtype=np.uint16,
                                 buffer=frame_depth_data)
        color_array = np.ndarray((frame_color.height, frame_color.width, 3),
                                 dtype=np.uint8,
                                 buffer=frame_color_data)
        color_array = cv2.cvtColor(color_array, cv2.COLOR_BGR2RGB)

        cv2.imwrite(
            "./depth/depth_" + str("{:020d}".format(frame_depth.timestamp)) +
            ".png", depth_array)
        cv2.imwrite(
            "./color/color_" + str("{:020d}".format(frame_color.timestamp)) +
            ".png", color_array)

        cv2.imshow("depth", depth_array)
        cv2.imshow("color", color_array)

        ch = 0xFF & cv2.waitKey(1)
        if ch == 27:
            break

    depth_stream.stop()
    color_stream.stop()
    openni2.unload()
    cv2.destroyAllWindows()
Example #22
def main():
    openni2.initialize('/usr/lib')

    dev = openni2.Device.open_any()
    print dev.get_sensor_info(openni2.SENSOR_DEPTH)

    depth_stream = dev.create_depth_stream()
    depth_stream.start()

    frame = depth_stream.read_frame()
    frame_data = frame.get_buffer_as_uint16()

    depth_array = np.ndarray((frame.height, frame.width),
                             dtype=np.uint16,
                             buffer=frame_data)
    plt.imshow(depth_array)
    plt.show()

    depth_stream.stop()

    openni2.unload()
Example #23
    def test_openni2_bindings(self):
        if os.path.exists(self.GEN_ONI):
            os.unlink(self.GEN_ONI)
        subprocess.check_call(["python", "../bin/build_openni.py"])
        self.assertTrue(os.path.exists(self.GEN_ONI))
        
        from primesense import _openni2
        from primesense import openni2
        
        openni2.initialize()
        ver = openni2.get_version()
        openni2.unload()

        self.assertEqual(ver.major, openni2.c_api.ONI_VERSION_MAJOR)
        self.assertEqual(ver.minor, openni2.c_api.ONI_VERSION_MINOR)

        h_file = os.path.join(config.get("headers", "openni_include_dir"), "OpenNI.h")

        self.check_unexposed_functions(openni2, _openni2, h_file, ["oniGetExtendedError"])
        self.check_missing_names_by_prefix(openni2, h_file, "DEVICE_PROPERTY_", "ONI_")
        self.check_missing_names_by_prefix(openni2, h_file, "STREAM_PROPERTY_", "ONI_")
Example #24
def kinectInit(logIP, logPort):

    global navManager, depth_stream

    try:
        navManager = rpyc.connect(logIP, logPort)
        navManager.root.connectionStatus("kinect", True)
        log("logger connection established")

    except:
        print(
            f"kinect - could not establish connection to logger {sys.exc_info()[0]}"
        )
        raise SystemExit()

    # try to capture the depth data
    try:
        openni2.initialize("C:/Program Files (x86)/OpenNI2/Redist/")
        dev = openni2.Device.open_any()

        depth_stream = dev.create_depth_stream()
        depth_stream.start()
        depth_stream.set_video_mode(
            c_api.OniVideoMode(
                pixelFormat=c_api.OniPixelFormat.ONI_PIXEL_FORMAT_DEPTH_100_UM,
                resolutionX=640,
                resolutionY=480,
                fps=30))
        frame = depth_stream.read_frame()
        log("depth data successfully captured")
        navManager.root.processStatus("kinect", True)

    except:
        log(f"capturing depth data failed {sys.exc_info()[0]}, 12 V on??")
        try:
            openni2.unload()
        except:
            pass
        raise SystemExit()
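kinectInit above requests ONI_PIXEL_FORMAT_DEPTH_100_UM, while most of the other examples in this listing use ONI_PIXEL_FORMAT_DEPTH_1_MM. A small, hedged helper for turning either raw depth buffer into metres (the function name and the metre conversion are assumptions, not part of any example; it assumes the usual "from primesense import _openni2 as c_api" import used alongside these snippets):

import numpy as np
from primesense import _openni2 as c_api

def depth_frame_to_metres(frame, pixel_format):
    """Convert a raw OpenNI2 depth frame to a float32 array of metres."""
    raw = np.frombuffer(frame.get_buffer_as_uint16(), dtype=np.uint16)
    raw = raw.reshape(frame.height, frame.width).astype(np.float32)
    if pixel_format == c_api.OniPixelFormat.ONI_PIXEL_FORMAT_DEPTH_100_UM:
        return raw / 10000.0  # 100 um units -> metres
    return raw / 1000.0       # assume 1 mm units -> metres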
Example #25
    def run(self):
        self.initRun()
        self.initDepth(640, 480, 30)

        done = False

        print("Server is online")
        while not done:
            self.getDepth(640, 480)
            self.send(self.prepareData(self.data))

            #self.show()
            #cv2.imshow("depth", self.d4d)

            ## Distance map
            # print('Center pixel is {} mm away'.format(dmap[119, 159]))

    # cv2.destroyAllWindows()
        self.depth_stream.stop()
        openni2.unload()
        self.s.close()
        print("Terminated")
Example #26
def split(video_path):
    """
    Split the ONI file into RGB and depth maps and show them in two separate windows
    @param video_path: contains the ONI file path
    """
    openni2.initialize()
    dev = openni2.Device.open_file(video_path)
    print(dev.get_sensor_info(openni2.SENSOR_DEPTH))
    depth_stream = dev.create_depth_stream()
    color_stream = dev.create_color_stream()
    depth_stream.start()
    color_stream.start()
    while True:
        frame_depth = depth_stream.read_frame()
        frame_color = color_stream.read_frame()

        frame_depth_data = frame_depth.get_buffer_as_uint16()
        frame_color_data = frame_color.get_buffer_as_uint8()

        depth_array = np.ndarray((frame_depth.height, frame_depth.width), dtype=np.uint16, buffer=frame_depth_data)
        color_array = np.ndarray((frame_color.height, frame_color.width, 3), dtype=np.uint8, buffer=frame_color_data)
        color_array = cv2.cvtColor(color_array, cv2.COLOR_BGR2RGB)

        cv2.imwrite("./depth/depth_" + str("{:020d}".format(frame_depth.timestamp)) + ".png", depth_array)
        cv2.imwrite("./color/color_" + str("{:020d}".format(frame_color.timestamp)) + ".png", color_array)

        cv2.imshow("depth", depth_array)
        cv2.imshow("color", color_array)

        ch = 0xFF & cv2.waitKey(1)
        if ch == 27:
            break

    depth_stream.stop()
    color_stream.stop()
    openni2.unload()
    cv2.destroyAllWindows()
Example #27
def saveColor(dev):
    color_stream = dev.create_color_stream()
    color_stream.start()

    avi_color = cv2.VideoWriter('color.avi', cv2.cv.CV_FOURCC(*'XVID'),
                                color_stream.get_video_mode().fps,
                                (color_stream.get_video_mode().resolutionX,
                                 color_stream.get_video_mode().resolutionY))

    frame_color = color_stream.read_frame()

    while frame_color.frameIndex < color_stream.get_number_of_frames():
        frame_color = color_stream.read_frame()
        frame_color_data = frame_color.get_buffer_as_uint8()
        color_array = np.ndarray((frame_color.height, frame_color.width, 3),
                                 dtype=np.uint8,
                                 buffer=frame_color_data)
        color_array = cv2.cvtColor(color_array, cv2.COLOR_BGR2RGB)

        avi_color.write(color_array)

    color_stream.stop()
    openni2.unload()
    cv2.destroyAllWindows()
Example #28
def saveDepth(dev):
    depth_stream = dev.create_depth_stream()
    depth_stream.start()
    avi_depth = cv2.VideoWriter('depth.avi', cv2.cv.CV_FOURCC(*'XVID'),
                                depth_stream.get_video_mode().fps,
                                (depth_stream.get_video_mode().resolutionX,
                                 depth_stream.get_video_mode().resolutionY))
    depth_scale_factor = 255.0 / depth_stream.get_max_pixel_value()
    frame_depth = depth_stream.read_frame()

    while frame_depth.frameIndex < depth_stream.get_number_of_frames():
        frame_depth = depth_stream.read_frame()
        frame_depth_data = frame_depth.get_buffer_as_uint16()
        depth_array = np.ndarray((frame_depth.height, frame_depth.width),
                                 dtype=np.uint16,
                                 buffer=frame_depth_data)
        depth_uint8 = cv2.convertScaleAbs(depth_array, alpha=depth_scale_factor)
        depth_colored = cv2.applyColorMap(depth_uint8, cv2.COLORMAP_HSV)

        avi_depth.write(depth_colored)

    depth_stream.stop()
    openni2.unload()
    cv2.destroyAllWindows()
Example #29
def saveColor(dev):
    color_stream = dev.create_color_stream()
    color_stream.start()

    avi_color = cv2.VideoWriter('color.avi', cv2.cv.CV_FOURCC(*'XVID'),
                                color_stream.get_video_mode().fps,
                                (color_stream.get_video_mode().resolutionX,
                                 color_stream.get_video_mode().resolutionY))

    frame_color = color_stream.read_frame()

    while frame_color.frameIndex < color_stream.get_number_of_frames():
        frame_color = color_stream.read_frame()
        frame_color_data = frame_color.get_buffer_as_uint8()
        color_array = np.ndarray((frame_color.height, frame_color.width, 3),
                                 dtype=np.uint8,
                                 buffer=frame_color_data)
        color_array = cv2.cvtColor(color_array, cv2.COLOR_BGR2RGB)

        avi_color.write(color_array)

    color_stream.stop()
    openni2.unload()
    cv2.destroyAllWindows()
def main(argv=None):
    print('Hello! This is XXXXXX Program')

    cv2.namedWindow(window_detection_name)
    cv2.createTrackbar(low_H_name, window_detection_name, low_H, max_value_H,
                       on_low_H_thresh_trackbar)
    cv2.createTrackbar(high_H_name, window_detection_name, high_H, max_value_H,
                       on_high_H_thresh_trackbar)
    cv2.createTrackbar(low_S_name, window_detection_name, low_S, max_value,
                       on_low_S_thresh_trackbar)
    cv2.createTrackbar(high_S_name, window_detection_name, high_S, max_value,
                       on_high_S_thresh_trackbar)
    cv2.createTrackbar(low_V_name, window_detection_name, low_V, max_value,
                       on_low_V_thresh_trackbar)
    cv2.createTrackbar(high_V_name, window_detection_name, high_V, max_value,
                       on_high_V_thresh_trackbar)

    ## Initialize OpenNi
    # dist = './driver/OpenNI-Linux-x64-2.3/Redist'
    dist = './driver/OpenNI-Windows-x64-2.3/Redist'
    openni2.initialize(dist)
    if (openni2.is_initialized()):
        print("openNI2 initialized")
    else:
        print("openNI2 not initialized")

    ## Register the device
    dev = openni2.Device.open_any()

    ## Create the streams
    rgb_stream = dev.create_color_stream()
    depth_stream = dev.create_depth_stream()

    ## Define stream parameters
    w = 320
    h = 240
    fps = 30

    ## Configure the rgb_stream -- changes automatically based on bus speed
    rgb_stream.set_video_mode(
        c_api.OniVideoMode(
            pixelFormat=c_api.OniPixelFormat.ONI_PIXEL_FORMAT_RGB888,
            resolutionX=w,
            resolutionY=h,
            fps=fps))

    ## Configure the depth_stream -- changes automatically based on bus speed
    # print 'Depth video mode info', depth_stream.get_video_mode() # Checks depth video configuration
    depth_stream.set_video_mode(
        c_api.OniVideoMode(
            pixelFormat=c_api.OniPixelFormat.ONI_PIXEL_FORMAT_DEPTH_1_MM,
            resolutionX=w,
            resolutionY=h,
            fps=fps))

    ## Check and configure the mirroring -- default is True
    ## Note: I disable mirroring
    # print 'Mirroring info1', depth_stream.get_mirroring_enabled()
    depth_stream.set_mirroring_enabled(False)
    rgb_stream.set_mirroring_enabled(False)

    ## Start the streams
    rgb_stream.start()
    depth_stream.start()

    ## Synchronize the streams
    dev.set_depth_color_sync_enabled(True)  # synchronize the streams

    ## IMPORTANT: ALIGN DEPTH2RGB (depth warped to match the rgb stream)
    dev.set_image_registration_mode(openni2.IMAGE_REGISTRATION_DEPTH_TO_COLOR)

    ## main loop
    done = False
    while not done:
        key = cv2.waitKey(1) & 255
        ## Read keystrokes
        if key == 27:  # terminate
            print("\tESC key detected!")
            done = True

        ## Streams
        # RGB
        rgb = get_rgb(rgb_stream=rgb_stream, h=h, w=w)

        # DEPTH
        dmap, d4d = get_depth(depth_stream=depth_stream, h=h, w=w)

        # canvas
        canvas = np.hstack((rgb, d4d))
        cv2.rectangle(canvas, (119, 79), (202, 162), (0, 255, 0), 1)
        cv2.rectangle(canvas, (119 + 320, 79), (202 + 320, 162), (0, 255, 0),
                      1)
        ## Display the streams side-by-side
        cv2.imshow('depth || rgb', canvas)

        hsv = cv2.cvtColor(src=rgb, code=cv2.COLOR_BGR2HSV)

        ### for black
        # tblack = cv2.inRange(hsv, (low_H, low_S, low_V), (high_H, high_S, high_V))
        tblack = cv2.inRange(hsv, (100, 130, 0), (130, 220, 150))

        ### for white
        # twhite = cv2.inRange(hsv, (low_H, low_S, low_V), (high_H, high_S, high_V))
        twhite = cv2.inRange(hsv, (0, 0, 230, 0), (160, 200, 255, 0))

        cv2.imshow('black', tblack)
        cv2.imshow('white', twhite)
    # end while

    ## Release resources
    cv2.destroyAllWindows()
    rgb_stream.stop()
    depth_stream.stop()
    openni2.unload()
    print("Terminated")
Example #31
def main():
    p = argparse.ArgumentParser(formatter_class=argparse.RawDescriptionHelpFormatter)
    gp = p.add_mutually_exclusive_group(required=True)
    gp.add_argument("-f", action="store_false", dest="cls", help="extract features from .oni video")
    gp.add_argument("-k", action="store_true", dest="cls", help="classifies people in .oni video")
    p.add_argument("video_path", help="path file .oni")
    args = p.parse_args()

    videoid = os.path.basename(args.video_path[:-4])  # video name
    framecount = 0
    pid = 0  # current person ID (for classification)
    newid = False  # new-ID flag (for classification)
    peoplefeats = []  # feature vector (for classification)
    altezze = []
    vteste = []
    vspalle = []
    vhsv_testa = []
    vhsv_spalle = []

    # initialize OpenNI and open the video streams
    openni2.initialize()
    dev = openni2.Device.open_file(args.video_path)
    dev.set_depth_color_sync_enabled(True)
    depth_stream = dev.create_depth_stream()
    color_stream = dev.create_color_stream()
    depth_stream.start()
    color_stream.start()

    while framecount < depth_stream.get_number_of_frames() and framecount < color_stream.get_number_of_frames():
        dframe = depth_stream.read_frame()
        cframe = color_stream.read_frame()
        framecount += 1

        # type/format conversion for OpenCV
        depth_array = np.ndarray((dframe.height, dframe.width), dtype=np.uint16, buffer=dframe.get_buffer_as_uint16())
        color_array = cv2.cvtColor(
            np.ndarray((cframe.height, cframe.width, 3), dtype=np.uint8, buffer=cframe.get_buffer_as_uint8()),
            cv2.COLOR_RGB2HSV,
        )

        # HEIGHT =======================================================================================================

        # save the background (first frame) so it can be subtracted from the current one
        if framecount == 1:
            background = depth_array.copy()
            mask_b = cv2.inRange(background, 0, 0)  # mask of the null pixels of the background
        foreground = cv2.absdiff(depth_array, background)

        mask_f = cv2.bitwise_or(
            mask_b, cv2.inRange(depth_array, 0, 0)
        )  # mask of null pixels in the background + null pixels in depth_array
        mask_p = cv2.inRange(foreground, 150, 2500)  # person mask (pixels between 150 and 2500)
        cont, _ = cv2.findContours(mask_p.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
        mask_p = np.zeros(mask_p.shape, np.uint8)

        # draw only the contour with the largest area
        if len(cont) > 0:
            cv2.drawContours(
                mask_p, cont, np.argmax([cv2.contourArea(x) for x in cont]), (255, 255, 255), cv2.cv.CV_FILLED
            )
        mask_p = cv2.bitwise_and(mask_p, cv2.bitwise_not(mask_f))  # remove the null pixels from the contour mask

        _, hmax, _, _ = cv2.minMaxLoc(
            foreground, mask_p
        )  # make sure the max height found is inside the mask

        # HEAD + SHOULDERS =============================================================================================

        if hmax > 1500 and np.mean(mask_p[235:245, 315:325]) == 255:  # only when someone passes through the center of the image
            newid = args.cls  # new person (only when classifying)

            altezze.append(hmax)  # include it in the average
            mask_t = cv2.bitwise_and(mask_p, cv2.inRange(foreground, hmax - 150, hmax))  # head down to 15cm from the max
            mask_s = cv2.bitwise_and(
                mask_p, cv2.inRange(foreground, hmax - 500, hmax - 150)
            )  # shoulders between 15 and 50cm from the max
            cont_t, _ = cv2.findContours(mask_t.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
            cont_s, _ = cv2.findContours(mask_s.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)

            if len(cont_s) > 0:
                spalle = np.argmax([cv2.contourArea(x) for x in cont_s])
                if (
                    len(cont_s[spalle]) >= 5 and cv2.contourArea(cont_s[spalle]) > 5000
                ):  # shoulder area > 5000 (arbitrary threshold)
                    vspalle.append(
                        np.sqrt(4 * cv2.contourArea(cont_s[spalle]) * np.pi)
                    )  # equivalent circumference of the shoulders

                    # erode the mask to remove the patches of floor that end up in the color frame
                    hist_spalle = cv2.calcHist(
                        [color_array],
                        [0, 1, 2],
                        cv2.erode(mask_s, np.ones((10, 10), np.uint8)),
                        [18, 4, 4],
                        [0, 180, 0, 255, 0, 255],
                    )  # HSV histogram of the shoulders
                    vhsv_spalle.append(
                        np.unravel_index(np.argmax(hist_spalle), hist_spalle.shape)
                    )  # dominant color of the shoulders

            if len(cont_t) > 0:
                testa = np.argmax([cv2.contourArea(x) for x in cont_t])
                if (
                    len(cont_t[testa]) >= 5 and cv2.contourArea(cont_t[testa]) > 2500
                ):  # head area > 2500 (arbitrary threshold)
                    vteste.append(np.sqrt(4 * cv2.contourArea(cont_t[testa]) * np.pi))  # equivalent circumference of the head

                    # erode the mask to remove the patches of floor that end up in the color frame
                    hist_testa = cv2.calcHist(
                        [color_array],
                        [0, 1, 2],
                        cv2.erode(mask_t, np.ones((10, 10), np.uint8)),
                        [18, 4, 4],
                        [0, 180, 0, 255, 0, 255],
                    )  # HSV histogram of the head
                    vhsv_testa.append(
                        np.unravel_index(np.argmax(hist_testa), hist_testa.shape)
                    )  # dominant color of the head

        else:
            if newid:  # when classifying, keep track of individual people
                pid += 1
                newid = False
                peoplefeats.append(
                    [
                        pid,
                        np.mean(altezze) / 2500 if len(altezze) else 0,
                        np.mean(vteste) / 500 if len(vteste) else 0,
                        np.amax(vspalle) / 500 if len(vspalle) else 0,
                        Counter(vhsv_testa).most_common(1)[0][0][0] / 17.0 if len(vhsv_testa) else 0,
                        Counter(vhsv_testa).most_common(1)[0][0][1] / 3.0 if len(vhsv_testa) else 0,
                        Counter(vhsv_testa).most_common(1)[0][0][2] / 3.0 if len(vhsv_testa) else 0,
                        Counter(vhsv_spalle).most_common(1)[0][0][0] / 17.0 if len(vhsv_spalle) else 0,
                        Counter(vhsv_spalle).most_common(1)[0][0][1] / 3.0 if len(vhsv_spalle) else 0,
                        Counter(vhsv_spalle).most_common(1)[0][0][2] / 3.0 if len(vhsv_spalle) else 0,
                    ]
                )
                altezze = []
                vteste = []
                vspalle = []  # reset everything for a new person
                vhsv_testa = []
                vhsv_spalle = []

        # END FEATURES =================================================================================================

    depth_stream.stop()
    color_stream.stop()

    # save the features to a csv file
    if not args.cls:
        with open("features_id.csv", "a") as features:
            features.write(str(videoid) + ";")
            features.write(str(np.mean(altezze) / 2500 if len(altezze) else 0) + ";")
            features.write(str(np.mean(vteste) / 500 if len(vteste) else 0) + ";")
            features.write(str(np.amax(vspalle) / 500 if len(vspalle) else 0) + ";")
            if len(vhsv_testa):
                features.write(str(Counter(vhsv_testa).most_common(1)[0][0][0] / 17.0) + ";")  # head H
                features.write(str(Counter(vhsv_testa).most_common(1)[0][0][1] / 3.0) + ";")  # head S
                features.write(str(Counter(vhsv_testa).most_common(1)[0][0][2] / 3.0) + ";")  # head V
            else:
                features.write("0;0;0;")
            if len(vhsv_spalle):
                features.write(str(Counter(vhsv_spalle).most_common(1)[0][0][0] / 17.0) + ";")  # shoulder H
                features.write(str(Counter(vhsv_spalle).most_common(1)[0][0][1] / 3.0) + ";")  # shoulder S
                features.write(str(Counter(vhsv_spalle).most_common(1)[0][0][2] / 3.0) + "\n")  # shoulder V
            else:
                features.write("0;0;0\n")

    else:  # classify with knn
        assert os.path.exists("features_id.csv") and os.path.getsize(
            "features_id.csv"
        ), "features_id does not exist or is empty"
        traindata = np.loadtxt("features_id.csv", dtype=np.float32, delimiter=";")

        knn = cv2.KNearest()
        knn.train(traindata[:, 1:], np.matrix(traindata[:, 0]))
        _, results, _, dist = knn.find_nearest(np.matrix(peoplefeats, dtype=np.float32)[:, 1:], 1)
        for i in range(len(results)):
            print "person: {!s} -> class: {!s}, distance: {!s}".format(
                np.matrix(peoplefeats, dtype=np.float32)[:, 0][i], results[i], dist[i]
            )

    openni2.unload()
    cv2.destroyAllWindows()
Example #32
def main(argv=None):
    print('Hello! This is PointNet-Segmentation and Surface-Matching Program')

    opt = parser.parse_args()
    num_points = opt.np
    pretrained_model = opt.ptn
    num_classes = 3
    '''
    Load PointNet Model (model for point-wise classification)
    '''
    classifier = PointNetPointWise(num_points=num_points,
                                   num_classes=num_classes)
    classifier.load_state_dict(torch.load(pretrained_model))
    classifier.eval()
    '''
    Setup camera
    '''
    fps = 30
    # w = 1280 #640
    # h = 960 #480
    # fps = 30
    # # config crop area [h1:h2, w1:w2]
    # h1 = 240 #140
    # h2 = 720 #340
    # w1 = 320 #200
    # w2 = 960 #440
    # w, h, h1, h2, w1, w2 = (np.array([640, 480, 140, 340, 200, 440])).astype(int)
    w, h, h1, h2, w1, w2 = (np.array([640, 480, 140, 340, 200,
                                      440])).astype(int)

    rgb_stream, depth_stream = setup_camera(w=w, h=h, fps=fps)
    from config import CAMERA_CONFIG
    '''
    Record
    '''
    done = False
    while not done:
        key = cv2.waitKey(1) & 255
        ## Read keystrokes
        if key == 27:  # terminate
            print("\tESC key detected!")
            done = True
        elif chr(key) == 's':  # screen capture
            # print("\ts key detected. Saving image {}".format(s))
            '''
            Get data
            '''
            # rgb = rgb[h1:h2, w1:w2, :]
            # dmap = dmap[h1:h2, w1:w2]
            # pc_scene = rgbd2pc(rgb=rgb, depth=dmap, center_x=(w1+w2)/2, center_y=(h1+h2)/2, focal_length=889)
            # pc_scene = sample_data(point_cloud=pc_scene, num_points=num_points)
            # rgbd2pc(rgb, depth=dmap, center_x=325.5, center_y=255.5, focal_length=572, scale=2000)
            xyzrgbrc = rgbd2xyzrgbrc(rgb=rgb, depth=dmap, scale=1000)
            xyzrgbrc = xyzrgbrc[h1:h2, w1:w2, :]  # crop the interested area
            pc_scene = xyzrgb2pc(xyzrgb=xyzrgbrc)

            # ply = generate_ply(pc_scene)
            # with open('./tmp/abc' + '.ply', 'w') as output:
            #     output.write(ply)

            pc_scene = sample_data(point_cloud=pc_scene, num_points=num_points)

            # pc_scene = (np.array(pc_scene)[:, :3]).astype(np.float32)
            '''
            Predict and Segment objects
            '''
            pred_labels = predict_segmentation(classifier=classifier,
                                               input_points=pc_scene)
            visualize(x=pc_scene[:, 0],
                      y=pc_scene[:, 1],
                      z=pc_scene[:, 2],
                      label=pred_labels,
                      point_radius=0.0008)  # visualize predicted results
            pipe_points, wrench_points = visualize_segmented_objects(
                scene_points=pc_scene,
                labels=pred_labels)  # visualize segmented objects
            '''
            Surface-Matching
            '''
            # Convert numpy array to point cloud type
            pc_model = load_ply(path='./models/1.ply', num_points=-1)

            # pc_scene = wrench_points
            visualize(x=pc_model[:, 0],
                      y=pc_model[:, 1],
                      z=pc_model[:, 2],
                      label=np.ones(len(pc_model)),
                      point_radius=0.0008)
            visualize(x=pipe_points[:, 0],
                      y=pipe_points[:, 1],
                      z=pipe_points[:, 2],
                      label=np.ones(len(pipe_points)),
                      point_radius=0.0008)

            result_icp = match_surface(model_points=pc_model,
                                       object_points=pipe_points)

            # Transformation(Rotation angles)
            print('Theta x, Theta y, Theta z:(in Degree) ')
            print(
                rotationMatrixToEulerAngles(result_icp.transformation[:3, :3])
                / np.pi * 180)

        rgb, dmap = display_stream(rgb_stream=rgb_stream,
                                   depth_stream=depth_stream,
                                   h=h,
                                   w=w,
                                   crop=((w1, h1), (w2, h2)))
    # end while

    ## Release resources
    cv2.destroyAllWindows()
    rgb_stream.stop()
    depth_stream.stop()
    openni2.unload()
    print("Terminated")
    def unload(self):
        self.depth_stream.stop()
        self.color_stream.stop()
        openni2.unload()
def main(arg_c_frames):

    openni2.initialize()  # can also accept the path of the OpenNI redistribution

    dev = openni2.Device.open_any()

    # 320x240 -> 60fps ok
    # 640x480 -> 30fps
    # 1280x1024 -> 30fps
    g_params = {
        'c_frames': arg_c_frames,
        'resol_x': 640,
        'resol_y': 480,
        'fps': 30,
        'pixel_format_rgb': c_api.OniPixelFormat.ONI_PIXEL_FORMAT_RGB888,
        # 'pixel_format_depth': c_api.OniPixelFormat.ONI_PIXEL_FORMAT_DEPTH_1_MM,
        'pixel_format_depth':
        c_api.OniPixelFormat.ONI_PIXEL_FORMAT_DEPTH_100_UM,
    }
    print g_params

    # create color stream
    color_stream = dev.create_color_stream()
    color_stream.set_video_mode(
        c_api.OniVideoMode(pixelFormat=g_params['pixel_format_rgb'],
                           resolutionX=g_params['resol_x'],
                           resolutionY=g_params['resol_y'],
                           fps=g_params['fps']))

    # create depth stream
    depth_stream = dev.create_depth_stream()
    depth_stream.set_video_mode(
        c_api.OniVideoMode(pixelFormat=g_params['pixel_format_depth'],
                           resolutionX=g_params['resol_x'],
                           resolutionY=g_params['resol_y'],
                           fps=g_params['fps']))

    list_frames_color = []
    list_frames_depth = []

    # crap: not working as expected...
    # th = threading.Thread(target=monitor_thread,
    #                       args=(list_frames_depth, list_frames_color))
    # th.daemon = True
    # th.start()

    # ==== BEGIN INTENSIVE ====
    print 'starting color stream'
    color_stream.start()
    print 'starting depth stream'
    depth_stream.start()
    time.sleep(2.0)  # wait for stream to be stable...

    # idling...
    for i in range(10):
        color_stream.read_frame()
        depth_stream.read_frame()

    # record now...
    for i in range(g_params['c_frames']):
        frame_color = color_stream.read_frame()
        frame_depth = depth_stream.read_frame()
        list_frames_color.append(frame_color)
        list_frames_depth.append(frame_depth)

    print 'stopping color stream'
    color_stream.stop()
    print 'stopping depth stream'
    depth_stream.stop()
    # ==== END INTENSIVE ====

    print 'processing color frames ...'
    list_timestamp_rgb, list_img_rgb = process_frames_color(
        list_frames_color, g_params['resol_x'])
    save_list_img_dir('rgb', list_timestamp_rgb, list_img_rgb)

    print 'processing depth frames ...'
    list_timestamp_depth, list_img_depth = process_frames_depth(
        list_frames_depth)
    save_list_img_dir('depth', list_timestamp_depth, list_img_depth)

    print 'generating assoc.txt ...'
    os.system('./associate.py rgb.txt depth.txt > assoc.txt')
    print '# of lines associations:'
    os.system('wc -l assoc.txt')

    path_dir_rgbd = './data-fastfusion-tum/rgbd_dataset'
    print 'saving data in ' + path_dir_rgbd + ' ...'
    if os.path.exists(path_dir_rgbd):
        shutil.rmtree(path_dir_rgbd)
    os.mkdir(path_dir_rgbd)
    cmd = 'mv rgb rgb.txt depth depth.txt assoc.txt ' + path_dir_rgbd
    # print cmd
    os.system(cmd)

    openni2.unload()
def populate(video_path):
        """
        This script populates the VIDEO, DEPTH_FRAME and RGB_FRAME tables
        @param video_path: contains the ONI file path
        """

        ##### obtains Video id ####################
        # preparing a cursor object
        cursor = db.cursor()
        query ="SELECT COUNT(*) from VIDEO"
        cursor.execute(query)             #executes query
        res=cursor.fetchone()
        total_rows=res[0]
        videoid=total_rows+1

        #query for VIDEO table####################

        query = """INSERT INTO VIDEO(VIDEOID,
         VIDEO)
         VALUES (%s, %s)"""
        video_data=(videoid,video_path)
        try:
           # Execute the SQL command
           cursor.execute(query,video_data)
           # Commit changes in the database
           db.commit()
        except:
           # Rollback in case there is any error
           db.rollback()

        ###########################################

        openni2.initialize()
        dev = openni2.Device.open_file(video_path)
        print (dev.get_sensor_info(openni2.SENSOR_DEPTH))
        depth_stream = dev.create_depth_stream()
        color_stream = dev.create_color_stream()
        depth_stream.start()
        color_stream.start()
        ##### getting first frame timestamp ##########################
        first_frame=depth_stream.read_frame()
        frame1_timestamp = datetime.datetime.fromtimestamp(float(first_frame.timestamp)/1000000.0  )

        ### we want to start counting from  2015-06-01 01:00:00 ####
        frame1_timestamp += datetime.timedelta(days=((365*45)+162))

        ##### initialize a frame counter and the variable flag  #########
        frameno = 0
        flag= False
        while True:
                frame_depth = depth_stream.read_frame()
                frame_color = color_stream.read_frame()

                frame_depth_data = frame_depth.get_buffer_as_uint16()
                frame_color_data = frame_color.get_buffer_as_uint8()

                depth_array = np.ndarray((frame_depth.height, frame_depth.width), dtype = np.uint16, buffer = frame_depth_data)
                color_array = np.ndarray((frame_color.height, frame_color.width, 3), dtype = np.uint8, buffer = frame_color_data)
                color_array = cv2.cvtColor(color_array, cv2.COLOR_BGR2RGB)

                depth_timestamp = datetime.datetime.fromtimestamp(float(frame_depth.timestamp)/1000000.0  )
                color_timestamp = datetime.datetime.fromtimestamp(float(frame_color.timestamp)/1000000.0  )
                depth_timestamp += datetime.timedelta(days=((365*45)+162))
                color_timestamp += datetime.timedelta(days=((365*45)+162))

                cv2.imshow("depth", depth_array)
                cv2.imshow("color", color_array)
                ##### counting frames #############
                frameno = frameno + 1

                #### this prevents the video from looping  ######
                if (frame1_timestamp == depth_timestamp and frameno != 1):
                    flag=True


                if (flag == False):
                    ### query for depth_frame table ####################
                    query_3 = """INSERT INTO DEPTH_FRAME(VIDEOID,FRAMENO,TIMESTAMP)
                    VALUES (%s, %s, %s)"""
                    depth_dbdata=(videoid,frameno,depth_timestamp)

                    ### query for rgb_frame table ####################
                    query_4 = """INSERT INTO RGB_FRAME(VIDEOID,FRAMENO,TIMESTAMP)
                    VALUES (%s, %s, %s)"""
                    rgb_dbdata=(videoid,frameno,color_timestamp)

                    try:
                       # Execute the SQL command
                       cursor.execute(query_3,depth_dbdata)
                       cursor.execute(query_4,rgb_dbdata)
                       # Commit changes in the database
                       db.commit()
                    except:
                       # Rollback in case there is any error
                       db.rollback()

                ch = 0xFF & cv2.waitKey(1)
                if (ch == 27 or flag == True):
                    break

        depth_stream.stop()
        color_stream.stop()
        openni2.unload()
        cv2.destroyAllWindows()

        # disconnect from server
        db.close()
Example #36
def main(argv=None):
    print('Hello! This is XXXXXX Program')

    ## Load PointNet config
    parser = argparse.ArgumentParser()
    parser.add_argument('--model', type=str, default='./seg/seg_model_1.pth', help='model path')
    opt = parser.parse_args()
    print(opt)

    ## Load PointNet model
    num_points = 2700
    classifier = PointNetDenseCls(num_points=num_points, k=10)
    classifier.load_state_dict(torch.load(opt.model))
    classifier.eval()

    ### Config visualization
    cmap = plt.cm.get_cmap("hsv", 5)
    cmap = np.array([cmap(i) for i in range(10)])[:, :3]
    # gt = cmap[seg - 1, :]


    ## Initialize OpenNi
    # dist = './driver/OpenNI-Linux-x64-2.3/Redist'
    dist = './driver/OpenNI-Windows-x64-2.3/Redist'
    openni2.initialize(dist)
    if (openni2.is_initialized()):
        print("openNI2 initialized")
    else:
        print("openNI2 not initialized")

    ## Register the device
    dev = openni2.Device.open_any()

    ## Create the streams
    rgb_stream = dev.create_color_stream()
    depth_stream = dev.create_depth_stream()

    ## Define stream parameters
    w = 320
    h = 240
    fps = 30

    ## Configure the rgb_stream -- changes automatically based on bus speed
    rgb_stream.set_video_mode(
        c_api.OniVideoMode(pixelFormat=c_api.OniPixelFormat.ONI_PIXEL_FORMAT_RGB888, resolutionX=w, resolutionY=h,
                           fps=fps))

    ## Configure the depth_stream -- changes automatically based on bus speed
    # print 'Depth video mode info', depth_stream.get_video_mode() # Checks depth video configuration
    depth_stream.set_video_mode(
        c_api.OniVideoMode(pixelFormat=c_api.OniPixelFormat.ONI_PIXEL_FORMAT_DEPTH_1_MM, resolutionX=w, resolutionY=h,
                           fps=fps))

    ## Check and configure the mirroring -- default is True
    ## Note: I disable mirroring
    # print 'Mirroring info1', depth_stream.get_mirroring_enabled()
    depth_stream.set_mirroring_enabled(False)
    rgb_stream.set_mirroring_enabled(False)

    ## Start the streams
    rgb_stream.start()
    depth_stream.start()

    ## Synchronize the streams
    dev.set_depth_color_sync_enabled(True)  # synchronize the streams

    ## IMPORTANT: ALIGN DEPTH2RGB (depth warped to match the rgb stream)
    dev.set_image_registration_mode(openni2.IMAGE_REGISTRATION_DEPTH_TO_COLOR)

    saving_folder_path = './shapenetcore_partanno_segmentation_benchmark_v0/tools/'
    if not os.path.exists(saving_folder_path):
        os.makedirs(saving_folder_path+'RGB')
        os.makedirs(saving_folder_path+'D')
        os.makedirs(saving_folder_path+'PC')
        os.makedirs(saving_folder_path+'points')
        os.makedirs(saving_folder_path+'points_label')

    from config import CAMERA_CONFIG

    ## main loop
    s = 1000
    done = False
    while not done:
        key = cv2.waitKey(1) & 255
        ## Read keystrokes
        if key == 27:  # terminate
            print("\tESC key detected!")
            done = True
        elif chr(key) == 's':  # screen capture
            print("\ts key detected. Saving image {}".format(s))


            rgb = rgb[60:180, 80:240, :]
            dmap = dmap[60:180, 80:240]
            ply_content, points_content = generate_ply_from_rgbd(rgb=rgb, depth=dmap, config=CAMERA_CONFIG)

            cv2.imwrite(saving_folder_path + "RGB/" + str(s) + '.png', rgb)
            cv2.imwrite(saving_folder_path + "D/" + str(s) + '.png', dmap)
            print(rgb.shape, dmap.shape)
            print(type(rgb), type(dmap))
            with open(saving_folder_path + "PC/" + str(s) + '.ply', 'w') as output:
                output.write(ply_content)
            print(saving_folder_path + "PC/" + str(s) + '.ply', ' done')
            s += 1  # uncomment for multiple captures

            # ### Get pointcloud of scene for prediction
            # points_np = (np.array(points_content)[:, :3]).astype(np.float32)
            # choice = np.random.choice(len(points_np), num_points, replace=True)
            # points_np = points_np[choice, :]
            # points_torch = torch.from_numpy(points_np)
            #
            # points_torch = points_torch.transpose(1, 0).contiguous()
            #
            # points_torch = Variable(points_torch.view(1, points_torch.size()[0], points_torch.size()[1]))
            #
            # ### Predict to segment scene
            # pred, _ = classifier(points_torch)
            # pred_choice = pred.data.max(2)[1]
            # print(pred_choice)

        ## Streams
        # RGB
        rgb = get_rgb(rgb_stream=rgb_stream, h=h, w=w)

        # DEPTH
        dmap, d4d = get_depth(depth_stream=depth_stream, h=h, w=w)

        # canvas
        canvas = np.hstack((rgb, d4d))
        ## Display the streams side-by-side
        cv2.imshow('depth || rgb', canvas)
    # end while

    ## Release resources
    cv2.destroyAllWindows()
    rgb_stream.stop()
    depth_stream.stop()
    openni2.unload()
    print("Terminated")
    def sess_run(self, detection_graph=0, detection_sess=0):

        ## rtp library, containing some useful functions
        _di, _config = self.__config()

        rtp = RealtimeHandposePipeline(1,
                                       config=_config,
                                       di=_di,
                                       verbose=False,
                                       comrefNet=None,
                                       maxDepth=EXACT_TABLE_CAMERA_DISTANCE,
                                       minDepth=DEPTH_THRESHOLD)
        joint_num = self.__joint_num()  ## 14 joints
        cube_size = self.__crop_cube()  ##  to rescale the cropped hand image

        with tf.Session() as sess:
            init = tf.global_variables_initializer()
            sess.run(init)
            self.saver.restore(sess, self.model_path)

            ### Initialize OpenNI with its libraries

            frameRate = 30
            width = 640  # Width of image
            height = 480  # height of image
            openni2.initialize("django_project\\orbbec-astra\\Redist"
                               )  #The OpenNI2 Redist folder
            # Open a device
            dev = openni2.Device.open_any()
            # Open two streams: one for color and one for depth
            depth_stream = dev.create_depth_stream()
            depth_stream.start()
            depth_stream.set_video_mode(
                c_api.OniVideoMode(pixelFormat=c_api.OniPixelFormat.
                                   ONI_PIXEL_FORMAT_DEPTH_1_MM,
                                   resolutionX=width,
                                   resolutionY=height,
                                   fps=frameRate))

            color_stream = dev.create_color_stream()
            color_stream.start()
            color_stream.set_video_mode(
                c_api.OniVideoMode(
                    pixelFormat=c_api.OniPixelFormat.ONI_PIXEL_FORMAT_RGB888,
                    resolutionX=width,
                    resolutionY=height,
                    fps=frameRate))

            # Register both streams so they're aligned
            dev.set_image_registration_mode(
                c_api.OniImageRegistrationMode.
                ONI_IMAGE_REGISTRATION_DEPTH_TO_COLOR)
            # Set exposure settings
            # Camera Exposure
            bAutoExposure = False
            exposure = 100
            if (color_stream.camera != None):
                color_stream.camera.set_auto_exposure(bAutoExposure)
                color_stream.camera.set_exposure(exposure)

            ## ===================================main loop==============================================
            while True:

                ## Loading and correcting depth frame
                frame = depth_stream.read_frame()
                frame_data = frame.get_buffer_as_uint16()
                depthPix = np.frombuffer(frame_data, dtype=np.uint16)
                depthPix.shape = (height, width)

                depthPix = DepthMapCorrection(depthPix)

                ## ____________________RGB-D based hand detection :____________________________________
                # transform 1 channel depth image to RGB channels
                detection_frame = np.array(
                    Image.fromarray(
                        Depth_to_255_scale(depthPix)).convert('RGB'))

                ##Detection :
                processed_image, cropped, [xmin, xmax, ymin,
                                           ymax] = detection_results(
                                               detection_graph,
                                               detection_frame,
                                               detection_sess,
                                               score_threshold=0.2)
                cv2.imshow('detection_frame', processed_image)

                depthPix_original = depthPix.copy()
                depthPix = depthPix[xmin:xmax, ymin:ymax]
                cv2.imshow('cropped', cropped)
                ## returning cropped depth image :depthPix  (original size 200x200 : change it in the hand detection folder )

                try:
                    ## loc is the center's coordinates of our bounding box in (pixels,pixels,mm), used to make the center of mass of the hand closer to it instead of the closest object to the camera
                    loc_rgbd_detector = [
                        depthPix.shape[0] // 2, depthPix.shape[1] // 2,
                        depthPix[depthPix.shape[0] // 2,
                                 depthPix.shape[1] // 2]
                    ]

                    ## Countour based Hand detection + image preprocessing + the hand's center of mass com3D
                    crop1, M, com3D, bounds = rtp.detect(
                        depthPix, loc_rgbd_detector)
                    if com3D[0] == 0:
                        continue  # skipping errors during hand detection using COM and contours

                    crop = crop1.reshape(1, crop1.shape[0], crop1.shape[1],
                                         1).astype('float32')

                    ##____________________________Hand pose estimation______________________________________
                    pred_ = sess.run(self.hand_tensor,
                                     feed_dict={self.inputs: crop})
                    norm_hand = np.reshape(pred_, (joint_num, 3))

                    ## from normalized and relative to com3D to global coordinates
                    pose = norm_hand * cube_size / 2. + com3D

                    ## Show results
                    img, J_pixels = rtp.show3(
                        depthPix_original, pose, self._dataset,
                        (np.float32(xmin), np.float32(ymin)))
                    cv2.imshow('frame', img)
                    cv2.imshow('crop1', crop1)

                    print('com3D :', com3D, ' pose[0] :', pose[0],
                          ' pixels[0] ', J_pixels[0])
                except:
                    print('error happened')
                    continue
                if cv2.waitKey(1) >= 0:
                    break
        openni2.unload()

        cv2.destroyAllWindows()
Example #38
    def unload(self):
        self.depth_stream.stop()
        self.color_stream.stop()
        openni2.unload()
Example #39
    def close(self):
        self.rgb_stream.close()
        self.depth_stream.close()
        openni2.unload()
Example #40
def kill_openni():
  openni2.unload()
Example #41
def main():
    p = argparse.ArgumentParser(
        formatter_class=argparse.RawDescriptionHelpFormatter)
    gp = p.add_mutually_exclusive_group(required=True)
    gp.add_argument('-f',
                    action='store_false',
                    dest='cls',
                    help='extract features from .oni video')
    gp.add_argument('-k',
                    action='store_true',
                    dest='cls',
                    help='classifies people in .oni video')
    p.add_argument('video_path', help='path to the .oni file')
    args = p.parse_args()

    videoid = os.path.basename(args.video_path[:-4])  # video name
    framecount = 0
    pid = 0  # current person ID (for classification)
    newid = False  # new-ID flag (for classification)
    peoplefeats = []  # feature vectors (for classification)
    altezze = []
    vteste = []
    vspalle = []
    vhsv_testa = []
    vhsv_spalle = []

    # initialize OpenNI and open the video streams
    openni2.initialize()
    dev = openni2.Device.open_file(args.video_path)
    dev.set_depth_color_sync_enabled(True)
    depth_stream = dev.create_depth_stream()
    color_stream = dev.create_color_stream()
    depth_stream.start()
    color_stream.start()

    while framecount < depth_stream.get_number_of_frames(
    ) and framecount < color_stream.get_number_of_frames():
        dframe = depth_stream.read_frame()
        cframe = color_stream.read_frame()
        framecount += 1

        # type/format conversion for OpenCV
        depth_array = np.ndarray((dframe.height, dframe.width),
                                 dtype=np.uint16,
                                 buffer=dframe.get_buffer_as_uint16())
        color_array = cv2.cvtColor(
            np.ndarray((cframe.height, cframe.width, 3),
                       dtype=np.uint8,
                       buffer=cframe.get_buffer_as_uint8()), cv2.COLOR_RGB2HSV)

        # HEIGHT =======================================================================================================

        # save the background (first frame) so we can subtract it from the current one
        if framecount == 1:
            background = depth_array.copy()
            mask_b = cv2.inRange(background, 0,
                                 0)  # mask of the background's null pixels
        foreground = cv2.absdiff(depth_array, background)

        mask_f = cv2.bitwise_or(mask_b, cv2.inRange(
            depth_array, 0,
            0))  # mask of null pixels in the background + null pixels in depth_array
        mask_p = cv2.inRange(
            foreground, 150,
            2500)  # person mask (pixels between 150 and 2500)
        cont, _ = cv2.findContours(mask_p.copy(), cv2.RETR_EXTERNAL,
                                   cv2.CHAIN_APPROX_NONE)
        mask_p = np.zeros(mask_p.shape, np.uint8)

        # draw only the contour with the largest area
        if len(cont) > 0:
            cv2.drawContours(mask_p, cont,
                             np.argmax([cv2.contourArea(x) for x in cont]),
                             (255, 255, 255), cv2.cv.CV_FILLED)
        mask_p = cv2.bitwise_and(mask_p, cv2.bitwise_not(
            mask_f))  # remove the null pixels from the contour mask

        _, hmax, _, _ = cv2.minMaxLoc(
            foreground, mask_p
        )  # make sure the maximum height found lies inside the mask

        # HEAD + SHOULDERS =============================================================================================

        if hmax > 1500 and np.mean(
                mask_p[235:245, 315:325]
        ) == 255:  # only when the person passes through the centre of the image
            newid = args.cls  # new person (only when classifying)

            altezze.append(hmax)  # include it in the mean
            mask_t = cv2.bitwise_and(
                mask_p, cv2.inRange(foreground, hmax - 150,
                                    hmax))  # head: up to 15cm below the max
            mask_s = cv2.bitwise_and(
                mask_p,
                cv2.inRange(foreground, hmax - 500,
                            hmax - 150))  # shoulders: between 15 and 50cm below the max
            cont_t, _ = cv2.findContours(mask_t.copy(), cv2.RETR_EXTERNAL,
                                         cv2.CHAIN_APPROX_NONE)
            cont_s, _ = cv2.findContours(mask_s.copy(), cv2.RETR_EXTERNAL,
                                         cv2.CHAIN_APPROX_NONE)

            if len(cont_s) > 0:
                spalle = np.argmax([cv2.contourArea(x) for x in cont_s])
                if len(cont_s[spalle]) >= 5 and cv2.contourArea(
                        cont_s[spalle]
                ) > 5000:  # shoulder area 5000 (arbitrary threshold)
                    vspalle.append(
                        np.sqrt(4 * cv2.contourArea(cont_s[spalle]) *
                                np.pi))  # equivalent circumference of the shoulders

                    # erode the mask to drop the bits of floor that end up in the colour frame
                    hist_spalle = cv2.calcHist(
                        [color_array], [0, 1, 2],
                        cv2.erode(mask_s, np.ones((10, 10),
                                                  np.uint8)), [18, 4, 4],
                        [0, 180, 0, 255, 0, 255])  # shoulders HSV histogram
                    vhsv_spalle.append(
                        np.unravel_index(np.argmax(hist_spalle),
                                         hist_spalle.shape)
                    )  # dominant colour of the shoulders

            if len(cont_t) > 0:
                testa = np.argmax([cv2.contourArea(x) for x in cont_t])
                if len(cont_t[testa]) >= 5 and cv2.contourArea(
                        cont_t[testa]
                ) > 2500:  # head area 2500 (arbitrary threshold)
                    vteste.append(
                        np.sqrt(4 * cv2.contourArea(cont_t[testa]) *
                                np.pi))  # equivalent circumference of the head

                    # erode the mask to drop the bits of floor that end up in the colour frame
                    hist_testa = cv2.calcHist(
                        [color_array], [0, 1, 2],
                        cv2.erode(mask_t, np.ones((10, 10),
                                                  np.uint8)), [18, 4, 4],
                        [0, 180, 0, 255, 0, 255])  # head HSV histogram
                    vhsv_testa.append(
                        np.unravel_index(
                            np.argmax(hist_testa),
                            hist_testa.shape))  # dominant colour of the head

        else:
            if newid:  # when classifying, keep track of each individual person
                pid += 1
                newid = False
                peoplefeats.append([
                    pid,
                    np.mean(altezze) / 2500 if len(altezze) else 0,
                    np.mean(vteste) / 500 if len(vteste) else 0,
                    np.amax(vspalle) / 500 if len(vspalle) else 0,
                    Counter(vhsv_testa).most_common(1)[0][0][0] /
                    17. if len(vhsv_testa) else 0,
                    Counter(vhsv_testa).most_common(1)[0][0][1] /
                    3. if len(vhsv_testa) else 0,
                    Counter(vhsv_testa).most_common(1)[0][0][2] /
                    3. if len(vhsv_testa) else 0,
                    Counter(vhsv_spalle).most_common(1)[0][0][0] /
                    17. if len(vhsv_spalle) else 0,
                    Counter(vhsv_spalle).most_common(1)[0][0][1] /
                    3. if len(vhsv_spalle) else 0,
                    Counter(vhsv_spalle).most_common(1)[0][0][2] /
                    3. if len(vhsv_spalle) else 0
                ])
                altezze = []
                vteste = []
                vspalle = []  # reset everything for a new person
                vhsv_testa = []
                vhsv_spalle = []

        # END OF FEATURES ==============================================================================================

    depth_stream.stop()
    color_stream.stop()

    # save the features to a csv file
    if not args.cls:
        with open('features_id.csv', 'a') as features:
            features.write(str(videoid) + ';')
            features.write(
                str(np.mean(altezze) / 2500 if len(altezze) else 0) + ';')
            features.write(
                str(np.mean(vteste) / 500 if len(vteste) else 0) + ';')
            features.write(
                str(np.amax(vspalle) / 500 if len(vspalle) else 0) + ';')
            if len(vhsv_testa):
                features.write(
                    str(Counter(vhsv_testa).most_common(1)[0][0][0] / 17.) +
                    ';')  # head H
                features.write(
                    str(Counter(vhsv_testa).most_common(1)[0][0][1] / 3.) +
                    ';')  # head S
                features.write(
                    str(Counter(vhsv_testa).most_common(1)[0][0][2] / 3.) +
                    ';')  # head V
            else:
                features.write('0;0;0;')
            if len(vhsv_spalle):
                features.write(
                    str(Counter(vhsv_spalle).most_common(1)[0][0][0] / 17.) +
                    ';')  # shoulders H
                features.write(
                    str(Counter(vhsv_spalle).most_common(1)[0][0][1] / 3.) +
                    ';')  # shoulders S
                features.write(
                    str(Counter(vhsv_spalle).most_common(1)[0][0][2] / 3.) +
                    '\n')  # shoulders V
            else:
                features.write('0;0;0\n')

    else:  # classify with knn
        assert os.path.exists('features_id.csv') and os.path.getsize('features_id.csv'), \
            "features_id does not exist or is empty"
        traindata = np.loadtxt('features_id.csv',
                               dtype=np.float32,
                               delimiter=';')

        knn = cv2.KNearest()
        knn.train(traindata[:, 1:], np.matrix(traindata[:, 0]))
        _, results, _, dist = knn.find_nearest(
            np.matrix(peoplefeats, dtype=np.float32)[:, 1:], 1)
        for i in range(len(results)):
            print "person: {!s} -> class: {!s}, distance: {!s}".format(
                np.matrix(peoplefeats, dtype=np.float32)[:, 0][i], results[i],
                dist[i])

    openni2.unload()
    cv2.destroyAllWindows()
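The kNN step in the example above uses the OpenCV 2.x API (cv2.KNearest / find_nearest). On OpenCV 3 and 4 the same functionality lives under cv2.ml; a hedged sketch of the equivalent call, assuming the same semicolon-separated features_id.csv layout and a peoplefeats list shaped like the one built in the loop above:

import numpy as np
import cv2

def classify_people(peoplefeats, csv_path='features_id.csv'):
    # peoplefeats: list of rows [pid, f1, ..., f9] as built in the example above.
    traindata = np.loadtxt(csv_path, dtype=np.float32, delimiter=';')
    samples, labels = traindata[:, 1:], traindata[:, 0].reshape(-1, 1)

    knn = cv2.ml.KNearest_create()
    knn.train(samples, cv2.ml.ROW_SAMPLE, labels)

    query = np.asarray(peoplefeats, dtype=np.float32)
    _, results, _, dist = knn.findNearest(query[:, 1:], k=1)
    for person, cls, d in zip(query[:, 0], results.ravel(), dist.ravel()):
        print("person: {} -> class: {}, distance: {}".format(person, cls, d))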
Example #42
def run_old_loop():
    """
    Creates session videos. However, OpenCV video release is not working: HUGE memory
    leak there.
#if __name__ == "__main__":
    #time.sleep(20) # secs pause! for startup
    #devN = 1

    """
    synctype  = "relaxed"
    actorname = "patient_0"

    ## Flags
    vis_frames       = True  # True   # display frames
    save_frames_flag = False  # save all frames
    test_flag        = True

    test_frames = 5000000
    fps = 30
    c=0
    ## Runtime and Controls
    nf  = 500  # 172800 = 60*60*24*2; number of video frames in each clip and video
    f   = 1  # frame counter
    tic = 0
    run_time   = 0
    total_t    = 0
    fourcc=cv2.cv.CV_FOURCC('X','V','I','D')    
    
    done = False

    ## TCP communication
    ## Start the client thread:
    clientConnectThread = client.ClientConnect("connect", "{}".format(devN))
    clientConnectThread.setDaemon(True)
    clientConnectThread.start() #launching thread
    #time.sleep(1)    
    server_time = 0.0
    server_response="none"
    response = clientConnectThread.get_command()
    if "_" in response:
        server_response,server_time  = response.split("_")
    else: server_response = response
    # print(server_response, server_time)
    
    ## Create a pandas dataframe to hold the information (index starts at 1)
    cols = ["frameN","localtime","servertime"]
    df   = pd.DataFrame(columns=cols)
    df.loc[c] = [0, time.time(), server_time]

    ## The folders for all data
    folder4frames,folder4csv=createFolders(actorname)
    print "Creating Video Headers"
    ## Initialize the videowriter
    vid_num=0
    video_rgb   = cv2.VideoWriter(folder4frames+"/rgb/dev"  +str(devN)+"rgb"  +str(vid_num)+".avi",fourcc, fps=fps, frameSize=(w,h))
    video_depth = cv2.VideoWriter(folder4frames+"/depth/dev"+str(devN)+"depth"+str(vid_num)+".avi",fourcc, fps=fps, frameSize=(w,h))
    video_dmap  = cv2.VideoWriter(folder4frames+"/dmap/dev" +str(devN)+"dmap" +str(vid_num)+".avi",fourcc, fps=fps, frameSize=(w,h)) 

    # Get the first timestamp
    tic = time.time()
    start_t = tic

    ##--- main loop ---
    done     = False
    while not done: # view <= nviews
        ## RGB-D Streams
        rgb   = get_rgb()
        dmap, d4d = get_depth()

        if vis_frames: # Display the streams
            rgbdm = np.hstack((rgb,d4d,dmap))
            #rgbdm_small = rgbdm # original size
            #rgbdm_small = cv2.resize(rgbdm,(1280,240)) # medium
            #rgbdm_small = cv2.resize(rgbdm,(640,240)) # smallest     
            rgbdm_small = cv2.resize(rgbdm,(960,240)) # smallest 
            cv2.imshow("1:4 scale", rgbdm_small)
            ## === Keyboard Commands ===
            key = cv2.waitKey(1) & 255
            if key == 27: 
                print "Terminating code!"
                done = True        
        #Poll the server:
        clientConnectThread.update_command("check")
        response = clientConnectThread.get_command()
        if "_" in response:
            server_response, server_time = response.split("_")
        else: server_response = response
    
        run_time = time.time()-tic
        print "Processing {} session and frame number {}".format(vid_num,f)
        
        ## === check synchronization type
        if synctype =='strict':
            if server_response == 'save':
                video_rgb.write(rgb)    # --> rgb vid file
                video_depth.write(d4d)  # --> depth vid file
                video_dmap.write(dmap)  # --> dmap vid file
                # Write Datarows
                df.loc[c] =[f,strftime("%a, %d %b %Y %H:%M:%S +0000", gmtime()), server_time]             
                f+=1
                c+=1
        elif synctype == 'relaxed':            
            video_rgb.write(rgb)    # --> rgb vid file
            video_depth.write(d4d)  # --> depth vid file
            video_dmap.write(dmap)  # --> dmap vid file

            # Write Datarows
            df.loc[c] =[f, run_time,server_time]            
            f+=1
            c+=1
        else:
            print "synchronization type unknown"
            
        
        if test_flag and (f==test_frames): 
            print "Terminating code!"
            done = True
            
        #if np.mod(f,nf) == 0: # close and create new csv and video
        if f == nf:
            df.to_csv(folder4csv+"dev"+str(devN)+'_data'+str(vid_num)+'.csv')
            print "session {} saved".format(vid_num)
            # release video writers
            video_rgb.release()
            video_depth.release()
            video_dmap.release()
            del df
            del video_rgb
            del video_depth
            del video_dmap
            print "\t Cleared the vars and memory and prepping next session"
            vid_num+=1
            ## Create new video writers 
            video_rgb   = cv2.VideoWriter(folder4frames+"/rgb/dev"  +str(devN)+"rgb"  +str(vid_num)+".avi",fourcc, fps=fps, frameSize=(w,h))
            video_depth = cv2.VideoWriter(folder4frames+"/depth/dev"+str(devN)+"depth"+str(vid_num)+".avi",fourcc, fps=fps, frameSize=(w,h))
            video_dmap  = cv2.VideoWriter(folder4frames+"/dmap/dev" +str(devN)+"dmap" +str(vid_num)+".avi",fourcc, fps=fps, frameSize=(w,h))    
            # reset pandas dataframe
            df = pd.DataFrame(columns=cols)
            c=0
            f=0
            ##done = True #stop after the first recording.

        #elif chr(key) =='s':  #s-key to save current screen
        #    save_frames(f,rgb,dmap,p=folder4screens)
        
        #if
        # --- keyboard commands ---
    # while
   
    # TERMINATE
    print "=== Terminating code! ==="
    # Close carmine context and stop device    
    print "==== Closing carmine context"    
    rgb_stream.stop()
    depth_stream.stop()
    openni2.unload()
    # write last datapoints
    print "==== Writing last portions of data."
    vid_num += 1
    df.loc[c] =[f, run_time,server_time]
    video_rgb.write(rgb)    # write to vid file
    video_depth.write(d4d)  # write to vid file
    video_dmap.write(dmap)
    # Write data to csv
    df.to_csv(folder4csv+"dev"+str(devN)+'_data'+str(vid_num)+'.csv')        
    # release video writers
    print "==== Releasing the video writers"
    video_rgb.release()
    video_depth.release()
    video_dmap.release()
    # Disconnect the client from the server
    print "==== Disconecting client and closing the server"
    clientConnectThread.update_command("close")
    # Release video/image resources
    cv2.destroyAllWindows()
    # print some timing information:
    fps = f/run_time
    print "\nTime metrics for {} frames:" .format(f)
    print ("\ttotal run time is %.2f secs over" %run_time)
    print ("\tfps: %.2f"%fps)
    sys.exit(1)
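The docstring above blames a large memory leak on cv2.VideoWriter objects that are not reliably released. One defensive pattern (a sketch only, not part of the original code) is to wrap each writer in a context manager so release() runs even when the capture loop raises:

from contextlib import contextmanager
import cv2

@contextmanager
def managed_video_writer(path, fourcc, fps, frame_size):
    # Sketch only: guarantees release() runs even if the body raises.
    writer = cv2.VideoWriter(path, fourcc, fps, frame_size)
    try:
        yield writer
    finally:
        writer.release()

# hypothetical usage with the names from the example above:
# with managed_video_writer(folder4frames + "/rgb/dev1rgb0.avi", fourcc, 30, (w, h)) as vw:
#     vw.write(rgb)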
Example #43
depth_stream.start()
color_stream.start()
recorder = depth_stream.get_recoder(sys.argv[1])  # Note that this is misspelled in the API
recorder.attach(color_stream)

delay = int(sys.argv[3])
for i in range(0, delay):
    print "T-minus",delay-i
    time.sleep(1)

numFrames = int(sys.argv[2])
recorder.start()
for i in range(0, numFrames):
    depth_frame = depth_stream.read_frame()
    depth_frame_data = depth_frame.get_buffer_as_uint16()
    color_frame = color_stream.read_frame()
    color_frame_data = color_frame.get_buffer_as_triplet()
    print "Recorded frame",i+1,"of",numFrames
    
print "Saving",sys.argv[1]
recorder.stop()

depth_stream.stop()
color_stream.stop()

openni2.unload()

print "Done!"

def run_old_loop():
    """
    Creates session videos. However, OpenCV video release is not working: HUGE memory
    leak there.
#if __name__ == "__main__":
    #time.sleep(20) # secs pause! for startup
    #devN = 1

    """
    synctype = "relaxed"
    actorname = "patient_0"

    # Flags
    vis_frames = True  # True   # display frames
    save_frames_flag = False  # save all frames
    test_flag = True

    test_frames = 5000000
    fps = 30
    c = 0
    ## Runtime and Controls
    nf = 500  # 172800 = 60*60*24*2; number of video frames in each clip and video
    f = 1  # frame counter
    tic = 0
    run_time = 0
    total_t = 0
    fourcc = cv2.cv.CV_FOURCC('X', 'V', 'I', 'D')

    done = False

    # TCP communication
    # Start the client thread:
    clientConnectThread = client.ClientConnect("connect", "{}".format(devN))
    clientConnectThread.setDaemon(True)
    clientConnectThread.start()  # launching thread
    # time.sleep(1)
    server_time = 0.0
    server_response = "none"
    response = clientConnectThread.get_command()
    if "_" in response:
        server_response, server_time = response.split("_")
    else:
        server_response = response
    # print(server_response, server_time)

    # Create a pandas dataframe to hold the information (index starts at 1)
    cols = ["frameN", "localtime", "servertime"]
    df = pd.DataFrame(columns=cols)
    df.loc[c] = [0, time.time(), server_time]

    # The folders for all data
    folder4frames, folder4csv = createFolders(actorname)
    print "Creating Video Headers"
    # Initialize the videowriter
    vid_num = 0
    video_rgb = cv2.VideoWriter(folder4frames + "/rgb/dev" + str(devN) +
                                "rgb" + str(vid_num) + ".avi",
                                fourcc,
                                fps=fps,
                                frameSize=(w, h))
    video_depth = cv2.VideoWriter(folder4frames + "/depth/dev" + str(devN) +
                                  "depth" + str(vid_num) + ".avi",
                                  fourcc,
                                  fps=fps,
                                  frameSize=(w, h))
    video_dmap = cv2.VideoWriter(folder4frames + "/dmap/dev" + str(devN) +
                                 "dmap" + str(vid_num) + ".avi",
                                 fourcc,
                                 fps=fps,
                                 frameSize=(w, h))

    # Get the first timestamp
    tic = time.time()
    start_t = tic

    # --- main loop ---
    done = False
    while not done:  # view <= nviews
        # RGB-D Streams
        rgb = get_rgb()
        dmap, d4d = get_depth()

        if vis_frames:  # Display the streams
            rgbdm = np.hstack((rgb, d4d, dmap))
            # rgbdm_small = rgbdm # original size
            # rgbdm_small = cv2.resize(rgbdm,(1280,240)) # medium
            # rgbdm_small = cv2.resize(rgbdm,(640,240)) # smallest
            rgbdm_small = cv2.resize(rgbdm, (960, 240))  # smallest
            cv2.imshow("1:4 scale", rgbdm_small)
            # === Keyboard Commands ===
            key = cv2.waitKey(1) & 255
            if key == 27:
                print "Terminating code!"
                done = True
        # Poll the server:
        clientConnectThread.update_command("check")
        response = clientConnectThread.get_command()
        if "_" in response:
            server_response, server_time = response.split("_")
        else:
            server_response = response

        run_time = time.time() - tic
        print "Processing {} session and frame number {}".format(vid_num, f)

        # === check synchronization type
        if synctype == 'strict':
            if server_response == 'save':
                video_rgb.write(rgb)  # --> rgb vid file
                video_depth.write(d4d)  # --> depth vid file
                video_dmap.write(dmap)  # --> dmap vid file
                # Write Datarows
                df.loc[c] = [
                    f,
                    strftime("%a, %d %b %Y %H:%M:%S +0000", localtime()),
                    server_time
                ]
                f += 1
                c += 1
        elif synctype == 'relaxed':
            video_rgb.write(rgb)  # --> rgb vid file
            video_depth.write(d4d)  # --> depth vid file
            video_dmap.write(dmap)  # --> dmap vid file

            # Write Datarows
            df.loc[c] = [f, run_time, server_time]
            f += 1
            c += 1
        else:
            print "synchronization type unknown"

        if test_flag and (f == test_frames):
            print "Terminating code!"
            done = True

        # if np.mod(f,nf) == 0: # close and create new csv and video
        if f == nf:
            df.to_csv(folder4csv + "dev" + str(devN) + '_data' + str(vid_num) +
                      '.csv')
            print "session {} saved".format(vid_num)
            # release video writers
            video_rgb.release()
            video_depth.release()
            video_dmap.release()
            del df
            del video_rgb
            del video_depth
            del video_dmap
            print "\t Cleared the vars and memory and prepping next session"
            vid_num += 1
            # Create new video writers
            video_rgb = cv2.VideoWriter(folder4frames + "/rgb/dev" +
                                        str(devN) + "rgb" + str(vid_num) +
                                        ".avi",
                                        fourcc,
                                        fps=fps,
                                        frameSize=(w, h))
            video_depth = cv2.VideoWriter(folder4frames + "/depth/dev" +
                                          str(devN) + "depth" + str(vid_num) +
                                          ".avi",
                                          fourcc,
                                          fps=fps,
                                          frameSize=(w, h))
            video_dmap = cv2.VideoWriter(folder4frames + "/dmap/dev" +
                                         str(devN) + "dmap" + str(vid_num) +
                                         ".avi",
                                         fourcc,
                                         fps=fps,
                                         frameSize=(w, h))
            # reset pandas dataframe
            df = pd.DataFrame(columns=cols)
            c = 0
            f = 0
            # done = True #stop after the first recording.

        # elif chr(key) =='s':  #s-key to save current screen
        #    save_frames(f,rgb,dmap,p=folder4screens)

        # if
        # --- keyboard commands ---
    # while

    # TERMINATE
    print "=== Terminating code! ==="
    # Close carmine context and stop device
    print "==== Closing carmine context"
    rgb_stream.stop()
    depth_stream.stop()
    openni2.unload()
    # write last datapoints
    print "==== Writing last portions of data."
    vid_num += 1
    df.loc[c] = [f, run_time, server_time]
    video_rgb.write(rgb)  # write to vid file
    video_depth.write(d4d)  # write to vid file
    video_dmap.write(dmap)
    # Write data to csv
    df.to_csv(folder4csv + "dev" + str(devN) + '_data' + str(vid_num) + '.csv')
    # release video writers
    print "==== Releasing the video writers"
    video_rgb.release()
    video_depth.release()
    video_dmap.release()
    # Disconnect the client from the server
    print "==== Disconecting client and closing the server"
    clientConnectThread.update_command("close")
    # Release video/image resources
    cv2.destroyAllWindows()
    # print some timing information:
    fps = f / run_time
    print "\nTime metrics for {} frames:".format(f)
    print("\ttotal run time is %.2f secs over" % run_time)
    print("\tfps: %.2f" % fps)
    sys.exit(1)
Example #45
 def close_camera(self):
     # ditch camera
     openni2.unload()  # and nite.unload()?
     print "closed openni, stopped depth streams"
def main():
	p = argparse.ArgumentParser(formatter_class=argparse.RawDescriptionHelpFormatter, description="")
	p.add_argument('--v', dest = 'video_path', action = 'store', default = '', help = 'path to the *.oni file')
	args = p.parse_args()
	
	#create the directories where the images will be saved, if they do not already exist
	if not os.path.isdir(DIR1):
		os.mkdir(DIR1)
	
	if not os.path.isdir(DIR2):
		os.mkdir(DIR2)
		
	if not os.path.isdir(DIR3):
		os.mkdir(DIR3)	
		
	if not os.path.isdir(DIR4):
		os.mkdir(DIR4)
		
	#initialize OpenNI and open the video streams
	openni2.initialize()
	dev = openni2.Device.open_file(args.video_path)
	depth_stream = dev.create_depth_stream()
	color_stream = dev.create_color_stream()
	depth_stream.start()
	color_stream.start()
    	
	#holds the timestamp of the previous frame
	t_prev = -2
	#holds the timestamp of the current frame
	t_curr = -1
	#index incremented every time the frames are saved
	i = 0
	#index counting the frames read from the video stream
	frame_count = 0
	
	while (t_curr > t_prev):
		#grab the frame arrays from the RGB and Depth streams
		frame_depth = depth_stream.read_frame()
		frame_color = color_stream.read_frame()
		
		#update the timestamps
		t_prev = t_curr
		t_curr = frame_depth.timestamp
		
		frame_count += 1
		
		if frame_count % skip == 1:
			
			
			print frame_count
			
			#type conversion
			frame_depth_data = frame_depth.get_buffer_as_uint16()
			frame_color_data = frame_color.get_buffer_as_uint8()
			#convert the arrays into a format usable by OpenCV
			depth_array = np.ndarray((frame_depth.height, frame_depth.width), dtype = np.uint16, buffer = frame_depth_data)
			color_array = np.ndarray((frame_color.height, frame_color.width, 3), dtype = np.uint8, buffer = frame_color_data)
			color_array = cv2.cvtColor(color_array, cv2.COLOR_BGR2RGB)
			
			#cv2.imshow("RGB", color_array)
			#cv2.imshow("Depth", depth_array)
			
			#if this is the first frame, it can be taken as the depth-channel background
			if frame_count == 1:
				depth_array_back = np.ndarray((frame_depth.height, frame_depth.width), dtype = np.uint16, buffer = frame_depth_data)
				depth_array_back = depth_array
			
			#remove the black areas of the depth frame caused by depth-sensor errors
			depth_array_clean = removeBlackPixels(depth_array)
			
			#the foreground is obtained by subtracting the background from the current frame
			depth_array_fore = cv2.absdiff(depth_array_clean, depth_array_back)
			#extract the mask from the depth foreground
			mask = extractMask(depth_array_fore)
			h, x, y = getMaxHeight(depth_array_fore, mask)
			#if the maximum height in the depth frame exceeds the threshold, save the images
			if (h>MIN_HEIGHT):
				i+=1				
				os.chdir(DIR1)
				cv2.imwrite(str(i)+".png",color_array)
				os.chdir("..")
				os.chdir(DIR2)
				cv2.imwrite(str(i)+".png",depth_array)
				os.chdir("..")
				os.chdir(DIR3)
				cv2.imwrite(str(i)+".png",mask)
				os.chdir("..")
				os.chdir(DIR4)
				cv2.imwrite(str(i)+".png",depth_array_clean)
				os.chdir("..")
				
			ch = 0xFF & cv2.waitKey(1)
			if ch == 27:
				break	
	
	depth_stream.stop()
	color_stream.stop()
	openni2.unload()
	cv2.destroyAllWindows()
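The loop above relies on three helpers (removeBlackPixels, extractMask, getMaxHeight) that are defined elsewhere in the repository. One plausible set of implementations, inferred only from how they are used here and therefore an assumption rather than the original code:

import numpy as np
import cv2

def removeBlackPixels(depth_array):
    # Assumed behaviour: zero depth readings are sensor errors; replace them with
    # the maximum depth so they do not show up as tall foreground after subtraction.
    cleaned = depth_array.copy()
    cleaned[cleaned == 0] = cleaned.max()
    return cleaned

def extractMask(depth_foreground, lo=150, hi=2500):
    # Assumed behaviour: keep foreground pixels inside a plausible person band (mm).
    return cv2.inRange(depth_foreground, lo, hi)

def getMaxHeight(depth_foreground, mask):
    # Assumed behaviour: maximum foreground value inside the mask and its pixel position.
    _, h, _, (x, y) = cv2.minMaxLoc(depth_foreground, mask=mask)
    return h, x, y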
                np.savetxt(
                    "/home/julian/Documents/Hinvmatrix_ir_to_rgb_" + str(num) +
                    ".out", Hinv)
                leave = True

    # display and write video
    disp = np.hstack((depth_place, ir_place, rgb_frame))
    cv2.imshow("live", disp)
    rgb_vid.write(rgb_frame)
    ir_vid.write(ir_frame)
    depth_vid.write(depth_frame)
    np.save(ir_name + str(f), full_ir)
    np.save(depth_name + str(f), full_depth)

    print("frame No.", f)
    if k == 27:  # esc key
        done = True

# release resources and destroy windows
rgb_stream.stop()
depth_stream.stop()
openni2.unload()
rgb_vid.release()
ir_vid.release()
depth_vid.release()
cv2.destroyWindow("vid")
cv2.destroyWindow("ir")
cv2.destroyWindow("rgb")
cv2.destroyWindow("live")
print("Completed video generation using {} codec".format(fourcc))
Example #48
def main():

	p = argparse.ArgumentParser(formatter_class=argparse.RawDescriptionHelpFormatter, description="")
	p.add_argument('--v', dest = 'video_path', action = 'store', default = '', help = 'path to the *.oni file')
	args = p.parse_args()
	
	#initialize OpenNI and open the video streams
	openni2.initialize()
	dev = openni2.Device.open_file(args.video_path)
	depth_stream = dev.create_depth_stream()
	color_stream = dev.create_color_stream()
	depth_stream.start()
	color_stream.start()
	
	#extract the person's id from the .oni file name
	VideoId=args.video_path.split(".")[0]
	#file with the maximum-height points of the frames containing the subject
	tracking_file_color = open(VideoId + "_color" + EXT,"w")
	#file with the maximum-height points of all the frames in the video
	tracking_file_all = open(VideoId + "_depth" + EXT,"w")
    
	#holds the timestamp of the previous frame
	t_prev = -2
	#holds the timestamp of the current frame
	t_curr = -1
	#index incremented every time the frames are saved
	i = 0
	#index counting the frames read from the video stream
	frame_count = 0
	
	#bookkeeping variables for the latest changes (multi-person handling)
	ultimopassaggio=0
	newid=True
	contperid=0
	
	while (True):
		#grab the frame arrays from the RGB and Depth streams
		frame_depth = depth_stream.read_frame()
		frame_color = color_stream.read_frame()
		#type conversion
		frame_depth_data = frame_depth.get_buffer_as_uint16()
		frame_color_data = frame_color.get_buffer_as_uint8()
		#convert the arrays into a format usable by OpenCV
		depth_array = np.ndarray((frame_depth.height, frame_depth.width), dtype = np.uint16, buffer = frame_depth_data)
		color_array = np.ndarray((frame_color.height, frame_color.width, 3), dtype = np.uint8, buffer = frame_color_data)
		color_array = cv2.cvtColor(color_array, cv2.COLOR_BGR2RGB)
		frame_count += 1
		
		#update the timestamps
		t_prev = t_curr
		t_curr = frame_color.timestamp
		if (t_curr < t_prev):
			break
		
		#if this is the first frame, it can be taken as the depth-channel background
		if frame_count == 1:
			depth_array_back = np.ndarray((frame_depth.height, frame_depth.width), dtype = np.uint16, buffer = frame_depth_data)
			depth_array_back = depth_array
			depth_array_back = removeBlackPixels(depth_array_back)

		depth_array = removeBlackPixels(depth_array)
		
		depth_array_fore = cv2.absdiff(depth_array, depth_array_back)

		#extract the mask from the depth foreground
		mask = extractMask(depth_array_fore)

		h, x, y = getMaxHeight(depth_array_fore, mask)

		#if the maximum-height point in the depth frame exceeds the threshold, the tracking data is saved
		if (h>MIN_HEIGHT):
			#multi-person handling
			if (newid==True):
				contperid+=1
				newid=False
			
			
			cv2.circle(depth_array,tuple((x,y)), 5, 65536, thickness=1)
			
			line_to_write = VideoId+";"+  str("{:03d}".format(contperid)) +";"+str(frame_count)+";"+str(frame_depth.timestamp)+";"+str(h)+";"+str(x)+";"+str(y)+"\n"
			print line_to_write
			tracking_file_all.write(line_to_write)
			line_to_write_color = VideoId+";"+ str("{:03d}".format(contperid))+";"+str(frame_count)+";"+str(frame_color.timestamp)+"\n"
			tracking_file_color.write(line_to_write_color)
			
			cv2.circle(depth_array,tuple((x,y)), 5, 65536, thickness=7)
			ultimopassaggio=frame_count+3 #3 is how many frames must pass after the last person has gone by
			
		else:
			line_to_write =  VideoId+";"+ "NULL"+";"+ str(frame_count)+";"+str(frame_depth.timestamp)+";"+ "NULL"+";"+ "NULL"+";"+ "NULL"+"\n"
			print line_to_write
			

			tracking_file_all.write(line_to_write)
			line_to_write_color = VideoId+";"+ "NULL" +";"+str(frame_count)+";"+str(frame_color.timestamp)+"\n"
			tracking_file_color.write(line_to_write_color)
			#multi-person handling
			if (frame_count>ultimopassaggio):
				newid=True;
		
		#cv2.imshow("RGB", color_array)
		cv2.imshow("Depth", depth_array)
			
		ch = 0xFF & cv2.waitKey(1)
		if ch == 27:
			break	
	
	tracking_file_color.close()
	tracking_file_all.close()
	depth_stream.stop()
	color_stream.stop()
	openni2.unload()
	cv2.destroyAllWindows()
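The newid / ultimopassaggio bookkeeping above starts a new person ID whenever more than three frames pass without a detection above MIN_HEIGHT. A compact sketch of the same idea in isolation; the function name, default threshold and gap are assumptions for illustration:

def assign_person_ids(max_heights, min_height=2000, gap=3):
    # max_heights: per-frame maximum height above the background (mm).
    # Returns one entry per frame: a person ID, or None when nothing is detected.
    ids, current_id, last_hit = [], 0, None
    for frame_idx, h in enumerate(max_heights):
        if h > min_height:
            if last_hit is None or frame_idx - last_hit > gap:
                current_id += 1  # a new person has entered the scene
            last_hit = frame_idx
            ids.append(current_id)
        else:
            ids.append(None)
    return ids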