Example #1
def get_frame(vid_name,frame_id):
    # get the oni frame
    openni2.initialize('/usr/lib64/')
    dev = openni2.Device.open_file(vid_name)
    print (dev.get_device_info())
    # get and start the streams
    rgb_stream = dev.create_color_stream()
    depth_stream = dev.create_depth_stream()
    depth_stream.start()
    rgb_stream.start()
    # seek
    pbs = openni2.PlaybackSupport(dev)
    print("total frames = " + str(pbs.get_number_of_frames(depth_stream)))
    pbs.set_repeat_enabled(True)
    pbs.set_speed(0)

    # read the frame
    for i in range(0,frame_id+1):
        frame = depth_stream.read_frame()
    frame_data = frame.get_buffer_as_uint16()
    depth_stream.stop()
    # to numpy!
    depth = np.array(frame_data)
    depth = np.reshape(depth,[frame.height,frame.width])

    # read the rgb
    for i in range(0,frame_id+1):
        rgb = rgb_stream.read_frame()
    rgb = np.array(rgb.get_buffer_as_uint8())
    rgb = np.reshape(rgb,[frame.height,frame.width,3])
        
    #code.interact(local=locals())
    return depth, rgb
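A minimal usage sketch for get_frame; the 'capture.oni' path and frame index are hypothetical, and some primesense builds expect the path as a bytes object:

import cv2
from primesense import openni2

depth, rgb = get_frame('capture.oni', 10)   # hypothetical recording, frame 10
print(depth.shape, rgb.shape)                # e.g. (480, 640) and (480, 640, 3)
cv2.imshow('rgb', cv2.cvtColor(rgb, cv2.COLOR_RGB2BGR))  # OpenCV windows expect BGR
cv2.waitKey(0)
openni2.unload()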
Example #2
 def __init__(self):
     self.path = os.getcwd()
     openni2.initialize()
     self.device = openni2.Device.open_any()
     self.device.set_depth_color_sync_enabled(True)
     self.start_depth_stream()
     self.start_rgb_stream()
Example #3
 def __init__(self,oni=None):
     openni2.initialize()
     if oni is not None:
         self.dev = openni2.Device.open_file(oni)
     else:
         self.dev = openni2.Device.open_any()
     print(self.dev.get_sensor_info(openni2.SENSOR_DEPTH))
Example #4
def show(video_path):
    """
    Shows depth map
    @param video_path: contains the ONI file path
    """
    try:
        openni2.initialize()
        dev = openni2.Device.open_file(video_path)
        print(dev.get_sensor_info(openni2.SENSOR_DEPTH))
    except (RuntimeError, TypeError, NameError) as err:
        print(err)
        return

    depth_stream = dev.create_depth_stream()
    depth_stream.start()
    while True:
        frame_depth = depth_stream.read_frame()
        frame_depth_data = frame_depth.get_buffer_as_uint16()
        depth_array = np.ndarray((frame_depth.height, frame_depth.width),
                                 dtype=np.uint16,
                                 buffer=frame_depth_data) / 2300.  # scale raw depth (mm) to roughly 0.-1. for display
        cv2.imshow('Depth', depth_array)

        ch = 0xFF & cv2.waitKey(1)
        if ch == 27:
            break

    depth_stream.stop()
    openni2.unload()
    cv2.destroyAllWindows()
Example #5
def kinect3DPlotDemo():
    fig = plt.figure()
    ax = fig.add_subplot(111, projection='3d')
    plt.ion()
    plt.show()

    openni2.initialize()
    dev = openni2.Device.open_any()
    ds = dev.create_depth_stream()
    ds.start()

    while True:
        f = ds.read_frame().get_buffer_as_uint16()
        a = np.ndarray((480,640),dtype=np.uint16,buffer=f)
        ipts = []
        for y in range(180, 300, 20):
            for x in range(260, 380, 20):
                ipts.append((x, y, a[y][x]))
        m = np.matrix(ipts).T
        fpts = rwCoordsFromKinect(m) #get real world coordinates
        plt.cla()
        ax.scatter([pt[0] for pt in fpts], [pt[1] for pt in fpts], [pt[2] for pt in fpts], color='r')
        plt.draw()

        p = planeFromPts(np.matrix(random.sample(fpts, 3))) #fit a plane to these points
        print(p)
        plt.pause(.1)
Example #6
 def __init__(self, data):
     l = data[0][0].shape
     self.data = [None]*l[0]*4
     self.name = []
     for i in range(l[1]):
         name = str(i+1).zfill(2)
         for k in ('emg ', 'accX ', 'accY ', 'accZ '):
             self.name.append(k + name)
     self.data[::4] = data[0][0].tolist()
     for i in range(l[0]):
         for k in range(1,4):
             self.data[i*4+k] = data[0][1][i+k-1].tolist()
     self.triggers = data[1]
     
     if len(data) == 3:
         oni.initialize()
         try:
             self.dev = oni.Device.open_file(data[2])
         except Exception:
             print("could not open oni-file, no oni-file will be loaded")
             return
         self.depth_stream = self.dev.create_depth_stream()
         self.clr_stream = self.dev.create_color_stream()
         self.frames = {}
         self.player = oni.PlaybackSupport(self.dev)
Example #7
def get_number_of_frames(vid_name):
    openni2.initialize('/usr/lib64/')
    dev = openni2.Device.open_file(vid_name)
    # get and start the streams
    depth_stream = dev.create_depth_stream()
    # seek
    pbs = openni2.PlaybackSupport(dev)
    return pbs.get_number_of_frames(depth_stream)
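A quick check against a recording might look like this; the .oni path is hypothetical:

print('frames in recording:', get_number_of_frames('capture.oni'))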
Example #8
    def __init__(self):
        super(SkeletonInput, self).__init__()
        if user_tracker is None:
            openni2.initialize()
            nite2.initialize()

        self.smoothed_inputs = SmoothInputs()
        self.reset_user_tracker()
Example #9
def cmd(onifile, yamlfile):
    openni2.initialize()

    device = NiDevice(onifile, yamlfile)
    device.initialize()
    device.update()

    openni2.unload()
Example #10
 def __init__( self ):
     openni2.initialize()
     self.dev = openni2.Device.open_any()
     self.depth_stream = self.dev.create_depth_stream()
     self.col = self.dev.create_color_stream()
     self.index = 0
     self.depth_stream.start()
     self.col.start()
Example #11
    def __init__(self):
        path = '/home/pi/devel/OpenNI2/Packaging/OpenNI-Linux-Arm-2.2/Redist'
        openni2.initialize(path)  # can also accept the path of the OpenNI redistribution
        dev = openni2.Device.open_any()
        print(dev.get_sensor_info(openni2.SENSOR_DEPTH))

        self.depth_stream = dev.create_depth_stream()
        self.depth_stream.set_video_mode(
            c_api.OniVideoMode(pixelFormat=c_api.OniPixelFormat.ONI_PIXEL_FORMAT_DEPTH_1_MM, resolutionX=320,
                               resolutionY=240, fps=30))
        self.depth_stream.start()
Example #12
def openDevice(video_path):
    try:
        openni2.initialize()
        dev = openni2.Device.open_file(video_path)
        pbs = openni2.PlaybackSupport(dev)

        pbs.set_repeat_enabled(False)
        pbs.set_speed(-1.0)

        return dev
    except (RuntimeError, TypeError, NameError) as err:
        print(err)
Example #13
    def __init__(self):
        rp.wait_for_service('faster_rcnn')

        openni2.initialize()

        self.dev = openni2.Device.open_any()
        print(self.dev.get_sensor_info(openni2.SENSOR_COLOR))

        self.color_stream = self.dev.create_color_stream()

        try:
            self._client = rp.ServiceProxy('faster_rcnn', tms_ss_rcnn.srv.obj_detection)
        except rp.ServiceException as e:
            print('Service call failed: %s' % e)
Example #14
    def setup_camera(self):
        #initialize the camera
        openni2.initialize(sys.path[-1])
        #AND/OR initialize niTE? (also inits openni)
        #find device
        self.device = openni2.Device.open_any()
        #maybe better error checker available
        if not self.device:
            print("Error finding device. Check OpenNI docs.")
            sys.exit()

        #debugging: print the options: pixel types, resolutions
        #self.print_video_options(self.device, openni2.SENSOR_DEPTH)
        #self.print_video_options(self.device, openni2.SENSOR_COLOR)

        if self.both_streams:
            #make sure color and depth are synced (only matters when grabbing both streams)
            if not self.device.get_depth_color_sync_enabled():
                self.device.set_depth_color_sync_enabled(True)

            #also: registration mode: could not find parameters that worked here...
            if self.device.get_depth_color_sync_enabled():
                print("Color-Depth Sync is enabled")
            else:
                print("Sync is disabled")
                self.device.set_depth_color_sync_enabled(True)

            #registration: not correctly implemented in python bindings, but if they fix it put it here

            #create both streams: perhaps this should be done later, close to Start of streaming
            self.depth_stream = self.device.create_depth_stream()

            #set pixel format and resolution for both streams
            if self.size == 'quarter':
                self.depth_stream.set_video_mode(c_api.OniVideoMode(pixelFormat = self.depth_type, resolutionX = 320, resolutionY = 240, fps = 30))
            elif self.size == 'full':
                self.depth_stream.set_video_mode(c_api.OniVideoMode(pixelFormat = self.depth_type, resolutionX = 640, resolutionY = 480, fps = 30))
            else:
                print "No recognizeable video resolution given, defaulting to QVGA ('quarter')"
                self.depth_stream.set_video_mode(c_api.OniVideoMode(pixelFormat = self.depth_type, resolutionX = 320, resolutionY = 240, fps = 30))

        #color only by default
        self.color_stream = self.device.create_color_stream()
        if self.size == 'quarter':
            self.color_stream.set_video_mode(c_api.OniVideoMode(pixelFormat = self.color_type, resolutionX = 320, resolutionY = 240, fps = 30))
        elif self.size == 'full':
            self.color_stream.set_video_mode(c_api.OniVideoMode(pixelFormat = self.color_type, resolutionX = 640, resolutionY = 480, fps = 30))
        else:
            print "No recognizeable video resolution given, defaulting to QVGA ('quarter')"
            self.color_stream.set_video_mode(c_api.OniVideoMode(pixelFormat = self.color_type, resolutionX = 320, resolutionY = 240, fps = 30))
Example #15
def initialize(dll_directories=_default_dll_directories):
    global _nite2_initialized
    global loaded_dll_directory
    if _nite2_initialized:
        return
    if isinstance(dll_directories, str):
        dll_directories = [dll_directories]

    if not openni2.is_initialized():
        openni2.initialize()

    if loaded_dll_directory:
        c_api.niteInitialize()
        _nite2_initialized = True
        return

    found = False
    prev = os.getcwd()
    exceptions = []
    dll_directories = [
        os.path.normpath(os.path.abspath(d)) for d in dll_directories
    ]

    for dlldir in dll_directories:
        if not os.path.isdir(dlldir):
            exceptions.append((dlldir, "Directory does not exist"))
            continue
        fullpath = os.path.join(dlldir, _dll_name)
        if not os.path.isfile(fullpath):
            exceptions.append((fullpath, "file does not exist"))
            continue
        try:
            os.chdir(dlldir)
            c_api.load_dll(fullpath)
            c_api.niteInitialize()
        except Exception as ex:
            exceptions.append((fullpath, ex))
        else:
            found = True
            loaded_dll_directory = dlldir
            break

    os.chdir(prev)
    if not found:
        raise InitializationError(
            "NiTE2 could not be loaded:\n    %s" % ("\n    ".join(
                "%s: %s" % (dir, ex) for dir, ex in exceptions)), )

    _nite2_initialized = True
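A short usage sketch for the loader above; the Redist directory is hypothetical, and InitializationError is the exception the function raises when no directory works:

try:
    initialize(['/opt/NiTE-2.2/Redist'])  # hypothetical NiTE2 Redist directory
except InitializationError as e:
    print('NiTE2 not available:', e)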
Example #16
def main():
        """The entry point"""
        try:
            openni2.initialize()     # can also accept the path of the OpenNI redistribution
        except Exception:
            print("Device not initialized")
            return
        
        try:
            dev = openni2.Device.open_any()
            write_files(dev)
        except Exception:
            print("Unable to open the device")
            
        openni2.unload()
Example #17
    def __init__(self, *args, **kwargs):
        app.Canvas.__init__(self, *args, **kwargs)
        self.program = gloo.Program(self.read_shader('1.vert'), self.read_shader('skeleton_video.frag'))

        # Fill screen with single quad, fragment shader does all the real work
        self.program["position"] = [(-1, -1), (-1, 1), (1, 1),
                                    (-1, -1), (1, 1), (1, -1)]
        self.program["frame"] = gloo.Texture2D(shape=(480, 640, 2), format='luminance_alpha')

        width, height = self.physical_size
        gloo.set_viewport(0, 0, width, height)
        self.program['resolution'] = [width, height]
        openni2.initialize()
        nite2.initialize()
        self.user_tracker = nite2.UserTracker(False)

        gloo.set_clear_color(color='black')
        self._timer = app.Timer('auto', connect=self.update, start=True)
        self.show()
Example #18
def kinectImageDemo():
    openni2.initialize()
    dev = openni2.Device.open_any()
    ds = dev.create_depth_stream()
    ds.start()
    plt.ion()

    #get initial image
    f = ds.read_frame().get_buffer_as_uint16()
    a = np.ndarray((480,640),dtype=np.uint16,buffer=f)
    im = plt.imshow(a)
    plt.show()

    while True:
        f = ds.read_frame().get_buffer_as_uint16()
        a = np.ndarray((480,640),dtype=np.uint16,buffer=f)
        im.set_data(a) #update
        plt.draw()
        plt.pause(0.01) #allow time for refresh
Example #19
    def test_openni2_bindings(self):
        if os.path.exists(self.GEN_ONI):
            os.unlink(self.GEN_ONI)
        subprocess.check_call(["python", "../bin/build_openni.py"])
        self.assertTrue(os.path.exists(self.GEN_ONI))
        
        from primesense import _openni2
        from primesense import openni2
        
        openni2.initialize()
        ver = openni2.get_version()
        openni2.unload()

        self.assertEqual(ver.major, openni2.c_api.ONI_VERSION_MAJOR)
        self.assertEqual(ver.minor, openni2.c_api.ONI_VERSION_MINOR)

        h_file = os.path.join(config.get("headers", "openni_include_dir"), "OpenNI.h")

        self.check_unexposed_functions(openni2, _openni2, h_file, ["oniGetExtendedError"])
        self.check_missing_names_by_prefix(openni2, h_file, "DEVICE_PROPERTY_", "ONI_")
        self.check_missing_names_by_prefix(openni2, h_file, "STREAM_PROPERTY_", "ONI_")
Example #20
def open_kinect():
    """Open a connection to the first Kinect found, and
    then start capturing depth and color images. Returns handles
    to the depth and color streams."""

    # initialise openNI
    openni_path = r"C:\Program Files\OpenNI2\Redist"
    #openni_path = r"/Library/OpenNI-MacOSX-x64-2.2/Redist"
    openni2.initialize(openni_path)     # can also accept the path of the OpenNI redistribution

    #devs = openni2.Device.enumerateDevices()
    #print devs
    # open first Kinect
    dev = openni2.Device.open_any()

    # connect to the depth and color cameras
    depth_stream = dev.create_depth_stream()

    # start the stream capture
    depth_stream.start()
    return depth_stream
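A usage sketch for open_kinect; with the default video mode the depth values are typically millimetres, though that depends on the device:

import numpy as np

depth_stream = open_kinect()
frame = depth_stream.read_frame()
depth = np.ndarray((frame.height, frame.width), dtype=np.uint16,
                   buffer=frame.get_buffer_as_uint16())
print('center depth:', depth[frame.height // 2, frame.width // 2])
depth_stream.stop()
openni2.unload()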
Example #21
def load_video(vid_name):
    depths = []
    rgbs = []
    
    # get the oni frame
    openni2.initialize('/usr/lib64/')
    dev = openni2.Device.open_file(vid_name)
    print (dev.get_device_info())
    # get and start the streams
    rgb_stream = dev.create_color_stream()
    depth_stream = dev.create_depth_stream()
    depth_stream.start()
    rgb_stream.start()
    # seek
    pbs = openni2.PlaybackSupport(dev)
    n_frames = pbs.get_number_of_frames(depth_stream)
    print("total frames = " + str(n_frames))
    pbs.set_repeat_enabled(True)
    pbs.set_speed(0)

    # read the frame
    for i in range(0,n_frames):
        print("Depth {0} of {1}".format(i,n_frames))
        frame = depth_stream.read_frame()
        frame_data = frame.get_buffer_as_uint16()
        #depth_stream.stop()
        # to numpy!
        depth = np.array(frame_data)
        depth = np.reshape(depth,[frame.height,frame.width])
        depths.append(depth)

        print("RGB {0} of {1}".format(i,n_frames))
        rgb = rgb_stream.read_frame()
        rgb = np.array(rgb.get_buffer_as_uint8())
        rgb = np.reshape(rgb,[frame.height,frame.width,3])
        rgbs.append(rgb)
                
    #code.interact(local=locals())
    return depths, rgbs
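A usage sketch, assuming numpy is imported as np and 'capture.oni' is a hypothetical recording:

depths, rgbs = load_video('capture.oni')
print(len(depths), 'frames loaded')
depth_video = np.stack(depths)   # (n_frames, height, width) array of raw uint16 depth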
Example #22
 def __init__(self, filename):
     oni.initialize()
     self.filename = filename
     self.dev = oni.Device.open_file(filename)
     
     self.player = oni.PlaybackSupport(self.dev)
     self.player.set_speed(-1)
     self.depth_stream = self.dev.create_depth_stream()
     self.clr_stream = self.dev.create_color_stream()
     self.depth_stream.start()
     self.clr_stream.start()
     
     self.prep = Prepare(self)
     self.skeleton = None
     
     self.pause = 5
     self.history = StateHistory()
     self.currentFrameIdx = 0
     self.drag = False
     
     self.adjustment = None
     for x in self.history.states.keys():
         self.history.states[x] = [None]*self.player.get_number_of_frames(self.depth_stream)
Example #23
def split(video_path):
    """
    Split the ONI file into RGB and depth maps and shows using two separate windows
    @param video_path: contains the ONI file path
    """
    openni2.initialize()
    dev = openni2.Device.open_file(video_path)
    print(dev.get_sensor_info(openni2.SENSOR_DEPTH))
    depth_stream = dev.create_depth_stream()
    color_stream = dev.create_color_stream()
    depth_stream.start()
    color_stream.start()
    while True:
        frame_depth = depth_stream.read_frame()
        frame_color = color_stream.read_frame()

        frame_depth_data = frame_depth.get_buffer_as_uint16()
        frame_color_data = frame_color.get_buffer_as_uint8()

        depth_array = np.ndarray((frame_depth.height, frame_depth.width), dtype=np.uint16, buffer=frame_depth_data)
        color_array = np.ndarray((frame_color.height, frame_color.width, 3), dtype=np.uint8, buffer=frame_color_data)
        color_array = cv2.cvtColor(color_array, cv2.COLOR_BGR2RGB)

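        # cv2.imwrite stores a uint16 array as a 16-bit PNG, so the raw depth values are preserved on disk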
        cv2.imwrite("./depth/depth_" + str("{:020d}".format(frame_depth.timestamp)) + ".png", depth_array)
        cv2.imwrite("./color/color_" + str("{:020d}".format(frame_color.timestamp)) + ".png", color_array)

        cv2.imshow("depth", depth_array)
        cv2.imshow("color", color_array)

        ch = 0xFF & cv2.waitKey(1)
        if ch == 27:
            break

    depth_stream.stop()
    color_stream.stop()
    openni2.unload()
    cv2.destroyAllWindows()
Example #24
    def __init__(self, *args, **kwargs):
        app.Canvas.__init__(self, *args, **kwargs)
        self.raw_depth_program = gloo.Program(read_shader('1.vert'), read_shader('skeleton_video.frag'))
        self.skeleton_program = gloo.Program(read_shader('skeleton_bones.vert'), read_shader('skeleton_bones.frag'))

        # Fill screen with single quad, fragment shader does all the real work
        self.raw_depth_program["position"] = [(-1, -1), (-1, 1), (1, 1),
                                    (-1, -1), (1, 1), (1, -1)]
        self.raw_depth_program["frame"] = gloo.Texture2D(shape=(480, 640, 2), format='luminance_alpha')

        width, height = self.physical_size
        gloo.set_viewport(0, 0, width, height)
        self.raw_depth_program['resolution'] = [width, height]

        # Set uniform and attribute
        self.skeleton_program['a_position'] = gloo.VertexBuffer()

        openni2.initialize()
        nite2.initialize()
        self.user_tracker = nite2.UserTracker(False)

        gloo.set_state(clear_color='black', blend=True, blend_func=('src_alpha', 'one_minus_src_alpha'))
        self._timer = app.Timer('auto', connect=self.update, start=True)
        self.show()
Example #25
def populate(video_path):
        """
        This script populates Video,Depth_Frame and RGB_frame tables
        @param video_path: contains the ONI file path
        """

        ##### obtains Video id ####################
        # preparing a cursor object
        cursor = db.cursor()
        query ="SELECT COUNT(*) from VIDEO"
        cursor.execute(query)             #executes query
        res=cursor.fetchone()
        total_rows=res[0]
        videoid=total_rows+1

        #query for VIDEO table####################

        query = """INSERT INTO VIDEO(VIDEOID,
         VIDEO)
         VALUES (%s, %s)"""
        video_data=(videoid,video_path)
        try:
           # Execute the SQL command
           cursor.execute(query,video_data)
           # Commit changes in the database
           db.commit()
        except Exception:
           # Rollback in case there is any error
           db.rollback()

        ###########################################

        openni2.initialize()
        dev = openni2.Device.open_file(video_path)
        print (dev.get_sensor_info(openni2.SENSOR_DEPTH))
        depth_stream = dev.create_depth_stream()
        color_stream = dev.create_color_stream()
        depth_stream.start()
        color_stream.start()
        ##### getting first frame timestamp ##########################
        first_frame=depth_stream.read_frame()
        frame1_timestamp = datetime.datetime.fromtimestamp(float(first_frame.timestamp)/1000000.0  )

        ### we want to start counting from  2015-06-01 01:00:00 ####
        frame1_timestamp += datetime.timedelta(days=((365*45)+162))

        ##### initialize a frame counter and the variable flag  #########
        frameno = 0
        flag= False
        while True:
                frame_depth = depth_stream.read_frame()
                frame_color = color_stream.read_frame()

                frame_depth_data = frame_depth.get_buffer_as_uint16()
                frame_color_data = frame_color.get_buffer_as_uint8()

                depth_array = np.ndarray((frame_depth.height, frame_depth.width), dtype = np.uint16, buffer = frame_depth_data)
                color_array = np.ndarray((frame_color.height, frame_color.width, 3), dtype = np.uint8, buffer = frame_color_data)
                color_array = cv2.cvtColor(color_array, cv2.COLOR_BGR2RGB)

                depth_timestamp = datetime.datetime.fromtimestamp(float(frame_depth.timestamp)/1000000.0  )
                color_timestamp = datetime.datetime.fromtimestamp(float(frame_color.timestamp)/1000000.0  )
                depth_timestamp += datetime.timedelta(days=((365*45)+162))
                color_timestamp += datetime.timedelta(days=((365*45)+162))

                cv2.imshow("depth", depth_array)
                cv2.imshow("color", color_array)
                ##### counting frames #############
                frameno = frameno + 1

                #### this prevents the video from looping  ######
                if (frame1_timestamp == depth_timestamp and frameno != 1):
                    flag=True


                if (flag == False):
                    ### query for depth_frame table ####################
                    query_3 = """INSERT INTO DEPTH_FRAME(VIDEOID,FRAMENO,TIMESTAMP)
                    VALUES (%s, %s, %s)"""
                    depth_dbdata=(videoid,frameno,depth_timestamp)

                    ### query for rgb_frame table ####################
                    query_4 = """INSERT INTO RGB_FRAME(VIDEOID,FRAMENO,TIMESTAMP)
                    VALUES (%s, %s, %s)"""
                    rgb_dbdata=(videoid,frameno,color_timestamp)

                    try:
                       # Execute the SQL command
                       cursor.execute(query_3,depth_dbdata)
                       cursor.execute(query_4,rgb_dbdata)
                       # Commit changes in the database
                       db.commit()
                    except Exception:
                       # Rollback in case there is any error
                       db.rollback()

                ch = 0xFF & cv2.waitKey(1)
                if (ch == 27 or flag == True):
                    break

        depth_stream.stop()
        color_stream.stop()
        openni2.unload()
        cv2.destroyAllWindows()

        # disconnect from server
        db.close()
Example #26
"""Usage examples of the Python_OpenNI2 wrapper

The examples are taken from https://github.com/elmonkey/Python_OpenNI2/tree/master/samples.
"""

from primesense import openni2
from primesense.utils import InitializationError

IS_INITIALIZED = False

try:
    openni2.initialize("/usr/local/src/OpenNI-Linux-Arm-2.3/Redist")
    if openni2.is_initialized():
        IS_INITIALIZED = True
        print("OpenNI2 is initialized")
    else:
        print("OpenNI2 is not initialized")
except InitializationError as err:
    print("OpenNI2 is not initialized", err)
Example #27
# ======= Noise Reduction Parameters =======
# Easing Parameter

# ===================================================
# ================= IMPLEMENTATION ==================
# ===================================================

# Import modules of interest
import cv2
import numpy as np
from primesense import openni2
from primesense import _openni2 as c_api

# Stream parameters -- assumed values; the original snippet's definitions were elided
width, height, frameRate = 640, 480, 30

# Initialize OpenNI with its libraries
openni2.initialize("Redist/")  #The OpenNI2 Redist folder

# Open a device
dev = openni2.Device.open_any()
# Check to make sure it's not None

# Open two streams: one for color and one for depth
depth_stream = dev.create_depth_stream()
depth_stream.start()
depth_stream.set_video_mode(
    c_api.OniVideoMode(
        pixelFormat=c_api.OniPixelFormat.ONI_PIXEL_FORMAT_DEPTH_100_UM,
        resolutionX=width,
        resolutionY=height,
        fps=frameRate))
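With ONI_PIXEL_FORMAT_DEPTH_100_UM each pixel counts 100 um (0.1 mm) units, so converting a raw reading to millimetres is a divide by ten; a tiny sketch on a hypothetical value:

import numpy as np

raw = np.array([[12345]], dtype=np.uint16)  # hypothetical raw 100 um reading
print(raw / 10.0)                           # -> 1234.5 mm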
Example #28
from primesense import openni2
from primesense import _openni2 as c_api
import numpy as np
import cv2
openni2.initialize('/home/spadesk/library/OpenNI2/Bin/x64-Release')
dev = openni2.Device.open_any()
depth_stream = dev.create_depth_stream()
depth_stream.start()
depth_stream.set_video_mode(
    c_api.OniVideoMode(
        pixelFormat=c_api.OniPixelFormat.ONI_PIXEL_FORMAT_DEPTH_100_UM,
        resolutionX=320,
        resolutionY=240,
        fps=30))
while True:
    frame = depth_stream.read_frame()
    frame_data = frame.get_buffer_as_uint8()
    img = np.frombuffer(frame_data, dtype=np.uint16)
    img.shape = (1, 240, 320)
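    # collapsing the leading singleton axis yields a (240, 320) array for display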
    img = np.concatenate(img)
    # img = np.concatenate((img, img, img), axis=0)
    # img = np.swapaxes(img, 0, 2)
    # img = np.swapaxes(img, 0, 1)
    cv2.imshow("image", img)
    if cv2.waitKey(34) & 0xFF == ord('q'):
        break

openni2.unload()
Example #29
import cv2
import numpy as np
from primesense import openni2
from primesense import _openni2 as c_api

file_name = b'depth.oni'
openni2.initialize("C:\\Program Files\\OpenNI2\\Redist")
device = openni2.Device
dev = device.open_file(file_name)
depth_stream = dev.create_depth_stream()
depth_stream.start()
# depth_stream.set_video_mode(c_api.OniVideoMode(pixelFormat = c_api.OniPixelFormat.ONI_PIXEL_FORMAT_DEPTH_1_MM, resolutionX = 640, resolutionY = 480, fps = 15))
while True:
    frame = depth_stream.read_frame()
    frame_data = frame.get_buffer_as_uint16()
    img = np.frombuffer(frame_data, dtype=np.uint16)
    img.shape = (1, 480, 640)
    img = np.concatenate((img, img, img), axis=0)
    img = np.swapaxes(img, 0, 2)
    img = np.swapaxes(img, 0, 1)
    cv2.imshow("image", img)
    cv2.waitKey(34)
openni2.unload()
Example #30
from primesense import openni2
from primesense import _openni2 as c_api
import numpy as np
import time
import imutils
from pythonosc import osc_message_builder
from pythonosc import udp_client
import ctypes


def rgb2gray(rgb):
    r, g, b = rgb[:, :, 0], rgb[:, :, 1], rgb[:, :, 2]
    gray = 0.2989 * r + 0.5870 * g + 0.1140 * b
    return gray.astype(np.uint8)


# Stream parameters and Redist location -- assumed values; the original definitions were elided
redistPath = "Redist/"  # hypothetical path to the OpenNI2 Redist folder
width, height, frameRate = 640, 480, 30

# Initialize OpenNI with its libraries
openni2.initialize(redistPath)  #The OpenNI2 Redist folder

# Open a device
dev = openni2.Device.open_any()
# Check to make sure it's not None

# Open two streams: one for color and one for depth
depth_stream = dev.create_depth_stream()
depth_stream.start()
depth_stream.set_video_mode(
    c_api.OniVideoMode(
        pixelFormat=c_api.OniPixelFormat.ONI_PIXEL_FORMAT_DEPTH_100_UM,
        resolutionX=width,
        resolutionY=height,
        fps=frameRate))
Example #31
# Center of the image
x = h/2
y = w/2

# Device number
devN=3

## Array to store the image modalities+overlayed_skeleton (4images)
#rgb   = np.zeros((480,640,3), np.uint8)
#rgbdm = np.zeros((480,640*4, 3), np.uint8)

##pi3
dist = "/home/carlos/Install/kinect/OpenNI-Linux-Arm-2.2/Redist/"

## initialize openni and check
openni2.initialize(dist)
if openni2.is_initialized():
    print("openNI2 initialized")
else:
    print("openNI2 not initialized")
#if

## Register the device
dev = openni2.Device.open_any()

## create the streams
rgb_stream = dev.create_color_stream()
depth_stream = dev.create_depth_stream()


Example #32
import cv2
import numpy as np
from primesense import _openni2 as c_api
from primesense import openni2

if __name__ == '__main__':
    try:
        openni2.initialize()
        dev = openni2.Device.open_any()
        print(dev.get_sensor_info(openni2.SENSOR_DEPTH))
    except (RuntimeError, TypeError, NameError) as err:
        print(err)
        raise
    depth_stream = dev.create_depth_stream()
    color_stream = dev.create_color_stream()
    depth_stream.set_video_mode(c_api.OniVideoMode(pixelFormat=c_api.OniPixelFormat.ONI_PIXEL_FORMAT_DEPTH_1_MM,
                                                   resolutionX=640,
                                                   resolutionY=480,
                                                   fps=30))
    color_stream.set_video_mode(c_api.OniVideoMode(pixelFormat=c_api.OniPixelFormat.ONI_PIXEL_FORMAT_RGB888,
                                                   resolutionX=640,
                                                   resolutionY=480,
                                                   fps=30))
    depth_stream.start()
    color_stream.start()

    shot_idx = 0

    while True:
        frame_depth = depth_stream.read_frame()
        frame_color = color_stream.read_frame()
Example #33
def main(argv=None):
    print('Hello! This is XXXXXX Program')

    ## Initialize OpenNi
    # dist = './driver/OpenNI-Linux-x64-2.3/Redist'
    dist = './driver/OpenNI-Windows-x64-2.3/Redist'
    openni2.initialize(dist)
    if (openni2.is_initialized()):
        print("openNI2 initialized")
    else:
        print("openNI2 not initialized")

    ## Register the device
    dev = openni2.Device.open_any()

    ## Create the streams stream
    rgb_stream = dev.create_color_stream()
    depth_stream = dev.create_depth_stream()

    ## Define stream parameters
    w = 320
    h = 240
    fps = 30

    ## Configure the rgb_stream -- changes automatically based on bus speed
    rgb_stream.set_video_mode(
        c_api.OniVideoMode(
            pixelFormat=c_api.OniPixelFormat.ONI_PIXEL_FORMAT_RGB888,
            resolutionX=w,
            resolutionY=h,
            fps=fps))

    ## Configure the depth_stream -- changes automatically based on bus speed
    # print 'Depth video mode info', depth_stream.get_video_mode() # Checks depth video configuration
    depth_stream.set_video_mode(
        c_api.OniVideoMode(
            pixelFormat=c_api.OniPixelFormat.ONI_PIXEL_FORMAT_DEPTH_1_MM,
            resolutionX=w,
            resolutionY=h,
            fps=fps))

    ## Check and configure the mirroring -- default is True
    ## Note: I disable mirroring
    # print 'Mirroring info1', depth_stream.get_mirroring_enabled()
    depth_stream.set_mirroring_enabled(False)
    rgb_stream.set_mirroring_enabled(False)

    ## Start the streams
    rgb_stream.start()
    depth_stream.start()

    ## Synchronize the streams
    dev.set_depth_color_sync_enabled(True)  # synchronize the streams

    ## IMPORTANT: ALIGN DEPTH2RGB (depth wrapped to match rgb stream)
    dev.set_image_registration_mode(openni2.IMAGE_REGISTRATION_DEPTH_TO_COLOR)

    saving_folder_path = './shapenetcore_partanno_segmentation_benchmark_v0/tools/'
    if not os.path.exists(saving_folder_path):
        os.makedirs(saving_folder_path + 'RGB')
        os.makedirs(saving_folder_path + 'D')
        os.makedirs(saving_folder_path + 'PC')
        os.makedirs(saving_folder_path + 'points')
        os.makedirs(saving_folder_path + 'points_label')

    from config import CAMERA_CONFIG

    ## main loop
    s = 1000
    done = False
    while not done:
        key = cv2.waitKey(1) & 255
        ## Read keystrokes
        if key == 27:  # terminate
            print("\tESC key detected!")
            done = True
        elif chr(key) == 's':  # screen capture
            print("\ts key detected. Saving image {}".format(s))

            ### crop the image
            rgb = rgb[80:160, 120:200, :]
            dmap = dmap[80:160, 120:200]

            ### get hsv image
            hsv = cv2.cvtColor(src=rgb, code=cv2.COLOR_BGR2HSV)
            ### get black area
            tblack = cv2.inRange(hsv, (100, 130, 0), (130, 220, 150))
            ### get white area
            twhite = cv2.inRange(hsv, (0, 0, 230, 0), (160, 200, 255, 0))
            ply_content, points_content, label_content = generate_ply_from_rgbd(
                rgb=rgb, depth=dmap, config=CAMERA_CONFIG)

            cv2.imwrite(saving_folder_path + "RGB/" + str(s) + '.png', rgb)
            cv2.imwrite(saving_folder_path + "D/" + str(s) + '.png', dmap)
            print(rgb.shape, dmap.shape)
            print(type(rgb), type(dmap))
            with open(saving_folder_path + "PC/" + str(s) + '.ply',
                      'w') as output:
                output.write(ply_content)
            print(saving_folder_path + "PC/" + str(s) + '.ply', ' done')
            s += 1  # uncomment for multiple captures

        ## Streams
        # RGB
        rgb = get_rgb(rgb_stream=rgb_stream, h=h, w=w)

        # DEPTH
        dmap, d4d = get_depth(depth_stream=depth_stream, h=h, w=w)

        # canvas
        canvas = np.hstack((rgb, d4d))
        cv2.rectangle(canvas, (119, 79), (202, 162), (0, 255, 0), 1)
        cv2.rectangle(canvas, (119 + 320, 79), (202 + 320, 162), (0, 255, 0),
                      1)
        ## Display the streams side-by-side
        cv2.imshow('depth || rgb', canvas)

        hsv = cv2.cvtColor(src=rgb, code=cv2.COLOR_BGR2HSV)

        ### for black
        # tblack = cv2.inRange(hsv, (low_H, low_S, low_V), (high_H, high_S, high_V))
        tblack = cv2.inRange(hsv, (100, 180, 0), (130, 255, 150))

        ### for white
        # twhite = cv2.inRange(hsv, (low_H, low_S, low_V), (high_H, high_S, high_V))
        twhite = cv2.inRange(hsv, (0, 0, 230, 0), (100, 200, 255, 0))

        cv2.imshow('black', tblack)
        cv2.imshow('white', twhite)
    # end while

    ## Release resources
    cv2.destroyAllWindows()
    rgb_stream.stop()
    depth_stream.stop()
    openni2.unload()
    print("Terminated")
Example #34
import os
import time
from primesense import openni2


openni2.initialize()

assert openni2.Device.enumerate_uris()
dev = openni2.Device.open_any()
assert dev.get_device_info()
assert dev.get_sensor_info(openni2.SENSOR_IR)
assert dev.get_sensor_info(openni2.SENSOR_DEPTH)
assert dev.get_sensor_info(openni2.SENSOR_COLOR)

dev.is_image_registration_mode_supported(openni2.IMAGE_REGISTRATION_OFF)
dev.is_image_registration_mode_supported(openni2.IMAGE_REGISTRATION_DEPTH_TO_COLOR)

depth_stream = dev.create_depth_stream()
color_stream = dev.create_color_stream()
ir_stream = dev.create_ir_stream()
assert depth_stream
assert color_stream
assert ir_stream

dev.is_file()
dev.is_command_supported(openni2.c_api.ONI_DEVICE_COMMAND_SEEK)
dev.get_depth_color_sync_enabled()
dev.set_depth_color_sync_enabled(True)
dev.get_depth_color_sync_enabled()
mode = dev.get_image_registration_mode()
dev.set_image_registration_mode(mode)
Example #35
        )  #small chance these may be reversed in certain apis...This order? Really?
        #filling rgb channels with duplicates so matplotlib can draw it (expects rgb)
        img = np.concatenate((img, img, img), axis=0)
        #because the order is so weird, rearrange it (third dimension must be 3 or 4)
        img = np.swapaxes(img, 0, 2)
        img = np.swapaxes(img, 0, 1)
    elif whatisit == (640 * 480 * 3):
        #color is miraculously in this order
        img.shape = (480, 640, 3)
    else:
        print "Frames are of size: ", img.size

    return img


openni2.initialize('/lib/')

dev = openni2.Device.open_any()
depth_stream = dev.create_depth_stream()
depth_stream.set_video_mode(
    c_api.OniVideoMode(
        pixelFormat=c_api.OniPixelFormat.ONI_PIXEL_FORMAT_DEPTH_100_UM,
        resolutionX=640,
        resolutionY=480,
        fps=30))
depth_stream.start()

pygame.init()
screen = pygame.display.set_mode((640, 480))
ir_frame = pygame.Surface((640, 480))
pygame.display.set_caption('Structure Map')
Example #36
import numpy as np
import cv2
from primesense import openni2
from primesense import _openni2 as c_api

openni2.initialize()     # can also accept the path of the OpenNI redistribution

dev = openni2.Device.open_any()

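# note: despite the variable name, the next line creates the *color* stream, which feeds the AVI writer below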
depth_stream = dev.create_color_stream()
depth_stream.start()

size = (640,480)

out = cv2.VideoWriter("calib_kinect.avi",cv2.VideoWriter_fourcc(*'DIVX'), 24, size)

i=0
img_array = []

key = -1

while(key == -1):
    i += 1

    frame = depth_stream.read_frame()
    frame_data = frame.get_buffer_as_uint8()

    img = np.frombuffer(frame_data, dtype=np.uint8)
    img.shape = (480,640,3)
    
    RGB_img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
Example #37
#!/usr/bin/env python
#-*-coding:utf-8-*-
'''
This version solves the problems of reading the video ten frames at a time and of motion detection.

'''
import os
os.environ["CUDA_VISIBLE_DEVICES"] = "1"
import numpy as np
import cv2
from primesense import openni2
from primesense import _openni2 as c_api
dist ='/home/victor/software/OpenNI-Linux-x64-2.3/Redist'
openni2.initialize(dist)
if openni2.is_initialized():
    print("openNI2 initialized")
else:
    print("openNI2 not initialized")
dev = openni2.Device.open_any()
rgb_stream = dev.create_color_stream()
print('The rgb video mode is', rgb_stream.get_video_mode())
rgb_stream.set_video_mode(c_api.OniVideoMode(pixelFormat=c_api.OniPixelFormat.ONI_PIXEL_FORMAT_RGB888, resolutionX=320, resolutionY=240, fps=15))
rgb_stream.start()
rgb_stream.set_mirroring_enabled(False)
def get_rgb():
    # grab one frame and convert to grayscale (single-channel despite the name)
    bgr = np.frombuffer(rgb_stream.read_frame().get_buffer_as_uint8(), dtype=np.uint8).reshape(240, 320, 3)
    rgb = cv2.cvtColor(bgr, cv2.COLOR_BGR2GRAY)
    return rgb
s=0
done=False
rgbs=[]
Example #38
    print('Distortion Coefficients:', dist_coefs.ravel())


if __name__ == '__main__':
    import sys
    import os
    ROOT_DIR = os.path.join(os.path.dirname(__file__), '../')
    sys.path.append(ROOT_DIR)

    from config import OPENNI_CONFIG, CALIBRATION_CONFIG

    if len(sys.argv) < 2:
        raise ValueError('Camera type not provided')

    CAMERA_TYPE = sys.argv[1]
    if CAMERA_TYPE.lower() not in ['rgb', 'ir']:
        raise ValueError(
            'Please specify either "rgb" or "ir" for camera type')

    OPENNI2_REDIST_DIR = OPENNI_CONFIG['OPENNI2_REDIST_DIR']
    PATTERN_SIZE = CALIBRATION_CONFIG['PATTERN_SIZE']
    SQUARE_SIZE = CALIBRATION_CONFIG['SQUARE_SIZE']

    SAVE_DIR = os.path.join(
        ROOT_DIR, CALIBRATION_CONFIG['SAVE_DIR'], CAMERA_TYPE)
    os.makedirs(os.path.dirname(SAVE_DIR), exist_ok=True)

    openni2.initialize(OPENNI2_REDIST_DIR)
    capture_chessboards(CAMERA_TYPE, SAVE_DIR)
    calibration_with_chessboards(PATTERN_SIZE, SQUARE_SIZE, SAVE_DIR)
Example #39
        if not rec and cv2.waitKey(1) & 0xFF == ord('s'):
            rec = True
            print("Recording...\nPress 'q' to stop recording")
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break


if __name__ == '__main__':
    args = getArgs()
    path = "../../data/videos/" + args.video
    try:
        print("Trying to open Openni device...")
        if sys.platform == "win32":
            openni2.initialize(
                "../../externals/OpenNI/Windows/Astra OpenNI2 Development Instruction(x64)_V1.3/OpenNI2/OpenNI-Windows-x64-2.3.0.63/Redist"
            )
        else:
            openni2.initialize(
                "../../OpenNI/Linux/OpenNI-Linux-x64-2.3.0.63/Redist")
        dev = openni2.Device.open_any()
        print("Openni device opened.")
        recordOni()
        openni2.unload()
    except Exception:
        print("No openni device found.")
        print("Trying to open RealSense device...")
        try:
            import pyrealsense2 as rs
            dev = rs.pipeline()
            dev.start()
Example #40
'''

import cv2

from primesense import openni2#, nite2
import numpy as np
from primesense import _openni2 as c_api

#import matplotlib.pyplot as plt


## Directory where OpenNI2.so is located
dist = '/home/carlos/Install/kinect/OpenNI2-Linux-Arm-2.2/Redist/'

## Initialize openni and check
openni2.initialize(dist)#'C:\Program Files\OpenNI2\Redist\OpenNI2.dll') # accepts the path of the OpenNI redistribution
if openni2.is_initialized():
    print("openNI2 initialized")
else:
    print("openNI2 not initialized")

#### initialize nite and check
##nite2.initialize()
##if (nite2.is_initialized()):
##    print "nite2 initialized"
##else:
##    print "nite2 not initialized"
#### ===============================


dev = openni2.Device.open_any()
Example #41
#!/usr/bin/env python
import rospy
from std_msgs.msg import String
from primesense import openni2#, nite2
from primesense import _openni2 as c_api
import numpy as np
import rospkg 
import cv2
import config as cf

rospack = rospkg.RosPack()
path = rospack.get_path('mtapos')
openni2.initialize(path+'/src/modules') #
dev = openni2.Device.open_any()
rgb_stream = dev.create_color_stream()
rgb_stream.set_video_mode(c_api.OniVideoMode(pixelFormat=c_api.OniPixelFormat.ONI_PIXEL_FORMAT_RGB888, resolutionX=640, resolutionY=480, fps=30))
rgb_stream.start()

depth_stream = dev.create_depth_stream()
depth_stream.set_video_mode(c_api.OniVideoMode(pixelFormat = c_api.OniPixelFormat.ONI_PIXEL_FORMAT_DEPTH_100_UM, resolutionX = 320, resolutionY = 240, fps = 30))
depth_stream.start()
while True:
    bgr = np.frombuffer(rgb_stream.read_frame().get_buffer_as_uint8(), dtype=np.uint8).reshape(480,640,3)
    rgb   = cv2.cvtColor(bgr,cv2.COLOR_BGR2RGB)
    rgb = cv2.resize(rgb,(480, 320))
    rgb = cv2.flip(rgb,1)

    frame = depth_stream.read_frame()
    frame_data = frame.get_buffer_as_uint16()
    img = np.frombuffer(frame_data, dtype=np.uint16)
    img.shape = (240, 320)
Example #42
    def sess_run(self, detection_graph=0, detection_sess=0):
        _di, _config = self.__config()

        rtp = RealtimeHandposePipeline(1,
                                       config=_config,
                                       di=_di,
                                       verbose=False,
                                       comrefNet=None,
                                       maxDepth=EXACT_TABLE_CAMERA_DISTANCE,
                                       minDepth=DEPTH_THRESHOLD)

        joint_num = self.__joint_num()
        cube_size = self.__crop_cube()

        with tf.Session() as sess:
            init = tf.global_variables_initializer()
            sess.run(init)
            self.saver.restore(sess, self.model_path)

            frameRate = 30
            width = 640  # Width of image
            height = 480  # height of image
            openni2.initialize("django_project\\orbbec-astra\\Redist"
                               )  #The OpenNI2 Redist folder

            # Initialize OpenNI with its libraries

            # Open a device
            dev = openni2.Device.open_any()
            # Check to make sure it's not None

            # Open two streams: one for color and one for depth
            depth_stream = dev.create_depth_stream()
            depth_stream.start()
            depth_stream.set_video_mode(
                c_api.OniVideoMode(pixelFormat=c_api.OniPixelFormat.
                                   ONI_PIXEL_FORMAT_DEPTH_1_MM,
                                   resolutionX=width,
                                   resolutionY=height,
                                   fps=frameRate))

            color_stream = dev.create_color_stream()
            color_stream.start()
            color_stream.set_video_mode(
                c_api.OniVideoMode(
                    pixelFormat=c_api.OniPixelFormat.ONI_PIXEL_FORMAT_RGB888,
                    resolutionX=width,
                    resolutionY=height,
                    fps=frameRate))

            # Register both streams so they're aligned
            dev.set_image_registration_mode(
                c_api.OniImageRegistrationMode.
                ONI_IMAGE_REGISTRATION_DEPTH_TO_COLOR)
            # Set exposure settings
            # Camera Exposure
            bAutoExposure = False
            exposure = 100
            if color_stream.camera is not None:
                color_stream.camera.set_auto_exposure(bAutoExposure)
                color_stream.camera.set_exposure(exposure)

            while True:
                frame = depth_stream.read_frame()
                frame_data = frame.get_buffer_as_uint16()
                depthPix = np.frombuffer(frame_data, dtype=np.uint16)
                depthPix.shape = (height, width)

                # This depth is in 1 mm units
                #depthPix = cv2.resize(depthPix[80:400,80:560],(640,480))

                frame = color_stream.read_frame()
                frame_data = frame.get_buffer_as_uint8()
                colorPix = np.frombuffer(frame_data, dtype=np.uint8)
                colorPix.shape = (height, width, 3)

                #colorPix = cv2.resize(colorPix[80:400,80:560,:],(640,480))

                #colorPix = cv2.cvtColor(colorPix, cv2.COLOR_BGR2RGB)
                depthPix = loadDepthMap(depthPix)
                processed_image, cropped, [xmin, xmax, ymin,
                                           ymax] = detection_results(
                                               detection_graph,
                                               colorPix,
                                               detection_sess,
                                               score_threshold=0.2)
                depthPix_original = depthPix.copy()
                depthPix = depthPix[xmin:xmax, ymin:ymax]
                cv2.imshow('cropped', cropped)

                cv2.imshow('Detection',
                           cv2.cvtColor(processed_image, cv2.COLOR_RGB2BGR))

                #try:
                if True:
                    crop1, M, com3D, bounds = rtp.detect(depthPix)
                    if com3D[0] == 0:
                        continue  # skipping error happened during hand detection using COM and contours
                    crop = crop1.reshape(1, crop1.shape[0], crop1.shape[1],
                                         1).astype('float32')

                    pred_ = sess.run(self.hand_tensor,
                                     feed_dict={self.inputs: crop})
                    norm_hand = np.reshape(pred_, (joint_num, 3))
                    pose = norm_hand * cube_size / 2. + com3D

                    img, J_pixels = rtp.show3(
                        depthPix_original, pose, self._dataset,
                        (np.float32(xmin), np.float32(ymin)))
                    cv2.imshow('frame', img)
                    cv2.imshow('crop1', crop1)

                    print('com3D :', com3D, ' pose[0] :', pose[0],
                          ' pixels[0] ', J_pixels[0])
                #except:
                #	print('error happened')
                #	continue
                if cv2.waitKey(1) >= 0:
                    break
        openni2.unload()

        cv2.destroyAllWindows()
Example #43
'''

import cv2

from primesense import openni2  #, nite2
import numpy as np
from primesense import _openni2 as c_api

#import matplotlib.pyplot as plt

## Directory where OpenNI2.so is located
#dist = '/home/carlos/Install/kinect/OpenNI2-Linux-Arm-2.2/Redist/'
dist = '/home/simon/deeplearning/openni/Install/kinect/openni2/OpenNI2-x64/Redist'
## Initialize openni and check
openni2.initialize(
    dist
)  #'C:\Program Files\OpenNI2\Redist\OpenNI2.dll') # accepts the path of the OpenNI redistribution
if (openni2.is_initialized()):
    print("openNI2 initialized")
else:
    print("openNI2 not initialized")

#### initialize nite and check
##nite2.initialize()
##if (nite2.is_initialized()):
##    print "nite2 initialized"
##else:
##    print "nite2 not initialized"
#### ===============================

dev = openni2.Device.open_any()
Example #44
import cv2
import numpy as np
from primesense import openni2
from primesense import _openni2 as c_api
openni2.initialize(
    "/home/hoaiphuong/Downloads/OpenNI_2.3.0.43/Linux/OpenNI-Linux-x64-2.3/Redist"
)
dev = openni2.Device.open_any()
#depth_stream = dev.create_depth_stream()
rgb_stream = dev.create_color_stream()
#depth_stream.start()

#depth_stream.set_video_mode(c_api.OniVideoMode(pixelFormat = c_api.OniPixelFormat.ONI_PIXEL_FORMAT_DEPTH_100_UM, resolutionX = 640, resolutionY = 480, fps = 30))
rgb_stream.set_video_mode(
    c_api.OniVideoMode(
        pixelFormat=c_api.OniPixelFormat.ONI_PIXEL_FORMAT_RGB888,
        resolutionX=640,
        resolutionY=480,
        fps=30))
rgb_stream.start()
while True:
    #frame = depth_stream.read_frame()
    #frame_data = frame.get_buffer_as_uint16()
    #img = np.frombuffer(frame_data, dtype=np.uint16)
    #img.shape = (1, 480, 640)
    #img = np.concatenate((img, img, img), axis=0)
    #img = np.swapaxes(img, 0, 2)
    #img = np.swapaxes(img, 0, 1)
    #cv2.imshow("image", img)
    #cv2.waitKey(34)
    bgr = np.fromstring(rgb_stream.read_frame().get_buffer_as_uint8(),
Example #45
#!/usr/bin/python
import cv2
import numpy as np
from primesense import openni2
from primesense import _openni2 as c_api
from primesense._openni2 import OniSensorType

n = True
openni2.initialize('/home/test/Desktop/OpenNI-Linux-x64-2.2/Redist/')
dev = openni2.Device.open_any()
depth_stream = dev.create_depth_stream()
#depth_stream = dev.create_color_stream()
#depth_stream = dev.create_stream(OniSensorType.ONI_SENSOR_COLOR)
depth_stream.start()
depth_stream.set_video_mode(
    c_api.OniVideoMode(
        pixelFormat=c_api.OniPixelFormat.ONI_PIXEL_FORMAT_DEPTH_100_UM,
        resolutionX=640,
        resolutionY=480,
        fps=30))
#depth_stream.set_video_mode(c_api.OniVideoMode(pixelFormat = c_api.OniPixelFormat.ONI_PIXEL_FORMAT_RGB888,
#                                              resolutionX = 640, resolutionY = 480, fps = 30))
while True:
    frame = depth_stream.read_frame()
    frame_data = frame.get_buffer_as_uint16()
    img = np.frombuffer(frame_data, dtype=np.uint16)
    img.shape = (1, 480, 640)
    #img.shape = (1,640,720)
    img = np.concatenate((img), axis=0)
    #img = np.swapaxes(img, 0, 2)
    #img = np.swapaxes(img, 0, 1)
Example #46
        img = np.swapaxes(img, 0, 1)
    elif whatisit == (640*480*3):
        img.shape = (480,640,3)
    else:
        print("Frames are of size: ", img.size)
    #images still appear to be reflected, but I don't need them to be correct in that way
    print(img.shape)
    #need both of the following: plt.imshow adds the image to the plot
    plt.imshow(img)
    #plt.show shows all the currently added figures
    #plt.show()
    plt.pause(0.1)
    plt.draw()
    plt.close()

openni2.initialize()     # can also accept the path of the OpenNI redistribution

dev = openni2.Device.open_any()
print(dev.get_sensor_info(openni2.SENSOR_DEPTH))

print("testing depth")
depth_stream = dev.create_depth_stream()
#depth_stream.set_video_mode(c_api.OniVideoMode(pixelFormat = c_api.OniPixelFormat.ONI_PIXEL_FORMAT_DEPTH_1_MM, resolutionX = 320, resolutionY = 240, fps = 30))
depth_stream.set_video_mode(c_api.OniVideoMode(pixelFormat = c_api.OniPixelFormat.ONI_PIXEL_FORMAT_DEPTH_1_MM, resolutionX = 640, resolutionY = 480, fps = 30))

print "Testing Color "
color_stream = dev.create_color_stream()
#color_stream.set_video_mode(c_api.OniVideoMode(pixelFormat = c_api.OniPixelFormat.ONI_PIXEL_FORMAT_RGB888, resolutionX = 320, resolutionY = 240, fps = 30))
color_stream.set_video_mode(c_api.OniVideoMode(pixelFormat = c_api.OniPixelFormat.ONI_PIXEL_FORMAT_RGB888, resolutionX = 640, resolutionY = 480, fps = 30))

print "Starting Streams"
Example #47
from primesense import openni2
import numpy as np
import cv2
import os
from cv2 import aruco
import BoardInfo

base_storage_path = "../camera_calibration_out/Kinect"
second_dir = "calibImages_"
openni2.initialize("C:\Program Files\OpenNI2\Tools")

dirsInOutput = os.listdir(base_storage_path) if os.path.isdir(base_storage_path) else []

highest_i = -1
for f in dirsInOutput:
    if not os.path.isdir(f):
        continue
    try:
        test_i = int(f.replace(second_dir, ""))
    except ValueError:
        continue
    highest_i = max(highest_i, test_i)

save_path = base_storage_path+"/"+second_dir+str(highest_i+1)+"/"
os.makedirs(save_path, exist_ok=True)

all_charco_corners = []
all_charco_ids = []

detectorParams = aruco.DetectorParameters_create()
detectorParams.adaptiveThreshConstant = 5
Example #48
#!/usr/bin/env python

from primesense import openni2
import numpy as np
import cv2

if __name__ == '__main__':
    # initialize device
    openni2.initialize('/usr/lib')

    dev = openni2.Device.open_any()
    print(dev.get_sensor_info(openni2.SENSOR_DEPTH))

    # initialize stream
    depth_stream = dev.create_depth_stream()
    color_stream = dev.create_color_stream()
    depth_stream.start()
    color_stream.start()

    shot_idx = 0
    depth_array_draw_scale = 10000.0

    # loop
    while True:
        # get depth
        depth_frame = depth_stream.read_frame()
        depth_frame_data = depth_frame.get_buffer_as_uint16()
        depth_array = np.ndarray((depth_frame.height, depth_frame.width),
                                 dtype=np.uint16,
                                 buffer=depth_frame_data)
Example #49
def main(argv):
    tf_device = '/gpu:0'
    with tf.device(tf_device):
        """Build graph
        """

    frame_num = 0

    input_data = tf.placeholder(dtype=tf.float32, shape=[None, FLAGS.input_size, FLAGS.input_size, 3],
                                name='input_image')
    center_map = tf.placeholder(dtype=tf.float32, shape=[None, FLAGS.input_size, FLAGS.input_size, 1],
                                name='center_map')

    model = cpm_hand_slim.CPM_Model(FLAGS.stages, FLAGS.joints + 1)
    model.build_model(input_data, center_map, 1)

    """Create session and restore weights
    """
    sess = tf.Session()

    sess.run(tf.global_variables_initializer())
    model.load_weights_from_file(FLAGS.model_path, sess, False)

    test_center_map = cpm_utils.gaussian_img(FLAGS.input_size, FLAGS.input_size, FLAGS.input_size / 2,
                                             FLAGS.input_size / 2,
                                             FLAGS.cmap_radius)
    test_center_map = np.reshape(test_center_map, [1, FLAGS.input_size, FLAGS.input_size, 1])

    # Check weights

    for variable in tf.trainable_variables():
        with tf.variable_scope('', reuse=True):
            var = tf.get_variable(variable.name.split(':0')[0])
            print(variable.name, np.mean(sess.run(var)))

    cam = cv2.VideoCapture(FLAGS.cam_num)

    # Create kalman filters
    if FLAGS.KALMAN_ON:
        kalman_filter_array = [cv2.KalmanFilter(4, 2) for _ in range(FLAGS.joints)]
        for _, joint_kalman_filter in enumerate(kalman_filter_array):
            joint_kalman_filter.transitionMatrix = np.array([[1, 0, 1, 0], [0, 1, 0, 1], [0, 0, 1, 0], [0, 0, 0, 1]],
                                                            np.float32)
            joint_kalman_filter.measurementMatrix = np.array([[1, 0, 0, 0], [0, 1, 0, 0]], np.float32)
            joint_kalman_filter.processNoiseCov = np.array([[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]],
                                                           np.float32) * FLAGS.kalman_noise
    else:
        kalman_filter_array = None

    # Init Openni2
    openni2.initialize("C:\OpenNI\Redist")
    dev = openni2.Device.open_any()
    depth_stream = dev.create_depth_stream()
    depth_stream.start()
    depth_stream.set_video_mode(
        c_api.OniVideoMode(pixelFormat=c_api.OniPixelFormat.ONI_PIXEL_FORMAT_DEPTH_100_UM, resolutionX=640,
                           resolutionY=480, fps=30))

    with tf.device(tf_device):

        while True:
            # t1 = time.time()
            orig_img, test_img, depth_map = cpm_utils.read_image(cam, FLAGS.input_size, depth_stream)

            test_img_resize = cv2.resize(test_img, (FLAGS.input_size, FLAGS.input_size))
            # print('img read time %f' % (time.time() - t1))

            test_img_input = test_img_resize / 256.0 - 0.5
            test_img_input = np.expand_dims(test_img_input, axis=0)

            # Inference
            t1 = time.time()
            stage_heatmap_np = sess.run([model.stage_heatmap[5]],
                                        feed_dict={'input_image:0': test_img_input,
                                                   'center_map:0': test_center_map})

            # Show visualized image
            demo_img = visualize_result(test_img, orig_img, FLAGS, stage_heatmap_np, kalman_filter_array, frame_num,
                                        depth_map)
            cv2.imshow('current depth', depth_map)
            cv2.imshow('current heatmap', demo_img.astype(np.uint8))
            if cv2.waitKey(1) == ord('q'): break
            print('fps: %.2f' % (1 / (time.time() - t1)))
            print(frame_num)
            frame_num = frame_num + 1
Example #50
from primesense import openni2
import sys
import time

if len(sys.argv)<5:
    print "Usage: python record_oni.py <path of output .oni> <num frames> <delay (s)> <resolution>"
    sys.exit(1)

niRedistPath = "openni/Redist"
openni2.initialize(niRedistPath)

fileName = sys.argv[1]
dev = openni2.Device.open_any()
dev.set_depth_color_sync_enabled(True)

depth_stream = dev.create_depth_stream()
color_stream = dev.create_color_stream()

videoMode = color_stream.get_video_mode()
if int(sys.argv[4])==1:
    videoMode.resolutionX = 640
    videoMode.resolutionY = 480
elif int(sys.argv[4])==2:
    videoMode.resolutionX = 1280
    videoMode.resolutionY = 1024
else:
    videoMode.resolutionX = 320
    videoMode.resolutionY = 240
color_stream.set_video_mode(videoMode)
color_stream.camera.set_auto_exposure(False)
color_stream.camera.set_auto_white_balance(False)
Example #51
def main(args_in):
    vid_in = None
    open_ni = args_in.openni
    #OpenCV inits
    load_cascade(os.getcwd() + '/m_10_side_wide')
    color_stream = None
    if open_ni:
        openni2.initialize('./redist/')
        try:
            dev = openni2.Device.open_any()
            color_stream = dev.create_color_stream()
            color_stream.start()
        except Exception:
            print('No valid device connected')
            open_ni = False
            raise ValueError
    else:
        vid_in = cv2.VideoCapture('ISS.mp4')
        vid_in.set(cv2.CAP_PROP_POS_FRAMES, 400)
    ret = True
    cur_time = time.time()
    loop_lim = 10
    cur_loop = 0
    fpr_buffer = {}
    while ret:
        cur_loop += 1
        if cur_loop > loop_lim:
            cur_loop = 0
            chk = time.time() - cur_time
            cur_time = time.time()
            print(loop_lim / chk)
        frame = None
        if open_ni:
            frame_full = color_stream.read_frame()
            frame = np.ctypeslib.as_array(frame_full.get_buffer_as_triplet())
            frame = frame.reshape((frame_full.height, frame_full.width, 3))
            frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        else:
            _, frame = vid_in.read()
        if frame is None:
            print('Error reading frame')
            return
        width_des = 1280
        r = width_des / frame.shape[1]
        try:
            if r < 1:
                dim = (int(width_des), int(frame.shape[0] * r))
                frame = cv2.resize(frame,
                                   dim,
                                   interpolation=cv2.INTER_LANCZOS4)
        except Exception:
            print(
                'Error with resize/color conversion - make sure to unplug action cam before running without openni flag'
            )
        data = lighting_balance(frame, True, False)
        data = np.asarray(data, dtype='uint8')
        det = None
        if open_ni:
            pass
        else:
            det = detect(data, face_settings, 'manual')
        chk = remove_overlap(det)
        # fpr_buffer, chk = clean_fpr(fpr_buffer, chk, face_suppress_settings)
        for cur in chk:
            cv2.circle(frame, (int(cur[1][1]), int(cur[1][0])),
                       int(cur[2] / 2), (0, 0, 255), 3)
        cv2.imshow('Webcam', frame)
        cv2.waitKey(1)
    vid_in.release()
    cv2.destroyAllWindows()