def __init__(self,
              x=640,
              y=480,
              fps=30,
              rgb_mirror=False,
              depth_mirror=False,
              rgb=True,
              depth=True):
     self.x = x
     self.y = y
     self.fps = fps
     self.rgb_mirror = rgb_mirror
     self.depth_mirror = depth_mirror
     self.rgb = rgb
     self.depth = depth
     # Linux
     self.dist = '/home/test/ws/src/pyRamon/pyConn/OpenNI-Linux-x64-2.3/Redist/'
     openni2.initialize(self.dist)
     if (openni2.is_initialized()):
         print("openNI2 initialized")
     else:
         print("openNI2 not initialized")
     ## Register the device
     self.dev = openni2.Device.open_any()
     if rgb:
         self.createColor()
     if depth:
         self.createDepth()
     if rgb and depth:
         self.sync()
     if rgb:
         self.startColor()
     if depth:
         self.startDepth()
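The constructor above delegates to createColor, createDepth, sync, startColor and startDepth, which are not part of this listing. A minimal sketch of what they might look like, reusing only the openni2/c_api calls that appear in the other examples; the bodies are assumptions, not the original implementation:

# Methods of the class above (sketch only).
def createColor(self):
    # Create and configure the RGB stream on the opened device.
    self.rgb_stream = self.dev.create_color_stream()
    self.rgb_stream.set_video_mode(
        c_api.OniVideoMode(
            pixelFormat=c_api.OniPixelFormat.ONI_PIXEL_FORMAT_RGB888,
            resolutionX=self.x, resolutionY=self.y, fps=self.fps))
    self.rgb_stream.set_mirroring_enabled(self.rgb_mirror)

def createDepth(self):
    # Create and configure the depth stream (1 mm units).
    self.depth_stream = self.dev.create_depth_stream()
    self.depth_stream.set_video_mode(
        c_api.OniVideoMode(
            pixelFormat=c_api.OniPixelFormat.ONI_PIXEL_FORMAT_DEPTH_1_MM,
            resolutionX=self.x, resolutionY=self.y, fps=self.fps))
    self.depth_stream.set_mirroring_enabled(self.depth_mirror)

def sync(self):
    # Synchronize the streams and register depth onto the color image.
    self.dev.set_depth_color_sync_enabled(True)
    self.dev.set_image_registration_mode(openni2.IMAGE_REGISTRATION_DEPTH_TO_COLOR)

def startColor(self):
    self.rgb_stream.start()

def startDepth(self):
    self.depth_stream.start()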
Example no. 2
 def initRun(self):
     openni2.initialize(self.dist)
     if (openni2.is_initialized()):
         print("openNI2 initialized")
     else:
         print("openNI2 not initialized")
     self.device = openni2.Device.open_any()
     print("Device opened")
Example no. 3
 def __init__(self):
     # Linux
     self.dist = '/home/test/ws/src/pyRamon/pyConn/OpenNI-Linux-x64-2.3/Redist/'
     openni2.initialize(self.dist)
     if (openni2.is_initialized()):
         print("openNI2 initialized")
     else:
         print("openNI2 not initialized")
     ## Register the device
     self.dev = openni2.Device.open_any()
Example no. 4
 def __init__(self, fp):
     super().__init__(fp)
     self.depth_stream = None
     # Initialize openni and check
     print("Starting openNI initialization")
     openni2.initialize(dist)  #
     if openni2.is_initialized():
         print("openNI2 initialized")
     else:
         print("openNI2 not initialized")
Example no. 5
    def isOpened(self):

        if not openni2.is_initialized():
            return False

        self.device = openni2.Device.open_any()
        self.getDepthStream()

        self.depth_stream.start()

        return True
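isOpened above relies on a getDepthStream helper that is not shown; a minimal sketch, assuming it only needs to create the stream (isOpened starts it afterwards):

def getDepthStream(self):
    # Create the depth stream on the device opened in isOpened();
    # starting it is left to the caller.
    self.depth_stream = self.device.create_depth_stream()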
Example no. 6
def setup_camera(w, h, fps):
    ## Initialize OpenNi
    # dist = './driver/OpenNI-Linux-x64-2.3/Redist'
    dist = './driver/OpenNI-Windows-x64-2.3/Redist'
    openni2.initialize(dist)
    if (openni2.is_initialized()):
        print("openNI2 initialized")
    else:
        print("openNI2 not initialized")

    ## Register the device
    dev = openni2.Device.open_any()

    ## Create the color and depth streams
    rgb_stream = dev.create_color_stream()
    depth_stream = dev.create_depth_stream()

    ## Configure the rgb_stream -- changes automatically based on bus speed
    rgb_stream.set_video_mode(
        c_api.OniVideoMode(
            pixelFormat=c_api.OniPixelFormat.ONI_PIXEL_FORMAT_RGB888,
            resolutionX=w,
            resolutionY=h,
            fps=fps))

    ## Configure the depth_stream -- changes automatically based on bus speed
    # print 'Depth video mode info', depth_stream.get_video_mode() # Checks depth video configuration
    depth_stream.set_video_mode(
        c_api.OniVideoMode(
            pixelFormat=c_api.OniPixelFormat.ONI_PIXEL_FORMAT_DEPTH_1_MM,
            resolutionX=w,
            resolutionY=h,
            fps=fps))

    ## Check and configure the mirroring -- default is True
    ## Note: I disable mirroring
    # print 'Mirroring info1', depth_stream.get_mirroring_enabled()
    depth_stream.set_mirroring_enabled(False)
    rgb_stream.set_mirroring_enabled(False)

    ## Start the streams
    rgb_stream.start()
    depth_stream.start()

    ## Synchronize the streams
    dev.set_depth_color_sync_enabled(True)  # synchronize the streams

    ## IMPORTANT: ALIGN DEPTH2RGB (depth warped to match the rgb stream)
    dev.set_image_registration_mode(openni2.IMAGE_REGISTRATION_DEPTH_TO_COLOR)
    return rgb_stream, depth_stream
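A possible way to consume the two streams returned by setup_camera, using the frame accessors from the primesense wrapper; the 640x480 mode below is only an example:

import numpy as np

rgb_stream, depth_stream = setup_camera(w=640, h=480, fps=30)

# One RGB frame: reshape the raw byte buffer into an HxWx3 uint8 image.
rgb_frame = rgb_stream.read_frame()
rgb = np.frombuffer(rgb_frame.get_buffer_as_uint8(),
                    dtype=np.uint8).reshape(480, 640, 3)

# One depth frame: each uint16 value is a distance in millimetres (DEPTH_1_MM mode).
depth_frame = depth_stream.read_frame()
dmap = np.frombuffer(depth_frame.get_buffer_as_uint16(),
                     dtype=np.uint16).reshape(480, 640)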
Example no. 7
def initialize(dll_directories=_default_dll_directories):
    global _nite2_initialized
    global loaded_dll_directory
    if _nite2_initialized:
        return
    if isinstance(dll_directories, str):
        dll_directories = [dll_directories]

    if not openni2.is_initialized():
        openni2.initialize()

    if loaded_dll_directory:
        c_api.niteInitialize()
        _nite2_initialized = True
        return

    found = False
    prev = os.getcwd()
    exceptions = []
    dll_directories = [
        os.path.normpath(os.path.abspath(d)) for d in dll_directories
    ]

    for dlldir in dll_directories:
        if not os.path.isdir(dlldir):
            exceptions.append((dlldir, "Directory does not exist"))
            continue
        fullpath = os.path.join(dlldir, _dll_name)
        if not os.path.isfile(fullpath):
            exceptions.append((fullpath, "file does not exist"))
            continue
        try:
            os.chdir(dlldir)
            c_api.load_dll(fullpath)
            c_api.niteInitialize()
        except Exception as ex:
            exceptions.append((fullpath, ex))
        else:
            found = True
            loaded_dll_directory = dlldir
            break

    os.chdir(prev)
    if not found:
        raise InitializationError(
            "NiTE2 could not be loaded:\n    %s" % ("\n    ".join(
                "%s: %s" % (dir, ex) for dir, ex in exceptions)), )

    _nite2_initialized = True
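A small usage sketch for the initialize helper above; the Redist path is illustrative only, and InitializationError is the exception the function raises when no candidate directory loads:

try:
    initialize(["/path/to/NiTE2/Redist"])  # illustrative path, not a real install location
except InitializationError as err:
    print("NiTE2 could not be initialized:", err)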
Example no. 9
  def __init__(self, width=320, height=240, fps=30):
    if not openni2.is_initialized():
      openni2.initialize("/usr/lib/")

    self.width = width
    self.height = height

    # maybe change this to a more specific device
    self.device = openni2.Device.open_any()
    self.rgb_stream = self.device.create_color_stream()
    self.rgb_stream.set_video_mode(
        c_api.OniVideoMode(
            pixelFormat=c_api.OniPixelFormat.ONI_PIXEL_FORMAT_RGB888,
            resolutionX=width,
            resolutionY=height,
            fps=fps))
    self.rgb_stream.start()
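The class above only starts the stream; a sketch of a matching read method that converts the OpenNI buffer into a BGR image for OpenCV (numpy and cv2 assumed to be imported):

def read(self):
    # Fetch one frame and reshape its byte buffer to height x width x 3.
    frame = self.rgb_stream.read_frame()
    rgb = np.frombuffer(frame.get_buffer_as_uint8(),
                        dtype=np.uint8).reshape(self.height, self.width, 3)
    return cv2.cvtColor(rgb, cv2.COLOR_RGB2BGR)  # OpenNI gives RGB, OpenCV expects BGR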
Example no. 10
  def __init__(self, width=320, height=240, fps=30):
    if not openni2.is_initialized():
      openni2.initialize("/usr/lib/")

    self.width = width
    self.height = height

    # maybe change this to a more specific device
    self.device = openni2.Device.open_any()
    self.depth_stream = self.device.create_depth_stream()
    self.depth_stream.set_video_mode(
        c_api.OniVideoMode(
            pixelFormat=c_api.OniPixelFormat.ONI_PIXEL_FORMAT_DEPTH_100_UM,
            resolutionX=width,
            resolutionY=height,
            fps=30))

    # self.depth_stream.set_mirroring_enabled(False)
    self.depth_stream.start()
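A corresponding depth read sketch; in ONI_PIXEL_FORMAT_DEPTH_100_UM mode each uint16 value is in 100 µm steps, so dividing by 10 yields millimetres:

def read(self):
    frame = self.depth_stream.read_frame()
    dmap = np.frombuffer(frame.get_buffer_as_uint16(),
                         dtype=np.uint16).reshape(self.height, self.width)
    return dmap / 10.0  # 100 um units -> millimetres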
Example no. 11
"""
Use to record with the PrimeSense RGB and depth cameras and the Seek thermal camera.
"""
import numpy as np
import cv2
import os
import shutil
from primesense import openni2  # , nite2
from primesense import _openni2 as c_api
from seek_camera import thermal_camera

#############################################################################
# set-up primesense camera
dist = '/home/julian/Install/OpenNI2-x64/Redist'
# Initialize openni and check
openni2.initialize(dist)
if (openni2.is_initialized()):
    print "openNI2 initialized"
else:
    print "openNI2 not initialized"
# Register the device
prime = openni2.Device.open_any()
# Create the streams
rgb_stream = prime.create_color_stream()
depth_stream = prime.create_depth_stream()
# Configure the depth_stream -- changes automatically based on bus speed
# print 'Depth video mode info', depth_stream.get_video_mode() # Checks depth video configuration
depth_stream.set_video_mode(
    c_api.OniVideoMode(
        pixelFormat=c_api.OniPixelFormat.ONI_PIXEL_FORMAT_DEPTH_1_MM,
        resolutionX=320,
        resolutionY=240,
        fps=30))  # fps assumed; the original snippet is truncated here
Example no. 12
def main(argv=None):
    print('Hello! This is XXXXXX Program')

    ## Load PointNet config
    parser = argparse.ArgumentParser()
    parser.add_argument('--model', type=str, default='./seg/seg_model_1.pth', help='model path')
    opt = parser.parse_args()
    print(opt)

    ## Load PointNet model
    num_points = 2700
    classifier = PointNetDenseCls(num_points=num_points, k=10)
    classifier.load_state_dict(torch.load(opt.model))
    classifier.eval()

    ### Config visualization
    cmap = plt.cm.get_cmap("hsv", 5)
    cmap = np.array([cmap(i) for i in range(10)])[:, :3]
    # gt = cmap[seg - 1, :]


    ## Initialize OpenNi
    # dist = './driver/OpenNI-Linux-x64-2.3/Redist'
    dist = './driver/OpenNI-Windows-x64-2.3/Redist'
    openni2.initialize(dist)
    if (openni2.is_initialized()):
        print("openNI2 initialized")
    else:
        print("openNI2 not initialized")

    ## Register the device
    dev = openni2.Device.open_any()

    ## Create the color and depth streams
    rgb_stream = dev.create_color_stream()
    depth_stream = dev.create_depth_stream()

    ## Define stream parameters
    w = 320
    h = 240
    fps = 30

    ## Configure the rgb_stream -- changes automatically based on bus speed
    rgb_stream.set_video_mode(
        c_api.OniVideoMode(pixelFormat=c_api.OniPixelFormat.ONI_PIXEL_FORMAT_RGB888, resolutionX=w, resolutionY=h,
                           fps=fps))

    ## Configure the depth_stream -- changes automatically based on bus speed
    # print 'Depth video mode info', depth_stream.get_video_mode() # Checks depth video configuration
    depth_stream.set_video_mode(
        c_api.OniVideoMode(pixelFormat=c_api.OniPixelFormat.ONI_PIXEL_FORMAT_DEPTH_1_MM, resolutionX=w, resolutionY=h,
                           fps=fps))

    ## Check and configure the mirroring -- default is True
    ## Note: I disable mirroring
    # print 'Mirroring info1', depth_stream.get_mirroring_enabled()
    depth_stream.set_mirroring_enabled(False)
    rgb_stream.set_mirroring_enabled(False)

    ## Start the streams
    rgb_stream.start()
    depth_stream.start()

    ## Synchronize the streams
    dev.set_depth_color_sync_enabled(True)  # synchronize the streams

    ## IMPORTANT: ALIGN DEPTH2RGB (depth warped to match the rgb stream)
    dev.set_image_registration_mode(openni2.IMAGE_REGISTRATION_DEPTH_TO_COLOR)

    saving_folder_path = './shapenetcore_partanno_segmentation_benchmark_v0/tools/'
    if not os.path.exists(saving_folder_path):
        os.makedirs(saving_folder_path+'RGB')
        os.makedirs(saving_folder_path+'D')
        os.makedirs(saving_folder_path+'PC')
        os.makedirs(saving_folder_path+'points')
        os.makedirs(saving_folder_path+'points_label')

    from config import CAMERA_CONFIG

    ## main loop
    s = 1000
    done = False
    while not done:
        key = cv2.waitKey(1) & 255
        ## Read keystrokes
        if key == 27:  # terminate
            print("\tESC key detected!")
            done = True
        elif chr(key) == 's':  # screen capture
            print("\ts key detected. Saving image {}".format(s))


            rgb = rgb[60:180, 80:240, :]
            dmap = dmap[60:180, 80:240]
            ply_content, points_content = generate_ply_from_rgbd(rgb=rgb, depth=dmap, config=CAMERA_CONFIG)

            cv2.imwrite(saving_folder_path + "RGB/" + str(s) + '.png', rgb)
            cv2.imwrite(saving_folder_path + "D/" + str(s) + '.png', dmap)
            print(rgb.shape, dmap.shape)
            print(type(rgb), type(dmap))
            with open(saving_folder_path + "PC/" + str(s) + '.ply', 'w') as output:
                output.write(ply_content)
            print(saving_folder_path + "PC/" + str(s) + '.ply', ' done')
            s += 1  # advance the index for the next capture

            # ### Get pointcloud of scene for prediction
            # points_np = (np.array(points_content)[:, :3]).astype(np.float32)
            # choice = np.random.choice(len(points_np), num_points, replace=True)
            # points_np = points_np[choice, :]
            # points_torch = torch.from_numpy(points_np)
            #
            # points_torch = points_torch.transpose(1, 0).contiguous()
            #
            # points_torch = Variable(points_torch.view(1, points_torch.size()[0], points_torch.size()[1]))
            #
            # ### Predict to segment scene
            # pred, _ = classifier(points_torch)
            # pred_choice = pred.data.max(2)[1]
            # print(pred_choice)

        ## Streams
        # RGB
        rgb = get_rgb(rgb_stream=rgb_stream, h=h, w=w)

        # DEPTH
        dmap, d4d = get_depth(depth_stream=depth_stream, h=h, w=w)

        # canvas
        canvas = np.hstack((rgb, d4d))
        ## Display the streams side by side
        cv2.imshow('depth || rgb', canvas)
    # end while

    ## Release resources
    cv2.destroyAllWindows()
    rgb_stream.stop()
    depth_stream.stop()
    openni2.unload()
    print("Terminated")
Example no. 13
def main(argv=None):
    print('Hello! This is XXXXXX Program')

    cv2.namedWindow(window_detection_name)
    cv2.createTrackbar(low_H_name, window_detection_name, low_H, max_value_H,
                       on_low_H_thresh_trackbar)
    cv2.createTrackbar(high_H_name, window_detection_name, high_H, max_value_H,
                       on_high_H_thresh_trackbar)
    cv2.createTrackbar(low_S_name, window_detection_name, low_S, max_value,
                       on_low_S_thresh_trackbar)
    cv2.createTrackbar(high_S_name, window_detection_name, high_S, max_value,
                       on_high_S_thresh_trackbar)
    cv2.createTrackbar(low_V_name, window_detection_name, low_V, max_value,
                       on_low_V_thresh_trackbar)
    cv2.createTrackbar(high_V_name, window_detection_name, high_V, max_value,
                       on_high_V_thresh_trackbar)

    ## Initialize OpenNi
    # dist = './driver/OpenNI-Linux-x64-2.3/Redist'
    dist = './driver/OpenNI-Windows-x64-2.3/Redist'
    openni2.initialize(dist)
    if (openni2.is_initialized()):
        print("openNI2 initialized")
    else:
        print("openNI2 not initialized")

    ## Register the device
    dev = openni2.Device.open_any()

    ## Create the color and depth streams
    rgb_stream = dev.create_color_stream()
    depth_stream = dev.create_depth_stream()

    ## Define stream parameters
    w = 320
    h = 240
    fps = 30

    ## Configure the rgb_stream -- changes automatically based on bus speed
    rgb_stream.set_video_mode(
        c_api.OniVideoMode(
            pixelFormat=c_api.OniPixelFormat.ONI_PIXEL_FORMAT_RGB888,
            resolutionX=w,
            resolutionY=h,
            fps=fps))

    ## Configure the depth_stream -- changes automatically based on bus speed
    # print 'Depth video mode info', depth_stream.get_video_mode() # Checks depth video configuration
    depth_stream.set_video_mode(
        c_api.OniVideoMode(
            pixelFormat=c_api.OniPixelFormat.ONI_PIXEL_FORMAT_DEPTH_1_MM,
            resolutionX=w,
            resolutionY=h,
            fps=fps))

    ## Check and configure the mirroring -- default is True
    ## Note: I disable mirroring
    # print 'Mirroring info1', depth_stream.get_mirroring_enabled()
    depth_stream.set_mirroring_enabled(False)
    rgb_stream.set_mirroring_enabled(False)

    ## Start the streams
    rgb_stream.start()
    depth_stream.start()

    ## Synchronize the streams
    dev.set_depth_color_sync_enabled(True)  # synchronize the streams

    ## IMPORTANT: ALIGN DEPTH2RGB (depth warped to match the rgb stream)
    dev.set_image_registration_mode(openni2.IMAGE_REGISTRATION_DEPTH_TO_COLOR)

    ## main loop
    done = False
    while not done:
        key = cv2.waitKey(1) & 255
        ## Read keystrokes
        if key == 27:  # terminate
            print("\tESC key detected!")
            done = True

        ## Streams
        # RGB
        rgb = get_rgb(rgb_stream=rgb_stream, h=h, w=w)

        # DEPTH
        dmap, d4d = get_depth(depth_stream=depth_stream, h=h, w=w)

        # canvas
        canvas = np.hstack((rgb, d4d))
        cv2.rectangle(canvas, (119, 79), (202, 162), (0, 255, 0), 1)
        cv2.rectangle(canvas, (119 + 320, 79), (202 + 320, 162), (0, 255, 0),
                      1)
        ## Display the streams side by side
        cv2.imshow('depth || rgb', canvas)

        hsv = cv2.cvtColor(src=rgb, code=cv2.COLOR_BGR2HSV)

        ### for black
        # tblack = cv2.inRange(hsv, (low_H, low_S, low_V), (high_H, high_S, high_V))
        tblack = cv2.inRange(hsv, (100, 130, 0), (130, 220, 150))

        ### for white
        # twhite = cv2.inRange(hsv, (low_H, low_S, low_V), (high_H, high_S, high_V))
        twhite = cv2.inRange(hsv, (0, 0, 230, 0), (160, 200, 255, 0))

        cv2.imshow('black', tblack)
        cv2.imshow('white', twhite)
    # end while

    ## Release resources
    cv2.destroyAllWindows()
    rgb_stream.stop()
    depth_stream.stop()
    openni2.unload()
    print("Terminated")
Example no. 14
x = h/2
y = w/2

# Device number
devN=3

## Array to store the image modalities+overlayed_skeleton (4images)
#rgb   = np.zeros((480,640,3), np.uint8)
#rgbdm = np.zeros((480,640*4, 3), np.uint8)

##pi3
dist = "/home/carlos/Install/kinect/OpenNI-Linux-Arm-2.2/Redist/"

## initialize openni and check
openni2.initialize(dist)
if (openni2.is_initialized()):
    print "openNI2 initialized"
else:
    print "openNI2 not initialized"
#if

## Register the device
dev = openni2.Device.open_any()

## create the color and depth streams
rgb_stream = dev.create_color_stream()
depth_stream = dev.create_depth_stream()



Example no. 15
"""Usage examples of the Python_OpenNI2 wrapper

The examples are taken from https://github.com/elmonkey/Python_OpenNI2/tree/master/samples.
"""

from primesense import openni2
from primesense.utils import InitializationError

IS_INITIALIZED = False

try:
    openni2.initialize("/usr/local/src/OpenNI-Linux-Arm-2.3/Redist")
    if openni2.is_initialized():
        IS_INITIALIZED = True
        print "OpenNI2 is initialized"
    else:
        print "OpenNI2 is not initialized"
except InitializationError as err:
    print("OpenNI2 is not initialized", err)
Example no. 16
def main(argv=None):
    print('Hello! This is XXXXXX Program')

    ## Initialize OpenNi
    # dist = './driver/OpenNI-Linux-x64-2.3/Redist'
    dist = './driver/OpenNI-Windows-x64-2.3/Redist'
    openni2.initialize(dist)
    if (openni2.is_initialized()):
        print("openNI2 initialized")
    else:
        print("openNI2 not initialized")

    ## Register the device
    dev = openni2.Device.open_any()

    ## Create the color and depth streams
    rgb_stream = dev.create_color_stream()
    depth_stream = dev.create_depth_stream()

    ## Define stream parameters
    w = 320
    h = 240
    fps = 30

    ## Configure the rgb_stream -- changes automatically based on bus speed
    rgb_stream.set_video_mode(
        c_api.OniVideoMode(
            pixelFormat=c_api.OniPixelFormat.ONI_PIXEL_FORMAT_RGB888,
            resolutionX=w,
            resolutionY=h,
            fps=fps))

    ## Configure the depth_stream -- changes automatically based on bus speed
    # print 'Depth video mode info', depth_stream.get_video_mode() # Checks depth video configuration
    depth_stream.set_video_mode(
        c_api.OniVideoMode(
            pixelFormat=c_api.OniPixelFormat.ONI_PIXEL_FORMAT_DEPTH_1_MM,
            resolutionX=w,
            resolutionY=h,
            fps=fps))

    ## Check and configure the mirroring -- default is True
    ## Note: I disable mirroring
    # print 'Mirroring info1', depth_stream.get_mirroring_enabled()
    depth_stream.set_mirroring_enabled(False)
    rgb_stream.set_mirroring_enabled(False)

    ## Start the streams
    rgb_stream.start()
    depth_stream.start()

    ## Synchronize the streams
    dev.set_depth_color_sync_enabled(True)  # synchronize the streams

    ## IMPORTANT: ALIGN DEPTH2RGB (depth warped to match the rgb stream)
    dev.set_image_registration_mode(openni2.IMAGE_REGISTRATION_DEPTH_TO_COLOR)

    saving_folder_path = './shapenetcore_partanno_segmentation_benchmark_v0/tools/'
    if not os.path.exists(saving_folder_path):
        os.makedirs(saving_folder_path + 'RGB')
        os.makedirs(saving_folder_path + 'D')
        os.makedirs(saving_folder_path + 'PC')
        os.makedirs(saving_folder_path + 'points')
        os.makedirs(saving_folder_path + 'points_label')

    from config import CAMERA_CONFIG

    ## main loop
    s = 1000
    done = False
    while not done:
        key = cv2.waitKey(1) & 255
        ## Read keystrokes
        if key == 27:  # terminate
            print("\tESC key detected!")
            done = True
        elif chr(key) == 's':  # screen capture
            print("\ts key detected. Saving image {}".format(s))

            ### crop the image
            rgb = rgb[80:160, 120:200, :]
            dmap = dmap[80:160, 120:200]

            ### get hsv image
            hsv = cv2.cvtColor(src=rgb, code=cv2.COLOR_BGR2HSV)
            ### get black area
            tblack = cv2.inRange(hsv, (100, 130, 0), (130, 220, 150))
            ### get white area
            twhite = cv2.inRange(hsv, (0, 0, 230, 0), (160, 200, 255, 0))
            ply_content, points_content, label_content = generate_ply_from_rgbd(
                rgb=rgb, depth=dmap, config=CAMERA_CONFIG)

            cv2.imwrite(saving_folder_path + "RGB/" + str(s) + '.png', rgb)
            cv2.imwrite(saving_folder_path + "D/" + str(s) + '.png', dmap)
            print(rgb.shape, dmap.shape)
            print(type(rgb), type(dmap))
            with open(saving_folder_path + "PC/" + str(s) + '.ply',
                      'w') as output:
                output.write(ply_content)
            print(saving_folder_path + "PC/" + str(s) + '.ply', ' done')
            s += 1  # advance the index for the next capture

        ## Streams
        # RGB
        rgb = get_rgb(rgb_stream=rgb_stream, h=h, w=w)

        # DEPTH
        dmap, d4d = get_depth(depth_stream=depth_stream, h=h, w=w)

        # canvas
        canvas = np.hstack((rgb, d4d))
        cv2.rectangle(canvas, (119, 79), (202, 162), (0, 255, 0), 1)
        cv2.rectangle(canvas, (119 + 320, 79), (202 + 320, 162), (0, 255, 0),
                      1)
        ## Display the streams side by side
        cv2.imshow('depth || rgb', canvas)

        hsv = cv2.cvtColor(src=rgb, code=cv2.COLOR_BGR2HSV)

        ### for black
        # tblack = cv2.inRange(hsv, (low_H, low_S, low_V), (high_H, high_S, high_V))
        tblack = cv2.inRange(hsv, (100, 180, 0), (130, 255, 150))

        ### for white
        # twhite = cv2.inRange(hsv, (low_H, low_S, low_V), (high_H, high_S, high_V))
        twhite = cv2.inRange(hsv, (0, 0, 230, 0), (100, 200, 255, 0))

        cv2.imshow('black', tblack)
        cv2.imshow('white', twhite)
    # end while

    ## Release resources
    cv2.destroyAllWindows()
    rgb_stream.stop()
    depth_stream.stop()
    openni2.unload()
    print("Terminated")