Example No. 1
    def run(self):
        numb_frame = self.image_stream.get_number_of_frames()
        self.slider.setRange(2, numb_frame)

        while True:
            if self.current_frame > numb_frame:
                self.current_frame = 2

            if self.current_frame < 2:
                self.current_frame = 2

            self.slider.setValue(self.current_frame)

            self.pbs.seek(self.image_stream,
                          self.current_frame)  # which frame to start from
            frame_image = self.image_stream.read_frame()

            self.build_frame(frame_image)
            time.sleep(0.016)

            if not self.is_paused:
                self.current_frame += 1

        self.image_stream.stop()
        openni2.unload()
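
As written, the while loop above never breaks, so the cleanup calls to self.image_stream.stop() and openni2.unload() are unreachable. A minimal sketch of one way to make them reachable, assuming a hypothetical self.is_stopped flag that the UI thread sets when playback should end:

    def run(self):
        numb_frame = self.image_stream.get_number_of_frames()
        self.slider.setRange(2, numb_frame)

        while not self.is_stopped:  # hypothetical flag toggled by the UI
            self.current_frame = max(2, self.current_frame)
            if self.current_frame > numb_frame:
                self.current_frame = 2  # wrap around for looped playback

            self.slider.setValue(self.current_frame)
            self.pbs.seek(self.image_stream, self.current_frame)
            self.build_frame(self.image_stream.read_frame())
            time.sleep(0.016)  # ~60 fps pacing

            if not self.is_paused:
                self.current_frame += 1

        self.image_stream.stop()
        openni2.unload()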
Example No. 2
def main():

    print("pyOniRecorder by Rocco Pietrini v1 \n ")
    readSettings()
    try:
        if sys.platform == "win32":
            libpath = "lib/Windows"
        else:
            libpath = "lib/Linux"
        print("library path is: ",
              os.path.join(os.path.dirname(__file__), libpath))
        openni2.initialize(os.path.join(os.path.dirname(__file__), libpath))
        print("OpenNI2 initialized \n")
    except Exception as ex:
        print("ERROR OpenNI2 not initialized", ex, " check library path..\n")
        return
    try:
        dev = openni2.Device.open_any()
    except Exception as ex:
        print("ERROR Unable to open the device: ", ex,
              " device disconnected? \n")
        return
    write_files(dev)
    try:
        openni2.unload()
        print("Device unloaded \n")
    except Exception as ex:
        print("Device not unloaded: ", ex, "\n")
Example No. 3
 def endtread(self):
     ## image release
     cv2.destroyAllWindows()  # destroy windows
     #self.cam_astras.rgb_stream.stop()  # stop 3D rgb stream
     #self.cam_astras.depth_stream.stop()  # stop 3D depth stream
     openni2.unload()
     print("camera closed")
Example No. 4
    def colorStream(self, pathDir:str):
        color_stream = self.dev.create_color_stream()
        # configure the video mode before starting the stream
        color_stream.set_video_mode(c_api.OniVideoMode(pixelFormat=c_api.OniPixelFormat.ONI_PIXEL_FORMAT_RGB888,
                                                       resolutionX=640, resolutionY=480, fps=30))
        color_stream.start()

        while True:
            frame = color_stream.read_frame()
            frame_data = frame.get_buffer_as_uint8()

            # RGB888 frame: reshape the flat buffer to 480x640x3, then reverse
            # the channel order so OpenCV receives BGR
            img = np.frombuffer(frame_data, dtype=np.uint8)
            img = img.reshape(480, 640, 3)[..., ::-1]

            cv2.imshow("Color Image", img)
            key = cv2.waitKey(1) & 0xFF

            if key == ord("q"):
                break

            if key == ord("c"):
                filename = pathDir +  str(datetime.datetime.now()) + ".png"
                cv2.imwrite(filename, img)

        openni2.unload()
        cv2.destroyAllWindows()
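
Note that str(datetime.datetime.now()) contains colons and spaces, which are invalid in Windows file names. A hedged alternative for the snapshot branch, formatting the timestamp with strftime (a sketch, not the original code):

    import datetime
    import os

    # timestamp without ':' or spaces, safe on Windows as well
    stamp = datetime.datetime.now().strftime("%Y%m%d_%H%M%S_%f")
    filename = os.path.join(pathDir, stamp + ".png")
    cv2.imwrite(filename, img)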
Example No. 5
    def __del__(self):
        # close all the streams
        for stream in self.streams:
            stream.close()

        # unload openni2
        openni2.unload()
Example No. 6
    def depthStream(self):
        depth_stream = self.dev.create_depth_stream()
        # configure the video mode before starting the stream
        depth_stream.set_video_mode(c_api.OniVideoMode(pixelFormat=c_api.OniPixelFormat.ONI_PIXEL_FORMAT_DEPTH_100_UM,
                                                       resolutionX=640, resolutionY=480, fps=30))
        depth_stream.start()

        while True:
            # Grab a new depth frame
            frame = depth_stream.read_frame()
            frame_data = frame.get_buffer_as_uint16()
            # Put the depth frame into a numpy array and reshape it
            img = np.frombuffer(frame_data, dtype=np.uint16)
            img.shape = (1, 480, 640)
            img = np.concatenate((img, img, img), axis=0)
            img = np.swapaxes(img, 0, 2)
            img = np.swapaxes(img, 0, 1)

            # Display the reshaped depth frame using OpenCV
            cv2.imshow("Depth Image", img)
            key = cv2.waitKey(1) & 0xFF

            # If the 'q' key is pressed, break the while loop
            if key == ord("q"):
                break

        # Close all windows and unload the depth device
        openni2.unload()
        cv2.destroyAllWindows()
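
cv2.imshow renders a uint16 image by dividing it by 256, so with DEPTH_100_UM data most frames appear nearly black. A minimal sketch of normalizing the frame for display, along the lines of the convertScaleAbs call used in Example No. 8 (a sketch, not the original code):

    # scale the 16-bit depth onto 0-255 before display
    depth_8u = cv2.convertScaleAbs(img, alpha=255.0 / 65535.0)
    cv2.imshow("Depth Image", depth_8u)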
Example No. 7
    def __del__(self):
        """Delete the Kinect interface."""
        # close all the streams
        self.rgb_stream.close()
        self.depth_stream.close()

        # unload openni2
        openni2.unload()
Example No. 8
def update():
    colors = (1.0, 1.0, 1.0, 1.0)

    # Grab a new depth frame
    depth_frame = depth_stream.read_frame()
    depth_frame_data = depth_frame.get_buffer_as_uint16()

    # Grab a new color frame
    color_frame = color_stream.read_frame()
    color_frame_data = color_frame.get_buffer_as_uint8()

    depth_img = np.frombuffer(depth_frame_data, dtype=np.uint16)
    depth_img.shape = (1, 480, 640)
    depth_img = np.concatenate((depth_img, depth_img, depth_img), axis=0)
    depth_img = np.swapaxes(depth_img, 0, 2)
    depth_img = np.swapaxes(depth_img, 0, 1)

    #cloud = points.astype(np.float32)/1000
    depth_image = np.ndarray((depth_frame.height,depth_frame.width),dtype=np.uint16,buffer=depth_frame_data)
    x, y, z, cloud = point_cloud(depth_image)

    x = cloud[:,:,0].flatten()
    y = cloud[:,:,1].flatten()
    z = cloud[:,:,2].flatten()

    N = max(x.shape)
    pos = np.empty((N,3))
    pos[:, 0] = x
    pos[:, 1] = z
    pos[:, 2] = y

    #size = np.empty((pos.shape[0]))
    #color = np.empty((pos.shape[0], 4))

    #for i in range(pos.shape[0]):
    #    size[i] = 0.1
    #    color[i] = (1,0,0,1)

    #d = np.ndarray((depth_frame.height, depth_frame.width),dtype=np.uint16,buffer=points)#/10000
    out = pos

    color_img = np.frombuffer(color_frame_data, dtype=np.uint8)
    color_img.shape = (480, 640, 3)
    color_img = color_img[...,::-1]

    cv_img = cv2.convertScaleAbs(depth_img, alpha=(255.0/65535.0))

    cv2.imshow("Image", color_img)
    sp2.setData(pos=out, size=2, color=colors, pxMode=True)

    key = cv2.waitKey(1) & 0xFF
    if (key == 27 or key == ord('q') or key == ord('x') or key == ord("c")):
        depth_stream.stop()
        color_stream.stop()
        openni2.unload()
        cv2.destroyAllWindows()
        sys.exit(0)
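
The point_cloud() helper is not shown in these snippets, and Example No. 21 calls it with a different signature. A sketch matching the four-value form used here, assuming a standard pinhole back-projection with placeholder intrinsics (fx, fy, cx, cy are illustrative values, not from the original project):

    import numpy as np

    def point_cloud(depth, fx=525.0, fy=525.0, cx=319.5, cy=239.5):
        # back-project every pixel of a (rows, cols) depth map through
        # a pinhole camera model; depth is assumed to be in millimetres
        rows, cols = depth.shape
        c, r = np.meshgrid(np.arange(cols), np.arange(rows))
        z = depth.astype(np.float32) / 1000.0  # mm -> metres
        x = (c - cx) * z / fx
        y = (r - cy) * z / fy
        cloud = np.dstack((x, y, z))  # shape (rows, cols, 3)
        return x, y, z, cloud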
Example No. 9
def main(args):
    device = getOrbbec()
    # create the depth stream
    depth_stream = device.create_depth_stream()
    depth_stream.set_mirroring_enabled(args.mirroring)
    depth_stream.set_video_mode(
        c_api.OniVideoMode(
            resolutionX=args.width,
            resolutionY=args.height,
            fps=args.fps,
            pixelFormat=c_api.OniPixelFormat.ONI_PIXEL_FORMAT_DEPTH_1_MM))

    # grab the UVC color camera
    cap = cv2.VideoCapture(0)

    # enable registration and depth/color frame sync
    device.set_image_registration_mode(True)
    device.set_depth_color_sync_enabled(True)
    depth_stream.start()

    while True:
        # read a frame
        frame_depth = depth_stream.read_frame()
        frame_depth_data = frame_depth.get_buffer_as_uint16()
        # depth_array is a numpy view of the frame, also usable for backend processing
        depth_array = np.ndarray((frame_depth.height, frame_depth.width),
                                 dtype=np.uint16,
                                 buffer=frame_depth_data)
        # rescale for display with OpenCV
        depth_uint8 = 1 - 250 / (depth_array)
        depth_uint8[depth_uint8 > 1] = 1
        depth_uint8[depth_uint8 < 0] = 0
        cv2.imshow('depth', depth_uint8)

        # read a color image
        _, color_array = cap.read()
        cv2.imshow('color', color_array)

        # process the color image color_array here

        # process the depth image depth_array here

        # keyboard listener
        if cv2.waitKey(1) == ord('q'):
            # close the windows and the camera
            depth_stream.stop()
            cap.release()
            cv2.destroyAllWindows()
            break

    # check that the device shut down (not strictly necessary)
    try:
        openni2.unload()
        print("Device unloaded \n")
    except Exception as ex:
        print("Device not unloaded: ", ex, "\n")
Example No. 10
def startApp():
    openni2.initialize(
    )  # can also accept the path of the OpenNI redistribution

    dev = openni2.Device.open_any()
    print(dev.get_device_info())

    depth_stream = dev.create_depth_stream()
    depth_stream.start()
    punkty = []

    def getFrame(readFrame):
        frame_data = readFrame.get_buffer_as_uint16()
        img = np.frombuffer(frame_data, dtype=np.uint16)
        img.shape = (480, 640)
        return img

    while True:
        img = getFrame(depth_stream.read_frame())
        img = np.ma.masked_equal(img, 0.0,
                                 copy=True)  # may not be needed
        indexClosestToCamera = img.argmin()

        j, i = np.unravel_index(indexClosestToCamera, img.shape)
        point = (i, j)
        dlX = 350
        dlY = 300
        xStart = 120
        yStart = 120

        czyWGranicachPionowych = lambda p: xStart <= p[0] < (xStart + dlX)
        czyWGranicachPoziomych = lambda p: yStart <= p[1] < (yStart + dlY)

        if czyWGranicachPionowych(point) and czyWGranicachPoziomych(point):
            pixelValueNearestToCamera = img.min()
            print(
                str(j) + " " + str(i) + "->" + str(pixelValueNearestToCamera))
            # if 1700 > pixelValueNearestToCamera > 1200:
            cv2.circle(img, (i, j), 30, (0, 0, 0), 5)
            punkty.append((i, j))
            if pixelValueNearestToCamera > 1400 and len(punkty) > 30:
                result = loadArtificialNeuralNetwork(punkty)
                print(result)
                break
        # cv2.line(img,(xStart, yStart), (xStart+dlX, yStart), (0,0,0), 5)
        # cv2.line(img,(xStart+dlX, yStart), (xStart+dlX, yStart+dlY), (0,0,0), 5)
        # cv2.line(img,(xStart+dlX, yStart+dlY), (xStart, yStart+dlY), (0,0,0), 5)
        # cv2.line(img,(xStart, yStart+dlY), (xStart, yStart), (0,0,0), 5)

        cv2.imshow("Malgorzata Niewiadomska Inzynieria Biomedyczna", img)
        if cv2.waitKey(1) & 0xFF == ord('z'):
            break

    depth_stream.stop()
    openni2.unload()
    print(punkty)
Example No. 11
 def __exit__(self, exc_type, exc_value, traceback):
     if self.video_stream is not None:
         self.video_stream.stop()
     if self.depth_stream is not None:
         self.depth_stream.stop()
     openni2.unload()
Example No. 12
    def quit_player(self):
        reply = QtWidgets.QMessageBox.question(
            self, 'Message', 'Are you sure to quit?',
            QtWidgets.QMessageBox.Yes | QtWidgets.QMessageBox.No,
            QtWidgets.QMessageBox.No)

        if reply == QtWidgets.QMessageBox.Yes:
            if self.is_streaming:
                self.close_streaming()
            openni2.unload()
            self.close()
Example No. 13
    def stop(self):
        """ Stop the sensor """
        # check that everything is running
        if not self._running or self._device is None:
            logging.warning('Primesense not running. Aborting stop')
            return False

        # stop streams
        if self._depth_stream:
            self._depth_stream.stop()
        if self._color_stream:
            self._color_stream.stop()
        self._running = False

        # Unload openni2
        openni2.unload()
        return True
Example No. 14
def main():
    p = argparse.ArgumentParser(
        formatter_class=argparse.RawDescriptionHelpFormatter, description="")
    p.add_argument('--v',
                   dest='video_path',
                   action='store',
                   required=True,
                   help='path Video')
    p.add_argument('--d',
                   dest='dst',
                   action='store',
                   default='img',
                   help='Destination Folder')
    p.add_argument('--i',
                   dest='interval',
                   action='store',
                   default=1,
                   help='Interval')
    args = p.parse_args()
    interval = int(args.interval)
    dst = args.dst
    print(args.video_path)
    #i=args.video_path.rfind('/')
    #videoname=args.video_path[-i:]
    splittedpath, videoname = os.path.split(args.video_path)
    print("Videoname: " + videoname)
    videoname = os.path.splitext(videoname)[0] + "_"
    if not os.path.exists(dst):
        os.mkdir(dst)
        print("Directory ", dst, " Created ")
    try:
        dev, pbs = openDevice(args.video_path.encode('utf-8'))
        pbs.set_repeat_enabled(True)
        if dev.has_sensor(openni2.SENSOR_COLOR):
            print("Color Stream found")
            processColor(dev, pbs, interval, dst, videoname)
        if dev.has_sensor(openni2.SENSOR_DEPTH):
            print("Depth Stream found")
            processDepth(dev, pbs, interval, dst, videoname)
        print("Done!")
    except Exception as ex:
        print(ex)
    openni2.unload()
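
openDevice() and the processColor/processDepth helpers are defined elsewhere in that project. A minimal sketch of what openDevice presumably does, based on the playback calls used here and in Example No. 26 (an assumption, not the original helper):

    def openDevice(video_path):
        # open a recorded .oni file and return the device together
        # with its playback-control interface
        openni2.initialize()
        dev = openni2.Device.open_file(video_path)
        pbs = openni2.PlaybackSupport(dev)
        return dev, pbs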
Example No. 15
def update():
    colors = (1.0, 1.0, 1.0, 1.0)

    # Grab a new depth frame
    depth_frame = depth_stream.read_frame()
    depth_frame_data = depth_frame.get_buffer_as_uint16()

    # Grab a new color frame
    color_frame = color_stream.read_frame()
    color_frame_data = color_frame.get_buffer_as_uint8()

    points = np.frombuffer(depth_frame_data, dtype=np.uint16)
    points.shape = (1, 480, 640)
    points = np.concatenate((points,points,points), axis=0)
    points = np.swapaxes(points, 0, 2)
    points = np.swapaxes(points, 0, 1)
    # (x,y,z)

    d = np.ndarray((depth_frame.height, depth_frame.width), dtype=np.uint16, buffer=depth_frame_data)  # /10000

    # collect an (N, 3) array of (x, y, z) points from the depth map
    xyz = []
    for row in range(depth_frame.height):
        for col in range(depth_frame.width):
            xyz.append((row, col, d[row][col]))
    out = np.asarray(xyz, dtype=np.float32)

    cv_img = cv2.convertScaleAbs(points, alpha=(255.0/65535.0))

    cv2.imshow("Image", cv_img)
    sp2.setData(pos=out, size=1, color=colors, pxMode=True)

    key = cv2.waitKey(1) & 0xFF
    if key == ord("c"):
        depth_stream.stop()
        color_stream.stop()
        openni2.unload()
        cv2.destroyAllWindows()
        sys.exit(0)
Example No. 16
def main():
    cap = cv2.VideoCapture(0)
    (ret, color_frame) = cap.read()
    openni2.initialize()
    device = openni2.Device.open_any()
    device.set_depth_color_sync_enabled(True)
    depth_stream = device.create_depth_stream()
    depth_stream.start()
    while True:
        print("#################")
        frame = depth_stream.read_frame()
        ret, color_frame = cap.read()
        frame_data = frame.get_buffer_as_uint8()
        position = openni2.convert_depth_to_world(depth_stream, 100, 100,
                                                  frame_data[10000])
        print(position)
        cv2.imshow("image", color_frame)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            cv2.destroyAllWindows()
            break
    depth_stream.stop()
    openni2.unload()
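
frame_data[10000] indexes a single byte of the uint8 view, which is not the depth value at pixel (100, 100). A hedged sketch that reads the frame as uint16 and passes the depth of the pixel actually being converted, assuming numpy is imported as np (a sketch, not the original code):

    depth = np.frombuffer(frame.get_buffer_as_uint16(), dtype=np.uint16)
    depth.shape = (frame.height, frame.width)
    # world coordinates (in mm) of pixel (x=100, y=100)
    wx, wy, wz = openni2.convert_depth_to_world(depth_stream, 100, 100,
                                                int(depth[100, 100]))
    print(wx, wy, wz)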
Example No. 17
        human_nodes = {node.name: node for node in bodypartslist}

        try:
            while True:

                frame = userTracker.read_frame()
                #logger.info("%s humans detected" % len(frame.users))

                humanparts = process_frame(frame)

                if humanparts is not None:
                    for name, transformation in humanparts.items():

                        node = human_nodes[name]
                        node.transformation = transformation
                        nodes.update(node)

        except KeyboardInterrupt:
            logger.info("Quitting...")

        ## Clean up
        for _, node in human_nodes.items():
            nodes.remove(node)

        nodes.remove(camera)

    nite2.unload()
    openni2.unload()
    logger.info("Bye!")
Example No. 19
 def __del__(self):
     openni2.unload()
def pegarMovimentos():
    global player1HandRightx
    global player1HandRighty
    global player1HandLeftx
    global player1HandLefty

    global player2HandRightx
    global player2HandRighty
    global player2HandLefty
    global player2HandLeftx

    global joints

    global leoteste

    #openni2.initialize("/home/leonardo/Downloads/OpenNI-Linux-x64-2.2/Redist")
    openni2.initialize("/home/leonardo/Tcc/OpenNI-Linux-x64-2.2/Redist")
    #nite2.initialize("/home/leonardo/Downloads/NiTE-Linux-x64-2.2/Redist")
    nite2.initialize("/home/leonardo/Tcc/NiTE-2.0.0/Redist")

    dev = openni2.Device.open_any()
    device_info = dev.get_device_info()
    try:
        userTracker = nite2.UserTracker(dev)
    except utils.NiteError as ne:
        print "entrou em exept"
        print(
            "Unable to start the NiTE human tracker. Check the error messages in the console. Model data (s.dat, h.dat...) might be inaccessible."
        )
        print(ne)
        sys.exit(-1)

    while True:
        frame = userTracker.read_frame()
        depth_frame = frame.get_depth_frame()

        if frame.users:
            i = 1
            for user in frame.users:
                user.id = i
                i += 1
                if user.skeleton.state == nite2.SkeletonState.NITE_SKELETON_TRACKED:
                    print(user.id)
                else:
                    print(user.id)
                    print(user.is_new())
                    print(user.is_visible())
                    print("Skeleton state: " + str(user.skeleton.state))
                    #print('leo' + str(user.id))
                    #if user.id == 0 and leoteste == 0:
                    #    print('is zero')
                    #    leoteste += 1
                    userTracker.start_skeleton_tracking(user.id)
                    print("Skeleton state: " + str(user.skeleton.state))
                    #	userTracker.start_skeleton_tracking(2)
                    #if user.is_new():
                    #	print("New human detected! ID: %d Calibrating...", user.id)
                    #	userTracker.start_skeleton_tracking(1)
                    #elif user.skeleton.state == nite2.SkeletonState.NITE_SKELETON_TRACKED:
                    #else:
                    #print user.skeleton.joints
                    #print user.id
                    #print str(user.is_visible())
                    preenchermovimento(user.id, user.skeleton.joints)
        else:
            print("No users")

    nite2.unload()
    openni2.unload()
Example No. 21
def update():
    #######################################################################
    global alpha, distPt, inspectPt, calc_plane, plane_first, plane_second, mgrid_dist, calc_grid
    global dump_depth_image, dump_csv

    if not calc_plane and not done:
        calc_plane = True
        plane_first = None
        plane_second = None
    '''
    Grab a frame, select the plane
    If the plane is selected, wait for key, then update frame and select the points
    If points are selected, get depth point from image
    '''

    # Grab a new depth frame
    depth_frame = depth_stream.read_frame()
    depth_frame_data = depth_frame.get_buffer_as_uint16()

    # Grab a new color frame
    color_frame = color_stream.read_frame()
    color_frame_data = color_frame.get_buffer_as_uint8()

    # Get the time of frame capture
    t = time.time()
    dt = datetime.datetime.fromtimestamp(t)

    # Put the depth frame into a numpy array and reshape it
    depth_img = np.frombuffer(depth_frame_data, dtype=np.uint16)
    depth_img.shape = (1, 480, 640)
    depth_img = np.concatenate((depth_img, depth_img, depth_img), axis=0)
    depth_img = np.swapaxes(depth_img, 0, 2)
    depth_img = np.swapaxes(depth_img, 0, 1)

    depth_image = np.ndarray((depth_frame.height, depth_frame.width),
                             dtype=np.uint16,
                             buffer=depth_frame_data)

    # Put the color frame into a numpy array, reshape it, and convert from bgr to rgb
    color_image = np.frombuffer(color_frame_data, dtype=np.uint8)
    color_image.shape = (480, 640, 3)
    color_image = color_image[..., ::-1]

    depth_image = np.fliplr(depth_image)
    depth_img = np.fliplr(depth_img)
    color_image = np.fliplr(color_image)

    color_img = color_image.copy()
    depth_img = depth_img.copy()

    image_color = color_image.copy()
    image_depth = depth_img.copy()

    shape_img = np.zeros_like(color_img)
    text_img = np.zeros_like(color_img)
    mask_img = np.zeros_like(color_img)

    drawing = True

    try:
        # click only
        if len(refPt) == 1 and not selecting:
            point = (refPt[0][0], refPt[0][1])
            point_distance = float(depth_img[refPt[0][1]][refPt[0][0]][0] /
                                   10000.0)
            # First point
            cv2.circle(shape_img, refPt[0], 3, (0, 0, 255), 1)
            cv2.putText(text_img,
                        "Point X,Y: {},{}".format(point[0], point[1]),
                        (10, 15), cv2.FONT_HERSHEY_SIMPLEX, .5, textcolor)
            cv2.putText(text_img,
                        "Point distance in Meters: {}".format(point_distance),
                        (10, 30), cv2.FONT_HERSHEY_SIMPLEX, .5, textcolor)
        # click+drag
        if selecting and not done:
            cv2.rectangle(shape_img, refPt[0], (mousex, mousey), (0, 255, 0),
                          2)

        if len(refPt) == 2 and done:
            mask = np.zeros((mask_img.shape[0], mask_img.shape[1]))
            cv2.rectangle(mask, refPt[0], refPt[1], 1, thickness=-1)
            mask = mask.astype(bool)

            mask_img[mask] = color_img[mask]

            cv2.rectangle(shape_img, refPt[0], refPt[1], (0, 255, 0), 2)
            left = min(refPt[0][0], refPt[1][0])
            top = min(refPt[0][1], refPt[1][1])
            right = max(refPt[0][0], refPt[1][0])
            bottom = max(refPt[0][1], refPt[1][1])

            #print "{}:{} {}:{}".format(top, left, bottom, right)
            roi = depth_image[
                top:bottom,
                left:right]  # because the image is 480x640 not 640x480

        # polygon
        if len(refPt) > 1 and not done:
            cv2.polylines(shape_img, np.array([refPt]), False, (255, 0, 0), 3)
            cv2.line(shape_img, refPt[-1], (mousex, mousey), (0, 255, 0), 3)

        if len(refPt) > 2 and done:
            cv2.polylines(shape_img, np.array([refPt]), True, (0, 255, 0), 3)

            mask = np.zeros((mask_img.shape[0], mask_img.shape[1]))
            points = np.array(refPt, dtype=np.int32)
            cv2.fillConvexPoly(mask, points, 1)
            mask = mask.astype(bool)

            mask_img[mask] = color_img[mask]

            left, top = tuple(points.min(axis=0))
            right, bottom = tuple(points.max(axis=0))

            cv2.rectangle(shape_img, (left, top), (right, bottom),
                          color=(255, 0, 0),
                          thickness=3)

            #print "{}:{} {}:{}".format(top, left, bottom, right)
            #roi_rect = depth_image[top:bottom,left:right] # because the image is 480x640 not 640x480
            roi_mask = np.zeros_like(depth_image)
            roi_mask[mask] = depth_image[mask]
            roi = roi_mask[top:bottom, left:right]

        if mgrid_dist and calc_grid:
            distPt = []

            count = (mgrid_count[0] + 1, mgrid_count[1] + 1)
            mn = (0, 0)
            mx = (roi.shape[1], roi.shape[0])

            X = np.linspace(mn[0],
                            mx[0],
                            count[0],
                            dtype=np.int16,
                            endpoint=False)
            Y = np.linspace(mn[1],
                            mx[1],
                            count[1],
                            dtype=np.int16,
                            endpoint=False)
            xv, yv = np.meshgrid(X, Y)

            # get rid of first row and column
            xv = xv[1:, 1:]
            yv = yv[1:, 1:]

            # at end, add offset from top, left to points:
            for j in range(mgrid_count[1]):  #row
                for i in range(mgrid_count[0]):  #column
                    distPt.append(
                        tuple(map(sum, zip((left, top),
                                           (xv[j, i], yv[j, i])))))

            calc_grid = False

        # roi stats
        if len(refPt) > 1 and done:
            valid = (roi > 0) & (roi < 65536.0)
            roi = np.where(valid, roi, np.nan)

            roi_mean = np.round(np.nanmean(roi) / 10000, 5)
            roi_max = np.round(np.nanmax(roi) / 10000, 5)
            roi_min = np.round(np.nanmin(roi) / 10000, 5)
            roi_std = np.round(np.nanstd(roi) / 10000, 5)

            #cv2.rectangle(color_img, (5,5), (250,65), (50, 50, 50, 0), -1)
            cv2.putText(text_img, "Mean of ROI: {}".format(roi_mean), (10, 15),
                        cv2.FONT_HERSHEY_SIMPLEX, .5, textcolor)
            cv2.putText(text_img, "Max of ROI: {}".format(roi_max), (10, 30),
                        cv2.FONT_HERSHEY_SIMPLEX, .5, textcolor)
            cv2.putText(text_img, "Min of ROI: {}".format(roi_min), (10, 45),
                        cv2.FONT_HERSHEY_SIMPLEX, .5, textcolor)
            cv2.putText(text_img,
                        "Standard Deviation of ROI: {}".format(roi_std),
                        (10, 60), cv2.FONT_HERSHEY_SIMPLEX, .5, textcolor)
            if debug_print:
                print("Mean of ROI: ", roi_mean)
                print("Max of ROI: ", roi_max)
                print("Min of ROI: ", roi_min)
                print("Standard Deviation of ROI: ", roi_std)

        if distPt is not None:
            for pt in distPt:
                cv2.circle(shape_img, pt, 3, (0, 0, 255, 255), 1)

        if inspectPt is not None:
            cv2.circle(shape_img, inspectPt, 3, (0, 255, 255), 1)

        if plane_first is not None:
            C = np.round(plane_first[1], 4)
            text_plane = '1 z = {}x^2 + {}y^2 + {}xy + {}x + {}y + {}'.format(
                C[4], C[5], C[3], C[1], C[2], C[0])
            cv2.putText(text_img, text_plane, (10, 75),
                        cv2.FONT_HERSHEY_SIMPLEX, .5, textcolor)

        if plane_second is not None:
            C = np.round(plane_second[1], 4)
            text_plane = '2 z = {}x^2 + {}y^2 + {}xy + {}x + {}y + {}'.format(
                C[4], C[5], C[3], C[1], C[2], C[0])
            cv2.putText(text_img, text_plane, (10, 90),
                        cv2.FONT_HERSHEY_SIMPLEX, .5, textcolor)

    except Exception as e:
        print(e)

    drawing = False

    min_depth = 4500.0
    max_depth = float(max(np.nanmax(depth_img), 65535.0 / 4.0))
    #alpha = float(min_depth/max_depth)
    depth_img = cv2.convertScaleAbs(depth_img, alpha=(255.0 / (65535.0 / 2.0)))

    cv2.addWeighted(mask_img, alpha, color_img, 1 - alpha, 0, color_img)
    cv2.addWeighted(shape_img, 1, color_img, .75, 0, color_img)
    #cv2.bitwise_and(shape_img>0, color_img, color_img)
    #cv2.addWeighted(mask_img, alpha, depth_img, 1-alpha, 0, depth_img)
    cv2.addWeighted(shape_img, 1, depth_img, .75, 0, depth_img)

    depth_pt_image = depth_image.copy()
    color_pt_img = color_img.copy()
    #depth_pt_image = np.flipud(depth_image)
    #depth_pt_image = np.fliplr(depth_pt_image)
    #color_pt_img = np.flipud(color_img)
    #color_pt_img = np.fliplr(color_pt_img)

    cloud, colors = point_cloud(depth_pt_image, color_pt_img)

    if openGL:
        # Calculate a dynamic vertex size based on window dimensions and camera's position - To become the "size" input for the scatterplot's setData() function.
        v_rate = 2.5  # Rate that vertex sizes will increase as zoom level increases (adjust this to any desired value).
        v_scale = np.float32(v_rate) / w.opts[
            'distance']  # Vertex size increases as the camera is "zoomed" towards center of view.
        v_offset = (
            w.geometry().width() / 1000
        )**2  # Vertex size is offset based on actual width of the viewport.
        size = v_scale + v_offset

    x = cloud[:, :, 0].flatten()
    y = cloud[:, :, 1].flatten()
    z = cloud[:, :, 2].flatten()

    N = max(x.shape)
    pos = np.empty((N, 3))
    pos[:, 0] = x
    pos[:, 1] = y
    pos[:, 2] = z

    try:
        if len(refPt) > 1 and done:
            #roi_color = color_img[top:bottom,left:right] # because the image is 480x640 not 640x480

            roi_cloud = np.zeros_like(cloud)
            roi_colors = np.zeros_like(colors)

            cloud_mask = mask.flatten()
            color_mask = cloud_mask

            #roi_cloud[cloud_mask] = cloud[cloud_mask]
            roi_colors[color_mask] = colors[color_mask]

            roi_x = np.zeros_like(x) * np.nan
            roi_y = np.zeros_like(y) * np.nan
            roi_z = np.zeros_like(z) * np.nan

            roi_x[cloud_mask] = x[cloud_mask]
            roi_y[cloud_mask] = y[cloud_mask]
            roi_z[cloud_mask] = z[cloud_mask]

            N = max(x.shape)
            roi_points = np.empty((N, 3))
            roi_points[:, 0] = roi_x
            roi_points[:, 1] = roi_y
            roi_points[:, 2] = roi_z

            if openGL:
                v2_rate = 2.5
                v2_scale = np.float32(v2_rate) / w2.opts[
                    'distance']  # Vertex size increases as the camera is "zoomed" towards center of view.
                v2_offset = (
                    w2.geometry().width() / 1000
                )**2  # Vertex size is offset based on actual width of the viewport.
                size2 = v2_scale + v2_offset

            roi_data = np.c_[roi_x, roi_y, roi_z]
            finite = np.isfinite(roi_data).all(axis=1)
            roi_data = roi_data[finite]

            roi_colors = roi_colors[finite]

            if openGL:
                sp3.setData(pos=roi_data,
                            color=roi_colors,
                            size=size2,
                            pxMode=True)

            plane = None

            # FIXME: outliers mess everything up
            #roi_data = reject_outliers(roi_data, 0.2)

            count = 50
            X = np.linspace(np.min(roi_data[:, 0]), np.max(roi_data[:, 0]),
                            count)
            Y = np.linspace(np.min(roi_data[:, 1]), np.max(roi_data[:, 1]),
                            count)
            Xx = X.reshape(count, 1)
            Yy = Y.reshape(1, count)

            calc = 2
            if calc == 1:  #linear plane
                A = np.c_[roi_data[:, 0], roi_data[:, 1],
                          np.ones(roi_data.shape[0])]
                C, _, _, _ = np.linalg.lstsq(A, roi_data[:, 2], rcond=None)  # coefficients

                # evaluate it on grid
                Z = C[0] * Xx + C[1] * Yy + C[2]
                if debug_print:
                    print("z = {}x + {}y + {}".format(np.round(C[0], 4),
                                                      np.round(C[1], 4),
                                                      np.round(C[2], 4)))

                z = C[0] * X + C[1] * Y + C[2]
                plane = (np.c_[X, Y, z], C)
            elif calc == 2:  #quadratic plane
                A = np.c_[np.ones(roi_data.shape[0]), roi_data[:, :2],
                          np.prod(roi_data[:, :2], axis=1), roi_data[:, :2]**2]
                C, _, _, _ = np.linalg.lstsq(A, roi_data[:, 2], rcond=None)  # coefficients

                # evaluate it on grid
                Z = C[4] * Xx**2. + C[5] * Yy**2. + C[3] * Xx * Yy + C[
                    1] * Xx + C[2] * Yy + C[0]
                if debug_print:
                    print(('z = {}x^2 + {}y^2 + {}xy + {}x + {}y + {}'.format(
                        C[4], C[5], C[3], C[1], C[2], C[0])))

                z = C[4] * X**2. + C[5] * Y**2. + C[3] * X * Y + C[1] * X + C[
                    2] * Y + C[0]
                plane = (np.c_[X, Y, z], C)
            elif calc == 3:  #lineplot
                # FIXME: This is not efficient, also want to update these line plots in real time to visualize deflection
                #for i in range(count):
                '''
                Loop through n segments with m resolution
                    take std of points from line to determine colors
                '''
                #y_resolution = 20
                #Y = np.linspace(np.min(roi_data[:,1]), np.max(roi_data[:,1]), y_resolution)
                #for i in range(y_resolution):
                #x = roi_data[:,0]
                #pts = np.vstack([x,yi,z]).transpose()
                #sp5.setData(pos = pts, color=pg.glColor((i, n*1.3)), width=(i+1)/10.)

            if openGL:
                if calc < 3:
                    if calc_plane:
                        sp4.setData(x=X, y=Y, z=Z)
                    else:
                        sp5.setData(x=X, y=Y, z=Z)

            if calc_plane:
                plane_first = plane
            else:
                plane_second = plane
            calc_plane = False

            if inspectPt is not None:
                distance = "NaN"
                # FIXME: find nearest cluster of points to selection
                pt = inspectPt
                p = cloud[pt[1]][pt[0]]
                distance = np.round(distance_to_plane(p, plane_first), 4)
                cv2.putText(text_img,
                            "Point Dist to Plane: {}".format(distance),
                            (10, 105), cv2.FONT_HERSHEY_SIMPLEX, .5, textcolor)

    except Exception as e:
        print(e)

    cv2.addWeighted(text_img, 1, color_img, 1, 0, color_img)

    img = np.concatenate((color_img, depth_img), 1)

    # Display the reshaped depth frame using OpenCV
    cv2.imshow("Depth Image", img)

    if openGL:
        sp2.setData(pos=pos, color=colors, size=size, pxMode=True)

    # Once you select the plane, save the raw images and composite
    # dump_to_csv(filename, time, args)
    # dump_image_to_file(image, name, time)
    if dump_depth_image == 1:
        dump_image_to_file(image_color, "image_color")
        dump_image_to_file(image_depth, "image_depth")
        dump_image_to_file(text_img, "image_text")
        dump_image_to_file(shape_img, "image_shape")
        dump_image_to_file(mask_img, "image_mask")
        dump_image_to_file(img, "main")
        if dump_csv:
            grid_data = list()
            grid_data.append(dt.strftime("%D"))
            grid_data.append(dt.strftime('%H:%M:%S.%f')[:-3])
            pt_top_left = cloud[top][left]
            pt_top_left_on_plane = point_on_plane(pt_top_left, plane_first)
            pts = np.asarray(distPt)
            print(pts)
            print(pts[0])
            print(pts.shape)
            pts = pts.reshape(mgrid_count[1], mgrid_count[0], 2)  # reshape to rows, columns
            for row in range(mgrid_count[1]):
                for col in range(mgrid_count[0]):
                    pt = pts[row][col]
                    print(pt)
                    # get point in real-world coordinates
                    pt_to_world = cloud[pt[1]][pt[0]]
                    # offset to plane
                    # We should get the point distance from the plane
                    pt_on_plane = point_on_plane(pt_to_world, plane_first)
                    pt_out = np.subtract(pt_on_plane, pt_top_left_on_plane)

                    #pt_out = pt

                    grid_data.append(pt_out)
            dump_to_csv("grid", grid_data, mode="a+")
        dump_depth_image = 0

    if dump_csv:
        raw_data = list()
        raw_data.append(dt.strftime("%D"))
        raw_data.append(dt.strftime('%H:%M:%S.%f')[:-3])
        # raw_data.append(dt.strftime("%T"))
        for pt in distPt:
            p = cloud[pt[1]][pt[0]]
            distance = np.round(distance_to_plane(p, plane_first), 4)
            if np.isnan(distance):
                distance = " "
            raw_data.append(distance)
        dump_to_csv("raw", raw_data)

    key = cv2.waitKey(1) & 0xFF
    if (key == 27 or key == ord('q') or key == ord('x') or key == ord("c")):
        depth_stream.stop()
        color_stream.stop()
        openni2.unload()
        cv2.destroyAllWindows()
        sys.exit(0)
Example No. 22
 def cleanup(self) -> None:
     """Unload library."""
     openni2.unload()
Example No. 23
 def __del__(self):
     nite2.unload()
     openni2.unload()
     del self.midiout
Example No. 24
 def shutdown(self):
     #Stop and Release
     #self.ir_stream.stop()
     self.depth_stream.stop()
     unload()
     return
def main(args):
    '''HYPER PARAMETER'''
    os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu
    '''MODEL LOADING'''
    MODEL = importlib.import_module(args.model)  # import the module that defines the model
    classifier = MODEL.get_model(args.num_joint * 3,
                                 normal_channel=args.normal).cuda()

    experiment_dir = Path('./log/regression/pointnet2_reg_msg')
    checkpoint = torch.load(
        str(experiment_dir) + '/checkpoints/best_model.pth')
    classifier.load_state_dict(checkpoint['model_state_dict'])
    classifier.eval()

    try:
        ''' open the camera '''
        device = getOrbbec()
        # create the depth stream
        depth_stream = device.create_depth_stream()
        depth_stream.set_mirroring_enabled(args.mirroring)
        depth_stream.set_video_mode(
            c_api.OniVideoMode(
                resolutionX=args.width,
                resolutionY=args.height,
                fps=args.fps,
                pixelFormat=c_api.OniPixelFormat.ONI_PIXEL_FORMAT_DEPTH_1_MM))
        # grab the UVC color camera
        cap = cv2.VideoCapture(0)

        # enable registration and depth/color frame sync
        device.set_image_registration_mode(True)
        device.set_depth_color_sync_enabled(True)
        depth_stream.start()

        while True:
            # read a frame
            frame_depth = depth_stream.read_frame()
            frame_depth_data = frame_depth.get_buffer_as_uint16()
            # depth_array is a numpy view of the frame, also usable for backend processing
            depth_array = np.ndarray((frame_depth.height, frame_depth.width),
                                     dtype=np.uint16,
                                     buffer=frame_depth_data)
            # rescale for display with OpenCV
            depth_uint8 = 1 - 250 / (depth_array)
            depth_uint8[depth_uint8 > 1] = 1
            depth_uint8[depth_uint8 < 0] = 0
            cv2.imshow('depth', depth_uint8)

            # read a color image
            _, color_array = cap.read()
            cv2.imshow('color', color_array)

            # process the color image color_array here

            # process the depth image depth_array here

            # keyboard listener
            if cv2.waitKey(1) == ord('q'):
                # close the windows and the camera
                depth_stream.stop()
                cap.release()
                cv2.destroyAllWindows()
                break

        # check that the device shut down (not strictly necessary)
        try:
            openni2.unload()
            print("Device unloaded \n")
        except Exception as ex:
            print("Device not unloaded: ", ex, "\n")

    except Exception:
        # no camera available: read a depth image from file instead
        depth_array = matplotlib.image.imread('test_depth_1.tif').astype(
            np.float32)
        # depth to UVD
        cloud_point = depth2uvd(depth_array)
        # normalize the point cloud
        cloud_point_normal, scale, centroid = pc_normalize(cloud_point)
        cloud_point_normal = np.reshape(cloud_point_normal, (1, -1, 3))
        cloud_point_normal = cloud_point_normal.transpose(0, 2, 1)
        # run prediction on the normalized cloud
        cloud_point_normal = torch.from_numpy(cloud_point_normal).cuda()
        pred, _ = classifier(cloud_point_normal)
        # undo the normalization on the prediction
        pred_reduction = pred.cpu().data.numpy()
        pred_reduction = pred_reduction * np.tile(
            scale, (args.num_joint * 3, 1)).transpose(1, 0)
        pred_reduction = pred_reduction + np.tile(centroid,
                                                  (1, args.num_joint))
        pred_reduction = np.reshape(pred_reduction, (-1, 3))

        displayPoint2(cloud_point, pred_reduction, 'bear')
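
pc_normalize() is assumed but not shown. A sketch following the unit-sphere normalization conventional in PointNet-style codebases, which matches the (points, scale, centroid) return used above (an assumption, not the original helper):

    def pc_normalize(pc):
        # centre the cloud on its centroid and scale it into the unit sphere
        centroid = np.mean(pc, axis=0)
        pc = pc - centroid
        scale = np.max(np.sqrt(np.sum(pc ** 2, axis=1)))
        return pc / scale, scale, centroid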
Example No. 26
def oni_converter(file_path):
    ''' Convert oni file to color and depth avi files.
        avi files will be saved in working directory
        as color.avi and depth.avi

        Parameters
        ----------
        file_path : str
            full path to oni file

    '''
    count = 0
    result = None

    PATH_TO_OPENNI2_SO_FILE = './OpenNI-Linux-x64-2.2/Redist'

    t1_start = process_time()
    openni2.initialize(PATH_TO_OPENNI2_SO_FILE)
    dev = openni2.Device.open_file(file_path.encode('utf-8'))
    c_stream, d_stream = dev.create_color_stream(), dev.create_depth_stream()
    openni2.PlaybackSupport(dev).set_speed(ctypes.c_float(0.0))
    d_stream.start()
    c_stream.start()

    c_images, d_images = [], []
    n_c_frames = openni2.PlaybackSupport(
        dev).get_number_of_frames(c_stream) - 1
    n_d_frames = openni2.PlaybackSupport(dev).get_number_of_frames(d_stream)
    if n_c_frames != n_d_frames:
        print('Different number of frames in the color and depth streams!\n'
              f'color - {n_c_frames},  depth - {n_d_frames}')
        sys.exit()

    for i in tqdm(range(n_c_frames)):

        # process depth stream
        d_frame = np.frombuffer(d_stream.read_frame().get_buffer_as_uint16(),
                                dtype=np.uint16).reshape(480, 640)
        # Correct the range. Depth images are 12 bits
        d_img = np.uint8(d_frame.astype(float) * 255 / (2**12 - 1))
        d_img = cv2.cvtColor(d_img, cv2.COLOR_GRAY2RGB)
        d_img = 255 - d_img
        d_images.append(d_img)
        if i == 0:
            continue
        # process color stream
        c_frame = c_stream.read_frame()
        c_frame_data = c_frame.get_buffer_as_uint8()
        c_img_bgr = np.frombuffer(c_frame_data, dtype=np.uint8)
        c_img_bgr.shape = (480, 640, 3)
        c_img_rgb = cv2.cvtColor(c_img_bgr, cv2.COLOR_BGR2RGB)
        c_images.append(c_img_rgb)
        count += 1
        # yield count
    openni2.unload()

    c_out = cv2.VideoWriter(
        'color.avi', cv2.VideoWriter_fourcc(*'DIVX'), 30, (640, 480))  # fps should be made a parameter
    d_out = cv2.VideoWriter(
        'depth.avi', cv2.VideoWriter_fourcc(*'DIVX'), 30, (640, 480))  # fps should be made a parameter

    for i in tqdm(range(len(c_images))):
        c_out.write(c_images[i])
        d_out.write(d_images[i])
        count += 1
        # yield count
    c_out.release()
    d_out.release()

    t1_stop = process_time()
    print(f"Process duration, seconds:, {round(t1_stop-t1_start, 3)}")
    return "ONI file has been processed successfully."
Example No. 27
 def stop(self):
     self._stop_color()
     self._stop_depth()
     openni2.unload()
Example No. 28
 def unload():
     openni2.unload()
Example No. 29
def main():

    ##########  this part will move into main_feeding.py  ##########
    s = 0
    done = False
    then = time.time()  #Time before the operations start

    cam_astras = astras()

    while not done:
        key = cv2.waitKey(1) & 255
        ## Read keystrokes
        if key == 27:  # terminate
            print("\tESC key detected!")
            done = True

        elif key == ord("c"):
            break
    ### rgb ###

    # if the mouth is detected, pass its (x, y) coordinate on to the depth map
        try:
            rgb, mouth1, mouth2 = cam_astras.get_rgb()
            x1 = (mouth1[0] + mouth2[0]) // 2
            y1 = (mouth1[1] + mouth2[1]) // 2
            print(x1, y1)
    # otherwise, if the mouth is not detected, fall back to this fixed coordinate
        except ValueError:
            rgb = cam_astras.get_rgb()
            x1 = 163
            y1 = 209
        # DEPTH
        #print("rgb shape",rgb.shape)
        dmap, d4d = cam_astras.get_depth()
        z1 = dmap[y1, x1]
        #z1 = dmap[x1, y1]  # alternative x, y indexing into the depth map
        #print('dmap shape',dmap.shape)
        canvas = np.hstack((d4d, rgb))
        # convert the pixel coordinate to mm
        ccor = cam_astras.depth_coordinate(x1, y1, z1)
        ## Distance map

        #   cv2.putText(rgb,'Center pixel is {} mm away'.format(dmap[x1, y1]),(30,80),cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0,0,255),2)#136.182
        #cv2.putText(rgb,'Center pixel is {} mm away'.format(z1),(30,80),cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0,0,255),2)#136.182

        ## Display the stream
        cv2.namedWindow('rgb', 0)
        cv2.resizeWindow('rgb', 1024, 640)
        cv2.moveWindow('rgb', 640, 320)
        cv2.imshow('rgb', canvas)  #rgb)

    now = time.time()  #Time after it finished

    print("It took: ", now - then, " seconds")

    cv2.destroyAllWindows()
    cam_astras.rgb_stream.stop()
    cam_astras.depth_stream.stop()
    openni2.unload()
    cap.release()
    print("END")
Example No. 30
 def close_capture_device(self):
     nite2.unload()
     openni2.unload()
Example No. 31
 def closeEvent(self, a0: QtGui.QCloseEvent):
     if self.is_streaming:
         self.close_streaming()
     openni2.unload()