Code example #1
def cap_and_show_depth_image():
    global zed
    global runtime_parameters
 
    i = 0
    key = ''

    left_img = core.PyMat()
    prev_img = core.PyMat()
    curr_img = core.PyMat()
    depth = core.PyMat()

    while key != 113: # means 'q'
        if zed.grab(runtime_parameters) == tp.PyERROR_CODE.PySUCCESS:
            zed.retrieve_image(left_img, sl.PyVIEW.PyVIEW_LEFT_GRAY)
            zed.retrieve_measure(depth,  sl.PyMEASURE.PyMEASURE_DEPTH)

            curr_img = left_img.get_data()

            # Skip matching on the first frame; there is no previous image yet
            if i > 0:
                detect_matching_from_images(prev_img, curr_img)

            key = cv2.waitKey(10)
            
            i = i + 1
            prev_img = copy.deepcopy(curr_img)

    zed.close()
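
Note: these snippets all target the legacy ZED Python wrapper (pyzed 2.x, the old stereolabs/zed-python package) and omit their imports. A minimal preamble they assume, sketched here for reference; module names follow the old package layout:

# Assumed import preamble for the legacy ZED Python wrapper (pyzed 2.x).
import pyzed.camera as zcam    # PyZEDCamera, PyInitParameters, PyRuntimeParameters
import pyzed.core as core      # PyMat, PyTransform, PyTranslation
import pyzed.defines as sl     # PyVIEW, PyMEASURE, PyUNIT, PyDEPTH_MODE, ...
import pyzed.types as tp       # PyERROR_CODE

import cv2                     # display and keyboard handling in most examples
import numpy as np             # individual examples also use copy, math, sys, os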
Code example #2
def main():
    zed, runtime_parameters = ZED_SVO()
    color_feat = True
    i = 0
    image = core.PyMat()
    depth = core.PyMat()
    point_cloud = core.PyMat()
    confidence = core.PyMat()
    classifier = joblib.load("Road_classifier_TEST.pkl")

    print("own data start \n")

    #for j in range(3032):
    #zed.grab(runtime_parameters)

    #zed.set_svo_position(3032)

    while i < 1:

        i = i + 1
        # A new image is available if grab() returns PySUCCESS
        if zed.grab(runtime_parameters) == tp.PyERROR_CODE.PySUCCESS:
            # Retrieve left image
            zed.retrieve_image(image, sl.PyVIEW.PyVIEW_LEFT)
            zed.retrieve_measure(point_cloud, sl.PyMEASURE.PyMEASURE_XYZRGBA)
            zed.retrieve_measure(depth, sl.PyMEASURE.PyMEASURE_DEPTH)

            #print(point_cloud.get_data()[240, 140, :], point_cloud.get_data()[1:, 1:, :].shape)

            feat_img = image.get_data()[:, :, :3]
            feat_col = feat_img[:704, :, :3]
            classes = classify(feat_col, point_cloud, classifier)
Code example #3
    def init_zed(self):
        # Create a PyZEDCamera object
        zed = zcam.PyZEDCamera()

        # Create a PyInitParameters object and set configuration parameters
        init_params = zcam.PyInitParameters()
        init_params.depth_mode = sl.PyDEPTH_MODE.PyDEPTH_MODE_QUALITY  # Use QUALITY depth mode
        init_params.coordinate_units = sl.PyUNIT.PyUNIT_METER  # Use meter units (for depth measurements)
        init_params.camera_fps = 30
        init_params.camera_resolution = sl.PyRESOLUTION.PyRESOLUTION_HD1080
        init_params.depth_minimum_distance = 0.3
        init_params.coordinate_system = sl.PyCOORDINATE_SYSTEM.PyCOORDINATE_SYSTEM_IMAGE

        # Open the camera
        err = zed.open(init_params)
        if err != tp.PyERROR_CODE.PySUCCESS:
            exit(1)

        # Create and set PyRuntimeParameters after opening the camera
        self.runtime_parameters = zcam.PyRuntimeParameters()
        self.runtime_parameters.sensing_mode = sl.PySENSING_MODE.PySENSING_MODE_FILL  # Use FILL sensing mode

        image = core.PyMat()
        depth = core.PyMat()
        point_cloud = core.PyMat()

        return zed, image, depth, point_cloud
Code example #4
def distance():
    # Create a PyZEDCamera object
    zed = zcam.PyZEDCamera()

    # Create a PyInitParameters object and set configuration parameters
    init_params = zcam.PyInitParameters()
    init_params.depth_mode = sl.PyDEPTH_MODE.PyDEPTH_MODE_PERFORMANCE  # Use PERFORMANCE depth mode
    init_params.coordinate_units = sl.PyUNIT.PyUNIT_MILLIMETER  # Use millimeter units (for depth measurements)

    # Open the camera
    err = zed.open(init_params)
    if err != tp.PyERROR_CODE.PySUCCESS:
        exit(1)

    # Create and set PyRuntimeParameters after opening the camera
    runtime_parameters = zcam.PyRuntimeParameters()
    runtime_parameters.sensing_mode = sl.PySENSING_MODE.PySENSING_MODE_STANDARD  # Use STANDARD sensing mode
    # runtime_parameters.sensing_mode = sl.PySENSING_MODE.PySENSING_MODE_LAST  # Alternative: LAST sensing mode

    # Capture 500 valid distance measurements, then stop
    i = 0
    image = core.PyMat()
    depth = core.PyMat()
    point_cloud = core.PyMat()

    while i < 500:
        # A new image is available if grab() returns PySUCCESS
        if zed.grab(runtime_parameters) == tp.PyERROR_CODE.PySUCCESS:
            # Retrieve left image
            zed.retrieve_image(image, sl.PyVIEW.PyVIEW_LEFT)
            # Retrieve depth map. Depth is aligned on the left image
            zed.retrieve_measure(depth, sl.PyMEASURE.PyMEASURE_DEPTH)
            # Retrieve colored point cloud. Point cloud is aligned on the left image.
            zed.retrieve_measure(point_cloud, sl.PyMEASURE.PyMEASURE_XYZRGBA)

            # Get and print distance value in mm at the center of the image
            # We measure the distance camera - object using Euclidean distance
            x = round(image.get_width() / 2)
            y = round(image.get_height() / 2)
            err, point_cloud_value = point_cloud.get_value(x, y)
            distance = math.sqrt(point_cloud_value[0] * point_cloud_value[0] +
                                 point_cloud_value[1] * point_cloud_value[1] +
                                 point_cloud_value[2] * point_cloud_value[2])

            if not np.isnan(distance) and not np.isinf(distance):
                distance = round(distance)
                print("Distance to Camera at ({0}, {1}): {2} mm\n".format(
                    x, y, distance))
                # Increment the loop
                i = i + 1
            else:
                print(
                    "Can't estimate distance at this position, move the camera\n"
                )
            sys.stdout.flush()

    # Close the camera

    zed.close()
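
Note: the three-term square root above is just the Euclidean norm of the X, Y, Z components. An equivalent one-liner with NumPy, assuming point_cloud_value is the 4-component value returned by get_value():

# Euclidean camera-to-point distance; same result as the explicit
# sqrt(x*x + y*y + z*z) in the example.
distance = np.linalg.norm(point_cloud_value[:3])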
Code example #5
def main():
    print("Running...")
    init = zcam.PyInitParameters()
    init.depth_mode = sl.PyDEPTH_MODE.PyDEPTH_MODE_QUALITY
    init.coordinate_units = sl.PyUNIT.PyUNIT_CENTIMETER

    cam = zcam.PyZEDCamera()
    if not cam.is_opened():
        print("Opening ZED Camera...")
    status = cam.open(init)
    if status != tp.PyERROR_CODE.PySUCCESS:
        print(repr(status))
        print('error opening camera!! abort abort')
        exit()

    runtime = zcam.PyRuntimeParameters()
    mat = core.PyMat()
    print_camera_information(cam)
    '''
    python OpenCV Code
        
    img = cv.imread('gradient.png',0)
    ret,thresh1 = cv.threshold(img,127,255,cv.THRESH_BINARY)
    ret,thresh2 = cv.threshold(img,127,255,cv.THRESH_BINARY_INV)
    ret,thresh3 = cv.threshold(img,127,255,cv.THRESH_TRUNC)
    ret,thresh4 = cv.threshold(img,127,255,cv.THRESH_TOZERO)
    ret,thresh5 = cv.threshold(img,127,255,cv.THRESH_TOZERO_INV)
    titles = ['Original Image','BINARY','BINARY_INV','TRUNC','TOZERO','TOZERO_INV']
    images = [img, thresh1, thresh2, thresh3, thresh4, thresh5]
    '''
    '''
    # need gray image for near / far 
    grayImage.allocate(kinect.width, kinect.height);
	grayThreshNear.allocate(kinect.width, kinect.height);
	grayThreshFar.allocate(kinect.width, kinect.height);
	'''

    key = ''
    while key != 113:  # for 'q' key
        err = cam.grab(runtime)
        if err == tp.PyERROR_CODE.PySUCCESS:
            cam.retrieve_image(mat, sl.PyVIEW.PyVIEW_DEPTH)

            # cv2.threshold returns a (retval, image) tuple; unpack it
            ret, thresholdMat = cv2.threshold(mat.get_data(), 128, 255,
                                              cv2.THRESH_BINARY)
            cv2.imshow("ZED", thresholdMat)
            key = cv2.waitKey(5)

            # settings(key, cam, runtime, mat)
        else:
            key = cv2.waitKey(5)
    cv2.destroyAllWindows()

    cam.close()
    print("\nFINISH")
Code example #6
    def __init__(self):
        """Basic initialization of the camera"""
        self.cam = zcam.PyZEDCamera()
        self.zed_param = None
        self.zedStatus = None
        self.runtime_param = None
        self._image = core.PyMat()
        self._depth = core.PyMat()
        self.image = None
        self.depth = None
        self.image_time = None
Code example #7
File: multi_camera.py  Project: sneheshs/zed-python
def main():
    print("Running...")
    init = zcam.PyInitParameters()
    init.camera_resolution = sl.PyRESOLUTION.PyRESOLUTION_HD720
    init.camera_linux_id = 0
    init.camera_fps = 30  # The framerate is lowered to avoid any USB3 bandwidth issues
    cam = zcam.PyZEDCamera()
    if not cam.is_opened():
        print("Opening ZED Camera 1...")
    status = cam.open(init)
    if status != tp.PyERROR_CODE.PySUCCESS:
        print(repr(status))
        exit()

    init.camera_linux_id = 1  # selection of the ZED ID
    cam2 = zcam.PyZEDCamera()
    if not cam2.is_opened():
        print("Opening ZED Camera 2...")
    status = cam2.open(init)
    if status != tp.PyERROR_CODE.PySUCCESS:
        print(repr(status))
        exit()

    runtime = zcam.PyRuntimeParameters()
    mat = core.PyMat()
    mat2 = core.PyMat()

    print_camera_information(cam)
    print_camera_information(cam2)

    key = ''
    while key != 113:  # for 'q' key
        # The computation could also be done in a thread, one for each camera
        err = cam.grab(runtime)
        if err == tp.PyERROR_CODE.PySUCCESS:
            cam.retrieve_image(mat, sl.PyVIEW.PyVIEW_LEFT)
            cv2.imshow("ZED 1", mat.get_data())

        err = cam2.grab(runtime)
        if err == tp.PyERROR_CODE.PySUCCESS:
            cam2.retrieve_image(mat2, sl.PyVIEW.PyVIEW_LEFT)
            cv2.imshow("ZED 2", mat2.get_data())

        key = cv2.waitKey(5)
    cv2.destroyAllWindows()

    cam.close()
    print("\nFINISH")
Code example #8
def main():
    print("Running...")
    init = zcam.PyInitParameters()
    cam = zcam.PyZEDCamera()
    if not cam.is_opened():
        print("Opening ZED Camera...")
    status = cam.open(init)
    if status != tp.PyERROR_CODE.PySUCCESS:
        print(repr(status))
        exit()

    runtime = zcam.PyRuntimeParameters()
    mat = core.PyMat()

    print_camera_information(cam)
    print_help()

    key = ''
    while key != 113:  # for 'q' key
        err = cam.grab(runtime)
        if err == tp.PyERROR_CODE.PySUCCESS:
            cam.retrieve_image(mat, sl.PyVIEW.PyVIEW_LEFT)
            cv2.imshow("ZED", mat.get_data())
            key = cv2.waitKey(5)
            settings(key, cam, runtime, mat)
        else:
            key = cv2.waitKey(5)
    cv2.destroyAllWindows()

    cam.close()
    print("\nFINISH")
Code example #9
def initZed(fps):
    # Create a PyZEDCamera object
    zed = zcam.PyZEDCamera()
    # Create a PyInitParameters object and set configuration parameters
    init_params = zcam.PyInitParameters()
    init_params.camera_resolution = sl.PyRESOLUTION.PyRESOLUTION_HD1080  # Use HD1080 video mode
    init_params.camera_fps = fps  # Set fps from the function argument

    # Use a right-handed Y-up coordinate system
    init_params.coordinate_system = sl.PyCOORDINATE_SYSTEM.PyCOORDINATE_SYSTEM_RIGHT_HANDED_Y_UP
    init_params.coordinate_units = sl.PyUNIT.PyUNIT_METER  # Set units in meters

    # Open the camera
    err = zed.open(init_params)
    if err != tp.PyERROR_CODE.PySUCCESS:
        exit(1)

    # Enable positional tracking with default parameters
    # First create a PyTransform object for the PyTrackingParameters object
    py_transform = core.PyTransform()
    tracking_parameters = zcam.PyTrackingParameters(init_pos=py_transform)
    err = zed.enable_tracking(tracking_parameters)
    if err != tp.PyERROR_CODE.PySUCCESS:
        exit(1)

    image = core.PyMat()
    zed_pose = zcam.PyPose()
    return zed, image, zed_pose
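
Note: once tracking is enabled, the returned zed_pose would typically be filled frame by frame. A hedged sketch of that read-out, assuming the legacy get_position()/PyTranslation API of pyzed 2.x:

# Sketch: read the world-frame camera pose each frame (assumed legacy API).
zed, image, zed_pose = initZed(30)
py_translation = core.PyTranslation()

if zed.grab(zcam.PyRuntimeParameters()) == tp.PyERROR_CODE.PySUCCESS:
    zed.get_position(zed_pose, sl.PyREFERENCE_FRAME.PyREFERENCE_FRAME_WORLD)
    translation = zed_pose.get_translation(py_translation).get()
    tx, ty, tz = (round(translation[k], 3) for k in range(3))
    print("Translation: tx: {0}, ty: {1}, tz: {2}".format(tx, ty, tz))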
Code example #10
def main():
    # Create a PyZEDCamera object
    zed = zcam.PyZEDCamera()

    # Create a PyInitParameters object and set configuration parameters
    init_params = zcam.PyInitParameters()
    init_params.camera_resolution = sl.PyRESOLUTION.PyRESOLUTION_HD1080  # Use HD1080 video mode
    init_params.camera_fps = 30  # Set fps at 30

    # Open the camera
    err = zed.open(init_params)
    if err != tp.PyERROR_CODE.PySUCCESS:
        exit(1)

    # Capture 50 frames and stop
    i = 0
    image = core.PyMat()
    while i < 50:
        # Grab an image, a PyRuntimeParameters object must be given to grab()
        if zed.grab(zcam.PyRuntimeParameters()) == tp.PyERROR_CODE.PySUCCESS:
            # A new image is available if grab() returns PySUCCESS
            zed.retrieve_image(image, sl.PyVIEW.PyVIEW_LEFT)
            timestamp = zed.get_timestamp(sl.PyTIME_REFERENCE.PyTIME_REFERENCE_CURRENT)  # Get the timestamp at the time the image was captured
            print("Image resolution: {0} x {1} || Image timestamp: {2}\n".format(image.get_width(), image.get_height(),
                  timestamp))
            i = i + 1

    # Close the camera
    zed.close()
Code example #11
    def __init__(self, model_path):
        print('[INFO] Capture: Setting up camera.')
        # Create a PyZEDCamera object
        self.zed = zcam.PyZEDCamera()

        # Create a PyInitParameters object and set configuration parameters
        init_params = zcam.PyInitParameters()
        init_params.depth_mode = sl.PyDEPTH_MODE.PyDEPTH_MODE_PERFORMANCE  # Use PERFORMANCE depth mode
        init_params.coordinate_units = sl.PyUNIT.PyUNIT_MILLIMETER  # Use millimeter units (for depth measurements)

        # Open the camera
        err = self.zed.open(init_params)
        if err != tp.PyERROR_CODE.PySUCCESS:
            exit(1)

        # Create and set PyRuntimeParameters after opening the camera
        self.runtime_parameters = zcam.PyRuntimeParameters()
        self.runtime_parameters.sensing_mode = sl.PySENSING_MODE.PySENSING_MODE_STANDARD  # Use STANDARD sensing mode

        self.image = core.PyMat()
        print('[INFO] Capture: Camera setup complete.')
        print('[INFO] Capture: Setting up model...')

        # load model once
        self.model = load_model(model_path)  #TODO hard code model
        print('[INFO] Capture: Model loaded successfully.')
Code example #12
def main():
    print("Running...")
    init = zcam.PyInitParameters()
    init.depth_mode = sl.PyDEPTH_MODE.PyDEPTH_MODE_PERFORMANCE
    init.coordinate_units = sl.PyUNIT.PyUNIT_CENTIMETER

    cam = zcam.PyZEDCamera()
    if not cam.is_opened():
        print("Opening ZED Camera...")
    status = cam.open(init)
    if status != tp.PyERROR_CODE.PySUCCESS:
        print(repr(status))
        print('error opening camera!! abort abort')
        exit()

    runtime = zcam.PyRuntimeParameters()
    mat = core.PyMat()

    print_camera_information(cam)

    key = ''
    while key != 113:  # for 'q' key
        err = cam.grab(runtime)
        if err == tp.PyERROR_CODE.PySUCCESS:
            cam.retrieve_image(mat, sl.PyVIEW.PyVIEW_DEPTH)

            cv2.imshow("ZED", mat.get_data())
            key = cv2.waitKey(5)
            # settings(key, cam, runtime, mat)
        else:
            key = cv2.waitKey(5)
    cv2.destroyAllWindows()

    cam.close()
    print("\nFINISH")
Code example #13
    def __init__(self, inputQueue, outputQueue, visualize):
        self.inputQueue = inputQueue
        self.outputQueue = outputQueue
        #self.outputQueueImages = outputQueueImages
        self.visualize = visualize
        self.xlist = []
        self.ylist = []
        if (self.visualize):
            plt.figure(1)
        init = zcam.PyInitParameters(
            camera_resolution=sl.PyRESOLUTION.PyRESOLUTION_VGA,
            depth_mode=sl.PyDEPTH_MODE.PyDEPTH_MODE_PERFORMANCE,
            coordinate_units=sl.PyUNIT.PyUNIT_METER,
            coordinate_system=sl.PyCOORDINATE_SYSTEM.PyCOORDINATE_SYSTEM_RIGHT_HANDED_Y_UP,
            sdk_verbose=False)
        cam = zcam.PyZEDCamera()
        self.image = core.PyMat()
        status = cam.open(init)
        if status != tp.PyERROR_CODE.PySUCCESS:
            print(repr(status))
            exit()

        transform = core.PyTransform()
        tracking_params = zcam.PyTrackingParameters(transform)
        cam.enable_tracking(tracking_params)

        runtime = zcam.PyRuntimeParameters()
        camera_pose = zcam.PyPose()

        py_translation = core.PyTranslation()
        print("Starting ZEDPositioning")
        self.start_zed(cam, runtime, camera_pose, py_translation)
Code example #14
def capture_thread_func(svo_filepath=None):
    global image_np_global, depth_np_global, exit_signal, new_data

    zed = zcam.PyZEDCamera()

    # Create a PyInitParameters object and set configuration parameters
    init_params = zcam.PyInitParameters()
    init_params.camera_resolution = sl.PyRESOLUTION.PyRESOLUTION_HD720  # Use HD720 video mode
    init_params.camera_fps = 30  # Set fps at 30
    init_params.depth_mode = sl.PyDEPTH_MODE.PyDEPTH_MODE_PERFORMANCE
    init_params.coordinate_units = sl.PyUNIT.PyUNIT_METER
    init_params.svo_real_time_mode = False
    if svo_filepath is not None:
        init_params.svo_input_filename = svo_filepath

    # Open the camera
    err = zed.open(init_params)
    print(err)
    while err != tp.PyERROR_CODE.PySUCCESS:
        err = zed.open(init_params)
        print(err)
        sleep(1)

    image_mat = core.PyMat()
    depth_mat = core.PyMat()
    runtime_parameters = zcam.PyRuntimeParameters()

    while not exit_signal:
        if zed.grab(runtime_parameters) == tp.PyERROR_CODE.PySUCCESS:
            zed.retrieve_image(image_mat,
                               sl.PyVIEW.PyVIEW_LEFT,
                               width=width,
                               height=height)
            zed.retrieve_measure(depth_mat,
                                 sl.PyMEASURE.PyMEASURE_XYZRGBA,
                                 width=width,
                                 height=height)
            lock.acquire()
            image_np_global = load_image_into_numpy_array(image_mat)
            depth_np_global = load_depth_into_numpy_array(depth_mat)
            new_data = True
            lock.release()

        sleep(0.01)

    zed.close()
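
Note: a matching consumer for the globals this thread fills, sketched under the same assumptions (lock, new_data, exit_signal, image_np_global and depth_np_global come from the surrounding module; process is a hypothetical per-frame handler):

def consumer_loop():
    # Copy out the latest frame under the lock, then process it outside
    global new_data
    while not exit_signal:
        lock.acquire()
        if new_data:
            image_np = image_np_global.copy()
            depth_np = depth_np_global.copy()
            new_data = False
            lock.release()
            process(image_np, depth_np)   # hypothetical handler
        else:
            lock.release()
            sleep(0.001)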
Code example #15
def detect_video(yolo):

    zed = zcam.PyZEDCamera()

    # Create a PyInitParameters object and set configuration parameters
    init_params = zcam.PyInitParameters()
    init_params.camera_resolution = sl.PyRESOLUTION.PyRESOLUTION_HD720
    init_params.camera_fps = 60

    init_params.depth_mode = sl.PyDEPTH_MODE.PyDEPTH_MODE_PERFORMANCE  # Use PERFORMANCE depth mode
    init_params.coordinate_units = sl.PyUNIT.PyUNIT_MILLIMETER  # Use millimeter units (for depth measurements)

    # Open the camera
    err = zed.open(init_params)
    if err != tp.PyERROR_CODE.PySUCCESS:
        exit(1)

    frame = core.PyMat()

    # Create and set PyRuntimeParameters after opening the camera
    runtime_parameters = zcam.PyRuntimeParameters()
    runtime_parameters.sensing_mode = sl.PySENSING_MODE.PySENSING_MODE_STANDARD  # Use STANDARD sensing mode

    accum_time = 0
    curr_fps = 0
    fps = "FPS: ??"
    prev_time = timer()

    while True:
        if zed.grab(runtime_parameters) == tp.PyERROR_CODE.PySUCCESS:

            # A new image is available if grab() returns PySUCCESS
            zed.retrieve_image(frame, sl.PyVIEW.PyVIEW_LEFT)
            image = Image.fromarray(frame.get_data())

            image, voc_detections = detect_img(yolo, image.copy(), video=True)

            result = np.asarray(image)
            curr_time = timer()
            exec_time = curr_time - prev_time
            prev_time = curr_time
            accum_time = accum_time + exec_time
            curr_fps = curr_fps + 1
            if accum_time > 1:
                accum_time = accum_time - 1
                fps = "FPS: " + str(curr_fps)
                curr_fps = 0
            cv2.putText(result,
                        text=fps,
                        org=(3, 15),
                        fontFace=cv2.FONT_HERSHEY_SIMPLEX,
                        fontScale=0.50,
                        color=(255, 0, 0),
                        thickness=2)
            cv2.namedWindow("result", cv2.WINDOW_NORMAL)
            cv2.imshow("result", result)
            if cv2.waitKey(1) & 0xFF == ord('q'):
                break
Code example #16
def cap_and_show_depth_image():
    global zed
    global runtime_parameters

    i = 0
    key = ''

    limage = core.PyMat()
    rimage = core.PyMat()
    depth = core.PyMat()
    point_cloud = core.PyMat()

    while key != 113:  # means 'q'
        if zed.grab(runtime_parameters) == tp.PyERROR_CODE.PySUCCESS:
            zed.retrieve_image(limage, sl.PyVIEW.PyVIEW_LEFT)
            zed.retrieve_image(rimage, sl.PyVIEW.PyVIEW_RIGHT)
            zed.retrieve_measure(depth, sl.PyMEASURE.PyMEASURE_DEPTH)
            zed.retrieve_measure(point_cloud, sl.PyMEASURE.PyMEASURE_XYZRGBA)

            cv2.imshow("left image", limage.get_data())
            cv2.imshow("right image", rimage.get_data())
            cv2.imshow("depth image", depth.get_data())

            key = cv2.waitKey(10)

            x = round(limage.get_width() / 2.0)
            y = round(limage.get_height() / 2.0)
            err, point_cloud_value = point_cloud.get_value(x, y)

            distance = math.sqrt(point_cloud_value[0] * point_cloud_value[0] +
                                 point_cloud_value[1] * point_cloud_value[1] +
                                 point_cloud_value[2] * point_cloud_value[2])

            if np.isnan(distance) or np.isinf(distance):
                print(
                    "Can't estimate distance at this position, move the camera\n"
                )
            else:
                distance = round(distance)
                print("Distance to Camera at ({0}, {1}): {2}mm\n".format(
                    x, y, distance))
                i = i + 1
            sys.stdout.flush()

    zed.close()
Code example #17
    def __init__(self):
        super(ZEDCamera, self).__init__()
        # Create a PyZEDCamera object
        self.zed = zcam.PyZEDCamera()
        self.camera_settings_table = self.get_camera_settings_table()
        # runtime_parameters.sensing_mode = sl.PySENSING_MODE.PySENSING_MODE_FILL
        self.runtime_parameters = zcam.PyRuntimeParameters()
        self.camera_settings_value = None
        self.image_mat = core.PyMat()  # the image mat, useful for all capturing
Code example #18
    def __enter__(self):
        totalAttempts = 5
        for attempt in range(totalAttempts):
            if self._openCamera():
                self.runtime = zcam.PyRuntimeParameters()

                varNames = []
                if self.depth:
                    self.mat_depth = core.PyMat()
                    varNames.append('depth')
                if self.color:
                    self.mat_color = core.PyMat()
                    varNames.append('color')
                self.Album = namedtuple('Album', varNames)

                self.videoStream = ZEDVideoStream(self, self.inQ, self.outQ)
                self.videoStream.start()
                return self
            else:
                sleep(5)
        raise IOError('Camera could not be opened, please try power cycling the ZED')
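
Note: the matching __exit__ is not shown in this example. A minimal sketch of what it would need to undo; attribute names beyond those visible in __enter__ (self.cam, the stream's stop()/join() methods) are assumptions about the surrounding class:

    def __exit__(self, exc_type, exc_value, traceback):
        # Stop the capture thread started in __enter__ (assumed stop()/join()
        # on ZEDVideoStream), then release the camera handle that
        # _openCamera() is assumed to have stored on self.cam.
        self.videoStream.stop()
        self.videoStream.join()
        self.cam.close()
        return False  # do not suppress exceptions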
Code example #19
    def __init__(self):
        # Create a PyZEDCamera object
        self.zed = zcam.PyZEDCamera()

        # Create a PyInitParameters object and set configuration parameters
        self.init_params = zcam.PyInitParameters()
        self.init_params.depth_mode = sl.PyDEPTH_MODE.PyDEPTH_MODE_PERFORMANCE  # Use PERFORMANCE depth mode
        self.init_params.coordinate_units = sl.PyUNIT.PyUNIT_MILLIMETER  # Use millimeter units (for depth measurements)

        # Open the camera
        err = self.zed.open(self.init_params)
        if err != tp.PyERROR_CODE.PySUCCESS:
            exit(1)

        # Create and set PyRuntimeParameters after opening the camera
        self.runtime_parameters = zcam.PyRuntimeParameters()
        self.runtime_parameters.sensing_mode = sl.PySENSING_MODE.PySENSING_MODE_STANDARD  # Use STANDARD sensing mode

        self.image = core.PyMat()
        self.depth_for_display = core.PyMat()

        print('Current mode: Capture {} images as fast as possible.\nMerge the images.\nSave to pickle files.'.format(num_images))
Code example #20
def main():
    init = zcam.PyInitParameters()
    cam  = zcam.PyZEDCamera()
    if not cam.is_opened():
        print("Opening ZED Camera...")

    init.camera_resolution = sl.PyRESOLUTION.PyRESOLUTION_HD1080
    status = cam.open(init)
    cam.set_camera_settings(sl.PyCAMERA_SETTINGS.PyCAMERA_SETTINGS_AUTO_WHITEBALANCE, 1, False)
    if status != tp.PyERROR_CODE.PySUCCESS:
        print(repr(status))
        exit()

    runtime = zcam.PyRuntimeParameters()
    mat = core.PyMat()

    # make save directory of images
    cwd = os.getcwd()
    dir_name = datetime.datetime.now().strftime("%Y-%m-%d-%H-%M-%S")
    dir_path = os.path.join(cwd, dir_name)

    if not os.path.exists(dir_path):
        os.makedirs(dir_path)

    key = ''
    count = 0
    while key != 113:
        err = cam.grab(runtime)
        if err == tp.PyERROR_CODE.PySUCCESS:
            cam.retrieve_image(mat, sl.PyVIEW.PyVIEW_LEFT)
            
            img = mat.get_data()
            cv2.imshow("ZED", img)

            file_name = "image_{0:08d}.jpg".format(count)
            cv2.imwrite(os.path.join(dir_path, file_name), img)
            count = count + 1
                        
            key = cv2.waitKey(10)
        else:
            key = cv2.waitKey(10)
    cv2.destroyAllWindows()

    cam.close()
    print("\nFINISH")
Code example #21
File: convert_svo.py  Project: clarkmatt/zed-python
def main():

    if len(sys.argv) != 2:
        print("Please specify path to .svo file.")
        exit()

    filepath = sys.argv[1]
    print("Reading SVO file: {0}".format(filepath))

    init = zcam.PyInitParameters(svo_input_filename=filepath,
                                 svo_real_time_mode=False)
    cam = zcam.PyZEDCamera()
    status = cam.open(init)
    if status != tp.PyERROR_CODE.PySUCCESS:
        print(repr(status))
        exit()

    runtime = zcam.PyRuntimeParameters()
    mat = core.PyMat()

    #if a directory doesn't already exist, create one
    save_dir = "".join(filepath.split('.')[:-1])
    if not os.path.exists(save_dir):
        os.makedirs(save_dir)
    img_num = 0

    key = ''
    #print("  Save the current image:     s")
    #print("  Quit the video reading:     q\n")
    while key != 113:  # for 'q' key
        img_path = save_dir + "/{:04d}.png".format(img_num)
        err = cam.grab(runtime)
        if err == tp.PyERROR_CODE.PySUCCESS:
            cam.retrieve_image(mat)
            cv2.imwrite(img_path, mat.get_data())
            #cv2.imshow("ZED", mat.get_data())
            key = cv2.waitKey(1)
            saving_image(key, mat)
        else:
            #key = cv2.waitKey(1)
            key = 113

    cv2.destroyAllWindows()

    #print_camera_information(cam)
    #saving_depth(cam)
    #saving_point_cloud(cam)

    cam.close()
    print("\nFINISH")
Code example #22
def main():

    # sys.argv[1] = '/home/ogai1234/Desktop/la.svo'
    # if len(sys.argv) != 2:
    # 	print("Please specify path to .svo file.")
    # 	exit()

    # filepath = sys.argv[1]
    # global n
    # n = 1
    # Change this name (the .svo file on the desktop)
    filename = ['3318']

    for i in range(len(filename)):
        print("Reading SVO file: {0}".format(filename[i]))

        os.mkdir('/home/ogai1234/Desktop/' + filename[i])

        init = zcam.PyInitParameters(svo_input_filename=(filename[i] + '.svo'),
                                     svo_real_time_mode=False)
        cam = zcam.PyZEDCamera()
        status = cam.open(init)
        if status != tp.PyERROR_CODE.PySUCCESS:
            print(repr(status))
            exit()

        runtime = zcam.PyRuntimeParameters()
        mat = core.PyMat()
        m = '2018_' + filename[i] + '_'
        j = 0
        key = ''
        while True:
            if cam.grab(runtime) == tp.PyERROR_CODE.PySUCCESS:
                cam.retrieve_image(mat)
                cv2.imshow("ZED", mat.get_data())
                if j % 10 == 0:
                    filepath = '/home/ogai1234/Desktop/' + str(
                        filename[i]) + '/' + m + str(int(math.floor(
                            j / 10))) + '.jpg'
                    mat.write(filepath)
                key = cv2.waitKey(1)
                j = j + 1
            else:
                break  # stop at the end of the SVO file
        cam.close()
        print("\nFINISH")
Code example #23
def main():
    if len(sys.argv) != 2:
        print("Please specify path to .svo file.")
        exit()

    filepath = sys.argv[1]
    print("Reading SVO file: {0}".format(filepath))

    init = zcam.PyInitParameters(svo_input_filename=filepath, camera_image_flip=False, svo_real_time_mode=False)

    cam = zcam.PyZEDCamera()
    status = cam.open(init)
    zed_cuda_ctx = PyCudaContext()

    zed_cuda_ctx.pop_ctx()
    predictor = predict.Prediction()
    zed_cuda_ctx.push_ctx()

    if status != tp.PyERROR_CODE.PySUCCESS:
        print(repr(status))
        exit()

    fourcc = cv2.VideoWriter_fourcc(*'mp4v')  # Be sure to use lower case
    out = cv2.VideoWriter('output.mp4', fourcc, 10.0, (1920, 1080))

    runtime = zcam.PyRuntimeParameters()
    mat = core.PyMat()
    while True:  # for 'q' key
        err = cam.grab(runtime)
        if err == tp.PyERROR_CODE.PySUCCESS:
            cam.retrieve_image(mat)
            npimg = mat.get_data().astype(np.float32)[:,:,:3]
            zed_cuda_ctx.pop_ctx()
            prediction = predictor.infer(npimg, overlay=True)
            zed_cuda_ctx.push_ctx()
            out_image = cv2.cvtColor(np.array(prediction), cv2.COLOR_RGB2BGR)
            cv2.imshow('Perception', out_image)
            out.write(out_image)
            key = cv2.waitKey(1)
            if key == 113:  # 'q' to quit
                break
        else:
            break  # stop at the end of the SVO file
    cv2.destroyAllWindows()
    out.release()
    cam.close()
    print("\nFINISH")
Code example #24
def main():
    print("\n\n\n############################################################")
    print("Welcome to the Artificial Intelligence Lecture 2: Perception")
    print("############################################################ \n")

    print("\n **** Codeexample 2: Image Capturing  *** \n")


    print("\n1. Opening the camera by creating and Camera Object and initializing the camera:")
    # Create a PyZEDCamera object
    zed = zcam.PyZEDCamera()

    # Create a PyInitParameters object and set configuration parameters
    init_params = zcam.PyInitParameters()
    init_params.camera_resolution = sl.PyRESOLUTION.PyRESOLUTION_HD1080  # Use HD1080 video mode (options: VGA, HD720, HD1080, HD2K)
    init_params.camera_fps = 30  # Set fps at 30 (options: 15, 30, 60, 100)

    # Open the camera
    err = zed.open(init_params)
    if err != tp.PyERROR_CODE.PySUCCESS:
        exit(1)

    # Capture one frame and stop
    i = 0
    image = core.PyMat()
    runtime_parameters = zcam.PyRuntimeParameters()
    while i < 1:
        # Grab an image, a PyRuntimeParameters object must be given to grab()
        if zed.grab(runtime_parameters) == tp.PyERROR_CODE.PySUCCESS:
            # A new image is available if grab() returns PySUCCESS
            zed.retrieve_image(image, sl.PyVIEW.PyVIEW_LEFT)
            timestamp = zed.get_timestamp(sl.PyTIME_REFERENCE.PyTIME_REFERENCE_CURRENT)  # Get the timestamp at the time the image was captured
            print("Image resolution: {0} x {1} || Image timestamp: {2}\n".format(image.get_width(), image.get_height(),
                  timestamp))
            cv2.imwrite('Pictures/Image_1.png', image.get_data())

            i = i + 1


    # Close the camera
    zed.close()
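
Note: cv2.imwrite does not create directories and silently returns False when the target folder is missing; a small guard, sketched for the 'Pictures/' path used above:

import os

# Ensure the output directory exists before the capture loop writes to it.
os.makedirs('Pictures', exist_ok=True)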
Code example #25
def main():

    if len(sys.argv) != 2:
        print("Please specify path to .svo file.")
        exit()

    filepath = sys.argv[1]
    print("Reading SVO file: {0}".format(filepath))

    init = zcam.PyInitParameters(svo_input_filename=filepath)
    cam = zcam.PyZEDCamera()
    status = cam.open(init)
    if status != tp.PyERROR_CODE.PySUCCESS:
        print(repr(status))
        exit()

    runtime = zcam.PyRuntimeParameters()
    mat = core.PyMat()

    key = ''
    print("  Save the current image:     s")
    print("  Quit the video reading:     q\n")
    while key != 113:  # for 'q' key
        err = cam.grab(runtime)
        if err == tp.PyERROR_CODE.PySUCCESS:
            cam.retrieve_image(mat)
            cv2.imshow("ZED", mat.get_data())
            key = cv2.waitKey(1)
            saving_image(key, mat)
        else:
            key = cv2.waitKey(1)
    cv2.destroyAllWindows()

    print_camera_information(cam)
    saving_depth(cam)
    saving_point_cloud(cam)

    cam.close()
    print("\nFINISH")
Code example #26
File: read_svo.py  Project: tianyolanda/somecodes
def main():

	# sys.argv[1] = '/home/ogai1234/Desktop/la.svo'
	if len(sys.argv) != 2:
		print("Please specify path to .svo file.")
		exit()

	filepath = sys.argv[1]
	# global n
	n = 1


	print("Reading SVO file: {0}".format(filepath))

	init = zcam.PyInitParameters(svo_input_filename=filepath,svo_real_time_mode=False)
	cam = zcam.PyZEDCamera()
	status = cam.open(init)
	if status != tp.PyERROR_CODE.PySUCCESS:
		print(repr(status))
		exit()

	runtime = zcam.PyRuntimeParameters()
	mat = core.PyMat()
	m = '2018_0751_'
	i = 0
	key = ''
	while (key != 113) and (i < 21000):
		if cam.grab(runtime) == tp.PyERROR_CODE.PySUCCESS:
			cam.retrieve_image(mat)
			cv2.imshow("ZED", mat.get_data())
			if i % 10 == 0:
				filepath = '/home/ogai1234/Desktop/p/' + m + str(int(math.floor(i / 10))) + '.jpg'
				mat.write(filepath)
			key = cv2.waitKey(1)
			n = n + 1
			i = i + 1
	cam.close()
	print("\nFINISH")
def main():

    zed = zcam.PyZEDCamera()
    # Create a PyInitParameters object and set configuration parameters
    filepath = sys.argv[1]
    init_params = zcam.PyInitParameters(svo_input_filename=filepath,
                                        svo_real_time_mode=False)
    init_params.depth_mode = sl.PyDEPTH_MODE.PyDEPTH_MODE_PERFORMANCE  # Use PERFORMANCE depth mode
    init_params.coordinate_units = sl.PyUNIT.PyUNIT_MILLIMETER  # Use millimeter units (for depth measurements)

    # Open the camera
    err = zed.open(init_params)
    if err != tp.PyERROR_CODE.PySUCCESS:
        exit(1)

    # Create and set PyRuntimeParameters after opening the camera
    runtime_parameters = zcam.PyRuntimeParameters()
    runtime_parameters.sensing_mode = sl.PySENSING_MODE.PySENSING_MODE_STANDARD  # Use STANDARD sensing mode

    color_feat = True
    i = 0
    image = core.PyMat()
    depth = core.PyMat()
    point_cloud = core.PyMat()
    confidence = core.PyMat()
    features = []
    images = []
    depthroad = []
    classifier = joblib.load("Road_classifier.pkl")

    print("own data start \n")

    #for j in range(3032):
    #zed.grab(runtime_parameters)

    zed.set_svo_position(3032)

    while i < 1:

        i = i + 1
        # A new image is available if grab() returns PySUCCESS
        if zed.grab(runtime_parameters) == tp.PyERROR_CODE.PySUCCESS:
            # Retrieve left image
            zed.retrieve_image(image, sl.PyVIEW.PyVIEW_LEFT)
            zed.retrieve_measure(point_cloud, sl.PyMEASURE.PyMEASURE_XYZRGBA)
            zed.retrieve_measure(depth, sl.PyMEASURE.PyMEASURE_DEPTH)
            zed.retrieve_measure(confidence, sl.PyMEASURE.PyMEASURE_CONFIDENCE)

            feat_img = image.get_data()[:, :, :3]
            #feat_img = cv2.cvtColor(feat_img, cv2.COLOR_BGR2YCR_CB)  # YCR_CB / RGB / HSV
            feat_col = feat_img[:704, :, :3]

            haralick = rr.compute_haralick(feat_col)
            print(haralick, haralick[..., 2:].shape)
            haralick = haralick.reshape((-1, 5))

            feature = rr.get_features(feat_col, color_feat)
            feature = np.concatenate((feature, haralick), 1)

            depthroad = 1 - g.is_road(point_cloud.get_data()[:704, :, :3])

            features.append(feature)
            data = np.array(features)[0, :, :]

            classes = classifier.predict(data)
            classes = classes.reshape(704, 1280)
            classes = np.logical_and(classes, depthroad)

            rr.show(classes, feat_col, feat_img,
                    point_cloud.get_data()[:704, :, :3])
Code example #28
def main():
    print("\n############################################################")
    print("Welcome to the Artificial Intelligence Lecture 2: Perception")
    print("############################################################ \n")

    print("\n **** Codeexample 8: Hough Line Detection  *** \n")

    print("\n1. We will acces the camera and  see the camera Paramters:")

    # Create a PyZEDCamera object
    cam = zcam.PyZEDCamera()

    # Create a PyInitParameters object and set configuration parameters
    init = zcam.PyInitParameters()

    # Open the camera
    if not cam.is_opened():
        print("Opening Camera...")
    status = cam.open(init)
    if status != tp.PyERROR_CODE.PySUCCESS:
        print(repr(status))
        exit()

    runtime = zcam.PyRuntimeParameters()

    #Define the image as ZED Python Wrapper format
    image = core.PyMat()

    # Extract the current camera parameters and information
    print_camera_information(cam)

    print(
        "\n2. Now we will grayscale and blur the image and run Canny edge detection"
    )
    key = ''
    while key != 113:  # for 'q' key
        err = cam.grab(runtime)
        if err == tp.PyERROR_CODE.PySUCCESS:
            cam.retrieve_image(image, sl.PyVIEW.PyVIEW_LEFT)

            # Process the image
            gray_image = cv2.cvtColor(image.get_data(), cv2.COLOR_BGR2GRAY)
            blur = cv2.GaussianBlur(gray_image, (7, 7), 1.5)
            edges = cv2.Canny(blur, 0, 23)

            #Detect the Hough Lines

            rho = 1
            theta = np.pi / 180
            threshold = 50
            min_line_length = 30
            max_line_gap = 10
            line_image = np.zeros_like(gray_image)  # creating a blank to draw lines on

            # Run Hough on edge detected image
            lines = cv2.HoughLinesP(edges, rho, theta, threshold, np.array([]),
                                    min_line_length, max_line_gap)

            # Iterate over the output "lines" and draw lines on the blank;
            # HoughLinesP returns None when no lines are found
            if lines is not None:
                for line in lines:
                    for x1, y1, x2, y2 in line:
                        cv2.line(line_image, (x1, y1), (x2, y2), (255, 0, 0), 10)

            # Create a "color" binary image to combine with line image
            color_edges = edges

            # Draw the lines on the edge image
            houghlines = cv2.addWeighted(color_edges, 0.8, line_image, 1, 0)

            #rescale the images
            gray_image2 = cv2.resize(gray_image, (0, 0), None, .70, .70)
            houghlines2 = cv2.resize(houghlines, (0, 0), None, .70, .70)
            edges2 = cv2.resize(edges, (0, 0), None, .70, .70)

            # Put all images in one numpy stack
            numpy_horizontal = np.hstack((gray_image2, edges2, houghlines2))

            cv2.imshow("ZED Live Stream", numpy_horizontal)

            key = cv2.waitKey(5)
        else:
            key = cv2.waitKey(5)

    cv2.destroyAllWindows()

    cam.close()
    print("\nFINISH")
Code example #29
def detect_video(yolo, video_path, output_path=""):
    #import socket
    import cv2
    #BUFSIZE=1024
    #client=socket.socket(socket.AF_INET,socket.SOCK_DGRAM)
    #ip_port=('192.168.11.174',8888)
    accum_time = 0
    curr_fps = 0
    fps = "FPS: ??"
    prev_time = timer()
    zed = zcam.PyZEDCamera()

    init_params = zcam.PyInitParameters()
    init_params.camera_resolution = sl.PyRESOLUTION.PyRESOLUTION_VGA
    init_params.coordinate_units = sl.PyUNIT.PyUNIT_MILLIMETER

    err = zed.open(init_params)
    if err != tp.PyERROR_CODE.PySUCCESS:
        exit(-1)

    runtime_params = zcam.PyRuntimeParameters()
    runtime_params.sensing_mode = sl.PySENSING_MODE.PySENSING_MODE_STANDARD

    image = core.PyMat()
    depth = core.PyMat()
    point_cloud = core.PyMat()
    #count=0

    while True:
        if zed.grab(runtime_params) == tp.PyERROR_CODE.PySUCCESS:
            zed.retrieve_image(image, sl.PyVIEW.PyVIEW_LEFT)
            #zed.retrieve_measure(depth,sl.PyMEASURE.PyMEASURE_DEPTH)
            #zed.retrieve_measure(point_cloud,sl.PyMEASURE.PyMEASURE_XYZRGBA)
            #frame=np.asarray(frame,image.size,image.type)
            print(type(image))
            #frame0=core.slMat2cvMat(image)
            frame0 = image.get_data()
            #cloud_img=point_cloud.get_data()

            print(type(frame0))
            frame = Image.fromarray(frame0)
            imageA = yolo.detect_image(frame)
            #x=round(image.get_width()/2)
            #y=round(image.get_height()/2)
            global locX, locY
            #err,point_cloud_value=point_cloud.get_value(locX,locY)
            #distance=math.sqrt(point_cloud_value[0]*point_cloud_value[0]+point_cloud_value[1]*point_cloud_value[1]+point_cloud_value[2]*point_cloud_value[2])
            #msg=StringIO.StringIO()
            """if not np.isnan(distance) and not np.isinf(distance):
                distance=round(distance)
                global Class
                if Class==0:
                    sprintf(msg,"Distance to Blue Cone at(%d,%d):%d mm\n",locX,locY,distance)
                elif Class==1:
                    sprintf(msg,"Distance to Red Cone at(%d, %d):%d mm\n",locX,locY,distance)
                else:
                    sprintf(msg,"Distance to Yellow Cone at(%d, %d):%d mm\n",locX,locY,distance)
                #print(msg.getvalue())
                #msg1=buf+str(count)
                #client.sendto(msg1.encode('utf-8'),ip_port)
                #data,server_addr=client.recvfrom(BUFSIZE)
                #count+=1
                
            else:
                print("Can't estimate distance at this position!\n")
            sys.stdout.flush()"""

            #imageL,locXL,locYL=yolo.detect_image(imageL)
            resultA = np.asarray(imageA)
            curr_time = timer()
            exec_time = curr_time - prev_time
            prev_time = curr_time
            accum_time = accum_time + exec_time
            curr_fps = curr_fps + 1
            if accum_time > 1:
                accum_time = accum_time - 1
                fps = "FPS: " + str(curr_fps)
                curr_fps = 0
            cv2.putText(resultA,
                        text=fps,
                        org=(3, 15),
                        fontFace=cv2.FONT_HERSHEY_SIMPLEX,
                        fontScale=0.50,
                        color=(255, 0, 0),
                        thickness=2)
            cv2.namedWindow("result_win", cv2.WINDOW_NORMAL)
            cv2.imshow("result_win", resultA)
            #printf("the")
            # if isOutput:
            #     out.write(resultL)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    yolo.close_session()
Code example #30
def main():

    global fps_time

    if len(sys.argv) != 2:
        print("Please specify path to .svo file.")
        exit()

    filepath = sys.argv[1]
    ite = loadall("pickle.dat")
    print("Reading SVO file: {0}".format(filepath))
    t = time.time()
    init = zcam.PyInitParameters(svo_input_filename=filepath,
                                 svo_real_time_mode=False)
    init.depth_mode = sl.PyDEPTH_MODE.PyDEPTH_MODE_QUALITY
    cam = zcam.PyZEDCamera()
    status = cam.open(init)
    if status != tp.PyERROR_CODE.PySUCCESS:
        print(repr(status))
        exit()

    runtime = zcam.PyRuntimeParameters()
    mat = core.PyMat()
    depth = core.PyMat()
    print('Initialization of svo took ', time.time() - t, ' seconds')
    key = ''
    graph_path = "./models/graph/mobilenet_thin/graph_opt.pb"  #"./models/graph/cmu/graph_opt.pb"
    target = (432, 368)  #(656,368) (432,368) (1312,736)
    e = TfPoseEstimator(graph_path, target)
    nbf = 0
    nbp = 0
    print("  Save the current image:     s")
    print("  Quit the video reading:     q\n")
    while key != 113:  # for 'q' key
        t = time.time()
        err = cam.grab(runtime)
        if err == tp.PyERROR_CODE.PySUCCESS:
            retrieve_start = time.time()
            cam.retrieve_image(mat)
            cam.retrieve_measure(depth, sl.PyMEASURE.PyMEASURE_XYZRGBA)
            image = mat.get_data()
            retrieve_end = time.time()
            print('Retrieving of svo data took ',
                  retrieve_end - retrieve_start, ' seconds')
            pt = np.array(depth.get_data()[:, :, 0:3], dtype=np.float64)
            print(pt.shape, image.shape)
            pt[np.logical_not(np.isfinite(pt))] = 0.0
            nbf += 1
            pose_start = time.time()
            humans = e.inference(np.array(image[:, :, 0:3]))
            image_h, image_w = image.shape[:2]
            pose_end = time.time()
            print("Inference took", pose_end - pose_start, 'seconds')

            for pid, human in enumerate(humans):
                for kid, bdp in enumerate(human.body_parts.values()):
                    #print(bdp)
                    #print(kid, bdp.x, bdp.y, bdp.score)
                    if (bdp.score > 5.0):
                        # '+ 0.5' rounds to the nearest pixel; the original
                        # '0 / 5' (which is 0) looks like a typo for 0.5
                        print((int(bdp.x * image_w + 0.5),
                               int(bdp.y * image_h + 0.5)))
                        print(pt[int(bdp.y * image_h + 0.5),
                                 int(bdp.x * image_w + 0.5)])
                        nbp += 1
                        cv2.circle(image, (int(bdp.x * image_w + 0.5),
                                           int(bdp.y * image_h + 0.5)),
                                   5, (255, 255, 255),
                                   thickness=5,
                                   lineType=8,
                                   shift=0)
                        #coord_uv[pid,kid,:]=np.array([int(bdp.x*image_w+0.5),int(bdp.y*image_h+0/5)])
                        #coord_vis[pid,kid]=bdp.score/10

            image = TfPoseEstimator.draw_humans(image, humans, imgcopy=False)
            cv2.putText(image, "FPS: %f" % (1.0 / (time.time() - fps_time)),
                        (10, 10), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0),
                        2)

            #print(time.time()-t)
            cv2.imshow('tf-pose-estimation result', image)
            fps_time = time.time()
            key = cv2.waitKey(1)
            saving_image(key, mat)
            #time.sleep(0.033)
        else:
            key = cv2.waitKey(1)
    cv2.destroyAllWindows()

    print(nbp / nbf, (nbp / nbf) / 18)
    #saving_depth(cam)
    #saving_point_cloud(cam)

    cam.close()
    print("\nFINISH")