import cv2
import pyzed.sl as sl


def main():
    zed = sl.Camera()
    point_cloud = sl.Mat()
    # Set configuration parameters
    input_type = sl.InputType()
    input_type.set_from_svo_file('/home/ianmcvann/Documents/ZED/recent.svo')
    init_params = sl.InitParameters(input_t=input_type,
                                    svo_real_time_mode=False)
    # init_params = sl.InitParameters()
    init_params.depth_mode = sl.DEPTH_MODE.ULTRA  # Use ULTRA depth mode
    init_params.coordinate_units = sl.UNIT.INCH  # Use inch units (for depth measurements)
    init_params.camera_resolution = sl.RESOLUTION.VGA
    init_params.camera_fps = 100
    init_params.depth_maximum_distance = 400
    # Open the camera
    err = zed.open(init_params)
    if err != sl.ERROR_CODE.SUCCESS:
        exit(-1)
    image = sl.Mat()
    zed.set_camera_settings(sl.VIDEO_SETTINGS.EXPOSURE, 15)
    runtime_parameters = sl.RuntimeParameters()
    while True:
        if zed.grab(runtime_parameters) == sl.ERROR_CODE.SUCCESS:
            zed.retrieve_measure(point_cloud, sl.MEASURE.XYZRGBA)
            # A new image is available if grab() returns SUCCESS
            zed.retrieve_image(image, sl.VIEW.RIGHT)  # Retrieve the right image
            frame = image.get_data()
            process_pic(frame)
        else:
            # End of SVO file (or grab error): leave the loop so cleanup runs
            break
    cv2.destroyAllWindows()
Example #2
def main():
    if len(sys.argv) != 2:
        exit()

    filepath = sys.argv[1]
    print("Reading SVO file: {0}".format(filepath))

    input_type = sl.InputType()
    input_type.set_from_svo_file(filepath)
    init = sl.InitParameters(input_t=input_type, svo_real_time_mode=False)
    #init.depth_minimum_distance = 1
    init.depth_mode = sl.DEPTH_MODE.ULTRA
    cam = sl.Camera()
    status = cam.open(init)

    if status != sl.ERROR_CODE.SUCCESS:
        print(repr(status))
        exit()

    runtime = sl.RuntimeParameters()
    runtime.sensing_mode = sl.SENSING_MODE.FILL

    mat = sl.Mat()

    while cv2.waitKey(1) != ord("q"):  # for 'q' key
        err = cam.grab(runtime)
        if err == sl.ERROR_CODE.SUCCESS:
            #cam.retrieve_image(mat)
            cam.retrieve_image(mat, sl.VIEW.DEPTH)
            cv2.imshow("ZED", mat.get_data())

    cv2.destroyAllWindows()
    cam.close()
    print("\nFINISH")
Example #3
def main():
    if len(sys.argv) != 4:
        exit()

    filepath = sys.argv[1]
    rgb = sys.argv[2]
    depth = sys.argv[3]
    print("Reading SVO file: {0}".format(filepath))

    input_type = sl.InputType()
    input_type.set_from_svo_file(filepath)
    init = sl.InitParameters(input_t=input_type, svo_real_time_mode=False)
    init.depth_mode = sl.DEPTH_MODE.ULTRA
    cam = sl.Camera()
    status = cam.open(init)

    if status != sl.ERROR_CODE.SUCCESS:
        print(repr(status))
        exit()

    runtime = sl.RuntimeParameters()
    runtime.sensing_mode = sl.SENSING_MODE.FILL

    mat_img = sl.Mat()
    mat_depth = sl.Mat()
    fr = 0
    cont = 0
    while cv2.waitKey(1) != ord("q"):  # for 'q' key
        err = cam.grab(runtime)
        if err == sl.ERROR_CODE.SUCCESS:
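            # Save only every third grabbed frame (cont cycles 0, 1, 2)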
            if cont == 2:
                cam.retrieve_image(mat_img)
                cam.retrieve_image(mat_depth, sl.VIEW.DEPTH)
                im = mat_img.get_data()
                d = mat_depth.get_data()
                cv2.imshow("ZED", im)
                base = os.path.splitext(os.path.basename(filepath))[0]
                cv2.imwrite(os.path.join(rgb, "{0}-{1}.png".format(base, fr)), im)
                cv2.imwrite(os.path.join(depth, "{0}-{1}.png".format(base, fr)), d)
                fr = fr + 1
                cont = 0
            else:
                cont = cont + 1

    cv2.destroyAllWindows()
    cam.close()
    print("\nFINISH")
Example #4
def main():
    print("Running...")

    input_type = sl.InputType()
    input_type.set_from_camera_id(0)

    init = sl.InitParameters()
    init.input = input_type
    init.camera_resolution = sl.RESOLUTION.HD720
    #init.camera_linux_id = 0
    init.camera_fps = 30  # The framerate is lowered to avoid any USB3 bandwidth issues
    cam = sl.Camera()
    if not cam.is_opened():
        print("Opening ZED Camera 1...")
    status = cam.open(init)
    if status != sl.ERROR_CODE.SUCCESS:
        print(repr(status))
        exit()

    input_type.set_from_camera_id(1)
    init.input = input_type
    cam2 = sl.Camera()
    if not cam2.is_opened():
        print("Opening ZED Camera 2...")
    status = cam2.open(init)
    if status != sl.ERROR_CODE.SUCCESS:
        print(repr(status))
        exit()

    runtime = sl.RuntimeParameters()
    mat = sl.Mat()
    mat2 = sl.Mat()

    print_camera_information(cam)
    print_camera_information(cam2)

    key = ''
    while key != 113:  # for 'q' key
        # The computation could also be done in a thread, one for each camera
        err = cam.grab(runtime)
        if err == sl.ERROR_CODE.SUCCESS:
            cam.retrieve_image(mat, sl.VIEW.LEFT)
            cv2.imshow("ZED 1", mat.get_data())

        err = cam2.grab(runtime)
        if err == sl.ERROR_CODE.SUCCESS:
            cam2.retrieve_image(mat2, sl.VIEW.LEFT)
            cv2.imshow("ZED 2", mat2.get_data())

        key = cv2.waitKey(5)
    cv2.destroyAllWindows()

    cam.close()
    cam2.close()
    print("\nFINISH")
Example #5
def start_read_svo(filepath):

    for file in os.listdir(filepath):
        if not file.endswith(".svo"):
            continue
        file = os.path.join(filepath, file)
        print(file)
        print(file.rsplit(".", 1)[0])

        input_type = sl.InputType()
        input_type.set_from_svo_file(file)
        init = sl.InitParameters(input_t=input_type, svo_real_time_mode=False)
        init.depth_mode = sl.DEPTH_MODE.ULTRA
        cam = sl.Camera()
        status = cam.open(init)
        if status != sl.ERROR_CODE.SUCCESS:
            print(repr(status))
            exit()

        runtime = sl.RuntimeParameters()
        runtime.sensing_mode = sl.SENSING_MODE.FILL
        # runtime.sensing_mode = sl.SENSING_MODE.STANDARD
        mat = sl.Mat()

        key = ''
        i = 0
        print("  Save the current image:     s")
        print("  Quit the video reading:     q\n")
        while key != 113:  # for 'q' key
            err = cam.grab(runtime)
            if err == sl.ERROR_CODE.SUCCESS:
                cam.retrieve_image(mat)
                rgb = mat.get_data()
                # cv2.imshow("RGB", rgb)
                depthimg = get_depth_img(cam, mat)
                # cv2.imshow("Dep", depth)
                key = cv2.waitKey(1)
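                # Only export frames between 10% and 75% of the SVO to skip the recording's start and end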
                nb_frames = cam.get_svo_number_of_frames()
                if nb_frames * 0.1 < i < nb_frames * 0.75:
                    t = str(time.time()).split(".")[0]
                    cv2.imwrite(
                        file.rsplit(".", 1)[0] + "_rgb_" + str(i) + "_" + t +
                        ".png", rgb)
                    cv2.imwrite(
                        file.rsplit(".", 1)[0] + "_depth_" + str(i) + "_" + t +
                        ".jpg", depthimg)
                i += 1
            else:
                break
        cv2.destroyAllWindows()

        print_camera_information(cam)
        cam.close()
        print("\nFINISH")
Example #6
def main():

    if len(sys.argv) != 2:
        print("Please specify path to .svo file.")
        exit()

    filepath = sys.argv[1]
    print("Reading SVO file: {0}".format(filepath))

    input_type = sl.InputType()
    input_type.set_from_svo_file(filepath)
    init = sl.InitParameters(input_t=input_type, svo_real_time_mode=False)
    init.camera_resolution = sl.RESOLUTION.HD1080
    init.depth_mode = sl.DEPTH_MODE.ULTRA
    cam = sl.Camera()
    status = cam.open(init)
    if status != sl.ERROR_CODE.SUCCESS:
        print(repr(status))
        exit()

    runtime = sl.RuntimeParameters()
    runtime.sensing_mode = sl.SENSING_MODE.FILL
    mat = sl.Mat()

    key = ''
    print("  Save the current image:     s")
    print("  Quit the video reading:     q\n")
    while key != 113:  # for 'q' key
        err = cam.grab(runtime)
        if err == sl.ERROR_CODE.SUCCESS:
            cam.retrieve_image(mat)
            cv2.imshow("RGB", mat.get_data())
            # cam.retrieve_image(mat, sl.VIEW.DEPTH)
            depthimg = get_depth_img(cam, mat)
            
            cv2.imshow("Dep", depthimg)
            key = cv2.waitKey(1)
            saving_image(key, mat)
            if key == ord('0'):  # pause playback until '1' is pressed
                while True:
                    key = cv2.waitKey(1)
                    if key == ord('1'):
                        break
        else:
            key = cv2.waitKey(1)
    cv2.destroyAllWindows()

    print_camera_information(cam)
    cam.close()
    print("\nFINISH")
Example #7
def main():
    zed = sl.Camera()

    # Set configuration parameters
    input_type = sl.InputType()
    if len(sys.argv) >= 2:
        input_type.set_from_svo_file(sys.argv[1])

    init = sl.InitParameters(input_t=input_type)
    init.camera_resolution = sl.RESOLUTION.HD1080
    init.coordinate_units = sl.UNIT.METER
    runtime = sl.RuntimeParameters()
    runtime.sensing_mode = sl.SENSING_MODE.FILL

    # Open the camera
    err = zed.open(init)
    if err != sl.ERROR_CODE.SUCCESS:
        print(repr(err))
        zed.close()
        exit(1)

    # Prepare new image size to retrieve quarter-resolution images
    image_size = zed.get_camera_information().camera_resolution
    image_size.width = image_size.width // 4
    image_size.height = image_size.height // 4

    # Set video codec; the writer frame size must match the retrieved image size (1920/4 x 1080/4)
    codec = cv2.VideoWriter_fourcc(*'mp4v')
    out = cv2.VideoWriter('selly_vison3.mp4', codec, 30.0, (480, 270))

    # Declare your sl.Mat matrices
    image_zed = sl.Mat()
    while True:
        start_time = time.time()
        err = zed.grab(runtime)
        if err == sl.ERROR_CODE.SUCCESS:
            zed.retrieve_image(image_zed, sl.VIEW.LEFT, sl.MEM.CPU, image_size)
            image_frame = image_zed.get_data()[:, :, :3]

            cv2.imshow("Image2", image_frame)
            out.write(image_frame)

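            # Exit when the space bar (key code 32) is pressed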
            if cv2.waitKey(1) & 0xFF == 32:
                break
            print("fps : ", round(1 / (time.time() - start_time), 2))

    out.release()
    cv2.destroyAllWindows()
    zed.close()
    print("\nFINISH")
Example #8
def main():

    if len(sys.argv) != 2:
        print("Please specify path to .svo file.")
        exit()

    filepath = sys.argv[1]
    print("Reading SVO file: {0}".format(filepath))

    cam = sl.Camera()
    input_type = sl.InputType()
    input_type.set_from_svo_file(filepath)
    init = sl.InitParameters()
    init.input = input_type
    status = cam.open(init)
    if status != sl.ERROR_CODE.SUCCESS:
        print(repr(status))
        exit()

    runtime = sl.RuntimeParameters()
    spatial = sl.SpatialMappingParameters()
    transform = sl.Transform()
    tracking = sl.PositionalTrackingParameters(transform)

    cam.enable_positional_tracking(tracking)
    cam.enable_spatial_mapping(spatial)

    pymesh = sl.Mesh()
    print("Processing...")
    for i in range(200):
        cam.grab(runtime)
        cam.request_spatial_map_async()

    cam.extract_whole_spatial_map(pymesh)
    cam.disable_positional_tracking()
    cam.disable_spatial_mapping()

    filter_params = sl.MeshFilterParameters()
    filter_params.set(sl.MESH_FILTER.HIGH)
    print("Filtering params : {0}.".format(pymesh.filter(filter_params)))

    apply_texture = pymesh.apply_texture(sl.MESH_TEXTURE_FORMAT.RGBA)
    print("Applying texture : {0}.".format(apply_texture))
    print_mesh_information(pymesh, apply_texture)

    save_filter(filter_params)
    save_mesh(pymesh)
    cam.close()
    print("\nFINISH")
Example #9
def main():

    if len(sys.argv) != 2:
        print("Please specify path to .svo file.")
        exit()

    filepath = sys.argv[1]
    print("Reading SVO file: {0}".format(filepath))

    input_type = sl.InputType()
    input_type.set_from_svo_file(filepath)
    init = sl.InitParameters(input_t=input_type, svo_real_time_mode=False)
    cam = sl.Camera()
    status = cam.open(init)
    if status != sl.ERROR_CODE.SUCCESS:
        print(repr(status))
        exit()

    runtime = sl.RuntimeParameters()
    runtime.sensing_mode = sl.SENSING_MODE.FILL

    # Declare sl.Mat matrices
    image_size = cam.get_camera_information().camera_resolution
    image_size.width = image_size.width // 2
    image_size.height = image_size.height // 2
    image_zed = sl.Mat(image_size.width, image_size.height, sl.MAT_TYPE.U8_C4)
    depth_image_zed = sl.Mat(image_size.width, image_size.height,
                             sl.MAT_TYPE.U8_C4)

    key = ''
    print("  Save the current image:     s")
    print("  Quit the video reading:     q\n")
    while key != 113:  # for 'q' key
        err = cam.grab(runtime)
        if err == sl.ERROR_CODE.SUCCESS:
            cam.retrieve_image(image_zed, resolution=image_size)
            cv2.imshow("ZED Image", image_zed.get_data())
            cam.retrieve_image(depth_image_zed, sl.VIEW.DEPTH, sl.MEM.CPU,
                               image_size)
            cv2.imshow("ZED Depth", depth_image_zed.get_data())
            key = cv2.waitKey(1)
            saving_image(key, image_zed)
        else:
            key = cv2.waitKey(1)
    cv2.destroyAllWindows()
    cam.close()

    print("\nFINISH")
Example #10
def capture_thread_func(svo_filepath=None):
    global image_np_global, depth_np_global, exit_signal, new_data

    zed = sl.Camera()

    # Create an InitParameters object and set configuration parameters
    input_type = sl.InputType()
    if svo_filepath is not None:
        input_type.set_from_svo_file(svo_filepath)

    init_params = sl.InitParameters(input_t=input_type)
    init_params.camera_resolution = sl.RESOLUTION.HD720
    init_params.camera_fps = 30
    init_params.depth_mode = sl.DEPTH_MODE.PERFORMANCE
    init_params.coordinate_units = sl.UNIT.METER
    init_params.svo_real_time_mode = False

    # Open the camera
    err = zed.open(init_params)
    print(err)
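    # Keep retrying until the camera (or SVO) opens successfully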
    while err != sl.ERROR_CODE.SUCCESS:
        err = zed.open(init_params)
        print(err)
        sleep(1)

    image_mat = sl.Mat()
    depth_mat = sl.Mat()
    runtime_parameters = sl.RuntimeParameters()
    image_size = sl.Resolution(width, height)

    while not exit_signal:
        if zed.grab(runtime_parameters) == sl.ERROR_CODE.SUCCESS:
            zed.retrieve_image(image_mat, sl.VIEW.LEFT, resolution=image_size)
            zed.retrieve_measure(depth_mat,
                                 sl.MEASURE.XYZRGBA,
                                 resolution=image_size)
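            # Copy the frames into the shared numpy arrays under the lock so the consumer thread sees a consistent pair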
            lock.acquire()
            image_np_global = load_image_into_numpy_array(image_mat)
            depth_np_global = load_depth_into_numpy_array(depth_mat)
            new_data = True
            lock.release()

        sleep(0.01)

    zed.close()
Example #11
def main():
    if len(sys.argv) != 3:
        exit()

    filepath = sys.argv[1]
    outputURL = sys.argv[2]

    print("Reading SVO file: {0}".format(filepath))

    input_type = sl.InputType()
    input_type.set_from_svo_file(filepath)
    init = sl.InitParameters(input_t=input_type, svo_real_time_mode=False)
    cam = sl.Camera()
    status = cam.open(init)

    if status != sl.ERROR_CODE.SUCCESS:
        print(repr(status))
        exit()

    runtime = sl.RuntimeParameters()
    mat = sl.Mat()

    # Output size; must match the frame size of the SVO (assumed HD720 here)
    h = 720
    w = 1280

    fourcc = cv2.VideoWriter_fourcc(*'mp4v')
    out = cv2.VideoWriter(outputURL, fourcc, 20.0, (w, h))

    while cv2.waitKey(1) != ord("q"):  # for 'q' key
        err = cam.grab(runtime)
        if err == sl.ERROR_CODE.SUCCESS:
            cam.retrieve_image(mat)
            # Drop the alpha channel: VideoWriter expects 3-channel BGR frames
            im = cv2.cvtColor(mat.get_data(), cv2.COLOR_BGRA2BGR)
            out.write(im)
            cv2.imshow("frame", im)

    cam.close()
    out.release()
    cv2.destroyAllWindows()

    print("\nFINISH")
Example #12
def main():

    if len(sys.argv) != 2:
        print("Please specify path to .svo file.")
        exit()

    filepath = sys.argv[1]
    print("Reading SVO file: {0}".format(filepath))

    input_type = sl.InputType()
    input_type.set_from_svo_file(filepath)
    init = sl.InitParameters(input_t=input_type, svo_real_time_mode=False)
    cam = sl.Camera()
    status = cam.open(init)
    if status != sl.ERROR_CODE.SUCCESS:
        print(repr(status))
        exit()

    runtime = sl.RuntimeParameters()
    mat = sl.Mat()

    key = ''
    print("  Save the current image:     s")
    print("  Quit the video reading:     q\n")
    while key != 113:  # for 'q' key
        err = cam.grab(runtime)
        if err == sl.ERROR_CODE.SUCCESS:
            cam.retrieve_image(mat)
            cv2.imshow("ZED", mat.get_data())
            key = cv2.waitKey(1)
            saving_image(key, mat)
        else:
            key = cv2.waitKey(1)
    cv2.destroyAllWindows()

    print_camera_information(cam)
    cam.close()
    print("\nFINISH")
Example #13
def main(argv):

    thresh = 0.25
    darknet_path = "/home/apsync/GitHub/darknet/"
    config_path = darknet_path + "cfg/yolov4-tiny.cfg"
    weight_path = darknet_path + "yolov4-tiny.weights"
    meta_path = darknet_path + "cfg/coco.data"
    svo_path = None
    zed_id = 0

    help_str = 'darknet_zed.py -c <config> -w <weight> -m <meta> -t <threshold> -s <svo_file> -z <zed_id>'
    try:
        opts, args = getopt.getopt(argv, "hc:w:m:t:s:z:", [
            "config=", "weight=", "meta=", "threshold=", "svo_file=", "zed_id="
        ])
    except getopt.GetoptError:
        log.exception(help_str)
        sys.exit(2)
    for opt, arg in opts:
        if opt == '-h':
            log.info(help_str)
            sys.exit()
        elif opt in ("-c", "--config"):
            config_path = arg
        elif opt in ("-w", "--weight"):
            weight_path = arg
        elif opt in ("-m", "--meta"):
            meta_path = arg
        elif opt in ("-t", "--threshold"):
            thresh = float(arg)
        elif opt in ("-s", "--svo_file"):
            svo_path = arg
        elif opt in ("-z", "--zed_id"):
            zed_id = int(arg)

    input_type = sl.InputType()
    if svo_path is not None:
        log.info("SVO file : " + svo_path)
        input_type.set_from_svo_file(svo_path)
    else:
        # Launch camera by id
        input_type.set_from_camera_id(zed_id)

    init = sl.InitParameters(input_t=input_type)
    init.coordinate_units = sl.UNIT.METER
    init.depth_mode = sl.DEPTH_MODE.PERFORMANCE  # Use PERFORMANCE depth mode

    # Start the ROS nodes for the images and the depth information
    rospy.init_node('darknet_ros')
    rospy.loginfo('darknet yolo node started')
    pub = rospy.Publisher('/darknet_ros/detection_image', Image, queue_size=10)
    pub2 = rospy.Publisher('/darknet_ros/distance_array',
                           LaserScan,
                           queue_size=50)
    pub3 = rospy.Publisher('/darknet_ros/color_image', Image, queue_size=10)
    pub4 = rospy.Publisher('/darknet_ros/nine_sector_image',
                           Image,
                           queue_size=10)
    pub5 = rospy.Publisher('/darknet_ros/9sectorarray',
                           PointCloud,
                           queue_size=50)

    cam = sl.Camera()
    if not cam.is_opened():
        log.info("Opening ZED Camera...")
    status = cam.open(init)
    if status != sl.ERROR_CODE.SUCCESS:
        log.error(repr(status))
        exit()

    runtime = sl.RuntimeParameters()
    # Use FILL sensing mode
    runtime.sensing_mode = sl.SENSING_MODE.FILL
    mat = sl.Mat()
    mat_lv = sl.Mat()
    point_cloud_mat = sl.Mat()
    mat_9_sec = sl.Mat()

    # Import the global variables. This lets us instantiate Darknet once,
    # then just call detect() again without reloading the network
    global metaMain, netMain, altNames  # pylint: disable=W0603
    assert 0 < thresh < 1, "Threshold should be a float between zero and one (non-inclusive)"
    if not os.path.exists(config_path):
        raise ValueError("Invalid config path `" +
                         os.path.abspath(config_path) + "`")
    if not os.path.exists(weight_path):
        raise ValueError("Invalid weight path `" +
                         os.path.abspath(weight_path) + "`")
    if not os.path.exists(meta_path):
        raise ValueError("Invalid data file path `" +
                         os.path.abspath(meta_path) + "`")
    if netMain is None:
        netMain = load_net_custom(config_path.encode("ascii"),
                                  weight_path.encode("ascii"), 0,
                                  1)  # batch size = 1
    if metaMain is None:
        metaMain = load_meta(meta_path.encode("ascii"))
    if altNames is None:
        # In Python 3, the metafile default access craps out on Windows (but not Linux)
        # Read the names file and create a list to feed to detect
        try:
            with open(meta_path) as meta_fh:
                meta_contents = meta_fh.read()
                import re
                match = re.search("names *= *(.*)$", meta_contents,
                                  re.IGNORECASE | re.MULTILINE)
                if match:
                    result = match.group(1)
                else:
                    result = None
                try:
                    if os.path.exists(result):
                        with open(result) as names_fh:
                            names_list = names_fh.read().strip().split("\n")
                            altNames = [x.strip() for x in names_list]
                except TypeError:
                    pass
        except Exception:
            pass

    color_array = generate_color(meta_path)

    # get parameters for depth sensing
    width = round(cam.get_camera_information().camera_resolution.width / 2)
    height = round(cam.get_camera_information().camera_resolution.height / 2)
    res = sl.Resolution()
    res.width = width
    res.height = height
    angle_offset = 0 - (depth_hfov_deg / 2)
    increment_f = depth_hfov_deg / (distances_array_length)

    #Initialize laserscan node
    scan = LaserScan()
    scan.header.frame_id = 'zed_horizontal_scan'
    scan.angle_min = angle_offset
    scan.angle_max = depth_hfov_deg / 2
    scan.angle_increment = increment_f
    scan.range_min = DEPTH_RANGE_M[0]
    scan.range_max = DEPTH_RANGE_M[1]
    scan.intensities = []
    scan.time_increment = 0
    fps = 1

    #Initialize pointcloud node
    channel = ChannelFloat32()
    channel.name = "depth_range"
    channel.values = [DEPTH_RANGE_M[0], DEPTH_RANGE_M[1]]
    pointcloud = PointCloud()
    pointcloud.header.frame_id = 'zed_9_sector_scan'
    pointcloud.channels = [channel]

    while not rospy.is_shutdown():
        start_time = time.time()  # start time of the loop
        current_time = rospy.Time.now()
        err = cam.grab(runtime)
        if err == sl.ERROR_CODE.SUCCESS:
            cam.retrieve_image(mat, sl.VIEW.LEFT)
            image = mat.get_data()
            cam.retrieve_image(mat_lv, sl.VIEW.LEFT)
            image_lv = mat_lv.get_data()
            cam.retrieve_image(mat_9_sec, sl.VIEW.DEPTH)
            nine_sector_image = mat_9_sec.get_data()
            scan.header.stamp = current_time
            pointcloud.header.stamp = current_time
            scan.scan_time = fps

            cam.retrieve_measure(point_cloud_mat, sl.MEASURE.XYZRGBA)
            depth = point_cloud_mat.get_data()

            print("\033[0;0H")

            distances_from_depth_image(point_cloud_mat, distances,
                                       DEPTH_RANGE_M[0], DEPTH_RANGE_M[1],
                                       width, height,
                                       obstacle_line_thickness_pixel)
            scan.ranges = distances
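            # Estimate the forward distance from a few rays around the middle of the scan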
            distance_center = np.mean(distances[34:38])

            # Publish the distance information for the mavros node
            pub2.publish(scan)

            # provide distance info to video stream and create image with horizontal distance information
            # Draw a horizontal line to visualize the obstacles' line
            x1, y1 = int(0), int(height)
            x2, y2 = int(width * 2), int(height)
            line_thickness = obstacle_line_thickness_pixel
            cv2.line(image_lv, (x1, y1), (x2, y2), (0, 255, 0),
                     thickness=line_thickness)

            #print("Distance to Camera at position {},{} (image center): {:1.3} m".format(width, height, distance_center))
            if distance_center > DEPTH_RANGE_M[1]:
                cv2.circle(image_lv, (width, height), 20, (0, 255, 0), 2)
            elif distance_center <= DEPTH_RANGE_M[1] and distance_center > 10:
                cv2.putText(image_lv, ("{:1.3} m".format(distance_center)),
                            ((width - 50), (height + 50)),
                            cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2)
                cv2.circle(image_lv, (width, height), 20, (0, 255, 0), 2)
            elif distance_center > 5 and distance_center <= 10:
                cv2.putText(image_lv, ("{:1.3} m".format(distance_center)),
                            ((width - 70), (height + 65)),
                            cv2.FONT_HERSHEY_SIMPLEX, 1.5, (0, 255, 255), 2)
                cv2.circle(image_lv, (width, height), 20, (0, 255, 255), 4)
            else:
                cv2.putText(image_lv, ("{:1.3} m".format(distance_center)),
                            ((width - 100), (height + 70)),
                            cv2.FONT_HERSHEY_DUPLEX, 2, (0, 0, 255), 2)
                cv2.circle(image_lv, (width, height), 20, (0, 0, 255), -1)
                cv2.rectangle(image_lv, (0, 100),
                              ((width * 2 - 5), (height * 2)), (30, 30, 255),
                              3)

            # create 9 sector image with distance information
            # divide view into 3x3 matrix
            box = np.empty(4, dtype=int)
            pxstep = int(width * 2 / 3)
            pystep = int(height * 2 / 3)
            gx = 0
            gy = 0
            # draw sector lines on image
            while gx < (width * 2):
                cv2.line(nine_sector_image, (gx, 0), (gx, (height * 2)),
                         color=(0, 0, 255),
                         thickness=2)
                gx += pxstep
            while gy <= (height * 2):
                cv2.line(nine_sector_image, (0, gy), ((width * 2), gy),
                         color=(0, 0, 255),
                         thickness=2)
                gy += pystep
            # measure sector depth and printout in sectors
            gx = 0
            gy = 0
            i = 0
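            # box holds [center_x, center_y, width, height] for get_object_depth()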
            box[1] = pystep / 2 + gy
            box[2] = pxstep
            box[3] = pystep
            while gx < (width * 2 - 2):
                gy = 0
                box[1] = pystep / 2 + gy
                box[0] = pxstep / 2 + gx
                # calculate depth of closest object in sector
                x, y, z = get_object_depth(depth, box)
                sector_obstacle_coordinates[i][0] = x
                sector_obstacle_coordinates[i][1] = y
                sector_obstacle_coordinates[i][2] = z
                distance = math.sqrt(x * x + y * y + z * z)
                distance = "{:.2f}".format(distance)
                cv2.putText(nine_sector_image,
                            "Dist.: " + (str(distance) + " m"),
                            ((gx + 10), (gy + pystep - 10)),
                            cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 2)
                gy += pystep
                i += 1

                while gy < (height * 2):
                    box[1] = pystep / 2 + gy
                    # calculate depth of closest object in sector
                    x, y, z = get_object_depth(depth, box)
                    sector_obstacle_coordinates[i][0] = x
                    sector_obstacle_coordinates[i][1] = y
                    sector_obstacle_coordinates[i][2] = z
                    distance = math.sqrt(x * x + y * y + z * z)
                    distance = "{:.2f}".format(distance)
                    cv2.putText(nine_sector_image,
                                "Dist.: " + (str(distance) + " m"),
                                ((gx + 10), (gy + pystep - 10)),
                                cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 2)
                    gy += pystep
                    i += 1

                gx += pxstep
            pointcloud.points = [
                Point32(x=sector_obstacle_coordinates[j][0],
                        y=sector_obstacle_coordinates[j][1],
                        z=sector_obstacle_coordinates[j][2]) for j in range(9)
            ]
            pub5.publish(pointcloud)

            # Do the yolo object detection
            detections = detect(netMain, metaMain, image, thresh)

            print(
                chr(27) + "[2J" + "**** " + str(len(detections)) +
                " Detection Results ****")
            for detection in detections:
                label = detection[0]
                confidence = detection[1]
                pstring = label + ": " + str(np.rint(100 * confidence)) + "%"
                print(pstring)
                bounds = detection[2]
                y_extent = int(bounds[3])
                x_extent = int(bounds[2])
                # Coordinates are around the center
                x_coord = int(bounds[0] - bounds[2] / 2)
                y_coord = int(bounds[1] - bounds[3] / 2)
                thickness = 1
                x, y, z = get_object_depth(depth, bounds)
                distance = math.sqrt(x * x + y * y + z * z)
                distance = "{:.2f}".format(distance)
                cv2.rectangle(image,
                              (x_coord - thickness, y_coord - thickness),
                              (x_coord + x_extent + thickness, y_coord +
                               (18 + thickness * 4)),
                              color_array[detection[3]], -1)
                cv2.putText(image, pstring + " " + (str(distance) + " m"),
                            (x_coord + (thickness * 4), y_coord +
                             (10 + thickness * 4)), cv2.FONT_HERSHEY_SIMPLEX,
                            0.5, (255, 255, 255), 2)
                cv2.rectangle(image,
                              (x_coord - thickness, y_coord - thickness),
                              (x_coord + x_extent + thickness,
                               y_coord + y_extent + thickness),
                              color_array[detection[3]], int(thickness * 2))

            # convert image to ros
            imgMsg = bridge.cv2_to_imgmsg(image, encoding="bgra8")
            imgMsg.header.stamp = rospy.Time.now()
            imgMsg.header.frame_id = "color_image"
            imgMsg_lv = bridge.cv2_to_imgmsg(image_lv, encoding="bgra8")
            imgMsg_lv.header.stamp = rospy.Time.now()
            imgMsg_lv.header.frame_id = "yolo_image"
            imgMsg_9sec = bridge.cv2_to_imgmsg(nine_sector_image,
                                               encoding="bgra8")
            imgMsg_9sec.header.stamp = rospy.Time.now()
            imgMsg_9sec.header.frame_id = "nine_sector_image"
            # publish images to ros nodes
            pub.publish(imgMsg)
            pub3.publish(imgMsg_lv)
            pub4.publish(imgMsg_9sec)
        fps = (time.time() - start_time)  # loop duration in seconds (also reused as scan.scan_time)
        print("\033[0;0H" + "FPS: {:1.3}".format(1.0 / fps))
        #rospy.Rate(1.0).sleep()  # 1 Hz
    #cv2.destroyAllWindows()

    cam.close()
    log.info("\nFINISH")
Example #14
def main():

    # Prepare the ZED camera only.
    # For other cameras this section can be skipped.
    zed = sl.Camera()
    input_type = sl.InputType()
    if len(sys.argv) >= 2:
        input_type.set_from_svo_file(sys.argv[1])
    init = sl.InitParameters(input_t=input_type)
    init.camera_resolution = sl.RESOLUTION.HD1080

    err = zed.open(init)
    if err != sl.ERROR_CODE.SUCCESS:
        print(repr(err))
        zed.close()
        exit(1)

    runtime = sl.RuntimeParameters()
    runtime.sensing_mode = sl.SENSING_MODE.STANDARD

    image_size = zed.get_camera_information().camera_resolution
    image_size.width = image_size.width // 2
    image_size.height = image_size.height // 2

    image_zed = sl.Mat(image_size.width, image_size.height, sl.MAT_TYPE.U8_C4)

    key = ' '

    # End ZED setup

    def edge_detect(hr):
        # Run a Canny edge detector on each frame, then zero out a 1-pixel border
        canned = []
        for hr_i in hr:
            canned.append(canny(hr_i.unsqueeze(0).half(), use_cuda=True))
        canned = torch.cat(canned, dim=0).detach().float()
        EDGE_SIZE = 1
        canned[:, :, :EDGE_SIZE, :] = 0
        canned[:, :, -EDGE_SIZE:, :] = 0
        canned[:, :, :, :EDGE_SIZE] = 0
        canned[:, :, :, -EDGE_SIZE:] = 0
        return canned

    def customTransform(input, channel=3):
        im = np.array([input])
        if channel != 3:
            im = im[:, :, :, :channel]
        im = np.transpose(im, (0, 3, 1, 2))
        im = np.array(im, dtype=np.float32) / 255.0
        im = torch.FloatTensor(im)
        im = im.cuda()
        return im

    def show_img(ims, wait=0, names=['im']):
        for im, name in zip(ims, names):
            try:
                temp = im[0].cpu().numpy()
            except:
                temp = im[0].cpu().detach().numpy()
            temp = np.clip(np.transpose(temp, axes=(1, 2, 0)), 0, 1.0) * 255
            temp = temp.astype(np.uint8)
            cv2.imshow(name, temp)
        cv2.waitKey(wait)

    def rescale_img(img_in, scale):
        img_in = img_in[:, :, :3]
        frame_HR = cv2.resize(img_in, (640, 360))
        frame_LR = cv2.resize(frame_HR, (320, 180))
        Show_LR = cv2.resize(frame_LR, (640, 360))

        return frame_HR, frame_LR, Show_LR

    model = RCARN(
        num_channels=3,
        base_filter=256,
        feat=3,  #64
        num_stages=3,
        n_resblock=5,
        nFrames=4,
        scale_factor=4).to(device)
    model_c = './checkpoints/EPCARN.pth'
    model.load_state_dict(
        torch.load(model_c, map_location=lambda storage, loc: storage))

    # Update...

    # .........

    while key != 113:
        err = zed.grab(runtime)
        if err == sl.ERROR_CODE.SUCCESS:

            zed.retrieve_image(image_zed, sl.VIEW.LEFT, sl.MEM.CPU, image_size)
            image_ocv = image_zed.get_data()
            HR, LR, Show_LR = rescale_img(image_ocv, 2)

            #-------------------------------------------------------------
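            # Simulate transmission: JPEG-encode the LR frame, then decode it back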
            img_send_jpeg = cv2.imencode(".jpg", LR,
                                         [cv2.IMWRITE_JPEG_QUALITY, 100])[1]
            nparr = np.asarray(img_send_jpeg, np.uint8)
            LR = cv2.imdecode(nparr, cv2.IMREAD_COLOR)
            #-------------------------------------------------------------
            HR = customTransform(HR)
            LR = customTransform(LR)
            Show_LR = customTransform(Show_LR)
            canned = edge_detect(HR)

            prediction = model(LR, canned)

            show_img([prediction, HR, Show_LR],
                     wait=1,
                     names=['sr', 'hr', 'lr'])

            key = cv2.waitKey(10)

    cv2.destroyAllWindows()
    zed.close()
Example #15
        # cv2.imshow('result',res)
        # cv2.waitKey(0)
        return res
    else:
        print("Not enough matches are found - {}/{}".format(
            len(good), MIN_MATCH_COUNT))
        matchesMask = None


width = 704  #1056
height = 416  #624
exit_signal = False

zed = sl.Camera()
# Create an InitParameters object and set configuration parameters
input_type = sl.InputType()

init_params = sl.InitParameters(input_t=input_type)
init_params.camera_resolution = sl.RESOLUTION.HD720
init_params.camera_fps = 30
init_params.depth_mode = sl.DEPTH_MODE.PERFORMANCE
init_params.coordinate_units = sl.UNIT.METER
init_params.svo_real_time_mode = False

# Open the camera
status = zed.open(init_params)
print(status)
time.sleep(1)

left_image_mat = sl.Mat()
# right_image_mat = sl.Mat()
Example #16
def main() :
    # construct the argument parser and parse command line arguments
    ap = argparse.ArgumentParser()
    ap.add_argument("-i", "--ip", type=str, required=True,
        help="ip address of the device")
    ap.add_argument("-o", "--port", type=int, required=True,
        help="ephemeral port number of the server (1024 to 65535)")
    ap.add_argument("-f", "--frame-count", type=int, default=32,
        help="# of frames used to construct the background model")
    ap.add_argument("-s", "--source", type=str, default="cam", help="using 'cam'era or 'svo'<SVO file>?")
    global args
    args = vars(ap.parse_args())

    # start the flask app
    print("running flask.")
    threading.Thread(target=flaskThread).start()
    print("flask app running.")
    # Create a ZED camera object
    zed = sl.Camera()

    # Set configuration parameters
    input_type = sl.InputType()
    if args["source"] != "cam" :
        input_type.set_from_svo_file(args["source"])
    init = sl.InitParameters(input_t=input_type)
    init.camera_resolution = sl.RESOLUTION.HD1080
    init.depth_mode = sl.DEPTH_MODE.PERFORMANCE
    init.coordinate_units = sl.UNIT.MILLIMETER

    # Open the camera
    err = zed.open(init)
    if err != sl.ERROR_CODE.SUCCESS :
        print(repr(err))
        zed.close()
        exit(1)

    # Create a TCP/IP socket
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)

    # Bind the socket to the port
    server_address = ('localhost', 10000)
    print('starting up on {} port {}'.format(*server_address))
    sock.bind(server_address)

    # Listen for incoming connections
    sock.listen(1)

    # Display help in console
    print_help()

    # Set runtime parameters after opening the camera
    runtime = sl.RuntimeParameters()
    runtime.sensing_mode = sl.SENSING_MODE.STANDARD

    # Prepare new image size to retrieve half-resolution images
    image_size = zed.get_camera_information().camera_resolution
    image_size.width = image_size.width // 2
    image_size.height = image_size.height // 2

    # Declare your sl.Mat matrices
    image_zed = sl.Mat(image_size.width, image_size.height, sl.MAT_TYPE.U8_C4)
    depth_image_zed = sl.Mat(image_size.width, image_size.height, sl.MAT_TYPE.U8_C4)
    depth = sl.Mat()
    point_cloud = sl.Mat()

    key = ' '
    # Wait for a connection
    print('waiting for a connection')
    connection, client_address = sock.accept()
    print('connection from ', client_address)

    while key != 113 :
        # Wait for request
        # print('Waiting for request...')
        data = connection.recv(16)
        # print('received {!r}'.format(data))

        err = zed.grab(runtime)
        if err == sl.ERROR_CODE.SUCCESS :
            # Retrieve the left image, depth image in the half-resolution
            zed.retrieve_image(image_zed, sl.VIEW.LEFT, sl.MEM.CPU, image_size)
            zed.retrieve_image(depth_image_zed, sl.VIEW.DEPTH, sl.MEM.CPU, image_size)
            zed.retrieve_measure(depth, sl.MEASURE.DEPTH, sl.MEM.CPU, image_size)
            # Retrieve the RGBA point cloud in half resolution
            zed.retrieve_measure(point_cloud, sl.MEASURE.XYZRGBA, sl.MEM.CPU, image_size)

            # To recover data from sl.Mat to use it with opencv, use the get_data() method
            # It returns a numpy array that can be used as a matrix with opencv
            image_ocv = image_zed.get_data()
            depth_image_ocv = depth_image_zed.get_data()
            depth_ocv = depth.get_data()
            
            # Detect the wood girder from depth_image_zed:
            # scan the middle row of the depth image and find the column
            # with the minimum valid depth value
            n_min = 0
            n = 0
            depth_value_min = 2500
            while n < image_size.width:
                x = n
                y = image_size.height // 2
                depth_value = depth_ocv[y, x]
                if not math.isnan(depth_value) and 0 < depth_value < depth_value_min:
                    n_min = n
                    depth_value_min = depth_value
                n = n + 1
            # print('Min value is at: ' + str(n_min) + '. Value is: ' + str(depth_value_min) + '.')
            # n_min = 100
            # cv2.circle( depth_image_ocv, ( n_min, image_size.height // 2 ), \
            #        32, ( 0, 0, 255 ), 1, 8 )
            
            # Collect all points on the middle row whose depth is within
            # 40 mm of the minimum (the girder surface)
            threshold = depth_value_min + 40
            n = 0
            point_depth = np.array([])
            point_x = np.array([])
            while n < image_size.width:
                x = n
                y = image_size.height // 2
                depth_value = depth_ocv[y, x]
                if not math.isnan(depth_value) and depth_value < threshold:
                    point_depth = np.append(point_depth, depth_value)
                    point_x = np.append(point_x, x)
                    # cv2.circle(depth_image_ocv, (x, image_size.height // 2),
                    #            16, (0, 0, 255), 1, 8)
                n = n + 1
            # print(str(len(point_x)) + 'points are within threshold 40.\n')
            # print('Point within threshold:\n' + str(point_depth))
            # Fall back to the view center if no points are within the threshold
            girder_center = int(np.mean(point_x)) if point_x.size else image_size.width // 2
            # print('Girder center at: ' + str(girder_center))
            
            # Send the girder center x-coordinate as a 2-byte big-endian integer
            connection.sendall(girder_center.to_bytes(2, 'big'))
    
            # Draw girder center
            cv2.circle( depth_image_ocv, (int(girder_center), image_size.height // \
                    2), 8, (255, 0, 0), 2, 8 )
            # Draw view center
            cv2.circle(depth_image_ocv, (image_size.width //2, image_size.height //2), \
                    8, (0,255,0),2,8)
            # Label the girder with the minimum depth value (blue, 2 px, anti-aliased)
            cv2.putText(depth_image_ocv, str(depth_value_min),
                        (int(girder_center), image_size.height // 2),
                        cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 0, 0), 2, cv2.LINE_AA)

            # cv2.imshow("Image", image_ocv)
            # cv2.imshow("Depth", depth_image_ocv)
            
            # Send to webserver
            global outputFrame
            outputFrame = depth_image_ocv.copy()
            key = cv2.waitKey(10)

            process_key_event(zed, key)

    cv2.destroyAllWindows()
    zed.close()
    # Clean up the connection
    connection.close()

    print("\nFINISH")
Example #17
def main(argv):

    thresh = 0.25
    darknet_path = "../libdarknet/"
    config_path = darknet_path + "cfg/yolov3-tiny.cfg"
    weight_path = "yolov3-tiny.weights"
    meta_path = "coco.data"
    svo_path = None
    zed_id = 0

    help_str = 'darknet_zed.py -c <config> -w <weight> -m <meta> -t <threshold> -s <svo_file> -z <zed_id>'
    try:
        opts, args = getopt.getopt(argv, "hc:w:m:t:s:z:", [
            "config=", "weight=", "meta=", "threshold=", "svo_file=", "zed_id="
        ])
    except getopt.GetoptError:
        log.exception(help_str)
        sys.exit(2)
    for opt, arg in opts:
        if opt == '-h':
            log.info(help_str)
            sys.exit()
        elif opt in ("-c", "--config"):
            config_path = arg
        elif opt in ("-w", "--weight"):
            weight_path = arg
        elif opt in ("-m", "--meta"):
            meta_path = arg
        elif opt in ("-t", "--threshold"):
            thresh = float(arg)
        elif opt in ("-s", "--svo_file"):
            svo_path = arg
        elif opt in ("-z", "--zed_id"):
            zed_id = int(arg)

    input_type = sl.InputType()
    if svo_path is not None:
        log.info("SVO file : " + svo_path)
        input_type.set_from_svo_file(svo_path)
    else:
        # Launch camera by id
        input_type.set_from_camera_id(zed_id)

    init = sl.InitParameters(input_t=input_type)
    init.coordinate_units = sl.UNIT.METER

    cam = sl.Camera()
    if not cam.is_opened():
        log.info("Opening ZED Camera...")
    status = cam.open(init)
    if status != sl.ERROR_CODE.SUCCESS:
        log.error(repr(status))
        exit()

    runtime = sl.RuntimeParameters()
    # Use STANDARD sensing mode
    runtime.sensing_mode = sl.SENSING_MODE.STANDARD
    mat = sl.Mat()
    point_cloud_mat = sl.Mat()

    # Import the global variables. This lets us instantiate Darknet once,
    # then just call detect() again without reloading the network
    global metaMain, netMain, altNames  # pylint: disable=W0603
    assert 0 < thresh < 1, "Threshold should be a float between zero and one (non-inclusive)"
    if not os.path.exists(config_path):
        raise ValueError("Invalid config path `" +
                         os.path.abspath(config_path) + "`")
    if not os.path.exists(weight_path):
        raise ValueError("Invalid weight path `" +
                         os.path.abspath(weight_path) + "`")
    if not os.path.exists(meta_path):
        raise ValueError("Invalid data file path `" +
                         os.path.abspath(meta_path) + "`")
    if netMain is None:
        netMain = load_net_custom(config_path.encode("ascii"),
                                  weight_path.encode("ascii"), 0,
                                  1)  # batch size = 1
    if metaMain is None:
        metaMain = load_meta(meta_path.encode("ascii"))
    if altNames is None:
        # In Python 3, the metafile default access craps out on Windows (but not Linux)
        # Read the names file and create a list to feed to detect
        try:
            with open(meta_path) as meta_fh:
                meta_contents = meta_fh.read()
                import re
                match = re.search("names *= *(.*)$", meta_contents,
                                  re.IGNORECASE | re.MULTILINE)
                if match:
                    result = match.group(1)
                else:
                    result = None
                try:
                    if os.path.exists(result):
                        with open(result) as names_fh:
                            names_list = names_fh.read().strip().split("\n")
                            altNames = [x.strip() for x in names_list]
                except TypeError:
                    pass
        except Exception:
            pass

    color_array = generate_color(meta_path)

    log.info("Running...")

    key = ''
    while key != 113:  # for 'q' key
        start_time = time.time()  # start time of the loop
        err = cam.grab(runtime)
        if err == sl.ERROR_CODE.SUCCESS:
            cam.retrieve_image(mat, sl.VIEW.LEFT)
            image = mat.get_data()

            cam.retrieve_measure(point_cloud_mat, sl.MEASURE.XYZRGBA)
            depth = point_cloud_mat.get_data()

            # Do the detection
            detections = detect(netMain, metaMain, image, thresh)

            log.info(
                chr(27) + "[2J" + "**** " + str(len(detections)) +
                " Results ****")
            for detection in detections:
                label = detection[0]
                confidence = detection[1]
                pstring = label + ": " + str(np.rint(100 * confidence)) + "%"
                log.info(pstring)
                bounds = detection[2]
                y_extent = int(bounds[3])
                x_extent = int(bounds[2])
                # Coordinates are around the center
                x_coord = int(bounds[0] - bounds[2] / 2)
                y_coord = int(bounds[1] - bounds[3] / 2)
                #boundingBox = [[x_coord, y_coord], [x_coord, y_coord + y_extent], [x_coord + x_extent, y_coord + y_extent], [x_coord + x_extent, y_coord]]
                thickness = 1
                x, y, z = get_object_depth(depth, bounds)
                distance = math.sqrt(x * x + y * y + z * z)
                distance = "{:.2f}".format(distance)
                cv2.rectangle(image,
                              (x_coord - thickness, y_coord - thickness),
                              (x_coord + x_extent + thickness, y_coord +
                               (18 + thickness * 4)),
                              color_array[detection[3]], -1)
                cv2.putText(image, label + " " + (str(distance) + " m"),
                            (x_coord + (thickness * 4), y_coord +
                             (10 + thickness * 4)), cv2.FONT_HERSHEY_SIMPLEX,
                            0.5, (255, 255, 255), 2)
                cv2.rectangle(image,
                              (x_coord - thickness, y_coord - thickness),
                              (x_coord + x_extent + thickness,
                               y_coord + y_extent + thickness),
                              color_array[detection[3]], int(thickness * 2))

            cv2.imshow("ZED", image)
            key = cv2.waitKey(5)
            log.info("FPS: {}".format(1.0 / (time.time() - start_time)))
        else:
            key = cv2.waitKey(5)
    cv2.destroyAllWindows()

    cam.close()
    log.info("\nFINISH")
Example #18
def main() :
    #default vars
    error = False
    AngleValue = 0
    RotationValue = 0
    xOffset = 0
    xRes = 1920
    middleOfRes = xRes/2
    #vars
    #Green Tone, change based on tone of light. MAY BE TOO WIDE CURRENTLY
    lowerBound=np.array([33,80,40])
    upperBound=np.array([102,255,255])

    zed = sl.Camera()

    input_type = sl.InputType()
    if len(sys.argv) >= 2 :
        input_type.set_from_svo_file(sys.argv[1])
    init = sl.InitParameters(input_t=input_type)
    init.camera_resolution = sl.RESOLUTION.HD1080
    init.depth_mode = sl.DEPTH_MODE.PERFORMANCE
    init.coordinate_units = sl.UNIT.MILLIMETER

    err = zed.open(init)
    if err != sl.ERROR_CODE.SUCCESS :
        print(repr(err))
        zed.close()
        exit(1)

    runtime = sl.RuntimeParameters()
    runtime.sensing_mode = sl.SENSING_MODE.STANDARD

    image_size = zed.get_camera_information().camera_resolution
    image_size.width = image_size.width // 2
    image_size.height = image_size.height // 2

    # Declare your sl.Mat matrices
    image_zed = sl.Mat(image_size.width, image_size.height, sl.MAT_TYPE.U8_C4)
    
    depth_image_zed = sl.Mat(image_size.width, image_size.height, sl.MAT_TYPE.U8_C4)
    point_cloud = sl.Mat()

    while True:
        err = zed.grab(runtime)
        if err == sl.ERROR_CODE.SUCCESS :
            # Retrieve the left image, depth image in the half-resolution
            zed.retrieve_image(image_zed, sl.VIEW.LEFT, sl.MEM.CPU, image_size)
            zed.retrieve_image(depth_image_zed, sl.VIEW.DEPTH, sl.MEM.CPU, image_size)
            # Retrieve the RGBA point cloud in half resolution
            zed.retrieve_measure(point_cloud, sl.MEASURE.XYZRGBA, sl.MEM.CPU, image_size)

            #LEFT IMAGE CONVERTED TO OPENCV READABLE FORMAT
            image_ocv = image_zed.get_data()
            depth_image_ocv = depth_image_zed.get_data()

            #uncomment if u have a display
            #cv2.imshow("Image", image_ocv)
            #cv2.imshow("Depth", depth_image_ocv)

            # image_ocv is already a numpy array (BGRA); drop the alpha channel
            image = image_ocv[:, :, :3]
            frame = cv2.resize(image, (1920, 1080))

            imgHSV = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)

            # kernelOpen / kernelClose are morphology kernels assumed to be defined elsewhere
            mask = cv2.inRange(imgHSV, lowerBound, upperBound)
            maskOpen = cv2.morphologyEx(mask, cv2.MORPH_OPEN, kernelOpen)
            maskClose = cv2.morphologyEx(maskOpen, cv2.MORPH_CLOSE, kernelClose)
            maskFinal = maskClose
            # OpenCV 3 signature; OpenCV 4 returns only (contours, hierarchy)
            _, conts, _ = cv2.findContours(maskFinal.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)

            #RUN THIS IF GREEN IS FOUND
            for i in range(len(conts)):
                c =  max(conts,key=cv2.contourArea)
                ((x, y), radius) = cv2.minEnclosingCircle(c)
                M = cv2.moments(c)
                center = (int(M["m10"] / M["m00"]), int(M["m01"] / M["m00"]))
                if radius > 5:
                    x,y,w,h=cv2.boundingRect(conts[i])
                    cv2.rectangle(frame,(x,y),(x+w,y+h),(0,0,255), 2)
                    xcenter = (int(M["m10"] / M["m00"]))
                    print(xcenter)
                    xOffset = int(xcenter - middleOfRes)
                    print(xOffset)
                    print(x)
                    err, point_cloud_value = point_cloud.get_value(x, y)
                    distance = math.sqrt(point_cloud_value[0] * point_cloud_value[0] + point_cloud_value[2] * point_cloud_value[2])
                    print(distance)
                    
                    
                else:
                    break

  
        else:
            #send network tables error, Bryon's time to pop off
            error = True

    #close everything down if it fails, WHICH IT WON'T
    cv2.destroyAllWindows()
    zed.close()
Example #19
def main(argv):

    thresh = 0.25
    darknet_path = "../libdarknet/"
    config_path = darknet_path + "cfg/yolov3-tiny.cfg"
    weight_path = "yolov3-tiny.weights"
    meta_path = "coco.data"
    svo_path = None
    zed_id = 0

    help_str = 'darknet_zed.py -c <config> -w <weight> -m <meta> -t <threshold> -s <svo_file> -z <zed_id>'
    try:
        opts, args = getopt.getopt(argv, "hc:w:m:t:s:z:", [
            "config=", "weight=", "meta=", "threshold=", "svo_file=", "zed_id="
        ])
    except getopt.GetoptError:
        log.exception(help_str)
        sys.exit(2)
    for opt, arg in opts:
        if opt == '-h':
            log.info(help_str)
            sys.exit()
        elif opt in ("-c", "--config"):
            config_path = arg
        elif opt in ("-w", "--weight"):
            weight_path = arg
        elif opt in ("-m", "--meta"):
            meta_path = arg
        elif opt in ("-t", "--threshold"):
            thresh = float(arg)
        elif opt in ("-s", "--svo_file"):
            svo_path = arg
        elif opt in ("-z", "--zed_id"):
            zed_id = int(arg)

    input_type = sl.InputType()
    if svo_path is not None:
        log.info("SVO file : " + svo_path)
        input_type.set_from_svo_file(svo_path)
    else:
        # Launch camera by id
        input_type.set_from_camera_id(zed_id)

    init = sl.InitParameters(input_t=input_type)
    init.coordinate_units = sl.UNIT.METER

    cam = sl.Camera()
    if not cam.is_opened():
        log.info("Opening ZED Camera...")
    status = cam.open(init)
    if status != sl.ERROR_CODE.SUCCESS:
        log.error(repr(status))
        exit()

    runtime = sl.RuntimeParameters()
    # Use FILL sensing mode
    runtime.sensing_mode = sl.SENSING_MODE.FILL
    mat = sl.Mat()
    point_cloud_mat = sl.Mat()

    # Retrieve images at the full camera resolution
    image_size = cam.get_camera_information().camera_resolution
    depth_zed = sl.Mat(image_size.width, image_size.height, sl.MAT_TYPE.F32_C1)
    # Create an RGBA sl.Mat object
    depth_zed_show = sl.Mat(image_size.width, image_size.height,
                            sl.MAT_TYPE.U8_C4)

    # Import the global variables. This lets us instantiate Darknet once,
    # then just call detect() again without reloading the network
    global metaMain, netMain, altNames  # pylint: disable=W0603
    assert 0 < thresh < 1, "Threshold should be a float between zero and one (non-inclusive)"
    if not os.path.exists(config_path):
        raise ValueError("Invalid config path `" +
                         os.path.abspath(config_path) + "`")
    if not os.path.exists(weight_path):
        raise ValueError("Invalid weight path `" +
                         os.path.abspath(weight_path) + "`")
    if not os.path.exists(meta_path):
        raise ValueError("Invalid data file path `" +
                         os.path.abspath(meta_path) + "`")
    if netMain is None:
        netMain = load_net_custom(config_path.encode("ascii"),
                                  weight_path.encode("ascii"), 0,
                                  1)  # batch size = 1
    if metaMain is None:
        metaMain = load_meta(meta_path.encode("ascii"))
    if altNames is None:
        # In Python 3, the metafile default access craps out on Windows (but not Linux)
        # Read the names file and create a list to feed to detect
        try:
            with open(meta_path) as meta_fh:
                meta_contents = meta_fh.read()
                import re
                match = re.search("names *= *(.*)$", meta_contents,
                                  re.IGNORECASE | re.MULTILINE)
                if match:
                    result = match.group(1)
                else:
                    result = None
                try:
                    if os.path.exists(result):
                        with open(result) as names_fh:
                            names_list = names_fh.read().strip().split("\n")
                            altNames = [x.strip() for x in names_list]
                except TypeError:
                    pass
        except Exception:
            pass

    color_array = generate_color(meta_path)

    log.info("Running...")

    key = ''
    while key != 113:  # for 'q' key
        start_time = time.time()  # start time of the loop
        err = cam.grab(runtime)
        if err == sl.ERROR_CODE.SUCCESS:
            cam.retrieve_image(mat, sl.VIEW.LEFT)
            image = mat.get_data()

            cam.retrieve_measure(point_cloud_mat, sl.MEASURE.XYZRGBA)
            cam.retrieve_measure(depth_zed, sl.MEASURE.DEPTH)
            depth = point_cloud_mat.get_data()
            depth_ocv = depth_zed.get_data()
            # Retrieve the normalized depth image
            cam.retrieve_image(depth_zed_show, sl.VIEW.DEPTH)
            # Use get_data() to get the numpy array
            depth_zed_show_num = depth_zed_show.get_data()

            # Do the detection
            detections = detect(netMain, metaMain, image, thresh)
            grey_color = 153

            # Normalize depth_ocv
            detect_ocv = np.clip(depth_ocv, 0, 10000)
            depth_ocv_n = (detect_ocv + 1) / 10000 * 255
            print(depth_ocv_n.max())

            log.info(
                chr(27) + "[2J" + "**** " + str(len(detections)) +
                " Results ****")
            for detection in detections:
                label = detection[0]
                confidence = detection[1]
                pstring = label + ": " + str(np.rint(100 * confidence)) + "%"
                log.info(pstring)
                bounds = detection[2]
                y_extent = int(bounds[3])
                x_extent = int(bounds[2])
                # Darknet returns the box center; convert to the top-left corner
                x_coord = int(bounds[0] - x_extent / 2)
                y_coord = int(bounds[1] - y_extent / 2)
                #boundingBox = [[x_coord, y_coord], [x_coord, y_coord + y_extent], [x_coord + x_extent, y_coord + y_extent], [x_coord + x_extent, y_coord]]
                bounds2 = [x_coord, y_coord]
                thickness = 1
                x, y, z = get_object_depth(depth, bounds)
                print(bounds)
                print(bounds2)
                bounds3 = [x_coord + x_extent, y_coord]
                image_size = cam.get_camera_information().camera_resolution
                width = image_size.width
                height = image_size.height
                #print(width)
                #print(height)

                # Real-world size of one pixel at this distance
                distance = math.sqrt(x * x + y * y + z * z)
                pixel_realsize_x = distance / 1280 * 1.92
                pixel_realsize_y = distance / 720 * 1.06
                distance_x = (x_extent) * pixel_realsize_x
                distance_y = (y_extent) * pixel_realsize_y

                #ROI
                roi = image[y_coord:y_coord + y_extent,
                            x_coord:x_coord + x_extent]
                roi_depth = depth_ocv_n[y_coord:y_coord + y_extent,
                                        x_coord:x_coord + x_extent]
                #print(roi_depth.shape)
                roi_depth_show = depth_zed_show_num[y_coord:y_coord + y_extent,
                                                    x_coord:x_coord + x_extent]
                #print(roi_depth_show.shape)
                #roi_depth_show_gray = cv2.cvtColor(roi_depth, cv2.COLOR_RGB2GRAY)

                #depth_image_3d = np.dstack((roi_depth,roi_depth,roi_depth,roi_depth))
                #bg_removed = np.where((depth_image_3d > z) | (depth_image_3d <= 0), grey_color, roi)
                # Binarize the depth data
                # Method 2 (implemented with OpenCV)
                ret, th = cv2.threshold(depth_ocv_n.astype(np.uint8), 0, 255,
                                        cv2.THRESH_OTSU)
                print("ret{}".format(ret))
                depth_image_3d = np.dstack((th, th, th, th))

                # th = threshold_otsu(roi_depth_show_gray)

                contours, hierarchy = cv2.findContours(th, cv2.RETR_LIST,
                                                       cv2.CHAIN_APPROX_NONE)
                for i in range(0, len(contours)):
                    if len(contours[i]) > 0:
                        # remove small objects
                        if cv2.contourArea(contours[i]) < 800:
                            continue
                        cv2.polylines(roi, contours[i], True, (255, 255, 255),
                                      5)

                image[y_coord:y_coord + y_extent,
                      x_coord:x_coord + x_extent] = depth_image_3d[
                          y_coord:y_coord + y_extent, x_coord:x_coord + x_extent]

                ## Display
                distance_string = "v:{1:.2f},h{0:.2f}".format(
                    distance_x, distance_y)
                cv2.rectangle(image,
                              (x_coord - thickness, y_coord - thickness),
                              (x_coord + x_extent + thickness, y_coord +
                               (18 + thickness * 4)),
                              color_array[detection[3]], -1)
                cv2.putText(image, label + " " + (str(distance_string) + " m"),
                            (x_coord + (thickness * 4), y_coord +
                             (10 + thickness * 4)), cv2.FONT_HERSHEY_SIMPLEX,
                            0.5, (255, 255, 255), 2)
                cv2.rectangle(image,
                              (x_coord - thickness, y_coord - thickness),
                              (x_coord + x_extent + thickness,
                               y_coord + y_extent + thickness),
                              color_array[detection[3]], int(thickness * 2))

            cv2.imshow("ZED", image)
            key = cv2.waitKey(5)
            log.info("FPS: {}".format(1.0 / (time.time() - start_time)))
        else:
            key = cv2.waitKey(5)
    cv2.destroyAllWindows()

    cam.close()
    log.info("\nFINISH")
def predrel(argv):

    ################################### yolo detection weight
    thresh = 0.25
    darknet_path = "/home/lx/DRNet/detection_yolo/libdarknet/"
    # config_path = darknet_path + "cfg/yolov3.cfg"
    config_path = darknet_path + "cfg/yolov3-voc.cfg"
    # weight_path = "yolov3.weights"
    weight_path = "/home/lx/DRNet/datasets/data_yolo/backup/yolov3-voc_1300.weights"
    #weight_path = "yolov4.weights"
    meta_path = darknet_path + "cfg/yolo.data"
    # meta_path = "coco.data"

    ################################### spatial relation detection weight
    # checkpoint_path = "/home/lx/DRNet/SpatialSense/baselines/runs/drnet/checkpoints/model_datasetReSe_29.pth"
    checkpoint_path = "/home/lx/DRNet/SpatialSense/baselines/runs/customon_depthon_apprfeaton_spatialdualmask/checkpoints/model_96.970_28.pth"

    # args = parse_args()

    svo_path = None
    zed_id = 0

    # help_str = 'darknet_zed.py -c <config> -w <weight> -m <meta> -t <threshold> -s <svo_file> -z <zed_id>'
    # try:
    #     opts, args = getopt.getopt(
    #         argv, "hc:w:m:t:s:z:", ["config=", "weight=", "meta=", "threshold=", "svo_file=", "zed_id="])
    # except getopt.GetoptError:
    #     log.exception(help_str)
    #     sys.exit(2)
    # for opt, arg in opts:
    #     if opt == '-h':
    #         log.info(help_str)
    #         sys.exit()
    #     elif opt in ("-c", "--config"):
    #         config_path = arg
    #     elif opt in ("-w", "--weight"):
    #         weight_path = arg
    #     elif opt in ("-m", "--meta"):
    #         meta_path = arg
    #     elif opt in ("-t", "--threshold"):
    #         thresh = float(arg)
    #     elif opt in ("-s", "--svo_file"):
    #         svo_path = arg
    #     elif opt in ("-z", "--zed_id"):
    #         zed_id = int(arg)
    args = parse_args()
    input_type = sl.InputType()
    if svo_path is not None:
        log.info("SVO file : " + svo_path)
        input_type.set_from_svo_file(svo_path)
    else:
        # Launch camera by id
        input_type.set_from_camera_id(zed_id)

    init = sl.InitParameters(input_t=input_type)
    init.coordinate_units = sl.UNIT.METER

    cam = sl.Camera()
    if not cam.is_opened():
        log.info("Opening ZED Camera...")
    status = cam.open(init)
    if status != sl.ERROR_CODE.SUCCESS:
        log.error(repr(status))
        exit()
    runtime = sl.RuntimeParameters()
    # Use STANDARD sensing mode
    runtime.sensing_mode = sl.SENSING_MODE.STANDARD
    mat = sl.Mat()
    depth_img = sl.Mat()
    point_cloud_mat = sl.Mat()
    resl = sl.Resolution(640, 480)
    # Import the global variables. This lets us instance Darknet once,
    # then just call performDetect() again without instancing again
    global metaMain, netMain, altNames  # pylint: disable=W0603
    assert 0 < thresh < 1, "Threshold should be a float between zero and one (non-inclusive)"
    if not os.path.exists(config_path):
        raise ValueError("Invalid config path `" +
                         os.path.abspath(config_path) + "`")
    if not os.path.exists(weight_path):
        raise ValueError("Invalid weight path `" +
                         os.path.abspath(weight_path) + "`")
    if not os.path.exists(meta_path):
        raise ValueError("Invalid data file path `" +
                         os.path.abspath(meta_path) + "`")
    if netMain is None:
        netMain = load_net_custom(config_path.encode("ascii"),
                                  weight_path.encode("ascii"), 0,
                                  1)  # batch size = 1
    if metaMain is None:
        metaMain = load_meta(meta_path.encode("ascii"))
    if altNames is None:
        # In Python 3, the default metafile access fails on Windows (but not Linux)
        # Read the names file and create a list to feed to detect
        try:
            with open(meta_path) as meta_fh:
                meta_contents = meta_fh.read()
                import re
                match = re.search("names *= *(.*)$", meta_contents,
                                  re.IGNORECASE | re.MULTILINE)
                if match:
                    result = match.group(1)
                else:
                    result = None
                try:
                    if os.path.exists(result):
                        with open(result) as names_fh:
                            names_list = names_fh.read().strip().split("\n")
                            altNames = [x.strip() for x in names_list]
                except TypeError:
                    pass
        except Exception:
            pass

    ###load relation detection weight
    phrase_encoder = RecurrentPhraseEncoder(300, 300)
    checkpoint = torch.load(checkpoint_path)
    model = DRNet_depth(phrase_encoder, 512, 3, args)
    model.cuda()
    model.eval()
    model.load_state_dict(checkpoint['state_dict'])
    model.cuda()

    color_array = generate_color(meta_path)
    log.info("Running...")

    key = ''
    mark2 = 0
    while key != 113:  # for 'q' key
        start_time = time.time()  # start time of the loop
        err = cam.grab(runtime)
        if err == sl.ERROR_CODE.SUCCESS:
            cam.retrieve_image(mat, sl.VIEW.LEFT, resolution=resl)
            image = mat.get_data()
            # print(image)
            ih, iw = image.shape[:2]
            # cam.retrieve_image(depth_img, sl.VIEW.DEPTH, resolution=resl)

            cam.retrieve_measure(point_cloud_mat,
                                 sl.MEASURE.XYZ,
                                 resolution=resl)
            depth = point_cloud_mat.get_data()

            # Do the detection
            detections = detect(
                netMain, metaMain, image, depth, thresh
            )  # each detection: label, confidence, bounding box, class index, depth data
            time1 = datetime.strftime(datetime.now(), '%Y%m%d%H%M%S')
            cv2.imwrite(
                '/home/lx/DRNet/experiment/qualitative_evaluation/10_28_raw/image_nobbox'
                + time1 + '_' + str(mark2) + '.jpg', image)
            # print(detections)
            # log.info(chr(27) + "[2J"+"**** " + str(len(detections)) + " Results ****")
            global relprelist
            relprelist = []
            relprelist1 = []
            ########### bits after image compression
            # global byteimage
            # img_encode = cv2.imencode('.jpg', image, [cv2.IMWRITE_JPEG_QUALITY, 99])[1]
            # byteimage = img_encode.tostring()
            # print (len(byteimage))
            ########### bits after image compression
            detectnum = len(detections)
            #### batchsize data input
            mark = 0

            sub_obj_list = []
            for idx in range(0, detectnum):
                label_sub = detections[idx][0]
                sub1 = phrase2vec(label_sub, 2, 300)
                sub = torch.unsqueeze(torch.Tensor(np.array(sub1, np.float32)),
                                      dim=0)

                bounds_sub = detections[idx][2]
                box3d_sub1 = detections[idx][4]
                box3d_sub = torch.unsqueeze(torch.Tensor(box3d_sub1), dim=0)

                subbbox = getBBox(bounds_sub)

                for idx1 in range(idx + 1, detectnum):
                    label_obj = detections[idx1][0]
                    obj1 = phrase2vec(label_obj, 2, 300)
                    obj = torch.unsqueeze(torch.Tensor(
                        np.array(obj1, np.float32)),
                                          dim=0)

                    bounds_obj = detections[idx1][2]
                    box3d_obj1 = detections[idx1][4]
                    box3d_obj = torch.unsqueeze(torch.Tensor(box3d_obj1),
                                                dim=0)

                    objbbox = getBBox(bounds_obj)
                    bbox_mask = spatial_fea(subbbox, objbbox, ih,
                                            iw)  # bbox spatial features
                    bbox_img = img_fea(subbbox, objbbox, ih, iw, 1.25,
                                       image)  # bbox image features

                    if mark < 1:
                        label_allsub = sub
                        label_allobj = obj
                        box3_allsub = box3d_sub
                        box3_allobj = box3d_obj
                        mask_allbbox = bbox_mask
                        img_allbbox = bbox_img

                    else:
                        label_allsub = torch.cat((label_allsub, sub), dim=0)
                        label_allobj = torch.cat((label_allobj, obj), dim=0)
                        img_allbbox = torch.cat((img_allbbox, bbox_img), dim=0)
                        mask_allbbox = torch.cat((mask_allbbox, bbox_mask),
                                                 dim=0)
                        box3_allsub = torch.cat((box3_allsub, box3d_sub),
                                                dim=0)
                        box3_allobj = torch.cat((box3_allobj, box3d_obj),
                                                dim=0)

                    sub_obj_list.append([label_sub, label_obj])
                    mark += 1

                thickness = 1
                cv2.rectangle(image,
                              (subbbox[2] - thickness, subbbox[0] - thickness),
                              (subbbox[3] + thickness, subbbox[1] + thickness),
                              color_array[detections[idx][3]],
                              int(thickness * 2))

            fh = open(
                '/home/lx/DRNet/experiment/qualitative_evaluation/10_28_raw/rel'
                + time1 + '_' + str(mark2) + '.txt',
                'w',
                encoding='utf-8')
            fh1 = open(
                '/home/lx/DRNet/experiment/qualitative_evaluation/10_28_num1/rel'
                + time1 + '_' + str(mark2) + '.txt',
                'w',
                encoding='utf-8')
            if mark > 0:
                relprelabel = model(label_allsub.cuda(), label_allobj.cuda(),
                                    img_allbbox.cuda(), mask_allbbox.cuda(), 1,
                                    box3_allsub.cuda(), box3_allobj.cuda(),
                                    args)
                for idxx, label in enumerate(relprelabel):
                    rellabel_t = label.argmax()
                    sub_t = object_categories.index(sub_obj_list[idxx][0])
                    obj_t = object_categories.index(sub_obj_list[idxx][1])
                    relpair_t = [sub_t, rellabel_t.item(), obj_t]

                    rellabel = predicate_categories[rellabel_t]
                    relpair = [
                        sub_obj_list[idxx][0], rellabel, sub_obj_list[idxx][1]
                    ]
                    fh.write(str(relpair) + '\r\n')

                    if relpair_t not in relprelist1:
                        fh1.write(
                            str(relpair_t).replace('[', '').replace(
                                ']', '').replace(',', ' ') + '\r\n')
                        relprelist1.append(relpair_t)

                relprelist = relprelist1

            cv2.imshow("ZED", image)
            cv2.imwrite(
                '/home/lx/DRNet/experiment/qualitative_evaluation/10_28_raw/image'
                + time1 + '_' + str(mark2) + '.jpg', image)
            fh.close()
            fh1.close()

            key = cv2.waitKey(5)
            mark2 += 1

        else:
            key = cv2.waitKey(5)
    cv2.destroyAllWindows()
    cam.close()
    log.info("\nFINISH")
def extract_xyz(x_pixel_M1, y_pixel_M1, x_pixel_M2, y_pixel_M2, x_pixel_M3,
                y_pixel_M3, outputfilename, svofilename):
    x_world_M1 = []
    y_world_M1 = []
    z_world_M1 = []

    x_world_M2 = []
    y_world_M2 = []
    z_world_M2 = []

    x_world_M3 = []
    y_world_M3 = []
    z_world_M3 = []

    # x_pixel_M1 = [item[8] for item in xy_M1]
    # y_pixel_M1 = [item[9] for item in xy_M1]
    # x_pixel_M2 = [item[8] for item in xy_M2]
    # y_pixel_M2 = [item[9] for item in xy_M2]

    #filepath = "/home/avnish/zed_marmoset_recording/20200922.svo"
    #print("SVO file : ", filepath.rsplit('/', 1)[1])
    # Create a Camera object

    # Create a InitParameters object and set configuration parameters
    #init_params = sl.InitParameters(svo_input_filename=filepath,svo_real_time_mode=False)

    #InitParameters init_params; // Set initial parameters
    #init_params.sdk_verbose = True; // Enable verbose mode
    #init_params.input.setFromSVOFile("/path/to/file.svo"); // Selects the SVO file to be read

    input_type = sl.InputType()
    print("reading " + svofilename)
    input_type.set_from_svo_file(svofilename)
    init = sl.InitParameters(input_t=input_type, svo_real_time_mode=False)
    init.depth_mode = sl.DEPTH_MODE.ULTRA
    init.coordinate_units = sl.UNIT.MILLIMETER  # Use millimeter units (for depth measurements)
    #init.depth_minimum_distance = 300
    #init.depth_maximum_distance = 1500

    zed = sl.Camera()

    # Open the camera
    err = zed.open(init)
    if err != sl.ERROR_CODE.SUCCESS:
        print('error_details: ', repr(err))
        exit(1)

    # Create and set RuntimeParameters after opening the camera
    runtime_parameters = sl.RuntimeParameters()
    runtime_parameters.sensing_mode = sl.SENSING_MODE.FILL

    i = 0
    pbar = tqdm(total=len(x_pixel_M1))
    # image = sl.Mat()
    point_cloud = sl.Mat()

    try:
        while i < len(x_pixel_M1):
            # A new image is available if grab() returns SUCCESS
            if zed.grab(runtime_parameters) == sl.ERROR_CODE.SUCCESS:
                # Retrieve left image
                # zed.retrieve_image(image, sl.VIEW.VIEW_LEFT)
                # Retrieve depth map. Depth is aligned on the left image
                # zed.retrieve_measure(depth, sl.MEASURE.MEASURE_DEPTH)
                zed.retrieve_measure(point_cloud, sl.MEASURE.XYZRGBA)
                #print(x_pixel_M1[i], y_pixel_M1[i], x_pixel_M2[i], y_pixel_M2[i])
                #print(type(x_pixel_M1[i]))

                err_M1, point_cloud_value_M1 = point_cloud.get_value(
                    x_pixel_M1[i], y_pixel_M1[i])
                err_M2, point_cloud_value_M2 = point_cloud.get_value(
                    x_pixel_M2[i], y_pixel_M2[i])
                err_M3, point_cloud_value_M3 = point_cloud.get_value(
                    x_pixel_M3[i], y_pixel_M3[i])
                #print('after get value')
                # Get and print distance value in mm at the center of the image
                # We measure the distance camera - object using Euclidean distance
                # x = round(image.get_width() / 2)
                # y = round(image.get_height() / 2)

                #lens2cage_top_mm=693.42
                lens2cage_top_mm = 528.00  #Amelia
                lens2cage_edge_x_mm = 387.35
                lens2cage_edge_y_mm = 387.35
                x_world_M1.append(
                    point_cloud_value_M1[0] +
                    lens2cage_edge_x_mm)  #millimeters; in cage coordinates
                y_world_M1.append(-point_cloud_value_M1[1] +
                                  lens2cage_edge_y_mm)
                z_world_M1.append(point_cloud_value_M1[2] - lens2cage_top_mm)

                x_world_M2.append(point_cloud_value_M2[0] +
                                  lens2cage_edge_x_mm)
                y_world_M2.append(-point_cloud_value_M2[1] +
                                  lens2cage_edge_y_mm)
                z_world_M2.append(point_cloud_value_M2[2] - lens2cage_top_mm)

                x_world_M3.append(point_cloud_value_M3[0] +
                                  lens2cage_edge_x_mm)
                y_world_M3.append(-point_cloud_value_M3[1] +
                                  lens2cage_edge_y_mm)
                z_world_M3.append(point_cloud_value_M3[2] - lens2cage_top_mm)

                # Increment the loop
                # print(i)
                pbar.update(1)
                i += 1
                # if (i % 1000) == 0:
                # print(i)
                # sys.stdout.flush()

        pbar.close()
        # Close the camera
        zed.close()

    except OverflowError as error:
        print(error)
        print("frame_number_completed: {}".format(i))
        pbar.close()
        zed.close()

    finally:
        rows = zip(x_world_M1, y_world_M1, z_world_M1, x_world_M2, y_world_M2,
                   z_world_M2, x_world_M3, y_world_M3, z_world_M3)
        print("writing " + outputfilename)
        with open(outputfilename, 'w', newline='') as f:
            writer = csv.writer(f)
            for row in rows:
                writer.writerow(row)
        print("Done")
Exemple #22
def main():

    # Create a ZED camera object
    zed = sl.Camera()

    # Set configuration parameters
    input_type = sl.InputType()
    if len(sys.argv) >= 2:
        input_type.set_from_svo_file(sys.argv[1])
    init = sl.InitParameters(input_t=input_type)
    init.camera_resolution = sl.RESOLUTION.HD720
    init.depth_mode = sl.DEPTH_MODE.PERFORMANCE
    init.coordinate_units = sl.UNIT.MILLIMETER
    # init.depth_minimum_distance = 0.3

    # Open the camera
    err = zed.open(init)
    if err != sl.ERROR_CODE.SUCCESS:
        print(repr(err))
        zed.close()
        exit(1)

    # Display help in console
    print_help()
    print(count_save)
    # check for existing folders and number of previously captured frames
    check_dir()

    # Set runtime parameters after opening the camera
    runtime = sl.RuntimeParameters()
    runtime.sensing_mode = sl.SENSING_MODE.FILL

    # Prepare new image size to retrieve half-resolution images
    image_size = zed.get_camera_information().camera_resolution
    image_size.width = image_size.width // 2
    image_size.height = image_size.height // 2

    # Declare your sl.Mat matrices
    image_zed = sl.Mat(image_size.width, image_size.height, sl.MAT_TYPE.U8_C4)
    depth_image_zed = sl.Mat(image_size.width, image_size.height,
                             sl.MAT_TYPE.U8_C4)
    point_cloud = sl.Mat()

    key = ' '
    while key != 113:
        err = zed.grab(runtime)
        if err == sl.ERROR_CODE.SUCCESS:
            # Retrieve the left image, depth image in the half-resolution
            zed.retrieve_image(image_zed, sl.VIEW.LEFT, sl.MEM.CPU, image_size)
            zed.retrieve_image(depth_image_zed, sl.VIEW.DEPTH, sl.MEM.CPU,
                               image_size)
            # Retrieve the RGBA point cloud in half resolution
            zed.retrieve_measure(point_cloud, sl.MEASURE.XYZRGBA, sl.MEM.CPU,
                                 image_size)

            # To recover data from sl.Mat to use it with opencv, use the get_data() method
            # It returns a numpy array that can be used as a matrix with opencv
            image_ocv = image_zed.get_data()
            depth_image_ocv = depth_image_zed.get_data()

            cv2.imshow("Image", image_ocv)
            cv2.imshow("Depth", depth_image_ocv)

            key = cv2.waitKey(10)

            process_key_event(zed, key)

    cv2.destroyAllWindows()
    zed.close()

    print("\nFINISH")
Exemple #23
def zedGrabber(zedFramesQueue):

    svo_path = "HD1080.svo"
    zed_id = 0
    init_mode = 0

    input_type = sl.InputType()
    if init_mode == 0:
        input_type.set_from_svo_file(svo_path)
    else:
        input_type.set_from_camera_id(zed_id)

    init = sl.InitParameters(input_t=input_type)
    init.camera_resolution = sl.RESOLUTION.HD1080
    init.camera_fps = 30
    init.coordinate_units = sl.UNIT.METER
    init.coordinate_system = sl.COORDINATE_SYSTEM.RIGHT_HANDED_Z_UP

    cam = sl.Camera()
    if not cam.is_opened():
        print("Opening ZED Camera...")
    status = cam.open(init)
    if status != sl.ERROR_CODE.SUCCESS:
        log.error(repr(status))
        exit()

    # Enable positional tracking with default parameters
    # First create a Transform object for the TrackingParameters object
    py_transform = sl.Transform()
    tracking_parameters = sl.PositionalTrackingParameters(
        init_pos=py_transform)
    err = cam.enable_positional_tracking(tracking_parameters)

    if err != sl.ERROR_CODE.SUCCESS:
        exit(1)

    zed_pose = sl.Pose()
    runtime = sl.RuntimeParameters()

    # Use STANDARD sensing mode
    runtime.sensing_mode = sl.SENSING_MODE.STANDARD
    mat = sl.Mat()
    point_cloud_mat = sl.Mat()

    # Import the global variables. This lets us instance Darknet once,

    while True:

        start_time = time.time()  # start time of the loop
        err = cam.grab(runtime)

        if (err != sl.ERROR_CODE.SUCCESS):
            break

        cam.retrieve_image(mat, sl.VIEW.LEFT)
        image = mat.get_data()

        cam.retrieve_measure(point_cloud_mat, sl.MEASURE.XYZRGBA)
        depth = point_cloud_mat.get_data()
        cam.get_position(zed_pose, sl.REFERENCE_FRAME.WORLD)

        py_translation = sl.Translation()
        tx = round(zed_pose.get_translation(py_translation).get()[0], 3)
        ty = round(zed_pose.get_translation(py_translation).get()[1], 3)
        tz = round(zed_pose.get_translation(py_translation).get()[2], 3)
        camPosition = (tx, ty, tz)
        #print("Translation: Tx: {0}, Ty: {1}, Tz {2}, Timestamp: {3}".format(tx, ty, tz, zed_pose.timestamp.get_milliseconds()))
        camOrientation = zed_pose.get_rotation_vector() * 180 / math.pi
        image = rotate(image, -camOrientation[1], center=None, scale=1.0)
        FPS = 1.0 / (time.time() - start_time)
        # Do the detection
        #rawDetections = detect(netMain, metaMain, image, thresh)
        #detectionsList = detectionsAnalayzer(rawDetections, depth)
        frame = zedFrame(image, depth, camOrientation, camPosition, FPS)
        zedFramesQueue.put(frame)
        #image = cvDrawBoxes(detectionsList, image)
        #image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
        #image = cv2.resize(image,(1280,720),interpolation=cv2.INTER_LINEAR)
    cam.close()
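# zedFrame above is a container defined elsewhere; a minimal equivalent plus
# a consumer that drains zedFramesQueue (both hypothetical sketches):
from collections import namedtuple

zedFrame = namedtuple("zedFrame",
                      ["image", "depth", "camOrientation", "camPosition", "FPS"])

def frameConsumer(zedFramesQueue):
    while True:
        frame = zedFramesQueue.get()  # blocks until the grabber puts a frame
        print("pose:", frame.camPosition, "FPS: {:.1f}".format(frame.FPS))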
Exemple #24
def main() :
    # Create a ZED camera object
    zed = sl.Camera()

    # Set configuration parameters
    input_type = sl.InputType()
    init = sl.InitParameters(input_t=input_type)
    init.camera_resolution = sl.RESOLUTION.HD1080
    #init.camera_resolution = sl.RESOLUTION.HD720
    init.depth_mode = sl.DEPTH_MODE.PERFORMANCE
    init.coordinate_units = sl.UNIT.MILLIMETER
    #init.coordinate_units = sl.UNIT.METER

    # Open the camera
    err = zed.open(init)
    if err != sl.ERROR_CODE.SUCCESS :
        print(repr(err))
        zed.close()
        exit(1)

    # Display help in console
    print_help()

    # Creating GUI
    gui = ZED_GUI()

    # Set runtime parameters after opening the camera
    runtime = sl.RuntimeParameters()
    runtime.sensing_mode = sl.SENSING_MODE.STANDARD

    # Prepare new image size to retrieve half-resolution images
    image_size = zed.get_camera_information().camera_resolution
    image_size.width = image_size.width // 2
    image_size.height = image_size.height // 2

    # Declare your sl.Mat matrices
    image_zed = sl.Mat(image_size.width, image_size.height, sl.MAT_TYPE.U8_C4)
    depth_image_zed = sl.Mat(image_size.width, image_size.height, sl.MAT_TYPE.U8_C4)
    point_cloud = sl.Mat()

    sensors_data = sl.SensorsData()

    key = ' '
    event = ' '
    while key != 113:
        err = zed.grab(runtime)

        event, values = gui.window.read(timeout=20)
        if event == "Exit" or event == sg.WIN_CLOSED:
            break

        if event == "Capture":
            try:
                height = float(values["-HEIGHT-"])
                width  = float(values["-WIDTH-"])
                length = float(values["-LENGTH-"])
                no_cajas = float(values["-NO_CAJAS-"])
                no_cajas_base = float(values["-NO_CAJAS_BASE-"])
                no_cajas_alto = float(values["-NO_CAJAS_ALTO-"])

                sg.popup_timed('Capturing data, do not move the camera or the object', title='Capturing data', auto_close_duration=5, non_blocking=True)

                capture_data(zed, height, width, length, no_cajas, no_cajas_base, no_cajas_alto)

                gui.window["-HEIGHT-"].update('')
                gui.window["-WIDTH-"].update('')
                gui.window["-LENGTH-"].update('')
                gui.window["-NO_CAJAS-"].update('1')
                gui.window["-NO_CAJAS_BASE-"].update('1')
                gui.window["-NO_CAJAS_ALTO-"].update('1')

                sg.popup_timed('Capture saved successfully. Ready to continue', title='Capture successful', auto_close_duration=5)
            except:
                sg.popup_timed('You must enter a numeric value in all three fields. Use a point for decimals.', title='Invalid values', auto_close_duration=5)
                pass

        if err == sl.ERROR_CODE.SUCCESS :
            # Retrieve the left image, depth image in the half-resolution
            zed.retrieve_image(image_zed, sl.VIEW.LEFT, sl.MEM.CPU, image_size)
            zed.retrieve_image(depth_image_zed, sl.VIEW.DEPTH, sl.MEM.CPU, image_size)
            # capture sensors data
            zed.get_sensors_data(sensors_data, sl.TIME_REFERENCE.IMAGE)

            # To recover data from sl.Mat to use it with opencv, use the get_data() method
            # It returns a numpy array that can be used as a matrix with opencv
            image_ocv = image_zed.get_data()
            depth_image_ocv = depth_image_zed.get_data()

            # concatenate both images to show them side by side in the GUI
            sbs_image = np.concatenate((image_ocv, depth_image_ocv), axis=1)
            imgbytes = cv2.imencode(".png", sbs_image)[1].tobytes()
            gui.window["-IMAGE-"].update(data=imgbytes)

            # show sensors data
            quaternion, linear_acceleration, angular_velocity, magnetic_field_calibrated, atmospheric_pressure =\
                get_data_from_sensors(sensors_data)

            gui.window["-IMU_ORIENTATION-"].update(quaternion)
            gui.window["-IMU_ACCELERATION-"].update(linear_acceleration)
            gui.window["-IMU_ANG_VEL-"].update(angular_velocity)
            gui.window["-IMU_MAG_FIELD-"].update(magnetic_field_calibrated)
            gui.window["-IMU_ATM_PRESS-"].update(atmospheric_pressure)

            key = cv2.waitKey(10)

    cv2.destroyAllWindows()
    zed.close()
    gui.window.close()

    print("\nFINISH")
    def Video_To_Picture(self, video_path, images_path, sample_rate, begin_number=0, type=".jpg"):
        """
        Convert a video into individual images named by number (e.g. 0.jpg).
        .svo files need the extra setup below.
        :param video_path: path to the video
        :param images_path: directory the images are saved to
        :param sample_rate: keep one frame out of every sample_rate frames
        :param begin_number: number the first saved image is named with
        :param type: image file extension to save with
        :return:
        """
        # 1: Make sure the video file exists
        assert os.path.exists(video_path), "no video at " + video_path + ", cannot convert its frames"
        assert os.path.exists(images_path), "image output path " + images_path + " does not exist, cannot save files"

        # 2: Initialize bookkeeping variables
        i = begin_number  # counts converted frames
        all_number = 0  # total frames seen; used to apply the sampling rate
        read_flag = False  # distinguishes a failed read from reaching the end of the video

        # 3: Load the video
        # 3.1: Handle .svo files
        if video_path[-4:] == ".svo":
            import pyzed.sl as sl  # only import pyzed for ZED recordings
            input_type = sl.InputType()
            input_type.set_from_svo_file(video_path)
            init = sl.InitParameters(input_t=input_type, svo_real_time_mode=False)
            cam = sl.Camera()
            status = cam.open(init)
            if status!=sl.ERROR_CODE.SUCCESS:
                print(".svo文件导入失败,错误原因为:",repr(status))
                return
            runtime=sl.RuntimeParameters()
            sl_image=sl.Mat()

            print("正在将.svo文件转换为图片")
            while True:
                err=cam.grab(runtime)
                if err==sl.ERROR_CODE.SUCCESS:
                    cam.retrieve_image(sl_image)
                    image=sl_image.get_data()[:,:,:3].copy()
                    image=image.astype(np.uint8)

                    all_number=all_number+1
                    if all_number%sample_rate!=0:
                        continue
                    image_filename=images_path+"/"+str(i)+type
                    cv.imwrite(image_filename,image)
                    read_flag=True

                else:
                    if read_flag:
                        break
                    else:
                        print("导入.svo文件出错,错误码为:",err)


                sys.stdout.write("\r当前已经处理了 {} 帧视频".format(str(i-begin_number)))  # 为了进行单行输出
                sys.stdout.flush()
                i = i + 1
            print(" ")
            print("完成视频解析,文件夹下共有{}帧图片,存放的路径为:{}".format(str(i), images_path))
            print("这个视频一共有:{}帧".format(all_number))


        # 3.2: Handle other video formats
        else:
            cap = cv.VideoCapture(video_path)

            print("正在将视频转换为图片")
            while True:
                ret, frame = cap.read()
                if ret:
                    all_number=all_number+1
                    if all_number % sample_rate != 0:
                        continue  # skip frames according to the sampling rate
                    image_filename = images_path + "/" + str(i) +type
                    cv.imwrite(image_filename, frame)
                    read_flag = True

                # stop once the video has been fully read, and make sure the input really is a video
                else:
                    if read_flag:
                        break
                    else:
                        print("该文件不是视频")

                # single-line progress output
                sys.stdout.write("\rprocessed {} video frames so far".format(str(i - begin_number)))
                sys.stdout.flush()
                i = i + 1
            print(" ")
            print("完成视频解析,文件夹下共有{}帧图片,存放的路径为:{}".format(str(i), images_path))
            print("这个视频一共有:{}帧".format(all_number))
def main():

    recordVideo = False
    showImages = False

    if len(sys.argv) != 2:
        print("Please specify path to .svo file.")
        exit()

    filepath = sys.argv[1]
    print("Reading SVO file: {0}".format(filepath))

    # Specify SVO path parameter
    # init_params = sl.InitParameters()
    # init_params.set_from_svo_file(str(svo_input_path))
    # init_params.svo_real_time_mode = False  # Don't convert in realtime
    # init_params.coordinate_units = sl.UNIT.MILLIMETER  # Use millimeter units (for depth measurements)

    input_type = sl.InputType()
    input_type.set_from_svo_file(filepath)
    init_params = sl.InitParameters(input_t=input_type,
                                    svo_real_time_mode=False)
    init_params.coordinate_units = sl.UNIT.CENTIMETER  # Use centimeter units (for depth measurements)
    init_params.depth_mode = sl.DEPTH_MODE.ULTRA  # Use ULTRA depth mode
    init_params.depth_minimum_distance = 1  # Minimum depth distance, expressed in the units above (1 cm)
    init_params.depth_maximum_distance = 40  # Maximum depth distance, expressed in the units above (40 cm)

    cam = sl.Camera()

    status = cam.open(init_params)
    if status != sl.ERROR_CODE.SUCCESS:
        print(repr(status))
        exit()

    runtime = sl.RuntimeParameters()
    image = sl.Mat()
    depth_map = sl.Mat()
    point_cloud = sl.Mat()

    ## initialize image window
    # define the screen resolution
    screen_res = 1920, 1200
    #image size
    width = cam.get_camera_information().camera_resolution.width
    height = cam.get_camera_information().camera_resolution.height

    scale_width = screen_res[0] / width
    scale_height = screen_res[1] / height
    scale = min(scale_width, scale_height)
    #resized window width and height
    window_width = int(width * scale)
    window_height = int(height * scale)
    window_size = (window_width, window_height)

    # cv2.WINDOW_NORMAL makes the output window resizable
    cv2.namedWindow('result', cv2.WINDOW_NORMAL)

    #resize the window according to the screen resolution
    #cv2.resizeWindow("result", window_width, window_height)

    key = ''
    print("  Save the current image:     s")
    print("  Quit the video reading:     q\n")

    print(cam.get_svo_position())
    cam.set_svo_position(10000)
    print(cam.get_svo_position())
    i = 0
    frame_processing_time = time.time()

    while (key != 113) or (i != 50):  # stops only once 'q' is pressed and 50 valid frames have been processed
        err = cam.grab(runtime)
        if err == sl.ERROR_CODE.SUCCESS:
            cam.retrieve_image(image, sl.VIEW.LEFT)
            # To recover data from sl.Mat to use it with opencv, use the get_data() method
            # It returns a numpy array that can be used as a matrix with opencv
            frame = image.get_data()
            # Get the depth map
            cam.retrieve_measure(depth_map, sl.MEASURE.DEPTH)

            cam.retrieve_measure(point_cloud, sl.MEASURE.XYZRGBA)
            #line detection
            lane_image = np.copy(frame)
            canny_image = canny(frame)
            cropped_image = region_of_intrest(canny_image)
            lines = cv2.HoughLinesP(cropped_image,
                                    2,
                                    np.pi / 180,
                                    100,
                                    np.array([]),
                                    minLineLength=40,
                                    maxLineGap=5)
            averaged_lines = average_slope_intercept(lane_image, lines)
            Hline_image = display_lines(lane_image, lines)
            line_image = display_lines(lane_image, averaged_lines)
            combo_image = cv2.addWeighted(frame, 0.8, line_image, 1, 1)
            cropped_combo_image = region_of_intrest(combo_image)

            ## get distance / location

            # Get and print distance value in mm at the center of the image
            # We measure the distance camera - object using Euclidean distance
            x = round(image.get_width() / 2)
            y = round(image.get_height() / 2)
            err, point_cloud_value = point_cloud.get_value(x, y)
            distance = math.sqrt(point_cloud_value[0] * point_cloud_value[0] +
                                 point_cloud_value[1] * point_cloud_value[1] +
                                 point_cloud_value[2] * point_cloud_value[2])

            point_cloud_np = point_cloud.get_data()
            # point_cloud_np.dot(tr_np)

            if not np.isnan(distance) and not np.isinf(distance):
                # print("Distance to Camera at ({}, {}) (image center): {:1.3} m".format(x, y, distance), end="\r")
                # Increment the loop
                i = i + 1
            else:
                print("Can't estimate distance at this position.")
                print(
                    "Your camera is probably too close to the scene, please move it backwards.\n"
                )
            sys.stdout.flush()

            # Get and print distance value in mm at the center of the image
            # We measure the distance camera - object using Euclidean distance
            # x = mat.get_width() / 2
            # y = mat.get_height() / 2
            # point_cloud_value = point_cloud.get_value(x, y)

            # distance = math.sqrt(point_cloud_value[0]*point_cloud_value[0] + point_cloud_value[1]*point_cloud_value[1] + point_cloud_value[2]*point_cloud_value[2])
            # printf("Distance to Camera at (", x, y, "): ", distance, "mm")

            # x1, y1, x2, y2 = lines[0].reshape(4)
            # depth_value = depth_map.get_value(x2,y2)
            # print("x2= ",x2," y2= ",y2," z= ",depth_value)
            # point3D = point_cloud.get_value(1220,850)#1220	850
            # x = point3D[0]
            # y = point3D[1]
            # z = point3D[2]
            # #color = point3D[3]
            # print("x= ",x," y= ",y," z= ",z)

            if showImages:
                # cv2.imshow("cropped_lane_image", cropped_lane_image)
                # frame_canny = cv2.cvtColor(canny_image, cv2.COLOR_GRAY2BGR)
                # frame_cropped = cv2.cvtColor(cropped_image, cv2.COLOR_GRAY2BGR)

                # plt.imshow(cropped_combo_image)#canny_image)
                # plt.show()

                # combined_image= combine(lane_image, cropped_combo_image ,canny_image, cropped_image, combo_image  , Hline_image )# , line_image
                #cv2.namedWindow("combined_image", cv2.WINDOW_NORMAL)
                #com_height, com_width, com_layers = combined_image.shape
                #imS = cv2.resize(combined_image, window_size)
                #cv2.imshow("combined_image",imS)
                #cv2.imshow("result", combo_image)
                # cv2.imshow("result", combined_image)
                cv2.imshow("result", cropped_combo_image)
                # end line detection

                #cv2.imshow("ZED", mat.get_data())
                key = cv2.waitKey(1)
                saving_image(key, image)
                # print(int(cam.get_svo_number_of_frames()//2))
                # cam.set_svo_position(10000)
            else:
                key = 113
        else:
            key = cv2.waitKey(1)
        # i= i+1

        #measure the time it takes to process one frame
        print("--- %s seconds ---" % (time.time() - frame_processing_time))
        frame_processing_time = time.time()

    cv2.destroyAllWindows()

    print_camera_information(cam)
    cam.close()
    print("\nFINISH")