Example #1
def friendly_crs_equal(expected, actual, keys=None, use_obj=True, use_wkt=True):
    """Test if two projection definitions are equal.

    The main purpose of this function is to help manage differences
    between pyproj versions. Depending on the version installed and used,
    pyresample may provide a different `proj_dict` or other similar
    CRS definition.

    Args:
        expected (dict, str, pyproj.crs.CRS): Expected CRS definition as
            a PROJ dictionary or string or CRS object.
        actual (dict, str, pyproj.crs.CRS): Actual CRS definition
        keys (list): Specific PROJ parameters to look for. Only takes effect
            if `use_obj` is `False`.
        use_obj (bool): Use pyproj's CRS object to test equivalence. Default
            is True.
        use_wkt (bool): Increase the likelihood of the CRS objects being equal
            by converting to Well-Known Text (WKT) before creating the final
            CRS objects. Requires `use_obj`. Defaults to True.

    """
    if CRS is not None and use_obj:
        if hasattr(expected, 'crs'):
            expected = expected.crs
        if hasattr(actual, 'crs'):
            actual = actual.crs
        expected_crs = CRS(expected)
        actual_crs = CRS(actual)
        if use_wkt:
            expected_crs = CRS(expected_crs.to_wkt())
            actual_crs = CRS(actual_crs.to_wkt())
        return expected_crs == actual_crs
    raise NotImplementedError("""TODO""")
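A minimal usage sketch for the function above (the PROJ definitions are illustrative, not taken from the source; it assumes pyproj is installed so the module-level CRS import is not None):

# Two spellings of the same geographic CRS; with use_obj/use_wkt enabled they
# should compare equal despite the different input forms.
expected = {'proj': 'longlat', 'datum': 'WGS84'}
actual = "+proj=longlat +datum=WGS84 +no_defs"
assert friendly_crs_equal(expected, actual)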
Example #2
    def configure_database_with_custom_crs(self, index) -> None:
        """Configure the database with custom EPSG/CRS definitions loaded from the config file.

        Each `entity:code` entry is inserted into `spatial_ref_sys` if its SRID
        is not already present.
        """
        custom_crs = self._load_configfile()

        for custom_crs_code_and_entity in custom_crs:
            custom_crs_entity, custom_crs_code = custom_crs_code_and_entity.split(":")
            resproxy = index._db._engine.execute(
                f"SELECT * FROM spatial_ref_sys WHERE srid = {custom_crs_code}"
            )

            resset = resproxy.first()
            if not resset:
                custom_crs_string = PJCRS(custom_crs[custom_crs_code_and_entity])

                index._db._engine.execute(
                    "INSERT INTO spatial_ref_sys VALUES ({}, '{}', '{}', '{}', '{}');".format(
                        custom_crs_code,
                        custom_crs_entity.upper(),
                        custom_crs_code,
                        custom_crs_string.to_wkt(),
                        custom_crs_string.to_proj4(),
                    )
                )
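The INSERT above interpolates values into the SQL string with str.format; as a hedged alternative sketch (assuming a SQLAlchemy 1.x-style engine, which the `_engine.execute` calls suggest, and the standard PostGIS spatial_ref_sys columns), the same statement could use bound parameters:

from sqlalchemy import text

# Reuses custom_crs_code, custom_crs_entity and custom_crs_string from the loop above.
index._db._engine.execute(
    text(
        "INSERT INTO spatial_ref_sys (srid, auth_name, auth_srid, srtext, proj4text) "
        "VALUES (:srid, :auth, :srid, :wkt, :proj4)"
    ),
    srid=int(custom_crs_code),
    auth=custom_crs_entity.upper(),
    wkt=custom_crs_string.to_wkt(),
    proj4=custom_crs_string.to_proj4(),
)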
Example #3
    def from_pyproj(cls, pyproj_crs: pyproj.CRS):
        """
        Create CRS object from pyproj.CRS object.

        Parameters
        ----------
        pyproj_crs : pyproj.CRS
            pyproj.CRS object.

        Returns
        -------
        CRS
            CRS instance.

        """
        return cls(projection_string=pyproj_crs.to_wkt(version="WKT1_ESRI"))
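A short usage sketch (the owning class name MyCRS is hypothetical, standing in for whatever class defines the method above, which would normally be decorated with @classmethod):

import pyproj

# Wrap an existing pyproj CRS; the method above serializes it as ESRI-flavoured WKT1.
utm10 = pyproj.CRS.from_epsg(32610)
crs = MyCRS.from_pyproj(utm10)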
Example #4
def convert(files,
            outfolder='./3dtiles',
            overwrite=False,
            jobs=multiprocessing.cpu_count(),
            cache_size=int(total_memory_MB / 10),
            srs_out=None,
            srs_in=None,
            fraction=100,
            benchmark=None,
            rgb=True,
            graph=False,
            color_scale=None,
            verbose=False,
            subdivision_limit=2000,
            length_limit=0.001,
            merge_limit=200):
    """convert

    Convert point clouds (xyz or las) to a 3dtiles tileset containing pnts nodes

    :param files: Filenames to process. The file must use the .las or .xyz format.
    :type files: list of str, or str
    :param outfolder: The folder where the resulting tileset will be written.
    :type outfolder: path-like object
    :param overwrite: Overwrite the output folder if it already exists.
    :type overwrite: bool
    :param jobs: The number of parallel jobs to start. Defaults to the number of CPUs.
    :type jobs: int
    :param cache_size: Cache size in MB. Defaults to available memory / 10.
    :type cache_size: int
    :param srs_out: SRS to convert the output with (numeric part of the EPSG code)
    :type srs_out: int or str
    :param srs_in: Override input SRS (numeric part of the EPSG code)
    :type srs_in: int or str
    :param fraction: Percentage of the pointcloud to process, between 0 and 100.
    :type fraction: int
    :param benchmark: Print summary at the end of the process
    :type benchmark: str
    :param rgb: Export rgb attributes.
    :type rgb: bool
    :param graph: Produce debug graphs (requires pygal).
    :type graph: bool
    :param color_scale: Force color scale
    :type color_scale: float

    :raises SrsInMissingException: if py3dtiles couldn't find SRS information in the input files and srs_in is not specified


    """
    global globs

    globs.subdivision_limit = subdivision_limit
    globs.length_limit = length_limit
    globs.merge_limit = merge_limit

    # allow str directly if only one input
    files = [files] if isinstance(files, str) else files

    # read all input files headers and determine the aabb/spacing
    _, ext = os.path.splitext(files[0])
    init_reader_fn = las_reader.init if ext == '.las' else xyz_reader.init
    infos = init_reader_fn(files,
                           color_scale=color_scale,
                           srs_in=srs_in,
                           srs_out=srs_out)

    avg_min = infos['avg_min']
    rotation_matrix = None
    # srs stuff
    transformer = None
    srs_out_wkt = None
    srs_in_wkt = None
    if srs_out is not None:
        crs_out = CRS('epsg:{}'.format(srs_out))
        if srs_in is not None:
            crs_in = CRS('epsg:{}'.format(srs_in))
        elif infos['srs_in'] is None:
            raise SrsInMissingException(
                'No SRS information in the provided files')
        else:
            crs_in = CRS(infos['srs_in'])

        srs_out_wkt = crs_out.to_wkt()
        srs_in_wkt = crs_in.to_wkt()

        transformer = Transformer.from_crs(crs_in, crs_out)

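        # Transform three corners of the input aabb: bottom-left (bl), top-right (tr)
        # and the corner sharing x with tr and y/z with bl (br); br - bl gives the
        # x axis used below when aligning the 4978 bounding box with the data.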
        bl = np.array(
            list(
                transformer.transform(infos['aabb'][0][0], infos['aabb'][0][1],
                                      infos['aabb'][0][2])))
        tr = np.array(
            list(
                transformer.transform(infos['aabb'][1][0], infos['aabb'][1][1],
                                      infos['aabb'][1][2])))
        br = np.array(
            list(
                transformer.transform(infos['aabb'][1][0], infos['aabb'][0][1],
                                      infos['aabb'][0][2])))

        avg_min = np.array(
            list(transformer.transform(avg_min[0], avg_min[1], avg_min[2])))

        x_axis = br - bl

        bl = bl - avg_min
        tr = tr - avg_min

        if srs_out == '4978':
            # Transform geocentric normal => (0, 0, 1)
            # and 4978-bbox x axis => (1, 0, 0),
            # to have a bbox in local coordinates that's nicely aligned with the data
            rotation_matrix = make_rotation_matrix(avg_min, np.array([0, 0,
                                                                      1]))
            rotation_matrix = np.dot(
                make_rotation_matrix(x_axis, np.array([1, 0, 0])),
                rotation_matrix)

            bl = np.dot(bl, rotation_matrix[:3, :3].T)
            tr = np.dot(tr, rotation_matrix[:3, :3].T)

        root_aabb = np.array([np.minimum(bl, tr), np.maximum(bl, tr)])
    else:
        # offset
        root_aabb = infos['aabb'] - avg_min

    original_aabb = root_aabb

    base_spacing = compute_spacing(root_aabb)
    if base_spacing > 10:
        root_scale = np.array([0.01, 0.01, 0.01])
    elif base_spacing > 1:
        root_scale = np.array([0.1, 0.1, 0.1])
    else:
        root_scale = np.array([1, 1, 1])

    root_aabb = root_aabb * root_scale
    root_spacing = compute_spacing(root_aabb)

    octree_metadata = OctreeMetadata(aabb=root_aabb,
                                     spacing=root_spacing,
                                     scale=root_scale[0])

    # create folder
    if os.path.isdir(outfolder):
        if overwrite:
            shutil.rmtree(outfolder, ignore_errors=True)
        else:
            print('Error, folder \'{}\' already exists'.format(outfolder))
            sys.exit(1)

    os.makedirs(outfolder)
    working_dir = os.path.join(outfolder, 'tmp')
    os.makedirs(working_dir)

    node_store = SharedNodeStore(working_dir)

    if verbose >= 1:
        print('Summary:')
        print('  - points to process: {}'.format(infos['point_count']))
        print('  - offset to use: {}'.format(avg_min))
        print('  - root spacing: {}'.format(root_spacing / root_scale[0]))
        print('  - root aabb: {}'.format(root_aabb))
        print('  - original aabb: {}'.format(original_aabb))
        print('  - scale: {}'.format(root_scale))

    startup = time.time()

    initial_portion_count = len(infos['portions'])

    if graph:
        progression_log = open('progression.csv', 'w')

    def add_tasks_to_process(state, name, task, point_count):
        assert point_count > 0
        tasks_to_process = state.node_process.input
        if name not in tasks_to_process:
            tasks_to_process[name] = ([task], point_count)
        else:
            tasks, count = tasks_to_process[name]
            tasks.append(task)
            tasks_to_process[name] = (tasks, count + point_count)

    processed_points = 0
    points_in_progress = 0
    previous_percent = 0
    points_in_pnts = 0

    max_splitting_jobs_count = max(1, jobs // 2)

    # zmq setup
    context = zmq.Context()

    zmq_skt = context.socket(zmq.ROUTER)
    zmq_skt.bind('ipc:///tmp/py3dtiles1')

    zmq_idle_clients = []

    state = State(infos['portions'])

    zmq_processes_killed = -1

    zmq_processes = [
        multiprocessing.Process(target=zmq_process,
                                args=(graph, srs_out_wkt, srs_in_wkt,
                                      node_store, octree_metadata, outfolder,
                                      rgb, verbose)) for i in range(jobs)
    ]

    for p in zmq_processes:
        p.start()
    activities = [p.pid for p in zmq_processes]

    time_waiting_an_idle_process = 0

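    # Main scheduling loop: drain results from the worker processes, queue
    # pnts-writing jobs, queue node-processing jobs, feed reader portions to
    # idle workers, then check whether everything is done.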
    while True:
        # state.print_debug()
        now = time.time() - startup
        at_least_one_job_ended = False

        all_processes_busy = not can_queue_more_jobs(zmq_idle_clients)
        while all_processes_busy or zmq_skt.poll(timeout=0, flags=zmq.POLLIN):
            # Blocking read but it's fine because either all our child processes are busy
            # or we know that there's something to read (zmq.POLLIN)
            start = time.time()
            result = zmq_skt.recv_multipart()

            client_id = result[0]
            result = result[1:]

            if len(result) == 1:
                if len(result[0]) == 0:
                    assert client_id not in zmq_idle_clients
                    zmq_idle_clients += [client_id]

                    if all_processes_busy:
                        time_waiting_an_idle_process += time.time() - start
                    all_processes_busy = False
                elif result[0] == b'halted':
                    zmq_processes_killed += 1
                    all_processes_busy = False
                else:
                    result = pickle.loads(result[0])
                    processed_points += result['total']
                    points_in_progress -= result['total']

                    if 'save' in result and len(result['save']) > 0:
                        node_store.put(result['name'], result['save'])

                    if result['name'][0:4] == b'root':
                        state.reader.active.remove(result['name'])
                    else:
                        del state.node_process.active[result['name']]

                        if len(result['name']) > 0:
                            state.node_process.inactive.append(result['name'])

                            if not state.reader.input and not state.reader.active:
                                if state.node_process.active or state.node_process.input:
                                    finished_node = result['name']
                                    if not can_pnts_be_written(
                                            finished_node, finished_node,
                                            state.node_process.input,
                                            state.node_process.active):
                                        pass
                                    else:
                                        state.node_process.inactive.pop(-1)
                                        state.to_pnts.input.append(
                                            finished_node)

                                        for i in range(
                                                len(state.node_process.inactive
                                                    ) - 1, -1, -1):
                                            candidate = state.node_process.inactive[
                                                i]

                                            if can_pnts_be_written(
                                                    candidate, finished_node,
                                                    state.node_process.input,
                                                    state.node_process.active):
                                                state.node_process.inactive.pop(
                                                    i)
                                                state.to_pnts.input.append(
                                                    candidate)

                                else:
                                    for c in state.node_process.inactive:
                                        state.to_pnts.input.append(c)
                                    state.node_process.inactive.clear()

                    at_least_one_job_ended = True
            elif result[0] == b'pnts':
                points_in_pnts += struct.unpack('>I', result[1])[0]
                state.to_pnts.active.remove(result[2])
            else:
                count = struct.unpack('>I', result[2])[0]
                add_tasks_to_process(state, result[0], result[1], count)

        while state.to_pnts.input and can_queue_more_jobs(zmq_idle_clients):
            node_name = state.to_pnts.input.pop()
            datas = node_store.get(node_name)
            assert len(datas) > 0, '{} has no data??'.format(node_name)
            zmq_send_to_process(zmq_idle_clients, zmq_skt,
                                [b'pnts', node_name, datas])
            node_store.remove(node_name)
            state.to_pnts.active.append(node_name)

        if can_queue_more_jobs(zmq_idle_clients):
            potential = sorted([(k, v)
                                for k, v in state.node_process.input.items()
                                if k not in state.node_process.active],
                               key=lambda f: -len(f[0]))

            while can_queue_more_jobs(zmq_idle_clients) and potential:
                target_count = 100000
                job_list = []
                count = 0
                idx = len(potential) - 1
                while count < target_count and potential and idx >= 0:
                    name, (tasks, point_count) = potential[idx]
                    if name not in state.node_process.active:
                        count += point_count
                        job_list += [name]
                        job_list += [node_store.get(name)]
                        job_list += [struct.pack('>I', len(tasks))]
                        job_list += tasks
                        del potential[idx]
                        del state.node_process.input[name]
                        state.node_process.active[name] = (len(tasks),
                                                           point_count, now)

                        if name in state.node_process.inactive:
                            state.node_process.inactive.pop(
                                state.node_process.inactive.index(name))
                    idx -= 1

                if job_list:
                    zmq_send_to_process(zmq_idle_clients, zmq_skt, job_list)

        while (state.reader.input
               and (points_in_progress < 60000000 or not state.reader.active)
               and len(state.reader.active) < max_splitting_jobs_count
               and can_queue_more_jobs(zmq_idle_clients)):
            if verbose >= 1:
                print('Submit next portion {}'.format(state.reader.input[-1]))
            _id = 'root_{}'.format(len(state.reader.input)).encode('ascii')
            file, portion = state.reader.input.pop()
            points_in_progress += portion[1] - portion[0]

            zmq_send_to_process(zmq_idle_clients, zmq_skt, [
                pickle.dumps({
                    'filename':
                    file,
                    'offset_scale':
                    (-avg_min, root_scale, rotation_matrix[:3, :3].T
                     if rotation_matrix is not None else None,
                     infos['color_scale']),
                    'portion':
                    portion,
                    'id':
                    _id
                })
            ])

            state.reader.active.append(_id)

        # if at this point we have no work in progress => we're done
        if len(zmq_idle_clients) == jobs or zmq_processes_killed == jobs:
            if zmq_processes_killed < 0:
                zmq_send_to_all_process(zmq_idle_clients, zmq_skt,
                                        [pickle.dumps(b'shutdown')])
                zmq_processes_killed = 0
            else:
                assert points_in_pnts == infos[
                    'point_count'], '!!! Invalid point count in the written .pnts (expected: {}, was: {})'.format(
                        infos['point_count'], points_in_pnts)
                if verbose >= 1:
                    print('Writing 3dtiles {}'.format(infos['avg_min']))
                write_tileset(working_dir, outfolder, octree_metadata, avg_min,
                              root_scale, rotation_matrix, rgb)
                shutil.rmtree(working_dir)
                if verbose >= 1:
                    print('Done')

                if benchmark is not None:
                    print('{},{},{},{}'.format(
                        benchmark,
                        ','.join([os.path.basename(f) for f in files]),
                        points_in_pnts, round(time.time() - startup, 1)))

                for p in zmq_processes:
                    p.terminate()
                break

        if at_least_one_job_ended:
            if verbose >= 3:
                print('{:^16}|{:^8}|{:^8}'.format('Name', 'Points', 'Seconds'))
                for name, v in state.node_process.active.items():
                    print('{:^16}|{:^8}|{:^8}'.format(
                        '{} ({})'.format(name.decode('ascii'), v[0]), v[1],
                        round(now - v[2], 1)))
                print('')
                print('Pending:')
                print('  - root: {} / {}'.format(len(state.reader.input),
                                                 initial_portion_count))
                print('  - other: {} files for {} nodes'.format(
                    sum([len(f[0])
                         for f in state.node_process.input.values()]),
                    len(state.node_process.input)))
                print('')
            elif verbose >= 2:
                state.print_debug()
            if verbose >= 1:
                print('{} % points in {} sec [{} tasks, {} nodes, {} wip]'.
                      format(
                          round(100 * processed_points / infos['point_count'],
                                2), round(now, 1),
                          jobs - len(zmq_idle_clients),
                          len(state.node_process.active), points_in_progress))
            elif verbose >= 0:
                percent = round(100 * processed_points / infos['point_count'],
                                2)
                time_left = (100 - percent) * now / (percent + 0.001)
                print('\r{:>6} % in {} sec [est. time left: {} sec]'.format(
                    percent, round(now), round(time_left)),
                      end='',
                      flush=True)
                if False and int(percent) != previous_percent:
                    print('')
                    previous_percent = int(percent)

            if graph:
                percent = round(100 * processed_points / infos['point_count'],
                                3)
                print('{}, {}'.format(time.time() - startup, percent),
                      file=progression_log)

        node_store.control_memory_usage(cache_size, verbose)

    if verbose >= 1:
        print('destroy', round(time_waiting_an_idle_process, 2))

    if graph:
        progression_log.close()

    # pygal chart
    if graph:
        import pygal

        dateline = pygal.XY(x_label_rotation=25,
                            secondary_range=(0, 100))  # , show_y_guides=False)
        for pid in activities:
            activity = []
            filename = 'activity.{}.csv'.format(pid)
            i = len(activities) - activities.index(pid) - 1
            # activities.index(pid) =
            with open(filename, 'r') as f:
                content = f.read().split('\n')
                for line in content[1:]:
                    line = line.split(',')
                    if line[0]:
                        ts = float(line[0])
                        value = int(line[1]) / 3.0
                        activity.append((ts, i + value * 0.9))

            os.remove(filename)
            if activity:
                activity.append((activity[-1][0], activity[0][1]))
                activity.append(activity[0])
                dateline.add(str(pid), activity, show_dots=False, fill=True)

        with open('progression.csv', 'r') as f:
            values = []
            for line in f.read().split('\n'):
                if line:
                    line = line.split(',')
                    values += [(float(line[0]), float(line[1]))]
        os.remove('progression.csv')
        dateline.add('progression',
                     values,
                     show_dots=False,
                     secondary=True,
                     stroke_style={
                         'width': 2,
                         'color': 'black'
                     })

        dateline.render_to_file('activity.svg')

    context.destroy()
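A minimal call sketch for the function above (the file name and EPSG codes are illustrative; per the docstring, srs_in/srs_out take the numeric part of an EPSG code):

# Convert one LAS file to a 3D Tiles tileset under ./3dtiles, reprojecting to EPSG:4978.
convert('lidar_tile.las',
        outfolder='./3dtiles',
        overwrite=True,
        srs_in='32610',
        srs_out='4978',
        jobs=4,
        verbose=1)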
Example #5
def test_init_from_wkt():
    wgs84 = CRS.from_string("+proj=longlat +datum=WGS84 +no_defs")
    from_wkt = CRS(wgs84.to_wkt())
    assert wgs84.to_wkt() == from_wkt.to_wkt()
Example #6
# %%
lat_0 = 50
lon_0 = -120
laea_proj = {
    'datum': 'WGS84',
    'lat_0': '50',
    'lon_0': '-120',
    'no_defs': 'None',
    'proj': 'laea',
    'type': 'crs',
    'units': 'm',
    'x_0': '0',
    'y_0': '0'
}
p_laea = CRS(laea_proj)
p_laea.to_wkt()

# %% [markdown]
# ### Step 2 is to reproject the extent in the new coordinate system
#
# Remember that extent order is: [xleft, xright, ybot, ytop]. We need to get these values in the new LAEA CRS.
#
# Here are the two extents for the image:

# %%
crs_transform = Transformer.from_crs(p_utm10, p_laea)
extent_utm10 = [ul_x_utm10, lr_x_utm10, lr_y_utm10, ul_y_utm10]
ul_x_laea, ul_y_laea = crs_transform.transform(ul_x_utm10, ul_y_utm10)
lr_x_laea, lr_y_laea = crs_transform.transform(lr_x_utm10, lr_y_utm10)
extent_laea = [ul_x_laea, lr_x_laea, lr_y_laea, ul_y_laea]
print(f"{extent_utm10=}")
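# %%
# The markdown cell above mentions both extents; print the reprojected one as
# well (extent_laea was assembled in the previous cell).
print(f"{extent_laea=}")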
Example #7
def test_from_wkt():
    wgs84 = CRS.from_string("+proj=longlat +datum=WGS84 +no_defs")
    from_wkt = CRS(wgs84.to_wkt())
    assert wgs84.to_wkt() == from_wkt.to_wkt()