Example 1
File: pml.py Project: rmsare/PML
def transform(arg, transformation_matrix):

    matrix_string = " ".join(
        [str(e) for trans_row in transformation_matrix for e in trans_row])

    if isinstance(arg, str):
        json = u"""
                        {
                          "pipeline": [
                            \"""" + arg + """\",
                            {
                                "type":"filters.transformation",
                                "matrix":\"""" + matrix_string + """\"
                            }
                          ]
                        }"""
        pipeline = pdal.Pipeline(json)
    else:
        json = u"""
                        {
                            "pipeline": [
                              {
                                    "type":"filters.transformation",
                                    "matrix":\"""" + matrix_string + """\"
                              }
                            ]
                        }"""
        pipeline = pdal.Pipeline(json, arrays=arg)

    pipeline.validate()
    pipeline.loglevel = 8
    pipeline.execute()
    return pipeline.arrays
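A minimal usage sketch (assumed, not part of the original example): "input.las" and the identity matrix below are placeholder values for a real file and transformation.

# Hypothetical call: apply a 4x4 identity transform to a LAS file read by PDAL
identity = [[1, 0, 0, 0],
            [0, 1, 0, 0],
            [0, 0, 1, 0],
            [0, 0, 0, 1]]
arrays = transform("input.las", identity)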
Example 2
 def test_merged_arrays(self):
     """Can we fetch multiple point views from merged PDAL data """
     json = self.fetch_json('/data/filters/chip.json')
     r = pdal.Pipeline(json)
     r.execute()
     arrays = r.arrays
     self.assertEqual(len(arrays), 43)
Example 3
    def test_read_arrays(self):
        """Can we read and filter data from a list of arrays to PDAL"""
        if Version(pdal.info.version) < Version('1.8'):
            return True

        # just some dummy data
        x_vals = [1.0, 2.0, 3.0, 4.0, 5.0]
        y_vals = [6.0, 7.0, 8.0, 9.0, 10.0]
        z_vals = [1.5, 3.5, 5.5, 7.5, 9.5]
        test_data = np.array([(x, y, z)
                              for x, y, z in zip(x_vals, y_vals, z_vals)],
                             dtype=[('X', np.float64), ('Y', np.float64),
                                    ('Z', np.float64)])

        pipeline = """
        {
            "pipeline": [
                {
                    "type":"filters.range",
                    "limits":"X[2.5:4.5]"
                }
            ]
        }
        """

        p = pdal.Pipeline(pipeline, arrays=[
            test_data,
        ])
        p.loglevel = 8
        count = p.execute()
        arrays = p.arrays
        self.assertEqual(count, 2)
        self.assertEqual(len(arrays), 1)
Example 4
 def test_schema(self):
     """Fetching a schema works"""
     json = self.fetch_json('sort.json')
     r = pdal.Pipeline(json)
     r.validate()
     r.execute()
     self.assertEqual(r.schema['schema']['dimensions'][0]['name'], 'X')
Example 5
def worker(mapped):
    """Multiprocessing worker function to be used by the
    p.map function to map objects to, and then start
    multiple times in parallel on separate CPU cores.
    In this case the worker function instances ground
    filter one file each, and return the resulting log,
    metadata and the point cloud data array itself.
    """
    print("PID {} starting to ground filter file {}".format(
        os.getpid(), mapped[2]))
    config, fpath, write = mapped[0], mapped[1] + mapped[2], mapped[3]
    if write:
        tag = mapped[4]
        config = ('[\n\t"' + fpath + '",\n' + config +
                  ',\n\t"' + fpath[:-4] + '_' + tag + '.las"\n]')
    else:
        config = ('[\n\t"' + fpath + '",\n' + config + '\n]')
    pipeline = pdal.Pipeline(config)
    start = time()
    pipeline.execute()
    end = time()
    print("PID {} finished ground filtering.".format(os.getpid()),
          "Time elapsed: {} sec.".format(round(end - start, 2)))
    log = pipeline.log
    metadata = pipeline.metadata
    arrays = pipeline.arrays
    return log, metadata, arrays
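A minimal usage sketch for the worker above (assumed, not from the original project): the filter stage, folder and file names below are placeholders, and each task tuple follows the mapped[0..4] layout unpacked inside the function.

from multiprocessing import Pool

stage = '\t{\n\t\t"type": "filters.smrf"\n\t}'  # hypothetical ground-filter stage fragment
tasks = [(stage, '/data/', name, True, 'ground')  # (config, folder, file, write flag, tag)
         for name in ('tile_1.las', 'tile_2.las')]
with Pool(2) as p:
    results = p.map(worker, tasks)  # each result is (log, metadata, arrays)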
Example 6
def las_2_dataframe(las_path):
    inputfile = str(las_path)
    pipe_LASreader =\
    {
      "pipeline":[
        {
          "type":"readers.las",
          "filename":inputfile,
          "use_eb_vlr": "true"
        }
      ]
    }
    # """%lasinput
    #print(pipe_reader)

    pipeline = pdal.Pipeline(json.dumps(pipe_LASreader))
    print(pipeline.validate())
    start_time = time.time()
    n_points = pipeline.execute()
    elapsed_time_fl = (time.time() - start_time)
    print('Time taken ', elapsed_time_fl,' seconds')
    
    lidar_df = pd.DataFrame(pipeline.arrays[0])
    print("Number of points in LiDAR:", n_points)
    #print(lidar_df)
    lidar_df.head()
    
    return lidar_df
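A minimal usage sketch (assumed): "input.las" is a placeholder path; the returned DataFrame has one column per PDAL dimension read from the file.

lidar_df = las_2_dataframe("input.las")
print(lidar_df[['X', 'Y', 'Z']].describe())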
Example 7
def tile_lidar(folder_with_las, spacing, output_folder, buffer_distance):
    creating_json = {
        "pipeline": [{
            "type": "readers.las",
            "filename": f"{folder_with_las}/*",
            "spatialreference": "EPSG:25830"
        }, {
            "type": "filters.splitter",
            "length": f"{spacing}",
            "buffer": f"{buffer_distance}"
        }, {
            "type": "writer.las",
            "srs": "EPSG:25830",
            "filename": f"{output_folder}"
        }]
    }

    consulta = json.dumps(creating_json, indent=4)
    print(consulta)

    pipeline = pdal.Pipeline(consulta)
    pipeline.validate()  # Check if json options are good
    pipeline.loglevel = 8
    count = pipeline.execute()
    print(count)
Example 8
def writeLAZBatches(path, data):
    """ Write a numpy array with batch number for each point into laz file
    Input:
    String path, filepath
    Numpy Array data, with at least 4 dimensions for X,Y,Z and BatchID
    """
    # create correct structured numpy array
    data = np.array(list(map(tuple, data)),
                    dtype=[('X', np.float64), ('Y', np.float64),
                           ('Z', np.float64), ('ClusterID', np.int32)])

    # calc offset from first data point
    x_offset = int(str(data[0]['X'])[:5]) * 1000.0
    y_offset = int(str(data[0]['Y'])[:5]) * 100.0

    pipeline = {
        "pipeline": [{
            "type": "writers.las",
            "dataformat_id": 0,
            "compression": "laszip",
            "offset_x": x_offset,
            "offset_y": y_offset,
            "extra_dims": "ClusterID=int32",
            "filename": path
        }]
    }

    r = pdal.Pipeline(json.dumps(pipeline), [data])

    r.validate()
    r.execute()
Example 9
def get_points(lote):
    s = lote
    bounds = ([s.bounds[0], s.bounds[2]], [s.bounds[1], s.bounds[3]])

    ept = {
        "pipeline":[
            {
            "type": "readers.ept",
            "filename": "https://ept-m3dc-pmsp.s3-sa-east-1.amazonaws.com/ept.json",
            "bounds": str(bounds)
            },
            {
                "type":"filters.crop",
                "polygon":s.wkt
            },
            {   
                "type":"filters.hag_delaunay"
            }
        ]}

    pipeline = pdal.Pipeline(json.dumps(ept))
    pipeline.validate()
    n_points = pipeline.execute()

    arr = pipeline.arrays[0]
    df = pd.DataFrame(arr)

    return df
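A minimal usage sketch (assumed): lote is expected to be a shapely geometry whose bounds and WKT are in the EPT dataset's coordinate system; the coordinates below are placeholders.

from shapely.geometry import box

lote = box(333000, 7395000, 333100, 7395100)  # hypothetical lot polygon
df = get_points(lote)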
Example 10
    def test_reference_counting(self):
        """Can we read and filter data from a list of arrays to PDAL"""
        if Version(pdal.info.version) < Version("1.8"):
            return True

        # just some dummy data
        x_vals = [1.0, 2.0, 3.0, 4.0, 5.0]
        y_vals = [6.0, 7.0, 8.0, 9.0, 10.0]
        z_vals = [1.5, 3.5, 5.5, 7.5, 9.5]
        test_data = np.array(
            [(x, y, z) for x, y, z in zip(x_vals, y_vals, z_vals)],
            dtype=[("X", np.float), ("Y", np.float), ("Z", np.float)],
        )

        pipeline = """
        {
            "pipeline": [
                {
                    "type":"filters.range",
                    "limits":"X[2.5:4.5]"
                }
            ]
        }
        """

        p = pdal.Pipeline(pipeline, arrays=[test_data])
        p.loglevel = 8
        count = p.execute()
        self.assertEqual(count, 2)
        self.assertEqual(1, sys.getrefcount(p.arrays[0]),
                         "Reference count should only be 1 in this case")
Example 11
def work(row):

    items = row.rstrip().split(',')

    try:
        lat = float(items[0])
        lon = float(items[1])
    except:
        # print('Input line in wrong format (possibly a header)')
        return None

    x, y = proj.transform(lat, lon)

    x_min, x_max = (x + offset for offset in (-50, 50))
    y_min, y_max = (y + offset for offset in (-50, 50))

    json_def = json_tmpl.format(x_min=x_min,
                                x_max=x_max,
                                y_min=y_min,
                                y_max=y_max,
                                EPT_JSON=EPT_JSON)

    #print(json_def)
    pipeline = pdal.Pipeline(json_def)
    pipeline.validate()  # check if our JSON and options were good
    pipeline.loglevel = 0  #really noisy
    count = pipeline.execute()

    arrays = pipeline.arrays

    return count
Example 12
def _run_PDAL_splitter(filename, tiled_temp_folder, tiling_mins, tiling_maxs,
                       n_tiles_side):
    length_PDAL_tile = ((tiling_maxs[0] - tiling_mins[0]) /
                        float(n_tiles_side))

    outfile_with_placeholder = "_#".join([filename.stem, filename.suffix])
    outfilepath = tiled_temp_folder.joinpath(outfile_with_placeholder)

    PDAL_pipeline_dict = {
        "pipeline": [
            filename.as_posix(), {
                "type": "filters.splitter",
                "origin_x": "{}".format(tiling_mins[0]),
                "origin_y": "{}".format(tiling_mins[1]),
                "length": "{}".format(length_PDAL_tile)
            }, {
                "type": "writers.las",
                "filename": outfilepath.as_posix(),
                "forward": ["scale_x", "scale_y", "scale_z"],
                "offset_x": "auto",
                "offset_y": "auto",
                "offset_z": "auto"
            }
        ]
    }
    _print_PDAL_pipeline_dict(PDAL_pipeline_dict)
    PDAL_pipeline = pdal.Pipeline(json.dumps(PDAL_pipeline_dict))
    logger.debug("... running PDAL:")
    PDAL_pipeline.execute()
Example 13
def get_points(ept_path, bounds, wkt):

    READ_PIPELINE = """
                    {{
                        "pipeline": [
                            {{
                                "type": "readers.ept",
                                "filename": "{path}",
                                "bounds": "{bounds}"
                            }},
                            {{
                                "type":"filters.crop",
                                "polygon":"{wkt}"
                            }}
                        ]
                    }}
                    """

    pipeline = pdal.Pipeline(
        READ_PIPELINE.format(path=ept_path, bounds=bounds, wkt=wkt))

    pipeline.validate()
    pipeline.execute()
    point_cloud = pipeline.arrays[0]

    return point_cloud
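A minimal usage sketch (assumed): the EPT URL, bounds string and polygon below are placeholders; PDAL expects the bounds formatted as "([xmin, xmax], [ymin, ymax])".

points = get_points(
    ept_path="https://example.com/ept.json",
    bounds="([637000, 638000], [850000, 851000])",
    wkt="POLYGON ((637000 850000, 638000 850000, 638000 851000, 637000 851000, 637000 850000))")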
Example 14
    def load_from_file(self,
                       filename,    # type: str
                       proj,        # type: str
                       zone,        # type: str
                       ellps,       # type: str
                       datum        # type: str
                       ):           # type: (...) -> None
        pipe_json = json.dumps([
            {
                'type': 'readers.sbet',
                'filename': filename
            }
        ])

        sbet_pipeline = pdal.Pipeline(pipe_json)
        sbet_pipeline.validate()
        self._num_records = sbet_pipeline.execute()

        data = sbet_pipeline.arrays[0]
        self._record_length = len(data.dtype.names)

        # convert structured data array to dict
        self._nav_data = {convert_to_snake_case(name): data[name] for name in data.dtype.names}

        if not zone:
            self._projection = pyproj.Proj(proj=proj, ellps=ellps, datum=datum, preserve_units=True)
        else:
            self._projection = pyproj.Proj(proj=proj, zone=zone, ellps=ellps, datum=datum, preserve_units=True)
Example 15
def readlasfile(lasfile):
    """
    Run a PDAL pipeline. Input is a LAS file path from which a JSON
    declaration is built and delivered to PDAL. Output is a labelled
    numpy array.

    Data are filtered to compute height above ground using nearest ground
    point neighbours (TIN method arriving soon) and sorted by Morton order.
    Any unused dimensions are also trimmed.
    """
    pipeline = {
        "pipeline": [
            {
                "type": "readers.las",
                "filename": lasfile
            },
            {
                "type": "filters.hag"
            },
            {
                "type": "filters.mortonorder"
            }
        ]
    }

    #create a pipeline object
    pipeline = pdal.Pipeline(json.dumps(pipeline))

    # execute the pipeline
    count = pipeline.execute()

    #read points into a numpy structured array
    arrays = pipeline.arrays

    #return the numpy array to operate on
    return arrays
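A minimal usage sketch (assumed): "input.las" is a placeholder path; filters.hag adds a HeightAboveGround dimension to the returned structured array.

arrays = readlasfile("input.las")
points = arrays[0]
print(points['HeightAboveGround'].max())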
Example 16
def get_pipeline(filename):
    with open(os.path.join(DATADIRECTORY, filename), "r") as f:
        if filename.endswith(".json"):
            pipeline = pdal.Pipeline(f.read())
        elif filename.endswith(".py"):
            pipeline = eval(f.read(), vars(pdal))
    return pipeline
Example 17
File: pml.py Project: rmsare/PML
def read_file(filename, bounds=None):

    if bounds is None:
        json = u"""
                {
                    "pipeline": [
                     \"""" + filename + """\"
                    ]
                }"""
    else:
        json = u"""
                {
                  "pipeline": [
                    \"""" + filename + """\",
                    {
                        "type":"filters.crop",
                        "bounds":"([""" + str(bounds[0][0]) + """,""" \
                                        + str(bounds[0][1]) + """],[""" \
                                        + str(bounds[1][0]) + """,""" \
                                        + str(bounds[1][1]) + """])"
                    }
                  ]
                }"""
    pipeline = pdal.Pipeline(json)
    pipeline.validate()
    pipeline.loglevel = 8
    pipeline.execute()
    return pipeline.arrays
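A minimal usage sketch (assumed): the file name and crop bounds are placeholders; bounds is ((xmin, xmax), (ymin, ymax)), matching the string assembled for filters.crop above.

arrays = read_file("input.las", bounds=((637000, 638000), (850000, 851000)))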
Example 18
    def test_reference_counting(self):
        """Can we read and filter data from a list of arrays to PDAL"""
        # just some dummy data
        x_vals = [1.0, 2.0, 3.0, 4.0, 5.0]
        y_vals = [6.0, 7.0, 8.0, 9.0, 10.0]
        z_vals = [1.5, 3.5, 5.5, 7.5, 9.5]
        test_data = np.array(
            [(x, y, z) for x, y, z in zip(x_vals, y_vals, z_vals)],
            dtype=[("X", float), ("Y", float), ("Z", float)],
        )

        pipeline = """
        {
            "pipeline": [
                {
                    "type":"filters.range",
                    "limits":"X[2.5:4.5]"
                }
            ]
        }
        """
        p = pdal.Pipeline(pipeline, arrays=[test_data])
        count = p.execute()
        assert count == 2
        refcount = sys.getrefcount(p.arrays[0])
        assert refcount == 1
Example 19
def openLAZ(path, extra_dims={}):
    """ Open a single laz file with pdal pipeline. 
        Then extract X,Y,Z values and classification.
    Input:
    String path, filepath
    Dictionary extra_dims, names and datatypes of additional fields
    Return:
    Numpy Array, with dimension (num_points, num_dims)
    """
    if extra_dims:
        pipeline = {
            "pipeline": [{
                "type": "readers.las",
                "filename": path,
                "extra_dims": createDimStr(extra_dims)
            }]
        }
    else:
        pipeline = {"pipeline": [{"type": "readers.las", "filename": path}]}

    r = pdal.Pipeline(json.dumps(pipeline))

    r.validate()
    r.execute()

    # first stack xyz together
    points = np.dstack((r.arrays[0]['X'], r.arrays[0]['Y'], r.arrays[0]['Z'],
                        r.arrays[0]['Classification']))

    # and now each predefined scalar field
    for field in extra_dims:
        points = np.dstack((points, r.arrays[0][field]))

    return points.squeeze()  # drop the axis of length one
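A minimal usage sketch (assumed): the path and extra dimension are placeholders, and createDimStr() is expected to be defined alongside this helper.

points = openLAZ("tile.laz", extra_dims={"ClusterID": "int32"})
# columns: X, Y, Z, Classification, then one column per extra dimension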
Example 20
    def set_json_pipeline(self, json_pipe):
        infile = None
        outfile = None
        try:
            if "pipeline" not in json_pipe.keys():
                json_pipe = {"pipeline": json_pipe}
        except AttributeError:
            raise AttributeError(
                "json_pipe argument is not a json formatted dictionary.")
        except Exception as err:
            raise err

        if type(json_pipe["pipeline"][0]) is str:
            infile = json_pipe["pipeline"][0]

        if type(json_pipe["pipeline"][-1]) is str:
            outfile = json_pipe["pipeline"][-1]

        if infile is None:
            json_pipe["pipeline"].insert(0, self.infile)

        if outfile is None and self.outfile_set:
            json_pipe["pipeline"].append(self.outfile)

        # Ensure valid pipeline before actually making changes:
        try:
            pdal.Pipeline(json.dumps(json_pipe)).validate()
            self.json_pipeline = json_pipe
            if outfile is not None:
                self.outfile = outfile
            if infile is not None:
                self.infile = infile
        except Exception as err:
            raise err
Example 21
def write_to_laz(point_cloud, path):
    '''
    writes a structured array to a .laz file
    in:
        point_cloud [structured np array]:
            The output pointcloud; needs attributes x, y and z.
            When creating a pointcloud from scratch, pay attention to
            the data types of the specific attributes; this is a pain in the ass.
            Easier to add one new column to an existing (filtered) pointcloud.

        path [string]:
            Path to a laz file.

    out:
        None

    '''
    WRITE_PIPELINE = """
    {{
        "pipeline": [
            {{
                "type": "writers.las",
                "filename": "{path}",
                "extra_dims": "all"
            }}
        ]
    }}
    """
    pipeline = pdal.Pipeline(
        WRITE_PIPELINE.format(path=path),
        arrays=[point_cloud]
    )
    pipeline.validate()
    pipeline.execute()
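A minimal usage sketch (assumed): a tiny structured array with the X/Y/Z field names writers.las expects, written to a placeholder path.

import numpy as np

cloud = np.array([(1.0, 2.0, 3.0), (4.0, 5.0, 6.0)],
                 dtype=[('X', np.float64), ('Y', np.float64), ('Z', np.float64)])
write_to_laz(cloud, "out.laz")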
Example 22
	def pipeline_realization(pip_json, print_result):
	    try:
	        # ===============================================
	        # Pipeline execution
	        pipeline = pdal.Pipeline(pip_json)
	        pipeline.validate()  # check if our JSON and options were good

	        pipeline.execute()

	        if print_result:
	            arrays = pipeline.arrays
	            metadata = pipeline.metadata
	            log = pipeline.log
	            print("\n================")
	            print("Arrays:")
	            print(arrays)
	            print("\n================")
	            print("Metadata:")
	            print(metadata)
	            print("\n================")
	            print("Log:")
	            print(log)

	        print("pdal pipeline finished")
	        return True
	    except:
	        print(" Error !!")
	        return False
Example 23
def remove_noise(lasfile, outputlas):

    creating_json = {
        "pipeline": [
            {
                "type": "readers.las",
                "filename": f"{lasfile}"
            },
            {
                # Creates a window to find outliers. If they are found they are classified as noise (7).
                "type": "filters.outlier",
                "method": "statistical",
                "multiplier": 3,
                "mean_k": 8
            },
            {
                "type": "writers.las",
                "compression": "laszip",
                "filename": f"{outputlas}"
            }
        ]
    }

    consulta = json.dumps(creating_json, indent=4)
    print(consulta)

    pipeline = pdal.Pipeline(consulta)
    pipeline.validate()  # Check if json options are good
    pipeline.loglevel = 8
    count = pipeline.execute()
    print(count)
Example 24
def run_pipe(from_table, to_table):
    db_params = {
        "host": "localhost",
        "database": "pointclouds",
        "port": 5432,
        "user": username,
        "password": password
    }

    conn = dbutils.connect(db_params)
    sys.stdout.write("Connected to database 'pointclouds'.\n")
    this_pipe = deepcopy(test_pipe)
    reader = {
        "type": "readers.pgpointcloud",
        "connection": f"host=127.0.0.1 dbname='pointclouds' user={username} password={password}",
        "table": from_table,
        "column": "pa"
    }

    this_pipe["pipeline"].insert(0, reader)
    pipe = pdal.Pipeline(json.dumps(this_pipe))

    sys.stdout.write("Executing pdal pipeline.\n")
    pipe.execute()

    sys.stdout.write("pdal pipeline completed successfully.\n")
    dbutils.execute_mogrify(conn, pd.DataFrame(pipe.arrays[0]), to_table)
    sys.stdout.write(f"Added {from_table} to {to_table}.\n")
Example 25
def DEMonizator(lasfile, outputfile, resolution=1000):
    creating_json = {
        "pipeline": [{
            "type": "readers.las",
            "filename": f"{lasfile}",
            "spatialreference": "EPSG:25830"
        }, {
            "type": "filters.range",
            "limits": "Classification[2:2]"
        }, {
            "type": "writers.gdal",
            "gdaldriver": "GTiff",
            "nodata": "-9999",
            "output_type": "idw",
            "resolution": f"{resolution}",
            "filename": f"{outputfile}"
        }]
    }

    consulta = json.dumps(creating_json, indent=4)
    print(consulta)

    pipeline = pdal.Pipeline(consulta)
    pipeline.validate()  # Check if json options are good
    pipeline.loglevel = 8
    count = pipeline.execute()
    print(count)
Example 26
def getMetadata(file_path):
    """
    用来获取点云的 max and min  xyz value
    :param file_path:  las点云文件的路径
    :return:  返回一个  dict   {'maxx':value , 'maxy':value, 'maxz':value, 'minx':value, 'miny':value, 'minz':value}
    """
    '''获取最低点、最高点的字典'''
    data_json = """
            {
                "pipeline": [
                    \"""" + file_path + """\",
                    {
                        "type": "filters.stats"
                    }
                ]
            }"""
    # print(data_json)
    pipeline = pdal.Pipeline(data_json)

    pipeline.validate()  # check that the JSON and options are valid
    pipeline.loglevel = 8  # really noisy logging
    count = pipeline.execute()  # run the pipeline

    # arrays = pipeline.arrays
    # print("arrays:", arrays)

    metadata = pipeline.metadata  # full metadata for the executed pipeline
    metadata_dict = json.loads(metadata)  # parse the metadata JSON string into a dict
    bbox_dict = metadata_dict["metadata"]["filters.stats"][1]["bbox"][
        "native"]["bbox"]  # native-CRS bounding box reported by filters.stats
    # print(metadata_dict)

    subkey = ['maxx', 'maxy', 'maxz', 'minx', 'miny', 'minz']
    box_dict = {key: bbox_dict[key] for key in subkey}  # dict of just the max/min values
    return box_dict
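A minimal usage sketch (assumed): "input.las" is a placeholder path.

bbox = getMetadata("input.las")
print(bbox['minx'], bbox['maxx'], bbox['miny'], bbox['maxy'])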
Example 27
    def test_merged_arrays(self):
        """Can we load data from a list of arrays to PDAL"""
        if Version(pdal.info.version) < Version('1.8'):
            return True
        data = np.load(os.path.join(DATADIRECTORY, 'test3d.npy'))

        arrays = [data, data, data]

        json = self.fetch_json('chip.json')
        chip = u"""{
  "pipeline":[
    {
      "type":"filters.range",
      "limits":"Intensity[100:300)"
    }
  ]
}"""

        p = pdal.Pipeline(chip, arrays)
        p.loglevel = 8
        count = p.execute()
        arrays = p.arrays
        self.assertEqual(len(arrays), 3)

        for data in arrays:
            self.assertEqual(len(data), 12)
            self.assertEqual(data['Intensity'].sum(), 1926)
Example 28
def remove_overlay_points(lasfile,
                 outputlas
                 ):

    creating_json = {
        "pipeline" : [
            {
                "type": "readers.las",
                "filename": f"{lasfile}"
            },
            {
                # Filter assigning points of class overlay (12) to class noise (7)
                "type": "filters.assign",
                "assignment": "Classification[12:12]=7"
            },
            {
                "type": "writers.las",
                "compression": "laszip",
                "filename": f"{outputlas}"
            }
        ]
    }

    consulta = json.dumps(creating_json, indent=4)
    print(consulta)

    pipeline = pdal.Pipeline(consulta)
    pipeline.validate()  # Check if json options are good
    pipeline.loglevel = 8
    count = pipeline.execute()
    print(count)
Example 29
 def test_execution(self):
     """Can we execute a PDAL pipeline"""
     x = self.fetch_json('sort.json')
     r = pdal.Pipeline(x)
     r.validate()
     r.execute()
     self.assertGreater(len(r.pipeline), 200)
Example 30
    def test_merged_arrays(self):
        """Can we load data from a a list of arrays to PDAL"""
        data = np.load(os.path.join(DATADIRECTORY, 'perlin.npy'))

        arrays = [data, data, data]
        arrays = [data]

        json = self.fetch_json('chip.json')
        chip = u"""{
  "pipeline":[
    {
      "type":"filters.range",
      "limits":"Intensity[0:0.10]"
    }
  ]
}"""

        p = pdal.Pipeline(chip, arrays)
        p.loglevel = 8
        count = p.execute()
        arrays = p.arrays
        self.assertEqual(len(arrays), 1)

        data = arrays[0]
        self.assertEqual(len(data), 1836)