Example #1
    def loadFromFile(self, index, fileAbsPath):
        # Get connection
        connection = self.getConnection()
        cursor = connection.cursor()
        # Add point cloud format to pointcloud_formats table
        (columns, pcid,
         compression) = self.addPCFormat(cursor, self.schemaFile, fileAbsPath,
                                         self.srid)
        connection.close()

        pdalCols = []
        for c in columns:
            pdalCols.append(self.DM_PDAL[c])

        # Get PDAL config and run PDAL
        xmlFile = os.path.basename(fileAbsPath) + '.xml'
        pdalops.PostgreSQLWriter(xmlFile, fileAbsPath,
                                 self.getConnectionString(), pcid, pdalCols,
                                 self.blockTable, self.srid, self.blockSize,
                                 compression)
        t0 = time.time()
        pdalops.executePDAL(xmlFile)
        print 'LOADSTATS', os.path.basename(
            fileAbsPath), lasops.getPCFileDetails(
                fileAbsPath)[1], time.time() - t0
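The helper pdalops.executePDAL is referenced throughout these examples but not shown. A minimal sketch of what it could look like, assuming the generated XML is simply handed to the PDAL command-line tool via 'pdal pipeline':

import subprocess

def executePDAL(xmlFile):
    # Run the PDAL pipeline described in xmlFile; raise if PDAL exits
    # with a non-zero status.
    subprocess.check_call(['pdal', 'pipeline', xmlFile])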
Example #2
    def processFile(self, index, fileAbsPath):
        # Get the file extension
        extension = fileAbsPath.split('.')[-1]

        if extension == self.dataExtension:
            outputAbsPath = self.dataFolder + '/' + os.path.basename(fileAbsPath)
        else:
            outputAbsPath = self.dataFolder + '/' + os.path.basename(fileAbsPath).replace(extension, self.dataExtension)
        commands = []
        if self.sort:
            commands.append('lassort.exe -i ' + fileAbsPath + ' -o ' + outputAbsPath)
        else:
            if extension == self.dataExtension:
                commands.append('ln -s ' + fileAbsPath + ' ' + outputAbsPath)
            else:
                commands.append('las2las -i ' + fileAbsPath + ' -o ' + outputAbsPath)
        commands.append('lasindex -i ' + outputAbsPath)
        
        times = []
        for command in commands:
            logging.info(command)
            t0 = time.time()
            os.system(command)
            times.append(time.time() - t0)
        print 'LOADSTATS', os.path.basename(fileAbsPath), lasops.getPCFileDetails(fileAbsPath)[1], times[0], times[1]
    def processFile(self, index, fileAbsPath):
        # Get the file extension
        extension = fileAbsPath.split('.')[-1]

        if extension == self.dataExtension:
            outputAbsPath = self.dataFolder + '/' + os.path.basename(
                fileAbsPath)
        else:
            outputAbsPath = self.dataFolder + '/' + os.path.basename(
                fileAbsPath).replace(extension, self.dataExtension)
        commands = []
        if self.sort:
            commands.append('lassort.exe -i ' + fileAbsPath + ' -o ' +
                            outputAbsPath)
        else:
            if extension == self.dataExtension:
                commands.append('ln -s ' + fileAbsPath + ' ' + outputAbsPath)
            else:
                commands.append('las2las -i ' + fileAbsPath + ' -o ' +
                                outputAbsPath)
        commands.append('lasindex -i ' + outputAbsPath)

        times = []
        for command in commands:
            logging.info(command)
            t0 = time.time()
            os.system(command)
            times.append(time.time() - t0)
        print 'LOADSTATS', os.path.basename(
            fileAbsPath), lasops.getPCFileDetails(
                fileAbsPath)[1], times[0], times[1]
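The command loop above shells out with os.system on strings built by concatenation, which breaks on paths containing spaces. A small alternative sketch (not what these projects do) that runs the same commands through subprocess with argument lists and returns the per-command timings:

import logging
import subprocess
import time

def runCommands(commands):
    # commands is a list of argument lists, e.g. ['lasindex', '-i', outputAbsPath]
    times = []
    for command in commands:
        logging.info(' '.join(command))
        t0 = time.time()
        subprocess.check_call(command)
        times.append(time.time() - t0)
    return times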
    def loadFromFile(self, index, fileAbsPath):
        # Get information of the contents of the LAS file
        logging.info(fileAbsPath)
        xmlFile = os.path.basename(fileAbsPath) + '.xml'
        if self.columns == 'all':
            pdalCols = None
        else:
            pdalCols = []
            for c in self.columns:
                pdalCols.append(self.DM_PDAL[c])

        useOffsetScale = self.useOffsetScale
        pdalops.OracleWriter(xmlFile, fileAbsPath, self.getConnectionString(),
                             pdalCols, self.blockTable, self.baseTable,
                             self.srid, self.blockSize, self.pdalCompression,
                             self.pdalDimOrientation, useOffsetScale)
        t0 = time.time()
        pdalops.executePDAL(xmlFile)
        print 'LOADSTATS', os.path.basename(fileAbsPath), lasops.getPCFileDetails(fileAbsPath)[1], time.time() - t0
Example #5
    def loadFromFile(self, index, fileAbsPath):
        # Get connection
        connection = self.getConnection()
        cursor = connection.cursor()
        # Add point cloud format to pointcloud_formats table
        (columns, pcid, compression) = self.addPCFormat(cursor, self.schemaFile, fileAbsPath, self.srid)
        connection.close()

        pdalCols = []
        for c in columns:
            pdalCols.append(self.DM_PDAL[c])

        # Get PDAL config and run PDAL
        xmlFile = os.path.basename(fileAbsPath) + '.xml'
        pdalops.PostgreSQLWriter(xmlFile, fileAbsPath, self.getConnectionString(), pcid, pdalCols, self.blockTable, self.srid, self.blockSize, compression)
        t0 = time.time()
        pdalops.executePDAL(xmlFile)
        print 'LOADSTATS', os.path.basename(fileAbsPath), lasops.getPCFileDetails(fileAbsPath)[1], time.time() - t0
def runChild(childId, childrenQueue, connectionString, dbtable, srid):
    kill_received = False
    connection = psycopg2.connect(connectionString)
    cursor = connection.cursor()
    while not kill_received:
        job = None
        try:
            # This call will patiently wait until new job is available
            job = childrenQueue.get()
        except:
            # if there is an error we will quit the loop
            kill_received = True
        if job == None:
            kill_received = True
        else:
            [
                identifier,
                inputFile,
            ] = job
            (_, count, minX, minY, minZ, maxX, maxY, maxZ, scaleX, scaleY,
             scaleZ, offsetX, offsetY,
             offsetZ) = lasops.getPCFileDetails(inputFile)

            insertStatement = """INSERT INTO """ + dbtable + """(id,filepath,num,scalex,scaley,scalez,offsetx,offsety,offsetz,geom) VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, ST_MakeEnvelope(%s, %s, %s, %s, %s));"""
            insertArgs = [
                identifier, inputFile,
                int(count),
                float(scaleX),
                float(scaleY),
                float(scaleZ),
                float(offsetX),
                float(offsetY),
                float(offsetZ),
                float(minX),
                float(minY),
                float(maxX),
                float(maxY),
                int(srid)
            ]
            logging.info(cursor.mogrify(insertStatement, insertArgs))
            cursor.execute(insertStatement, insertArgs)
            connection.commit()
    cursor.close()
    connection.close()
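runChild stops once it reads None from the queue, so a parent process can shut the workers down by pushing one None sentinel per child. A minimal driver sketch under that assumption (the worker count and file list are placeholders):

import multiprocessing

def runParent(inputFiles, connectionString, dbtable, srid, numChildren=4):
    childrenQueue = multiprocessing.Queue()
    for i, inputFile in enumerate(inputFiles):
        childrenQueue.put([i, inputFile])
    for _ in range(numChildren):
        childrenQueue.put(None)  # one sentinel per child
    children = []
    for childId in range(numChildren):
        child = multiprocessing.Process(target=runChild,
                                        args=(childId, childrenQueue,
                                              connectionString, dbtable, srid))
        child.start()
        children.append(child)
    for child in children:
        child.join()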
Example #7
def PostgreSQLWriter(xmlFile, inputFileAbsPath, connectionString, pcid,
                     dimensionsNames, blockTable, srid, blockSize,
                     compression):
    """ Create a XML file to load the data, in the given file, into the DB """
    (_, _, minX, minY, minZ, _, _, _, scaleX, scaleY, scaleZ, offsetX, offsetY,
     offsetZ) = lasops.getPCFileDetails(inputFileAbsPath)

    xmlContent = """<?xml version="1.0" encoding="utf-8"?>
<Pipeline version="1.0">
<Writer type="writers.pgpointcloud">
    <Option name="connection">""" + connectionString + """</Option>
    <Option name="table">""" + blockTable + """</Option>
    <Option name="column">pa</Option>
    <Option name="srid">""" + str(srid) + """</Option>
    <Option name="pcid">""" + str(pcid) + """</Option>
    <Option name="overwrite">false</Option>
    <Option name="capacity">""" + str(blockSize) + """</Option>
    <Option name="compression">""" + compression + """</Option>
    <Option name="output_dims">""" + ",".join(dimensionsNames) + """</Option>
    <Option name="offset_x">""" + str(minX) + """</Option>
    <Option name="offset_y">""" + str(minY) + """</Option>
    <Option name="offset_z">""" + str(minZ) + """</Option>
    <Option name="scale_x">""" + str(scaleX) + """</Option>
    <Option name="scale_y">""" + str(scaleY) + """</Option>
    <Option name="scale_z">""" + str(scaleZ) + """</Option>
    <Filter type="filters.chipper">
        <Option name="capacity">""" + str(blockSize) + """</Option>
        <Reader type="readers.las">
            <Option name="filename">""" + inputFileAbsPath + """</Option>
            <Option name="spatialreference">EPSG:""" + str(srid) + """</Option>
        </Reader>
    </Filter>
</Writer>
</Pipeline>
"""
    utils.writeToFile(xmlFile, xmlContent)
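A hypothetical call, only to illustrate the argument order; the file name, connection string, pcid, dimension list, block table, SRID, block size and compression below are made-up values:

PostgreSQLWriter('tile_0.las.xml', '/data/tile_0.las',
                 "dbname='pointclouds' user='loader'", 1,
                 ['X', 'Y', 'Z', 'Intensity'],
                 'patches', 28992, 3000, 'dimensional')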
Example #8
def OracleWriter(xmlFile, inputFileAbsPath, connectionString, dimensionsNames,
                 blockTable, baseTable, srid, blockSize, compression,
                 dimensionalOrientation, useOffsetScale):
    """ Create a XML file to load the data, in the given file, into the DB """

    offsetScale = ''
    if useOffsetScale:
        (_, _, minX, minY, minZ, _, _, _, scaleX, scaleY, scaleZ, offsetX,
         offsetY, offsetZ) = lasops.getPCFileDetails(inputFileAbsPath)
        offsetScale = """<Option name="offset_x">""" + str(
            offsetX) + """</Option>
   <Option name="offset_y">""" + str(offsetY) + """</Option>
   <Option name="offset_z">""" + str(offsetZ) + """</Option>
   <Option name="scale_x">""" + str(scaleX) + """</Option>
   <Option name="scale_y">""" + str(scaleY) + """</Option>
   <Option name="scale_z">""" + str(scaleZ) + """</Option>"""

    output_dims = ''
    if dimensionsNames != None:
        output_dims = '<Option name="output_dims">' + ','.join(
            dimensionsNames) + '</Option>'
    strsrid = ''
    if srid == '':
        strsrid = '4269'
    else:
        strsrid = str(srid)

    xmlContent = """<?xml version="1.0" encoding="utf-8"?>
<Pipeline version="1.0">
 <Writer type="writers.oci">
   <Option name="debug">false</Option>
   <Option name="verbose">1</Option>
   <Option name="connection">""" + connectionString + """</Option>
   <Option name="base_table_name">""" + baseTable + """</Option>
   <Option name="block_table_name">""" + blockTable + """</Option>
   <Option name="compression">""" + str(compression).lower() + """</Option>
   <Option name="store_dimensional_orientation">""" + str(
        dimensionalOrientation).lower() + """</Option>
   <Option name="cloud_column_name">pc</Option>
   <Option name="is3d">false</Option>
   <Option name="solid">false</Option>
   <Option name="overwrite">false</Option>
   <Option name="disable_cloud_trigger">true</Option>
   <Option name="srid">""" + strsrid + """</Option>
   <Option name="create_index">false</Option>
   <Option name="capacity">""" + str(blockSize) + """</Option>
   <Option name="stream_output_precision">8</Option>
   <Option name="pack_ignored_fields">true</Option>
   """ + output_dims + """
   """ + offsetScale + """
   <Filter type="filters.chipper">
     <Option name="capacity">""" + str(blockSize) + """</Option>
     <Reader type="readers.las">
       <Option name="filename">""" + inputFileAbsPath + """</Option>
       <Option name="spatialreference">EPSG:""" + str(srid) + """</Option>
     </Reader>
   </Filter>            
 </Writer>
</Pipeline>      
"""
    utils.writeToFile(xmlFile, xmlContent)
    def loadFromFile(self, index, fileAbsPath):
        t0 = time.time()
        self.loadInc(fileAbsPath, 1, self.blockTable, self.blockSeq, self.blockSize, self.batchSize)
        print 'LOADSTATS', os.path.basename(fileAbsPath), lasops.getPCFileDetails(fileAbsPath)[1], time.time() - t0
    def addPCFormat(self, cursor, schemaFile, fileAbsPath, srid):
        (_, _, _, _, _, _, _, _, scaleX, scaleY, scaleZ, offsetX, offsetY,
         offsetZ) = lasops.getPCFileDetails(fileAbsPath, srid)

        updatedFormat = False
        schema = None

        pc_namespace = '{http://pointcloud.org/schemas/PC/}'

        while not updatedFormat:

            # Check whether there is already a format with current scale-offset values
            cursor.execute(
                "SELECT pcid,schema FROM pointcloud_formats WHERE srid = %s AND scaleX = %s AND scaleY = %s AND scaleZ = %s AND offsetX = %s AND offsetY = %s AND offsetZ = %s",
                [srid, scaleX, scaleY, scaleZ, offsetX, offsetY, offsetZ])
            rows = cursor.fetchall()

            if len(rows):
                # There is already a format with these scale-offset values
                [pcid, schema] = rows[0]
                root = ET.fromstring(schema)
                updatedFormat = True
            else:
                if schema == None:
                    # There is not a format with these scale-offset values. We add a schema for these ones
                    # Get ElementTree of the XML schema file
                    tree = ET.parse(schemaFile)
                    root = tree.getroot()

                    offsets = {'x': offsetX, 'y': offsetY, 'z': offsetZ}
                    scales = {'x': scaleX, 'y': scaleY, 'z': scaleZ}
                    for dimension in root.findall(pc_namespace + 'dimension'):
                        dimName = dimension.find(pc_namespace + 'name').text
                        if dimName.lower() in offsets:
                            dimension.find(pc_namespace + 'offset').text = str(
                                offsets[dimName.lower()])
                            dimension.find(pc_namespace + 'scale').text = str(
                                scales[dimName.lower()])

                    schema = '<?xml version="1.0" encoding="UTF-8"?>' + '\n' + ET.tostring(
                        root, encoding='utf8', method='xml')

                cursor.execute("SELECT max(pcid) FROM pointcloud_formats")
                rows = cursor.fetchall()
                pcid = 1
                if len(rows) and rows[0][0] != None:
                    pcid = rows[0][0] + 1
                try:
                    postgresops.mogrifyExecute(
                        cursor,
                        "INSERT INTO pointcloud_formats (pcid, srid, schema, scalex, scaley, scalez, offsetx, offsety, offsetz) VALUES (%s,%s,%s,%s,%s,%s,%s,%s,%s)",
                        [
                            pcid, srid, schema, scaleX, scaleY, scaleZ,
                            offsetX, offsetY, offsetZ
                        ])
                    updatedFormat = True
                except:
                    cursor.connection.rollback()

        # Get the used dimensions if still not acquired (they should be always the same)
        compression = root.find(pc_namespace +
                                'metadata').find('Metadata').text
        columns = []
        for dimension in root.findall(pc_namespace + 'dimension'):
            dName = dimension.find(pc_namespace + 'name').text
            correctDim = False
            for d in self.DM_PDAL:
                if self.DM_PDAL[d] == dName:
                    correctDim = True
                    columns.append(d)
                    break
            if not correctDim:
                raise Exception(
                    'Error: unexpected dimension name, check dimension mappings for PDAL'
                )

        return (columns, pcid, compression)
Example #11
def PostgreSQLWriter(xmlFile, inputFileAbsPath, connectionString, pcid, dimensionsNames, blockTable, srid, blockSize, compression):
    """ Create a XML file to load the data, in the given file, into the DB """
    (_, _, minX, minY, minZ, _, _, _, scaleX, scaleY, scaleZ, offsetX, offsetY, offsetZ) = lasops.getPCFileDetails(inputFileAbsPath)  

    xmlContent = """<?xml version="1.0" encoding="utf-8"?>
<Pipeline version="1.0">
<Writer type="writers.pgpointcloud">
    <Option name="connection">""" + connectionString + """</Option>
    <Option name="table">""" + blockTable + """</Option>
    <Option name="column">pa</Option>
    <Option name="srid">""" + str(srid) + """</Option>
    <Option name="pcid">""" + str(pcid) + """</Option>
    <Option name="overwrite">false</Option>
    <Option name="capacity">""" + str(blockSize) + """</Option>
    <Option name="compression">""" + compression + """</Option>
    <Option name="output_dims">""" + ",".join(dimensionsNames) + """</Option>
    <Option name="offset_x">""" + str(minX) + """</Option>
    <Option name="offset_y">""" + str(minY) + """</Option>
    <Option name="offset_z">""" + str(minZ) + """</Option>
    <Option name="scale_x">""" + str(scaleX) + """</Option>
    <Option name="scale_y">""" + str(scaleY) + """</Option>
    <Option name="scale_z">""" + str(scaleZ) + """</Option>
    <Filter type="filters.chipper">
        <Option name="capacity">""" + str(blockSize) + """</Option>
        <Reader type="readers.las">
            <Option name="filename">""" + inputFileAbsPath + """</Option>
            <Option name="spatialreference">EPSG:""" + str(srid) + """</Option>
        </Reader>
    </Filter>
</Writer>
</Pipeline>
"""
    utils.writeToFile(xmlFile, xmlContent)
Example #12
def OracleWriter(xmlFile, inputFileAbsPath, connectionString, dimensionsNames, blockTable, baseTable, srid, blockSize, compression, dimensionalOrientation, useOffsetScale):
    """ Create a XML file to load the data, in the given file, into the DB """
    
    offsetScale = ''
    if useOffsetScale:
        (_, _, minX, minY, minZ, _, _, _, scaleX, scaleY, scaleZ, offsetX, offsetY, offsetZ) = lasops.getPCFileDetails(inputFileAbsPath)
        offsetScale = """<Option name="offset_x">""" + str(offsetX) + """</Option>
   <Option name="offset_y">""" + str(offsetY) + """</Option>
   <Option name="offset_z">""" + str(offsetZ) + """</Option>
   <Option name="scale_x">""" + str(scaleX) + """</Option>
   <Option name="scale_y">""" + str(scaleY) + """</Option>
   <Option name="scale_z">""" + str(scaleZ) + """</Option>"""

    output_dims = ''
    if dimensionsNames != None:
        output_dims = '<Option name="output_dims">' + ','.join(dimensionsNames) + '</Option>'
    
    xmlContent = """<?xml version="1.0" encoding="utf-8"?>
<Pipeline version="1.0">
 <Writer type="writers.oci">
   <Option name="debug">false</Option>
   <Option name="verbose">1</Option>
   <Option name="connection">""" + connectionString + """</Option>
   <Option name="base_table_name">""" + baseTable + """</Option>
   <Option name="block_table_name">""" + blockTable + """</Option>
   <Option name="compression">""" + str(compression).lower() + """</Option>
   <Option name="store_dimensional_orientation">""" + str(dimensionalOrientation).lower() + """</Option>
   <Option name="cloud_column_name">pc</Option>
   <Option name="is3d">false</Option>
   <Option name="solid">false</Option>
   <Option name="overwrite">false</Option>
   <Option name="disable_cloud_trigger">true</Option>
   <Option name="srid">""" + str(srid) + """</Option>
   <Option name="create_index">false</Option>
   <Option name="capacity">""" + str(blockSize) + """</Option>
   <Option name="stream_output_precision">8</Option>
   <Option name="pack_ignored_fields">true</Option>
   """ + output_dims + """
   """ + offsetScale + """
   <Filter type="filters.chipper">
     <Option name="capacity">""" + str(blockSize) + """</Option>
     <Reader type="readers.las">
       <Option name="filename">""" + inputFileAbsPath + """</Option>
       <Option name="spatialreference">EPSG:""" + str(srid) + """</Option>
     </Reader>
   </Filter>            
 </Writer>
</Pipeline>      
"""
    utils.writeToFile(xmlFile, xmlContent)
def runChild(childId, childrenQueue, connectionString, dbtable, srid):
    kill_received = False
    connection = psycopg2.connect(connectionString)
    cursor = connection.cursor()
    while not kill_received:
        job = None
        try:
            # This call will patiently wait until new job is available
            job = childrenQueue.get()
        except:
            # if there is an error we will quit the loop
            kill_received = True
        if job == None:
            kill_received = True
        else:            
            [identifier, inputFile,] = job
            (_, count, minX, minY, minZ, maxX, maxY, maxZ, scaleX, scaleY, scaleZ, offsetX, offsetY, offsetZ) = lasops.getPCFileDetails(inputFile)
            
            insertStatement = """INSERT INTO """ + dbtable + """(id,filepath,num,scalex,scaley,scalez,offsetx,offsety,offsetz,geom) VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, ST_MakeEnvelope(%s, %s, %s, %s, %s));"""
            insertArgs = [identifier, inputFile, int(count), float(scaleX), float(scaleY), float(scaleZ), float(offsetX), float(offsetY), float(offsetZ), float(minX), float(minY), float(maxX), float(maxY), int(srid)]
            logging.info(cursor.mogrify(insertStatement, insertArgs))
            cursor.execute(insertStatement, insertArgs)
            connection.commit()
    cursor.close()
    connection.close()
    def addPCFormat(self, cursor, schemaFile, fileAbsPath, srid):
        (_, _, _, _, _, _, _, _, scaleX, scaleY, scaleZ, offsetX, offsetY, offsetZ) = lasops.getPCFileDetails(
            fileAbsPath, srid
        )

        updatedFormat = False
        schema = None

        pc_namespace = "{http://pointcloud.org/schemas/PC/}"

        while not updatedFormat:

            # Check whether there is already a format with current scale-offset values
            cursor.execute(
                "SELECT pcid,schema FROM pointcloud_formats WHERE srid = %s AND scaleX = %s AND scaleY = %s AND scaleZ = %s AND offsetX = %s AND offsetY = %s AND offsetZ = %s",
                [srid, scaleX, scaleY, scaleZ, offsetX, offsetY, offsetZ],
            )
            rows = cursor.fetchall()

            if len(rows):
                # There is already a format with these scale-offset values
                [pcid, schema] = rows[0]
                root = ET.fromstring(schema)
                updatedFormat = True
            else:
                if schema == None:
                    # There is not a format with these scale-offset values. We add a schema for these ones
                    # Get ElementTree of the XML schema file
                    tree = ET.parse(schemaFile)
                    root = tree.getroot()

                    offsets = {"x": offsetX, "y": offsetY, "z": offsetZ}
                    scales = {"x": scaleX, "y": scaleY, "z": scaleZ}
                    for dimension in root.findall(pc_namespace + "dimension"):
                        dimName = dimension.find(pc_namespace + "name").text
                        if dimName.lower() in offsets:
                            dimension.find(pc_namespace + "offset").text = str(offsets[dimName.lower()])
                            dimension.find(pc_namespace + "scale").text = str(scales[dimName.lower()])

                    schema = (
                        '<?xml version="1.0" encoding="UTF-8"?>'
                        + "\n"
                        + ET.tostring(root, encoding="utf8", method="xml")
                    )

                cursor.execute("SELECT max(pcid) FROM pointcloud_formats")
                rows = cursor.fetchall()
                pcid = 1
                if len(rows) and rows[0][0] != None:
                    pcid = rows[0][0] + 1
                try:
                    postgresops.mogrifyExecute(
                        cursor,
                        "INSERT INTO pointcloud_formats (pcid, srid, schema, scalex, scaley, scalez, offsetx, offsety, offsetz) VALUES (%s,%s,%s,%s,%s,%s,%s,%s,%s)",
                        [pcid, srid, schema, scaleX, scaleY, scaleZ, offsetX, offsetY, offsetZ],
                    )
                    updatedFormat = True
                except:
                    cursor.connection.rollback()

        # Get the used dimensions if still not acquired (they should be always the same)
        compression = root.find(pc_namespace + "metadata").find("Metadata").text
        columns = []
        for dimension in root.findall(pc_namespace + "dimension"):
            dName = dimension.find(pc_namespace + "name").text
            correctDim = False
            for d in self.DM_PDAL:
                if self.DM_PDAL[d] == dName:
                    correctDim = True
                    columns.append(d)
                    break
            if not correctDim:
                raise Exception("Error: unexpected dimension name, check dimension mappings for PDAL")

        return (columns, pcid, compression)