def initialize(self):
        """Initialise the DB (optionally creating it and the point cloud
        extension), read the extent/scales of the input folder and create
        the blocks and meta tables."""
        # Initialize DB and extensions if creation of user is required
        if self.cDB:
            self.createDB()
            connection = self.getConnection()
            cursor = connection.cursor()
            self.initPointCloud(cursor)
        else:
            connection = self.getConnection()
            cursor = connection.cursor()
        # Create SQL method to get the quad cell id
        self.createQuadCellId(cursor)

        logging.info('Getting files and extent from input folder ' + self.inputFolder)
        # Portable CPU count; the previous 'grep -c ^processor /proc/cpuinfo'
        # shell pipeline only worked on Linux and spawned an extra process
        import multiprocessing
        self.numProcessesLoad = multiprocessing.cpu_count()
        (self.inputFiles, _, _, _, boundingCube, scales) = lasops.getPCFolderDetails(self.inputFolder, numProc = self.numProcessesLoad)
        (self.minX, self.minY, self.minZ, self.maxX, self.maxY, _) = boundingCube
        (self.scaleX, self.scaleY, self.scaleZ) = scales

        self.createBlocksTable(cursor, self.blockTable, self.tableSpace, True)

        # Create meta table to save the extent of the PC
        self.metaTable = self.blockTable + '_meta'
        self.createMetaTable(cursor, self.metaTable)

        connection.close()
Example #2
0
 def initialize(self):
     """Reset the data folder and collect the list of input files.

     Any previous content of ``self.dataFolder`` is removed."""
     # Remove possible previous data. shutil/os calls replace the former
     # os.system('rm -rf ...')/'mkdir -p' string concatenation, which broke
     # on paths with spaces and was shell-injectable.
     logging.info('Creating data folder ' + self.dataFolder)
     import shutil
     shutil.rmtree(self.dataFolder, ignore_errors=True)
     if not os.path.isdir(self.dataFolder):
         os.makedirs(self.dataFolder)

     logging.info('Getting files and SRID from input folder ' + self.inputFolder)
     (self.inputFiles, _, _, _, _, _) = lasops.getPCFolderDetails(self.inputFolder, numProc = self.numProcessesLoad)
Example #3
0
 def getNumPoints(self):
     """Return the total number of points, or 0 if it cannot be determined.

     When a DB index is available the count is read from the LAS index
     table via psql; otherwise the input folder is scanned."""
     try:
         if self.dbIndex:
             connString = self.getConnectionString(False, True)
             return int(os.popen('psql ' + connString + ' -c "select sum(num) from ' + self.lasIndexTableName + '" -t -A').read())
         else:
             # Element 3 of the folder details is the number of points
             return int(lasops.getPCFolderDetails(self.dataFolder, numProc = self.numProcessesLoad)[3])
     # 'except E as v' replaces the Python-2-only 'except E, v' syntax;
     # it is valid on Python 2.6+ and required on Python 3
     except Exception as msg:
         logging.error(msg)
         return 0
    def initialize(self):
        """Prepare the Oracle external-table loader: validate the requested
        columns, scan the input folder, register the LAS directory in the DB
        and create the external (staging) and blocks tables."""
        # Only plain XYZ is supported until the hilbert pre-processor changes
        if self.columns != 'xyz':
            raise Exception('ERROR: This loader only currently accepts XYZ!. First you need to change the hilbert pre-processor')

        if self.cUser:
            self.createUser()

        # Describe the point cloud folder (file list and 2D extent)
        logging.info('Getting files, extent and SRID from input folder ' + self.inputFolder)
        (self.inputFiles, _, _, _, boundingCube, _) = lasops.getPCFolderDetails(self.inputFolder, numProc = self.numProcessesLoad)
        (self.minX, self.minY, _, self.maxX, self.maxY, _) = boundingCube

        # Work out the folder holding the data and a wildcard selecting it
        if os.path.isfile(self.inputFolder):
            dataDir = os.path.abspath(os.path.join(self.inputFolder, '..'))
            fileSelector = os.path.basename(self.inputFolder)
            fileExt = self.inputFolder.split('.')[-1]
        else:
            dataDir = self.inputFolder
            if not len(self.inputFiles):
                raise Exception('ERROR: None PC file in ' + self.inputFolder)
            fileExt = self.inputFiles[0].split('.')[-1]
            fileSelector = '*.' + fileExt
        if fileExt.lower() == 'laz':
            raise Exception('ERROR: pre-processor only accepts LAS files!')

        # Names for the staging (external) table and the Oracle directory
        self.flatTable = self.blockTable + '_STAGING'
        self.extTable = ('EXT_' + self.flatTable).upper()
        self.lasDirVariableName = (self.userName + '_las_dir').upper()

        # Register the Oracle directory that points at the LAS files
        self.createLASDirectory(self.lasDirVariableName, dataDir)

        connection = self.getConnection()
        cursor = connection.cursor()

        # Hilbert blocking also needs the hilbert code from the LAS reader
        requestedCols = list(self.columns)
        if self.blockMethod == 'hilbert' and 'h' not in requestedCols:
            requestedCols.append('h')
        self.colsToUse = requestedCols

        # External table backed by the pre-processor in Oracle directory EXE_DIR
        self.createExternalTable(cursor, fileSelector, self.extTable, self.colsToUse, self.lasDirVariableName, self.numProcessesLoad)
        # Blocks table (plus base table) for the loaded data
        self.createBlocksTable(cursor, self.blockTable, self.tableSpace, self.compression, self.baseTable)
        connection.close()
 def initialize(self):
     """Read the input folder extent and create the blocks and base tables
     (PDAL blocks also carry an explicit blockID column)."""
     # NOTE(review): user creation is intentionally disabled for this loader.

     # Collect files and the 2D extent of the point cloud
     logging.info('Getting files, extent and SRID from input folder ' + self.inputFolder)
     folderDetails = lasops.getPCFolderDetails(self.inputFolder, numProc = self.numProcessesLoad)
     self.inputFiles = folderDetails[0]
     (self.minX, self.minY, _, self.maxX, self.maxY, _) = folderDetails[4]

     # Open a DB connection
     dbConnection = self.getConnection()
     dbCursor = dbConnection.cursor()

     # For PDAL blocks we need the blockID as well in the blocks table
     self.createBlocksTable(dbCursor, self.blockTable, self.tableSpace, self.compression, self.baseTable, includeBlockId = True)
     dbConnection.close()
    def initialize(self):
        """Validate options, scan the input folder and create the external
        table plus the flat meta table for the LoaderExt loader.

        Raises an Exception when Morton codes are requested, when the input
        folder contains no PC file, or when the files are LAZ-compressed."""
        if 'k' in self.columns or 'k' in self.index:
            raise Exception('ERROR: LoaderExt not compatible with Morton codes')

        if self.cUser:
            self.createUser()

        # Get the point cloud folder description
        logging.info('Getting files, extent, scale and SRID from input folder ' + self.inputFolder)
        (self.inputFiles, _, _, _, boundingCube, scales) = lasops.getPCFolderDetails(self.inputFolder, numProc = self.numProcessesLoad)
        (self.minX, self.minY, _, self.maxX, self.maxY, _) = boundingCube
        (self.scaleX, self.scaleY, _) = scales

        # Get the parent folder and the wildcard text with file selection
        if os.path.isfile(self.inputFolder):
            parentFolder = os.path.abspath(os.path.join(self.inputFolder, '..'))
            lasFiles = os.path.basename(self.inputFolder)
            extension = self.inputFolder.split('.')[-1]
        else:
            parentFolder = self.inputFolder
            if len(self.inputFiles) == 0:
                raise Exception('ERROR: None PC file in ' + self.inputFolder)
            extension = self.inputFiles[0].split('.')[-1]
            lasFiles = '*.' + extension

        if extension.lower() == 'laz':
            raise Exception('ERROR: pre-processor only accepts LAS files!')

        self.extTable = ('EXT_' + self.flatTable).upper()
        self.lasDirVariableName = (self.userName + '_las_dir').upper()

        # Create the Oracle directory within the DB
        self.createLASDirectory(self.lasDirVariableName, parentFolder)

        # Open the connection once it is actually needed. The original code
        # opened a second connection/cursor earlier in the method that was
        # never closed and immediately shadowed here (connection leak).
        connection = self.getConnection()
        cursor = connection.cursor()

        # Define the external table (which uses the preprocessor file in Oracle directory EXE_DIR)
        self.createExternalTable(cursor, lasFiles, self.extTable, self.columns, self.lasDirVariableName, self.numProcessesLoad)
        self.createFlatMeta(cursor, self.metaTable)
        connection.close()
 def initialize(self):
     """Create the flat table and the meta table, initialising the DB first
     when requested."""
     # Bootstrap the database when asked to
     if self.cDB:
         self.createDB()

     # Open a connection and create the flat table
     conn = self.getConnection()
     cur = conn.cursor()
     self.createFlatTable(cur, self.flatTable, self.tableSpace, self.columns)

     logging.info('Getting files, extent and SRID from input folder ' + self.inputFolder)
     folderDetails = lasops.getPCFolderDetails(self.inputFolder, numProc = self.numProcessesLoad)
     self.inputFiles = folderDetails[0]
     (self.minX, self.minY, _, self.maxX, self.maxY, _) = folderDetails[4]
     (self.scaleX, self.scaleY, _) = folderDetails[5]

     # The meta table stores the extent of the PC
     self.createMetaTable(cur, self.metaTable)
     conn.close()
Example #8
0
 def initialize(self):
     """Create the user if required, read the folder extent and scales, and
     set up the flat and flat-meta tables."""
     if self.cUser:
         self.createUser()

     # Describe the point cloud folder (files, 2D extent and XY scales)
     logging.info('Getting files, extent, scale and SRID from input folder ' + self.inputFolder)
     pcDetails = lasops.getPCFolderDetails(self.inputFolder, numProc = self.numProcessesLoad)
     self.inputFiles = pcDetails[0]
     (self.minX, self.minY, _, self.maxX, self.maxY, _) = pcDetails[4]
     (self.scaleX, self.scaleY, _) = pcDetails[5]

     # Connect and create both tables
     conn = self.getConnection()
     cur = conn.cursor()
     self.createFlatTable(cur, self.flatTable, self.tableSpace, self.columns)
     self.createFlatMeta(cur, self.metaTable)
     conn.close()
Example #9
0
 def initialize(self):
     """Initialise the DB (optionally creating it and the point cloud
     extension) and create the blocks table."""
     # When a fresh DB is requested, also enable the point cloud extension
     if self.cDB:
         self.createDB()
         conn = self.getConnection()
         cur = conn.cursor()
         self.initPointCloud(cur)
     else:
         conn = self.getConnection()
         cur = conn.cursor()

     logging.info('Getting files and extent from input folder ' + self.inputFolder)
     folderDetails = lasops.getPCFolderDetails(self.inputFolder, numProc = self.numProcessesLoad)
     self.inputFiles = folderDetails[0]
     # Only the minimum corner of the bounding cube is needed here
     (self.minX, self.minY, self.minZ, _, _, _) = folderDetails[4]
     (self.scaleX, self.scaleY, self.scaleZ) = folderDetails[5]

     self.createBlocksTable(cur, self.blockTable, self.tableSpace)
     conn.close()
Example #10
0
    def initialize(self):
        """Create the DB if requested, then create the blocks table using the
        minimum corner and scales read from the input folder."""
        if self.cDB:
            # Fresh DB: create it and enable the point cloud extension
            self.createDB()
            connection = self.getConnection()
            cursor = connection.cursor()
            self.initPointCloud(cursor)
        else:
            connection = self.getConnection()
            cursor = connection.cursor()

        logging.info('Getting files and extent from input folder ' +
                     self.inputFolder)
        folderDetails = lasops.getPCFolderDetails(self.inputFolder,
                                                  numProc=self.numProcessesLoad)
        self.inputFiles = folderDetails[0]
        # Only the minimum corner of the bounding cube is used
        self.minX, self.minY, self.minZ = folderDetails[4][:3]
        self.scaleX, self.scaleY, self.scaleZ = folderDetails[5]

        self.createBlocksTable(cursor, self.blockTable, self.tableSpace)
        connection.close()
 def initialize(self):
     """Create the blocks/base tables and the block-id sequence, then
     initialise the Oracle point cloud object for the JAVA incremental
     loader."""
     # Only plain XYZ is supported by the JAVA incremental loader
     if self.columns != 'xyz':
         raise Exception('ERROR: This loader only currently accepts XYZ!. First you need to change the JAVA incremental loader')

     if self.cUser:
         self.createUser()

     # Describe the point cloud folder (file list and 2D extent)
     logging.info('Getting files, extent and SRID from input folder ' + self.inputFolder)
     (self.inputFiles, _, _, _, boundingCube, _) = lasops.getPCFolderDetails(self.inputFolder, numProc = self.numProcessesLoad)
     self.minX, self.minY = boundingCube[0], boundingCube[1]
     self.maxX, self.maxY = boundingCube[3], boundingCube[4]

     # Connect and create the blocks table (plus base table)
     conn = self.getConnection()
     cur = conn.cursor()
     self.createBlocksTable(cur, self.blockTable, self.tableSpace, self.compression, self.baseTable)

     # Sequence used to assign block identifiers
     self.blockSeq = self.blockTable + '_ID_SEQ'
     oracleops.mogrifyExecute(cur, "CREATE SEQUENCE " + self.blockSeq)
     self.initCreatePC(cur, self.srid, self.minX, self.minY, self.maxX, self.maxY, None, self.blockTable, self.baseTable, self.blockSize, self.tolerance, self.workTableSpace, False)
     conn.close()
Example #12
0
    def initialize(self):
        """Create the MonetDB database (optionally), the flat table(s) and the
        meta table, partitioning the input files over tiles when requested.

        Raises an Exception when partitioning is requested without imprints.
        """
        # Partitioned layout only makes sense with imprint indexes
        if self.partitioning and not self.imprints:
            raise Exception('Partitioning without imprints is not supported!')

        # NOTE: here self.createDB is a boolean flag (unlike loaders where a
        # createDB() method exists)
        if self.createDB:
            logging.info('Creating DB ' + self.dbName)
            # Drop previous DB if exist and create a new one
            os.system('monetdb stop ' + self.dbName)
            os.system('monetdb destroy ' + self.dbName + ' -f')
            os.system('monetdb create ' + self.dbName)
            os.system('monetdb release ' + self.dbName)

            connection = self.getConnection()
            cursor = connection.cursor()

#            monetdbops.mogrifyExecute(cursor, """CREATE FUNCTION GetX(morton BIGINT, scaleX DOUBLE, globalOffset BIGINT) RETURNS DOUBLE external name geom."GetX";""")
#            monetdbops.mogrifyExecute(cursor, """CREATE FUNCTION GetY(morton BIGINT, scaleY DOUBLE, globalOffset BIGINT) RETURNS DOUBLE external name geom."GetY";""")

        logging.info('Getting files, extent and SRID from input folder ' +
                     self.inputFolder)
        # Per-file bounding cubes are kept to assign files to partition tiles
        (self.inputFiles, inputFilesBoundingCube, _, _, boundingCube,
         scales) = lasops.getPCFolderDetails(self.inputFolder,
                                             numProc=self.numProcessesLoad)
        (self.minX, self.minY, self.minZ, self.maxX, self.maxY,
         self.maxZ) = boundingCube
        (self.scaleX, self.scaleY, _) = scales

        if not self.imprints:
            # If we want to create a final indexed table we need to put the
            # points in a temporal table
            self.tempFlatTable = 'TEMP_' + self.flatTable
            ftName = self.tempFlatTable
        else:
            ftName = self.flatTable

        connection = self.getConnection()
        cursor = connection.cursor()
        if self.partitioning:
            # Choose an nX x nY tiling of roughly numPartitions cells whose
            # tiles follow the aspect ratio of the extent
            rangeX = self.maxX - self.minX
            rangeY = self.maxY - self.minY
            nX = int(
                math.ceil(
                    math.sqrt(self.numPartitions) *
                    (float(rangeX) / float(rangeY))))
            nY = int(
                math.ceil(
                    math.sqrt(self.numPartitions) /
                    (float(rangeX) / float(rangeY))))

            # Map each input file to the tile (Morton code) containing the
            # centre of its bounding box
            self.tilesFiles = {}
            for i in range(len(self.inputFiles)):
                (fminX, fminY, _, fmaxX, fmaxY, _) = inputFilesBoundingCube[i]
                pX = fminX + ((fmaxX - fminX) / 2.)
                pY = fminY + ((fmaxY - fminY) / 2.)
                m = morton.EncodeMorton2D(
                    *self.getTileIndex(pX, pY, self.minX, self.minY, self.maxX,
                                       self.maxY, nX, nY))
                if m not in self.tilesFiles:
                    self.tilesFiles[m] = []
                self.tilesFiles[m].append(self.inputFiles[i])

            # Empty tiles get no table, so the real partition count may be
            # lower than numPartitions
            logging.info('Real number of partitions is ' +
                         str(len(self.tilesFiles)))

            # One MERGE table fronting one child table per non-empty tile
            monetdbops.mogrifyExecute(
                cursor, "CREATE MERGE TABLE " + ftName + " (" +
                (', '.join(self.getDBColumns())) + ")")
            for m in sorted(self.tilesFiles):
                monetdbops.mogrifyExecute(
                    cursor, "CREATE TABLE " + ftName + str(m) + " (" +
                    (', '.join(self.getDBColumns())) + ")")
        else:
            # Single flat table
            monetdbops.mogrifyExecute(
                cursor, "CREATE TABLE " + ftName + " (" +
                (', '.join(self.getDBColumns())) + ")")

        #  Create the meta-data table
        monetdbops.mogrifyExecute(
            cursor, "CREATE TABLE " + self.metaTable +
            " (tablename text, srid integer, minx DOUBLE PRECISION, miny DOUBLE PRECISION, maxx DOUBLE PRECISION, maxy DOUBLE PRECISION, scalex DOUBLE PRECISION, scaley DOUBLE PRECISION)"
        )
        # Close connection
        connection.close()
    def initialize(self):
        """Create the MonetDB database (optionally), the flat table(s) and the
        meta table, partitioning the input files over tiles when requested."""
        # Imprint indexes are a precondition for the partitioned layout
        if self.partitioning and not self.imprints:
            raise Exception('Partitioning without imprints is not supported!')

        # Here self.createDB is a boolean flag
        if self.createDB:
            logging.info('Creating DB ' + self.dbName)
            # Drop any previous DB and create a fresh one
            os.system('monetdb stop ' + self.dbName)
            os.system('monetdb destroy ' + self.dbName + ' -f')
            os.system('monetdb create ' + self.dbName)
            os.system('monetdb release ' + self.dbName)

            connection = self.getConnection()
            cursor = connection.cursor()

        logging.info('Getting files, extent and SRID from input folder ' + self.inputFolder)
        (self.inputFiles, inputFilesBoundingCube, _, _, boundingCube, scales) = lasops.getPCFolderDetails(self.inputFolder, numProc = self.numProcessesLoad)
        (self.minX, self.minY, self.minZ, self.maxX, self.maxY, self.maxZ) = boundingCube
        (self.scaleX, self.scaleY, _) = scales

        # Without imprints the points first go into a temporal table that is
        # re-created later with the index
        if self.imprints:
            targetTable = self.flatTable
        else:
            self.tempFlatTable = 'TEMP_' + self.flatTable
            targetTable = self.tempFlatTable

        connection = self.getConnection()
        cursor = connection.cursor()
        columnsSpec = ', '.join(self.getDBColumns())
        if self.partitioning:
            # Tile the extent in numTilesX x numTilesY cells (about
            # numPartitions in total) following the extent's aspect ratio
            aspect = float(self.maxX - self.minX) / float(self.maxY - self.minY)
            numTilesX = int(math.ceil(math.sqrt(self.numPartitions) * aspect))
            numTilesY = int(math.ceil(math.sqrt(self.numPartitions) / aspect))

            # Assign each file to the tile containing its bounding-box centre
            self.tilesFiles = {}
            for fileAbsPath, fileCube in zip(self.inputFiles, inputFilesBoundingCube):
                (fMinX, fMinY, _, fMaxX, fMaxY, _) = fileCube
                centerX = fMinX + ((fMaxX - fMinX) / 2.)
                centerY = fMinY + ((fMaxY - fMinY) / 2.)
                tileCode = morton.EncodeMorton2D(*self.getTileIndex(centerX, centerY, self.minX, self.minY, self.maxX, self.maxY, numTilesX, numTilesY))
                self.tilesFiles.setdefault(tileCode, []).append(fileAbsPath)

            # Empty tiles get no table, so the real count may be lower
            logging.info('Real number of partitions is ' + str(len(self.tilesFiles)))

            # A MERGE table fronting one child table per non-empty tile
            monetdbops.mogrifyExecute(cursor, "CREATE MERGE TABLE " + targetTable + " (" + columnsSpec + ")")
            for tileCode in sorted(self.tilesFiles):
                monetdbops.mogrifyExecute(cursor, "CREATE TABLE " + targetTable + str(tileCode) + " (" + columnsSpec + ")")
        else:
            monetdbops.mogrifyExecute(cursor, "CREATE TABLE " + targetTable + " (" + columnsSpec + ")")

        #  Create the meta-data table
        monetdbops.mogrifyExecute(cursor, "CREATE TABLE " + self.metaTable + " (tablename text, srid integer, minx DOUBLE PRECISION, miny DOUBLE PRECISION, maxx DOUBLE PRECISION, maxy DOUBLE PRECISION, scalex DOUBLE PRECISION, scaley DOUBLE PRECISION)")
        # Close connection
        connection.close()