Example No. 1
def lasDuplicateFree(directory, output):
    """ Takes a directory with las [laz] files and an output directory

        Returns las, [laz] files free from duplicates"""
    files = getFiles(directory,['laz'],True)
    checkDirectory(output)
    for file in files:
        print(file)
        fh = readFileLaspy(file)
        writeFile(output,file[file.rfind('\\')+1:],fh.header,removeDuplicate(fh).transpose())
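The helpers used above (getFiles, checkDirectory, readFileLaspy, writeFile, removeDuplicate) are defined elsewhere in the project. As a rough illustration of the de-duplication step only, a minimal sketch with numpy might look like the following; the function body, and the assumption that the file handle exposes integer X/Y/Z arrays as in laspy, are not taken from the original code:

import numpy as np

def removeDuplicate(fh):
    """Hypothetical sketch: keep the first of any points sharing the same raw (X, Y, Z).

    fh is assumed to be a laspy file handle exposing integer X, Y and Z arrays;
    the project's real helper may work differently.
    """
    coords = np.vstack((fh.X, fh.Y, fh.Z)).transpose()  # shape (numPoints, 3)
    # np.unique over the rows returns the index of the first occurrence of each triple
    _, keep = np.unique(coords, axis=0, return_index=True)
    return coords[np.sort(keep)]  # the caller transposes this before writing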
def run(inputFolder, numcores, dbname, dbuser, dbpass, dbhost, dbport,
        createdb, dbtable, srid):
    opts = 0
    childrenQueue = multiprocessing.Queue()
    ifiles = utils.getFiles(inputFolder)
    for i in range(len(ifiles)):
        childrenQueue.put([i, ifiles[i]])
    # we add as many None jobs as numcores to tell the workers to terminate (queue is FIFO)
    for i in range(int(numcores)):
        childrenQueue.put(None)

    clineConString = postgresops.getConnectString(dbname, dbuser, dbpass,
                                                  dbhost, dbport, True)
    psycopgConString = postgresops.getConnectString(dbname, dbuser, dbpass,
                                                    dbhost, dbport, False)

    if createdb:
        os.system('dropdb ' + clineConString)
        os.system('createdb ' + clineConString)

    connection = psycopg2.connect(psycopgConString)
    cursor = connection.cursor()
    if createdb:
        cursor.execute('CREATE EXTENSION postgis')
    connection.commit()
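    # Table with one row per input file: id, file path, point count, scale and
    # offset metadata, plus a geometry column (presumably the file footprint,
    # filled in later by the runChild workers)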
    q = """
    CREATE TABLE """ + dbtable + """ (
        id integer,
        filepath text,
        num integer,
        scalex double precision,
        scaley double precision,
        scalez double precision,
        offsetx double precision,     
        offsety double precision,
        offsetz double precision,
        geom public.geometry(Geometry,""" + str(srid) + """)
    )"""
    logging.info(cursor.mogrify(q))
    cursor.execute(q)
    connection.commit()

    #    q = "select addgeometrycolumn('" + dbtable + "','geom',28992,'POLYGON',2)"
    #    logging.info(cursor.mogrify(q))
    #    cursor.execute(q)
    #    connection.commit()
    print('numcores', numcores)
    children = []
    # We start numcores child processes
    for i in range(int(numcores)):
        children.append(
            multiprocessing.Process(target=runChild,
                                    args=(i, childrenQueue, psycopgConString,
                                          dbtable, srid)))
        children[-1].start()

    # wait for all children to finish their execution
    for i in range(int(numcores)):
        children[i].join()

    q = "create index ON " + dbtable + " using GIST (geom)"
    logging.info(cursor.mogrify(q))
    cursor.execute(q)

    connection.commit()

    old_isolation_level = connection.isolation_level
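    # VACUUM cannot run inside a transaction block, so switch the connection to
    # autocommit (isolation level 0) before issuing it, then restore the old level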
    connection.set_isolation_level(0)
    q = "VACUUM FULL ANALYZE " + dbtable
    logging.info(cursor.mogrify(q))
    cursor.execute(q)
    connection.commit()
    connection.set_isolation_level(old_isolation_level)
    cursor.close()
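runChild itself is not shown in this example; the queue protocol above (one [index, file] job per input file, followed by one None sentinel per worker) implies a consumer loop roughly like the sketch below. The processFile helper is a placeholder for whatever per-file loading and INSERT logic the real worker performs:

import psycopg2

def runChild(childId, childrenQueue, psycopgConString, dbtable, srid):
    """Minimal sketch of a worker: pull jobs until the None sentinel arrives (the queue is FIFO)."""
    connection = psycopg2.connect(psycopgConString)
    cursor = connection.cursor()
    while True:
        job = childrenQueue.get()
        if job is None:  # sentinel: no more files for this worker
            break
        index, filePath = job
        processFile(cursor, dbtable, srid, index, filePath)  # hypothetical per-file helper
        connection.commit()
    cursor.close()
    connection.close()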
Example No. 3
def getPCFolderDetails(absPath, srid=None, numProc=1):
    """ Get the details (count numPoints and extent) of a folder with LAS/LAZ files (using LAStools, hence it is fast)
    It is assumed that all file shave same SRID and scale as first one"""
    tcount = 0
    (tminx, tminy, tminz, tmaxx, tmaxy, tmaxz) = (None, None, None, None, None, None)
    (tscalex, tscaley, tscalez) = (None, None, None)
    tsrid = None

    if os.path.isdir(absPath):
        inputFiles = utils.getFiles(absPath, recursive=True)
    else:
        inputFiles = [absPath]

    numInputFiles = len(inputFiles)

    tasksQueue = multiprocessing.Queue()  # The queue of tasks
    detailsQueue = multiprocessing.Queue()  # The queue of results/details

    inputFilesAbsPath = []
    inputFilesBoundingCube = []
    for i in range(numInputFiles):
        inputFilesAbsPath.append(None)
        inputFilesBoundingCube.append(None)
        tasksQueue.put((i, inputFiles[i]))
    for i in range(numProc):  # we add as many None jobs as numProc to tell them to terminate (queue is FIFO)
        tasksQueue.put(None)

    workers = []
    # We start numProc worker processes
    for i in range(numProc):
        workers.append(
            multiprocessing.Process(target=runProcGetPCFolderDetailsWorker, args=(tasksQueue, detailsQueue, srid))
        )
        workers[-1].start()

    for i in range(numInputFiles):
        sys.stdout.write("\r")
        (
            inputFileIndex,
            inputFileAbsPath,
            srid,
            count,
            minx,
            miny,
            minz,
            maxx,
            maxy,
            maxz,
            scalex,
            scaley,
            scalez,
            _,
            _,
            _,
        ) = detailsQueue.get()
        inputFilesAbsPath[inputFileIndex] = inputFileAbsPath
        inputFilesBoundingCube[inputFileIndex] = (minx, miny, minz, maxx, maxy, maxz)

        if i == 0:
            (tscalex, tscaley, tscalez) = (scalex, scaley, scalez)
            tsrid = srid

        tcount += count
        if count:
            if tminx is None or minx < tminx:
                tminx = minx
            if tminy is None or miny < tminy:
                tminy = miny
            if tminz is None or minz < tminz:
                tminz = minz
            if tmaxx is None or maxx > tmaxx:
                tmaxx = maxx
            if tmaxy is None or maxy > tmaxy:
                tmaxy = maxy
            if tmaxz is None or maxz > tmaxz:
                tmaxz = maxz
        sys.stdout.write("\rCompleted %.02f%%" % (100.0 * float(i) / float(numInputFiles)))
        sys.stdout.flush()
    sys.stdout.write("\r")
    sys.stdout.write("\rCompleted 100.00%!")

    # wait for all workers to finish their execution
    for i in range(numProc):
        workers[i].join()

    print()
    return (
        inputFilesAbsPath,
        inputFilesBoundingCube,
        tsrid,
        tcount,
        (tminx, tminy, tminz, tmaxx, tmaxy, tmaxz),
        (tscalex, tscaley, tscalez),
    )
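A minimal usage sketch of getPCFolderDetails; the folder path and process count are illustrative, and the SRID is the one that appears elsewhere in these examples:

(paths, boundingCubes, srid, numPoints, extent, scales) = getPCFolderDetails(
    '/data/ahn_tiles', srid=28992, numProc=4)
print('files: %d, total points: %d' % (len(paths), numPoints))
print('extent (minx, miny, minz, maxx, maxy, maxz): %s' % str(extent))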