Example #1
    def __init__(self, fileHandle, mode):
        self.fileHandle = fileHandle
        self.mode = mode

        # read the pixelgrid info out of the header
        # this is the same for all spatial indices in an SPD V4 file
        fileAttrs = fileHandle.attrs
        self.binSize = fileAttrs['BIN_SIZE']
        shape = fileAttrs['NUMBER_BINS_Y'], fileAttrs['NUMBER_BINS_X']
        xMin = fileAttrs['INDEX_TLX']
        yMax = fileAttrs['INDEX_TLY']
        xMax = xMin + (shape[1] * self.binSize)
        yMin = yMax - (shape[0] * self.binSize)
        self.wkt = fileAttrs['SPATIAL_REFERENCE']
        if sys.version_info[0] == 3 and isinstance(self.wkt, bytes):
            self.wkt = self.wkt.decode()

        if shape[0] != 0 or shape[1] != 0:
            self.pixelGrid = pixelgrid.PixelGridDefn(projection=self.wkt,
                                                     xMin=xMin,
                                                     xMax=xMax,
                                                     yMin=yMin,
                                                     yMax=yMax,
                                                     xRes=self.binSize,
                                                     yRes=self.binSize)
        else:
            self.pixelGrid = None
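
The extent arithmetic above derives the full bounding box from the top-left corner, the bin counts and the bin size stored in the SPD V4 header. A minimal sketch with made-up header values:

# hypothetical header values, for illustration only
binSize = 1.0
nRows, nCols = 500, 400            # NUMBER_BINS_Y, NUMBER_BINS_X
xMin, yMax = 509000.0, 6945000.0   # INDEX_TLX, INDEX_TLY

xMax = xMin + (nCols * binSize)    # 509400.0
yMin = yMax - (nRows * binSize)    # 6944500.0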
Example #2
def findCommonPixelGridRegion(gridList, refGrid, combine=INTERSECTION):
    """
    Returns a PixelGridDefn for the combination of all the grids 
    in the given gridList. The output grid is in the same coordinate 
    system as the reference grid. 
    
    This is adapted from the original in RIOS. This version does not
    attempt to reproject between coordinate systems. Firstly, because
    many LiDAR files do not seem to have the projection set. Secondly,
    we don't support reprojection anyway - unlike RIOS.
    
    The combine parameter controls whether UNION, INTERSECTION 
    or BOUNDS_FROM_REFERENCE is performed. 
    
    """
    newGrid = refGrid
    if combine != imageio.BOUNDS_FROM_REFERENCE:
        for grid in gridList:
            if not newGrid.alignedWith(grid):
                xMin = grid.snapToGrid(grid.xMin, refGrid.xMin, refGrid.xRes)
                xMax = grid.snapToGrid(grid.xMax, refGrid.xMax, refGrid.xRes)
                yMin = grid.snapToGrid(grid.yMin, refGrid.yMin, refGrid.yRes)
                yMax = grid.snapToGrid(grid.yMax, refGrid.yMax, refGrid.yRes)
                grid = pixelgrid.PixelGridDefn(xMin=xMin, xMax=xMax, yMin=yMin, 
                        yMax=yMax, xRes=refGrid.xRes, yRes=refGrid.yRes, 
                        projection=refGrid.projection)

            if combine == imageio.INTERSECTION:
                newGrid = newGrid.intersection(grid)
            elif combine == imageio.UNION:
                newGrid = newGrid.union(grid)
        
    return newGrid
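
A minimal usage sketch, assuming two PixelGridDefn instances in the same coordinate system (wkt here stands for a WKT projection string; the grid values are made up):

# hypothetical grids, for illustration only
refGrid = pixelgrid.PixelGridDefn(xMin=0.0, xMax=100.0, yMin=0.0, yMax=100.0,
                                  xRes=1.0, yRes=1.0, projection=wkt)
otherGrid = pixelgrid.PixelGridDefn(xMin=50.0, xMax=150.0, yMin=50.0, yMax=150.0,
                                    xRes=1.0, yRes=1.0, projection=wkt)

# overlapping area of both grids, snapped to the reference grid
common = findCommonPixelGridRegion([otherGrid], refGrid,
                                   combine=imageio.INTERSECTION)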
Example #3
    def getPixelGrid(self):
        """
        Return the PixelGridDefn for this file

        """
        if not self.hasSpatialIndex():
            msg = 'las file has no index. See lasindex'
            raise generic.LiDARFunctionUnsupported(msg)

        header = self.getHeader()
        try:
            epsg = self.lasFile.getEPSG()
            wkt = self.getWktFromEPSG(epsg)
        except _las.error:
            # no projection info
            wkt = None

        binSize = self.lasFile.binSize
        if binSize == 0:
            msg = 'Must set BIN_SIZE option to read Las files spatially'
            raise generic.LiDARFunctionUnsupported(msg)

        pixgrid = pixelgrid.PixelGridDefn(projection=wkt,
                                          xMin=header['X_MIN'],
                                          xMax=header['X_MAX'],
                                          yMin=header['Y_MIN'],
                                          yMax=header['Y_MAX'],
                                          xRes=binSize,
                                          yRes=binSize)
        return pixgrid
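
A hedged usage sketch, assuming driver is an already-open LAS driver instance with the BIN_SIZE option set and a lasindex-built spatial index:

# hypothetical: driver is an open, spatially indexed LAS file driver
pixGrid = driver.getPixelGrid()
print(pixGrid.xMin, pixGrid.yMax, pixGrid.xRes)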
Example #4
def run(oldpath, newpath):
    """
    Runs the 7th basic test suite. Tests:

    setting pixel grid different from the spatial index
    """
    input = os.path.join(oldpath, IN_FILE)
    interp = os.path.join(newpath, OUT_FILE)
    origInterp = os.path.join(oldpath, OUT_FILE)

    dataFiles = lidarprocessor.DataFiles()
    dataFiles.input = lidarprocessor.LidarFile(input, lidarprocessor.READ)
    dataFiles.imageOut = lidarprocessor.ImageFile(interp, lidarprocessor.CREATE)

    controls = lidarprocessor.Controls()
    progress = cuiprogress.GDALProgressBar()
    controls.setProgress(progress)
    controls.setSpatialProcessing(True)

    # can't use origInterp as the projection source as it might not
    # have been created yet (e.g. when called from testing_cmds.sh)
    projectionSource = os.path.join(oldpath, PROJECTION_SOURCE)
    wkt = getProjection(projectionSource)
    pixGrid = pixelgrid.PixelGridDefn(xMin=509199.0, yMax=6944830, xMax=509857, 
                    yMin=6944130, xRes=2.0, yRes=2.0, projection=wkt)
    controls.setFootprint(lidarprocessor.BOUNDS_FROM_REFERENCE)
    controls.setReferencePixgrid(pixGrid)

    lidarprocessor.doProcessing(writeImageFunc, dataFiles, controls=controls)

    utils.compareImageFiles(origInterp, interp)
Example #5
def main():
    metadataFile = sys.argv[1]
    imgFile = sys.argv[2]
    toaFile = sys.argv[3]
    d = float(sys.argv[4])
    outfile = "%s_envi.exp" % (metadataFile.split('.')[0])

    if os.path.exists(toaFile):
        os.remove(toaFile)

    (calFactors, solarZenithAngle, eSun, layerNames) = getParams(metadataFile)

    infiles = applier.FilenameAssociations()
    outfiles = applier.FilenameAssociations()
    otherargs = applier.OtherInputs()
    controls = applier.ApplierControls()

    infiles.raw = imgFile
    outfiles.toaFile = toaFile

    otherargs.calFactors = calFactors
    otherargs.solarZenithAngle = solarZenithAngle
    otherargs.pi = 3.14159265358979323846
    otherargs.d = d
    otherargs.eSun = eSun

    info = fileinfo.ImageInfo(imgFile)
    xMin = np.floor(info.xMin)
    xMax = np.ceil(info.xMax)
    yMin = np.floor(info.yMin)
    yMax = np.ceil(info.yMax)
    proj = info.projection
    transform = info.transform
    xRes = info.xRes
    yRes = info.yRes
    otherargs.outNull = 0
    print(otherargs.outNull)

    controls.setStatsIgnore(otherargs.outNull)
    controls.setOutputDriverName('GTiff')
    # a single call; setCreationOptions() replaces any previously set options
    controls.setCreationOptions(['COMPRESS=LZW', 'BIGTIFF=IF_SAFER'])

    pixgrid = pixelgrid.PixelGridDefn(geotransform=transform,
                                      xMin=xMin,
                                      xMax=xMax,
                                      yMin=yMin,
                                      yMax=yMax,
                                      xRes=xRes,
                                      yRes=yRes,
                                      projection=proj)
    print(pixgrid)
    controls.setReferencePixgrid(pixgrid)
    controls.setLayerNames(layerNames)
    controls.setWindowXsize(20)
    controls.setWindowYsize(20)
    progress = cuiprogress.GDALProgressBar()
    controls.setProgress(progress)

    applier.apply(doTOA, infiles, outfiles, otherargs, controls=controls)
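
doTOA itself is not shown above. A minimal sketch of what such a RIOS user function could look like, assuming calFactors and eSun are per-band sequences, solarZenithAngle is in degrees, and a DN of 0 marks null pixels (all assumptions, not the original implementation):

def doTOA(info, inputs, outputs, otherargs):
    # convert DN to radiance with per-band calibration factors, then to
    # top-of-atmosphere reflectance: pi * L * d^2 / (ESUN * cos(solarZenith))
    dn = inputs.raw.astype(np.float32)
    toa = np.zeros_like(dn)
    cosSZA = np.cos(np.radians(otherargs.solarZenithAngle))
    for band in range(dn.shape[0]):
        radiance = dn[band] * otherargs.calFactors[band]
        toa[band] = (otherargs.pi * radiance * otherargs.d ** 2 /
                     (otherargs.eSun[band] * cosSZA))
    toa[dn == 0] = otherargs.outNull   # carry input nulls through
    outputs.toaFile = toa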
Example #6
def calcWorkingExtent(vector, raster, layer):
    """
    Calculates the working extent of the vector
    in the coordinate system of the raster and returns a
    PixelGridDefn instance.
    Necessary since RIOS only calculates intersection
    based on rasters
    """
    vectords = ogr.Open(vector)
    if vectords is None:
        raise IOError('Unable to read vector file %s' % vector)
    vectorlyr = vectords.GetLayer(layer)

    vectorsr = vectorlyr.GetSpatialRef()
    vectorextent = vectorlyr.GetExtent()
    (xmin, xmax, ymin, ymax) = vectorextent

    rasterds = gdal.Open(raster)
    if rasterds is None:
        raise IOError('Unable to read raster file %s' % raster)
    rasterproj = rasterds.GetProjection()
    if rasterproj is None or rasterproj == '':
        raise ValueError('Raster must have projection set')
    rastertransform = rasterds.GetGeoTransform()

    rastersr = osr.SpatialReference(rasterproj)
    transform = osr.CoordinateTransformation(vectorsr, rastersr)
    (tl_x, tl_y, z) = transform.TransformPoint(xmin, ymax)
    (tr_x, tr_y, z) = transform.TransformPoint(xmax, ymax)
    (bl_x, bl_y, z) = transform.TransformPoint(xmin, ymin)
    (br_x, br_y, z) = transform.TransformPoint(xmax, ymin)

    extent = (min(tl_x, bl_x), max(tr_x, br_x),
              min(bl_y, br_y), max(tl_y, tr_y))

    # round to pixels
    roundedextent = (roundToRasterGridX(rastertransform, extent[0],
                                        ROUND_DOWN),
                     roundToRasterGridX(rastertransform, extent[1], ROUND_UP),
                     roundToRasterGridX(rastertransform, extent[2],
                                        ROUND_DOWN),
                     roundToRasterGridX(rastertransform, extent[3], ROUND_UP))

    pixgrid = pixelgrid.PixelGridDefn(projection=rasterproj,
                                      xMin=roundedextent[0],
                                      xMax=roundedextent[1],
                                      yMin=roundedextent[2],
                                      yMax=roundedextent[3],
                                      xRes=rastertransform[1],
                                      yRes=abs(rastertransform[5]))

    del vectords
    del rasterds

    return pixgrid
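
A minimal usage sketch with hypothetical file names:

# hypothetical inputs, for illustration only
pixgrid = calcWorkingExtent('boundary.shp', 'image.tif', layer=0)
print(pixgrid.xMin, pixgrid.xMax, pixgrid.yMin, pixgrid.yMax)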
Example #7
def getGridInfoFromHeader(header, binSize, footprint=lidarprocessor.UNION):
    """
    Given a LiDAR file header (or a list of headers, in which case the
    maximum extent is calculated) plus a binSize, return a tuple of
    (xMin, yMax, ncols, nrows) for doing operations on a grid.
    Specify lidarprocessor.UNION or lidarprocessor.INTERSECTION to determine
    how multiple headers are combined.
    """
    if isinstance(header, dict):
        headers = [header]
    else:
        headers = header

    # get the dims of the first one
    pixGrid = pixelgrid.PixelGridDefn(xMin=headers[0]['X_MIN'],
                                      xMax=headers[0]['X_MAX'],
                                      yMax=headers[0]['Y_MAX'],
                                      yMin=headers[0]['Y_MIN'],
                                      xRes=binSize,
                                      yRes=binSize)

    for header in headers[1:]:
        newGrid = pixelgrid.PixelGridDefn(xMin=header['X_MIN'],
                                          xMax=header['X_MAX'],
                                          yMax=header['Y_MAX'],
                                          yMin=header['Y_MIN'],
                                          xRes=binSize,
                                          yRes=binSize)

        if footprint == lidarprocessor.UNION:
            pixGrid = pixGrid.union(newGrid)
        elif footprint == lidarprocessor.INTERSECTION:
            pixGrid = pixGrid.intersection(newGrid)
        else:
            msg = 'unsupported footprint value'
            raise SpatialException(msg)

    # ceil() propagates nasty rounding errors, so use round() instead
    ncols = int(numpy.round((pixGrid.xMax - pixGrid.xMin) / binSize))
    nrows = int(numpy.round((pixGrid.yMax - pixGrid.yMin) / binSize))
    return (pixGrid.xMin, pixGrid.yMax, ncols, nrows)
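
A worked sketch with made-up header values; with a 1 metre bin size, a 400 by 500 metre extent comes out as 400 columns and 500 rows:

# hypothetical header, for illustration only
header = {'X_MIN': 509000.0, 'X_MAX': 509400.0,
          'Y_MIN': 6944500.0, 'Y_MAX': 6945000.0}
xMin, yMax, ncols, nrows = getGridInfoFromHeader(header, binSize=1.0)
# ncols == 400, nrows == 500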
Example #8
def indexAndMerge(extentList, extent, wkt, outfile, header):
    """
    Internal method to merge all the temporary files into the output
    spatially indexing as we go.
    """
    controls = lidarprocessor.Controls()
    controls.setSpatialProcessing(False)

    # open in read mode
    driverExtentList = []
    for fname, subExtent in extentList:
        userClass = lidarprocessor.LidarFile(fname, generic.READ)
        driver = spdv4.SPDV4File(fname, generic.READ, controls, userClass)
        
        data = (subExtent, driver)
        driverExtentList.append(data)


    # create output file    
    userClass = lidarprocessor.LidarFile(outfile, generic.CREATE)
    userClass.setLiDARDriverOption('SCALING_BUT_NO_DATA_WARNING', False)
    controls = lidarprocessor.Controls()
    controls.setSpatialProcessing(True)
    outDriver = spdv4.SPDV4File(outfile, generic.CREATE, controls, userClass)
    pixGrid = pixelgrid.PixelGridDefn(xMin=extent.xMin, xMax=extent.xMax,
                yMin=extent.yMin, yMax=extent.yMax, projection=wkt,
                xRes=extent.binSize, yRes=extent.binSize)
    outDriver.setPixelGrid(pixGrid)
    
    # update header
    nrows,ncols = pixGrid.getDimensions()
    header['NUMBER_BINS_X'] = ncols
    header['NUMBER_BINS_Y'] = nrows

    # clobber these values since we don't want to start with the
    # numbers from the original file; they will be reset to 0 in the new file
    del header['NUMBER_OF_POINTS']
    del header['NUMBER_OF_PULSES']
    # these too
    del header['GENERATING_SOFTWARE']
    del header['CREATION_DATETIME']
    
    progress = cuiprogress.GDALProgressBar()
    progress.setLabelText('Merging...')
    progress.setTotalSteps(len(extentList))
    progress.setProgress(0)
    nFilesProcessed = 0
    nFilesWritten = 0
    for subExtent, driver in driverExtentList:

        # read in all the data
        # NOTE: can't write data in blocks as the driver needs to be able to 
        # sort all the data in one go.
        bDataWritten = False
        npulses = driver.getTotalNumberPulses()
        if npulses > 0:
            pulseRange = generic.PulseRange(0, npulses)
            driver.setPulseRange(pulseRange)
            pulses = driver.readPulsesForRange()
            points = driver.readPointsByPulse()
            waveformInfo = driver.readWaveformInfo()
            recv = driver.readReceived()
            trans = driver.readTransmitted()

            outDriver.setExtent(subExtent)
            if nFilesWritten == 0:
                copyScaling(driver, outDriver)
                outDriver.setHeader(header)

            # on create, a spatial index is created
            outDriver.writeData(pulses, points, trans, recv, 
                            waveformInfo)
            bDataWritten = True

        # close the driver while we are here
        driver.close()
        
        if bDataWritten:
            nFilesWritten += 1
            
        nFilesProcessed += 1
        progress.setProgress(nFilesProcessed)

    outDriver.close()
Example #9
def splitFileIntoTiles(infiles, binSize=1.0, blockSize=None, 
        tempDir='.', extent=None, indexType=INDEX_CARTESIAN,
        pulseIndexMethod=PULSE_INDEX_FIRST_RETURN, 
        footprint=lidarprocessor.UNION, outputFormat='SPDV4',
        buildPulses=False):
    """
    Takes a filename (or list of filenames) and creates a tempfile for every 
    block (using blockSize).
    If blockSize isn't set then it is picked using BLOCKSIZE_N_BLOCKS.
    binSize is the size of the bins to create the spatial index.
    indexType is one of the INDEX_* constants.
    pulseIndexMethod is one of the PULSE_INDEX_* constants.
    footprint is one of lidarprocessor.UNION or lidarprocessor.INTERSECTION
    and is how to combine extents if there is more than one file.
    outputFormat is either 'SPDV4' or 'LAS'. 'LAS' output is only supported
    when the input is 'LAS'.
    buildPulses is only relevant for 'LAS' input and determines whether to
    build the pulse structure or not.

    returns the header of the first input file, the extent used and a list
    of (fname, extent) tuples that contain the information for 
    each tempfile.
    """

    if isinstance(infiles, basestring):
        infiles = [infiles]

    # use the first file for the header. It isn't clear how to combine
    # headers from multiple inputs, or whether one should.
    # Leave it unset for now; it gets filled in while working out the extent.
    firstHeader = None
    
    if extent is None:
        # work out from headers
        pixGrid = None
        for infile in infiles:
            info = generic.getLidarFileInfo(infile)
            header = info.header

            if firstHeader is None:
                firstHeader = header

            try:
                if indexType == INDEX_CARTESIAN:
                    xMax = header['X_MAX']
                    xMin = header['X_MIN']
                    yMax = header['Y_MAX']
                    yMin = header['Y_MIN']
                elif indexType == INDEX_SPHERICAL:
                    xMax = header['AZIMUTH_MAX']
                    xMin = header['AZIMUTH_MIN']
                    yMax = header['ZENITH_MAX']
                    yMin = header['ZENITH_MIN']
                elif indexType == INDEX_SCAN:
                    xMax = header['SCANLINE_IDX_MAX']
                    xMin = header['SCANLINE_IDX_MIN']
                    yMax = header['SCANLINE_MAX']
                    yMin = header['SCANLINE_MIN']
                else:
                    msg = 'unsupported indexing method'
                    raise generic.LiDARSpatialIndexNotAvailable(msg)
            except KeyError:
                msg = 'info for creating bounding box not available'
                raise generic.LiDARFunctionUnsupported(msg)

            newPixGrid = pixelgrid.PixelGridDefn(xMin=xMin, xMax=xMax, 
                            yMin=yMin, yMax=yMax, xRes=binSize, yRes=binSize)
            if pixGrid is None:
                pixGrid = newPixGrid
            elif footprint == lidarprocessor.UNION:
                pixGrid = pixGrid.union(newPixGrid)
            elif footprint == lidarprocessor.INTERSECTION:
                pixGrid = pixGrid.intersection(newPixGrid)
            else:
                msg = 'Unsupported footprint option'
                raise generic.LiDARFunctionUnsupported(msg)

        # TODO: we treat points as being in the block when they are >=
        # the min coords and < the max coords. What happens on the bottom
        # and right margins?? We could possibly miss points that are there.

        # round the coords to the nearest multiple
        xMin = numpy.floor(pixGrid.xMin / binSize) * binSize
        yMin = numpy.floor(pixGrid.yMin / binSize) * binSize
        xMax = numpy.ceil(pixGrid.xMax / binSize) * binSize
        yMax = numpy.ceil(pixGrid.yMax / binSize) * binSize
            
        extent = Extent(xMin, xMax, yMin, yMax, binSize)
        
    else:
        # ensure that our binSize comes from their extent
        binSize = extent.binSize

        # get the first header since we aren't doing the above
        info = generic.getLidarFileInfo(infiles[0])
        firstHeader = info.header
    
    if blockSize is None:
        minAxis = min(extent.xMax - extent.xMin, extent.yMax - extent.yMin)
        blockSize = min(minAxis / BLOCKSIZE_N_BLOCKS, 200.0)
        # make it a multiple of binSize
        blockSize = int(numpy.ceil(blockSize / binSize)) * binSize
    else:
        # ensure that their given block size can be evenly divided by 
        # the binSize
        # the modulo operator doesn't work too well with floats 
        # so we take a different approach
        a = blockSize / binSize
        if a != int(a):
            msg = 'blockSize must be evenly divisible by the binSize'
            raise generic.LiDARInvalidData(msg)
        
    extentList = []
    subExtent = Extent(extent.xMin, extent.xMin + blockSize, 
            extent.yMax - blockSize, extent.yMax, binSize)
    controls = lidarprocessor.Controls()
    controls.setSpatialProcessing(False)

    tmpSuffix = '.' + outputFormat.lower()

    bMoreToDo = True
    while bMoreToDo:
        fd, fname = tempfile.mkstemp(suffix=tmpSuffix, dir=tempDir)
        os.close(fd)
        
        userClass = lidarprocessor.LidarFile(fname, generic.CREATE)
        if outputFormat == 'SPDV4':
            userClass.setLiDARDriverOption('SCALING_BUT_NO_DATA_WARNING', False)
            driver = spdv4.SPDV4File(fname, generic.CREATE, controls, userClass)
        elif outputFormat == 'LAS':
            driver = las.LasFile(fname, generic.CREATE, controls, userClass)
        else:
            msg = 'Unsupported output format %s' % outputFormat
            raise generic.LiDARFunctionUnsupported(msg)
        data = (copy.copy(subExtent), driver)
        extentList.append(data)

        # move it along
        subExtent.xMin += blockSize
        subExtent.xMax += blockSize

        if subExtent.xMin >= extent.xMax:
            # next line down
            subExtent.xMin = extent.xMin
            subExtent.xMax = extent.xMin + blockSize
            subExtent.yMax -= blockSize
            subExtent.yMin -= blockSize
            
        # done?
        bMoreToDo = subExtent.yMax > extent.yMin

    # ok now set up to read the input files using lidarprocessor
    dataFiles = lidarprocessor.DataFiles()
    dataFiles.inputs = []

    for infile in infiles:
        input = lidarprocessor.LidarFile(infile, lidarprocessor.READ)

        # must be a better way of doing this, but this is what 
        # translate does. We don't know what formats we are getting ahead of time
        info = generic.getLidarFileInfo(infile)
        inFormat = info.getDriverName()
        if inFormat == 'LAS':
            input.setLiDARDriverOption('BUILD_PULSES', buildPulses)

        dataFiles.inputs.append(input)
        
    controls = lidarprocessor.Controls()
    progress = cuiprogress.GDALProgressBar()
    progress.setLabelText('Splitting...')
    controls.setProgress(progress)
    controls.setSpatialProcessing(False)
    controls.setMessageHandler(lidarprocessor.silentMessageFn)
        
    otherArgs = lidarprocessor.OtherArgs()
    otherArgs.outList = extentList
    otherArgs.indexType = indexType
    otherArgs.pulseIndexMethod = pulseIndexMethod
        
    lidarprocessor.doProcessing(classifyFunc, dataFiles, controls=controls, 
                otherArgs=otherArgs)
    
    # close all the output files and save their names to return
    newExtentList = []
    for subExtent, driver in extentList:
        fname = driver.fname
        driver.close()

        data = (fname, subExtent)
        newExtentList.append(data)

    return firstHeader, extent, newExtentList
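
A minimal usage sketch with a hypothetical input file; blockSize is left unset so it is derived from the extent and BLOCKSIZE_N_BLOCKS:

# hypothetical input, for illustration only
header, extent, tileList = splitFileIntoTiles('survey.las', binSize=1.0,
                                              tempDir='/tmp', buildPulses=True)
for fname, subExtent in tileList:
    print(fname, subExtent.xMin, subExtent.yMax)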
Example #10
def translate(info,
              infile,
              outfile,
              expectRange=None,
              spatial=None,
              extent=None,
              scaling=None,
              epsg=None,
              binSize=None,
              buildPulses=False,
              pulseIndex=None,
              nullVals=None,
              constCols=None,
              useLASScaling=False):
    """
    Main function which does the work.

    * Info is a fileinfo object for the input file.
    * infile and outfile are paths to the input and output files respectively.
    * expectRange is a list of tuples with (type, varname, min, max).
    * spatial is True or False - dictates whether we are processing spatially or not.
        If True then spatial index will be created on the output file on the fly.
    * extent is a tuple of values specifying the extent to work with. 
        xmin ymin xmax ymax
    * scaling is a list of tuples with (type, varname, dtype, gain, offset).
    * if epsg is not None, it should be an EPSG number to use as the coordinate system
    * binSize is the bin size used by the LAS spatial index
    * buildPulses dictates whether to attempt to build the pulse structure
    * pulseIndex should be 'FIRST_RETURN' or 'LAST_RETURN' and determines how the
        pulses are indexed.
    * nullVals is a list of tuples with (type, varname, value)
    * constCols is a list of tuples with (type, varname, dtype, value)
    * if useLASScaling is True, then the scaling used in the LAS file
        is used for columns. Overrides anything given in 'scaling'
    
    """
    scalingsDict = translatecommon.overRideDefaultScalings(scaling)

    if epsg is None and (info.wkt is None or len(info.wkt) == 0):
        msg = 'No projection set in las file. Must set EPSG on command line'
        raise generic.LiDARInvalidSetting(msg)

    if spatial and not info.hasSpatialIndex:
        msg = 'Spatial processing requested but file does not have spatial index'
        raise generic.LiDARInvalidSetting(msg)

    if spatial and binSize is None:
        msg = "For spatial processing, the bin size must be set"
        raise generic.LiDARInvalidSetting(msg)

    if extent is not None and not spatial:
        msg = 'Extent can only be set when processing spatially'
        raise generic.LiDARInvalidSetting(msg)

    # set up the variables
    dataFiles = lidarprocessor.DataFiles()

    dataFiles.input1 = lidarprocessor.LidarFile(infile, lidarprocessor.READ)
    if pulseIndex == 'FIRST_RETURN':
        dataFiles.input1.setLiDARDriverOption('PULSE_INDEX', las.FIRST_RETURN)
    elif pulseIndex == 'LAST_RETURN':
        dataFiles.input1.setLiDARDriverOption('PULSE_INDEX', las.LAST_RETURN)
    else:
        msg = "Pulse index argument not recognised."
        raise generic.LiDARInvalidSetting(msg)

    dataFiles.input1.setLiDARDriverOption('BUILD_PULSES', buildPulses)

    if spatial:
        dataFiles.input1.setLiDARDriverOption('BIN_SIZE', float(binSize))

    controls = lidarprocessor.Controls()
    progress = cuiprogress.GDALProgressBar()
    controls.setProgress(progress)
    controls.setSpatialProcessing(spatial)

    otherArgs = lidarprocessor.OtherArgs()
    otherArgs.scaling = scalingsDict
    otherArgs.epsg = epsg
    otherArgs.expectRange = expectRange
    otherArgs.lasInfo = info
    otherArgs.nullVals = nullVals
    otherArgs.constCols = constCols
    otherArgs.useLASScaling = useLASScaling

    if extent is not None:
        extent = [float(x) for x in extent]
        pixgrid = pixelgrid.PixelGridDefn(xMin=extent[0],
                                          yMin=extent[1],
                                          xMax=extent[2],
                                          yMax=extent[3],
                                          xRes=binSize,
                                          yRes=binSize)
        controls.setReferencePixgrid(pixgrid)
        controls.setFootprint(lidarprocessor.BOUNDS_FROM_REFERENCE)

    dataFiles.output1 = lidarprocessor.LidarFile(outfile,
                                                 lidarprocessor.CREATE)
    dataFiles.output1.setLiDARDriver('SPDV4')
    dataFiles.output1.setLiDARDriverOption('SCALING_BUT_NO_DATA_WARNING',
                                           False)

    lidarprocessor.doProcessing(transFunc,
                                dataFiles,
                                controls=controls,
                                otherArgs=otherArgs)
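
A minimal usage sketch with hypothetical file names and an example EPSG code; info is the fileinfo object for the input file:

# hypothetical call, for illustration only
info = generic.getLidarFileInfo('input.las')
translate(info, 'input.las', 'output.spdv4', spatial=True, binSize=1.0,
          epsg=28356, pulseIndex='FIRST_RETURN', buildPulses=True)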