def extract_hdf5_points(inputPath):
    '''Extract data from an Icebridge hdf5 file.
       Use this ASP format code to read: --csv-format 1:lat,2:lon,3:height_above_datum '''

    # Output CSV lives next to the input file. NOTE(review): if inputPath does
    # not end in '.h5' this equals inputPath itself -- callers must pass .h5 files.
    outputPath = inputPath.replace('.h5', '.csv')

    # Temporary one-column text files, one per dataset.
    tempLat = inputPath + '_lat.txt'
    tempLon = inputPath + '_lon.txt'
    tempAlt = inputPath + '_alt.txt'

    lookInLibexec = True
    execPath = asp_system_utils.which('h5dump', lookInLibexec)

    # Dump each dataset of interest to its own one-column file.
    cmd = execPath + ' -o '+tempLat+' -m "%.7f" --noindex  -w 20 --dataset=latitude  ' + inputPath
    os.system(cmd)
    cmd = execPath + ' -o '+tempLon+' -m "%.7f" --noindex  -w 20 --dataset=longitude ' + inputPath
    os.system(cmd)
    cmd = execPath + ' -o '+tempAlt+' -m "%.7f" --noindex  -w 20 --dataset=elevation ' + inputPath
    os.system(cmd)

    # Merge the three one-column files into a single three-column file.
    execPath = asp_system_utils.which('paste', lookInLibexec)
    cmd = execPath + ' ' + tempLat + ' ' + tempLon + ' ' + tempAlt + ' > ' + outputPath
    os.system(cmd)

    # Clean up the temporary files.
    if os.path.exists(tempLat): os.remove(tempLat)
    if os.path.exists(tempLon): os.remove(tempLon)
    if os.path.exists(tempAlt): os.remove(tempAlt)

    # Fixed: the original used Python 2 print statements, which are syntax
    # errors under Python 3 and inconsistent with the rest of the file.
    if os.path.exists(outputPath):
        print('Wrote file: ' + outputPath)
    else:
        print('Failed to write file ' + outputPath + '!!!')
Example #2
0
def corrFireball(demFolder, corrDemFolder, isNorth):
    '''Correct Fireball DEMs'''

    logger = logging.getLogger(__name__)
    logger.info('Correcting Fireball DEMs ...')

    # Make sure the output folder exists.
    os.system('mkdir -p ' + corrDemFolder)

    # Run the correction tool on every DEM found in the input folder.
    for fileName in os.listdir(demFolder):

        inputPath = os.path.join(demFolder, fileName)
        if not icebridge_common.isDEM(inputPath):
            continue

        # Keep the timestamp and frame number in the output file name.
        outputPath = os.path.join(corrDemFolder, os.path.basename(inputPath))

        # Do not redo files that already exist.
        if os.path.exists(outputPath):
            logger.info("File exists, skipping: " + outputPath)
            continue

        execPath = asp_system_utils.which('correct_icebridge_l3_dem')
        cmd = '%s %s %s %d' % (execPath, inputPath, outputPath, isNorth)
        logger.info(cmd)
        os.system(cmd)

        # Verify the tool actually produced a usable output.
        if not icebridge_common.isValidImage(outputPath):
            raise Exception('Failed to convert dem file: ' + fileName)
def extract_qi_points(inputPath):
    '''Extract data from an Icebridge qi file.
       Use this ASP format code to read: --csv-format 1:lat,2:lon,3:height_above_datum '''

    # The acquisition date is encoded in the file name between the
    # first and the last underscore.
    baseName   = os.path.basename(inputPath)
    first      = baseName.find('_')
    last       = baseName.rfind('_')
    dateString = baseName[first+1:last]
    year       = int(dateString[0:4])
    month      = int(dateString[4:6])

    # Files from before October 2010 are big endian; later ones are
    # little endian and need the -L flag.
    if (year < 2010) or ((year == 2010) and (month < 10)):
        endianFlag = ' '
    else:
        endianFlag = ' -L '

    outputPath = inputPath.replace('.qi', '.csv')

    # Convert with the qi2txt tool, writing straight to the output CSV.
    execPath = asp_system_utils.which('qi2txt', True)
    cmd = execPath + ' -S' + endianFlag + inputPath + ' > ' + outputPath
    print(cmd)
    os.system(cmd)

    if os.path.exists(outputPath):
        print ('Wrote file: ' + outputPath)
    else:
        print ('Failed to write file ' + outputPath + '!!!')
def extract_hdf5_points(inputPath):
    '''Extract data from an Icebridge hdf5 file.
       Use this ASP format code to read: --csv-format 1:lat,2:lon,3:height_above_datum '''

    # NOTE(review): if inputPath does not end in '.h5' this equals inputPath
    # itself -- callers are expected to pass .h5 files.
    outputPath = inputPath.replace('.h5', '.csv')

    # Temporary one-column text files, one per dataset.
    tempLat = inputPath + '_lat.txt'
    tempLon = inputPath + '_lon.txt'
    tempAlt = inputPath + '_alt.txt'

    lookInLibexec = True
    execPath = asp_system_utils.which('h5dump', lookInLibexec)
    
    # Extract the three values of interest one at a time
    cmd = execPath + ' -o '+tempLat+' -m "%.7f" --noindex  -w 20 --dataset=latitude  ' + \
          inputPath + ' >/dev/null'
    print(cmd)
    os.system(cmd)
    cmd = execPath + ' -o '+tempLon+' -m "%.7f" --noindex  -w 20 --dataset=longitude ' + \
          inputPath + ' >/dev/null'
    print(cmd)
    os.system(cmd)
    cmd = execPath + ' -o '+tempAlt+' -m "%.7f" --noindex  -w 20 --dataset=elevation ' + \
          inputPath + ' >/dev/null'
    print(cmd)
    os.system(cmd)
    
    # Merge the three one column files into a single three column file
    execPath = asp_system_utils.which('paste', lookInLibexec)
    cmd = execPath + ' ' + tempLat + ' ' + tempLon + ' ' + tempAlt + ' > ' + outputPath
    print(cmd)
    os.system(cmd)
    
    # Clean up the temporary one-column files.
    if os.path.exists(tempLat): os.remove(tempLat)
    if os.path.exists(tempLon): os.remove(tempLon)
    if os.path.exists(tempAlt): os.remove(tempAlt)
    
    # Remove any trailing commas in lines and strip all lines without commas.
    cmd = "sed -i 's/,$//;/,/!d' " + outputPath
    print(cmd)
    os.system(cmd)
    
    if os.path.exists(outputPath):
        print('Wrote: ' + outputPath)
    else:
        print('Failed to write: ' + outputPath)
Example #5
0
def convertCoordinate(input_srs_string, output_srs_string, x, y):
    '''Convert a single 2D coordinate between proj.4 coordinate systems.'''

    # Feed the point to gdaltransform on stdin and parse its stdout.
    gdal_tool = asp_system_utils.which('gdaltransform')
    args = [gdal_tool, '-s_srs', input_srs_string, '-t_srs', output_srs_string]
    proc = subprocess.Popen(args, stdin=subprocess.PIPE, stdout=subprocess.PIPE, encoding = 'utf8')
    point_text = str(x) + " " + str(y)
    out, err = proc.communicate(input = point_text)

    # The tool prints the transformed coordinate as whitespace-separated numbers.
    tokens = out.split()
    return (float(tokens[0]), float(tokens[1]))
Example #6
0
def convertCoords(x, y, projStringIn, projStringOut):
    '''Convert coordinates from one projection to another'''

    # Using subprocess32 to access the timeout argument which is not always present in subprocess
    cmd = [asp_system_utils.which('gdaltransform'), '-s_srs', projStringIn, '-t_srs', projStringOut]
    #print(" ".join(cmd))
    # Fixed: universal_newlines=True opens the pipes in text mode. Without it
    # the str passed to communicate() below raises a TypeError on Python 3
    # (binary pipes require bytes). The duplicate of this function elsewhere
    # in this file already carries this fix.
    p = subprocess32.Popen(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE, shell=False,
                           universal_newlines=True)
    textOutput, err = p.communicate( ('%f %f\n' % (x, y)), timeout=0.5 )
    parts = textOutput.split()

    return ( float(parts[0]), float(parts[1]) )
def fetchIndices(options, logger):
    '''Fetch the csv indices of available files.'''
    logger.info("Fetch indices from NSIDC.")

    # Invoke the full processing script in index-fetch-only mode and time it.
    pythonPath = asp_system_utils.which('python')
    scriptPath = icebridge_common.fullPath('full_processing_script.py')
    cmd = ('{} {} --camera-calibration-folder {} --reference-dem-folder {} '
           '--site {} --yyyymmdd {} --stop-after-index-fetch --no-nav ').format(
               pythonPath, scriptPath, options.inputCalFolder,
               options.refDemFolder, options.site, options.yyyymmdd)
    logger.info(cmd)
    start_time()
    os.system(cmd)
    stop_time("fetch index", logger)
Example #8
0
def fetchIndices(options, logger):
    '''Fetch the csv indices of available files.'''
    logger.info("Fetch indices from NSIDC.")
    # Re-invoke the full processing script in index-fetch-only mode
    # (--stop-after-index-fetch) as a child process.
    pythonPath = asp_system_utils.which('python')
    cmd = ( (pythonPath + ' ' + icebridge_common.fullPath('full_processing_script.py') + \
             ' --camera-calibration-folder %s --reference-dem-folder %s --site %s ' + \
             '--yyyymmdd %s --stop-after-index-fetch --no-nav ' ) % \
            (options.inputCalFolder, options.refDemFolder, options.site, options.yyyymmdd))
    logger.info(cmd)
    # start_time/stop_time are timing helpers defined elsewhere in this module.
    start_time()
    os.system(cmd)
    stop_time("fetch index", logger)
Example #9
0
def convertCoords(x, y, projStringIn, projStringOut):
    '''Convert coordinates from one projection to another'''

    # Using subprocess32 to access the timeout argument which is not always present in subprocess
    cmd = [asp_system_utils.which('gdaltransform'), '-s_srs', projStringIn, '-t_srs', projStringOut]
    #print(" ".join(cmd))
    # universal_newlines=True opens the pipes in text mode so that the str
    # passed to communicate() below is accepted on Python 3.
    p = subprocess32.Popen(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE, shell=False,
                           universal_newlines=True)
    textOutput, err = p.communicate( ('%f %f\n' % (x, y)), timeout=0.5 )
    # gdaltransform prints the converted point as whitespace-separated numbers.
    parts = textOutput.split()

    return ( float(parts[0]), float(parts[1]) )
def isValidImage(filename):
    '''Return True if the file exists and gdalinfo can parse it as an image.'''

    if not os.path.exists(filename):
        return False

    gdalinfoPath = asp_system_utils.which("gdalinfo")

    # Fixed: pass the arguments as a list with the default shell=False so
    # that file names containing spaces or shell metacharacters are handled
    # correctly, instead of building a shell command by string concatenation.
    p = subprocess.Popen([gdalinfoPath, filename], stdout=subprocess.PIPE)
    output, error = p.communicate()

    # gdalinfo exits nonzero for files it cannot parse.
    return p.returncode == 0
Example #11
0
def convertCoordinate(input_srs_string, output_srs_string, x, y):
    '''Convert a single 2D coordinate between proj.4 coordinate systems.'''

    cmd = [asp_system_utils.which('gdaltransform'), '-s_srs', input_srs_string, '-t_srs', output_srs_string]
    try:
        # Fancier way, if installed. Raises NameError when the optional
        # subprocess32 module is absent, or TimeoutExpired on a hang.
        p = subprocess32.Popen(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE, shell=False,
                               universal_newlines=True)
        text, err = p.communicate( ('%f %f\n' % (x, y)), timeout=0.5 )    
    except Exception:
        # Simpler way. Fixed: use "except Exception" instead of a bare
        # "except" so KeyboardInterrupt/SystemExit are not swallowed here.
        p = subprocess.Popen(cmd, stdin=subprocess.PIPE,stdout=subprocess.PIPE)
        text = p.communicate("%f %f" % (x, y))[0]
    parts = text.split()
    return (float(parts[0]), float(parts[1]))
def convertCoordinate(input_srs_string, output_srs_string, x, y):
    '''Convert a single 2D coordinate between proj.4 coordinate systems.'''

    cmd = [asp_system_utils.which('gdaltransform'), '-s_srs', input_srs_string, '-t_srs', output_srs_string]
    try:
        # Fancier way, if installed.
        # NOTE(review): a NameError (subprocess32 missing) or a timeout both
        # land in the fallback branch below.
        p = subprocess32.Popen(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE, shell=False,
                               universal_newlines=True)
        text, err = p.communicate( ('%f %f\n' % (x, y)), timeout=0.5 )    
    except:
        # Simpler way.
        # NOTE(review): bare except also catches KeyboardInterrupt/SystemExit;
        # consider narrowing to "except Exception".
        p = subprocess.Popen(cmd, stdin=subprocess.PIPE,stdout=subprocess.PIPE)
        text = p.communicate("%f %f" % (x, y))[0]
    parts = text.split()
    return (float(parts[0]), float(parts[1]))
Example #13
0
def label_images(inputFolder, outputFolder, minFrame, maxFrame, trainingPath, numProcesses):
    '''Apply the labeling algorithm to a single image.'''

    # Run the label tool over the requested frame range.
    toolPath = asp_system_utils.which('batch_process_mp.py')
    
    NO_SPLITTING = 1 # Plenty of RAM to load these images
    
    cmd = ('%s %s --output_dir %s --min_frame %d --max_frame %d srgb %s --splits %d --parallel %d' % 
           (toolPath, inputFolder, outputFolder, minFrame, maxFrame, trainingPath, NO_SPLITTING, numProcesses))
    # Fixed: the original used a Python 2 print statement, a syntax error
    # under Python 3 and inconsistent with the rest of the file.
    print(cmd)
    os.system(cmd)
Example #14
0
def doesImageHaveGeoData(imagePath):
    '''Returns true if a file has geo data associated with it'''

    if not os.path.exists(imagePath):
        raise Exception('Image file ' + imagePath + ' not found!')

    # Run gdalinfo quietly and inspect its text report.
    gdalArgs = [asp_system_utils.which('gdalinfo'), imagePath, '-proj4']
    proc = subprocess.Popen(gdalArgs, stdout=subprocess.PIPE, universal_newlines=True)
    textOutput, err = proc.communicate()

    # An empty coordinate system string in the report means no geo data;
    # otherwise assume the file is georeferenced.
    return "Coordinate System is `'" not in textOutput
Example #15
0
def doesImageHaveGeoData(imagePath):
    '''Returns true if a file has geo data associated with it'''
    
    if not os.path.exists(imagePath):
        raise Exception('Image file ' + imagePath + ' not found!')
    
    # Call command line tool silently
    cmd = [asp_system_utils.which('gdalinfo'), imagePath, '-proj4']
    p = subprocess.Popen(cmd, stdout=subprocess.PIPE, universal_newlines=True)
    textOutput, err = p.communicate()
    
    # For now we just do a very simple check: gdalinfo reports an empty
    # coordinate system string (back-quote + apostrophe) when no geo data
    # is attached to the file.
    if "Coordinate System is `'" in textOutput:
        return False
    else:
        return True
Example #16
0
def cameraFromOrthoWrapper(inputPath, orthoPath, inputCamFile, outputCamFile, \
                           refDemPath, numThreads):
    '''Generate a camera model from a single ortho file'''

    logger = logging.getLogger(__name__)

    # Call ortho2pinhole command
    ortho2pinhole = asp_system_utils.which("ortho2pinhole")
    cmd = (('%s %s %s %s %s --reference-dem %s --threads %d') %
           (ortho2pinhole, inputPath, orthoPath, inputCamFile, outputCamFile,
            refDemPath, numThreads))
    logger.info(cmd)
    os.system(cmd)

    if not os.path.exists(outputCamFile):
        # This function is getting called from a pool, so just log the failure.
        # Fixed: the original referenced the undefined name 'orthoFile' here,
        # which raised a NameError instead of logging the intended message.
        logger.error('Failed to convert ortho file: ' + orthoPath)
Example #17
0
def label_images(inputFolder, outputFolder, minFrame, maxFrame, trainingPath,
                 numProcesses):
    '''Apply the labeling algorithm to a single image.'''

    # Run the label tool over the requested frame range.
    toolPath = asp_system_utils.which('batch_process_mp.py')

    NO_SPLITTING = 1  # Plenty of RAM to load these images

    cmd = (
        '%s %s --output_dir %s --min_frame %d --max_frame %d srgb %s --splits %d --parallel %d'
        % (toolPath, inputFolder, outputFolder, minFrame, maxFrame,
           trainingPath, NO_SPLITTING, numProcesses))
    # Fixed: the original used a Python 2 print statement, a syntax error
    # under Python 3 and inconsistent with the rest of the file.
    print(cmd)
    os.system(cmd)
def convertJpegs(jpegFolder, imageFolder, startFrame, stopFrame, skipValidate,
                 cameraMounting, logger):
    '''Convert jpeg images from RGB to single channel.
       Returns false if any files failed.'''

    badFiles = False
    
    logger.info('Converting input images to grayscale...')

    os.system('mkdir -p ' + imageFolder)

    # Loop through all the input images

    jpegIndexPath = icebridge_common.csvIndexFile(jpegFolder)
    if not os.path.exists(jpegIndexPath):
        raise Exception("Error: Missing jpeg index file: " + jpegIndexPath + ".")
    (jpegFrameDict, jpegUrlDict) = icebridge_common.readIndexFile(jpegIndexPath,
                                                                  prependFolder = True)
    
    # Need the orthos to get the timestamp
    orthoFolder = icebridge_common.getOrthoFolder(os.path.dirname(jpegFolder))
    orthoIndexPath = icebridge_common.csvIndexFile(orthoFolder)
    if not os.path.exists(orthoIndexPath):
        raise Exception("Error: Missing ortho index file: " + orthoIndexPath + ".")
    (orthoFrameDict, orthoUrlDict) = icebridge_common.readIndexFile(orthoIndexPath,
                                                                  prependFolder = True)
    
    if not skipValidate:
        # Track which files were already validated so the work is not redone.
        validFilesList = icebridge_common.validFilesList(os.path.dirname(jpegFolder),
                                                         startFrame, stopFrame)
        validFilesSet = set()
        validFilesSet = icebridge_common.updateValidFilesListFromDisk(validFilesList, validFilesSet)
        numInitialValidFiles = len(validFilesSet)
        
    # Fast check for missing images. This is fragile, as maybe it gets
    # the wrong file with a similar name, but an honest check is very slow.
    imageFiles = icebridge_common.getTifs(imageFolder, prependFolder = True)
    imageFrameDict = {}
    for imageFile in imageFiles:
        frame = icebridge_common.getFrameNumberFromFilename(imageFile)
        if frame < startFrame or frame > stopFrame: continue
        imageFrameDict[frame] = imageFile
        
    for frame in sorted(jpegFrameDict.keys()):

        inputPath = jpegFrameDict[frame]
        
        # Only deal with frames in range
        if not ( (frame >= startFrame) and (frame <= stopFrame) ):
            continue

        if frame in imageFrameDict.keys() and skipValidate:
            # Fast, hackish check
            continue

        if frame not in orthoFrameDict:
            logger.info("Error: Could not find ortho image for jpeg frame: " + str(frame))
            # Don't want to throw here. Just ignore the missing ortho
            continue
        
        # Make sure the timestamp and frame number are in the output file name
        try:
            outputPath = icebridge_common.jpegToImageFile(inputPath, orthoFrameDict[frame])
        except Exception as e:
            logger.info(str(e))
            logger.info("Removing bad file: " + inputPath)
            os.system('rm -f ' + inputPath) # will not throw
            badFiles = True
            continue
        
        # Skip existing valid files
        if skipValidate:
            if os.path.exists(outputPath):
                logger.info("File exists, skipping: " + outputPath)
                continue
        else:
            if outputPath in validFilesSet and os.path.exists(outputPath):
                #logger.info('Previously validated: ' + outputPath) # very verbose
                validFilesSet.add(inputPath) # Must have this
                continue
            
            if icebridge_common.isValidImage(outputPath):
                #logger.info("File exists and is valid, skipping: " + outputPath) # verbose
                # NOTE(review): skipValidate is always False on this branch,
                # so this inner check is redundant.
                if not skipValidate:
                    # Mark both the input and the output as validated
                    validFilesSet.add(inputPath) 
                    validFilesSet.add(outputPath)
                continue
        
        # Use ImageMagick tool to convert from RGB to grayscale
        # - Some image orientations are rotated to make stereo processing easier.
        rotateString = ''
        if cameraMounting == 2: # Flight direction towards top of image
            rotateString = '-rotate 90 '
        if cameraMounting == 3: # Flight direction towards bottom of image
            rotateString = '-rotate -90 '
        cmd = ('%s %s -colorspace Gray %s%s') % \
              (asp_system_utils.which('convert'), inputPath, rotateString, outputPath)
        logger.info(cmd)

        # Run command and fetch its output
        # NOTE(review): cmd.split(" ") breaks on paths containing spaces --
        # the inputs are assumed to be space-free. Confirm with callers.
        p = subprocess.Popen(cmd.split(" "), stdout=subprocess.PIPE, stderr=subprocess.PIPE,
                             universal_newlines=True)
        output, error = p.communicate()
        if p.returncode != 0:
            badFiles = True
            logger.error("Command failed.")
            logger.error("Wiping bad files: " + inputPath + " and " + outputPath + '\n'
                         + output)
            os.system('rm -f ' + inputPath) # will not throw
            os.system('rm -f ' + outputPath) # will not throw

        if not os.path.exists(outputPath):
            badFiles = True
            logger.error('Failed to convert jpeg file: ' + inputPath)
            logger.error("Wiping bad files: " + inputPath + " and " + outputPath + '\n'
                         + output)
            os.system('rm -f ' + inputPath) # will not throw
            os.system('rm -f ' + outputPath) # will not throw

        # Check for corrupted files
        if error is not None:
            output += error
        m = re.match("^.*?premature\s+end", output, re.IGNORECASE|re.MULTILINE|re.DOTALL)
        if m:
            badFiles = True
            logger.error("Wiping bad files: " + inputPath + " and " + outputPath + '\n'
                         + output)
            os.system('rm -f ' + inputPath) # will not throw
            os.system('rm -f ' + outputPath) # will not throw

    if not skipValidate:
        # Write to disk the list of validated files, but only if new
        # validations happened.  First re-read that list, in case a
        # different process modified it in the meantime, such as if two
        # managers are running at the same time.
        numFinalValidFiles = len(validFilesSet)
        if numInitialValidFiles != numFinalValidFiles:
            validFilesSet = icebridge_common.updateValidFilesListFromDisk(validFilesList,
                                                                          validFilesSet)
            icebridge_common.writeValidFilesList(validFilesList, validFilesSet)
            
    if badFiles:
        logger.error("Converstion of JPEGs failed. If any files were corrupted, " +
                     "they were removed, and need to be re-fetched.")
    
    return (not badFiles)
Example #19
0
def getImageGeoInfo(imagePath, getStats=True):
    """Obtains some image geo information from gdalinfo in dictionary format"""
    
    if not os.path.exists(imagePath):
        raise Exception('Error: input file ' + imagePath + ' does not exist!')
    
    outputDict = {}
    
    # Call command line tool silently
    cmd = [asp_system_utils.which('gdalinfo'), imagePath, '-proj4']
    if getStats:
        cmd.append('-stats')
    p = subprocess.Popen(cmd, stdout=subprocess.PIPE, universal_newlines=True)
    textOutput, err = p.communicate()
    
    # Get the size in pixels
    imageSizeLine = asp_string_utils.getLineAfterText(textOutput, 'Size is ')
    sizeVals      = imageSizeLine.split(',')
    outputDict['image_size'] = (int(sizeVals[0]), int(sizeVals[1]))

    # Get origin location and pixel size    
    originLine    = asp_string_utils.getLineAfterText(textOutput, 'Origin = ')
    pixelSizeLine = asp_string_utils.getLineAfterText(textOutput, 'Pixel Size = ')    
    originVals    = asp_string_utils.getNumbersInParentheses(originLine)
    pixelSizeVals = asp_string_utils.getNumbersInParentheses(pixelSizeLine)
    outputDict['origin']     = originVals
    outputDict['pixel_size'] = pixelSizeVals

    # Get bounding box in projected coordinates
    upperLeftLine  = asp_string_utils.getLineAfterText(textOutput, 'Upper Left')
    lowerRightLine = asp_string_utils.getLineAfterText(textOutput, 'Lower Right')
    (minX, maxY)   = asp_string_utils.getNumbersInParentheses(upperLeftLine)
    (maxX, minY)   = asp_string_utils.getNumbersInParentheses(lowerRightLine)
    outputDict['projection_bounds'] = (minX, maxX, minY, maxY)
    outputDict['projection_center'] = ( (minX+maxX)/2.0, (minY+maxY)/2.0 )

    # Get some proj4 values
    outputDict['standard_parallel_1'] = getGdalInfoTagValue(textOutput, 'standard_parallel_1')
    outputDict['central_meridian']    = getGdalInfoTagValue(textOutput, 'central_meridian')

    # Get the projection type
    projStart = textOutput.find('PROJ.4 string is:')
    nextLine  = textOutput.find("'", projStart)+1
    endLine   = textOutput.find("'", nextLine)
    outputDict['proj_string'] = textOutput[nextLine:endLine]
    outputDict['projection'] = 'UNKNOWN'
    if '+proj=eqc' in textOutput:
        outputDict['projection'] = 'EQUIRECTANGULAR'
    elif '+proj=ster' in textOutput:
        outputDict['projection'] = 'POLAR STEREOGRAPHIC'
    
    # Extract this variable which ASP inserts into its point cloud files
    try:
        pointOffsetLine = asp_string_utils.getLineAfterText(textOutput, 'POINT_OFFSET=') # Tag name must be synced with C++ code
        offsetValues    = pointOffsetLine.split(' ')
        outputDict['point_offset'] =  (float(offsetValues[0]), float(offsetValues[1]), float(offsetValues[2]))        
    except:
        pass # In most cases this line will not be present

    # TODO: Currently this does not find much information, and there
    #       is another function in image_utils dedicated to returning statistics.
    if getStats:

        # List of dictionaries per band
        outputDict['band_info'] = []
    
        # Populate band information
        band = 1
        while (True): # Loop until we run out of bands
            bandString = 'Band ' + str(band) + ' Block='
            bandLoc = textOutput.find(bandString)
            if bandLoc < 0: # Ran out of bands
                break
        
            # Found the band, read pertinent information
            bandInfo = {}
        
            # Get the type string
            bandLine = asp_string_utils.getLineAfterText(textOutput, bandString)
            typePos  = bandLine.find('Type=')
            commaPos = bandLine.find(',')
            # NOTE(review): the commaPos-1 end index looks like it drops the
            # last character of the type name -- confirm against the exact
            # string getLineAfterText returns.
            typeName = bandLine[typePos+5:commaPos-1]
            bandInfo['type'] = typeName
        
            # Fixed: the original assigned bandInfo to outputDict['band_info'],
            # clobbering the list initialized above so only the last band
            # survived (as a dict, not a list). Append each band instead.
            outputDict['band_info'].append(bandInfo)
        
            band = band + 1 # Move on to the next band
        
    return outputDict
Example #20
0
def doFetch(options, outputFolder):
    '''Fetch from NSIDC all files matching the given options into outputFolder,
       then validate what was fetched. Returns the number of files that failed
       (or -1 if authentication files are missing). Uses a module-level
       "logger" global, not a local one.'''

    # Verify that required files exist
    home = os.path.expanduser("~")
    if not (os.path.exists(home + '/.netrc')
            and os.path.exists(home + '/.urs_cookies')):
        logger.error(
            'Missing a required authentication file!  See instructions here:\n'
            +
            '    https://nsidc.org/support/faq/what-options-are-available-bulk-downloading-data-https-earthdata-login-enabled'
        )
        return -1

    # Base curl command: netrc auth (-n), follow redirects (-L), Earthdata cookies.
    curlPath = asp_system_utils.which("curl")
    curlOpts = ' -n -L '
    cookiePaths = ' -b ~/.urs_cookies -c ~/.urs_cookies '
    baseCurlCmd = curlPath + curlOpts + cookiePaths

    logger.info('Creating output folder: ' + outputFolder)
    os.system('mkdir -p ' + outputFolder)

    # 'AN' means Antarctica (southern hemisphere).
    isSouth = (options.site == 'AN')
    parsedIndexPath = fetchAndParseIndexFile(options, isSouth, baseCurlCmd,
                                             outputFolder)
    if not icebridge_common.fileNonEmpty(parsedIndexPath):
        # Some dirs are weird, both images, dems, and ortho.
        # Just accept whatever there is, but with a warning.
        logger.info('Warning: Missing index file: ' + parsedIndexPath)

    # Store file information in a dictionary
    # - Keep track of the earliest and latest frame
    logger.info('Reading file list from ' + parsedIndexPath)
    try:
        (frameDict, urlDict) = readIndexFile(parsedIndexPath)
    except:
        # We probably ran into old format index file. Must refetch.
        logger.info('Could not read index file. Try again.')
        options.refetchIndex = True
        parsedIndexPath = fetchAndParseIndexFile(options, isSouth, baseCurlCmd,
                                                 outputFolder)
        (frameDict, urlDict) = readIndexFile(parsedIndexPath)

    # Find the smallest and largest frame actually present in the index.
    allFrames = sorted(frameDict.keys())
    firstFrame = icebridge_common.getLargestFrame()  # start big
    lastFrame = icebridge_common.getSmallestFrame()  # start small
    for frameNumber in allFrames:
        if frameNumber < firstFrame:
            firstFrame = frameNumber
        if frameNumber > lastFrame:
            lastFrame = frameNumber

    if options.allFrames:
        options.startFrame = firstFrame
        options.stopFrame = lastFrame

    # There is always a chance that not all requested frames are available.
    # That is particularly true for Fireball DEMs. Instead of failing,
    # just download what is present and give a warning.
    if options.startFrame not in frameDict:
        logger.info("Warning: Frame " + str(options.startFrame) + \
                    " is not found in this flight.")

    if options.stopFrame and (options.stopFrame not in frameDict):
        logger.info("Warning: Frame " + str(options.stopFrame) + \
                    " is not found in this flight.")

    allFilesToFetch = [
    ]  # Files that we will fetch, relative to the current dir.
    allUrlsToFetch = []  # Full url of each file.

    # Loop through all found frames within the provided range
    currentFileCount = 0
    lastFrame = ""
    if len(allFrames) > 0:
        lastFrame = allFrames[len(allFrames) - 1]

    # Some data types come with side-car files: DEMs have a .tfw, and
    # lidar/ortho/DEM types have an .xml.
    hasTfw = (options.type == 'dem')
    hasXml = ((options.type in LIDAR_TYPES) or (options.type == 'ortho')
              or hasTfw)
    numFetched = 0
    for frame in allFrames:
        if (frame >= options.startFrame) and (frame <= options.stopFrame):

            filename = frameDict[frame]

            # Some files have an associated xml file. DEMs also have a tfw file.
            currFilesToFetch = [filename]
            if hasXml:
                currFilesToFetch.append(icebridge_common.xmlFile(filename))
            if hasTfw:
                currFilesToFetch.append(icebridge_common.tfwFile(filename))

            for filename in currFilesToFetch:
                url = os.path.join(urlDict[frame], filename)
                outputPath = os.path.join(outputFolder, filename)
                allFilesToFetch.append(outputPath)
                allUrlsToFetch.append(url)

    # Honor the cap on the number of files fetched per call, if set.
    if options.maxNumToFetch > 0 and len(
            allFilesToFetch) > options.maxNumToFetch:
        allFilesToFetch = allFilesToFetch[0:options.maxNumToFetch]
        allUrlsToFetch = allUrlsToFetch[0:options.maxNumToFetch]

    icebridge_common.fetchFilesInBatches(baseCurlCmd, MAX_IN_ONE_CALL,
                                         options.dryRun, outputFolder,
                                         allFilesToFetch, allUrlsToFetch,
                                         logger)

    # Verify that all files were fetched and are in good shape
    failedFiles = []
    for outputPath in allFilesToFetch:

        if options.skipValidate: continue

        if not icebridge_common.fileNonEmpty(outputPath):
            logger.info('Missing file: ' + outputPath)
            failedFiles.append(outputPath)
            continue

        if icebridge_common.hasImageExtension(outputPath):
            if not icebridge_common.isValidImage(outputPath):
                logger.info('Found an invalid image. Will wipe it: ' +
                            outputPath)
                if os.path.exists(outputPath): os.remove(outputPath)
                failedFiles.append(outputPath)
                continue
            else:
                logger.info('Valid image: ' + outputPath)

        # Sanity check: XML files must have the right latitude.
        if icebridge_common.fileExtension(outputPath) == '.xml':
            if os.path.exists(outputPath):
                latitude = icebridge_common.parseLatitude(outputPath)
                isGood = hasGoodLat(latitude, isSouth)
                if not isGood:
                    logger.info("Wiping XML file " + outputPath + " with bad latitude " + \
                                str(latitude))
                    os.remove(outputPath)
                    imageFile = icebridge_common.xmlToImage(outputPath)
                    if os.path.exists(imageFile):
                        logger.info("Wiping TIF file " + imageFile + " with bad latitude " + \
                                    str(latitude))
                        os.remove(imageFile)

        # Verify the chcksum
        if hasXml and len(outputPath) >= 4 and outputPath[-4:] != '.xml' \
               and outputPath[-4:] != '.tfw':
            isGood = icebridge_common.hasValidChkSum(outputPath)
            if not isGood:
                xmlFile = icebridge_common.xmlFile(outputPath)
                logger.info('Found invalid data. Will wipe it: ' + outputPath +
                            ' ' + xmlFile)
                if os.path.exists(outputPath): os.remove(outputPath)
                if os.path.exists(xmlFile): os.remove(xmlFile)
                failedFiles.append(outputPath)
                failedFiles.append(xmlFile)
                continue
            else:
                logger.info('Valid chksum: ' + outputPath)

        if hasTfw and icebridge_common.fileExtension(outputPath) == '.tfw':
            isGood = icebridge_common.isValidTfw(outputPath)
            if not isGood:
                xmlFile = icebridge_common.xmlFile(outputPath)
                logger.info('Found invalid data. Will wipe it: ' + outputPath +
                            ' ' + xmlFile)
                if os.path.exists(outputPath): os.remove(outputPath)
                if os.path.exists(xmlFile): os.remove(xmlFile)
                failedFiles.append(outputPath)
                failedFiles.append(xmlFile)
                continue
            else:
                logger.info('Valid tfw file: ' + outputPath)

    numFailed = len(failedFiles)
    if numFailed > 0:
        logger.info("Number of files that could not be processed: " +
                    str(numFailed))

    return numFailed
def convertJpegs(jpegFolder, imageFolder, startFrame, stopFrame, skipValidate,
                 cameraMounting, logger):
    '''Convert jpeg images from RGB to single channel.

       For each jpeg frame in [startFrame, stopFrame], write a grayscale
       TIF to imageFolder, named using the timestamp of the matching ortho
       frame. Inputs/outputs found to be corrupted are wiped from disk.
       Returns False if any files failed.'''

    badFiles = False

    logger.info('Converting input images to grayscale...')

    os.system('mkdir -p ' + imageFolder)

    # The jpeg index maps each frame number to its input file.
    jpegIndexPath = icebridge_common.csvIndexFile(jpegFolder)
    if not os.path.exists(jpegIndexPath):
        raise Exception("Error: Missing jpeg index file: " + jpegIndexPath +
                        ".")
    (jpegFrameDict,
     jpegUrlDict) = icebridge_common.readIndexFile(jpegIndexPath,
                                                   prependFolder=True)

    # Need the orthos to get the timestamp
    orthoFolder = icebridge_common.getOrthoFolder(os.path.dirname(jpegFolder))
    orthoIndexPath = icebridge_common.csvIndexFile(orthoFolder)
    if not os.path.exists(orthoIndexPath):
        raise Exception("Error: Missing ortho index file: " + orthoIndexPath +
                        ".")
    (orthoFrameDict,
     orthoUrlDict) = icebridge_common.readIndexFile(orthoIndexPath,
                                                    prependFolder=True)

    if not skipValidate:
        # Fetch from disk the set of already validated files, if any,
        # so previously checked frames can be skipped cheaply.
        validFilesList = icebridge_common.validFilesList(
            os.path.dirname(jpegFolder), startFrame, stopFrame)
        validFilesSet = set()
        validFilesSet = icebridge_common.updateValidFilesListFromDisk(
            validFilesList, validFilesSet)
        numInitialValidFiles = len(validFilesSet)

    # Fast check for missing images. This is fragile, as maybe it gets
    # the wrong file with a similar name, but an honest check is very slow.
    imageFiles = icebridge_common.getTifs(imageFolder, prependFolder=True)
    imageFrameDict = {}
    for imageFile in imageFiles:
        frame = icebridge_common.getFrameNumberFromFilename(imageFile)
        if frame < startFrame or frame > stopFrame: continue
        imageFrameDict[frame] = imageFile

    # Loop through all the input images
    for frame in sorted(jpegFrameDict.keys()):

        inputPath = jpegFrameDict[frame]

        # Only deal with frames in range
        if not ((frame >= startFrame) and (frame <= stopFrame)):
            continue

        if skipValidate and frame in imageFrameDict:
            # Fast, hackish check
            continue

        if frame not in orthoFrameDict:
            logger.info("Error: Could not find ortho image for jpeg frame: " +
                        str(frame))
            # Don't want to throw here. Just ignore the missing ortho
            continue

        # Make sure the timestamp and frame number are in the output file name
        try:
            outputPath = icebridge_common.jpegToImageFile(
                inputPath, orthoFrameDict[frame])
        except Exception as e:  # was Python-2-only "except Exception, e"
            logger.info(str(e))
            logger.info("Removing bad file: " + inputPath)
            os.system('rm -f ' + inputPath)  # will not throw
            badFiles = True
            continue

        # Skip existing valid files
        if skipValidate:
            if os.path.exists(outputPath):
                logger.info("File exists, skipping: " + outputPath)
                continue
        else:
            if outputPath in validFilesSet and os.path.exists(outputPath):
                #logger.info('Previously validated: ' + outputPath) # very verbose
                validFilesSet.add(inputPath)  # Must have this
                continue

            if icebridge_common.isValidImage(outputPath):
                #logger.info("File exists and is valid, skipping: " + outputPath) # verbose
                # Mark both the input and the output as validated.
                # (A redundant inner "if not skipValidate" was removed here;
                # this branch is only reachable when skipValidate is False.)
                validFilesSet.add(inputPath)
                validFilesSet.add(outputPath)
                continue

        # Use ImageMagick tool to convert from RGB to grayscale
        # - Some image orientations are rotated to make stereo processing easier.
        rotateString = ''
        if cameraMounting == 2:  # Flight direction towards top of image
            rotateString = '-rotate 90'
        if cameraMounting == 3:  # Flight direction towards bottom of image
            rotateString = '-rotate -90'
        cmd = ('%s %s -colorspace Gray %s %s') % \
              (asp_system_utils.which('convert'), inputPath, rotateString, outputPath)
        logger.info(cmd)

        # Run command and fetch its output
        p = subprocess.Popen(cmd.split(" "),
                             stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE)
        output, error = p.communicate()
        if p.returncode != 0:
            badFiles = True
            logger.error("Command failed.")
            logger.error("Wiping bad files: " + inputPath + " and " +
                         outputPath + '\n' + output)
            os.system('rm -f ' + inputPath)  # will not throw
            os.system('rm -f ' + outputPath)  # will not throw
            # Files are already wiped; skip the redundant checks below,
            # which would otherwise log and wipe the same files again.
            continue

        if not os.path.exists(outputPath):
            badFiles = True
            logger.error('Failed to convert jpeg file: ' + inputPath)
            logger.error("Wiping bad files: " + inputPath + " and " +
                         outputPath + '\n' + output)
            os.system('rm -f ' + inputPath)  # will not throw
            os.system('rm -f ' + outputPath)  # will not throw
            continue

        # Check for corrupted files: ImageMagick reports a truncated
        # jpeg as "premature end of ..." on stderr.
        if error is not None:
            output += error
        m = re.match(r"^.*?premature\s+end", output,
                     re.IGNORECASE | re.MULTILINE | re.DOTALL)
        if m:
            badFiles = True
            logger.error("Wiping bad files: " + inputPath + " and " +
                         outputPath + '\n' + output)
            os.system('rm -f ' + inputPath)  # will not throw
            os.system('rm -f ' + outputPath)  # will not throw

    if not skipValidate:
        # Write to disk the list of validated files, but only if new
        # validations happened. First re-read that list, in case another
        # process modified it in the meantime (same pattern as doFetch).
        numFinalValidFiles = len(validFilesSet)
        if numInitialValidFiles != numFinalValidFiles:
            validFilesSet = icebridge_common.updateValidFilesListFromDisk(
                validFilesList, validFilesSet)
            icebridge_common.writeValidFilesList(validFilesList, validFilesSet)

    # Honor the documented contract: False if any files failed.
    return (not badFiles)
# Example #22
# 0
def doFetch(options, outputFolder):
    '''The main fetch function.
       Returns the number of failures.

       Downloads the data files for the requested frame range (and their
       .xml / .tfw companions where applicable) into outputFolder, then
       validates what was fetched, wiping and recording any bad files.
       NOTE(review): "logger" and the constants LIDAR_TYPES and
       MAX_IN_ONE_CALL are not defined in this function; presumably they
       are module-level — confirm.'''

    # Verify that required files exist
    # (curl's -n option below reads Earthdata credentials from ~/.netrc)
    home = os.path.expanduser("~")
    if not (os.path.exists(home + '/.netrc')
            and os.path.exists(home + '/.urs_cookies')):
        logger.error(
            'Missing a required authentication file!  See instructions here:\n'
            +
            '    https://nsidc.org/support/faq/what-options-are-available-bulk-'
            + 'downloading-data-https-earthdata-login-enabled')
        return -1

    # Base curl command: -n use .netrc, -L follow redirects,
    # -b/-c read and write the Earthdata login cookie jar.
    curlPath = asp_system_utils.which("curl")
    curlOpts = ' -n -L '
    cookiePaths = ' -b ~/.urs_cookies -c ~/.urs_cookies '
    baseCurlCmd = curlPath + curlOpts + cookiePaths

    logger.info('Creating output folder: ' + outputFolder)
    os.system('mkdir -p ' + outputFolder)

    # 'AN' is the Antarctic site code (southern hemisphere).
    isSouth = (options.site == 'AN')

    if options.type == 'nav':  # Nav fetching is much less complicated
        return fetchNavData(options, outputFolder)

    parsedIndexPath = fetchAndParseIndexFile(options, isSouth, baseCurlCmd,
                                             outputFolder)
    if not icebridge_common.fileNonEmpty(parsedIndexPath):
        # Some dirs are weird, both images, fireball dems, and ortho.
        # Just accept whatever there is, but with a warning.
        logger.info('Warning: Missing index file: ' + parsedIndexPath)

    # Store file information in a dictionary
    # - Keep track of the earliest and latest frame
    logger.info('Reading file list from ' + parsedIndexPath)
    try:
        (frameDict, urlDict) = icebridge_common.readIndexFile(parsedIndexPath)
    except:
        # We probably ran into old format index file. Must refetch.
        logger.info('Could not read index file. Try again.')
        options.refetchIndex = True
        parsedIndexPath = fetchAndParseIndexFile(options, isSouth, baseCurlCmd,
                                                 outputFolder)
        (frameDict, urlDict) = icebridge_common.readIndexFile(parsedIndexPath)

    if options.stopAfterIndexFetch:
        return 0

    isLidar = (options.type in LIDAR_TYPES)

    allFrames = sorted(frameDict.keys())

    if not isLidar:
        # The lidar frames use a totally different numbering than the image/ortho/dem frames
        firstFrame = icebridge_common.getLargestFrame()  # start big
        lastFrame = icebridge_common.getSmallestFrame()  # start small
        for frameNumber in allFrames:
            if frameNumber < firstFrame:
                firstFrame = frameNumber
            if frameNumber > lastFrame:
                lastFrame = frameNumber

        if options.allFrames:
            options.startFrame = firstFrame
            options.stopFrame = lastFrame

    if isLidar:
        # Based on image frames, determine which lidar frames to fetch.
        if options.ignoreMissingLidar and len(frameDict.keys()) == 0:
            # Nothing we can do if this run has no lidar and we are told to continue
            logger.info("Warning: missing lidar, but continuing.")
            lidarsToFetch = set()
        else:
            lidarsToFetch = lidarFilesInRange(frameDict, outputFolder,
                                              options.startFrame,
                                              options.stopFrame)

    # There is always a chance that not all requested frames are available.
    # That is particularly true for Fireball DEMs. Instead of failing,
    # just download what is present and give a warning.
    if options.startFrame not in frameDict and not isLidar:
        logger.info("Warning: Frame " + str(options.startFrame) +
                    " is not found in this flight.")

    if options.stopFrame and (options.stopFrame
                              not in frameDict) and not isLidar:
        logger.info("Warning: Frame " + str(options.stopFrame) +
                    " is not found in this flight.")

    allFilesToFetch = [
    ]  # Files that we will fetch, relative to the current dir.
    allUrlsToFetch = []  # Full url of each file.

    # Loop through all found frames within the provided range
    # NOTE(review): currentFileCount, lastFrame and numFetched are set up
    # here but never used below in this function — possibly leftovers.
    currentFileCount = 0
    lastFrame = ""
    if len(allFrames) > 0:
        lastFrame = allFrames[len(allFrames) - 1]

    # Fireball DEMs come with .tfw files; lidar, ortho and fireball
    # products come with .xml (checksum/metadata) companions.
    hasTfw = (options.type == 'fireball')
    hasXml = (isLidar or (options.type == 'ortho') or hasTfw)
    numFetched = 0
    skipCount = 0
    for frame in allFrames:

        # Skip frame outside of range
        if isLidar:
            if frameDict[frame] not in lidarsToFetch:
                continue
        else:
            if ((frame < options.startFrame) or (frame > options.stopFrame)):
                continue

        # Handle the frame skip option
        if options.frameSkip > 0:
            if skipCount < options.frameSkip:
                skipCount += 1
                continue
            skipCount = 0

        filename = frameDict[frame]

        # Some files have an associated xml file. Fireball DEMs also have a tfw file.
        currFilesToFetch = [filename]
        if hasXml:
            currFilesToFetch.append(icebridge_common.xmlFile(filename))
        if hasTfw:
            currFilesToFetch.append(icebridge_common.tfwFile(filename))

        for filename in currFilesToFetch:
            url = os.path.join(urlDict[frame], filename)
            outputPath = os.path.join(outputFolder, filename)
            allFilesToFetch.append(outputPath)
            allUrlsToFetch.append(url)

    # Restrict lidar fetch amount according to the parameter
    if (isLidar and options.maxNumLidarToFetch > 0
            and len(allFilesToFetch) > options.maxNumLidarToFetch):

        # Ensure an even number, to fetch both the lidar file and its xml
        if options.maxNumLidarToFetch % 2 == 1:
            options.maxNumLidarToFetch += 1

        allFilesToFetch = allFilesToFetch[0:options.maxNumLidarToFetch]
        allUrlsToFetch = allUrlsToFetch[0:options.maxNumLidarToFetch]

    icebridge_common.fetchFilesInBatches(baseCurlCmd, MAX_IN_ONE_CALL,
                                         options.dryRun, outputFolder,
                                         allFilesToFetch, allUrlsToFetch,
                                         logger)

    # Fetch from disk the set of already validated files, if any
    validFilesList = icebridge_common.validFilesList(
        os.path.dirname(outputFolder), options.startFrame, options.stopFrame)
    validFilesSet = set()
    validFilesSet = icebridge_common.updateValidFilesListFromDisk(
        validFilesList, validFilesSet)
    numInitialValidFiles = len(validFilesSet)

    # Verify that all files were fetched and are in good shape
    failedFiles = []
    for outputPath in allFilesToFetch:

        if options.skipValidate:
            continue

        if not icebridge_common.fileNonEmpty(outputPath):
            logger.info('Missing file: ' + outputPath)
            failedFiles.append(outputPath)
            continue

        if icebridge_common.hasImageExtension(outputPath):
            # Deliberately disabled dead branch, kept for reference.
            if False:
                # This check is just so slow. Turn it off for now.
                # This will impact only the validation of jpegs,
                # as the other files can be validated via the checksum.
                # Jpegs will be validated when converting them to 1 band images
                if outputPath in validFilesSet and os.path.exists(outputPath):
                    #logger.info('Previously validated: ' + outputPath)   # verbose
                    continue
                else:
                    if not icebridge_common.isValidImage(outputPath):
                        logger.info('Found an invalid image. Will wipe it: ' +
                                    outputPath)
                        if os.path.exists(outputPath): os.remove(outputPath)
                        failedFiles.append(outputPath)
                        continue
                    else:
                        logger.info('Valid image: ' + outputPath)
                        validFilesSet.add(outputPath)  # mark it as validated

        # Sanity check: XML files must have the right latitude.
        if icebridge_common.fileExtension(outputPath) == '.xml':
            if outputPath in validFilesSet and os.path.exists(outputPath):
                #logger.info('Previously validated: ' + outputPath) #verbose
                continue
            else:
                if os.path.exists(outputPath):
                    try:
                        # A parse failure indicates a corrupted download.
                        latitude = icebridge_common.parseLatitude(outputPath)
                        logger.info('Valid file: ' + outputPath)
                        validFilesSet.add(outputPath)  # mark it as validated
                    except:
                        # Corrupted file
                        logger.info("Failed to parse latitude, will wipe: " +
                                    outputPath)
                        if os.path.exists(outputPath): os.remove(outputPath)
                        failedFiles.append(outputPath)

                    # On a second thought, don't wipe files with wrong latitude, as
                    # next time we run fetch we will have to fetch them again.
                    # Hopefully they will be ignored.
                    #isGood = hasGoodLat(latitude, isSouth)
                    #if not isGood:
                    #    logger.info("Wiping XML file " + outputPath + " with bad latitude " + \
                    #                str(latitude))
                    #    os.remove(outputPath)
                    #    imageFile = icebridge_common.xmlToImage(outputPath)
                    #    if os.path.exists(imageFile):
                    #        logger.info("Wiping TIF file " + imageFile + " with bad latitude " + \
                    #                    str(latitude))
                    #        os.remove(imageFile)

        # Verify the chcksum
        # (applies to the data file itself, not its .xml/.tfw companions)
        if hasXml and len(outputPath) >= 4 and outputPath[-4:] != '.xml' \
               and outputPath[-4:] != '.tfw':
            if outputPath in validFilesSet and os.path.exists(outputPath):
                #logger.info('Previously validated: ' + outputPath) # verbose
                continue
            else:
                isGood = icebridge_common.hasValidChkSum(outputPath, logger)
                if not isGood:
                    # Wipe both the data file and its xml so both get refetched.
                    xmlFile = icebridge_common.xmlFile(outputPath)
                    logger.info('Found invalid data. Will wipe: ' +
                                outputPath + ' ' + xmlFile)
                    if os.path.exists(outputPath): os.remove(outputPath)
                    if os.path.exists(xmlFile): os.remove(xmlFile)
                    failedFiles.append(outputPath)
                    failedFiles.append(xmlFile)
                    continue
                else:
                    logger.info('Valid file: ' + outputPath)
                    validFilesSet.add(outputPath)

        if hasTfw and icebridge_common.fileExtension(outputPath) == '.tfw':
            if outputPath in validFilesSet and os.path.exists(outputPath):
                #logger.info('Previously validated: ' + outputPath)
                continue
            else:
                isGood = icebridge_common.isValidTfw(outputPath, logger)
                if not isGood:
                    xmlFile = icebridge_common.xmlFile(outputPath)
                    logger.info('Found invalid tfw. Will wipe: ' + outputPath +
                                ' ' + xmlFile)
                    if os.path.exists(outputPath): os.remove(outputPath)
                    if os.path.exists(xmlFile): os.remove(xmlFile)
                    failedFiles.append(outputPath)
                    failedFiles.append(xmlFile)
                    continue
                else:
                    logger.info('Valid tfw file: ' + outputPath)
                    validFilesSet.add(outputPath)

    # Write to disk the list of validated files, but only if new
    # validations happened.  First re-read that list, in case a
    # different process modified it in the meantime, such as if two
    # managers are running at the same time.
    numFinalValidFiles = len(validFilesSet)
    if numInitialValidFiles != numFinalValidFiles:
        validFilesSet = \
                      icebridge_common.updateValidFilesListFromDisk(validFilesList, validFilesSet)
        icebridge_common.writeValidFilesList(validFilesList, validFilesSet)

    numFailed = len(failedFiles)
    if numFailed > 0:
        logger.info("Number of files that could not be processed: " +
                    str(numFailed))

    return numFailed
def main(argsIn):
    '''Generate pinhole camera models from nav data.

       Parses the binary nav (.out) files to text, pairs each ortho file
       with its grayscale image by frame number, then calls the nav2cam
       tool to write a .tsai camera per frame into the output folder.
       Returns 0 on success, -1 on error.'''

    # Command line parsing

    try:
        usage = "usage: camera_models_from_nav.py <image_folder> <ortho_folder> <cal_folder> <nav_folder> <output_folder> [options]"

        parser = optparse.OptionParser(usage=usage)

        parser.add_option('--start-frame',
                          dest='startFrame',
                          default=-1,
                          type='int',
                          help='The frame number to start processing with.')
        parser.add_option('--stop-frame',
                          dest='stopFrame',
                          default=999999,
                          type='int',
                          help='The frame number to finish processing with.')
        parser.add_option("--input-calibration-camera",
                          dest="inputCalCamera",
                          default="",
                          help="Use this input calibrated camera.")
        parser.add_option(
            '--camera-mounting',
            dest='cameraMounting',
            default=0,
            type='int',
            help=
            '0=right-forwards, 1=left-forwards, 2=top-forwards, 3=bottom-forwards.'
        )
        (options, args) = parser.parse_args(argsIn)

        if len(args) < 5:
            print('Error: Missing arguments.')
            print(usage)
            return -1

        # Five required positional arguments.
        imageFolder = os.path.abspath(args[0])
        orthoFolder = os.path.abspath(args[1])
        calFolder = os.path.abspath(args[2])
        navFolder = os.path.abspath(args[3])
        outputFolder = os.path.abspath(args[4])

    except optparse.OptionError as msg:
        # NOTE(review): Usage is not defined in this view; presumably a
        # project-level exception class — confirm.
        raise Usage(msg)

    # Log next to the run directory (the parent of the ortho folder).
    runDir = os.path.dirname(orthoFolder)
    os.system("mkdir -p " + runDir)

    logLevel = logging.INFO  # Make this an option??
    logger = icebridge_common.setUpLogger(runDir, logLevel,
                                          'camera_models_from_nav_log')
    if not os.path.exists(orthoFolder):
        logger.error('Ortho folder ' + orthoFolder + ' does not exist!')
        return -1

    # Find the nav file
    # - There should only be one or two nav files per flight.
    fileList = os.listdir(navFolder)
    fileList = [x for x in fileList if '.out' in x]
    if len(fileList) == 0:
        logger.error('No nav files in: ' + navFolder)
        return -1

    # The parsed text output is named after the first nav file, but all
    # nav files found are appended into it below.
    navPath = os.path.join(navFolder, fileList[0])
    parsedNavPath = navPath.replace('.out', '.txt')

    if not asp_file_utils.fileIsNonZero(navPath):
        logger.error('Nav file ' + navPath + ' is invalid!')
        return -1

    # Create the output file only if it is empty or does not exist
    isNonEmpty = asp_file_utils.fileIsNonZero(parsedNavPath)

    if not isNonEmpty:
        # Initialize the output file as being empty
        logger.info("Create empty file: " + parsedNavPath)
        open(parsedNavPath, 'w').close()

    # Append to the output parsed nav file
    for fileName in fileList:
        # Convert the nav file from binary to text
        navPath = os.path.join(navFolder, fileName)

        with open(navPath, 'r') as f:
            try:
                text = f.readline()
                if 'HTML' in text:
                    # Sometimes the server is down, and instead of the binary nav file
                    # we are given an html file with an error message.
                    logger.info("Have invalid nav file: " + navPath)
                    return -1  # Die in this case!
            except UnicodeDecodeError as e:
                # Got a binary file, that means likely we are good
                pass

        cmd = asp_system_utils.which(
            'sbet2txt.pl') + ' -q ' + navPath + ' >> ' + parsedNavPath
        logger.info(cmd)
        # Only run the conversion when the parsed file started out empty;
        # otherwise a previous run's output is reused.
        if not isNonEmpty:
            os.system(cmd)

    cameraPath = options.inputCalCamera
    if cameraPath == "":
        # No input camera file provided, look one up. It does not matter much,
        # as later ortho2pinhole will insert the correct intrinsics.
        goodFile = False
        fileList = os.listdir(calFolder)
        fileList = [x for x in fileList if (('.tsai' in x) and ('~' not in x))]
        if not fileList:
            logger.error('Unable to find any camera files in ' + calFolder)
            return -1
        for fileName in fileList:
            cameraPath = os.path.join(calFolder, fileName)
            #  Check if this path is valid: a .tsai camera is expected to
            #  contain an 'fu' (focal length) entry.
            with open(cameraPath, 'r') as f:
                for line in f:
                    if 'fu' in line:
                        goodFile = True
                        break
            if goodFile:
                break

    # Get the ortho list
    orthoFiles = icebridge_common.getTifs(orthoFolder)
    logger.info('Found ' + str(len(orthoFiles)) + ' ortho files.')

    # Look up the frame numbers for each ortho file
    # infoDict maps frame -> [orthoFile, imageFile]
    infoDict = {}
    for ortho in orthoFiles:
        # Skip derived products (grayscale and subsampled copies).
        if ('gray' in ortho) or ('sub' in ortho):
            continue
        frame = icebridge_common.getFrameNumberFromFilename(ortho)
        if frame < options.startFrame or frame > options.stopFrame:
            continue
        infoDict[frame] = [ortho, '']

    # Get the image file list
    try:
        imageFiles = icebridge_common.getTifs(imageFolder)
    except Exception as e:
        raise Exception(
            "Cannot continue with nav generation, will resume later when images are created. This is not a fatal error. "
            + str(e))

    logger.info('Found ' + str(len(imageFiles)) + ' image files.')

    # Update the second part of each dictionary object
    for image in imageFiles:
        if ('gray' in image) or ('sub' in image):
            continue
        frame = icebridge_common.getFrameNumberFromFilename(image)
        if frame < options.startFrame or frame > options.stopFrame:
            continue
        if frame not in infoDict:
            logger.info('Image missing ortho file: ' + image)
            # don't throw here, that will mess the whole batch, we will recover
            # the missing one later.
            continue
        infoDict[frame][1] = image

    os.system('mkdir -p ' + outputFolder)
    orthoListFile = os.path.join(
        outputFolder, 'ortho_file_list_' + str(options.startFrame) + "_" +
        str(options.stopFrame) + '.csv')

    # Open the output file for writing
    logger.info("Writing: " + orthoListFile)
    with open(orthoListFile, 'w') as outputFile:

        # Loop through frames in order
        for key in sorted(infoDict):

            # Write the ortho name and the output camera name to the file
            (ortho, image) = infoDict[key]
            if not image:
                #raise Exception('Ortho missing image file: ' +ortho)
                continue
            camera = image.replace('.tif', '.tsai')
            outputFile.write(ortho + ', ' + camera + '\n')

    # Check if we already have all of the output camera files.
    haveAllFiles = True
    with open(orthoListFile, 'r') as inputFile:
        for line in inputFile:
            parts = line.split(',')
            camPath = os.path.join(outputFolder, parts[1].strip())
            if not asp_file_utils.fileIsNonZero(camPath):
                logger.info('Missing file -> ' + camPath)
                haveAllFiles = False
                break

    # Call the C++ tool to generate a camera model for each ortho file
    if not haveAllFiles:
        cmd = (
            'nav2cam --input-cam %s --nav-file %s --cam-list %s --output-folder %s --camera-mounting %d'
            % (cameraPath, parsedNavPath, orthoListFile, outputFolder,
               options.cameraMounting))
        logger.info(cmd)
        os.system(cmd)
    else:
        logger.info("All nav files were already generated.")

    # Generate a kml file for the nav camera files
    kmlPath = os.path.join(outputFolder, 'nav_cameras.kml')

    # This is a hack. If we are invoked from a Pleiades node, do not
    # create this kml file, as nodes will just overwrite each other.
    # This job may happen anyway earlier or later when on the head node.
    if not 'PBS_NODEFILE' in os.environ:
        try:
            tempPath = os.path.join(outputFolder, 'list.txt')
            logger.info('Generating nav camera kml file: ' + kmlPath)
            os.system('ls ' + outputFolder + '/*.tsai > ' + tempPath)
            orbitviz_pinhole = asp_system_utils.which('orbitviz_pinhole')
            cmd = orbitviz_pinhole + ' --hide-labels -o ' + kmlPath + ' --input-list ' + tempPath
            logger.info(cmd)
            asp_system_utils.executeCommand(cmd,
                                            kmlPath,
                                            suppressOutput=True,
                                            redo=False)
            os.remove(tempPath)
        except Exception as e:
            # Best-effort: kml generation failure is not fatal.
            logger.info("Warning: " + str(e))

    logger.info('Finished generating camera models from nav!')

    return 0
def doFetch(options, outputFolder):
    '''The main fetch function.
       Returns the number of failures (0 on success, -1 if the required
       authentication files are missing).

       options      - Parsed command line options. Fields read here include:
                      site, type, startFrame, stopFrame, allFrames, frameSkip,
                      refetchIndex, stopAfterIndexFetch, skipValidate, dryRun,
                      ignoreMissingLidar, maxNumLidarToFetch.
       outputFolder - Folder in which all fetched files are stored.

       NOTE(review): relies on module-level names 'logger', 'LIDAR_TYPES' and
       'MAX_IN_ONE_CALL', plus helpers fetchNavData, fetchAndParseIndexFile
       and lidarFilesInRange defined elsewhere in this file.'''

    # Verify that required files exist.
    # NSIDC downloads need Earthdata credentials in ~/.netrc and a cookie jar.
    home = os.path.expanduser("~")
    if not (os.path.exists(home+'/.netrc') and os.path.exists(home+'/.urs_cookies')):
        logger.error('Missing a required authentication file!  See instructions here:\n' +
                     '    https://nsidc.org/support/faq/what-options-are-available-bulk-' +
                     'downloading-data-https-earthdata-login-enabled')
        return -1

    # Base curl command used for every download: -n reads credentials from
    # ~/.netrc, -L follows redirects, and -b/-c maintain the login session.
    curlPath = asp_system_utils.which("curl")
    curlOpts    = ' -n -L '
    cookiePaths = ' -b ~/.urs_cookies -c ~/.urs_cookies '
    baseCurlCmd = curlPath + curlOpts + cookiePaths

    logger.info('Creating output folder: ' + outputFolder)
    os.system('mkdir -p ' + outputFolder)

    # 'AN' presumably stands for the Antarctic site (southern hemisphere).
    isSouth = (options.site == 'AN')

    if options.type == 'nav': # Nav fetching is much less complicated
        return fetchNavData(options, outputFolder)

    parsedIndexPath = fetchAndParseIndexFile(options, isSouth, baseCurlCmd, outputFolder)
    if not icebridge_common.fileNonEmpty(parsedIndexPath):
        # Some dirs are weird, both images, fireball dems, and ortho.
        # Just accept whatever there is, but with a warning.
        logger.info('Warning: Missing index file: ' + parsedIndexPath)

    # Store file information in a dictionary
    # - Keep track of the earliest and latest frame
    logger.info('Reading file list from ' + parsedIndexPath)
    try:
        (frameDict, urlDict) = icebridge_common.readIndexFile(parsedIndexPath)
    except:
        # We probably ran into old format index file. Must refetch.
        logger.info('Could not read index file. Try again.')
        options.refetchIndex = True
        parsedIndexPath = fetchAndParseIndexFile(options, isSouth, baseCurlCmd, outputFolder)
        (frameDict, urlDict) = icebridge_common.readIndexFile(parsedIndexPath)

    if options.stopAfterIndexFetch:
        return 0

    isLidar = (options.type in LIDAR_TYPES)

    allFrames  = sorted(frameDict.keys())

    if not isLidar:
        # The lidar frames use a totally different numbering than the image/ortho/dem frames
        firstFrame = icebridge_common.getLargestFrame()    # start big
        lastFrame  = icebridge_common.getSmallestFrame()   # start small
        for frameNumber in allFrames:
            if frameNumber < firstFrame:
                firstFrame = frameNumber
            if frameNumber > lastFrame:
                lastFrame = frameNumber

        # With --all-frames, widen the requested range to cover the flight.
        if options.allFrames:
            options.startFrame = firstFrame
            options.stopFrame  = lastFrame

    if isLidar:
        # Based on image frames, determine which lidar frames to fetch.
        if options.ignoreMissingLidar and len(frameDict.keys()) == 0:
            # Nothing we can do if this run has no lidar and we are told to continue
            logger.info("Warning: missing lidar, but continuing.")
            lidarsToFetch = set()
        else:
            lidarsToFetch = lidarFilesInRange(frameDict, outputFolder,
                                              options.startFrame, options.stopFrame)

    # There is always a chance that not all requested frames are available.
    # That is particularly true for Fireball DEMs. Instead of failing,
    # just download what is present and give a warning.
    if options.startFrame not in frameDict and not isLidar:
        logger.info("Warning: Frame " + str(options.startFrame) +
                    " is not found in this flight.")

    if options.stopFrame and (options.stopFrame not in frameDict) and not isLidar:
        logger.info("Warning: Frame " + str(options.stopFrame) +
                    " is not found in this flight.")

    allFilesToFetch = [] # Files that we will fetch, relative to the current dir.
    allUrlsToFetch  = [] # Full url of each file.

    # Loop through all found frames within the provided range
    # NOTE(review): currentFileCount, lastFrame and numFetched below appear
    # to be unused in the rest of this function.
    currentFileCount = 0
    lastFrame = ""
    if len(allFrames) > 0:
        lastFrame = allFrames[len(allFrames)-1]

    hasTfw = (options.type == 'fireball') # Fireball DEMs ship with a .tfw file
    hasXml = ( isLidar or (options.type == 'ortho') or hasTfw )
    numFetched = 0
    skipCount  = 0
    for frame in allFrames:

        # Skip frame outside of range
        if isLidar:
            if frameDict[frame] not in lidarsToFetch:
                continue
        else:
            if ((frame < options.startFrame) or (frame > options.stopFrame) ):
                continue

        # Handle the frame skip option: fetch one frame out of
        # every (frameSkip + 1) frames.
        if options.frameSkip > 0:
            if skipCount < options.frameSkip:
                skipCount += 1
                continue
            skipCount = 0

        filename = frameDict[frame]

        # Some files have an associated xml file. Fireball DEMs also have a tfw file.
        currFilesToFetch = [filename]
        if hasXml:
            currFilesToFetch.append(icebridge_common.xmlFile(filename))
        if hasTfw:
            currFilesToFetch.append(icebridge_common.tfwFile(filename))

        for filename in currFilesToFetch:
            url        = os.path.join(urlDict[frame], filename)
            outputPath = os.path.join(outputFolder, filename)
            allFilesToFetch.append(outputPath)
            allUrlsToFetch.append(url)

    # Restrict lidar fetch amount according to the parameter
    if (isLidar and options.maxNumLidarToFetch > 0 and
           len(allFilesToFetch) > options.maxNumLidarToFetch):

        # Ensure an even number, to fetch both the lidar file and its xml
        if options.maxNumLidarToFetch % 2 == 1:
            options.maxNumLidarToFetch += 1

        allFilesToFetch = allFilesToFetch[0:options.maxNumLidarToFetch]
        allUrlsToFetch  = allUrlsToFetch [0:options.maxNumLidarToFetch]

    icebridge_common.fetchFilesInBatches(baseCurlCmd, MAX_IN_ONE_CALL, options.dryRun,
                                         outputFolder,
                                         allFilesToFetch, allUrlsToFetch, logger)

    # Fetch from disk the set of already validated files, if any
    validFilesList = icebridge_common.validFilesList(os.path.dirname(outputFolder),
                                                     options.startFrame, options.stopFrame)
    validFilesSet = set()
    validFilesSet = icebridge_common.updateValidFilesListFromDisk(validFilesList, validFilesSet)
    numInitialValidFiles = len(validFilesSet)

    # Verify that all files were fetched and are in good shape
    failedFiles = []
    for outputPath in allFilesToFetch:

        if options.skipValidate:
            continue

        # A missing or zero-size file is always a failure.
        if not icebridge_common.fileNonEmpty(outputPath):
            logger.info('Missing file: ' + outputPath)
            failedFiles.append(outputPath)
            continue

        if icebridge_common.hasImageExtension(outputPath):
            # Image validation is intentionally disabled (the 'if False'
            # below); see the comment inside for the rationale.
            if False:
                # This check is just so slow. Turn it off for now.
                # This will impact only the validation of jpegs,
                # as the other files can be validated via the checksum.
                # Jpegs will be validated when converting them to 1 band images
                if outputPath in validFilesSet and os.path.exists(outputPath):
                    #logger.info('Previously validated: ' + outputPath)   # verbose
                    continue
                else:
                    if not icebridge_common.isValidImage(outputPath):
                        logger.info('Found an invalid image. Will wipe it: ' + outputPath)
                        if os.path.exists(outputPath): os.remove(outputPath)
                        failedFiles.append(outputPath)
                        continue
                    else:
                        logger.info('Valid image: ' + outputPath)
                        validFilesSet.add(outputPath) # mark it as validated

        # Sanity check: XML files must have the right latitude.
        if icebridge_common.fileExtension(outputPath) == '.xml':
            if outputPath in validFilesSet and os.path.exists(outputPath):
                #logger.info('Previously validated: ' + outputPath) #verbose
                continue
            else:
                if os.path.exists(outputPath):
                    try:
                        latitude = icebridge_common.parseLatitude(outputPath)
                        logger.info('Valid file: ' + outputPath)
                        validFilesSet.add(outputPath) # mark it as validated
                    except:
                        # Corrupted file
                        logger.info("Failed to parse latitude, will wipe: " + outputPath)
                        if os.path.exists(outputPath): os.remove(outputPath)
                        failedFiles.append(outputPath)

                    # On a second thought, don't wipe files with wrong latitude, as
                    # next time we run fetch we will have to fetch them again.
                    # Hopefully they will be ignored.
                    #isGood = hasGoodLat(latitude, isSouth)
                    #if not isGood:
                    #    logger.info("Wiping XML file " + outputPath + " with bad latitude " + \
                    #                str(latitude))
                    #    os.remove(outputPath)
                    #    imageFile = icebridge_common.xmlToImage(outputPath)
                    #    if os.path.exists(imageFile):
                    #        logger.info("Wiping TIF file " + imageFile + " with bad latitude " + \
                    #                    str(latitude))
                    #        os.remove(imageFile)

        # Verify the checksum of data files (everything except the .xml
        # metadata and .tfw georeference files themselves).
        if hasXml and len(outputPath) >= 4 and outputPath[-4:] != '.xml' \
               and outputPath[-4:] != '.tfw':
            if outputPath in validFilesSet and os.path.exists(outputPath):
                #logger.info('Previously validated: ' + outputPath) # verbose
                continue
            else:
                isGood = icebridge_common.hasValidChkSum(outputPath, logger)
                if not isGood:
                    # Wipe both the data file and its xml so both get refetched.
                    xmlFile = icebridge_common.xmlFile(outputPath)
                    logger.info('Found invalid data. Will wipe: ' + outputPath + ' ' + xmlFile)
                    if os.path.exists(outputPath): os.remove(outputPath)
                    if os.path.exists(xmlFile):    os.remove(xmlFile)
                    failedFiles.append(outputPath)
                    failedFiles.append(xmlFile)
                    continue
                else:
                    logger.info('Valid file: ' + outputPath)
                    validFilesSet.add(outputPath)

        # Validate .tfw files for fireball DEMs.
        if hasTfw and icebridge_common.fileExtension(outputPath) == '.tfw':
            if outputPath in validFilesSet and os.path.exists(outputPath):
                #logger.info('Previously validated: ' + outputPath)
                continue
            else:
                isGood = icebridge_common.isValidTfw(outputPath, logger)
                if not isGood:
                    xmlFile = icebridge_common.xmlFile(outputPath)
                    logger.info('Found invalid tfw. Will wipe: ' + outputPath + ' ' + xmlFile)
                    if os.path.exists(outputPath): os.remove(outputPath)
                    if os.path.exists(xmlFile):    os.remove(xmlFile)
                    failedFiles.append(outputPath)
                    failedFiles.append(xmlFile)
                    continue
                else:
                    logger.info('Valid tfw file: ' + outputPath)
                    validFilesSet.add(outputPath)

    # Write to disk the list of validated files, but only if new
    # validations happened.  First re-read that list, in case a
    # different process modified it in the meantime, such as if two
    # managers are running at the same time.
    numFinalValidFiles = len(validFilesSet)
    if numInitialValidFiles != numFinalValidFiles:
        validFilesSet = \
                      icebridge_common.updateValidFilesListFromDisk(validFilesList, validFilesSet)
        icebridge_common.writeValidFilesList(validFilesList, validFilesSet)

    numFailed = len(failedFiles)
    if numFailed > 0:
        logger.info("Number of files that could not be processed: " + str(numFailed))

    return numFailed
Example #25
0
    except optparse.OptionError, msg:
        raise Usage(msg)

    if len(args) != 1:
        raise Exception(usage)

    # The input is a pdf file
    inFile = args[0]
    base, ext = os.path.splitext(inFile)
    if ext != ".pdf":
        raise Exception("The input file must be in pdf format.")
    if not os.path.isfile(inFile):
        raise Exception("Missing input file: " + inFile + ".")

    # Convert from pdf to text
    pdfToTextExec = asp_system_utils.which("pdftotext")
    cmd = pdfToTextExec + ' ' + inFile
    print(cmd)
    p = subprocess.Popen(cmd, shell=True)
    os.waitpid(p.pid, 0)

    txtFile = base + '.txt'
    if not os.path.isfile(txtFile):
        raise Exception("Missing file: " + txtFile + ".")

    tsaiFile = base + '.tsai'

    # Read the lines
    lines = []
    fin = open(txtFile, 'r')
    while True:
Example #26
0
def getImageGeoInfo(imagePath, getStats=True):
    """Obtain some image geo information from gdalinfo in dictionary format.

    imagePath - Path to the image on disk; an Exception is raised if missing.
    getStats  - If True, also pass '-stats' to gdalinfo and collect per-band
                information under the 'band_info' key.

    Returns a dictionary with keys: 'image_size', 'origin', 'pixel_size',
    'projection_bounds', 'projection_center', 'standard_parallel_1',
    'central_meridian', 'proj_string', 'projection', 'lonlat_bounds',
    optionally 'point_offset', and (when getStats) 'band_info' as a list of
    per-band dictionaries.
    """

    if not os.path.exists(imagePath):
        raise Exception('Error: input file ' + imagePath + ' does not exist!')

    outputDict = {}

    # Call command line tool silently
    cmd = [asp_system_utils.which('gdalinfo'), imagePath, '-proj4']
    if getStats:
        cmd.append('-stats')
    p = subprocess.Popen(cmd, stdout=subprocess.PIPE, universal_newlines=True)
    textOutput, err = p.communicate()

    # Get the size in pixels
    imageSizeLine = asp_string_utils.getLineAfterText(textOutput, 'Size is ')
    sizeVals      = imageSizeLine.split(',')
    outputDict['image_size'] = (int(sizeVals[0]), int(sizeVals[1]))

    # Get origin location and pixel size
    originLine    = asp_string_utils.getLineAfterText(textOutput, 'Origin = ')
    pixelSizeLine = asp_string_utils.getLineAfterText(textOutput, 'Pixel Size = ')
    originVals    = asp_string_utils.getNumbersInParentheses(originLine)
    pixelSizeVals = asp_string_utils.getNumbersInParentheses(pixelSizeLine)
    outputDict['origin']     = originVals
    outputDict['pixel_size'] = pixelSizeVals

    # Get bounding box in projected coordinates
    upperLeftLine  = asp_string_utils.getLineAfterText(textOutput, 'Upper Left')
    lowerRightLine = asp_string_utils.getLineAfterText(textOutput, 'Lower Right')
    (minX, maxY)   = asp_string_utils.getNumbersInParentheses(upperLeftLine)
    (maxX, minY)   = asp_string_utils.getNumbersInParentheses(lowerRightLine)
    outputDict['projection_bounds'] = (minX, maxX, minY, maxY)
    outputDict['projection_center'] = ( (minX+maxX)/2.0, (minY+maxY)/2.0 )

    # Get some proj4 values
    outputDict['standard_parallel_1'] = getGdalInfoTagValue(textOutput, 'standard_parallel_1')
    outputDict['central_meridian']    = getGdalInfoTagValue(textOutput, 'central_meridian')

    # Get the projection type: the proj4 string is printed between single
    # quotes on the line after 'PROJ.4 string is:'.
    projStart = textOutput.find('PROJ.4 string is:')
    nextLine  = textOutput.find("'", projStart)+1
    endLine   = textOutput.find("'", nextLine)
    outputDict['proj_string'] = textOutput[nextLine:endLine]
    outputDict['projection'] = 'UNKNOWN'
    if '+proj=eqc' in textOutput:
        outputDict['projection'] = 'EQUIRECTANGULAR'
    elif ('+proj=ster' in textOutput) or ('+proj=stere' in textOutput):
        outputDict['projection'] = 'POLAR STEREOGRAPHIC'

    # Convert the projected bounding box to lon/lat, unless the image is
    # already in a lon/lat projection.
    outputDict['lonlat_bounds'] = outputDict['projection_bounds']
    if '+proj=longlat' not in outputDict['proj_string']:
        longlatString = getLonLatProjString(outputDict['proj_string'])
        ul = convertCoordinate(outputDict['proj_string'], longlatString, minX, maxY)
        br = convertCoordinate(outputDict['proj_string'], longlatString, maxX, minY)
        outputDict['lonlat_bounds'] = (ul[0], br[0], br[1], ul[1])

    # Extract this variable which ASP inserts into its point cloud files
    try:
        pointOffsetLine = asp_string_utils.getLineAfterText(textOutput, 'POINT_OFFSET=') # Tag name must be synced with C++ code
        offsetValues    = pointOffsetLine.split(' ')
        outputDict['point_offset'] =  (float(offsetValues[0]), float(offsetValues[1]), float(offsetValues[2]))
    except:
        pass # In most cases this line will not be present

    # TODO: Currently this does not find much information, and there
    #       is another function in image_utils dedicated to returning statistics.
    if getStats:

        # List of dictionaries per band
        outputDict['band_info'] = []

        # Populate band information
        band = 1
        while (True): # Loop until we run out of bands
            bandString = 'Band ' + str(band) + ' Block='
            bandLoc = textOutput.find(bandString)
            if bandLoc < 0: # Ran out of bands
                break

            # Found the band, read pertinent information
            bandInfo = {}

            # Get the type string
            bandLine = asp_string_utils.getLineAfterText(textOutput, bandString)
            typePos  = bandLine.find('Type=')
            commaPos = bandLine.find(',')
            # NOTE(review): the -1 below drops the character just before the
            # comma -- confirm against actual gdalinfo output formatting.
            typeName = bandLine[typePos+5:commaPos-1]
            bandInfo['type'] = typeName

            # Bug fix: append to the per-band list instead of overwriting it,
            # so information for every band is preserved.
            outputDict['band_info'].append(bandInfo)

            band = band + 1 # Move on to the next band

    return outputDict
        os.system(cmd)
    else:
        logger.info("All nav files were already generated.")

    # Generate a kml file for the nav camera files
    kmlPath = os.path.join(outputFolder, 'nav_cameras.kml')

    # This is a hack. If we are invoked from a Pleiades node, do not
    # create this kml file, as nodes will just overwrite each other.
    # This job may happen anyway earlier or later when on the head node.
    if not 'PBS_NODEFILE' in os.environ:
        try:
            tempPath = os.path.join(outputFolder, 'list.txt')
            logger.info('Generating nav camera kml file: ' + kmlPath)
            os.system('ls ' + outputFolder + '/*.tsai > ' + tempPath)
            orbitviz_pinhole = asp_system_utils.which('orbitviz_pinhole')
            cmd = orbitviz_pinhole + ' --hide-labels -o ' + kmlPath + ' --input-list ' + tempPath
            logger.info(cmd)
            asp_system_utils.executeCommand(cmd,
                                            kmlPath,
                                            suppressOutput=True,
                                            redo=False)
            os.remove(tempPath)
        except Exception, e:
            logger.info("Warning: " + str(e))

    logger.info('Finished generating camera models from nav!')

    return 0

Example #28
0
def main(argsIn):
    """Convert a camera calibration report in pdf format into an ASP
    pinhole camera file (.tsai) using the Photometrix distortion model.

    argsIn - Command line arguments; exactly one is expected, the path of
             the input pdf file.

    Requires the external 'pdftotext' tool. Raises Usage on option-parsing
    errors and Exception for bad or missing input files.
    """

    try:
        usage = '''usage: process_calibration_file.py file.pdf'''

        parser = optparse.OptionParser(usage=usage)
        (options, args) = parser.parse_args(argsIn)
    except optparse.OptionError as msg:
        raise Usage(msg)

    if len(args) != 1:
        raise Exception(usage)

    # The input is a pdf file
    inFile = args[0]
    base, ext = os.path.splitext(inFile)
    if ext != ".pdf":
        raise Exception("The input file must be in pdf format.")
    if not os.path.isfile(inFile):
        raise Exception("Missing input file: " + inFile + ".")

    # Convert from pdf to text
    pdfToTextExec = asp_system_utils.which("pdftotext")
    cmd = pdfToTextExec + ' ' + inFile
    print(cmd)
    p = subprocess.Popen(cmd, shell=True, universal_newlines=True)
    os.waitpid(p.pid, 0)

    txtFile = base + '.txt'
    if not os.path.isfile(txtFile):
        raise Exception("Missing file: " + txtFile + ".")

    tsaiFile = base + '.tsai'

    # Read the non-empty lines, stripped of surrounding whitespace.
    # (A context manager guarantees the file gets closed.)
    lines = []
    with open(txtFile, 'r') as fin:
        for line in fin:
            line = line.strip()
            if line == "":
                continue
            lines.append(line)

    # Sentinel value -1 means "field not found yet".
    width, height, f, xp, yp, pitch_x, pitch_y, k1, k2, k3, p1, p2, b1, b2 = \
           (-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1)

    # Strip the 'mm' units glued onto numbers by the pdf conversion
    count = 0
    for line in lines:
        while True:
            m = re.match(r"^(.*?\d)mm(.*?)$", line)
            if not m:
                break
            line = m.group(1) + m.group(2)
            lines[count] = line
        count += 1

    # Parse the lines. Each field is recorded only the first time it is seen.
    count = 0
    for line in lines:

        m = re.match(r"^.*?Resolution\s*=\s*(\d+)\s*x\s*(\d+)", line)
        if m and width == -1 and height == -1:
            width = m.group(1)
            height = m.group(2)

        m = re.match(r"^.*?Pixel width\s*=\s*(.*?),\s*Pixel height\s*=\s*(.*?)\s*$", line)
        if m and pitch_x == -1 and pitch_y == -1:
            pitch_x = m.group(1)
            pitch_y = m.group(2)
            if pitch_x != pitch_y:
                # (Fixed a stray tab in the original indentation here.)
                raise Exception ("Expecting pixel width and height to be same.")

        # The focal length value appears three lines below its 'c =' label
        m = re.match(r".*?c\s*=", line)
        if m and f == -1:
            f = lines[count+3]

        # The principal point offsets appear one line below their labels
        m = re.match(r".*?xp\s*=", line)
        if m and  xp == -1:
            xp = lines[count+1]

        m = re.match(r".*?yp\s*=", line)
        if m and  yp == -1:
            yp = lines[count+1]

        # Distortion coefficients are on the same line as their labels
        m = re.match(r".*?K1\s*=\s*(.*?)\s*$", line)
        if m and  k1 == -1:
            k1 = m.group(1)

        m = re.match(r".*?K2\s*=\s*(.*?)\s*$", line)
        if m and  k2 == -1:
            k2 = m.group(1)

        m = re.match(r".*?K3\s*=\s*(.*?)\s*$", line)
        if m and  k3 == -1:
            k3 = m.group(1)

        m = re.match(r".*?P1\s*=\s*(.*?)\s*$", line)
        if m and  p1 == -1:
            p1 = m.group(1)

        m = re.match(r".*?P2\s*=\s*(.*?)\s*$", line)
        if m and  p2 == -1:
            p2 = m.group(1)

        m = re.match(r".*?B1\s*=\s*(.*?)\s*$", line)
        if m and  b1 == -1:
            b1 = m.group(1)

        m = re.match(r".*?B2\s*=\s*(.*?)\s*$", line)
        if m and  b2 == -1:
            b2 = m.group(1)

        count += 1

    # Optical center in physical units (half the sensor size).
    # NOTE(review): if any field above was not found, the writes below will
    # fail on the -1 sentinels -- same as the original behavior.
    cu = float(width)  * float(pitch_x) / 2.0
    cv = float(height) * float(pitch_y) / 2.0

    print ("Writing: " + tsaiFile)
    with open(tsaiFile, 'w') as fout:
        fout.write("VERSION_3\n")
        fout.write("fu = " + f + "\n")
        fout.write("fv = " + f + "\n")
        fout.write("cu = " + repr(cu) + "\n")
        fout.write("cv = " + repr(cv) + "\n")
        fout.write("u_direction = 1  0  0\n")
        fout.write("v_direction = 0  1  0\n")
        fout.write("w_direction = 0  0  1\n")
        fout.write("C = 0 0 0\n")
        fout.write("R = 1 0 0 0 1 0 0 0 1\n")
        fout.write("pitch = " + pitch_x + "\n")
        fout.write("Photometrix\n")
        fout.write("xp = " + xp + "\n")
        fout.write("yp = " + yp + "\n")
        fout.write("k1 = " + k1 + "\n")
        fout.write("k2 = " + k2 + "\n")
        fout.write("k3 = " + k3 + "\n")
        fout.write("p1 = " + p1 + "\n")
        fout.write("p2 = " + p2 + "\n")
        fout.write("b1 = " + b1 + "\n")
        fout.write("b2 = " + b2 + "\n")
def cameraFromOrthoWrapper(inputPath, orthoPath, inputCamFile, estimatedCameraPath,
                           outputCamFile, refDemPath, simpleCamera, numThreads):
    '''Generate a camera model from a single ortho file.

    inputPath           - The raw input image.
    orthoPath           - The matching ortho image.
    inputCamFile        - Camera file with intrinsics.
    estimatedCameraPath - Optional initial camera estimate (may be None).
    outputCamFile       - Where to write the resulting camera model.
    refDemPath          - Reference DEM used by ortho2pinhole.
    simpleCamera        - If True, use the short-circuit (no-IP) mode.
    numThreads          - Number of threads to pass to ortho2pinhole.

    Tries several ortho2pinhole option combinations and keeps the result
    with the most interest points (IP). Intended to be run from a process
    pool, hence print() instead of a logger. Returns nothing; on failure
    the output file will simply be absent.'''

    # Make multiple calls with different options until we get one that works well
    IP_METHOD    = [1, 0, 2, 1, 1, 2, 0] # IP method
    FORCE_SIMPLE = [0, 0, 0, 0, 0, 0, 1] # If all else fails use simple mode
    LOCAL_NORM   = [False, False, False, False, True, True, False] # If true, image tiles are individually normalized with method 1 and 2
    IP_PER_TILE  = [0, 0, 0, 1024, 0, 0, 0]
    numAttempts = len(IP_METHOD)

    MIN_IP     = 15  # Require more IP to make sure we don't get bogus camera models
    DESIRED_IP = 200 # If we don't hit this number, try other methods before taking the best one.

    # The max distance in meters the ortho2pinhole solution is allowed to move from the input
    #  navigation estimate.
    MAX_TRANSLATION = 7

    bestIpCount = 0
    tempFilePath  = outputCamFile + '_temp' # Used to hold the current best result
    matchPath     = outputCamFile + '.match' # Used to hold the match file if it exists
    tempMatchPath = matchPath + '_temp'

    os.system("ulimit -c 0")  # disable core dumps
    os.system("rm -f core.*") # these keep on popping up
    os.system("umask 022")    # enforce files be readable by others

    numPoints = 0 # must be initialized
    for i in range(0,numAttempts):

        # Get parameters for this attempt
        ipMethod  = IP_METHOD[i]
        localNorm = LOCAL_NORM[i]

        if FORCE_SIMPLE[i]: # Always turn this on for the final attempt!
            simpleCamera = True

        # Call ortho2pinhole command
        ortho2pinhole = asp_system_utils.which("ortho2pinhole")
        cmd = (('%s %s %s %s %s --reference-dem %s --crop-reference-dem --threads %d ' \
                '--ip-detect-method %d --minimum-ip %d --max-translation %f')
                % (ortho2pinhole, inputPath, orthoPath, inputCamFile, outputCamFile,
                   refDemPath, numThreads, ipMethod, MIN_IP, MAX_TRANSLATION) )
        if localNorm:
            cmd += ' --skip-image-normalization'
        if estimatedCameraPath is not None:
            cmd += ' --camera-estimate ' + estimatedCameraPath
        if simpleCamera:
            cmd += ' --short-circuit'
        if IP_PER_TILE[i] > 0:
            cmd += ' --ip-per-tile ' + str(IP_PER_TILE[i])

        # Use a print statement as the logger fails from multiple processes
        print(cmd)

        os.system('rm -f ' + matchPath) # Needs to be gone
        p = subprocess.Popen(cmd.split(), stdout=subprocess.PIPE, universal_newlines=True)
        textOutput, err = p.communicate()
        p.wait()
        print(textOutput)

        if not os.path.exists(outputCamFile): # Keep trying if no output file produced
            continue

        if simpleCamera:
            break # Never need more than one attempt with simpleCamera!

        # Check the number of IP used
        m = re.findall(r"Using (\d+) points to create the camera model.", textOutput)
        if len(m) != 1: # An unknown error occurred, move on.
            continue
        numPoints = int(m[0])
        if numPoints >= DESIRED_IP: # Got a lot of points, quit
            break
        if numPoints > bestIpCount: # Got some points but not many, try other options
            bestIpCount = numPoints #  to see if we can beat this result.
            shutil.move(outputCamFile, tempFilePath)

            if os.path.exists(matchPath):
                shutil.move(matchPath, tempMatchPath)

    if (not simpleCamera) and (numPoints < DESIRED_IP): # If we never got the desired # of points
        # Bug fix: only restore the saved best result if one actually exists;
        # previously shutil.move raised if every attempt failed outright.
        if os.path.exists(tempFilePath):
            shutil.move(tempFilePath, outputCamFile) # Use the camera file with the most points found
        if os.path.exists(tempMatchPath):
            shutil.move(tempMatchPath, matchPath)
        print ('Best number of ortho points = ' + str(bestIpCount))
    else:
        print ('Best number of ortho points = ' + str(numPoints))

    os.system('rm -f ' + tempFilePath ) # Clean up these files
    os.system('rm -f ' + tempMatchPath)
    os.system("rm -f core.*") # these keep on popping up
    os.system("rm -f " + outputCamFile + "*-log-*") # wipe logs

    if not os.path.exists(outputCamFile):
        # This function is getting called from a pool, so just log the failure.
        print('Failed to convert ortho file: ' + orthoPath)

    # I saw this being recommended, to dump all print statements in the current task
    sys.stdout.flush()
def correctFireballDems(fireballFolder, corrFireballFolder, startFrame, stopFrame, isNorth,
                        skipValidate, logger):
    '''Fix the header problem in Fireball DEMs'''

    logger.info('Correcting Fireball DEMs ...')

    # Locate and load the index of fetched fireball DEMs
    fireballIndexPath = icebridge_common.csvIndexFile(fireballFolder)
    if not os.path.exists(fireballIndexPath):
        raise Exception("Error: Missing fireball index file: " + fireballIndexPath + ".")

    (fireballFrameDict, fireballUrlDict) = \
                        icebridge_common.readIndexFile(fireballIndexPath, prependFolder = True)

    if not skipValidate:
        # Load the set of files already known to be valid from disk
        validFilesList = icebridge_common.validFilesList(os.path.dirname(fireballFolder),
                                                         startFrame, stopFrame)
        validFilesSet = icebridge_common.updateValidFilesListFromDisk(validFilesList, set())
        numInitialValidFiles = len(validFilesSet)

    # Process each indexed DEM in frame order
    os.system('mkdir -p ' + corrFireballFolder)
    badFiles = False
    for frameNumber in sorted(fireballFrameDict.keys()):

        # Only handle frames inside the requested range
        if frameNumber < startFrame or frameNumber > stopFrame:
            continue

        demPath = fireballFrameDict[frameNumber]
        if not icebridge_common.isDEM(demPath):
            continue

        fixedDemPath = os.path.join(corrFireballFolder, os.path.basename(demPath))

        # Decide whether this output can be skipped
        if skipValidate:
            if os.path.exists(fixedDemPath):
                logger.info("File exists, skipping: " + fixedDemPath)
                continue
        else:
            if fixedDemPath in validFilesSet and os.path.exists(fixedDemPath):
                continue # already validated earlier
            if icebridge_common.isValidImage(fixedDemPath):
                validFilesSet.add(fixedDemPath) # existing output checks out
                continue

        # Run the correction tool on this DEM
        execPath = asp_system_utils.which('correct_icebridge_l3_dem')
        cmd = (('%s %s %s %d') %
               (execPath, demPath, fixedDemPath, isNorth))
        logger.info(cmd)
        # TODO: Run this as a subprocess and check the return code
        os.system(cmd)

        # Verify the result; on failure wipe both input and output
        if not icebridge_common.isValidImage(fixedDemPath):
            logger.error('Failed to convert dem file, wiping: ' + demPath + ' ' + fixedDemPath)
            os.system('rm -f ' + demPath) # will not throw
            os.system('rm -f ' + fixedDemPath) # will not throw
            badFiles = True
        elif not skipValidate:
            validFilesSet.add(fixedDemPath) # mark it as validated

    if not skipValidate:
        # Persist the validated-file list only when something new was
        # validated. Re-read it from disk first, since another manager
        # process may have updated it concurrently.
        if numInitialValidFiles != len(validFilesSet):
            validFilesSet = icebridge_common.updateValidFilesListFromDisk(validFilesList,
                                                                          validFilesSet)
            icebridge_common.writeValidFilesList(validFilesList, validFilesSet)

    return (not badFiles)
def correctFireballDems(fireballFolder, corrFireballFolder, startFrame,
                        stopFrame, isNorth, skipValidate, logger):
    '''Fix the header problem in Fireball DEMs.

    Reads the fireball CSV index in fireballFolder, runs the
    correct_icebridge_l3_dem tool on every DEM whose frame is in
    [startFrame, stopFrame], and writes the corrected DEMs (same base
    name) to corrFireballFolder. A DEM whose corrected output is not a
    valid image is wiped together with its input. Unless skipValidate is
    set, successfully corrected files are recorded in the on-disk list
    of validated files.

    Returns True if every processed DEM was corrected successfully,
    False otherwise. Raises Exception if the fireball index file is
    missing.
    '''

    logger.info('Correcting Fireball DEMs ...')

    # Read the existing DEMs
    fireballIndexPath = icebridge_common.csvIndexFile(fireballFolder)
    if not os.path.exists(fireballIndexPath):
        raise Exception("Error: Missing fireball index file: " +
                        fireballIndexPath + ".")

    (fireballFrameDict, fireballUrlDict) = \
                        icebridge_common.readIndexFile(fireballIndexPath, prependFolder = True)

    if not skipValidate:
        # Load the set of files already known to be valid so that prior
        # validations are not repeated.
        validFilesList = icebridge_common.validFilesList(
            os.path.dirname(fireballFolder), startFrame, stopFrame)
        validFilesSet = set()
        validFilesSet = icebridge_common.updateValidFilesListFromDisk(
            validFilesList, validFilesSet)
        numInitialValidFiles = len(validFilesSet)

    # Loop through all the input images
    if not os.path.isdir(corrFireballFolder):
        os.makedirs(corrFireballFolder)
    badFiles = False
    for frame in sorted(fireballFrameDict.keys()):

        # Skip if outside the frame range
        if not ((frame >= startFrame) and (frame <= stopFrame)):
            continue

        inputPath = fireballFrameDict[frame]
        if not icebridge_common.isDEM(inputPath):
            continue

        outputPath = os.path.join(corrFireballFolder,
                                  os.path.basename(inputPath))

        # Skip existing valid files
        if skipValidate:
            if os.path.exists(outputPath):
                logger.info("File exists, skipping: " + outputPath)
                continue
        else:
            if outputPath in validFilesSet and os.path.exists(outputPath):
                #logger.info('Previously validated: ' + outputPath) # very verbose
                continue

            if icebridge_common.isValidImage(outputPath):
                #logger.info("File exists and is valid, skipping: " + outputPath)
                validFilesSet.add(outputPath)  # mark it as validated
                continue

        # Run the correction script and check its return code
        execPath = asp_system_utils.which('correct_icebridge_l3_dem')
        cmd = (('%s %s %s %d') % (execPath, inputPath, outputPath, isNorth))
        logger.info(cmd)
        returnCode = subprocess.call(cmd.split())
        if returnCode != 0:
            # The output validity check below decides if this is fatal
            logger.error('Return code ' + str(returnCode) + ' from: ' + cmd)

        # Check if the output file is good
        if not icebridge_common.isValidImage(outputPath):
            logger.error('Failed to convert dem file, wiping: ' + inputPath +
                         ' ' + outputPath)
            # Best-effort removal; must not throw
            for path in (inputPath, outputPath):
                try:
                    os.remove(path)
                except OSError:
                    pass
            badFiles = True
        else:
            if not skipValidate:
                validFilesSet.add(outputPath)  # mark it as validated

    if not skipValidate:
        # Write to disk the list of validated files, but only if new
        # validations happened.  First re-read that list, in case a
        # different process modified it in the meantime, such as if two
        # managers are running at the same time.
        numFinalValidFiles = len(validFilesSet)
        if numInitialValidFiles != numFinalValidFiles:
            validFilesSet = icebridge_common.updateValidFilesListFromDisk(
                validFilesList, validFilesSet)
            icebridge_common.writeValidFilesList(validFilesList, validFilesSet)

    return (not badFiles)
        open(parsedNavPath, 'w').close()
        
    # Append to the output parsed nav file
    for fileName in fileList:
        # Convert the nav file from binary to text    
        navPath = os.path.join(navFolder, fileName)

        with open(navPath, 'r') as f:
            text = f.readline()
            if 'HTML' in text:
                # Sometimes the server is down, and instead of the binary nav file
                # we are given an html file with an error message.
                logger.info("Have invalid nav file: " + navPath)
                return -1 # Die in this case!

        cmd = asp_system_utils.which('sbet2txt.pl') + ' -q ' + navPath + ' >> ' + parsedNavPath
        logger.info(cmd)
        if not isNonEmpty:
            os.system(cmd)

    cameraPath = options.inputCalCamera
    if cameraPath == "":
        # No input camera file provided, look one up. It does not matter much,
        # as later ortho2pinhole will insert the correct intrinsics.
        goodFile = False
        fileList = os.listdir(calFolder)
        fileList = [x for x in fileList if (('.tsai' in x) and ('~' not in x))]
        if not fileList:
            logger.error('Unable to find any camera files in ' + calFolder)
            return -1
        for fileName in fileList:
def cameraFromOrthoWrapper(inputPath, orthoPath, inputCamFile,
                           estimatedCameraPath, outputCamFile, refDemPath,
                           simpleCamera, numThreads):
    '''Generate a camera model from a single ortho file.

    Calls the ortho2pinhole tool up to numAttempts times with varying
    interest-point options, stopping early once DESIRED_IP points are
    matched (or after the first success in simpleCamera mode). The best
    camera model seen so far is parked in a temporary file and restored
    at the end if no attempt reached the desired point count. Uses
    print() instead of the logger since this runs inside a process pool.
    '''

    # Make multiple calls with different options until we get one that works well
    IP_METHOD = [1, 0, 2, 1, 1, 2, 0]  # IP method
    FORCE_SIMPLE = [0, 0, 0, 0, 0, 0, 1]  # If all else fails use simple mode
    LOCAL_NORM = [
        False, False, False, False, True, True, False
    ]  # If true, image tiles are individually normalized with method 1 and 2
    IP_PER_TILE = [0, 0, 0, 1024, 0, 0, 0]
    numAttempts = len(IP_METHOD)

    MIN_IP = 15  # Require more IP to make sure we don't get bogus camera models
    DESIRED_IP = 200  # If we don't hit this number, try other methods before taking the best one.

    # The max distance in meters the ortho2pinhole solution is allowed to move from the input
    #  navigation estimate.
    MAX_TRANSLATION = 7

    bestIpCount = 0
    tempFilePath = outputCamFile + '_temp'  # Used to hold the current best result
    matchPath = outputCamFile + '.match'  # Used to hold the match file if it exists
    tempMatchPath = matchPath + '_temp'

    os.system("ulimit -c 0")  # disable core dumps
    os.system("rm -f core.*")  # these keep on popping up
    os.system("umask 022")  # enforce files be readable by others

    numPoints = 0  # must be initialized
    for i in range(0, numAttempts):

        # Get parameters for this attempt
        ipMethod = IP_METHOD[i]
        localNorm = LOCAL_NORM[i]

        if FORCE_SIMPLE[i]:  # Always turn this on for the final attempt!
            simpleCamera = True

        # Call ortho2pinhole command
        ortho2pinhole = asp_system_utils.which("ortho2pinhole")
        cmd = (('%s %s %s %s %s --reference-dem %s --crop-reference-dem --threads %d ' \
                '--ip-detect-method %d --minimum-ip %d --max-translation %f')
                % (ortho2pinhole, inputPath, orthoPath, inputCamFile, outputCamFile,
                   refDemPath, numThreads, ipMethod, MIN_IP, MAX_TRANSLATION) )
        if localNorm:
            cmd += ' --skip-image-normalization'
        if estimatedCameraPath is not None:
            cmd += ' --camera-estimate ' + estimatedCameraPath
        if simpleCamera:
            cmd += ' --short-circuit'
        if IP_PER_TILE[i] > 0:
            cmd += ' --ip-per-tile ' + str(IP_PER_TILE[i])

        # Use a print statement as the logger fails from multiple processes
        print(cmd)

        os.system('rm -f ' + matchPath)  # Needs to be gone
        p = subprocess.Popen(cmd.split(), stdout=subprocess.PIPE)
        # communicate() waits for the process, no separate wait() needed
        textOutput, err = p.communicate()
        if isinstance(textOutput, bytes) and not isinstance(textOutput, str):
            # On Python 3 the pipe yields bytes; decode so the regex below works
            textOutput = textOutput.decode('utf-8', 'replace')
        print(textOutput)

        if not os.path.exists(outputCamFile):
            # Keep trying if no output file produced
            continue

        if simpleCamera:
            break  # Never need more than one attempt with simpleCamera!

        # Check the number of IP used
        m = re.findall(r"Using (\d+) points to create the camera model.",
                       textOutput)
        if len(m) != 1:  # An unknown error occurred, move on.
            continue
        numPoints = int(m[0])
        if numPoints >= DESIRED_IP:  # Got a lot of points, quit
            break
        if numPoints > bestIpCount:  # Got some points but not many, try other options
            bestIpCount = numPoints  #  to see if we can beat this result.
            shutil.move(outputCamFile, tempFilePath)

            if os.path.exists(matchPath):
                shutil.move(matchPath, tempMatchPath)

    if (not simpleCamera) and (numPoints < DESIRED_IP):
        # We never reached the desired number of points; fall back to the
        # attempt that found the most points, if any attempt produced a camera.
        if os.path.exists(tempFilePath):
            shutil.move(tempFilePath, outputCamFile)
        if os.path.exists(tempMatchPath):
            shutil.move(tempMatchPath, matchPath)
        print('Best number of ortho points = ' + str(bestIpCount))
    else:
        print('Best number of ortho points = ' + str(numPoints))

    os.system('rm -f ' + tempFilePath)  # Clean up these files
    os.system('rm -f ' + tempMatchPath)
    os.system("rm -f core.*")  # these keep on popping up
    os.system("rm -f " + outputCamFile + "*-log-*")  # wipe logs

    if not os.path.exists(outputCamFile):
        # This function is getting called from a pool, so just log the failure.
        print('Failed to convert ortho file: ' + orthoPath)

    # Dump all buffered print output for this pool task
    sys.stdout.flush()