Example #1
import os

import icebridge_common  # project-specific helper module

def fetchNavData(options, outputFolder):
    '''Fetch all the nav data for a flight.'''

    numFailed = 0

    # The storage convention for these files is simple: most dates have a
    # single SBET file, but a few dates have two ('a' and 'b') instead of one.
    folderUrl = getFolderUrl(options.yyyymmdd, options.year, options.month,
                             options.day, False, options.site, options.type)
    filename = 'sbet_' + options.yyyymmdd + '.out'
    filenameA = 'sbet_' + options.yyyymmdd + 'a.out'
    filenameB = 'sbet_' + options.yyyymmdd + 'b.out'

    # Check which URL variant(s) exist for this date
    url = folderUrl + filename
    if checkIfUrlExists(url):
        fileList = [filename]
    else:
        fileList = [filenameA, filenameB]

    # Download the files, skipping any that were already fetched
    for f in fileList:
        url = folderUrl + f  # string concatenation; these are URLs, not file paths
        outputPath = os.path.join(outputFolder, f)
        # TODO: How to handle refetch?
        if os.path.exists(outputPath):
            continue
        if not icebridge_common.fetchFile(url, outputPath):
            numFailed += 1

    return numFailed
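
This snippet depends on helpers defined elsewhere: getFolderUrl() and
checkIfUrlExists() come from the surrounding module, and fetchFile() from the
project's icebridge_common module. As a rough sketch only (an assumption, not
the project's actual implementation), checkIfUrlExists() could be approximated
with the standard library:

import urllib.request
import urllib.error

def checkIfUrlExists(url):
    '''Return True if an HTTP HEAD request for the URL succeeds (sketch).'''
    request = urllib.request.Request(url, method='HEAD')
    try:
        with urllib.request.urlopen(request, timeout=10):
            return True
    except (urllib.error.URLError, OSError):
        return False

A hypothetical call, using a stand-in options object with the attributes the
function reads (the values shown are made up for illustration):

import types

options = types.SimpleNamespace(yyyymmdd='20121024', year='2012', month='10',
                                day='24', site='GR', type='nav')
numFailed = fetchNavData(options, '/tmp/nav')

A later revision of the same function drops the up-front URL check in favor of
validating the files after download: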
def fetchNavData(options, outputFolder):
    '''Fetch all the nav data for a flight. Returns 0 on success, 1 on failure.
    Relies on the module-level logger and on validateNavOrWipe(), defined
    elsewhere in the project.'''

    success = False

    # The storage convention for these files is simple: most dates have a
    # single SBET file, but a few dates have two ('a' and 'b') instead of one.
    folderUrl = getFolderUrl(options.yyyymmdd, options.year, options.month,
                             options.day, False,
                             options.site, options.type)
    filename  = 'sbet_' + options.yyyymmdd + '.out'
    filenameA = 'sbet_' + options.yyyymmdd + 'a.out'
    filenameB = 'sbet_' + options.yyyymmdd + 'b.out'
    
    # Checking up front which URLs exist is not robust enough, as the check
    # can return a good status even when the data is missing, so that logic
    # is commented out. Instead, fetch all candidate files and validate them
    # afterwards.
    #url = folderUrl + filename
    #if checkIfUrlExists(url):
    #    fileList = [filename]
    #else:
    #    fileList = [filenameA, filenameB]
    fileList = [filename, filenameA, filenameB]
    
    if options.refetchNav:
        # Wipe any previously fetched nav files so they are re-downloaded
        cmd = "rm -f " + os.path.join(outputFolder, "sbet_*")
        print(cmd)
        os.system(cmd)
     
    # Download the files
    for f in fileList:
        url        = folderUrl + f
        outputPath = os.path.join(outputFolder, f)
        # Skip the download if a valid copy is already on disk
        if validateNavOrWipe(outputPath, logger):
            success = True
            continue

        # Checking the URL first times out, so avoid it
        #if not checkIfUrlExists(url):
        #    continue

        if not icebridge_common.fetchFile(url, outputPath):
            logger.info("Bad url: " + url)
            continue
        
        if validateNavOrWipe(outputPath, logger):
            success = True
            
    if success:
        return 0
    
    return 1
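
validateNavOrWipe() is not shown in the snippet. A minimal sketch of the
implied behavior, assuming only an existence and non-empty-size check (the
real project presumably validates the file contents more thoroughly):

import os

def validateNavOrWipe(filePath, logger):
    '''Return True if the nav file looks valid; otherwise wipe it (sketch).'''
    if not os.path.exists(filePath):
        return False
    if os.path.getsize(filePath) > 0:
        return True
    # A zero-size file is a failed download; delete it so it can be refetched
    logger.info('Wiping invalid file: ' + filePath)
    os.remove(filePath)
    return False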
Example #4
import optparse
import os
import re

import icebridge_common  # project-specific helper module

def main(argsIn):

    # Command line parsing
    try:
        usage  = "usage: get_date_list.py output_path"
        parser = optparse.OptionParser(usage=usage)

        # No script-specific options are defined; the parser only provides the usage message.
        (options, args) = parser.parse_args(argsIn)

        if len(args) != 1:
            print('Error: Missing output path.')
            print(usage)
            return -1
        outputPath = os.path.abspath(args[0])

    except optparse.OptionError as msg:
        raise Exception(msg)

    # TODO: Move into a common function!
    # Verify that the required Earthdata authentication files exist
    home = os.path.expanduser("~")
    if not (os.path.exists(home + '/.netrc') and
            os.path.exists(home + '/.urs_cookies')):
        print('Missing a required authentication file! See instructions here:')
        print('    https://nsidc.org/support/faq/what-options-are-available-bulk-downloading-data-https-earthdata-login-enabled')
        return -1
    
    topIndexPath  = outputPath + '_top.csv'
    tempIndexPath = outputPath + '_temp.csv'

    # Get the top level index
    TOP_URL = 'https://n5eil01u.ecs.nsidc.org/ICEBRIDGE_FTP/IODMS0_DMSraw_v01/'
    print('Fetching top level index from: ' + TOP_URL)
    icebridge_common.fetchFile(TOP_URL, topIndexPath)
    
    with open(topIndexPath, 'r') as f:
        topText = f.read()
    
    # Find all the mission sub-folders listed in the index page,
    # e.g. ">2012_GR_NASA" -> "2012_GR_NASA"
    matches     = re.findall(">[0-9_ANGR]*_NASA", topText)
    missionList = [x[1:] for x in matches]  # strip the leading '>'
    
    allDates = []
    
    # Loop through the sub folders
    for mission in missionList:
        missionUrl = TOP_URL + mission
        print('Checking mission at: ' + missionUrl)
        icebridge_common.fetchFile(missionUrl, tempIndexPath)

        # The two-letter site code, e.g. '2012_GR_NASA' -> 'GR'
        site = mission[5:7]

        with open(tempIndexPath, 'r') as f:
            missionText = f.read()
        
        # Find the date sub-folders, e.g. ">10242012_raw" -> "10242012_raw"
        matches  = re.findall(">[0-9]*_raw", missionText)
        dateList = [x[1:] for x in matches]

        for date in dateList:
            yyyymmdd = date[4:8] + date[0:4]  # convert MMDDYYYY to YYYYMMDD
            allDates.append( (yyyymmdd, site) )

    with open(outputPath, 'w') as f:
        for date in allDates:
            f.write(date[0] +', '+ date[1] +'\n')

    print('Wrote out ' + str(len(allDates)) + ' dates to file.')
    print('Finished!')

    # Clean up the temporary index files
    os.remove(topIndexPath)
    os.remove(tempIndexPath)

    return 0
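
The scraped snippet has no entry point. Assuming the usual convention for
scripts like this, it would be invoked as:

import sys

if __name__ == '__main__':
    sys.exit(main(sys.argv[1:]))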