Example 1
File: eol.py Project: esa/auromat
def downloadImagesJpg(folderPath, ids):
    """
    Download the JPEG images given by (mission, roll, frame) tuples and return
    their paths if successful. On any error, False is returned.
    Files that already exist are not downloaded again.
    
    :param folderPath: download location
    :param ids: list of tuples (mission,roll,frame)
    :rtype: list of str | False
    """
    urls = [
        jpgUrlPattern.format(mission=mission, roll=roll, frame=frame)
        for mission, roll, frame in ids
    ]
    paths = [
        os.path.join(
            folderPath,
            jpgFilePattern.format(mission=mission, roll=roll, frame=frame))
        for mission, roll, frame in ids
    ]

    makedirs(folderPath)

    if downloadFiles(urls, paths):
        return paths
    else:
        return False
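
A minimal usage sketch follows; the (mission, roll, frame) tuples and the target folder are hypothetical. Note that the explicit `is False` check matters, since an empty list of paths would also be falsy:

# Hypothetical IDs and folder, for illustration only.
ids = [('ISS030', 'E', 114986), ('ISS030', 'E', 114987)]
paths = downloadImagesJpg('/tmp/eol_jpg', ids)
if paths is False:
    print('download failed')
else:
    print('downloaded', len(paths), 'files')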
Example 2
def solveImages(imagePaths,
                wcsFolder,
                tleFolder,
                spacetrackUser,
                spacetrackPassword,
                noradId,
                debugOutputFolder=None,
                parallel=True,
                maxWorkers=None,
                solveTimeout=60 * 5,
                maskingFn=maskStarfield,
                returnUnsolved=False,
                noAstrometryPlots=False,
                oddsToSolve=None,
                channel=None,
                sigma=None,
                verbose=False):
    """
    Returns a generator yielding (imagePath, wcsPath) tuples of successfully solved images.
    This allows actions to be executed directly after each solve.
    Images that are already solved are skipped.
    
    See :func:`solve` for parameters.
    """
    makedirs(wcsFolder)
    unsolvedImagePaths = []
    unsolvedWcsPaths = []
    for imagePath in imagePaths:
        fileName = os.path.basename(imagePath)
        fileBase = os.path.splitext(fileName)[0]
        wcsPath = os.path.join(wcsFolder, fileBase + '.wcs')
        if not os.path.exists(wcsPath):
            unsolvedImagePaths.append(imagePath)
            unsolvedWcsPaths.append(wcsPath)

    wcsHeaders = _solveImages(unsolvedImagePaths,
                              tleFolder,
                              spacetrackUser,
                              spacetrackPassword,
                              noradId,
                              maskingFn,
                              debugOutputFolder=debugOutputFolder,
                              parallel=parallel,
                              maxWorkers=maxWorkers,
                              solveTimeout=solveTimeout,
                              noAstrometryPlots=noAstrometryPlots,
                              oddsToSolve=oddsToSolve,
                              channel=channel,
                              sigma=sigma,
                              verbose=verbose)

    for (imagePath, wcsPath, wcsHeader) in zip(unsolvedImagePaths,
                                               unsolvedWcsPaths, wcsHeaders):
        if wcsHeader is not None:
            auromat.fits.writeHeader(wcsPath, wcsHeader)
            yield (imagePath, wcsPath)
        elif returnUnsolved:
            yield (imagePath, None)
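
Because solveImages is a generator, each (imagePath, wcsPath) pair can be processed as soon as its solve finishes. A sketch with placeholder inputs and credentials (25544 is the NORAD catalog id of the ISS):

# Placeholder inputs and credentials, for illustration only.
imagePaths = ['frames/img_001.jpg', 'frames/img_002.jpg']
pairs = solveImages(imagePaths, 'wcs/', 'tle/',
                    'user@example.com', 'secret', noradId=25544)
for imagePath, wcsPath in pairs:
    print('solved', imagePath, '->', wcsPath)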
Example 3
def correctLensDistortion(folderPath, undistFolderPath, lensfunDbObj=None):
    """
    Corrects the lens distortion of all images in `folderPath` using
    lensfun's distortion profile database.
    
    It is assumed that all images have the same camera and lens.
    Images whose corrected version already exists in undistFolderPath are skipped.
    """
    meta = loadMetaData(folderPath)
    firstImagePath = os.path.join(folderPath, filenameOf(meta.fromFrame, meta))
    mod, cam, lens = getLensfunModifier(firstImagePath,
                                        lensfunDbObj=lensfunDbObj)

    makedirs(undistFolderPath)

    print('starting lens distortion correction for ' + folderPath)

    root, ext = os.path.splitext(meta.pattern)
    undistPattern = root + '_dc' + ext

    with exiftool.ExifTool() as et:
        for filename, frame in filenameIter(meta):
            imagePath = os.path.join(folderPath, filename)
            filenameUndist = _filenameOf(meta.mission, meta.roll, frame,
                                         undistPattern)
            undistImagePath = os.path.join(undistFolderPath, filenameUndist)
            if os.path.exists(undistImagePath):
                continue
            auromat.util.lensdistortion.correctLensDistortion(imagePath,
                                                              undistImagePath,
                                                              exiftoolObj=et,
                                                              mod=mod)

    dcParams = LensDistortionCorrectionParams(cam.maker, cam.model,
                                              cam.variant, lens.maker,
                                              lens.model, mod.focal_length,
                                              mod.aperture)

    meta.pattern = undistPattern
    meta.lensDistortionCorrected = True
    meta.lensDistortionCorrectionParams = dcParams

    storeMetaData(undistFolderPath, meta)
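
A usage sketch, under the assumption that folderPath was filled by one of the download functions in this file so that loadMetaData finds the sequence metadata:

# Assumed layout: 'seq/raw' holds a downloaded sequence plus its metadata file.
correctLensDistortion('seq/raw', 'seq/raw_dc')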
Example 4
File: eol.py Project: esa/auromat
def _downloadImageSequenceRaw(folderPath,
                              mission,
                              fromFrame,
                              toFrame,
                              roll='E'):
    assert roll == 'E'  # only those have RAW files

    # first, download in temp folder, then copy over and remove temp folder if successful
    tempFolderPath = os.path.join(folderPath, 'in_progress')

    metadataPath = os.path.join(folderPath, metadataFilename)

    fromFrame, toFrame = int(fromFrame), int(toFrame)

    # check if already fully downloaded
    if os.path.exists(metadataPath):
        # keep the (meta, failures) return contract used by the later return paths
        return loadMetaData(metadataPath), []

    makedirs(folderPath, tempFolderPath)

    # first, we determine the RAW filename pattern by looking at a photo page
    firstPhotoPageUrl = photoPageUrlPattern.format(mission=mission,
                                                   roll=roll,
                                                   frame=fromFrame)
    photoPageContent = urllib.request.urlopen(firstPhotoPageUrl).read()
    photoPageContent = photoPageContent.decode('utf-8')  # bytes -> str so the regex below can match

    match = re.search(rawFilePhotoPagePattern, photoPageContent)
    if match is None:
        raise RuntimeError('Could not find RAW filename on page ' +
                           firstPhotoPageUrl)

    rawFilename = match.group(1)
    rawFileBase, rawFileExt = os.path.splitext(rawFilename)

    assert mission in rawFileBase or mission.lower() in rawFileBase
    assert roll in rawFileBase or roll.lower() in rawFileBase
    assert str(fromFrame) in rawFileBase

    rawFileBasePattern = rawFileBase
    if mission in rawFileBase:
        rawFileBasePattern = rawFileBase.replace(mission, '{mission}')
        missionCased = mission
    elif mission.lower() in rawFileBasePattern:
        rawFileBasePattern = rawFileBasePattern.replace(
            mission.lower(), '{mission}')
        missionCased = mission.lower()
    else:
        raise RuntimeError('Could not find mission name in ' + rawFileBase)

    if roll in rawFileBasePattern:
        rawFileBasePattern = rawFileBasePattern.replace(roll, '{roll}')
        rollCased = roll
    elif roll.lower() in rawFileBasePattern:
        rawFileBasePattern = rawFileBasePattern.replace(roll.lower(), '{roll}')
        rollCased = roll.lower()
    else:
        raise RuntimeError('Could not find roll name in ' + rawFileBase)

    frameZfilled = lambda frame: str(frame).zfill(6)
    if frameZfilled(fromFrame) in rawFileBasePattern:
        rawFileBasePattern = rawFileBasePattern.replace(
            frameZfilled(fromFrame), '{frame}')
        frameFn = frameZfilled
    elif str(fromFrame) in rawFileBasePattern:
        rawFileBasePattern = rawFileBasePattern.replace(
            str(fromFrame), '{frame}')
        frameFn = str
    else:
        raise RuntimeError('Could not find frame number in ' + rawFileBase)

    rawFilenamePattern = rawFileBasePattern + rawFileExt
    print('Raw filename pattern: ' + rawFilenamePattern)

    frames = range(fromFrame, toFrame + 1)
    rawFilenames = [
        rawFilenamePattern.format(mission=missionCased,
                                  roll=rollCased,
                                  frame=frameFn(frame)) for frame in frames
    ]
    rawRequestUrls = [
        rawRequestUrlPattern.format(mission=mission,
                                    roll=roll,
                                    frame=frame,
                                    file=rawFilename)
        for frame, rawFilename in zip(frames, rawFilenames)
    ]
    rawUrls = [
        rawUrlPattern.format(file=rawFilename) for rawFilename in rawFilenames
    ]

    rawFilePatternDisk = rawFilePatternNoExt + rawFileExt.lower()
    rawFilenamesDisk = [
        rawFilePatternDisk.format(mission=mission, roll=roll, frame=frame)
        for frame in frames
    ]
    paths = [
        os.path.join(tempFolderPath, rawFilenameDisk)
        for rawFilenameDisk in rawFilenamesDisk
    ]

    # jpg URLs are used to check if the frame exists (or whether there's a frame gap)
    jpgUrls = [
        jpgUrlPattern.format(mission=mission, roll=roll, frame=frame)
        for frame in frames
    ]

    frameGaps = []
    failures = []
    queue = []

    for frame, jpgUrl, rawUrl, rawRequestUrl, path in zip(
            frames, jpgUrls, rawUrls, rawRequestUrls, paths):
        if os.path.exists(path):
            continue
        try:
            code = urlResponseCode(jpgUrl)
            if code == 200:
                queue.append((rawUrl, rawRequestUrl, path))
                print('Got 200, added frame ' + str(frame) + ' to queue')
            elif code == 404:
                if fromFrame < frame < toFrame:
                    frameGaps.append(frame)
                    print('Got 404, ignoring frame ' + str(frame))
                else:
                    raise ValueError('Start/end frame ' + str(frame) +
                                     ' not downloadable (404)')
            else:
                failures.append((rawUrl, code))
                print('Failure: Unexpected response code for jpgUrl: ' +
                      str(code))
        except Exception as e:
            failures.append((rawUrl, e))
            print('Failure: ' + repr(e))

    # download RAW files in batches to avoid overloading the server
    batchSize = 30
    batches = [queue[i:i + batchSize] for i in range(0, len(queue), batchSize)]
    for batch in batches:
        batchUrls = []
        batchPaths = []
        for rawUrl, rawRequestUrl, path in batch:
            try:
                code = urlResponseCode(rawRequestUrl)
                if code == 200:
                    print('queried ' + rawRequestUrl)
                    batchUrls.append(rawUrl)
                    batchPaths.append(path)
                else:
                    failures.append((rawUrl, code))
                    print(
                        'Failure: Unexpected response code for rawRequestUrl: '
                        + str(code))
            except Exception as e:
                failures.append((rawUrl, e))
                print('Failure: ' + repr(e))

        # now check rawUrls until the files are available for download
        # The request "may take 5 minutes or more to complete" (quote from request page)
        success, failures_ = downloadFiles(batchUrls,
                                           batchPaths,
                                           retFailures=True)
        failureCount = len(failures_)
        lastFailureCountDecrease = datetime.now()
        while (not success and
               datetime.now() - lastFailureCountDecrease < timedelta(minutes=8)):
            sleep(30)
            success, failures_ = downloadFiles(batchUrls,
                                               batchPaths,
                                               retFailures=True)
            if len(failures_) < failureCount:
                lastFailureCountDecrease = datetime.now()
                failureCount = len(failures_)

        failures.extend(failures_)

    if len(failures) > 0:
        return False, failures

    for filename in os.listdir(tempFolderPath):
        shutil.move(os.path.join(tempFolderPath, filename), folderPath)

    os.rmdir(tempFolderPath)

    meta = SequenceMetadata(mission=mission,
                            roll=roll,
                            fromFrame=fromFrame,
                            toFrame=toFrame,
                            pattern=rawFilePatternDisk,
                            frameGaps=frameGaps,
                            lensDistortionCorrected=False)
    storeMetaData(metadataPath, meta)

    return meta, []
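
The helper returns (meta, []) on success and (False, failures) otherwise; a caller sketch under that contract (folder and frame range are placeholders):

# Placeholder arguments; _downloadImageSequenceRaw is a module-private helper.
meta, failures = _downloadImageSequenceRaw('seq/raw', 'ISS030', 114986, 115574)
if meta is False:
    print('raw download failed:', failures)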
Example 5
File: eol.py Project: esa/auromat
def _downloadImageSequenceJpg(folderPath,
                              mission,
                              fromFrame,
                              toFrame,
                              roll='E',
                              lensDistortionCorrected=False):
    # first, download in temp folder, then copy over and remove temp folder if successful
    tempFolderPath = os.path.join(folderPath, 'in_progress')

    metadataPath = os.path.join(folderPath, metadataFilename)

    fromFrame, toFrame = int(fromFrame), int(toFrame)

    # check if already fully downloaded
    firstImage = os.path.join(
        folderPath,
        jpgFilePattern.format(mission=mission, roll=roll, frame=fromFrame))
    if os.path.exists(firstImage):
        # as the files are only moved over at the very end, it is enough to
        # check for the existence of the first image

        # write metadata if it does not exist yet (for whatever reason)
        if not os.path.exists(metadataPath):
            frameGaps = []
            for frame in range(fromFrame, toFrame + 1):
                imagePath = os.path.join(
                    folderPath,
                    jpgFilePattern.format(mission=mission,
                                          roll=roll,
                                          frame=frame))
                if not os.path.exists(imagePath):
                    frameGaps.append(frame)

            meta = SequenceMetadata(
                mission=mission,
                roll=roll,
                fromFrame=fromFrame,
                toFrame=toFrame,
                pattern=jpgFilePattern,
                frameGaps=frameGaps,
                lensDistortionCorrected=lensDistortionCorrected)
            storeMetaData(metadataPath, meta)
        else:
            meta = loadMetaData(metadataPath)

        return meta, []

    makedirs(folderPath, tempFolderPath)

    frames = range(fromFrame, toFrame + 1)

    urls = [
        jpgUrlPattern.format(mission=mission, roll=roll, frame=frame)
        for frame in frames
    ]
    paths = [
        os.path.join(
            tempFolderPath,
            jpgFilePattern.format(mission=mission, roll=roll, frame=frame))
        for frame in frames
    ]

    print('downloading sequence frames', fromFrame, 'to', toFrame, 'of',
          mission + '-' + roll)

    _, errors = downloadFiles(urls, paths, retFailures=True)

    # We ignore 404s for frames which are not the start or end.
    # This is because there are sometimes gaps in frame numbers.
    # E.g. for ISS030 the frames 115426 to 115442 don't exist within the
    #      sequence 114986 to 115574
    failures = []
    frameGaps = []
    for url, error in errors:
        if isinstance(error, urllib.error.HTTPError):
            i = urls.index(url)
            frame = frames[i]
            if error.code == 404:
                if fromFrame < frame < toFrame:
                    frameGaps.append(frame)
                    continue
                else:
                    raise ValueError('Start/end frame ' + str(frame) +
                                     ' not downloadable (404)')
            else:
                failures.append((url, error.code))
        else:
            failures.append((url, error))

    if len(failures) > 0:
        return False, failures

    for filename in os.listdir(tempFolderPath):
        shutil.move(os.path.join(tempFolderPath, filename), folderPath)

    os.rmdir(tempFolderPath)

    meta = SequenceMetadata(mission=mission,
                            roll=roll,
                            fromFrame=fromFrame,
                            toFrame=toFrame,
                            pattern=jpgFilePattern,
                            frameGaps=frameGaps,
                            lensDistortionCorrected=lensDistortionCorrected)
    storeMetaData(metadataPath, meta)

    return meta, []
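
Same return contract as the RAW variant; this sketch also shows how the recorded frame gaps can be inspected afterwards (arguments are placeholders):

# Placeholder arguments, for illustration only.
meta, failures = _downloadImageSequenceJpg('seq/jpg', 'ISS030', 114986, 115574)
if meta and meta.frameGaps:
    print('missing frames within the sequence:', meta.frameGaps)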
Example 6
def main():
    args = parseargs()
        
    # first, try to figure out which provider the data came from
    dataPath = args.data
    dataFiles = os.listdir(dataPath)
    if any(f == 'api.json' for f in dataFiles):
        provider = ISSMappingProvider(dataPath, altitude=args.altitude,
                                      noRawPostprocessCaching=True,
                                      raw_bps=args.bps, raw_auto_bright=args.autobright,
                                      raw_gamma=None if args.correctgamma else (1,1),
                                      offline=True)
    
    elif any(f.startswith('thg_l1_') for f in dataFiles):
        if args.format == Format.cdf:
            print('Note that THEMIS files are already in CDF format where each file '
                  'contains 1h of images. With this script a CDF file for each image '
                  'is created.')
        if not (args.start and args.end):
            print('For THEMIS data you have to specify --start and --end')
            sys.exit(1)
                        
        provider = ThemisMappingProvider(dataPath, dataPath, altitude=args.altitude,
                                         offline=True)
    
    else:
        raise NotImplementedError('Not recognized as THEMIS or ESA ISS data')
    
    mappings = provider.getSequence(args.start, args.end)
    
    if args.resample:
        if args.grid == Grid.geo:
            resample_ = resample
        elif args.grid == Grid.mag:
            resample_ = resampleMLatMLT
        resample_ = partial(resample_, arcsecPerPx=args.resolution)
            
        mappings = map(resample_, mappings)
    
    if args.format == Format.cdf:
        import auromat.export.cdf
        export = auromat.export.cdf.write
        ext = '.cdf'
        
    elif args.format == Format.netcdf:
        import auromat.export.netcdf
        export = auromat.export.netcdf.write
        ext = '.nc'
    
    export = partial(export, includeBounds=not args.withoutBounds, includeMagCoords=not args.withoutMag,
                     includeGeoCoords=not args.withoutGeo)
        
    makedirs(args.out)
    for mapping in mappings:
        path = os.path.join(args.out, mapping.identifier + ext)
        if os.path.exists(path):
            if args.skip:
                print('skipping', path)
                continue
            elif args.overwrite:
                os.remove(path)
            else:
                print('The file', path, 'already exists.\n'
                      'Please use --skip or --overwrite, or a different output folder.',
                      file=sys.stderr)
                sys.exit(1)
            
        print('storing', path)
        export(path, mapping)
    
    print('Done.')
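
The provider choice above is driven purely by sniffing the data folder; a condensed restatement of just that dispatch step (detectProvider is an illustrative helper, not part of the module):

# Condensed sketch of the detection logic in main(), for clarity.
import os

def detectProvider(dataPath):
    files = os.listdir(dataPath)
    if 'api.json' in files:
        return 'iss'
    if any(f.startswith('thg_l1_') for f in files):
        return 'themis'
    raise NotImplementedError('Not recognized as THEMIS or ESA ISS data')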
Example 7
    def __init__(self,
                 cacheFolder,
                 id_=None,
                 useRaw=True,
                 altitude=110,
                 sequenceInParallel=False,
                 fastCenterCalculation=False,
                 maxTimeOffset=3,
                 raw_white_balance=None,
                 raw_gamma=(1, 1),
                 raw_bps=16,
                 raw_auto_bright=False,
                 noRawPostprocessCaching=True,
                 baseUrl=defaultBaseUrl,
                 offline=False):
        """
        
        :param cacheFolder: folder where images and WCS files are downloaded to
                            Note that each sequence must be in its own folder!
        :param int id_: sequence id, can be omitted in later calls due to caching
        :param useRaw:
            If True, download raw images and apply the necessary pre-processing
            locally (rotation, lens distortion correction, bad pixel removal).
            This requires rawpy and lensfunpy.
            If the sequence is not available in RAW format, then JPEGs will
            be downloaded instead.
        :param altitude: in km
        :param raw_white_balance: (r,g,b) tuple of multipliers for each color.
            If not given, uses white balance from data set (corresponds to daylight). 
        :param raw_gamma: (inv_gamma,toe_slope) tuple. 
            For visually pleasing images, use (1/2.222,4.5), see recommendation BT.709.
            For linear images (photon count corresponds linearly to color values), use (1,1).
        :param raw_bps: 8 or 16, bits per color sample
        :param raw_auto_bright: 
            If True, automatically brightens the image such that 1% of all pixels are
            fully saturated. Note that this may destroy useful image information.
        :param noRawPostprocessCaching: 
            If True, then postprocessed RAW files are not written to disk as .tiff files.
            This saves disk space but requires re-computation if a mapping is requested
            multiple times. If False, then postprocessed images are cached and must be
            deleted with removePostProcessedImages() if different RAW postprocessing
            settings should be used.
        :param baseUrl: API base url to the mapping sequences
        :param offline: if True, then missing data is not automatically downloaded,
                        instead an exception is raised
        """
        if raw_bps == 16 and not noRawPostprocessCaching:
            noRawPostprocessCaching = True
            print('noRawPostprocessCaching=False cannot currently be used together '
                  'with raw_bps=16; the parameter is implicitly set to True')
            # This is because there is data corruption when saving 16-bit TIFF images
            # with scikit-image (0.11dev) and Pillow (2.6). The exact cause is not known.

        BaseMappingProvider.__init__(self, maxTimeOffset=maxTimeOffset)
        makedirs(cacheFolder)
        self.cacheFolder = cacheFolder
        self.noRawPostprocessCaching = noRawPostprocessCaching
        self.offline = offline

        self.apiDataPath = os.path.join(cacheFolder, 'api.json')
        if not os.path.exists(self.apiDataPath) and not offline:
            if not id_:
                raise ValueError(
                    'The id_ parameter must be given the first time')
            url = baseUrl + str(id_)
            downloadFile(url, self.apiDataPath)
        with open(self.apiDataPath, 'r') as fp:
            self.apiData = json.load(fp, object_hook=_parseDates)

        self.metadataPath = os.path.join(cacheFolder, 'metadata.json')
        if not os.path.exists(self.metadataPath) and not offline:
            downloadFile(self.apiData['metadata_uri'], self.metadataPath)
        with open(self.metadataPath, 'r') as fp:
            self.metadata = json.load(fp, object_hook=_parseDates)

        self.apiImages = OrderedDict(
            sorted(self.apiData['images'].items(),
                   key=lambda k_v: k_v[1]['date']))
        self.useRaw = useRaw and 'raw_extension' in self.apiData
        self.altitude = altitude
        self.sequenceInParallel = sequenceInParallel
        self.fastCenterCalculation = fastCenterCalculation

        self.processedImagePaths = {}
        self.wcsPaths = {}
        if self.useRaw:
            self.raw_white_balance = raw_white_balance
            self.raw_gamma = raw_gamma
            self.raw_bps = raw_bps
            self.raw_no_auto_bright = not raw_auto_bright
            self.rawImagePaths = {}
            self.badPixelsPath = os.path.join(cacheFolder, 'bad_pixels.gz')
            if not os.path.exists(self.badPixelsPath) and not offline:
                downloadFile(self.apiData['raw_bad_pixels_uri'],
                             self.badPixelsPath)
            self.badPixels = np.loadtxt(self.badPixelsPath, int)
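
A hedged construction example; the cache folder and sequence id are placeholders, and on the first call id_ must be given so that api.json can be fetched:

# Placeholder folder and id; later calls on the same cacheFolder may omit id_.
provider = ISSMappingProvider('cache/seq123', id_=123,
                              useRaw=True, raw_gamma=(1, 1), raw_bps=16)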
Example 8
def _solveStarfield(imagePath,
                    tmpDir=None,
                    keepTempFiles=False,
                    timeout=60 * 1,
                    useSextractor=True,
                    downsample=2,
                    sigma=None,
                    searchField=None,
                    arcsecPerPxLowHigh=None,
                    pixelError=10,
                    oddsToSolve=None,
                    plotsBgImagePath=None,
                    noPlots=False,
                    astrometryBinPath=None,
                    useModifiedPath=False,
                    verbose=False):
    """
    NOTE: The astrometry/bin folder must be in the PATH. 
    
    :param imagePath:
    :param timeout: time in seconds after which the solving process is terminated
                    If solving fails, increase this. astrometry.net tries to solve
                    using bright stars first, and if these are not in the center then
                    the distortion may be too high and only fainter stars might lead
                    to a successful solve, which in turn needs longer processing time.
    :param useSextractor: sextractor sometimes delivers better results than
                          the built-in star extraction of astrometry.net (image2xy)
    :param int|None downsample: Whether and how much astrometry should downsample the image before solving
    :param sigma: noise level override
    :param (ra,dec,radius)|None searchField: search only within 'radius' of the field center 'ra','dec', all in degrees
    :param str plotsBgImagePath: path to .jpg file to use as background for all plots
    :param tuple arcsecPerPxLowHigh: tuple of lower and upper arcsecPerPx to restrict search
    :param int pixelError: size of pixel positional error, use higher values (e.g. 10)
                           if image contains star trails (ISS images)
    :param oddsToSolve: default 1e9, see astrometry.net docs
    :param bool useModifiedPath: invokes astrometry.net with /usr/bin/env PATH=os.environ['PATH']
                                 This may be useful when the PATH was modified after launching Python, Unix only.
    :rtype: wcs header, or None if no solution was found
    """
    # adapt these constants if necessary for newer astrometry.net versions
    solvefieldName = 'solve-field'
    backendName = 'astrometry-engine'

    if tmpDir is None and keepTempFiles is True:
        print("solveStarfield: tmpDir is not set but keepTempFiles is true, "
              "this doesn't make much sense")

    if tmpDir is None:
        tmpDir = tempfile.mkdtemp()
    tmpTmpDir = os.path.join(tmpDir, "tmp")
    makedirs(tmpTmpDir)

    imageBase = os.path.splitext(os.path.basename(imagePath))[0]

    solvedPath = os.path.join(tmpDir, imageBase + ".solved")
    wcsPath = os.path.join(tmpDir, imageBase + ".wcs")
    matchPath = os.path.join(tmpDir, imageBase + ".match")
    indxXyPath = os.path.join(tmpDir, imageBase + ".xyls")
    corrPath = os.path.join(tmpDir, imageBase + ".corr")
    logPath = os.path.join(tmpDir, imageBase + ".log")

    if not astrometryBinPath:
        astrometryBinPath = ''

    args = [os.path.join(astrometryBinPath, solvefieldName)]

    if useModifiedPath:
        args = ['/usr/bin/env', 'PATH=' + os.environ['PATH']] + args

    args += ["--cpulimit", str(timeout)
             ]  # see https://github.com/dstndstn/astrometry.net/issues/6
    args += ["--dir", tmpDir, "--temp-dir", tmpTmpDir, "--no-delete-temp"]
    # TODO there are no params for the plot filenames (..-indx.png and ..-objs.png)
    #      and also not for .axy
    args += ["--wcs", wcsPath, "--solved", solvedPath, "--match", matchPath]
    args += ["--index-xyls", indxXyPath, "--corr", corrPath]
    args += ["--crpix-center"]
    args += ["--no-background-subtraction"]

    if PYFITS_TOO_OLD:
        args += ["--no-remove-lines"]
        args += ["--no-fits2fits"]

    if arcsecPerPxLowHigh is not None:
        arcsecLow, arcsecHigh = arcsecPerPxLowHigh
        args += ["--scale-low", str(arcsecLow),
                 "--scale-high", str(arcsecHigh),
                 "--scale-units", "arcsecperpix"]

    args += ["--no-tweak"
             ]  # no SIP polynomial; we correct lens distortion before-hand
    args += ["--pixel-error", str(pixelError)]

    if oddsToSolve:
        args += ["--odds-to-solve", str(oddsToSolve)]

    if verbose:
        args += ["--verbose"]

    if sigma:
        args += ["--sigma", str(sigma)]

    if searchField:
        ra, dec, radius = searchField
        args += ["--ra", str(ra), "--dec", str(dec), "--radius", str(radius)]

    if downsample:
        args += ["--downsample", str(downsample)]

    if useSextractor:
        args += ["--use-sextractor"]

    if plotsBgImagePath:
        args += ["--plot-bg", plotsBgImagePath]

    if keepTempFiles:
        print('astrometry files in', tmpDir)
        if noPlots:
            args += ["--no-plots"]
    else:
        args += ["--no-plots"]
        args += ["--new-fits", "none", "--index-xyls", "none",
                 "--rdls", "none", "--corr", "none"]

    args += [imagePath]

    print(' '.join(args))

    def print_and_store_output(out, logPath):
        # work-around the fact that using sys.stdout in subprocess breaks
        # if sys.stdout got redirected to a non-file object (e.g. StringIO)
        with open(logPath, 'w') as logfile:
            logfile.write(' '.join(args) + '\n')
            for line in iter(out.readline, b''):
                line = line.decode(errors='replace')  # stdout yields bytes in Python 3
                print(line, end='')
                logfile.write(line)
            out.close()

    try:
        process = psutil.Popen(args,
                               stdout=subprocess.PIPE,
                               stderr=subprocess.STDOUT,
                               bufsize=1)
        t = Thread(target=print_and_store_output,
                   args=(process.stdout, logPath))
        t.daemon = True  # thread dies with the program, should not be needed if no error occurs
        t.start()

    except OSError as e:
        if e.errno == errno.ENOENT:
            raise RuntimeError('The "' + solvefieldName + '" program from astrometry.net could not be launched. ' + \
                               'Make sure it is in the PATH!')
        else:
            raise

    try:
        process.wait(timeout + 10)
    except:  # psutil.TimeoutExpired or KeyboardInterrupt (Ctrl+C)
        print('astrometry.net timeout reached, killing processes now')

        # psutil >= 3.0 renamed get_children() to children()
        backend = list(
            filter(lambda p: p.name() == backendName,
                   process.children(recursive=True)))
        processes = [process] + backend

        for p in processes:
            print('terminating {} (pid {})'.format(p.name(), p.pid))
            try:
                p.terminate()
            except psutil.NoSuchProcess:
                pass

        # if astrometry is just writing its solution, let it have some time to do so
        t = 5
        _, alive = psutil.wait_procs(processes, t)
        if alive:
            # This shouldn't happen.
            warnings.warn('solve-field or backend did NOT exit in ' + str(t) +
                          's after SIGTERM was sent! ' +
                          '...killing them now and ignoring results')
            for p in processes:
                print('killing {} (pid {})'.format(p.name(), p.pid))
                try:
                    p.kill()
                except psutil.NoSuchProcess:
                    pass
            if not keepTempFiles:
                shutil.rmtree(tmpDir)
            return None

    # FIXME astrometry seems to leave files in an inconsistent state
    #       if it is terminated while persisting results
    #       -> e.g. it seems that the .solved file is written first, and then the rest
    #  -> quick work-around: check for .solved AND .wcs, and catch wcs reading exceptions
    #     may still fail in the end, but the probability is lower
    #  in theory, astrometry should handle SIGTERM accordingly instead of just exiting immediately

    try:
        if not os.path.exists(solvedPath) or not os.path.exists(wcsPath):
            return None
        fitsWcsHeader = auromat.fits.readHeader(wcsPath)
    except Exception as e:
        warnings.warn('error reading wcs file ' + wcsPath)
        print(repr(e))
        return None
    finally:
        if not keepTempFiles:
            shutil.rmtree(tmpDir)

    return fitsWcsHeader
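
A minimal invocation sketch, assuming solve-field is on the PATH as the docstring requires; the scale range and pixel error are illustrative values, not recommendations:

# Illustrative parameters only; requires astrometry.net's solve-field in PATH.
header = _solveStarfield('frame.jpg', timeout=300,
                         arcsecPerPxLowHigh=(30, 60), pixelError=10)
if header is None:
    print('no astrometric solution found')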
Example 9
    def updateTLEs(self, noradId, tlePath):
        """
        Updates the TLEs to the latest available data.
        
        :param str|int noradId:
        :param str tlePath:
        :return: True if new TLEs were added, False otherwise
        :raises DownloadError: on any network error
        :raises ValueError: if the downloaded TLEs could not be correctly read
        """
        if os.path.exists(tlePath):
            mtime = datetime.fromtimestamp(os.path.getmtime(tlePath))
            if datetime.now() - mtime < self.minUpdateInterval:
                return False

            # read latest available epoch
            with open(tlePath, 'r') as t:
                tles = t.readlines()
            lastTle = ephem.readtle('foo', tles[-2], tles[-1])
            year, month, day, h, m, s = lastTle._epoch.tuple()
            date = datetime(year, month, day, h, m,
                            int(round(s))).strftime('%Y-%m-%d %H:%M:%S')
        else:
            date = '0000-00-00'
            tles = []

        query = 'class/tle/NORAD_CAT_ID/%s/EPOCH/>%s/orderby/EPOCH asc/format/tle' % (
            noradId, date)
        response = self.query(query)
        newTles = response.splitlines()

        if len(newTles) == 0:
            return False

        if len(newTles) % 2 != 0:
            raise ValueError(
                'The number of returned TLE lines from space-track.org is not a multiple of 2'
            )

        # filter out TLEs where the checksum is missing
        # e.g. within the ISS TLEs sporadically between 2001-2004
        # Note that appending a 0 as checksum isn't enough to satisfy pyephem,
        # but it would be enough for the sgp4 library.
        # TODO recalculate checksum
        newTles = [line for line in newTles if len(line) == 69]

        tleCount = len(newTles) // 2
        for i in range(tleCount):
            try:
                ephem.readtle('foo', newTles[i * 2], newTles[i * 2 + 1])
            except Exception as e:
                raise ValueError("The following TLE couldn't be read: [" +
                                 newTles[i * 2] + ', ' + newTles[i * 2 + 1] +
                                 '] (reason: ' + repr(e) + ')')

        makedirs(os.path.dirname(tlePath))
        with open(tlePath, 'a') as t:
            t.write('\n'.join(newTles))
            t.write('\n')

        return True
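
A usage sketch; 'st' stands for an instance of the (not shown) class that defines updateTLEs and its query method:

# 'st' is assumed to be an instance of the surrounding space-track client class.
if st.updateTLEs(25544, 'tle/25544.txt'):
    print('new TLEs appended')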