Code Example #1
def archive_log_files(order_id, product_id):
    """Archive the log files for the current job
    """

    logger = EspaLogging.get_logger(settings.PROCESSING_LOGGER)

    try:
        # Determine the destination path for the logs
        output_dir = Environment().get_distribution_directory()
        destination_path = os.path.join(output_dir, 'logs', order_id)
        # Create the path
        utilities.create_directory(destination_path)

        # Job log file
        logfile_path = EspaLogging.get_filename(settings.PROCESSING_LOGGER)
        full_logfile_path = os.path.abspath(logfile_path)
        log_name = os.path.basename(full_logfile_path)
        # Determine full destination
        destination_file = os.path.join(destination_path, log_name)
        # Copy it
        shutil.copyfile(full_logfile_path, destination_file)

        # Mapper log file
        full_logfile_path = os.path.abspath(MAPPER_LOG_FILENAME)
        final_log_name = '-'.join([MAPPER_LOG_PREFIX, order_id, product_id])
        final_log_name = '.'.join([final_log_name, 'log'])
        # Determine full destination
        destination_file = os.path.join(destination_path, final_log_name)
        # Copy it
        shutil.copyfile(full_logfile_path, destination_file)

    except Exception:
        # Log and swallow the exception: we are at the end of processing,
        # and a failure to archive the logs should not fail the job
        logger.exception("Exception encountered and follows")
Code Example #2
File: ondemand_mapper.py  Project: NGenetzky/espa
def set_product_error(server, order_id, product_id, processing_location):
    '''
    Description:
        Call the xmlrpc server routine to set a product request to error.
        Provides a sleep/retry implementation to work around transient
        errors, so that we do not end up with requests that have failed
        but still show a status of processing.
    '''

    if server is not None:
        logger = EspaLogging.get_logger(settings.PROCESSING_LOGGER)

        attempt = 0
        sleep_seconds = settings.DEFAULT_SLEEP_SECONDS
        while True:
            try:
                # START - DEBUG
                if product_id is None:
                    logger.info("DEBUG: Product ID is [None]")
                else:
                    logger.info("DEBUG: Product ID is [%s]" % product_id)
                if order_id is None:
                    logger.info("DEBUG: Order ID is [None]")
                else:
                    logger.info("DEBUG: Order ID is [%s]" % order_id)
                if processing_location is None:
                    logger.info("DEBUG: Processing Location is [None]")
                else:
                    logger.info("DEBUG: Processing Location is [%s]"
                                % processing_location)
                # END - DEBUG

                logged_contents = \
                    EspaLogging.read_logger_file(settings.PROCESSING_LOGGER)

                status = server.set_scene_error(product_id, order_id,
                                                processing_location,
                                                logged_contents)

                if not status:
                    logger.critical("Failed processing xmlrpc call to"
                                    " set_scene_error")
                    return False

                break

            except Exception as e:
                logger.critical("Failed processing xmlrpc call to"
                                " set_scene_error")
                logger.exception("Exception encountered and follows")

                if attempt < settings.MAX_SET_SCENE_ERROR_ATTEMPTS:
                    sleep(sleep_seconds)  # sleep before trying again
                    attempt += 1
                    sleep_seconds = int(sleep_seconds * 1.5)
                    continue
                else:
                    return False
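
The retry loop above follows a common sleep/backoff pattern. A minimal
standalone sketch of the same idea (an illustration, not project code):

from time import sleep

def call_with_retries(func, max_attempts, initial_sleep_seconds):
    # Retry func() with a delay that grows by 1.5x after each failure,
    # mirroring the attempt/sleep_seconds bookkeeping used above
    sleep_seconds = initial_sleep_seconds
    for attempt in range(max_attempts + 1):
        try:
            return func()
        except Exception:
            if attempt >= max_attempts:
                raise
            sleep(sleep_seconds)
            sleep_seconds = int(sleep_seconds * 1.5)
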
Code Example #3
File: staging.py  Project: NGenetzky/espa
def untar_data(source_file, destination_directory):
    '''
    Description:
        Using tar extract the file contents into a destination directory.

    Notes:
        Works with '*.tar.gz' and '*.tar' files.
    '''

    logger = EspaLogging.get_logger(settings.PROCESSING_LOGGER)

    # Build the tar command to extract the contents into the destination
    # directory
    cmd = ' '.join(['tar', '--directory', destination_directory,
                    '-xvf', source_file])

    logger.info("Unpacking [%s] to [%s]"
                % (source_file, destination_directory))

    # Unpack the data and raise any errors
    output = ''
    try:
        output = utilities.execute_cmd(cmd)
    except Exception as e:
        logger.error("Failed to unpack data")
        raise e
    finally:
        if len(output) > 0:
            logger.info(output)
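
Shelling out to tar works here, but the same extraction could be done
in-process with the standard library. A sketch using tarfile (an
alternative, not what the project does):

import tarfile

def untar_data_inprocess(source_file, destination_directory):
    # tarfile.open auto-detects compression, so this handles both
    # '*.tar' and '*.tar.gz' archives
    with tarfile.open(source_file) as tar:
        tar.extractall(path=destination_directory)
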
Code Example #4
File: warp.py  Project: NGenetzky/espa
def reformat(metadata_filename, work_directory, input_format, output_format):
    '''
    Description:
      Re-format the bands to the specified format using our raw binary tools
      or gdal, whichever is appropriate for the task.

      Input espa:
          Output Formats: envi(espa), gtiff, and hdf
    '''

    logger = EspaLogging.get_logger(settings.PROCESSING_LOGGER)

    # Don't do anything if they match
    if input_format == output_format:
        return

    # Change to the working directory
    current_directory = os.getcwd()
    os.chdir(work_directory)

    try:
        # Convert from our internal ESPA/ENVI format to GeoTIFF
        if input_format == 'envi' and output_format == 'gtiff':
            # Drop the '.xml' suffix (str.rstrip would strip characters,
            # not the suffix)
            gtiff_name = metadata_filename.split('.xml')[0]
            # Call with deletion of source files
            cmd = ' '.join(['convert_espa_to_gtif', '--del_src_files',
                            '--xml', metadata_filename,
                            '--gtif', gtiff_name])

            output = ''
            try:
                output = utilities.execute_cmd(cmd)

                # Rename the XML file back to *.xml from *_gtif.xml
                meta_gtiff_name = metadata_filename.split('.xml')[0]
                meta_gtiff_name = ''.join([meta_gtiff_name, '_gtif.xml'])

                os.rename(meta_gtiff_name, metadata_filename)
            except Exception as e:
                raise ee.ESPAException(ee.ErrorCodes.reformat,
                                       str(e)), None, sys.exc_info()[2]
            finally:
                if len(output) > 0:
                    logger.info(output)

            # Remove all the *.tfw files; since gtiff was chosen, a bunch
            # may be present
            files_to_remove = glob.glob('*.tfw')
            if len(files_to_remove) > 0:
                cmd = ' '.join(['rm', '-rf'] + files_to_remove)
                logger.info(' '.join(['REMOVING TFW DATA COMMAND:', cmd]))

                output = ''
                try:
                    output = utilities.execute_cmd(cmd)
                except Exception as e:
                    raise ee.ESPAException(ee.ErrorCodes.reformat,
                                           str(e)), None, sys.exc_info()[2]
                finally:
                    if len(output) > 0:
                        logger.info(output)
Code Example #5
def get_landsat_metadata(work_dir, product_id):
    '''
    Description:
        Fixes potentially bad MTL file from Landsat and returns the Landsat
        metadata filename to use with the rest of the processing.
    '''
    logger = EspaLogging.get_logger(settings.PROCESSING_LOGGER)

    # Find the metadata file
    metadata_filename = ''

    # Save the current directory and change to the work directory
    current_directory = os.getcwd()
    os.chdir(work_dir)

    try:
        for meta_file in glob.glob('%s_MTL.*' % product_id):
            if ('old' not in meta_file and
                    not meta_file.startswith('lnd')):

                # Save the filename and break out of the directory loop
                metadata_filename = meta_file
                break

        if metadata_filename == '':
            msg = "Could not locate the Landsat MTL file in %s" % work_dir
            raise RuntimeError(msg)

        logger.info("Located MTL file:%s" % metadata_filename)

        # Backup the original file
        shutil.copy(metadata_filename, ''.join([metadata_filename, '.old']))

        file_data = list()
        # Read in the file and write it back out to get rid of binary
        # characters at the end of some of the GLS metadata files
        with open(metadata_filename, 'r') as metadata_fd:
            file_data = metadata_fd.readlines()

        data_buffer = StringIO()
        for line in file_data:
            data_buffer.write(line)
        fixed_data = data_buffer.getvalue()

        # Fix the stupid error where the filename has a bad extension
        if metadata_filename.endswith('.TIF'):
            metadata_filename = metadata_filename.replace('.TIF', '.txt')

        # Write the newly formatted file out
        with open(metadata_filename, 'w+') as metadata_fd:
            metadata_fd.write(fixed_data)

    finally:
        # Change back to the original directory
        os.chdir(current_directory)

    return metadata_filename
Code Example #6
File: distribution.py  Project: NGenetzky/espa
def distribute_product_local(product_name, source_path, packaging_path):

    logger = EspaLogging.get_logger(settings.PROCESSING_LOGGER)

    # Deliver the product files
    # Attempt X times sleeping between each attempt
    sleep_seconds = settings.DEFAULT_SLEEP_SECONDS
    max_number_of_attempts = settings.MAX_DISTRIBUTION_ATTEMPTS
    max_package_attempts = settings.MAX_PACKAGING_ATTEMPTS
    max_delivery_attempts = settings.MAX_DELIVERY_ATTEMPTS

    attempt = 0
    product_file = 'ERROR'
    cksum_file = 'ERROR'

    while True:
        try:
            # Package the product files to the online cache location
            # Attempt X times sleeping between each sub_attempt
            sub_attempt = 0
            while True:
                try:
                    (product_file, cksum_file,
                     local_cksum_value) = package_product(source_path,
                                                          packaging_path,
                                                          product_name)
                except Exception as e:
                    logger.exception("An exception occurred processing %s"
                                     % product_name)
                    if sub_attempt < max_package_attempts:
                        sleep(sleep_seconds)  # sleep before trying again
                        sub_attempt += 1
                        continue
                    else:
                        raise ee.ESPAException(ee.ErrorCodes.packaging_product,
                                               str(e)), None, sys.exc_info()[2]
                break

            # Always log where we placed the files
            logger.info("Delivered product to location %s"
                        " and checksum location %s" % (product_file,
                                                       cksum_file))
        except Exception as e:
            if attempt < max_number_of_attempts:
                sleep(sleep_seconds)  # sleep before trying again
                attempt += 1
                # adjust for next set
                sleep_seconds = int(sleep_seconds * 1.5)
                continue
            else:
                # May already be an ESPAException so don't override that
                raise e
        break

    return (product_file, cksum_file)
Code Example #7
File: transfer.py  Project: NGenetzky/espa
def transfer_file(source_host, source_file,
                  destination_host, destination_file,
                  source_username=None, source_pw=None,
                  destination_username=None, destination_pw=None):
    '''
    Description:
      Using cp/FTP/SCP transfer a file from a source location to a destination
      location.

    Notes:
      We are not doing anything significant here other than some logic and
      a fallback to SCP if FTP fails.
    '''

    logger = EspaLogging.get_logger(settings.PROCESSING_LOGGER)

    logger.info("Transfering [%s:%s] to [%s:%s]"
                % (source_host, source_file,
                   destination_host, destination_file))

    # If both source and destination are localhost we can just copy the data
    if source_host == 'localhost' and destination_host == 'localhost':
        shutil.copyfile(source_file, destination_file)
        return

    # If both source and destination hosts are the same, we can use ssh to copy
    # the files locally on the remote host
    if source_host == destination_host:
        remote_copy_file_to_file(source_host, source_file, destination_file)
        return

    # Try FTP first before SCP if usernames and passwords are provided
    if source_username is not None and source_pw is not None:
        try:
            ftp_from_remote_location(source_username, source_pw, source_host,
                                     source_file, destination_file)
            return
        except Exception as e:
            logger.warning("FTP failures will attempt transfer using SCP")
            logger.warning("FTP Errors: %s" % str(e))

    elif destination_username is not None and destination_pw is not None:
        try:
            ftp_to_remote_location(destination_username, destination_pw,
                                   source_file, destination_host,
                                   destination_file)
            return
        except Exception as e:
            logger.warning("FTP failures will attempt transfer using SCP")
            logger.warning("FTP Errors: %s" % str(e))

    # As a last resort try SCP
    scp_transfer_file(source_host, source_file,
                      destination_host, destination_file)
Code Example #8
File: transfer.py  Project: NGenetzky/espa
def scp_transfer_file(source_host, source_file,
                      destination_host, destination_file):
    '''
    Description:
      Using SCP transfer a file from a source location to a destination
      location.

    Note:
      - It is assumed ssh has been setup for access between the localhost
        and destination system
      - If wild cards are to be used with the source, then the destination
        file must be a directory.  ***No checking is performed in this code***
    '''

    logger = EspaLogging.get_logger(settings.PROCESSING_LOGGER)

    if source_host == destination_host:
        msg = "source and destination host match unable to scp"
        logger.error(msg)
        raise Exception(msg)

    cmd = ['scp', '-q', '-o', 'StrictHostKeyChecking=no', '-c', 'arcfour',
           '-C']

    # Build the source portion of the command
    # Single quote the source to allow for wild cards
    if source_host == 'localhost':
        cmd.append(source_file)
    else:
        cmd.append("'%s:%s'" % (source_host, source_file))

    # Build the destination portion of the command
    if destination_host == 'localhost':
        cmd.append(destination_file)
    else:
        cmd.append('%s:%s' % (destination_host, destination_file))

    cmd = ' '.join(cmd)

    # Transfer the data and raise any errors
    output = ''
    try:
        output = utilities.execute_cmd(cmd)
    except Exception as e:
        if len(output) > 0:
            logger.info(output)
        logger.error("Failed to transfer data")
        raise e

    logger.info("Transfer complete - SCP")
Code Example #9
File: transfer.py  Project: NGenetzky/espa
def ftp_from_remote_location(username, pw, host, remotefile, localfile):
    '''
    Author: David Hill

    Date: 12/5/13

    Description:
      Transfers files from a remote location to the local machine using ftplib.

    Parameters:
      username = Username for ftp account
      pw = Password for ftp account
      host = The ftp server host
      remotefile = The file to transfer
      localfile = Full path to where the local file should be created.
                  (Parent directories must exist)

    Returns: None

    Errors: Raises Exception() in the event of error
    '''

    logger = EspaLogging.get_logger(settings.PROCESSING_LOGGER)

    # Make sure the remote file path is absolute, otherwise ftp will choke
    if not remotefile.startswith('/'):
        remotefile = ''.join(['/', remotefile])

    pw = urllib2.unquote(pw)

    url = 'ftp://%s/%s' % (host, remotefile)

    logger.info("Transferring file from %s to %s" % (url, localfile))
    ftp = None
    try:
        with open(localfile, 'wb') as loc_file:
            def callback(data):
                loc_file.write(data)

            ftp = ftplib.FTP(host, timeout=60)
            ftp.login(user=username, passwd=pw)
            ftp.set_debuglevel(0)
            ftp.retrbinary(' '.join(['RETR', remotefile]), callback)

    finally:
        if ftp:
            ftp.quit()

    logger.info("Transfer complete - FTP")
Code Example #10
File: warp.py  Project: spgriffin/espa-processing
def warp_image(source_file, output_file,
               base_warp_command=None,
               resample_method='near',
               pixel_size=None,
               no_data_value=None):
    '''
    Description:
      Executes the warping command on the specified source file
    '''

    logger = EspaLogging.get_logger(settings.PROCESSING_LOGGER)

    try:
        # Turn GDAL PAM off to prevent *.aux.xml files
        os.environ['GDAL_PAM_ENABLED'] = 'NO'

        cmd = copy.deepcopy(base_warp_command)

        # Resample method to use
        cmd.extend(['-r', resample_method])

        # Resize the pixels
        if pixel_size is not None:
            cmd.extend(['-tr', str(pixel_size), str(pixel_size)])

        # Specify the fill/nodata value
        if no_data_value is not None:
            cmd.extend(['-srcnodata', no_data_value])
            cmd.extend(['-dstnodata', no_data_value])

        # Now add the filenames
        cmd.extend([source_file, output_file])

        cmd = ' '.join(cmd)
        logger.info("Warping %s with %s" % (source_file, cmd))

        output = utilities.execute_cmd(cmd)
        if len(output) > 0:
            logger.info(output)

    finally:
        # Remove the environment variable we set above
        del os.environ['GDAL_PAM_ENABLED']
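
A hedged usage sketch: base_warp_command is built by the caller, so the
gdalwarp invocation below is only an assumed example (the -r, -tr,
-srcnodata and -dstnodata flags appended by warp_image are standard
gdalwarp options):

# Hypothetical base command; the real one is assembled elsewhere
base_cmd = ['gdalwarp', '-multi',
            '-t_srs', "'+proj=longlat +datum=WGS84'"]
warp_image('sr_band1.img', 'sr_band1_warped.img',
           base_warp_command=base_cmd,
           resample_method='near',
           pixel_size=30.0,
           no_data_value='-9999')
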
Code Example #11
File: transfer.py  Project: NGenetzky/espa
def ftp_to_remote_location(username, pw, localfile, host, remotefile):
    '''
    Author: David Hill

    Date: 12/5/13

    Description:
      Transfers files from the local machine to a remote location using ftplib.

    Parameters:
      username = Username for ftp account
      pw = Password for ftp account
      host = The ftp server host
      remotefile = Full path of where to store the file
                   (Directories must exist)
      localfile = Full path of file to transfer out

    Returns: None

    Errors: Raises Exception() in the event of error
    '''

    logger = EspaLogging.get_logger(settings.PROCESSING_LOGGER)

    # Make sure the remote file path is absolute, otherwise ftp will choke
    if not remotefile.startswith('/'):
        remotefile = ''.join(['/', remotefile])

    pw = urllib2.unquote(pw)

    logger.info("Transferring file from %s to %s"
                % (localfile, 'ftp://%s/%s' % (host, remotefile)))

    ftp = None

    try:
        ftp = ftplib.FTP(host, user=username, passwd=pw, timeout=60)
        with open(localfile, 'rb') as tmp_fd:
            ftp.storbinary(' '.join(['STOR', remotefile]), tmp_fd, 1024)
    finally:
        if ftp:
            ftp.quit()

    logger.info("Transfer complete - FTP")
Code Example #12
def get_sleep_duration(start_time):
    """Logs details and returns number of seconds to sleep
    """

    logger = EspaLogging.get_logger(settings.PROCESSING_LOGGER)

    # Determine if we need to sleep
    end_time = datetime.datetime.now()
    seconds_elapsed = (end_time - start_time).seconds
    logger.info('Processing Time Elapsed {0} Seconds'.format(seconds_elapsed))

    seconds_to_sleep = 1
    if seconds_elapsed < settings.MIN_REQUEST_DURATION_IN_SECONDS:
        # Joe-Developer doesn't want to wait so check and skip
        # This directory will not exist for HADOOP processing
        if not os.path.isdir('unittests'):
            seconds_to_sleep = (settings.MIN_REQUEST_DURATION_IN_SECONDS -
                                seconds_elapsed)

    logger.info('Sleeping An Additional {0} Seconds'.format(seconds_to_sleep))

    return seconds_to_sleep
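
One caveat worth noting: timedelta.seconds only covers the time-of-day
component of the difference, so a job running longer than a day would
under-report its elapsed time. A tiny demonstration:

import datetime

delta = datetime.timedelta(days=1, seconds=5)
print(delta.seconds)               # 5 -- the full day is ignored
print(int(delta.total_seconds()))  # 86405
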
Code Example #13
File: transfer.py  Project: NGenetzky/espa
def remote_copy_file_to_file(source_host, source_file, destination_file):
    '''
    Description:
      Use unix 'cp' to copy a file from one place to another on a remote
      machine using ssh.
    '''

    logger = EspaLogging.get_logger(settings.PROCESSING_LOGGER)

    cmd = ' '.join(['ssh', '-q', '-o', 'StrictHostKeyChecking=no',
                    source_host, 'cp', source_file, destination_file])

    # Transfer the data and raise any errors
    output = ''
    try:
        output = utilities.execute_cmd(cmd)
    except Exception as e:
        logger.error("Failed to copy file")
        raise e
    finally:
        if len(output) > 0:
            logger.info(output)

    logger.info("Transfer complete - SSH-CP")
Code Example #14
File: transfer.py  Project: NGenetzky/espa
def move_files_to_directory(source_files, destination_directory):
    '''
    Description:
      Move files from one place to another on the localhost.
    '''

    logger = EspaLogging.get_logger(settings.PROCESSING_LOGGER)

    if type(source_files) == str:
        filename = os.path.basename(source_files)

        new_name = os.path.join(destination_directory, filename)

        os.rename(source_files, new_name)

    elif type(source_files) == list:
        for source_file in source_files:
            filename = os.path.basename(source_file)

            new_name = os.path.join(destination_directory, filename)

            os.rename(source_file, new_name)

    logger.info("Transfer complete - MOVE")
Code Example #15
File: transfer.py  Project: NGenetzky/espa
def copy_files_to_directory(source_files, destination_directory):
    '''
    Description:
      Use unix 'cp' to copy files from one place to another on the localhost.
    '''

    logger = EspaLogging.get_logger(settings.PROCESSING_LOGGER)

    if type(source_files) == list:
        for source_file in source_files:
            cmd = ' '.join(['cp', source_file, destination_directory])

            # Transfer the data and raise any errors
            output = ''
            try:
                output = utilities.execute_cmd(cmd)
            except Exception as e:
                logger.error("Failed to copy file")
                raise e
            finally:
                if len(output) > 0:
                    logger.info(output)

    logger.info("Transfer complete - CP")
Code Example #16
File: warp.py  Project: spgriffin/espa-processing
def update_espa_xml(parms, xml, xml_filename):

    logger = EspaLogging.get_logger(settings.PROCESSING_LOGGER)

    try:
        # Default the datum to WGS84
        datum = settings.WGS84
        if parms['datum'] is not None:
            datum = parms['datum']

        bands = xml.get_bands()
        for band in bands.band:
            img_filename = band.get_file_name()
            logger.info("Updating XML for %s" % img_filename)

            ds = gdal.Open(img_filename)
            if ds is None:
                msg = "GDAL failed to open (%s)" % img_filename
                raise RuntimeError(msg)

            try:
                ds_band = ds.GetRasterBand(1)
                ds_transform = ds.GetGeoTransform()
                ds_srs = osr.SpatialReference()
                ds_srs.ImportFromWkt(ds.GetProjection())
            except Exception as excep:
                raise ee.ESPAException(ee.ErrorCodes.warping,
                                       str(excep)), None, sys.exc_info()[2]

            projection_name = ds_srs.GetAttrValue('PROJECTION')

            number_of_lines = float(ds_band.YSize)
            number_of_samples = float(ds_band.XSize)
            # Need to abs these because they come from the transform,
            # which may be correct for the transform,
            # but not how humans understand it
            x_pixel_size = abs(ds_transform[1])
            y_pixel_size = abs(ds_transform[5])

            del ds_band
            del ds

            # Update the band information in the XML file
            band.set_nlines(number_of_lines)
            band.set_nsamps(number_of_samples)
            band_pixel_size = band.get_pixel_size()
            band_pixel_size.set_x(x_pixel_size)
            band_pixel_size.set_y(y_pixel_size)

            # For sanity report the resample method applied to the data
            resample_method = band.get_resample_method()
            logger.info("RESAMPLE METHOD [%s]" % resample_method)

            # We only support one unit type for each projection
            if projection_name is not None:
                if projection_name.lower().startswith('transverse_mercator'):
                    band_pixel_size.set_units('meters')
                elif projection_name.lower().startswith('polar'):
                    band_pixel_size.set_units('meters')
                elif projection_name.lower().startswith('albers'):
                    band_pixel_size.set_units('meters')
                elif projection_name.lower().startswith('sinusoidal'):
                    band_pixel_size.set_units('meters')
            else:
                # Must be Geographic Projection
                band_pixel_size.set_units('degrees')

        ######################################################################
        # Fix the projection information for the warped data
        ######################################################################
        gm = xml.get_global_metadata()

        # If the image extents were changed, then the scene center time is
        # meaningless so just remove it
        # We don't have any way to calculate a new one
        if parms['image_extents']:
            del gm.scene_center_time
            gm.scene_center_time = None

        # Remove the projection parameter object from the structure so that it
        # can be replaced with the new one
        # Geographic doesn't have one
        if gm.projection_information.utm_proj_params is not None:
            del gm.projection_information.utm_proj_params
            gm.projection_information.utm_proj_params = None

        if gm.projection_information.ps_proj_params is not None:
            del gm.projection_information.ps_proj_params
            gm.projection_information.ps_proj_params = None

        if gm.projection_information.albers_proj_params is not None:
            del gm.projection_information.albers_proj_params
            gm.projection_information.albers_proj_params = None

        if gm.projection_information.sin_proj_params is not None:
            del gm.projection_information.sin_proj_params
            gm.projection_information.sin_proj_params = None

        # Rebuild the projection parameters
        projection_name = ds_srs.GetAttrValue('PROJECTION')
        if projection_name is not None:
            # ----------------------------------------------------------------
            if projection_name.lower().startswith('transverse_mercator'):
                logger.info("---- Updating UTM Parameters")
                # Get the parameter values
                zone = int(ds_srs.GetUTMZone())
                # Get a new UTM projection parameter object and populate it
                utm_projection = metadata_api.utm_proj_params()
                utm_projection.set_zone_code(zone)
                # Add the object to the projection information
                gm.projection_information.set_utm_proj_params(utm_projection)
                # Update the attribute values
                gm.projection_information.set_projection("UTM")
                gm.projection_information.set_datum(settings.WGS84)
            # ----------------------------------------------------------------
            elif projection_name.lower().startswith('polar'):
                logger.info("---- Updating Polar Stereographic Parameters")
                # Get the parameter values
                latitude_true_scale = ds_srs.GetProjParm('latitude_of_origin')
                longitude_pole = ds_srs.GetProjParm('central_meridian')
                false_easting = ds_srs.GetProjParm('false_easting')
                false_northing = ds_srs.GetProjParm('false_northing')
                # Get a new PS projection parameter object and populate it
                ps_projection = metadata_api.ps_proj_params()
                ps_projection.set_latitude_true_scale(latitude_true_scale)
                ps_projection.set_longitude_pole(longitude_pole)
                ps_projection.set_false_easting(false_easting)
                ps_projection.set_false_northing(false_northing)
                # Add the object to the projection information
                gm.projection_information.set_ps_proj_params(ps_projection)
                # Update the attribute values
                gm.projection_information.set_projection("PS")
                gm.projection_information.set_datum(settings.WGS84)
            # ----------------------------------------------------------------
            elif projection_name.lower().startswith('albers'):
                logger.info("---- Updating Albers Equal Area Parameters")
                # Get the parameter values
                standard_parallel1 = ds_srs.GetProjParm('standard_parallel_1')
                standard_parallel2 = ds_srs.GetProjParm('standard_parallel_2')
                origin_latitude = ds_srs.GetProjParm('latitude_of_center')
                central_meridian = ds_srs.GetProjParm('longitude_of_center')
                false_easting = ds_srs.GetProjParm('false_easting')
                false_northing = ds_srs.GetProjParm('false_northing')
                # Get a new ALBERS projection parameter object and populate it
                albers_projection = metadata_api.albers_proj_params()
                albers_projection.set_standard_parallel1(standard_parallel1)
                albers_projection.set_standard_parallel2(standard_parallel2)
                albers_projection.set_origin_latitude(origin_latitude)
                albers_projection.set_central_meridian(central_meridian)
                albers_projection.set_false_easting(false_easting)
                albers_projection.set_false_northing(false_northing)
                # Add the object to the projection information
                gm.projection_information. \
                    set_albers_proj_params(albers_projection)
                # Update the attribute values
                gm.projection_information.set_projection("ALBERS")
                # This projection can have different datums, so use the datum
                # requested by the user
                gm.projection_information.set_datum(datum)
            # ----------------------------------------------------------------
            elif projection_name.lower().startswith('sinusoidal'):
                logger.info("---- Updating Sinusoidal Parameters")
                # Get the parameter values
                central_meridian = ds_srs.GetProjParm('longitude_of_center')
                false_easting = ds_srs.GetProjParm('false_easting')
                false_northing = ds_srs.GetProjParm('false_northing')
                # Get a new SIN projection parameter object and populate it
                sin_projection = metadata_api.sin_proj_params()
                sin_projection.set_sphere_radius(
                    settings.SINUSOIDAL_SPHERE_RADIUS)
                sin_projection.set_central_meridian(central_meridian)
                sin_projection.set_false_easting(false_easting)
                sin_projection.set_false_northing(false_northing)
                # Add the object to the projection information
                gm.projection_information.set_sin_proj_params(sin_projection)
                # Update the attribute values
                gm.projection_information.set_projection("SIN")
                # This projection doesn't have a datum
                del gm.projection_information.datum
                gm.projection_information.datum = None
        else:
            # ----------------------------------------------------------------
            # Must be Geographic Projection
            logger.info("---- Updating Geographic Parameters")
            gm.projection_information.set_projection('GEO')
            gm.projection_information.set_datum(settings.WGS84)  # WGS84 only
            gm.projection_information.set_units('degrees')  # always degrees

        # Fix the UL and LR center of pixel map coordinates
        (map_ul_x, map_ul_y) = convert_imageXY_to_mapXY(0.5, 0.5,
                                                        ds_transform)
        (map_lr_x, map_lr_y) = convert_imageXY_to_mapXY(
            number_of_samples - 0.5, number_of_lines - 0.5, ds_transform)
        for cp in gm.projection_information.corner_point:
            if cp.location == 'UL':
                cp.set_x(map_ul_x)
                cp.set_y(map_ul_y)
            if cp.location == 'LR':
                cp.set_x(map_lr_x)
                cp.set_y(map_lr_y)

        # Fix the UL and LR center of pixel latitude and longitude coordinates
        srs_lat_lon = ds_srs.CloneGeogCS()
        coord_tf = osr.CoordinateTransformation(ds_srs, srs_lat_lon)
        for corner in gm.corner:
            if corner.location == 'UL':
                (lon, lat, height) = \
                    coord_tf.TransformPoint(map_ul_x, map_ul_y)
                corner.set_longitude(lon)
                corner.set_latitude(lat)
            if corner.location == 'LR':
                (lon, lat, height) = \
                    coord_tf.TransformPoint(map_lr_x, map_lr_y)
                corner.set_longitude(lon)
                corner.set_latitude(lat)

        # Determine the bounding coordinates
        # Initialize using the UL and LR, then walk the edges of the image,
        # because some projections may not have the values in the corners of
        # the image
        # UL
        (map_x, map_y) = convert_imageXY_to_mapXY(0.0, 0.0, ds_transform)
        (ul_lon, ul_lat, height) = coord_tf.TransformPoint(map_x, map_y)
        # LR
        (map_x, map_y) = convert_imageXY_to_mapXY(number_of_samples,
                                                  number_of_lines,
                                                  ds_transform)
        (lr_lon, lr_lat, height) = coord_tf.TransformPoint(map_x, map_y)

        # Set the initial values
        west_lon = min(ul_lon, lr_lon)
        east_lon = max(ul_lon, lr_lon)
        north_lat = max(ul_lat, lr_lat)
        south_lat = min(ul_lat, lr_lat)

        # Walk across the top and bottom of the image
        for sample in range(0, int(number_of_samples)+1):
            (map_x, map_y) = \
                convert_imageXY_to_mapXY(sample, 0.0, ds_transform)
            (top_lon, top_lat, height) = coord_tf.TransformPoint(map_x, map_y)

            (map_x, map_y) = \
                convert_imageXY_to_mapXY(sample, number_of_lines, ds_transform)
            (bottom_lon, bottom_lat, height) = \
                coord_tf.TransformPoint(map_x, map_y)

            west_lon = min(top_lon, bottom_lon, west_lon)
            east_lon = max(top_lon, bottom_lon, east_lon)
            north_lat = max(top_lat, bottom_lat, north_lat)
            south_lat = min(top_lat, bottom_lat, south_lat)

        # Walk down the left and right of the image
        for line in range(0, int(number_of_lines)+1):
            (map_x, map_y) = \
                convert_imageXY_to_mapXY(0.0, line, ds_transform)
            (left_lon, left_lat, height) = \
                coord_tf.TransformPoint(map_x, map_y)

            (map_x, map_y) = \
                convert_imageXY_to_mapXY(number_of_samples, line, ds_transform)
            (right_lon, right_lat, height) = \
                coord_tf.TransformPoint(map_x, map_y)

            west_lon = min(left_lon, right_lon, west_lon)
            east_lon = max(left_lon, right_lon, east_lon)
            north_lat = max(left_lat, right_lat, north_lat)
            south_lat = min(left_lat, right_lat, south_lat)

        # Update the bounding coordinates in the XML
        bounding_coords = gm.get_bounding_coordinates()
        bounding_coords.set_west(west_lon)
        bounding_coords.set_east(east_lon)
        bounding_coords.set_north(north_lat)
        bounding_coords.set_south(south_lat)

        del ds_transform
        del ds_srs

        # Write out a new XML file after validation
        logger.info("---- Validating XML Modifications and"
                    " Creating Temp Output File")
        tmp_xml_filename = 'tmp-%s' % xml_filename
        with open(tmp_xml_filename, 'w') as tmp_fd:
            # Call the export with validation
            metadata_api.export(tmp_fd, xml)

        # Remove the original
        if os.path.exists(xml_filename):
            os.unlink(xml_filename)

        # Rename the temp file back to the original name
        os.rename(tmp_xml_filename, xml_filename)

    except Exception as excep:
        raise ee.ESPAException(ee.ErrorCodes.warping,
                               str(excep)), None, sys.exc_info()[2]
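
convert_imageXY_to_mapXY is not shown in this listing; given that it is
called with a GDAL geotransform, it presumably applies the standard affine
mapping. A sketch under that assumption:

def convert_imageXY_to_mapXY(image_x, image_y, transform):
    # Standard GDAL geotransform layout:
    # (origin_x, pixel_width, row_rotation,
    #  origin_y, col_rotation, pixel_height)
    map_x = transform[0] + image_x * transform[1] + image_y * transform[2]
    map_y = transform[3] + image_x * transform[4] + image_y * transform[5]
    return (map_x, map_y)
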
Code Example #17
File: statistics.py  Project: NGenetzky/espa
    finally:
        # Change back to the previous directory
        os.chdir(current_directory)
# END - generate_statistics


# ============================================================================
if __name__ == '__main__':
    '''
    Description:
      This is test code only used during proto-typing.
      It only provides stats for landsat and modis data.
    '''

    # Configure logging
    EspaLogging.configure(settings.PROCESSING_LOGGER, order='test',
                          product='statistics')
    logger = EspaLogging.get_logger(settings.PROCESSING_LOGGER)

    # Hold the wild card strings in a type based dictionary
    files_to_search_for = dict()

    # Landsat files
    files_to_search_for['SR'] = ['*_sr_band[0-9].img']
    files_to_search_for['TOA'] = ['*_toa_band[0-9].img']
    files_to_search_for['INDEX'] = ['*_nbr.img', '*_nbr2.img', '*_ndmi.img',
                                    '*_ndvi.img', '*_evi.img', '*_savi.img',
                                    '*_msavi.img']

    # MODIS files
    files_to_search_for['SR'].extend(['*sur_refl_b*.img'])
    files_to_search_for['INDEX'].extend(['*NDVI.img', '*EVI.img'])
Code Example #18
File: transfer.py  Project: NGenetzky/espa
def http_transfer_file(download_url, destination_file):
    '''
    Description:
      Using http transfer a file from a source location to a destination
      file on the localhost.
    '''

    logger = EspaLogging.get_logger(settings.PROCESSING_LOGGER)

    logger.info(download_url)

# First way
#    file_size = 0
#    retrieved_bytes = 0
#    with closing(requests.get(download_url, stream=True)) as req:
#        if not req.ok:
#            raise Exception("Transfer Failed - HTTP - Reason(%s)"
#                            % req.reason)
#
#        file_size = int(req.headers['content-length'])
#
#        with open(destination_file, 'wb') as local_fd:
#            for data_chunk in req.iter_content(settings.TRANSFER_BLOCK_SIZE):
#                local_fd.write(data_chunk)
#                retrieved_bytes += len(data_chunk)
#
#    if retrieved_bytes != file_size:
#        raise Exception("Transfer Failed - HTTP - Retrieved %d"
#                        " out of %d bytes" % (retrieved_bytes, file_size))
#    else:
#        logger.info("Transfer Complete - HTTP")

# Second way
#    req = None
#    try:
#        req = requests.get(download_url)
#
#        if not req.ok:
#            logger.error("Transfer Failed - HTTP")
#            req.raise_for_status()
#
#        with open(destination_file, 'wb') as local_fd:
#            local_fd.write(req.content)
#    except:
#        logger.error("Transfer Failed - HTTP")
#        raise
#    finally:
#        if req is not None:
#            req.close()

# Third way
    session = requests.Session()

    session.mount('http://', requests.adapters.HTTPAdapter(max_retries=3))
    session.mount('https://', requests.adapters.HTTPAdapter(max_retries=3))

    retry_attempt = 0
    done = False
    while not done:
        req = None
        try:
            req = session.get(url=download_url, timeout=300.0)

            if not req.ok:
                logger.error("Transfer Failed - HTTP")
                req.raise_for_status()

            with open(destination_file, 'wb') as local_fd:
                local_fd.write(req.content)

            done = True

        except Exception:
            logger.exception("Transfer Issue - HTTP")
            if retry_attempt > 3:
                raise Exception("Transfer Failed - HTTP"
                                " - exceeded retry limit")
            retry_attempt += 1
            sleep(int(1.5 * retry_attempt))

        finally:
            if req is not None:
                req.close()

    logger.info("Transfer Complete - HTTP")
Code Example #19
File: statistics.py  Project: NGenetzky/espa
def generate_statistics(work_directory, files_to_search_for):
    '''
    Description:
      Create the stats output directory and each output stats file for each
      file specified.

    Notes:
      The stats directory is created here because we only want it in the
      product if we need statistics.
    '''

    logger = EspaLogging.get_logger(settings.PROCESSING_LOGGER)

    # Change to the working directory
    current_directory = os.getcwd()
    os.chdir(work_directory)

    try:
        stats_output_path = 'stats'
        try:
            os.makedirs(stats_output_path)
        except OSError as exc:  # Python >2.5
            if exc.errno == errno.EEXIST and os.path.isdir(stats_output_path):
                pass
            else:
                raise ee.ESPAException(ee.ErrorCodes.statistics,
                                       str(exc)), None, sys.exc_info()[2]

        try:
            # Build the list of files to process
            file_names = dict()
            for band_type in files_to_search_for:
                file_names[band_type] = list()
                for search in files_to_search_for[band_type]:
                    file_names[band_type].extend(glob.glob(search))

            # Generate the requested statistics for each tile
            for band_type in file_names:
                for file_name in file_names[band_type]:

                    logger.info("Generating statistics for: %s" % file_name)

                    (minimum, maximum, mean, stddev,
                     valid) = get_statistics(file_name, band_type)

                    # Drop the filename extension so we can replace it with
                    # 'stats'
                    base = os.path.splitext(file_name)[0]
                    base_name = '.'.join([base, 'stats'])

                    # Figure out the full path filename
                    stats_output_file = os.path.join(stats_output_path,
                                                     base_name)

                    # Buffer the stats
                    data_io = StringIO()
                    data_io.write("FILENAME=%s\n" % file_name)
                    data_io.write("MINIMUM=%f\n" % minimum)
                    data_io.write("MAXIMUM=%f\n" % maximum)
                    data_io.write("MEAN=%f\n" % mean)
                    data_io.write("STDDEV=%f\n" % stddev)
                    data_io.write("VALID=%s\n" % valid)

                    # Create the stats file
                    with open(stats_output_file, 'w+') as stat_fd:
                        stat_fd.write(data_io.getvalue())
            # END - for tile
        except Exception as e:
            raise ee.ESPAException(ee.ErrorCodes.statistics,
                                   str(e)), None, sys.exc_info()[2]

    finally:
        # Change back to the previous directory
        os.chdir(current_directory)
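
get_statistics is not part of this listing. A plausible sketch using GDAL
and numpy (hypothetical; the real implementation and its fill values may
differ):

import numpy as np
from osgeo import gdal

def get_statistics(file_name, band_type):
    # Hypothetical: read the band, mask an assumed fill value, and
    # compute the simple statistics consumed above
    ds = gdal.Open(file_name)
    data = ds.GetRasterBand(1).ReadAsArray().astype(np.float64)
    masked = np.ma.masked_equal(data, -9999)  # assumed fill value
    valid = bool(masked.count() > 0)
    return (masked.min(), masked.max(),
            masked.mean(), masked.std(), valid)
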
Code Example #20
def validate_reprojection_parameters(parms, scene, projections, ns_values,
                                     pixel_size_units, image_extents_units,
                                     resample_methods, datum_values):
    '''
    Description:
      Perform a check on the possible reprojection parameters

    Note:
      We blindly convert values to float or int without checking them.  It is
      assumed that the web tier has validated them.
    '''

    logger = EspaLogging.get_logger(settings.PROCESSING_LOGGER)

    # Create this and set to None if not present
    if not test_for_parameter(parms, 'projection'):
        logger.warning("'projection' parameter missing defaulting to None")
        parms['projection'] = None

    # Create this and set to 'near' if not present
    if not test_for_parameter(parms, 'resample_method'):
        logger.warning("'resample_method' parameter missing defaulting to"
                       " near")
        parms['resample_method'] = 'near'

    # Make sure these have at least a False value
    required_parameters = ['reproject', 'image_extents', 'resize']

    for parameter in required_parameters:
        if not test_for_parameter(parms, parameter):
            logger.warning("'%s' parameter missing defaulting to False"
                           % parameter)
            parms[parameter] = False

    if parms['reproject']:
        if not test_for_parameter(parms, 'target_projection'):
            raise RuntimeError("Missing target_projection parameter")
        else:
            # Convert to lower case
            target_projection = parms['target_projection'].lower()
            parms['target_projection'] = target_projection

            # Verify a valid projection
            if target_projection not in projections:
                raise ValueError("Invalid target_projection [%s]:"
                                 " Argument must be one of (%s)"
                                 % (target_projection, ', '.join(projections)))

            # ................................................................
            if target_projection == "sinu":
                if not test_for_parameter(parms, 'central_meridian'):
                    raise RuntimeError("Missing central_meridian parameter")
                else:
                    parms['central_meridian'] = \
                        float(parms['central_meridian'])
                if not test_for_parameter(parms, 'false_easting'):
                    raise RuntimeError("Missing false_easting parameter")
                else:
                    parms['false_easting'] = float(parms['false_easting'])
                if not test_for_parameter(parms, 'false_northing'):
                    raise RuntimeError("Missing false_northing parameter")
                else:
                    parms['false_northing'] = float(parms['false_northing'])

                if not test_for_parameter(parms, 'datum'):
                    parms['datum'] = None

            # ................................................................
            if target_projection == 'aea':
                if not test_for_parameter(parms, 'std_parallel_1'):
                    raise RuntimeError("Missing std_parallel_1 parameter")
                else:
                    parms['std_parallel_1'] = float(parms['std_parallel_1'])
                if not test_for_parameter(parms, 'std_parallel_2'):
                    raise RuntimeError("Missing std_parallel_2 parameter")
                else:
                    parms['std_parallel_2'] = float(parms['std_parallel_2'])
                if not test_for_parameter(parms, 'origin_lat'):
                    raise RuntimeError("Missing origin_lat parameter")
                else:
                    parms['origin_lat'] = float(parms['origin_lat'])
                if not test_for_parameter(parms, 'central_meridian'):
                    raise RuntimeError("Missing central_meridian parameter")
                else:
                    parms['central_meridian'] = \
                        float(parms['central_meridian'])
                if not test_for_parameter(parms, 'false_easting'):
                    raise RuntimeError("Missing false_easting parameter")
                else:
                    parms['false_easting'] = float(parms['false_easting'])
                if not test_for_parameter(parms, 'false_northing'):
                    raise RuntimeError("Missing false_northing parameter")
                else:
                    parms['false_northing'] = float(parms['false_northing'])

                # The datum must be in uppercase for the processing code to
                # work so if it is present here, we force it
                if not test_for_parameter(parms, 'datum'):
                    raise RuntimeError("Missing datum parameter")
                else:
                    parms['datum'] = parms['datum'].upper()
                if parms['datum'] not in datum_values:
                    raise ValueError("Invalid datum [%s]:"
                                     " Argument must be one of (%s)"
                                     % (parms['datum'],
                                        ', '.join(datum_values)))

            # ................................................................
            if target_projection == 'utm':
                if not test_for_parameter(parms, 'utm_zone'):
                    raise RuntimeError("Missing utm_zone parameter")
                else:
                    zone = int(parms['utm_zone'])
                    if zone < 0 or zone > 60:
                        raise ValueError("Invalid utm_zone [%d]:"
                                         " Value must be 0-60" % zone)
                    parms['utm_zone'] = zone
                if not test_for_parameter(parms, 'utm_north_south'):
                    raise RuntimeError("Missing utm_north_south parameter")
                elif parms['utm_north_south'] not in ns_values:
                    raise ValueError("Invalid utm_north_south [%s]:"
                                     " Argument must be one of (%s)"
                                     % (parms['utm_north_south'],
                                        ', '.join(ns_values)))

                if not test_for_parameter(parms, 'datum'):
                    parms['datum'] = None

            # ................................................................
            if target_projection == 'ps':
                if not test_for_parameter(parms, 'latitude_true_scale'):
                    # Must be tested before origin_lat
                    raise RuntimeError("Missing latitude_true_scale parameter")
                else:
                    value = float(parms['latitude_true_scale'])
                    if ((value < 60.0 and value > -60.0) or
                            value > 90.0 or value < -90.0):
                        raise ValueError("Invalid latitude_true_scale [%f]:"
                                         " Value must be between"
                                         " (-60.0 and -90.0) or"
                                         " (60.0 and 90.0)" % value)
                    parms['latitude_true_scale'] = value
                if not test_for_parameter(parms, 'longitude_pole'):
                    raise RuntimeError("Missing longitude_pole parameter")
                else:
                    parms['longitude_pole'] = float(parms['longitude_pole'])
                if not test_for_parameter(parms, 'origin_lat'):
                    # If the user did not specify the origin_lat value, then
                    # set it based on the latitude true scale
                    lat_ts = float(parms['latitude_true_scale'])
                    if lat_ts < 0:
                        parms['origin_lat'] = -90.0
                    else:
                        parms['origin_lat'] = 90.0
                else:
                    value = float(parms['origin_lat'])
                    if value != -90.0 and value != 90.0:
                        raise ValueError("Invalid origin_lat [%f]:"
                                         " Value must be -90.0 or 90.0"
                                         % value)
                    parms['origin_lat'] = value

                if not test_for_parameter(parms, 'false_easting'):
                    raise RuntimeError("Missing false_easting parameter")
                else:
                    parms['false_easting'] = float(parms['false_easting'])
                if not test_for_parameter(parms, 'false_northing'):
                    raise RuntimeError("Missing false_northing parameter")
                else:
                    parms['false_northing'] = float(parms['false_northing'])

                if not test_for_parameter(parms, 'datum'):
                    parms['datum'] = None

            # ................................................................
            if target_projection == 'lonlat':

                if not test_for_parameter(parms, 'datum'):
                    parms['datum'] = None

    # ------------------------------------------------------------------------
    if parms['resample_method'] not in resample_methods:
        raise ValueError("Invalid resample_method [%s]:"
                         " Argument must be one of (%s)"
                         % (parms['resample_method'],
                            ', '.join(resample_methods)))

    # ------------------------------------------------------------------------
    if parms['image_extents']:
        if not test_for_parameter(parms, 'image_extents_units'):
            raise RuntimeError("Missing image_extents_units parameter")
        else:
            if parms['image_extents_units'] not in image_extents_units:
                raise ValueError("Invalid image_extents_units [%s]:"
                                 " Argument must be one of (%s)"
                                 % (parms['image_extents_units'],
                                    ', '.join(image_extents_units)))
        if not test_for_parameter(parms, 'minx'):
            raise RuntimeError("Missing minx parameter")
        else:
            parms['minx'] = float(parms['minx'])
        if not test_for_parameter(parms, 'miny'):
            raise RuntimeError("Missing miny parameter")
        else:
            parms['miny'] = float(parms['miny'])
        if not test_for_parameter(parms, 'maxx'):
            raise RuntimeError("Missing maxx parameter")
        else:
            parms['maxx'] = float(parms['maxx'])
        if not test_for_parameter(parms, 'maxy'):
            raise RuntimeError("Missing maxy parameter")
        else:
            parms['maxy'] = float(parms['maxy'])
    else:
        # Default these
        parms['minx'] = None
        parms['miny'] = None
        parms['maxx'] = None
        parms['maxy'] = None
        parms['image_extents_units'] = None

    # ------------------------------------------------------------------------
    if parms['resize']:
        if not test_for_parameter(parms, 'pixel_size'):
            raise RuntimeError("Missing pixel_size parameter")
        else:
            parms['pixel_size'] = float(parms['pixel_size'])
        if not test_for_parameter(parms, 'pixel_size_units'):
            raise RuntimeError("Missing pixel_size_units parameter")
        else:
            if parms['pixel_size_units'] not in pixel_size_units:
                raise ValueError("Invalid pixel_size_units [%s]:"
                                 " Argument must be one of (%s)"
                                 % (parms['pixel_size_units'],
                                    ', '.join(pixel_size_units)))
    else:
        # Default this
        parms['pixel_size'] = None
        parms['pixel_size_units'] = None

    # ------------------------------------------------------------------------
    if ((parms['reproject'] or parms['image_extents']) and
            not parms['resize']):
        # Somebody asked for reproject or extents, but didn't specify a
        # pixel size

        units = 'meters'
        if parms['reproject'] and parms['target_projection'] == 'lonlat':
            units = 'dd'

        # Default to the sensor specific meters or dd equivalent
        parms['pixel_size'] = sensor.instance(scene).default_pixel_size[units]
        parms['pixel_size_units'] = units

        logger.warning("'resize' parameter not provided but required for"
                       " reprojection or image extents"
                       " (Defaulting pixel_size(%f) and pixel_size_units(%s)"
                       % (parms['pixel_size'], parms['pixel_size_units']))
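
For illustration, a minimal parms dictionary that would satisfy the UTM
branch of the validation above (the values are made up; the validator
fills in the remaining defaults):

parms = {
    'reproject': True,
    'image_extents': False,
    'resize': False,
    'target_projection': 'UTM',   # lower-cased by the validator
    'utm_zone': '16',             # converted to int and range-checked
    'utm_north_south': 'north',   # must be one of the supplied ns_values
}
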
Code example #21
File: warp.py Project: spgriffin/espa-processing
if __name__ == '__main__':
    '''
    Description:
      Read parameters from the command line and build a JSON dictionary from
      them.  Pass the JSON dictionary to the process routine.
    '''

    # Build the command line argument parser
    parser = build_argument_parser()

    # Parse the command line arguments
    args = parser.parse_args()
    args_dict = vars(args)

    # Configure logging
    EspaLogging.configure(settings.PROCESSING_LOGGER, order='test',
                          product='product', debug=args.debug)
    logger = EspaLogging.get_logger(settings.PROCESSING_LOGGER)

    # Build our JSON formatted input from the command line parameters
    options = {k: args_dict[k] for k in args_dict if args_dict[k] is not None}

    try:
        # Call the main processing routine
        # NOTE: the scene ID is assumed to be supplied on the command line
        #       by build_argument_parser()
        warp_espa_data(options, args.scene)
    except Exception as excep:
        if hasattr(excep, 'output'):
            logger.error("Output [%s]" % excep.output)
        logger.exception("Processing failed")
        sys.exit(1)

    sys.exit(0)
Code example #22
File: distribution.py Project: NGenetzky/espa
def distribute_product_remote(product_name, source_path, packaging_path,
                              cache_path, parms):

    logger = EspaLogging.get_logger(settings.PROCESSING_LOGGER)

    opts = parms['options']

    # Determine the remote hostname to use
    destination_host = utilities.get_cache_hostname()

    # Deliver the product files
    # Attempt X times sleeping between each attempt
    sleep_seconds = settings.DEFAULT_SLEEP_SECONDS
    max_number_of_attempts = settings.MAX_DISTRIBUTION_ATTEMPTS
    max_package_attempts = settings.MAX_PACKAGING_ATTEMPTS
    max_delivery_attempts = settings.MAX_DELIVERY_ATTEMPTS

    attempt = 0
    product_file = 'ERROR'
    cksum_file = 'ERROR'
    while True:
        try:
            # Package the product files
            # Attempt X times sleeping between each sub_attempt
            sub_attempt = 0
            while True:
                try:
                    (product_full_path, cksum_full_path,
                     local_cksum_value) = package_product(source_path,
                                                          packaging_path,
                                                          product_name)
                except Exception as e:
                    logger.exception("An exception occurred processing %s"
                                     % product_name)
                    if sub_attempt < max_package_attempts:
                        sleep(sleep_seconds)  # sleep before trying again
                        sub_attempt += 1
                        continue
                    else:
                        raise ee.ESPAException(ee.ErrorCodes.packaging_product,
                                               str(e)), None, sys.exc_info()[2]
                break

            # Distribute the product
            # Attempt X times sleeping between each sub_attempt
            sub_attempt = 0
            while True:
                try:
                    (remote_cksum_value, product_file, cksum_file) = \
                        transfer_product(destination_host, cache_path,
                                         opts['destination_username'],
                                         opts['destination_pw'],
                                         product_full_path, cksum_full_path)
                except Exception as e:
                    logger.exception("An exception occurred processing %s"
                                     % product_name)
                    if sub_attempt < max_delivery_attempts:
                        sleep(sleep_seconds)  # sleep before trying again
                        sub_attempt += 1
                        continue
                    else:
                        raise ee.ESPAException(ee.ErrorCodes.transfer_product,
                                               str(e)), None, sys.exc_info()[2]
                break

            # Checksum validation
            if local_cksum_value.split()[0] != remote_cksum_value.split()[0]:
                raise ee.ESPAException(ee.ErrorCodes.verifing_checksum,
                                       "Failed checksum validation between"
                                       " %s and %s:%s"
                                       % (product_full_path,
                                          destination_host,
                                          destination_product_file))

            # Always log where we placed the files
            logger.info("Delivered product to %s at location %s"
                        " and cksum location %s" % (destination_host,
                                                    product_file, cksum_file))
        except Exception as e:
            if attempt < max_number_of_attempts:
                sleep(sleep_seconds)  # sleep before trying again
                attempt += 1
                # adjust for next set
                sleep_seconds = int(sleep_seconds * 1.5)
                continue
            else:
                # May already be an ESPAException so don't override that
                raise e
        break

    return (product_file, cksum_file)
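The nested loops above implement two levels of retry: the inner loops retry packaging and delivery with a constant sleep, while the outer loop retries the whole sequence and grows its sleep by a factor of 1.5 per attempt. Distilled into a standalone helper, the pattern looks roughly like the sketch below; this is an illustration of the technique, not code from the project.

from time import sleep

def retry_with_backoff(func, max_attempts, sleep_seconds, backoff=1.5):
    '''Call func() until it succeeds or the attempts are exhausted.

    Sleeps between attempts and grows the sleep by the backoff factor,
    mirroring the sleep_seconds * 1.5 adjustment in the outer loop above.
    '''
    attempt = 0
    while True:
        try:
            return func()
        except Exception:
            if attempt >= max_attempts:
                raise
            sleep(sleep_seconds)
            attempt += 1
            sleep_seconds = int(sleep_seconds * backoff)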
Code example #23
File: warp.py Project: spgriffin/espa-processing
def projection_minbox(ul_lon, ul_lat, lr_lon, lr_lat,
                      target_proj4, pixel_size, pixel_size_units):
    '''
    Description:
      Determines the minimum box in map coordinates that contains the
      geographic coordinates.  Minimum and maximum extent values are returned
      in map coordinates.

    Parameters:
      ul_lon       = Upper Left longitude in decimal degrees
      ul_lat       = Upper Left latitude in decimal degrees
      lr_lon       = Lower Right longitude in decimal degrees
      lr_lat       = Lower Right latitude in decimal degrees
      target_proj4 = The user supplied target proj4 string
      pixel_size   = The target pixel size used to step along the projected
                     area boundary
      pixel_size_units = The units the pixel size is in: 'dd' or 'meters'

    Returns:
        (min_x, min_y, max_x, max_y) in meters
    '''

    logger = EspaLogging.get_logger(settings.PROCESSING_LOGGER)

    logger.info("Determining Image Extents For Requested Projection")

    # We are always going to be geographic
    source_proj4 = settings.GEOGRAPHIC_PROJ4_STRING

    logger.info("Using source projection [%s]" % source_proj4)
    logger.info("Using target projection [%s]" % target_proj4)

    # Create and initialize the source SRS
    source_srs = osr.SpatialReference()
    source_srs.ImportFromProj4(source_proj4)

    # Create and initialize the target SRS
    target_srs = osr.SpatialReference()
    target_srs.ImportFromProj4(target_proj4)

    # Create the transformation object
    transform = osr.CoordinateTransformation(source_srs, target_srs)

    # Determine the step in decimal degrees
    step = pixel_size
    if pixel_size_units == 'meters':
        # Convert it to decimal degrees
        step = settings.DEG_FOR_1_METER * pixel_size

    # Determine the lat and lon values to iterate over
    longitudes = np.arange(ul_lon, lr_lon, step, dtype=float)
    latitudes = np.arange(lr_lat, ul_lat, step, dtype=float)

    # Initialization using the two corners
    (ul_x, ul_y, z) = transform.TransformPoint(ul_lon, ul_lat)
    (lr_x, lr_y, z) = transform.TransformPoint(lr_lon, lr_lat)

    min_x = min(ul_x, lr_x)
    max_x = max(ul_x, lr_x)
    min_y = min(ul_y, lr_y)
    max_y = max(ul_y, lr_y)

    logger.info('Direct translation of the provided geographic coordinates')
    logger.info(','.join(['min_x', 'min_y', 'max_x', 'max_y']))
    logger.info(','.join([str(min_x), str(min_y), str(max_x), str(max_y)]))

    # Walk across the top and bottom of the geographic coordinates
    for lon in longitudes:
        # Upper side
        (ux, uy, z) = transform.TransformPoint(lon, ul_lat)

        # Lower side
        (lx, ly, z) = transform.TransformPoint(lon, lr_lat)

        min_x = min(ux, lx, min_x)
        max_x = max(ux, lx, max_x)
        min_y = min(uy, ly, min_y)
        max_y = max(uy, ly, max_y)

    # Walk along the left and right of the geographic coordinates
    for lat in latitudes:
        # Left side
        (lx, ly, z) = transform.TransformPoint(ul_lon, lat)

        # Right side
        (rx, ry, z) = transform.TransformPoint(lr_lon, lat)

        min_x = min(rx, lx, min_x)
        max_x = max(rx, lx, max_x)
        min_y = min(ry, ly, min_y)
        max_y = max(ry, ly, max_y)

    del transform
    del source_srs
    del target_srs

    logger.info('Map coordinates after minbox determination')
    logger.info(','.join(['min_x', 'min_y', 'max_x', 'max_y']))
    logger.info(','.join([str(min_x), str(min_y), str(max_x), str(max_y)]))

    return (min_x, min_y, max_x, max_y)
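A hypothetical call computing the minbox for a scene footprint in an Albers Equal Area projection follows; the proj4 string and coordinates are illustrative only, not taken from a real order.

# Illustrative usage of projection_minbox (all values invented)
albers_proj4 = ('+proj=aea +lat_1=29.5 +lat_2=45.5 +lat_0=23.0'
                ' +lon_0=-96.0 +x_0=0 +y_0=0 +datum=WGS84 +units=m')

(min_x, min_y, max_x, max_y) = projection_minbox(
    ul_lon=-113.9, ul_lat=38.2, lr_lon=-111.5, lr_lat=36.1,
    target_proj4=albers_proj4, pixel_size=30.0,
    pixel_size_units='meters')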
Code example #24
def process(args):
    '''
    Description:
      Read all lines from STDIN and process them.  Each line is converted to
      a JSON dictionary of the parameters for processing.  Validation is
      performed on the JSON dictionary to test if valid for this mapper.
      After validation the generation of the products is performed.
    '''

    # Initially set to the base logger
    logger = EspaLogging.get_logger('base')

    processing_location = socket.gethostname()

    # Process each line from stdin
    for line in sys.stdin:
        if not line or '{' not in line:
            # this is how the nlineinputformat is supplying values:
            # 341104        {"orderid":
            # logger.info("BAD LINE:%s##" % line)
            continue
        else:
            # take the entry starting at the first opening brace to the end
            line = line[line.find('{'):]
            line = line.strip()

        # Reset these for each line
        (server, order_id, product_id) = (None, None, None)

        # Default to the command line value
        mapper_keep_log = args.keep_log

        try:
            line = line.replace('#', '')
            parms = json.loads(line)

            if not parameters.test_for_parameter(parms, 'options'):
                raise ValueError("Error missing JSON 'options' record")

            # TODO scene will be replaced with product_id someday
            (order_id, product_id, product_type, options) = \
                (parms['orderid'], parms['scene'], parms['product_type'],
                 parms['options'])

            # Fix the orderid in case it contains any single quotes
            # The processors cannot handle single quotes in the email
            # portion due to usage in command lines.
            parms['orderid'] = order_id.replace("'", '')

            # If it is missing due to above TODO, then add it
            if not parameters.test_for_parameter(parms, 'product_id'):
                parms['product_id'] = product_id

            # Figure out if debug level logging was requested
            debug = False
            if parameters.test_for_parameter(options, 'debug'):
                debug = options['debug']

            # Configure and get the logger for this order request
            EspaLogging.configure(settings.PROCESSING_LOGGER, order=order_id,
                                  product=product_id, debug=debug)
            logger = EspaLogging.get_logger(settings.PROCESSING_LOGGER)

            # If the command line option is True don't use the scene option
            if not mapper_keep_log:
                if not parameters.test_for_parameter(options, 'keep_log'):
                    options['keep_log'] = False

                mapper_keep_log = options['keep_log']

            logger.info("Processing %s:%s" % (order_id, product_id))

            # Update the status in the database
            if parameters.test_for_parameter(parms, 'xmlrpcurl'):
                if parms['xmlrpcurl'] != 'skip_xmlrpc':
                    server = xmlrpclib.ServerProxy(parms['xmlrpcurl'],
                                                   allow_none=True)
                    if server is not None:
                        status = server.update_status(product_id, order_id,
                                                      processing_location,
                                                      'processing')
                        if not status:
                            logger.warning("Failed processing xmlrpc call"
                                           " to update_status to processing")

            if product_id != 'plot':
                # Make sure we can process the sensor
                sensor_name = sensor.instance(product_id).sensor_name
                if sensor_name not in parameters.valid_sensors:
                    raise ValueError("Invalid Sensor %s" % sensor_name)

                # Make sure we have a valid output format
                if not parameters.test_for_parameter(options, 'output_format'):
                    logger.warning("'output_format' parameter missing"
                                   " defaulting to envi")
                    options['output_format'] = 'envi'

                if (options['output_format']
                        not in parameters.valid_output_formats):

                    raise ValueError("Invalid Output format %s"
                                     % options['output_format'])

            # ----------------------------------------------------------------
            # NOTE: The first thing the product processor does during
            #       initialization is validate the input parameters.
            # ----------------------------------------------------------------

            destination_product_file = 'ERROR'
            destination_cksum_file = 'ERROR'
            pp = None
            try:
                # All processors are implemented in the processor module
                pp = processor.get_instance(parms)
                (destination_product_file, destination_cksum_file) = \
                    pp.process()

            finally:
                # Free disk space to be nice to the whole system.
                if not mapper_keep_log and pp is not None:
                    pp.remove_product_directory()

            # Everything was successful so mark the scene complete
            if server is not None:
                status = server.mark_scene_complete(product_id, order_id,
                                                    processing_location,
                                                    destination_product_file,
                                                    destination_cksum_file, "")
                if not status:
                    logger.warning("Failed processing xmlrpc call to"
                                   " mark_scene_complete")

            # Cleanup the log file
            if not mapper_keep_log:
                EspaLogging.delete_logger_file(settings.PROCESSING_LOGGER)

            # Reset back to the base logger
            logger = EspaLogging.get_logger('base')

        except ee.ESPAException, e:

            # First log the exception
            if hasattr(e, 'error_code'):
                logger.error("Code [%s]" % str(e.error_code))
            if hasattr(e, 'output'):
                logger.error("Output [%s]" % e.output)
            logger.exception("Exception encountered and follows")

            # Log the error information to the server
            # TODO - Today we are failing everything the same way, but some
            #        error codes could be made recoverable in the future, at
            #        which point per-code branching would return here.
            status = False
            if server is not None:
                try:
                    # Every error code currently receives identical handling
                    status = set_product_error(server,
                                               order_id,
                                               product_id,
                                               processing_location)

                    if status and not mapper_keep_log:
                        try:
                            # Cleanup the log file
                            EspaLogging.delete_logger_file(
                                settings.PROCESSING_LOGGER)
                        except Exception:
                            logger.exception("Exception encountered"
                                             " stacktrace follows")

                except Exception:
                    logger.exception("Exception encountered and follows")
            # END - if server is not None

        except Exception, e:

            # First log the exception
            if hasattr(e, 'output'):
                logger.error("Output [%s]" % e.output)
            logger.exception("Exception encountered stacktrace follows")

            if server is not None:

                try:
                    status = set_product_error(server,
                                               order_id,
                                               product_id,
                                               processing_location)
                    if status and not mapper_keep_log:
                        try:
                            # Cleanup the log file
                            EspaLogging.delete_logger_file(
                                settings.PROCESSING_LOGGER)
                        except Exception:
                            logger.exception("Exception encountered"
                                             " stacktrace follows")
                except Exception:
                    logger.exception("Exception encountered stacktrace"
                                     " follows")
Code example #25


# ============================================================================
if __name__ == '__main__':
    '''
    Description:
        Some parameter and logging setup, then call the process routine.
    '''

    # Grab our only command line parameter
    parser = ArgumentParser(
        description="Processes a list of scenes from stdin")
    parser.add_argument('--keep-log', action='store_true', dest='keep_log',
                        default=False, help="keep the generated log file")
    args = parser.parse_args()

    EspaLogging.configure_base_logger(filename='/tmp/espa-ondemand-mapper.log')
    # Initially set to the base logger
    logger = EspaLogging.get_logger('base')

    try:
        process(args)
    except Exception:
        logger.exception("Processing failed stacktrace follows")

    sys.exit(0)
Code example #26
File: warp.py Project: spgriffin/espa-processing
def warp_espa_data(parms, scene, xml_filename=None):
    '''
    Description:
      Warp each espa science product to the parameters specified in the parms
    '''

    logger = EspaLogging.get_logger(settings.PROCESSING_LOGGER)

    # Validate the parameters
    validate_parameters(parms, scene)
    logger.debug(parms)

    # ------------------------------------------------------------------------
    # De-register the DOQ drivers since they may cause a problem with some of
    # our generated imagery.  Today the processing code only handles the ENVI
    # format anyway.
    doq1 = gdal.GetDriverByName('DOQ1')
    doq2 = gdal.GetDriverByName('DOQ2')
    doq1.Deregister()
    doq2.Deregister()
    # ------------------------------------------------------------------------

    # Verify something was provided for the XML filename
    if xml_filename is None or xml_filename == '':
        raise ee.ESPAException(ee.ErrorCodes.warping, "Missing XML Filename")

    # Change to the working directory
    current_directory = os.getcwd()
    os.chdir(parms['work_directory'])

    try:
        xml = metadata_api.parse(xml_filename, silence=True)
        bands = xml.get_bands()
        global_metadata = xml.get_global_metadata()
        satellite = global_metadata.get_satellite()

        # Might need this for the base warp command image extents
        original_proj4 = get_original_projection(bands.band[0].get_file_name())

        # Build the base warp command to use
        base_warp_command = \
            build_base_warp_command(parms, original_proj4=str(original_proj4))

        # Determine the user specified resample method
        user_resample_method = 'near'  # default
        if parms['resample_method'] is not None:
            user_resample_method = parms['resample_method']

        # Process through the bands in the XML file
        for band in bands.band:
            img_filename = band.get_file_name()
            hdr_filename = img_filename.replace('.img', '.hdr')
            logger.info("Processing %s" % img_filename)

            # Reset the resample method to the user specified value
            resample_method = user_resample_method

            # Always use near for qa bands
            category = band.get_category()
            if category == 'qa':
                resample_method = 'near'  # override with 'near'

            # Update the XML metadata object for the resampling method used
            # Later update_espa_xml is used to update the XML file
            if resample_method == 'near':
                band.set_resample_method('nearest neighbor')
            elif resample_method == 'bilinear':
                band.set_resample_method('bilinear')
            elif resample_method == 'cubic':
                band.set_resample_method('cubic convolution')

            # Figure out the pixel size to use
            pixel_size = parms['pixel_size']

            # EXECUTIVE DECISION(Calli) - ESPA Issue 185
            #    - If the band is (Landsat 7 or 8) and Band 8 do not resize
            #      the pixels.
            if ((satellite == 'LANDSAT_7' or satellite == 'LANDSAT_8') and
                    band.get_name() == 'band8'):
                if parms['target_projection'] == 'lonlat':
                    pixel_size = settings.DEG_FOR_15_METERS
                else:
                    pixel_size = float(band.pixel_size.x)

            # Open the image to read the no data value out since the internal
            # ENVI driver for GDAL does not output it, even if it is known
            ds = gdal.Open(img_filename)
            if ds is None:
                raise RuntimeError("GDAL failed to open (%s)" % img_filename)

            ds_band = None
            try:
                ds_band = ds.GetRasterBand(1)
            except Exception as excep:
                raise ee.ESPAException(ee.ErrorCodes.warping,
                                       str(excep)), None, sys.exc_info()[2]

            # Save the no data value since gdalwarp does not write it out when
            # using the ENVI format
            no_data_value = ds_band.GetNoDataValue()
            if no_data_value is not None:
                # TODO - We don't process any floating point data types.  Yet
                # Convert to an integer then string
                no_data_value = str(int(no_data_value))

            # Force a freeing of the memory
            del ds_band
            del ds

            tmp_img_filename = 'tmp-%s' % img_filename
            tmp_hdr_filename = 'tmp-%s' % hdr_filename

            warp_image(img_filename, tmp_img_filename,
                       base_warp_command=base_warp_command,
                       resample_method=resample_method,
                       pixel_size=pixel_size,
                       no_data_value=no_data_value)

            ##################################################################
            # Gather everything new for the re-projected band and update the
            # ENVI header accordingly
            ##################################################################

            # Update the tmp ENVI header with our own values for some fields
            sb = StringIO()
            with open(tmp_hdr_filename, 'r') as tmp_fd:
                while True:
                    line = tmp_fd.readline()
                    if not line:
                        break
                    if (line.startswith('data ignore value') or
                            line.startswith('description')):
                        pass
                    else:
                        sb.write(line)

                    if line.startswith('description'):
                        # This may be on multiple lines so read lines until
                        # we find the closing brace
                        if not line.strip().endswith('}'):
                            while True:
                                next_line = tmp_fd.readline()
                                if (not next_line or
                                        next_line.strip().endswith('}')):
                                    break
                        sb.write('description = {ESPA-generated file}\n')
                    elif (line.startswith('data type') and
                          (no_data_value is not None)):
                        sb.write('data ignore value = %s\n' % no_data_value)
            # END - with tmp_fd

            # Do the actual replace here
            with open(tmp_hdr_filename, 'w') as tmp_fd:
                tmp_fd.write(sb.getvalue())

            # Remove the original files, they are replaced in following code
            if os.path.exists(img_filename):
                os.unlink(img_filename)
            if os.path.exists(hdr_filename):
                os.unlink(hdr_filename)

            # Rename the temp files back to their original names
            os.rename(tmp_img_filename, img_filename)
            os.rename(tmp_hdr_filename, hdr_filename)
        # END for each band in the XML file

        # Update the XML to reflect the new warped output
        update_espa_xml(parms, xml, xml_filename)

        del xml

    except Exception as excep:
        raise ee.ESPAException(ee.ErrorCodes.warping,
                               str(excep)), None, sys.exc_info()[2]
    finally:
        # Change back to the previous directory
        os.chdir(current_directory)
Code example #27


# ============================================================================
if __name__ == '__main__':
    '''
    Description:
        Some parameter and logging setup, then call the process routine.
    '''

    # Grab our only command line parameter
    parser = ArgumentParser(
        description="Processes a list of scenes from stdin")
    parser.add_argument('--keep-log', action='store_true', dest='keep_log',
                        default=False, help="keep the generated log file")
    args = parser.parse_args()

    EspaLogging.configure_base_logger(filename=MAPPER_LOG_FILENAME)
    # Initially set to the base logger
    logger = EspaLogging.get_logger('base')

    try:
        process(args)
    except Exception:
        logger.exception("Processing failed stacktrace follows")

    sys.exit(0)
Code example #28
def process(args):
    '''
    Description:
      Read all lines from STDIN and process them.  Each line is converted to
      a JSON dictionary of the parameters for processing.  Validation is
      performed on the JSON dictionary to test if valid for this mapper.
      After validation the generation of the products is performed.
    '''

    # Initially set to the base logger
    logger = EspaLogging.get_logger('base')

    processing_location = socket.gethostname()

    # Process each line from stdin
    for line in sys.stdin:
        if not line or '{' not in line:
            # this is how the nlineinputformat is supplying values:
            # 341104        {"orderid":
            # logger.info("BAD LINE:%s##" % line)
            continue
        else:
            # take the entry starting at the first opening brace to the end
            line = line[line.find('{'):]
            line = line.strip()

        # Reset these for each line
        (server, order_id, product_id) = (None, None, None)

        # Default to the command line value
        mapper_keep_log = args.keep_log

        start_time = datetime.datetime.now()

        try:
            line = line.replace('#', '')
            parms = json.loads(line)

            if not parameters.test_for_parameter(parms, 'options'):
                raise ValueError("Error missing JSON 'options' record")

            # TODO scene will be replaced with product_id someday
            (order_id, product_id, product_type, options) = \
                (parms['orderid'], parms['scene'], parms['product_type'],
                 parms['options'])

            # Fix the orderid in case it contains any single quotes
            # The processors cannot handle single quotes in the email
            # portion due to usage in command lines.
            parms['orderid'] = order_id.replace("'", '')

            # If it is missing due to above TODO, then add it
            if not parameters.test_for_parameter(parms, 'product_id'):
                parms['product_id'] = product_id

            # Figure out if debug level logging was requested
            debug = False
            if parameters.test_for_parameter(options, 'debug'):
                debug = options['debug']

            # Configure and get the logger for this order request
            EspaLogging.configure(settings.PROCESSING_LOGGER, order=order_id,
                                  product=product_id, debug=debug)
            logger = EspaLogging.get_logger(settings.PROCESSING_LOGGER)

            # If the command line option is True don't use the scene option
            if not mapper_keep_log:
                if not parameters.test_for_parameter(options, 'keep_log'):
                    options['keep_log'] = False

                mapper_keep_log = options['keep_log']

            logger.info("Processing %s:%s" % (order_id, product_id))

            # Update the status in the database
            if parameters.test_for_parameter(parms, 'xmlrpcurl'):
                if parms['xmlrpcurl'] != 'skip_xmlrpc':
                    server = xmlrpclib.ServerProxy(parms['xmlrpcurl'],
                                                   allow_none=True)
                    if server is not None:
                        status = server.update_status(product_id, order_id,
                                                      processing_location,
                                                      'processing')
                        if not status:
                            logger.warning("Failed processing xmlrpc call"
                                           " to update_status to processing")

            if product_id != 'plot':
                # Make sure we can process the sensor
                tmp_inst = sensor.instance(product_id)
                del tmp_inst

                # Make sure we have a valid output format
                if not parameters.test_for_parameter(options, 'output_format'):
                    logger.warning("'output_format' parameter missing"
                                   " defaulting to envi")
                    options['output_format'] = 'envi'

                if (options['output_format']
                        not in parameters.valid_output_formats):

                    raise ValueError("Invalid Output format %s"
                                     % options['output_format'])

            # ----------------------------------------------------------------
            # NOTE: The first thing the product processor does during
            #       initialization is validate the input parameters.
            # ----------------------------------------------------------------

            destination_product_file = 'ERROR'
            destination_cksum_file = 'ERROR'
            pp = None
            try:
                # All processors are implemented in the processor module
                pp = processor.get_instance(parms)
                (destination_product_file, destination_cksum_file) = \
                    pp.process()

            finally:
                # Free disk space to be nice to the whole system.
                if not mapper_keep_log and pp is not None:
                    pp.remove_product_directory()

            # Sleep the number of seconds for minimum request duration
            sleep(get_sleep_duration(start_time))

            archive_log_files(order_id, product_id)

            # Everything was successful so mark the scene complete
            if server is not None:
                status = server.mark_scene_complete(product_id, order_id,
                                                    processing_location,
                                                    destination_product_file,
                                                    destination_cksum_file, "")
                if not status:
                    logger.warning("Failed processing xmlrpc call to"
                                   " mark_scene_complete")

        except Exception as excep:

            # First log the exception
            if hasattr(excep, 'output'):
                logger.error("Output [%s]" % excep.output)
            logger.exception("Exception encountered stacktrace follows")

            # Sleep the number of seconds for minimum request duration
            sleep(get_sleep_duration(start_time))

            archive_log_files(order_id, product_id)

            if server is not None:
                try:
                    status = set_product_error(server,
                                               order_id,
                                               product_id,
                                               processing_location)
                except Exception:
                    logger.exception("Exception encountered stacktrace"
                                     " follows")
        finally:
            # Reset back to the base logger
            logger = EspaLogging.get_logger('base')
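get_sleep_duration is not defined in these snippets; from its usage above it pads each request to a minimum duration. A plausible sketch follows, under the assumption of a fixed minimum; the minimum value is invented and the real helper may differ.

import datetime

def get_sleep_duration(start_time, min_seconds=5):
    '''Seconds still needed to reach a minimum request duration.

    ASSUMPTION: the real helper is not shown in these snippets; this
    sketch assumes a configurable minimum and never returns a negative
    value, so sleep() is always given a sane argument.
    '''
    elapsed = (datetime.datetime.now() - start_time).total_seconds()
    return max(0, min_seconds - elapsed)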
Code example #29
File: distribution.py Project: NGenetzky/espa
def package_product(source_directory, destination_directory, product_name):
    '''
    Description:
      Package the contents of the source directory into a gzipped tarball
      located in the destination directory, and generate a checksum file
      for it

      The filename will be prefixed with the specified product name

    Returns:
      product_full_path - The full path to the product including filename
      cksum_full_path - The full path to the checksum file including filename
      cksum_value - The checksum value
    '''

    logger = EspaLogging.get_logger(settings.PROCESSING_LOGGER)

    product_full_path = os.path.join(destination_directory, product_name)

    # Make sure the directory exists.
    utilities.create_directory(destination_directory)

    # Remove any pre-existing files
    # Grab the first part of the filename, which is not unique
    filename_parts = product_full_path.split('-')
    filename_parts[-1] = '*'  # Replace the last element of the list
    filename = '-'.join(filename_parts)  # Join with '-'

    cmd = ' '.join(['rm', '-f', filename])
    output = ''
    try:
        output = utilities.execute_cmd(cmd)
    except Exception as e:
        raise ee.ESPAException(ee.ErrorCodes.packaging_product,
                               str(e)), None, sys.exc_info()[2]
    finally:
        if len(output) > 0:
            logger.info(output)

    # Change to the source directory
    current_directory = os.getcwd()
    os.chdir(source_directory)

    try:
        # Tar the files
        logger.info("Packaging completed product to %s.tar.gz"
                    % product_full_path)

        # Grab the files to tar and gzip
        product_files = glob.glob("*")

        # Execute tar with zipping, the full/path/*.tar.gz name is returned
        product_full_path = utilities.tar_files(product_full_path,
                                                product_files, gzip=True)

        # Change file permissions
        logger.info("Changing file permissions on %s to 0644"
                    % product_full_path)
        os.chmod(product_full_path, 0644)

        # Verify that the archive is good
        output = ''
        cmd = ' '.join(['tar', '-tf', product_full_path])
        try:
            output = utilities.execute_cmd(cmd)
        except Exception as e:
            raise ee.ESPAException(ee.ErrorCodes.packaging_product,
                                   str(e)), None, sys.exc_info()[2]
        finally:
            if len(output) > 0:
                logger.info(output)

        # If it was good create a checksum file
        cksum_output = ''
        cmd = ' '.join([settings.ESPA_CHECKSUM_TOOL, product_full_path])
        try:
            cksum_output = utilities.execute_cmd(cmd)
        except Exception as e:
            if len(cksum_output) > 0:
                logger.info(cksum_output)
            raise ee.ESPAException(ee.ErrorCodes.packaging_product,
                                   str(e)), None, sys.exc_info()[2]

        # Name of the checksum file created
        cksum_filename = '.'.join([product_name,
                                   settings.ESPA_CHECKSUM_EXTENSION])
        # Get the base filename of the file that was checksum'd
        cksum_prod_filename = os.path.basename(product_full_path)

        logger.debug("Checksum file = %s" % cksum_filename)
        logger.debug("Checksum'd file = %s" % cksum_prod_filename)

        # Make sure they are strings
        cksum_values = cksum_output.split()
        cksum_value = "%s %s" % (str(cksum_values[0]),
                                 str(cksum_prod_filename))
        logger.info("Generating cksum: %s" % cksum_value)

        cksum_full_path = os.path.join(destination_directory, cksum_filename)

        try:
            with open(cksum_full_path, 'wb+') as cksum_fd:
                cksum_fd.write(cksum_value)
        except Exception as e:
            raise ee.ESPAException(ee.ErrorCodes.packaging_product,
                                   "Error building checksum file"), \
                None, sys.exc_info()[2]

    finally:
        # Change back to the previous directory
        os.chdir(current_directory)

    return (product_full_path, cksum_full_path, cksum_value)
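Hypothetical usage of package_product follows; the paths and product name are invented. The first token of the returned checksum value is what distribute_product_remote compares against the checksum computed on the cache host after transfer.

# Illustrative call (all paths and names invented)
(product_full_path,
 cksum_full_path,
 local_cksum_value) = package_product(
    '/path/to/work/output',              # source_directory
    '/path/to/packaging',                # destination_directory
    'LT50460282002042EDC01-SC20150101')  # product_name

# First token of the checksum line is the digest itself
local_digest = local_cksum_value.split()[0]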
Code example #30
File: distribution.py Project: NGenetzky/espa
def distribute_statistics_local(product_id, source_path, destination_path):
    '''
    Description:
        Copies the statistics to the specified directory on the local system

    Parameters:
        product_id - The unique product ID associated with the files.
        source_path - The full path to where the statistics files to
                      distribute reside.
        destination_path - The full path on the local system to copy the
                           statistics files into.

    Note:
        - It is assumed a stats directory exists under the source_path
        - A stats directory will be created under the destination path
    '''

    logger = EspaLogging.get_logger(settings.PROCESSING_LOGGER)

    d_name = 'stats'

    # Save the current directory location and change to the source directory
    current_directory = os.getcwd()
    os.chdir(source_path)

    try:
        stats_path = os.path.join(destination_path, d_name)
        stats_files = ''.join([d_name, '/', product_id, '*'])

        # Create the statistics directory under the destination path
        logger.info("Creating directory {0}".format(stats_path))
        utilities.create_directory(stats_path)

        # Remove any pre-existing statistics for this product ID
        cmd = ' '.join(['rm', '-f', os.path.join(destination_path,
                                                 stats_files)])
        output = ''
        try:
            output = utilities.execute_cmd(cmd)
        except Exception as e:
            raise ee.ESPAException(ee.ErrorCodes.distributing_product,
                                   str(e)), None, sys.exc_info()[2]
        finally:
            if len(output) > 0:
                logger.info(output)

        # Transfer the statistics files
        for file_path in glob.glob(stats_files):
            filename = os.path.basename(file_path)
            dest_file_path = os.path.join(stats_path, filename)

            logger.info("Copying {0} to {1}".format(filename, dest_file_path))
            shutil.copyfile(file_path, dest_file_path)

    except Exception as e:
        logger.exception("An exception occurred processing {0}"
                         .format(product_id))
        e_code = ee.ErrorCodes.distributing_product
        raise ee.ESPAException(e_code, str(e)), None, sys.exc_info()[2]

    finally:
        # Change back to the previous directory
        os.chdir(current_directory)
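Hypothetical usage; the paths are invented. Per the notes in the docstring, a stats directory must already exist under the source path, and a stats directory is created under the destination.

# Illustrative call (all values invented)
distribute_statistics_local(
    'LT50460282002042EDC01',   # product_id
    '/path/to/work/output',    # source_path, containing a stats/ dir
    '/path/to/local/cache')    # destination_path; stats/ created here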
Code example #31
def do_sr_browse(sr_filename,
                 scene,
                 resolution=settings.DEFAULT_BROWSE_RESOLUTION):
    '''
    Description:
      Creates a browse image from the surface reflectance file
    '''

    logger = EspaLogging.get_logger(settings.PROCESSING_LOGGER)

    logger.info("Creating browse product")

    browse_filename = "%s-sr-browse.tif" % scene

    # ------------------------------------------------------------------------
    # Convert to GeoTIFF
    cmd = [
        'gdal_translate', '-a_nodata', '-9999', '-a_nodata', '12000', '-of',
        'GTiff', '-sds', sr_filename, 'out.tiff'
    ]
    cmd = ' '.join(cmd)
    logger.info(' '.join(['Running:', cmd]))
    output = utilities.execute_cmd(cmd)
    if len(output) > 0:
        logger.info(output)

    # ------------------------------------------------------------------------
    # Scale each browse band to 8bit data range
    base_translate_cmd = [
        'gdal_translate', '-ot', 'Byte', '-scale', '0', '10000', '0', '255',
        '-of', 'GTIFF'
    ]
    # gdal_translate -ot Byte -scale 0 10000 0 255 -of ENVI
    # LT50460282002042EDC01_toa_band5.img browse_5.img
    cmd = ' '.join(base_translate_cmd + ['out.tiff5', 'browse.tiff5'])
    logger.info(' '.join(['Running:', cmd]))
    output = utilities.execute_cmd(cmd)
    if len(output) > 0:
        logger.info(output)

    # gdal_translate -ot Byte -scale 0 10000 0 255 -of ENVI
    # LT50460282002042EDC01_toa_band4.img browse_4.img
    cmd = ' '.join(base_translate_cmd + ['out.tiff4', 'browse.tiff4'])
    logger.info(' '.join(['Running:', cmd]))
    output = utilities.execute_cmd(cmd)
    if len(output) > 0:
        logger.info(output)

    # gdal_translate -ot Byte -scale 0 10000 0 255 -of ENVI
    # LT50460282002042EDC01_toa_band3.img browse_3.img
    cmd = ' '.join(base_translate_cmd + ['out.tiff3', 'browse.tiff3'])
    logger.info(' '.join(['Running:', cmd]))
    output = utilities.execute_cmd(cmd)
    if len(output) > 0:
        logger.info(output)

    # ------------------------------------------------------------------------
    # Create the 3 band composite
    # gdal_merge_simple -of ENVI -in browse_5.img -in browse_4.img
    # -in browse_3.img -out final.img
    cmd = ' '.join([
        'gdal_merge_simple', '-in', 'browse.tiff5', '-in', 'browse.tiff4',
        '-in', 'browse.tiff3', '-out', 'final.tif'
    ])
    logger.info(' '.join(['Running:', cmd]))
    output = utilities.execute_cmd(cmd)
    if len(output) > 0:
        logger.info(output)

    # ------------------------------------------------------------------------
    # Project to geographic
    # gdalwarp -of ENVI -dstalpha -srcnodata 0 -t_srs EPSG:4326 final.img
    # warped.img
    cmd = ' '.join([
        'gdalwarp', '-dstalpha', '-srcnodata', '0', '-t_srs', 'EPSG:4326',
        'final.tif', 'warped.tif'
    ])
    logger.info(' '.join(['Running:', cmd]))
    output = utilities.execute_cmd(cmd)
    if len(output) > 0:
        logger.info(output)

    # ------------------------------------------------------------------------
    # Resize and rename
    # gdal_translate -ot INT16 -of ENVI -outsize 50 50 -a_nodata -9999
    # warped.img browse.img

    # Should probably use gdalwarp to set the resolution, because outsize in
    # gdal_translate is a percentage.  This step may not even be needed then,
    # because it could be handled in the previous gdalwarp step.
    cmd = ' '.join([
        'gdal_translate', '-co', 'COMPRESS=DEFLATE', '-co', 'PREDICTOR=2',
        '-outsize',
        str(resolution),
        str(resolution), '-a_nodata', '-9999', '-of', 'GTIFF', 'warped.tif',
        browse_filename
    ])
    logger.info(' '.join(['Running:', cmd]))
    output = utilities.execute_cmd(cmd)
    if len(output) > 0:
        logger.info(output)

    # ------------------------------------------------------------------------
    # Cleanup
    remove_files = ['warped.tif', 'final.tif']
    remove_files.extend(glob.glob('*tiff*'))

    cmd = ' '.join(['rm', '-rf'] + remove_files)
    logger.info(' '.join(['Running:', cmd]))
    output = utilities.execute_cmd(cmd)
    if len(output) > 0:
        logger.info(output)

    logger.info("Browse product generation complete...")
Code example #32
File: solr.py Project: govtmirror/espa-junk-drawer
def do_solr_index(metadata, scene, solr_filename, collection_name,
                  build_points=False):
    '''
    Description:
      Creates the solr index file from the metadata
    '''

    logger = EspaLogging.get_logger(settings.PROCESSING_LOGGER)

    logger.info("Executing create_solr_index() for %s using collection %s "
                % (scene, collection_name))

    # deal with the landsat metadata fieldname changes
    if 'CORNER_UL_LAT_PRODUCT' in metadata:
        upper_left_LL = '%s,%s' \
            % (metadata['CORNER_UL_LAT_PRODUCT'],
               metadata['CORNER_UL_LON_PRODUCT'])
        upper_right_LL = '%s,%s' \
            % (metadata['CORNER_UR_LAT_PRODUCT'],
               metadata['CORNER_UR_LON_PRODUCT'])
        lower_left_LL = '%s,%s' \
            % (metadata['CORNER_LL_LAT_PRODUCT'],
               metadata['CORNER_LL_LON_PRODUCT'])
        lower_right_LL = '%s,%s' \
            % (metadata['CORNER_LR_LAT_PRODUCT'],
               metadata['CORNER_LR_LON_PRODUCT'])

        if build_points:
            lat_list = [float(metadata['CORNER_UL_LAT_PRODUCT']),
                        float(metadata['CORNER_UR_LAT_PRODUCT']),
                        float(metadata['CORNER_LL_LAT_PRODUCT']),
                        float(metadata['CORNER_LR_LAT_PRODUCT'])]
            lon_list = [float(metadata['CORNER_UL_LON_PRODUCT']),
                        float(metadata['CORNER_UR_LON_PRODUCT']),
                        float(metadata['CORNER_LL_LON_PRODUCT']),
                        float(metadata['CORNER_LR_LON_PRODUCT'])]
    else:
        upper_left_LL = '%s,%s' \
            % (metadata['PRODUCT_UL_CORNER_LAT'],
               metadata['PRODUCT_UL_CORNER_LON'])
        upper_right_LL = '%s,%s' \
            % (metadata['PRODUCT_UR_CORNER_LAT'],
               metadata['PRODUCT_UR_CORNER_LON'])
        lower_left_LL = '%s,%s' \
            % (metadata['PRODUCT_LL_CORNER_LAT'],
               metadata['PRODUCT_LL_CORNER_LON'])
        lower_right_LL = '%s,%s' \
            % (metadata['PRODUCT_LR_CORNER_LAT'],
               metadata['PRODUCT_LR_CORNER_LON'])

        if build_points:
            lat_list = [float(metadata['PRODUCT_UL_CORNER_LAT']),
                        float(metadata['PRODUCT_UR_CORNER_LAT']),
                        float(metadata['PRODUCT_LL_CORNER_LAT']),
                        float(metadata['PRODUCT_LR_CORNER_LAT'])]
            lon_list = [float(metadata['PRODUCT_UL_CORNER_LON']),
                        float(metadata['PRODUCT_UR_CORNER_LON']),
                        float(metadata['PRODUCT_LL_CORNER_LON']),
                        float(metadata['PRODUCT_LR_CORNER_LON'])]

    solr_buffer = StringIO()

    solr_buffer.write("<add><doc>\n")

    solr_buffer.write("<field name='sceneid'>%s</field>\n" % scene)

    solr_buffer.write("<field name='path'>%s</field>\n"
                      % metadata['WRS_PATH'])

    # this is a fix for the changes to landsat metadata...
    # currently have mixed versions on the cache
    row = None
    if 'WRS_ROW' in metadata:
        row = metadata['WRS_ROW']
    else:
        row = metadata['STARTING_ROW']

    solr_buffer.write("<field name='row'>%s</field>\n" % row)

    solr_buffer.write("<field name='sensor'>%s</field>\n"
                      % metadata['SENSOR_ID'])

    solr_buffer.write("<field name='sunElevation'>%s</field>\n"
                      % metadata['SUN_ELEVATION'])

    solr_buffer.write("<field name='sunAzimuth'>%s</field>\n"
                      % metadata['SUN_AZIMUTH'])

    solr_buffer.write("<field name='groundStation'>%s</field>\n"
                      % metadata['STATION_ID'])

    # get the acquisition date... account for landsat changes
    acquisition_date = None
    if 'DATE_ACQUIRED' in metadata:
        acquisition_date = ''.join([metadata['DATE_ACQUIRED'], 'T00:00:01Z'])
    else:
        acquisition_date = ''.join([metadata['ACQUISITION_DATE'],
                                    'T00:00:01Z'])

    solr_buffer.write("<field name='acquisitionDate'>%s</field>\n"
                      % acquisition_date)

    solr_buffer.write("<field name='collection'>%s</field>\n"
                      % collection_name)

    solr_buffer.write("<field name='upperRightCornerLatLong'>%s</field>\n"
                      % upper_right_LL)
    solr_buffer.write("<field name='upperLeftCornerLatLong'>%s</field>\n"
                      % upper_left_LL)
    solr_buffer.write("<field name='lowerLeftCornerLatLong'>%s</field>\n"
                      % lower_left_LL)
    solr_buffer.write("<field name='lowerRightCornerLatLong'>%s</field>\n"
                      % lower_right_LL)

    if build_points:
        # Build lat and lon list values using the same step hard coded in the
        # original code implementation (util.buildMatrix)
        step = 0.05
        for lat in np.arange(min(lat_list), max(lat_list), step):
            for lon in np.arange(min(lon_list), max(lon_list), step):
                solr_buffer.write("<field name='latitude_longitude'>"
                                  "%f,%f</field>\n" % (round(lat, 6),
                                                       round(lon, 6)))

    solr_buffer.write("</doc></add>")
    solr_buffer.flush()

    with open(solr_filename, 'w') as output_fd:
        output_fd.write(solr_buffer.getvalue())

    solr_buffer.close()
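A hypothetical invocation follows; the metadata keys mirror the ones read above, and every value is invented for illustration.

# Illustrative call (all values invented; build_points=False keeps the
# lat/lon point matrix out of the index document)
metadata = {
    'CORNER_UL_LAT_PRODUCT': '38.2', 'CORNER_UL_LON_PRODUCT': '-113.9',
    'CORNER_UR_LAT_PRODUCT': '38.2', 'CORNER_UR_LON_PRODUCT': '-111.5',
    'CORNER_LL_LAT_PRODUCT': '36.1', 'CORNER_LL_LON_PRODUCT': '-113.9',
    'CORNER_LR_LAT_PRODUCT': '36.1', 'CORNER_LR_LON_PRODUCT': '-111.5',
    'WRS_PATH': '46', 'WRS_ROW': '28', 'SENSOR_ID': 'TM',
    'SUN_ELEVATION': '25.6', 'SUN_AZIMUTH': '152.1',
    'STATION_ID': 'EDC', 'DATE_ACQUIRED': '2002-02-11',
}

do_solr_index(metadata, 'LT50460282002042EDC01',
              'LT50460282002042EDC01-index.xml', 'my_collection',
              build_points=False)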