    def perform_processing(transformer_instance: transformer_class.Transformer, args: argparse.Namespace, metadata: dict) -> dict:
        """Makes the calls to perform the processing
        Arguments:
            transformer_instance: instance of transformer class
            args: the command line arguments
            metadata: the loaded metadata
        Return:
            Returns a dict containing the result of processing
        """
        result = {}

        # Get the various types of parameters from the transformer instance
        if hasattr(transformer_instance, 'get_transformer_params'):
            transformer_params = transformer_instance.get_transformer_params(args, metadata)
            if not isinstance(transformer_params, dict):
                return __internal__.handle_error(-101,
                                                 "Invalid return from getting transformer parameters from transformer class instance")

            params_result = __internal__.check_params_result_error(transformer_params)
            if params_result:
                return params_result
        else:
            logging.info("Transformer class instance does not have get_transformer_params method")
            transformer_params = {}

        # First check if the transformer thinks everything is in place
        if hasattr(transformer, 'check_continue'):
            result = __internal__.handle_check_continue(transformer_instance, transformer_params)
            if 'code' in result and result['code'] < 0 and 'error' not in result:
                result['error'] = "Unknown error returned from check_continue call"
        else:
            logging.info("Transformer module doesn't have a function named 'check_continue'")

        # Retrieve additional files if indicated by the return code from the check
        if 'error' not in result and 'code' in result and result['code'] == 0:
            retrieve_result = __internal__.handle_retrieve_files(transformer_instance, args, metadata)
            if retrieve_result is not None:
                result = retrieve_result

        # Next make the call to perform the processing
        if 'error' not in result:
            if hasattr(transformer, 'perform_process'):
                result = transformer.perform_process(transformer_instance, **transformer_params)
            else:
                logging.debug("Transformer module is missing function named 'perform_process'")
                return __internal__.handle_error(-102, "Transformer perform_process interface is not available " +
                                                 "for processing data")

        return result

    def handle_retrieve_files(transformer_instance: transformer_class.Transformer, args: argparse.Namespace, metadata: dict) ->\
            Optional[dict]:
        """Handles calling the transformer class to retrieve files
        Arguments:
            transformer_instance: the current transformer environment
            args: the command line arguments
            metadata: the loaded metadata
        Return:
            A dict containing error information if a problem occurs and None if no problems are found.
        Note:
            A side effect of this function is an informational message being logged if the transformer class instance does not
            have a 'retrieve_files' function declared.
        """
        if hasattr(transformer_instance, 'retrieve_files'):
            transformer_retrieve = transformer_instance.retrieve_files(args, metadata)
            retrieve_results = __internal__.check_retrieve_results_error(transformer_retrieve)
            if retrieve_results:
                return retrieve_results
        else:
            logging.info("Transformer class doesn't have function named 'retrieve_files'")

        return None
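For orientation, here is a minimal sketch of the transformer module interface that perform_processing dispatches to. It is inferred only from the hasattr checks above and the perform_process signatures in the later examples; the check_continue signature and return value are assumptions, not something this code confirms.

# Hypothetical transformer.py algorithm module (sketch only, not the real implementation)
import transformer_class


def check_continue(transformer: transformer_class.Transformer, check_md: dict,
                   transformer_md: list, full_md: list) -> dict:
    """Assumed hook: report whether processing should proceed.
    perform_processing treats a negative 'code' as an error."""
    return {'code': 0}


def perform_process(transformer: transformer_class.Transformer, check_md: dict,
                    transformer_md: list, full_md: list) -> dict:
    """Entry point invoked as transformer.perform_process(instance, **transformer_params)."""
    return {'code': 0, 'file': []}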
Example 3
"""Tests for 'entrypoint.py'
"""

#Import entrypoint.py and embedded modules
import argparse
import entrypoint
import transformer
from transformer_class import Transformer

#Set up initial testing values
TEST_TRANSFORMER = Transformer()
PARSE = argparse.ArgumentParser(description="test")
TEST_INTERNAL = entrypoint.__internal__()


# pylint: disable=assignment-from-no-return
def test_handle_error():
    """Test for handle error
    """
    #Some testing arguments
    test_code = 117
    test_message = "Test message"

    #Initial test using "ideal" args
    ideal_example = TEST_INTERNAL.handle_error(test_code, test_message)
    #Should return dict type
    assert isinstance(ideal_example, dict)

    #A secondary test
    test_code = None
    test_message = False
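As used in the test above and in the first example, __internal__.handle_error appears to log a problem and hand back a result-style dictionary. A plausible minimal shape, assumed for illustration rather than taken from the actual implementation:

import logging


def handle_error(code: int, message: str) -> dict:
    """Hypothetical sketch of handle_error: log the message and return it in the
    result-dict shape used throughout these examples."""
    if message is None:
        message = "An unknown error has been reported"
    logging.error(message)
    return {'code': code, 'error': message}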
Example 4
def perform_process(transformer: transformer_class.Transformer, check_md: dict,
                    transformer_md: dict, full_md: dict) -> dict:
    """Performs the processing of the data
    Arguments:
        transformer: instance of transformer class
        check_md: request specific metadata
        transformer_md: metadata associated with previous runs of the transformer
        full_md: the full set of metadata available to the transformer
    Return:
        Returns a dictionary with the results of processing
    """
    # Find the source file to process
    source_file = None
    for one_file in check_md['list_files']():
        if one_file.endswith('.bin'):
            source_file = one_file
            break

    # Initialize local variables
    logging.debug("Working with source file: %s", source_file)
    out_filename = os.path.splitext(os.path.basename(source_file))[0] + '.tif'
    out_file = os.path.join(check_md['working_folder'], out_filename)

    bin_type = 'left' if source_file.endswith(
        '_left.bin') else 'right' if source_file.endswith(
            '_right.bin') else None
    if not bin_type:
        msg = "Bin file must be a left or right file: '%s'" % source_file
        logging.error(msg)
        logging.error("    Returning an error")
        return {'code': -1000, 'error': msg}
    logging.debug("Source image is type: %s", bin_type)

    # Process the file
    try:
        bin_shape = terraref.stereo_rgb.get_image_shape(
            check_md['context_md'], bin_type)
        gps_bounds_bin = tr_geojson_to_tuples(
            check_md['context_md']['spatial_metadata'][bin_type]
            ['bounding_box'])
    except KeyError:
        msg = "Spatial metadata is not properly identified. Unable to continue"
        logging.error(msg)
        return {'code': -1001, 'error': msg}
    logging.debug("Image bounds are: %s", str(gps_bounds_bin))

    # Perform actual processing
    if transformer.args.save_intermediate:
        intermediate_filename = os.path.join(check_md['working_folder'],
                                             "intermediate.tif")
        logging.info("Generating intermediate image file: %s",
                     intermediate_filename)
    else:
        intermediate_filename = None
    new_image = terraref.stereo_rgb.process_raw(bin_shape, source_file,
                                                intermediate_filename)
    tr_create_geotiff(new_image,
                      gps_bounds_bin,
                      out_file,
                      None,
                      False,
                      transformer.generate_transformer_md(),
                      check_md['context_md'],
                      compress=True)

    return {
        'code': 0,
        'file': [{
            'path': out_file,
            'key': configuration.TRANSFORMER_TYPE
        }]
    }
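The check_md dictionary drives each of these perform_process implementations. A hedged sketch of the keys relied on above, with purely illustrative values:

# Illustrative only: the shape of check_md as the examples above use it
check_md_example = {
    'working_folder': '/tmp/output',                # where derived files are written
    'list_files': lambda: ['/data/scan_left.bin'],  # callable returning source file paths
    'context_md': {                                 # per-capture metadata, e.g. spatial metadata
        'spatial_metadata': {
            'left': {'bounding_box': {}}            # placeholder; real content is sensor specific
        }
    }
}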
Example 5
def perform_process(transformer: transformer_class.Transformer, check_md: dict,
                    transformer_md: list, full_md: list) -> dict:
    """Performs the processing of the data
    Arguments:
        transformer: instance of transformer class
        check_md: request specific metadata
        transformer_md: metadata associated with previous runs of the transformer
        full_md: the full set of metadata available to the transformer
    Return:
        Returns a dictionary with the results of processing
    """
    # pylint: disable=unused-argument
    file_md = []
    start_timestamp = datetime.datetime.utcnow()

    file_list = __internal__.get_file_list(check_md['list_files']())
    files_count = len(file_list)

    # Find the metadata we're interested in for calibration parameters
    terra_md = __internal__.find_terra_md(full_md)
    if not terra_md:
        raise RuntimeError("Unable to find TERRA REF specific metadata")

    transformer_md = transformer.generate_transformer_md()

    def generate_file_md(file_path: str) -> dict:
        """Returns file metadata for a file
        Arguments:
            file_path: the file to generate metadata for
        Return:
            Returns the metadata
        """
        return {
            'path': file_path,
            'key': configuration.TRANSFORMER_SENSOR,
            'metadata': {
                'data': transformer_md
            }
        }

    # Generate a list of approved file name endings
    file_endings = ["{0:0>4}.bin".format(i) for i in range(0, 102)]

    files_processed = 0
    try:
        img_width, img_height = __internal__.get_image_dimensions(terra_md)
        gps_bounds = geojson_to_tuples(
            terra_md['spatial_metadata']['ps2Top']['bounding_box'])
        logging.debug("Image width and height: %s %s", str(img_width),
                      str(img_height))
        logging.debug("Image geo bounds: %s", str(gps_bounds))

        png_frames = {}
        for one_file in file_list:
            if one_file[-8:] in file_endings:
                files_processed += 1
                logging.debug("Processing file: '%s'", one_file)

                try:
                    pixels = np.fromfile(one_file, np.dtype('uint8')).reshape(
                        [int(img_height), int(img_width)])
                except ValueError:
                    logging.info(
                        "Ignoring ValueError exception while loading file '%s'",
                        one_file)
                    continue

                png_filename = os.path.join(
                    check_md['working_folder'],
                    os.path.basename(one_file.replace('.bin', '.png')))
                logging.info("Creating: '%s'", png_filename)
                create_image(pixels, png_filename)
                file_md.append(generate_file_md(png_filename))
                png_frames[int(one_file[-8:-4])] = png_filename

                tif_filename = os.path.join(
                    check_md['working_folder'],
                    os.path.basename(one_file.replace('.bin', '.tif')))
                logging.info("Creating: '%s'", tif_filename)
                create_geotiff(pixels, gps_bounds, tif_filename, None, False,
                               transformer_md, terra_md)
                file_md.append(generate_file_md(tif_filename))
            else:
                logging.info("Skipping non-sensor file '%s'", one_file)

        if files_processed > 0:
            logging.info("Generating aggregates")
            hist_path = os.path.join(check_md['working_folder'],
                                     'combined_hist.png')
            false_color_path = os.path.join(check_md['working_folder'],
                                            'combined_pseudocolored.png')
            __internal__.analyze(png_frames, hist_path, false_color_path)
            file_md.append(generate_file_md(hist_path))
            file_md.append(generate_file_md(false_color_path))
        else:
            logging.warning("No files were processed")

        result = {
            'code': 0,
            'file': file_md,
            configuration.TRANSFORMER_NAME: {
                'version': configuration.TRANSFORMER_VERSION,
                'utc_timestamp': datetime.datetime.utcnow().isoformat(),
                'processing_time':
                str(datetime.datetime.utcnow() - start_timestamp),
                'num_files_received': str(files_count),
                'files_processed': str(files_processed)
            }
        }

    except Exception as ex:
        msg = 'Exception caught converting PSII files'
        logging.exception(msg)
        result = {'code': -1000, 'error': msg + ': ' + str(ex)}

    return result
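Across these examples the returned dictionary follows a recognizable contract. A hedged summary with illustrative values; key names are taken from the examples above, not from a formal specification:

# Observed result shape (illustrative values only)
result_example = {
    'code': 0,                       # 0 on success, negative on error (paired with an 'error' message)
    'file': [                        # files produced, each with a path, a key, and optional metadata
        {'path': '/tmp/output/out.tif', 'key': 'sensor-or-type-key', 'metadata': {'data': {}}}
    ],
    'transformer-name': {            # bookkeeping keyed by configuration.TRANSFORMER_NAME
        'version': '1.0',
        'utc_timestamp': '2020-01-01T00:00:00',
        'processing_time': '0:00:01.000000'
    }
}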
Example 6
def perform_process(transformer: transformer_class.Transformer, check_md: dict, transformer_md: list, full_md: list) -> dict:
    """Performs the processing of the data
    Arguments:
        transformer: instance of transformer class
        check_md: request specific metadata
        transformer_md: metadata associated with previous runs of the transformer
        full_md: the full set of metadata available to the transformer
    Return:
        Returns a dictionary with the results of processing
    """
    # pylint: disable=unused-argument
    start_timestamp = datetime.datetime.now()
    all_files = check_md['list_files']()
    total_file_count = len(all_files)
    files_to_process = __internal__.get_files_to_process(all_files)

    file_md = []
    num_image_files = 0
    num_processed_files = 0
    for one_file in files_to_process:
        logging.debug("Processing file: '%s'", one_file)
        num_image_files += 1

        if not os.path.exists(one_file):
            logging.error("Unable to access file: '%s'. Continuing processing", one_file)
            continue

        try:
            quality_value = __internal__.get_image_quality(one_file)
            image_bounds = transformer.get_image_file_geobounds(one_file)
            quality_image_bounds = (image_bounds[2], image_bounds[3], image_bounds[0], image_bounds[1])

            mac_file_name = os.path.join(check_md['working_folder'], os.path.splitext(os.path.basename(one_file))[0] + '_mac.tif')

            logging.info("MAC score %s for file '%s'", str(quality_value), one_file)
            logging.debug("Creating quality image: bounds %s  name: '%s'", str(quality_image_bounds), mac_file_name)
            create_geotiff(np.array([[quality_value, quality_value], [quality_value, quality_value]]), quality_image_bounds,
                           mac_file_name, None, True, transformer.generate_transformer_md(), full_md[0], compress=True)

            num_processed_files += 1
            file_md.append(
                {
                    'path': mac_file_name,
                    'key': 'tif',
                    'metadata': {
                        'replace': True,
                        'data': {
                            'MAC score': str(quality_value),
                            'utc_timestamp': datetime.datetime.utcnow().isoformat(),
                            'source_file': one_file
                        }
                    }
                }
            )
        except Exception as ex:
            logging.warning("Ignoring exception caught processing image file '%s'", one_file)
            logging.debug("Exception: %s", str(ex))
            logging.exception('broken')

    return {'code': 0,
            'files': file_md,
            configuration.TRANSFORMER_NAME: {
                'version': configuration.TRANSFORMER_VERSION,
                'utc_timestamp': datetime.datetime.utcnow().isoformat(),
                'processing_time': str(datetime.datetime.now() - start_timestamp),
                'num_files_received': str(total_file_count),
                'num_image_files': str(num_image_files),
                'num_processed_files': str(num_processed_files)
            }
            }
Example 7
def perform_process(transformer: transformer_class.Transformer, check_md: dict,
                    transformer_md: list, full_md: list) -> dict:
    """Performs the processing of the data
    Arguments:
        transformer: instance of transformer class
        check_md: the metadata for this request
        transformer_md: the metadata associated with this transformer
        full_md: the full set of original metadata
    Return:
        Returns a dictionary with the results of processing
    """
    # pylint: disable=unused-argument
    result = {}
    file_md = []

    # Loop through the files
    try:
        for one_file in check_md['list_files']():
            # Check file by type
            ext = os.path.splitext(one_file)[1].lower()
            if ext not in ('.tiff', '.tif'):
                continue
            if not os.path.exists(one_file):
                logging.warning("Unable to access file '%s'", one_file)
                continue
            mask_source = one_file

            # Get the image's EPSG code
            epsg = transformer.get_image_file_epsg(mask_source)
            if epsg is None:
                logging.debug("Skipping image that is not georeferenced: '%s'",
                              mask_source)
                continue

            # Reproject the image if it's in the wrong coordinate system
            if epsg != transformer.default_epsg:
                logging.info(
                    "Reprojecting image from EPSG %s to default EPSG %s",
                    str(epsg), str(transformer.default_epsg))
                _, tmp_name = tempfile.mkstemp(dir=check_md['working_folder'])
                src = gdal.Open(mask_source)
                gdal.Warp(tmp_name,
                          src,
                          dstSRS='EPSG:' + str(transformer.default_epsg))
                mask_source = tmp_name

            # Get the bounds of the image to see if we can process it.
            bounds = transformer.get_image_file_geobounds(mask_source)

            if bounds is None:
                logging.warning(
                    "Unable to get bounds of georeferenced image: '%s'",
                    os.path.basename(one_file))
                if mask_source != one_file:
                    os.remove(mask_source)
                continue

            # Get the mask name using the original name as reference
            rgb_mask_tif = os.path.join(
                check_md['working_folder'],
                __internal__.get_maskfilename(one_file))

            # Create the mask file
            logging.debug("Creating mask file '%s'", rgb_mask_tif)
            mask_ratio, mask_rgb = gen_cc_enhanced(mask_source)

            # Bands must be reordered to avoid swapping R and B
            mask_rgb = cv2.cvtColor(mask_rgb, cv2.COLOR_BGR2RGB)

            transformer_info = transformer.generate_transformer_md()

            tr_create_geotiff(mask_rgb, bounds, rgb_mask_tif, None, False,
                              transformer_info, check_md['context_md'])
            tr_compress_geotiff(rgb_mask_tif)

            # Remove any temporary file
            if mask_source != one_file:
                os.remove(mask_source)

            transformer_md = {
                'name': transformer_info['name'],
                'version': transformer_info['version'],
                'ratio': mask_ratio
            }

            new_file_md = {
                'path': rgb_mask_tif,
                'key': configuration.TRANSFORMER_SENSOR,
                'metadata': {
                    'data': transformer_md
                }
            }
            file_md.append(new_file_md)

        result['code'] = 0
        result['file'] = file_md

    except Exception as ex:
        result['code'] = -1001
        result['error'] = "Exception caught masking files: %s" % str(ex)

    return result
Example 8
def perform_process(transformer: transformer_class.Transformer, check_md: dict,
                    transformer_md: dict, full_md: list) -> dict:
    """Performs the processing of the data
    Arguments:
        transformer: instance of transformer class
        check_md: request specific metadata
        transformer_md: metadata associated with previous runs of the transformer
        full_md: the full set of metadata available to the transformer
    Return:
        Returns a dictionary with the results of processing
    """
    # pylint: disable=unused-argument
    result = {}
    file_md = []

    file_list = check_md['list_files']()

    # Extract necessary parameters from metadata
    scan_distance = None
    scan_direction = None
    point_cloud_origin = None
    for one_metadata in full_md:
        if 'sensor_variable_metadata' in one_metadata:
            scan_distance = float(one_metadata['sensor_variable_metadata']
                                  ['scan_distance_mm']) / MM_PER_METER
            scan_direction = int(
                one_metadata['sensor_variable_metadata']['scan_direction'])
            point_cloud_origin = one_metadata['sensor_variable_metadata'][
                'point_cloud_origin_m']['east']
            break

    if scan_distance is None:
        result['code'] = -1
        result['error'] = "Unable to find sensor variable metadata needed for LAS conversion"
        return result

    try:
        ply_files = []
        transformer_info = transformer.generate_transformer_md()
        for one_file in file_list:
            if one_file.endswith(".ply"):
                ply_files.append(
                    os.path.join(check_md['working_folder'], one_file))
        if ply_files:
            out_file = ply_files[0].replace(".ply", ".las")
            generate_las_from_ply(ply_files, out_file, scan_distance,
                                  scan_direction, point_cloud_origin, True)

            file_md.append({
                'path': out_file,
                'key': configuration.TRANSFORMER_SENSOR,
                'metadata': {
                    'data': {
                        'name': transformer_info['name'],
                        'version': transformer_info['version'],
                        'source': ','.join(file_list),
                        'utc_timestamp': datetime.datetime.now().isoformat()
                    }
                }
            })
        result['code'] = 0
        result['file'] = file_md
        result[configuration.TRANSFORMER_NAME] = {
            **transformer_info, 'utc_timestamp':
            datetime.datetime.now().isoformat()
        }

    except Exception as ex:
        result['code'] = -1
        result['error'] = "Exception caught converting PLY files: %s" % str(ex)

    return result