Example 1
def flir2tif(input_paths, full_md=None):
    # Determine metadata and BIN file
    bin_file = None
    for f in input_paths:
        if f.endswith(".bin"):
            bin_file = f
        if f.endswith("_cleaned.json") and full_md is None:
            with open(f, 'r') as mdf:
                full_md = json.load(mdf)['content']

    # TODO: Figure out how to pass extractor details to create_geotiff in both types of pipelines
    extractor_info = None
    out_file = None

    if full_md:
        if bin_file is not None:
            out_file = bin_file.replace(".bin", ".tif")
            gps_bounds_bin = geojson_to_tuples(
                full_md['spatial_metadata']['flirIrCamera']['bounding_box'])
            raw_data = np.fromfile(bin_file, np.dtype('<u2')).reshape(
                [480, 640]).astype('float')
            raw_data = np.rot90(raw_data, 3)
            tc = rawData_to_temperature(raw_data, full_md)
            create_geotiff(tc,
                           gps_bounds_bin,
                           out_file,
                           None,
                           False,
                           extractor_info,
                           full_md,
                           compress=True)

    # Return formatted dict for simple extractor
    files_created = [out_file] if out_file else []
    return {"metadata": {"files_created": files_created}, "outputs": files_created}
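
The calls above pass up to seven positional arguments to create_geotiff. Inferred from these call sites alone (an assumption, not the documented terrautils API), the parameter order appears to be roughly:

# Assumed interface, reconstructed from the call sites in this listing;
# a stub for reference, not taken from terrautils documentation.
def create_geotiff(pixels, gps_bounds, out_path, srid=None, asfloat=False,
                   extractor_info=None, system_md=None, extra_metadata=None,
                   compress=False):
    """Write a numpy array as a georeferenced GeoTIFF."""
    raise NotImplementedError  # interface sketch only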
Example 2
    def process_message(self, connector, host, secret_key, resource,
                        parameters):
        self.start_message(resource)

        f = resource['local_paths'][0]

        self.log_info(resource, "determining image quality")
        qual = getImageQuality(f)

        self.log_info(resource, "creating output image")
        md = download_ds_metadata(connector, host, secret_key,
                                  resource['parent']['id'])
        terramd = get_terraref_metadata(md)
        if "left" in f:
            bounds = geojson_to_tuples(
                terramd['spatial_metadata']['left']['bounding_box'])
        else:
            bounds = geojson_to_tuples(
                terramd['spatial_metadata']['right']['bounding_box'])
        output = f.replace(".tif", "_nrmac.tif")
        create_geotiff(np.array([[qual, qual], [qual, qual]]), bounds, output)
        upload_to_dataset(connector, host, self.clowder_user,
                          self.clowder_pass, resource['parent']['id'], output)

        # Tell Clowder this is completed so subsequent file updates don't daisy-chain
        ext_meta = build_metadata(host, self.extractor_info, resource['id'],
                                  {"quality_score": qual}, 'file')
        self.log_info(resource, "uploading extractor metadata")
        upload_metadata(connector, host, secret_key, resource['id'], ext_meta)

        self.end_message(resource)
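
Because the quality score is written as a constant 2x2 raster, it can be read back out of the GeoTIFF afterwards. A minimal check, assuming the GDAL bindings already used elsewhere in these examples and the `output` path created above:

# Read the NRMAC quality score back out of the 2x2 GeoTIFF written above.
from osgeo import gdal

ds = gdal.Open(output)
score = ds.GetRasterBand(1).ReadAsArray()[0][0]
print("stored quality score:", score)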
Example 3
def main():
    """Convert bin files to geoTIFFs here"""

    args = get_args()

    if not os.path.isdir(args.outdir):
        os.makedirs(args.outdir)

    b_box, img_height, img_width = get_boundingbox(args.metadata, args.zoffset)

    shape = (img_width, img_height)
    im = np.fromfile(args.bin, dtype='uint8').reshape(shape[::-1])
    im_color = demosaic(im)
    im_color = np.rot90(im_color)

    basename = os.path.basename(args.bin).replace('.bin', '.tif')
    out_file = os.path.join(args.outdir, basename)

    extractor_info = None
    create_geotiff(im_color,
                   b_box,
                   out_file,
                   None,
                   False,
                   extractor_info,
                   None,
                   compress=True)
Example 4
def do_work(left_file, right_file, json_file):
    """Make the calls to convert the files
    Args:
        left_file(str): Path to the left BIN file
        right_file(str): Path to the right BIN file
        json_file(str): Path to the JSON file
    """
    out_left = os.path.splitext(left_file)[0] + ".tif"
    out_right = os.path.splitext(right_file)[0] + ".tif"
    file_name, file_ext = os.path.splitext(json_file)
    out_json = file_name + "_updated" + file_ext

    # Load the JSON
    with open(json_file, "r") as infile:
        metadata = json.load(infile)
        if not metadata:
            raise RuntimeError("JSON file appears to be invalid or empty: " + json_file)

    # Prepare the metadata
    clean_md = get_terraref_metadata(clean_metadata(metadata, 'stereoTop'), 'stereoTop')

    # Pull out the information we need from the JSON
    try:
        left_shape = terraref.stereo_rgb.get_image_shape(clean_md, 'left')
        gps_bounds_left = geojson_to_tuples(clean_md['spatial_metadata']['left']['bounding_box'])
        right_shape = terraref.stereo_rgb.get_image_shape(clean_md, 'right')
        gps_bounds_right = geojson_to_tuples(clean_md['spatial_metadata']['right']['bounding_box'])
    except KeyError:
        print("ERROR: Spatial metadata not properly identified in JSON file")
        return
 
    # Make the conversion calls
    print("creating %s" % out_left)
    left_image = terraref.stereo_rgb.process_raw(left_shape, left_file, None)
    create_geotiff(left_image, gps_bounds_left, out_left, asfloat=False, system_md=clean_md, compress=False)

    print("creating %s" % out_right)
    right_image = terraref.stereo_rgb.process_raw(right_shape, right_file, None)
    create_geotiff(right_image, gps_bounds_right, out_right, asfloat=False, system_md=clean_md, compress=True)

    # Write the metadata
    print("creating %s" % out_json)
    with open(out_json, "w") as outfile:
        json.dump(clean_md, outfile, indent=4)
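
A hypothetical invocation of do_work; the paths are placeholders, not files from the original project:

# Hypothetical usage; file names are placeholders.
do_work("scan_left.bin", "scan_right.bin", "scan_metadata.json")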
Example 5
def generate_tif_from_ply(inp, out, md, mode='max'):
    """
    Create a raster (e.g. Digital Surface Map) from LAS pointcloud.
    :param inp: input LAS file
    :param out: output TIF file
    :param md: metadata for the PLY files
    :param mode: max | min | mean | idx | count | stdev (https://pdal.io/stages/writers.gdal.html)
    """

    pdal_dtm = out.replace(".tif", "_dtm.json")
    las_raw = out.replace(".tif", "_temp.las")
    tif_raw = out.replace(".tif", "unreferenced.tif")

    bounds = generate_las_from_ply(inp, las_raw, md, False)

    if not os.path.exists(tif_raw):
        # Generate a temporary JSON file with PDAL pipeline for conversion to TIF and execute it
        with open(pdal_dtm, 'w') as dtm:
            dtm.write("""{
            "pipeline": [
                "%s",
                {
                    "filename":"%s",
                    "output_type":"%s",
                    "resolution": 1,
                    "type": "writers.gdal"
                }
            ]
        }""" % (las_raw, tif_raw, mode))
        # "gdalopts": "t_srs=epsg:32612"

        cmd = 'pdal pipeline %s' % pdal_dtm
        subprocess.call(cmd, shell=True)

    os.remove(las_raw)

    # Georeference the unreferenced TIF file according to PLY UTM bounds
    ds = gdal.Open(tif_raw)
    px = ds.GetRasterBand(1).ReadAsArray()
    #if scan_dir == 0:
    #   px = numpy.rot90(px, 2)
    #   x = numpy.fliplr(px)
    create_geotiff(px, bounds, out, asfloat=True)

    os.remove(tif_raw)
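
The PDAL pipeline above is assembled by %-formatting a JSON string, which breaks if a path contains quotes or backslashes. A sketch of the same pipeline built as a dict and serialized with json instead (names as defined in generate_tif_from_ply):

import json

# Same PDAL pipeline as above, built as a dict and serialized, so paths
# are quoted correctly regardless of their contents.
pipeline = {
    "pipeline": [
        las_raw,
        {
            "filename": tif_raw,
            "output_type": mode,
            "resolution": 1,
            "type": "writers.gdal",
        },
    ]
}
with open(pdal_dtm, "w") as dtm:
    json.dump(pipeline, dtm, indent=4)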
Example 6
def main():
    """Calibrate images here"""
    args = get_args()
    img_list = glob.glob(f'{args.dir}/*.tif', recursive=True)
    out_path = args.outdir

    if not os.path.isdir(out_path):
        os.makedirs(out_path)

    for img in img_list:

        meta_path = img.replace('.tif', '_meta.csv')
        filename = os.path.basename(img)
        outfile = os.path.join(os.getcwd(), out_path, filename)
        print(outfile)

        meta_df = pd.read_csv(meta_path, delimiter=';')
        print(meta_df['TempFPA'][0])

        g_img = gdal.Open(img)
        exif_dict = piexif.load(img)
        zeroth = str(exif_dict['0th'])
        exif = str(exif_dict['Exif'])
        GPS = str(exif_dict['GPS'])

        gps_data = gpsphoto.getGPSData(img)
        rawData = gpsphoto.getRawData(img)

        raw_data = g_img.GetRasterBand(1).ReadAsArray().astype('float')
        tc = raw2temp(raw_data, meta_df)

        create_geotiff(tc, (0, 0, 0, 0), outfile, None, True, None,
                       extra_metadata=[
                           f'0th={zeroth.strip("{}")}\n'
                           f'Exif={exif.strip("{}")}\n'
                           f'GPS={GPS.strip("{}")}\n'
                       ], compress=False)

        cmd = f'exiftool -overwrite_original -TagsFromFile {img} {outfile}'
        subprocess.call(cmd, shell=True)

        exif_dict = piexif.load(outfile)
        gps_data = gpsphoto.getGPSData(outfile)
        print(f'{gps_data}\n')
Example 7
def main():
    """Create TIF here"""

    args = get_args()

    if not os.path.isdir(args.outdir):
        os.makedirs(args.outdir)

    bin_file = args.bin
    if bin_file is not None:
        with open(args.metadata, 'r') as mdf:

            full_md = json.load(mdf)['lemnatec_measurement_metadata']
            extractor_info = None

            if full_md:
                if bin_file is not None:
                    out_file = os.path.join(
                        args.outdir,
                        os.path.basename(bin_file).replace(".bin", ".tif"))

                    #gps_bounds_bin = geojson_to_tuples(
                    #full_md['spatial_metadata']['flirIrCamera']['bounding_box'])
                    gps_bounds_bin, img_height, img_width = get_boundingbox(
                        args.metadata, args.zoffset)

                    raw_data = np.fromfile(bin_file, np.dtype('<u2')).reshape(
                        [480, 640]).astype('float')
                    raw_data = np.rot90(raw_data, 3)

                    tc = flirRawToTemperature(raw_data, full_md)
                    create_geotiff(tc,
                                   gps_bounds_bin,
                                   out_file,
                                   None,
                                   True,
                                   extractor_info,
                                   None,
                                   compress=True)

                    print(f'Done. See output in {args.outdir}')
Example 8
    def generate_all_outputs(self, input_image, out_csv, out_dgci, out_edge,
                             out_label, gps_bounds):
        # Generate actual output CSV and PNGs
        cmd = "Rscript gift.R -f %s " % input_image
        cmd += "--table -o %s " % out_csv
        cmd += "--dgci --outputdgci %s " % out_dgci
        cmd += "--edge --outputedge %s " % out_edge
        cmd += "--label --outputlabel %s " % out_label
        logging.info(cmd)
        subprocess.call(cmd, shell=True)

        # Convert PNGs to GeoTIFFs
        for png_path in [out_dgci, out_edge, out_label]:
            tif_path = png_path.replace(".png", ".tif")
            with Image.open(png_path) as png:
                px_array = numpy.array(png)
                create_geotiff(px_array, gps_bounds, tif_path)

        # Remove PNGs
        os.remove(out_dgci)
        os.remove(out_edge)
        os.remove(out_label)
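
The Rscript command above is built by string concatenation and run through the shell, so a path containing spaces would break it. A sketch of the same call in list form, without the shell, using the names from generate_all_outputs:

# Shell-free variant of the Rscript invocation above; the arguments mirror
# the flags concatenated into `cmd`.
subprocess.call(["Rscript", "gift.R", "-f", input_image,
                 "--table", "-o", out_csv,
                 "--dgci", "--outputdgci", out_dgci,
                 "--edge", "--outputedge", out_edge,
                 "--label", "--outputlabel", out_label])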
Example 9
    def process_message(self, connector, host, secret_key, resource,
                        parameters):
        self.start_message(resource)

        # Get left/right files and metadata
        img_left, img_right, metadata = None, None, None
        for fname in resource['local_paths']:
            if fname.endswith('_dataset_metadata.json'):
                all_dsmd = load_json_file(fname)
                metadata = get_terraref_metadata(all_dsmd, 'stereoTop')
            elif fname.endswith('_left.bin'):
                img_left = fname
            elif fname.endswith('_right.bin'):
                img_right = fname
        if None in [img_left, img_right, metadata]:
            self.log_error(
                resource,
                "could not locate each of left+right+metadata in processing")
            raise ValueError(
                "could not locate each of left+right+metadata in processing")

        # Determine output location & filenames
        timestamp = resource['dataset_info']['name'].split(" - ")[1]
        left_tiff = self.sensors.create_sensor_path(timestamp, opts=['left'])
        right_tiff = self.sensors.create_sensor_path(timestamp, opts=['right'])
        uploaded_file_ids = []

        self.log_info(resource, "determining image shapes & gps bounds")
        left_shape = bin2tiff.get_image_shape(metadata, 'left')
        right_shape = bin2tiff.get_image_shape(metadata, 'right')
        left_gps_bounds = geojson_to_tuples(
            metadata['spatial_metadata']['left']['bounding_box'])
        right_gps_bounds = geojson_to_tuples(
            metadata['spatial_metadata']['right']['bounding_box'])
        out_tmp_tiff = os.path.join(tempfile.gettempdir(), resource['id'])

        target_dsid = build_dataset_hierarchy(
            host,
            secret_key,
            self.clowder_user,
            self.clowder_pass,
            self.clowderspace,
            self.sensors.get_display_name(),
            timestamp[:4],
            timestamp[5:7],
            timestamp[8:10],
            leaf_ds_name=self.sensors.get_display_name() + ' - ' + timestamp)

        if (not os.path.isfile(left_tiff)) or self.overwrite:
            self.log_info(resource, "creating & uploading %s" % left_tiff)
            left_image = bin2tiff.process_image(left_shape, img_left, None)
            # Rename output.tif after creation to avoid long path errors
            create_geotiff(left_image, left_gps_bounds, out_tmp_tiff, None,
                           False, self.extractor_info, metadata)
            # TODO: we're moving zero byte files
            shutil.move(out_tmp_tiff, left_tiff)
            if left_tiff not in resource['local_paths']:
                fileid = upload_to_dataset(connector, host, self.clowder_user,
                                           self.clowder_pass, target_dsid,
                                           left_tiff)
                uploaded_file_ids.append(host +
                                         ("" if host.endswith("/") else "/") +
                                         "files/" + fileid)
            else:
                self.log_info(
                    resource,
                    "file found in dataset already; not re-uploading")
            self.created += 1
            self.bytes += os.path.getsize(left_tiff)

        if (not os.path.isfile(right_tiff)) or self.overwrite:
            self.log_info(resource, "creating & uploading %s" % right_tiff)
            right_image = bin2tiff.process_image(right_shape, img_right, None)
            create_geotiff(right_image, right_gps_bounds, out_tmp_tiff, None,
                           False, self.extractor_info, metadata)
            shutil.move(out_tmp_tiff, right_tiff)
            if right_tiff not in resource['local_paths']:
                fileid = upload_to_dataset(connector, host, self.clowder_user,
                                           self.clowder_pass, target_dsid,
                                           right_tiff)
                uploaded_file_ids.append(host +
                                         ("" if host.endswith("/") else "/") +
                                         "files/" + fileid)
            else:
                self.log_info(
                    resource,
                    "file found in dataset already; not re-uploading")
            self.created += 1
            self.bytes += os.path.getsize(right_tiff)

        # Tell Clowder this is completed so subsequent file updates don't daisy-chain
        ext_meta = build_metadata(host, self.extractor_info, resource['id'],
                                  {"files_created": uploaded_file_ids},
                                  'dataset')
        self.log_info(resource, "uploading extractor metadata")
        upload_metadata(connector, host, secret_key, resource['id'], ext_meta)

        # Upload original Lemnatec metadata to new Level_1 dataset
        md = get_terraref_metadata(all_dsmd)
        md['raw_data_source'] = host + ("" if host.endswith("/") else
                                        "/") + "datasets/" + resource['id']
        lemna_md = build_metadata(host, self.extractor_info, target_dsid, md,
                                  'dataset')
        self.log_info(resource, "uploading LemnaTec metadata")
        upload_metadata(connector, host, secret_key, target_dsid, lemna_md)

        self.end_message(resource)
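
The host + "files/" + fileid concatenation above recurs in most extractors in this listing; a small helper (hypothetical, not part of the original codebase) captures the pattern:

def file_url(host, fileid):
    """Join a Clowder host and file id into a file URL (hypothetical helper)."""
    return host + ("" if host.endswith("/") else "/") + "files/" + fileid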
Example 10
    def process_message(self, connector, host, secret_key, resource, parameters):
        self.start_message(resource)

        # Get bin files and metadata
        metadata = None
        for f in resource['local_paths']:
            # First check metadata attached to dataset in Clowder for item of interest
            if f.endswith('_dataset_metadata.json'):
                all_dsmd = load_json_file(f)
                metadata = get_terraref_metadata(all_dsmd, "ps2Top")
            # Otherwise, check if metadata was uploaded as a .json file
            elif f.endswith('_metadata.json') and f.find('/_metadata.json') == -1 and metadata is None:
                metadata = load_json_file(f)
        frames = {}
        for ind in range(0, 101):
            format_ind = "{0:0>4}".format(ind) # e.g. 1 becomes 0001
            for f in resource['local_paths']:
                if f.endswith(format_ind+'.bin'):
                    frames[ind] = f
        if metadata is None or len(frames) < 101:
            self.log_error(resource, 'could not find all of frames/metadata')
            return

        # Determine output directory
        timestamp = resource['dataset_info']['name'].split(" - ")[1]
        hist_path = self.sensors.create_sensor_path(timestamp, opts=['combined_hist'])
        coloredImg_path = self.sensors.create_sensor_path(timestamp, opts=['combined_pseudocolored'])
        uploaded_file_ids = []

        target_dsid = build_dataset_hierarchy(host, secret_key, self.clowder_user, self.clowder_pass, self.clowderspace,
                                              self.sensors.get_display_name(),
                                              timestamp[:4], timestamp[5:7], timestamp[8:10],
                                              leaf_ds_name=self.sensors.get_display_name()+' - '+timestamp)

        (img_width, img_height) = self.get_image_dimensions(metadata)
        gps_bounds = geojson_to_tuples(metadata['spatial_metadata']['ps2Top']['bounding_box'])

        self.log_info(resource, "image dimensions (w, h): (%s, %s)" % (img_width, img_height))

        png_frames = {}
        # skip 0101.bin since 101 is an XML file that lists the frame times
        for ind in range(0, 101):
            format_ind = "{0:0>4}".format(ind) # e.g. 1 becomes 0001
            png_path = self.sensors.create_sensor_path(timestamp, opts=[format_ind])
            tif_path = png_path.replace(".png", ".tif")
            png_frames[ind] = png_path
            if not os.path.exists(png_path) or self.overwrite:
                self.log_info(resource, "generating and uploading %s" % png_path)
                pixels = np.fromfile(frames[ind], np.dtype('uint8')).reshape([int(img_height), int(img_width)])
                create_image(pixels, png_path)
                create_geotiff(pixels, gps_bounds, tif_path, None, False, self.extractor_info, metadata)

                if png_path not in resource['local_paths']:
                    fileid = upload_to_dataset(connector, host, secret_key, target_dsid, png_path)
                    uploaded_file_ids.append(fileid)
                self.created += 1
                self.bytes += os.path.getsize(png_path)

        # Generate aggregate outputs
        self.log_info(resource, "generating aggregates")
        if not (os.path.exists(hist_path) and os.path.exists(coloredImg_path)) or self.overwrite:
            # TODO: Coerce histogram and pseudocolor to geotiff?
            self.analyze(int(img_width), int(img_height), png_frames, hist_path, coloredImg_path)
            self.created += 2
            self.bytes += os.path.getsize(hist_path) + os.path.getsize(coloredImg_path)
        if hist_path not in resource['local_paths']:
            fileid = upload_to_dataset(connector, host, secret_key, target_dsid, hist_path)
            uploaded_file_ids.append(fileid)
        if coloredImg_path not in resource['local_paths']:
            fileid = upload_to_dataset(connector, host, secret_key, target_dsid, coloredImg_path)
            uploaded_file_ids.append(fileid)

        # Tell Clowder this is completed so subsequent file updates don't daisy-chain
        metadata = build_metadata(host, self.extractor_info, target_dsid, {
                                  "files_created": uploaded_file_ids}, 'dataset')
        self.log_info(resource, "uploading extractor metadata")
        upload_metadata(connector, host, secret_key, resource['id'], metadata)

        self.end_message(resource)
Example 11
img_left = args.input + "/" + id + "_left.bin"
if not os.path.exists(img_left):
    logger.error("Left image %s not found" % img_left)
    sys.exit(1)

img_right = args.input + "/" + id + "_right.bin"
if not os.path.exists(img_right):
    logger.error("Left image %s not found" % img_right)
    sys.exit(1)

logger.debug("Processing raw image data")
left_shape = bin2tiff.get_image_shape(metadata, 'left')
right_shape = bin2tiff.get_image_shape(metadata, 'right')
left_gps_bounds = geojson_to_tuples(
    metadata['spatial_metadata']['left']['bounding_box'])
right_gps_bounds = geojson_to_tuples(
    metadata['spatial_metadata']['right']['bounding_box'])
left_image = bin2tiff.process_image(left_shape, img_left, None)
right_image = bin2tiff.process_image(right_shape, img_right, None)

logger.debug("Creating output directories")
sensors = Sensors(base=args.output, station="ua-mac", sensor="rgb_geotiff")
left_tiff = sensors.create_sensor_path(timestamp, opts=['left'])
right_tiff = sensors.create_sensor_path(timestamp, opts=['right'])

logger.debug("Generating geotiffs")
# TODO: Extractor Info is None here, which isn't good
create_geotiff(left_image, left_gps_bounds, left_tiff, None, False, None,
               metadata)
create_geotiff(right_image, right_gps_bounds, right_tiff, None, False, None,
               metadata)
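
A note on the bounds tuples passed to create_geotiff: judging from the axis-pair swap in Example 17 below, geojson_to_tuples appears to yield (lat_min, lat_max, lon_min, lon_max). This is inferred from the call sites, not from terrautils documentation:

# Assumed unpacking of the gps_bounds tuples used throughout these examples;
# inferred from call sites, not documented behavior.
lat_min, lat_max, lon_min, lon_max = left_gps_bounds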
Example 12
    def process_message(self, connector, host, secret_key, resource, parameters):

        super(rgbEnhancementExtractor, self).process_message(connector, host, secret_key,
                                                             resource, parameters)

        self.start_message(resource)

        # Get left/right files and metadata
        process_files = []
        if self.get_terraref_metadata is not None:
            process_files = find_terraref_files(resource)
        else:
            process_files = find_image_files(self.args.identify_binary, resource,
                                             self.file_infodata_file_ending)

        # Get the best username, password, and space
        old_un, old_pw, old_space = (self.clowder_user, self.clowder_pass, self.clowderspace)
        self.clowder_user, self.clowder_pass, self.clowderspace = self.get_clowder_context()

        # Ensure that the clowder information is valid
        if not confirm_clowder_info(host, secret_key, self.clowderspace, self.clowder_user,
                                    self.clowder_pass):
            self.log_error(resource, "Clowder configuration is invalid. Not processing " +\
                                     "request")
            self.clowder_user, self.clowder_pass, self.clowderspace = (old_un, old_pw, old_space)
            self.end_message(resource)
            return

        # Change the base path of files to include the user by tweaking the sensor's value
        sensor_old_base = None
        if self.get_terraref_metadata is None:
            _, new_base = self.get_username_with_base_path(host, secret_key, resource['id'],
                                                           self.sensors.base)
            sensor_old_base = self.sensors.base
            self.sensors.base = new_base

        # Prepare for processing files
        timestamp = timestamp_to_terraref(self.find_timestamp(resource['dataset_info']['name']))
        target_dsid = resource['id']
        uploaded_file_ids = []
        ratios = []

        try:
            for one_file in process_files:

                mask_source = one_file

                # Make sure the source image is in the correct EPSG space
                epsg = get_epsg(one_file)
                if epsg != self.default_epsg:
                    self.log_info(resource, "Reprojecting from " + str(epsg) +
                                  " to default " + str(self.default_epsg))
                    _, tmp_name = tempfile.mkstemp()
                    src = gdal.Open(one_file)
                    gdal.Warp(tmp_name, src, dstSRS='EPSG:'+str(self.default_epsg))
                    mask_source = tmp_name

                # Get the bounds of the image to see if we can process it. Also get the mask filename
                rgb_mask_tif, bounds = self.get_maskfilename_bounds(mask_source, timestamp)

                if bounds is None:
                    self.log_skip(resource, "Skipping non-georeferenced image: " + \
                                                                    os.path.basename(one_file))
                    if mask_source != one_file:
                        os.remove(mask_source)
                    continue

                if not file_exists(rgb_mask_tif) or self.overwrite:
                    self.log_info(resource, "creating %s" % rgb_mask_tif)

                    mask_ratio, mask_rgb = gen_cc_enhanced(mask_source)
                    ratios.append(mask_ratio)

                    # Bands must be reordered to avoid swapping R and B
                    mask_rgb = cv2.cvtColor(mask_rgb, cv2.COLOR_BGR2RGB)

                    create_geotiff(mask_rgb, bounds, rgb_mask_tif, None, False, self.extractor_info,
                                   self.get_terraref_metadata)
                    compress_geotiff(rgb_mask_tif)

                    # Remove any temporary file
                    if mask_source != one_file:
                        os.remove(mask_source)

                    self.created += 1
                    self.bytes += os.path.getsize(rgb_mask_tif)

                found_in_dest = check_file_in_dataset(connector, host, secret_key, target_dsid,
                                                      rgb_mask_tif, remove=self.overwrite)
                if not found_in_dest:
                    self.log_info(resource, "uploading %s" % rgb_mask_tif)
                    fileid = upload_to_dataset(connector, host, self.clowder_user, self.clowder_pass,
                                               target_dsid, rgb_mask_tif)
                    uploaded_file_ids.append(host + ("" if host.endswith("/") else "/") +
                                             "files/" + fileid)

            # Tell Clowder this is completed so subsequent file updates don't daisy-chain
            if self.get_terraref_metadata is not None:
                ratios_len = len(ratios)
                left_ratio = (ratios[0] if ratios_len > 0 else None)
                right_ratio = (ratios[1] if ratios_len > 1 else None)
                md = {
                    "files_created": uploaded_file_ids
                }
                if left_ratio is not None:
                    md["left_mask_ratio"] = left_ratio
                if not self.leftonly and right_ratio is not None:
                    md["right_mask_ratio"] = right_ratio
                extractor_md = build_metadata(host, self.extractor_info, target_dsid, md, 'dataset')
                self.log_info(resource, "uploading extractor metadata to Lv1 dataset")
                remove_metadata(connector, host, secret_key, resource['id'],
                                self.extractor_info['name'])
                upload_metadata(connector, host, secret_key, resource['id'], extractor_md)

        finally:
            # Signal end of processing message and restore changed variables. Be sure to restore
            # changed variables above with early returns
            if sensor_old_base is not None:
                self.sensors.base = sensor_old_base

            self.clowder_user, self.clowder_pass, self.clowderspace = (old_un, old_pw, old_space)
            self.end_message(resource)
Example 13
    def process_message(self, connector, host, secret_key, resource,
                        parameters):
        self.start_message(resource)

        # Get left/right files and metadata
        img_left, img_right, terra_md_full = None, None, None
        for fname in resource['local_paths']:
            if fname.endswith('_dataset_metadata.json'):
                all_dsmd = load_json_file(fname)
                terra_md_full = get_terraref_metadata(all_dsmd, 'stereoTop')
            elif fname.endswith('_left.tif'):
                img_left = fname
            elif fname.endswith('_right.tif'):
                img_right = fname
        if None in [img_left, img_right, terra_md_full]:
            raise ValueError(
                "could not locate all files & metadata in processing")

        timestamp = resource['dataset_info']['name'].split(" - ")[1]
        target_dsid = resource['id']

        left_rgb_mask_tiff = self.sensors.create_sensor_path(timestamp,
                                                             opts=['left'])
        right_rgb_mask_tiff = self.sensors.create_sensor_path(timestamp,
                                                              opts=['right'])
        uploaded_file_ids = []
        right_ratio, left_ratio = 0, 0

        left_bounds = geojson_to_tuples(
            terra_md_full['spatial_metadata']['left']['bounding_box'])
        right_bounds = geojson_to_tuples(
            terra_md_full['spatial_metadata']['right']['bounding_box'])
        #qual_md = get_extractor_metadata(all_dsmd, "terra.stereo-rgb.nrmac")
        if (not file_exists(left_rgb_mask_tiff)) or self.overwrite:
            self.log_info(resource, "creating %s" % left_rgb_mask_tiff)

            #if qual_md and 'left_quality_score' in qual_md:
            #left_ratio, left_rgb = gen_cc_enhanced(img_left, quality_score=int(qual_md['left_quality_score']))
            left_ratio, left_rgb = gen_cc_enhanced(img_left)

            if left_ratio is not None and left_rgb is not None:
                # Bands must be reordered to avoid swapping R and B
                left_rgb = cv2.cvtColor(left_rgb, cv2.COLOR_BGR2RGB)
                create_geotiff(left_rgb, left_bounds, left_rgb_mask_tiff, None,
                               False, self.extractor_info, terra_md_full)
                compress_geotiff(left_rgb_mask_tiff)
                self.created += 1
                self.bytes += os.path.getsize(left_rgb_mask_tiff)
            else:
                # If the masked version was not generated, delete any old version as well
                if file_exists(left_rgb_mask_tiff):
                    self.log_info(
                        resource, "a faulty version exists; deleting %s" %
                        left_rgb_mask_tiff)
                    os.remove(left_rgb_mask_tiff)

        found_in_dest = check_file_in_dataset(connector, host, secret_key,
                                              target_dsid, left_rgb_mask_tiff)
        if not found_in_dest:
            self.log_info(resource, "uploading %s" % left_rgb_mask_tiff)
            fileid = upload_to_dataset(connector, host, self.clowder_user,
                                       self.clowder_pass, target_dsid,
                                       left_rgb_mask_tiff)
            uploaded_file_ids.append(host +
                                     ("" if host.endswith("/") else "/") +
                                     "files/" + fileid)

        if not self.leftonly:
            if (not file_exists(right_rgb_mask_tiff)) or self.overwrite:

                right_ratio, right_rgb = gen_cc_enhanced(img_right)

                if right_ratio is not None and right_rgb is not None:
                    # Bands must be reordered to avoid swapping R and B
                    right_rgb = cv2.cvtColor(right_rgb, cv2.COLOR_BGR2RGB)
                    create_geotiff(right_rgb, right_bounds,
                                   right_rgb_mask_tiff, None, False,
                                   self.extractor_info, terra_md_full)
                    compress_geotiff(right_rgb_mask_tiff)
                    self.created += 1
                    self.bytes += os.path.getsize(right_rgb_mask_tiff)
                else:
                    # If the masked version was not generated, delete any old version as well
                    if file_exists(right_rgb_mask_tiff):
                        self.log_info(
                            resource, "a faulty version exists; deleting %s" %
                            right_rgb_mask_tiff)
                        os.remove(right_rgb_mask_tiff)

            found_in_dest = check_file_in_dataset(connector, host, secret_key,
                                                  target_dsid,
                                                  right_rgb_mask_tiff)
            if not found_in_dest:
                self.log_info(resource, "uploading %s" % right_rgb_mask_tiff)
                fileid = upload_to_dataset(connector, host, self.clowder_user,
                                           self.clowder_pass, target_dsid,
                                           right_rgb_mask_tiff)
                uploaded_file_ids.append(host +
                                         ("" if host.endswith("/") else "/") +
                                         "files/" + fileid)

        # Tell Clowder this is completed so subsequent file updates don't daisy-chain
        if len(uploaded_file_ids) > 0:
            md = {
                "files_created": uploaded_file_ids,
                "left_mask_ratio": left_ratio
            }
            if not self.leftonly:
                md["right_mask_ratio"] = right_ratio
            extractor_md = build_metadata(host, self.extractor_info,
                                          target_dsid, md, 'dataset')
            self.log_info(resource,
                          "uploading extractor metadata to Lv1 dataset")
            remove_metadata(connector, host, secret_key, resource['id'],
                            self.extractor_info['name'])
            upload_metadata(connector, host, secret_key, resource['id'],
                            extractor_md)

        self.end_message(resource)
Example 14
    def process_message(self, connector, host, secret_key, resource, parameters):
        self.start_message(resource)

        # Get BIN file and metadata
        bin_file, terra_md_full = None, None
        for f in resource['local_paths']:
            if f.endswith('_dataset_metadata.json'):
                all_dsmd = load_json_file(f)
                terra_md_full = get_terraref_metadata(all_dsmd, 'flirIrCamera')
            elif f.endswith('_ir.bin'):
                bin_file = f
        if None in [bin_file, terra_md_full]:
            raise ValueError("could not locate all files & metadata in processing")

        timestamp = resource['dataset_info']['name'].split(" - ")[1]

        # Fetch experiment name from terra metadata
        season_name, experiment_name, updated_experiment = get_season_and_experiment(timestamp, 'flirIrCamera', terra_md_full)
        if None in [season_name, experiment_name]:
            raise ValueError("season and experiment could not be determined")

        # Determine output directory
        self.log_info(resource, "Hierarchy: %s / %s / %s / %s / %s / %s / %s" % (season_name, experiment_name, self.sensors.get_display_name(),
                                                                                 timestamp[:4], timestamp[5:7], timestamp[8:10], timestamp))
        target_dsid = build_dataset_hierarchy_crawl(host, secret_key, self.clowder_user, self.clowder_pass, self.clowderspace,
                                              season_name, experiment_name, self.sensors.get_display_name(),
                                              timestamp[:4], timestamp[5:7], timestamp[8:10],
                                              leaf_ds_name=self.sensors.get_display_name()+' - '+timestamp)
        tiff_path = self.sensors.create_sensor_path(timestamp)
        png_path = tiff_path.replace(".tif", ".png")
        uploaded_file_ids = []

        # Attach LemnaTec source metadata to Level_1 product
        self.log_info(resource, "uploading LemnaTec metadata to ds [%s]" % target_dsid)
        remove_metadata(connector, host, secret_key, target_dsid, self.extractor_info['name'])
        terra_md_trim = get_terraref_metadata(all_dsmd)
        if updated_experiment is not None:
            terra_md_trim['experiment_metadata'] = updated_experiment
        terra_md_trim['raw_data_source'] = host + ("" if host.endswith("/") else "/") + "datasets/" + resource['id']
        level1_md = build_metadata(host, self.extractor_info, target_dsid, terra_md_trim, 'dataset')
        upload_metadata(connector, host, secret_key, target_dsid, level1_md)

        skipped_png = False
        if not file_exists(png_path) or self.overwrite:
            # Perform actual processing
            self.log_info(resource, "creating & uploading %s" % png_path)
            raw_data = numpy.fromfile(bin_file, numpy.dtype('<u2')).reshape([480, 640]).astype('float')
            raw_data = numpy.rot90(raw_data, 3)
            create_image(raw_data, png_path, self.scale_values)
            self.created += 1
            self.bytes += os.path.getsize(png_path)
        else:
            skipped_png = True
        # Only upload the newly generated file to Clowder if it isn't already in dataset
        found_in_dest = check_file_in_dataset(connector, host, secret_key, target_dsid, png_path, remove=self.overwrite)
        if not found_in_dest or self.overwrite:
            fileid = upload_to_dataset(connector, host, secret_key, target_dsid, png_path)
            uploaded_file_ids.append(host + ("" if host.endswith("/") else "/") + "files/" + fileid)

        if not file_exists(tiff_path) or self.overwrite:
            # Generate temperature matrix and perform actual processing
            self.log_info(resource, "creating & uploading %s" % tiff_path)
            gps_bounds = geojson_to_tuples(terra_md_full['spatial_metadata']['flirIrCamera']['bounding_box'])
            if skipped_png:
                raw_data = numpy.fromfile(bin_file, numpy.dtype('<u2')).reshape([480, 640]).astype('float')
                raw_data = numpy.rot90(raw_data, 3)
            tc = getFlir.rawData_to_temperature(raw_data, terra_md_full) # get temperature
            create_geotiff(tc, gps_bounds, tiff_path, None, True, self.extractor_info, terra_md_full)
            self.created += 1
            self.bytes += os.path.getsize(tiff_path)
        # Only upload the newly generated file to Clowder if it isn't already in dataset
        found_in_dest = check_file_in_dataset(connector, host, secret_key, target_dsid, tiff_path, remove=self.overwrite)
        if not found_in_dest or self.overwrite:
            fileid = upload_to_dataset(connector, host, secret_key, target_dsid, tiff_path)
            uploaded_file_ids.append(host + ("" if host.endswith("/") else "/") + "files/" + fileid)

        # Trigger additional extractors
        self.log_info(resource, "triggering downstream extractors")
        submit_extraction(connector, host, secret_key, target_dsid, "terra.plotclipper_tif")

        # Tell Clowder this is completed so subsequent file updates don't daisy-chain
        if len(uploaded_file_ids) > 0:
            extractor_md = build_metadata(host, self.extractor_info, target_dsid, {
                "files_created": uploaded_file_ids
            }, 'dataset')
            self.log_info(resource, "uploading extractor metadata to raw dataset")
            remove_metadata(connector, host, secret_key, resource['id'], self.extractor_info['name'])
            upload_metadata(connector, host, secret_key, resource['id'], extractor_md)

        self.end_message(resource)
Example 15
    def process_message(self, connector, host, secret_key, resource,
                        parameters):
        self.start_message(resource)

        # Get BIN file and metadata
        bin_file, metadata = None, None
        for f in resource['local_paths']:
            # First check metadata attached to dataset in Clowder for item of interest
            if f.endswith('_dataset_metadata.json'):
                all_dsmd = load_json_file(f)
                metadata = get_terraref_metadata(all_dsmd, 'flirIrCamera')
            # Otherwise, check for the raw IR BIN file
            elif f.endswith('_ir.bin'):
                bin_file = f
        if None in [bin_file, metadata]:
            logging.getLogger(__name__).error(
                'could not find both ir.bin and metadata')
            return

        # Determine output directory
        timestamp = resource['dataset_info']['name'].split(" - ")[1]
        png_path = self.sensors.create_sensor_path(timestamp, ext='png')
        tiff_path = self.sensors.create_sensor_path(timestamp)
        uploaded_file_ids = []

        target_dsid = build_dataset_hierarchy(
            host,
            secret_key,
            self.clowder_user,
            self.clowder_pass,
            self.clowderspace,
            self.sensors.get_display_name(),
            timestamp[:4],
            timestamp[5:7],
            timestamp[8:10],
            leaf_ds_name=self.sensors.get_display_name() + ' - ' + timestamp)

        skipped_png = False
        if not os.path.exists(png_path) or self.overwrite:
            logging.getLogger(__name__).info("Generating %s" % png_path)
            # get raw data from bin file
            raw_data = numpy.fromfile(bin_file, numpy.dtype('<u2')).reshape(
                [480, 640]).astype('float')
            raw_data = numpy.rot90(raw_data, 3)
            create_image(raw_data, png_path, self.scale_values)
            # Only upload the newly generated file to Clowder if it isn't already in dataset
            if png_path not in resource["local_paths"]:
                fileid = upload_to_dataset(connector, host, secret_key,
                                           target_dsid, png_path)
                uploaded_file_ids.append(host +
                                         ("" if host.endswith("/") else "/") +
                                         "files/" + fileid)
            self.created += 1
            self.bytes += os.path.getsize(png_path)
        else:
            skipped_png = True

        if not os.path.exists(tiff_path) or self.overwrite:
            logging.getLogger(__name__).info("Generating temperature matrix")
            gps_bounds = geojson_to_tuples(
                metadata['spatial_metadata']['flirIrCamera']['bounding_box'])
            if skipped_png:
                raw_data = numpy.fromfile(bin_file,
                                          numpy.dtype('<u2')).reshape(
                                              [480, 640]).astype('float')
                raw_data = numpy.rot90(raw_data, 3)
            tc = getFlir.rawData_to_temperature(raw_data,
                                                metadata)  # get temperature

            logging.getLogger(__name__).info("Creating %s" % tiff_path)
            # Rename temporary tif after creation to avoid long path errors
            out_tmp_tiff = os.path.join(tempfile.gettempdir(), resource['id'])
            create_geotiff(tc, gps_bounds, out_tmp_tiff, None, True,
                           self.extractor_info, metadata)
            shutil.move(out_tmp_tiff, tiff_path)
            if tiff_path not in resource["local_paths"]:
                fileid = upload_to_dataset(connector, host, secret_key,
                                           target_dsid, tiff_path)
                uploaded_file_ids.append(host +
                                         ("" if host.endswith("/") else "/") +
                                         "files/" + fileid)
            self.created += 1
            self.bytes += os.path.getsize(tiff_path)

        # Tell Clowder this is completed so subsequent file updates don't daisy-chain
        metadata = build_metadata(host, self.extractor_info, target_dsid,
                                  {"files_created": uploaded_file_ids},
                                  'dataset')
        upload_metadata(connector, host, secret_key, resource['id'], metadata)

        # Upload original Lemnatec metadata to new Level_1 dataset
        md = get_terraref_metadata(all_dsmd)
        md['raw_data_source'] = host + ("" if host.endswith("/") else
                                        "/") + "datasets/" + resource['id']
        lemna_md = build_metadata(host, self.extractor_info, target_dsid, md,
                                  'dataset')
        upload_metadata(connector, host, secret_key, target_dsid, lemna_md)

        self.end_message(resource)
Example 16
    def process_message(self, connector, host, secret_key, resource, parameters):
        self.start_message(resource)

        # Get left/right files and metadata
        img_left, img_right, terra_md_full = None, None, None
        for fname in resource['local_paths']:
            if fname.endswith('_dataset_metadata.json'):
                all_dsmd = load_json_file(fname)
                terra_md_full = get_terraref_metadata(all_dsmd, 'stereoTop')
            elif fname.endswith('_left.tif'):
                img_left = fname
            elif fname.endswith('_right.tif'):
                img_right = fname
        if None in [img_left, img_right, terra_md_full]:
            raise ValueError("could not locate all files & metadata in processing")

        timestamp = resource['dataset_info']['name'].split(" - ")[1]
        target_dsid = resource['id']
        left_nrmac_tiff = self.sensors.create_sensor_path(timestamp, opts=['left'])
        right_nrmac_tiff = self.sensors.create_sensor_path(timestamp, opts=['right'])
        uploaded_file_ids = []

        self.log_info(resource, "determining image quality")
        left_qual = getImageQuality(img_left)
        if not self.leftonly:
            right_qual = getImageQuality(img_right)

        left_bounds = geojson_to_tuples(terra_md_full['spatial_metadata']['left']['bounding_box'])
        right_bounds = geojson_to_tuples(terra_md_full['spatial_metadata']['right']['bounding_box'])

        if not file_exists(left_nrmac_tiff) or self.overwrite:
            self.log_info(resource, "creating %s" % left_nrmac_tiff)
            create_geotiff(np.array([[left_qual, left_qual],[left_qual, left_qual]]), left_bounds,
                           left_nrmac_tiff, None, True, self.extractor_info, terra_md_full, compress=True)
            self.created += 1
            self.bytes += os.path.getsize(left_nrmac_tiff)
        found_in_dest = check_file_in_dataset(connector, host, secret_key, target_dsid, left_nrmac_tiff,
                                              remove=self.overwrite)
        if not found_in_dest or self.overwrite:
            self.log_info(resource, "uploading %s" % left_nrmac_tiff)
            fileid = upload_to_dataset(connector, host, self.clowder_user, self.clowder_pass, target_dsid,
                                       left_nrmac_tiff)
            uploaded_file_ids.append(host + ("" if host.endswith("/") else "/") + "files/" + fileid)


        if not self.leftonly:
            if (not file_exists(right_nrmac_tiff) or self.overwrite):
                self.log_info(resource, "creating %s" % right_nrmac_tiff)
                create_geotiff(np.array([[right_qual, right_qual],[right_qual, right_qual]]), right_bounds,
                               right_nrmac_tiff, None, True, self.extractor_info, terra_md_full, compress=True)
                self.created += 1
                self.bytes += os.path.getsize(right_nrmac_tiff)
            found_in_dest = check_file_in_dataset(connector, host, secret_key, target_dsid, right_nrmac_tiff,
                                                  remove=self.overwrite)
            if not found_in_dest or self.overwrite:
                self.log_info(resource, "uploading %s" % right_nrmac_tiff)
                fileid = upload_to_dataset(connector, host, self.clowder_user, self.clowder_pass, target_dsid,
                                           right_nrmac_tiff)
                uploaded_file_ids.append(host + ("" if host.endswith("/") else "/") + "files/" + fileid)

        # Tell Clowder this is completed so subsequent file updates don't daisy-chain
        md = {
            "files_created": uploaded_file_ids,
            "left_quality_score": left_qual
        }
        if not self.leftonly:
            md["right_quality_score"] = right_qual
        extractor_md = build_metadata(host, self.extractor_info, resource['id'], md, 'file')
        self.log_info(resource, "uploading extractor metadata to Lv1 dataset")
        remove_metadata(connector, host, secret_key, resource['id'], self.extractor_info['name'])
        upload_metadata(connector, host, secret_key, resource['id'], extractor_md)

        self.end_message(resource)
Example 17
def perform_process(transformer: transformer_class.Transformer, check_md: dict, transformer_md: list, full_md: list) -> dict:
    """Performs the processing of the data
    Arguments:
        transformer: instance of transformer class
        check_md: request specific metadata
        transformer_md: metadata associated with previous runs of the transformer
        full_md: the full set of metadata available to the transformer
    Return:
        Returns a dictionary with the results of processing
    """
    # pylint: disable=unused-argument
    start_timestamp = datetime.datetime.now()
    all_files = check_md['list_files']()
    total_file_count = len(all_files)
    files_to_process = __internal__.get_files_to_process(all_files)

    file_md = []
    num_image_files = 0
    num_processed_files = 0
    for one_file in files_to_process:
        logging.debug("Processing file: '%s'", one_file)
        num_image_files += 1

        if not os.path.exists(one_file):
            logging.error("Unable to access file: '%s'. Continuing processing", one_file)
            continue

        try:
            quality_value = __internal__.get_image_quality(one_file)
            image_bounds = transformer.get_image_file_geobounds(one_file)
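            # Swap the axis pairs returned by get_image_file_geobounds() into
            # the bounds ordering create_geotiff() expects (assumed ordering).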
            quality_image_bounds = (image_bounds[2], image_bounds[3], image_bounds[0], image_bounds[1])

            mac_file_name = os.path.join(check_md['working_folder'], os.path.splitext(os.path.basename(one_file))[0] + '_mac.tif')

            logging.info("MAC score %s for file '%s'", str(quality_value), one_file)
            logging.debug("Creating quality image: bounds %s  name: '%s'", str(quality_image_bounds), mac_file_name)
            create_geotiff(np.array([[quality_value, quality_value], [quality_value, quality_value]]), quality_image_bounds,
                           mac_file_name, None, True, transformer.generate_transformer_md(), full_md[0], compress=True)

            num_processed_files += 1
            file_md.append(
                {
                    'path': mac_file_name,
                    'key': 'tif',
                    'metadata': {
                        'replace': True,
                        'data': {
                            'MAC score': str(quality_value),
                            'utc_timestamp': datetime.datetime.utcnow().isoformat(),
                            'source_file': one_file
                        }
                    }
                }
            )
        except Exception as ex:
            logging.warning("Ignoring exception caught processing image file '%s'", one_file)
            logging.debug("Exception: %s", str(ex), exc_info=True)

    return {'code': 0,
            'files': file_md,
            configuration.TRANSFORMER_NAME: {
                'version': configuration.TRANSFORMER_VERSION,
                'utc_timestamp': datetime.datetime.utcnow().isoformat(),
                'processing_time': str(datetime.datetime.now() - start_timestamp),
                'num_files_received': str(total_file_count),
                'num_image_files': str(num_image_files),
                'num_processed_files': str(num_processed_files)
            }
            }
Example 18
def perform_process(transformer: transformer_class.Transformer, check_md: dict,
                    transformer_md: list, full_md: list) -> dict:
    """Performs the processing of the data
    Arguments:
        transformer: instance of transformer class
    Return:
        Returns a dictionary with the results of processing
    """
    # pylint: disable=unused-argument
    file_md = []
    start_timestamp = datetime.datetime.now()  # local time, to match the duration computed with now() below

    file_list = __internal__.get_file_list(check_md['list_files']())
    files_count = len(file_list)

    # Find the metadata we're interested in for calibration parameters
    terra_md = __internal__.find_terra_md(full_md)
    if not terra_md:
        raise RuntimeError("Unable to find TERRA REF specific metadata")

    transformer_md = transformer.generate_transformer_md()

    def generate_file_md(file_path: str) -> dict:
        """Returns file metadata for a file
        Arguments:
            file_path: the file to generate metadata for
        Return:
            Returns the metadata
        """
        return {
            'path': file_path,
            'key': configuration.TRANSFORMER_SENSOR,
            'metadata': {
                'data': transformer_md
            }
        }

    # Generate a list of approved file name endings
    file_endings = ["{0:0>4}.bin".format(i) for i in range(0, 102)]

    files_processed = 0
    try:
        img_width, img_height = __internal__.get_image_dimensions(terra_md)
        gps_bounds = geojson_to_tuples(
            terra_md['spatial_metadata']['ps2Top']['bounding_box'])
        logging.debug("Image width and height: %s %s", str(img_width),
                      str(img_height))
        logging.debug("Image geo bounds: %s", str(gps_bounds))

        png_frames = {}
        for one_file in file_list:
            if one_file[-8:] in file_endings:
                files_processed += 1
                logging.debug("Processing file: '%s'", one_file)

                try:
                    pixels = np.fromfile(one_file, np.dtype('uint8')).reshape(
                        [int(img_height), int(img_width)])
                except ValueError:
                    logging.info(
                        "Ignoring ValueError exception while loading file '%s'",
                        one_file)
                    continue

                png_filename = os.path.join(
                    check_md['working_folder'],
                    os.path.basename(one_file.replace('.bin', '.png')))
                logging.info("Creating: '%s'", png_filename)
                create_image(pixels, png_filename)
                file_md.append(generate_file_md(png_filename))
                png_frames[int(one_file[-8:-4])] = png_filename

                tif_filename = os.path.join(
                    check_md['working_folder'],
                    os.path.basename(one_file.replace('.bin', '.tif')))
                logging.info("Creating: '%s'", tif_filename)
                create_geotiff(pixels, gps_bounds, tif_filename, None, False,
                               transformer_md, terra_md)
                file_md.append(generate_file_md(tif_filename))
            else:
                logging.info("Skipping non-sensor file '%s'", one_file)

        if files_processed > 0:
            logging.info("Generating aggregates")
            hist_path = os.path.join(check_md['working_folder'],
                                     'combined_hist.png')
            false_color_path = os.path.join(check_md['working_folder'],
                                            'combined_pseudocolored.png')
            __internal__.analyze(png_frames, hist_path, false_color_path)
            file_md.append(generate_file_md(hist_path))
            file_md.append(generate_file_md(false_color_path))
        else:
            logging.warning("No files were processed")

        result = {
            'code': 0,
            'files': file_md,
            configuration.TRANSFORMER_NAME: {
                'version': configuration.TRANSFORMER_VERSION,
                'utc_timestamp': datetime.datetime.utcnow().isoformat(),
                'processing_time':
                str(datetime.datetime.now() - start_timestamp),
                'num_files_received': str(files_count),
                'files_processed': str(files_processed)
            }
        }

    except Exception as ex:
        msg = 'Exception caught converting PSII files'
        logging.exception(msg)
        result = {'code': -1000, 'error': msg + ': ' + str(ex)}

    return result
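
The ValueError handling above matters because each PSII .bin frame is a headerless dump of unsigned 8-bit pixels, so a truncated file only fails at reshape time. A minimal, self-contained sketch of that load-and-validate step (the helper name load_bin_frame is hypothetical, not part of the pipeline):

import numpy as np

def load_bin_frame(path: str, width: int, height: int):
    """Load one headerless uint8 .bin frame, or return None if truncated."""
    pixels = np.fromfile(path, np.dtype('uint8'))
    if pixels.size != width * height:
        # A short read cannot be reshaped; skip it, as the loop above does
        return None
    return pixels.reshape([height, width])
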
    def process_message(self, connector, host, secret_key, resource,
                        parameters):
        self.start_message(resource)

        # Get left/right files and metadata
        img_left, img_right, terra_md_full = None, None, None
        for fname in resource['local_paths']:
            if fname.endswith('_dataset_metadata.json'):
                all_dsmd = load_json_file(fname)
                terra_md_full = get_terraref_metadata(all_dsmd, 'stereoTop')
            elif fname.endswith('_left.tif'):
                img_left = fname
            elif fname.endswith('_right.tif'):
                img_right = fname
        if None in [img_left, img_right, terra_md_full]:
            raise ValueError(
                "could not locate all files & metadata in processing")

        timestamp = resource['dataset_info']['name'].split(" - ")[1]
        target_dsid = resource['id']

        left_rgb_enh_tiff = self.sensors.create_sensor_path(timestamp,
                                                            opts=['left'])
        right_rgb_enh_tiff = self.sensors.create_sensor_path(timestamp,
                                                             opts=['right'])
        uploaded_file_ids = []

        left_bounds = geojson_to_tuples(
            terra_md_full['spatial_metadata']['left']['bounding_box'])
        right_bounds = geojson_to_tuples(
            terra_md_full['spatial_metadata']['right']['bounding_box'])

        if not file_exists(left_rgb_enh_tiff) or self.overwrite:
            self.log_info(resource, "creating %s" % left_rgb_enh_tiff)
            EI = getEnhancedImage(img_left)
            create_geotiff(EI, left_bounds, left_rgb_enh_tiff)
            self.created += 1
            self.bytes += os.path.getsize(left_rgb_enh_tiff)

        found_in_dest = check_file_in_dataset(connector,
                                              host,
                                              secret_key,
                                              target_dsid,
                                              left_rgb_enh_tiff,
                                              remove=self.overwrite)
        if not found_in_dest:
            self.log_info(resource, "uploading %s" % left_rgb_enh_tiff)
            fileid = upload_to_dataset(connector, host, self.clowder_user,
                                       self.clowder_pass, target_dsid,
                                       left_rgb_enh_tiff)
            uploaded_file_ids.append(host +
                                     ("" if host.endswith("/") else "/") +
                                     "files/" + fileid)

        if not file_exists(right_rgb_enh_tiff) or self.overwrite:
            self.log_info(resource, "creating %s" % right_rgb_enh_tiff)
            EI = getEnhancedImage(img_right)
            create_geotiff(EI, right_bounds, right_rgb_enh_tiff)
            self.created += 1
            self.bytes += os.path.getsize(right_rgb_enh_tiff)

        found_in_dest = check_file_in_dataset(connector,
                                              host,
                                              secret_key,
                                              target_dsid,
                                              right_rgb_enh_tiff,
                                              remove=self.overwrite)
        if not found_in_dest:
            self.log_info(resource, "uploading %s" % right_rgb_enh_tiff)
            fileid = upload_to_dataset(connector, host, self.clowder_user,
                                       self.clowder_pass, target_dsid,
                                       right_rgb_enh_tiff)
            uploaded_file_ids.append(host +
                                     ("" if host.endswith("/") else "/") +
                                     "files/" + fileid)

        # Tell Clowder this is completed so subsequent file updates don't daisy-chain
        ext_meta = build_metadata(host, self.extractor_info, target_dsid,
                                  {"files_created": uploaded_file_ids},
                                  'dataset')
        self.log_info(resource, "uploading extractor metadata")
        remove_metadata(connector, host, secret_key, target_dsid,
                        self.extractor_info['name'])
        upload_metadata(connector, host, secret_key, target_dsid, ext_meta)

        self.end_message(resource)
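
The method above applies the same create-if-missing / upload-if-absent sequence to each stereo output. A hedged sketch of that flow factored into one helper (the helper name and the make_output callable are hypothetical; the other calls are the ones used above):

    def ensure_output(self, connector, host, secret_key, resource, target_dsid,
                      out_path, make_output):
        """Sketch: regenerate out_path when missing or overwriting, then
        upload it only if the dataset does not already contain it."""
        if not file_exists(out_path) or self.overwrite:
            self.log_info(resource, "creating %s" % out_path)
            make_output(out_path)  # hypothetical callable that writes the file
            self.created += 1
            self.bytes += os.path.getsize(out_path)

        found_in_dest = check_file_in_dataset(connector, host, secret_key,
                                              target_dsid, out_path,
                                              remove=self.overwrite)
        if not found_in_dest:
            self.log_info(resource, "uploading %s" % out_path)
            fileid = upload_to_dataset(connector, host, self.clowder_user,
                                       self.clowder_pass, target_dsid, out_path)
            return host + ("" if host.endswith("/") else "/") + "files/" + fileid
        return None
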
    def process_message(self, connector, host, secret_key, resource, parameters):
        self.start_message(resource)

        # Get left/right files and metadata
        img_left, img_right, terra_md_full = None, None, None
        for fname in resource['local_paths']:
            if fname.endswith('_dataset_metadata.json'):
                all_dsmd = load_json_file(fname)
                terra_md_full = get_terraref_metadata(all_dsmd, 'stereoTop')
            elif fname.endswith('_left.bin'):
                img_left = fname
            elif fname.endswith('_right.bin'):
                img_right = fname
        if None in [img_left, img_right, terra_md_full]:
            raise ValueError("could not locate all files & metadata in processing")

        timestamp = resource['dataset_info']['name'].split(" - ")[1]

        # Fetch experiment name from terra metadata
        season_name, experiment_name, updated_experiment = get_season_and_experiment(timestamp, 'stereoTop', terra_md_full)
        if None in [season_name, experiment_name]:
            raise ValueError("season and experiment could not be determined")

        # Determine output directory
        self.log_info(resource, "Hierarchy: %s / %s / %s / %s / %s / %s / %s" % (season_name, experiment_name, self.sensors.get_display_name(),
                                                                                 timestamp[:4], timestamp[5:7], timestamp[8:10], timestamp))
        target_dsid = build_dataset_hierarchy_crawl(host, secret_key, self.clowder_user, self.clowder_pass, self.clowderspace,
                                              season_name, experiment_name, self.sensors.get_display_name(),
                                              timestamp[:4], timestamp[5:7], timestamp[8:10],
                                              leaf_ds_name=self.sensors.get_display_name() + ' - ' + timestamp)
        left_tiff = self.sensors.create_sensor_path(timestamp, opts=['left'])
        right_tiff = self.sensors.create_sensor_path(timestamp, opts=['right'])
        uploaded_file_ids = []

        # Attach LemnaTec source metadata to Level_1 product if necessary
        target_md = download_metadata(connector, host, secret_key, target_dsid)
        if not get_extractor_metadata(target_md, self.extractor_info['name']):
            self.log_info(resource, "uploading LemnaTec metadata to ds [%s]" % target_dsid)
            remove_metadata(connector, host, secret_key, target_dsid, self.extractor_info['name'])
            terra_md_trim = get_terraref_metadata(all_dsmd)
            if updated_experiment is not None:
                terra_md_trim['experiment_metadata'] = updated_experiment
            terra_md_trim['raw_data_source'] = host + ("" if host.endswith("/") else "/") + "datasets/" + resource['id']
            level1_md = build_metadata(host, self.extractor_info, target_dsid, terra_md_trim, 'dataset')
            upload_metadata(connector, host, secret_key, target_dsid, level1_md)

        try:
            left_shape = terraref.stereo_rgb.get_image_shape(terra_md_full, 'left')
            gps_bounds_left = geojson_to_tuples(terra_md_full['spatial_metadata']['left']['bounding_box'])
            right_shape = terraref.stereo_rgb.get_image_shape(terra_md_full, 'right')
            gps_bounds_right = geojson_to_tuples(terra_md_full['spatial_metadata']['right']['bounding_box'])
        except KeyError:
            self.log_error(resource, "spatial metadata not properly identified; sending to cleaner")
            submit_extraction(connector, host, secret_key, resource['id'], "terra.metadata.cleaner")
            return

        if (not file_exists(left_tiff)) or self.overwrite:
            # Perform actual processing
            self.log_info(resource, "creating %s" % left_tiff)
            left_image = terraref.stereo_rgb.process_raw(left_shape, img_left, None)
            create_geotiff(left_image, gps_bounds_left, left_tiff, None, True,
                           self.extractor_info, terra_md_full, compress=True)
            self.created += 1
            self.bytes += os.path.getsize(left_tiff)
        # Check if the file should be uploaded, even if it was already created
        found_in_dest = check_file_in_dataset(connector, host, secret_key, target_dsid, left_tiff)
        if not found_in_dest:
            self.log_info(resource, "uploading %s" % left_tiff)
            fileid = upload_to_dataset(connector, host, self.clowder_user, self.clowder_pass, target_dsid, left_tiff)
            uploaded_file_ids.append(host + ("" if host.endswith("/") else "/") + "files/" + fileid)

        if (not file_exists(right_tiff)) or self.overwrite:
            # Perform actual processing
            self.log_info(resource, "creating %s" % right_tiff)
            right_image = terraref.stereo_rgb.process_raw(right_shape, img_right, None)
            create_geotiff(right_image, gps_bounds_right, right_tiff, None, True,
                           self.extractor_info, terra_md_full, compress=True)
            self.created += 1
            self.bytes += os.path.getsize(right_tiff)
        # Check if the file should be uploaded, even if it was already created
        found_in_dest = check_file_in_dataset(connector, host, secret_key, target_dsid, right_tiff)
        if not found_in_dest:
            self.log_info(resource, "uploading %s" % right_tiff)
            fileid = upload_to_dataset(connector, host, self.clowder_user, self.clowder_pass, target_dsid, right_tiff)
            uploaded_file_ids.append(host + ("" if host.endswith("/") else "/") + "files/" + fileid)

        # Trigger additional extractors
        self.log_info(resource, "triggering downstream extractors")
        submit_extraction(connector, host, secret_key, target_dsid, "terra.stereo-rgb.rgbmask")
        submit_extraction(connector, host, secret_key, target_dsid, "terra.stereo-rgb.nrmac")
        submit_extraction(connector, host, secret_key, target_dsid, "terra.plotclipper_tif")

        # Tell Clowder this is completed so subsequent file updates don't daisy-chain
        if len(uploaded_file_ids) > 0:
            extractor_md = build_metadata(host, self.extractor_info, target_dsid, {
                "files_created": uploaded_file_ids
            }, 'dataset')
            self.log_info(resource, "uploading extractor metadata to raw dataset")
            remove_metadata(connector, host, secret_key, resource['id'], self.extractor_info['name'])
            try:
                upload_metadata(connector, host, secret_key, resource['id'], extractor_md)
            except Exception:
                self.log_info(resource, "problem uploading extractor metadata...")

        self.end_message(resource)
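
terraref.stereo_rgb.process_raw is called above with the image shape and the raw .bin path; its internals are not shown in this listing. As a rough sketch only, a raw Bayer-mosaic frame can be demosaiced to RGB with OpenCV (the GBRG pattern constant here is an assumption, not taken from process_raw):

import numpy as np
import cv2

def demosaic_bin(bin_path, shape):
    """Sketch: load a headerless 8-bit Bayer mosaic and demosaic it to RGB."""
    mosaic = np.fromfile(bin_path, dtype=np.uint8).reshape(shape)
    # Assumed GBRG Bayer layout; the real sensor pattern may differ
    return cv2.cvtColor(mosaic, cv2.COLOR_BayerGB2RGB)
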
Example n. 21
0
def nrmac(imgfile):
    """Return the no-reference NRMAC quality score for an image file."""
    img = Image.open(imgfile)
    img = np.array(img)

    # MAC (multiscale autocorrelation) is defined earlier in the source module
    NRMAC = MAC(img, img, img)

    return NRMAC


with open(args.meta, 'r') as mdfile:
    md = json.load(mdfile)

lbounds = geojson_to_tuples(md['spatial_metadata']['left']['bounding_box'])
rbounds = geojson_to_tuples(md['spatial_metadata']['right']['bounding_box'])

logger.debug("Calculating quality scores")
left_qual = nrmac(args.left)
right_qual = nrmac(args.right)

# Create geoTIFFs containing the left and right image quality scores
logger.debug("Saving quality scores as rasters")
create_geotiff(np.array([[left_qual, left_qual], [left_qual, left_qual]]), lbounds, args.out_l)
create_geotiff(np.array([[right_qual, right_qual], [right_qual, right_qual]]), rbounds, args.out_r)

with open(args.out_j, 'w') as o:
    o.write(json.dumps({
        "quality_score": {
            "left": left_qual,
            "right": right_qual
        }
    }))
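
MAC here is the multiscale autocorrelation score from the TERRA-REF quality code, whose implementation is not shown in this listing. As an illustration only, autocorrelation-based focus metrics in the same family (e.g. Vollath's F4) compare pixel correlation at one- and two-pixel shifts, since sharp images decorrelate faster than blurred ones:

import numpy as np

def vollath_f4(gray: np.ndarray) -> float:
    """Illustrative focus metric (mean-normalised Vollath F4), not the
    MAC implementation used above."""
    gray = gray.astype('float64')
    corr1 = np.mean(gray[:, :-1] * gray[:, 1:])   # correlation at shift 1
    corr2 = np.mean(gray[:, :-2] * gray[:, 2:])   # correlation at shift 2
    return corr1 - corr2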