Example #1
def upload_to_geostreams(file, clowder_id):
    # NOTE: host, secret_key and the pyclowder/terrautils helpers used below are
    # defined elsewhere in the enclosing script and are not shown in this snippet.
    conn = Connector(
        None, mounted_paths={"/home/clowder/sites": "/home/clowder/sites"})

    successful_plots = 0
    with open(file, 'rb') as csvfile:
        reader = csv.DictReader(csvfile)
        for row in reader:
            centroid_lonlat = [row['lon'], row['lat']]
            time_fmt = row['dp_time']
            timestamp = row['timestamp']
            dpmetadata = {"source": row['source'], "value": row['value']}
            trait = row['trait']

            create_datapoint_with_dependencies(
                conn, host, secret_key, trait,
                (centroid_lonlat[1], centroid_lonlat[0]), time_fmt, time_fmt,
                dpmetadata, timestamp)
            successful_plots += 1

    # Extractor metadata
    extractor_info = {
        "extractor_name": "terra.geostreams",
        "extractor_version": "1.0",
        "extractor_author": "Max Burnette <*****@*****.**>",
        "extractor_description": "Geostreams CSV uploader",
        "extractor_repo": "https://github.com/terraref/computing-pipeline.git"
    }

    # Add metadata to original dataset indicating this was run
    ext_meta = build_metadata(host, extractor_info, clowder_id, {
        "plots_processed": successful_plots,
    }, 'file')
    upload_metadata(conn, host, secret_key, clowder_id, ext_meta)
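The loop above reads lon, lat, dp_time, timestamp, source, value and trait columns from each row, so the input is a per-plot trait CSV. A minimal, hypothetical call (the file path, file id and CSV values are placeholders; host and secret_key must already be set in the enclosing script):

# canopy_cover_geo.csv (hypothetical contents):
#   lat,lon,dp_time,timestamp,source,value,trait
#   33.0745,-111.9750,2017-06-01T12:00:00-07:00,2017-06-01,ua-mac fullfield mosaic,0.42,canopy_cover
upload_to_geostreams("/home/extractor/canopy_cover_geo.csv", "59bbd5bc4f0c19c6ff35c0d9")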
Example #2
def upload_to_bety(file, clowder_id):
    # NOTE: bety_key, host and secret_key are module-level globals defined elsewhere in the script.
    conn = Connector(
        None, mounted_paths={"/home/clowder/sites": "/home/clowder/sites"})

    submit_traits(file, betykey=bety_key)

    # Extractor metadata
    extractor_info = {
        "extractor_name": "terra.betydb",
        "extractor_version": "1.0",
        "extractor_author": "Max Burnette <*****@*****.**>",
        "extractor_description": "BETYdb CSV uploader",
        "extractor_repo": "https://github.com/terraref/computing-pipeline.git"
    }

    # Add metadata to original dataset indicating this was run
    ext_meta = build_metadata(
        host, extractor_info, clowder_id, {
            "betydb_link":
            "https://terraref.ncsa.illinois.edu/bety/api/v1/variables?name=canopy_cover"
        }, 'file')
    upload_metadata(conn, host, secret_key, clowder_id, ext_meta)
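Both uploaders above depend on module-level connection settings that the snippets do not show. A sketch of what those globals typically look like in these scripts (the key values are placeholders; only the host URL also appears elsewhere on this page):

host = "https://terraref.ncsa.illinois.edu/clowder/"  # Clowder API root
secret_key = "PLACEHOLDER_CLOWDER_KEY"                # Clowder API key used by build_metadata/upload_metadata
bety_key = "PLACEHOLDER_BETYDB_KEY"                   # BETYdb API key passed to submit_traits()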
Example #3
import collections
import os

from pyclowder.connectors import Connector
from terrautils.sensors import Sensors

import utils
import counts


config = {}
app_dir = '/home/filecounter'
SCAN_LOCK = False
count_defs = counts.SENSOR_COUNT_DEFINITIONS
DEFAULT_COUNT_START = None
DEFAULT_COUNT_END = None

CLOWDER_HOST = "https://terraref.ncsa.illinois.edu/clowder/"
CLOWDER_KEY = os.getenv('CLOWDER_KEY', False)
CONN = Connector("", {}, mounted_paths={"/home/clowder/sites":"/home/clowder/sites"})


# UTILITIES ----------------------------
def update_nested_dict(existing, new):
    """Nested update of python dictionaries for config parsing
    Adapted from http://stackoverflow.com/questions/3232943/update-value-of-a-nested-dictionary-of-varying-depth
    """
    for k, v in new.iteritems():
        if isinstance(existing, collections.Mapping):
            if isinstance(v, collections.Mapping):
                r = update_nested_dict(existing.get(k, {}), v)
                existing[k] = r
            else:
                existing[k] = new[k]
        else:
            existing = {k: new[k]}
    return existing
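A quick illustration of how the nested update behaves; the config fragments below are made up for the example, not taken from the counts module:

defaults = {"stereoTop": {"path": "/sites/ua-mac/raw_data", "days": 7}}
overrides = {"stereoTop": {"days": 30}}
print(update_nested_dict(defaults, overrides))
# {'stereoTop': {'path': '/sites/ua-mac/raw_data', 'days': 30}}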
Example #4
import os
import datetime
import json

from pyclowder.connectors import Connector
from pyclowder.datasets import upload_metadata
from pyclowder.files import upload_to_dataset
from terrautils.extractors import build_dataset_hierarchy, build_metadata
from terrautils.metadata import clean_metadata

# CONNECTION SETTINGS
CLOWDER_HOST = "http://141.142.22.37:9000/"
CLOWDER_KEY = "r1ek3rs"
CLOWDER_USER = "******"
CONN = Connector(
    None, mounted_paths={"/Users/mburnette/globus": "/Users/mburnette/globus"})

SPACE_ID = "5997333de98a9d4e498532ce"
SENSOR_FOLDER = "/Users/mburnette/globus/Level_1"
LOGFILE = open(os.path.join(SENSOR_FOLDER, "build_log.txt"), "w+")
SENSOR_LIST = ["scanner3DTop"]
TIMESTAMP_FOLDER = True
DRY_RUN = False


def log(string):
    print("%s: %s" %
          (datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'), string))
    LOGFILE.write(
        "%s: %s\n" %
        (datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'), string))
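The constants and helpers above are only partly exercised in the visible part of this snippet. A hedged sketch of how they might be driven; the loop below is illustrative and not part of the original script:

for sensor in SENSOR_LIST:
    log("scanning %s" % sensor)
    if DRY_RUN:
        log("dry run - skipping uploads for %s" % sensor)
        continue
    # build_dataset_hierarchy() / upload_to_dataset() calls would go here,
    # following the pattern shown in Example #5 below.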
Example #5
def upload_to_clowder(dir, type, scan):
    # NOTE: args, host, secret_key, clow_user, clow_pass and clowspace are
    # module-level globals defined elsewhere in the script.
    conn = Connector(None, mounted_paths={"/home/clowder/sites": "/home/clowder/sites"})

    if args.type == "rgb_geotiff":
        print("Submission of RGB GeoTIFF would happen now")
        return
        # NOTE: the early return above stubs out this branch, so the remainder
        # of the rgb_geotiff block below is currently unreachable.

        disp = "RGB GeoTIFFs"
        timestamp = dir.split("/")[-2]
        target_dsid = build_dataset_hierarchy(host, secret_key, clow_user, clow_pass, clowspace, disp,
                                              timestamp[:4], timestamp[5:7], timestamp[8:10], leaf_ds_name=disp+' - '+timestamp)

        output_ids = {}
        # First, upload actual files
        for targ_file in ["rgb_geotiff_L1_ua-mac_%s_left.tif" % ts,
                          "rgb_geotiff_L1_ua-mac_%s_right.tif" % ts,
                          "rgb_geotiff_L1_ua-mac_%s_nrmac_left.tif" % ts,
                          "rgb_geotiff_L1_ua-mac_%s_nrmac_right.tif" % ts]:
            targ_path = os.path.join(dir, targ_file)
            if os.path.isfile(targ_path):
                file_id = upload_to_dataset(conn, host, clow_user, clow_pass, target_dsid, targ_path)
                output_ids[targ_file] = file_id

        # Second, upload metadata
        ds_md = os.path.join(dir, "clean_metadata.json")
        if os.path.isfile(ds_md):
            # Dataset metadata
            extractor_info = {
                "extractor_name": "terra.stereo-rgb.bin2tif",
                "extractor_version": "1.1",
                "extractor_author": "Max Burnette <*****@*****.**>",
                "extractor_description": "Stereo RGB Image Bin to GeoTIFF Converter",
                "extractor_repo": "https://github.com/terraref/extractors-stereo-rgb.git"
            }

            with open(ds_md, 'r') as contents:
                jmd = json.load(contents)
            # Attach the cleaned LemnaTec metadata and the extractor metadata to the new dataset
            upload_dataset_metadata(conn, host, secret_key, target_dsid, jmd)
            lemna_md = build_metadata(host, extractor_info, target_dsid, jmd, 'dataset')
            upload_metadata(conn, host, secret_key, target_dsid, lemna_md)

        nrmac_md = os.path.join(dir, "nrmac_scores.json")
        if os.path.isfile(nrmac_md):
            # NRMAC file metadata
            extractor_info = {
                "extractor_name": "terra.stereo-rgb.nrmac",
                "extractor_version": "1.0",
                "extractor_author": "Sidike Paheding <*****@*****.**>",
                "extractor_description": "Stereo RGB No-Reference Multiscale Autocorrelation",
                "extractor_repo": "https://github.com/terraref/quality-metrics.git"
            }

            with open(nrmac_md, 'r') as contents:
                jmd = json.load(contents)
            fi_id = output_ids["rgb_geotiff_L1_ua-mac_%s_nrmac_left.tif" % ts]
            ext_meta = build_metadata(host, extractor_info, fi_id, {
                "quality_score": jmd["quality_score"]["left"]
            }, 'file')
            upload_metadata(conn, host, secret_key, fi_id, ext_meta)
            fi_id = output_ids["rgb_geotiff_L1_ua-mac_%s_nrmac_right.tif" % ts]
            ext_meta = build_metadata(host, extractor_info, fi_id, {
                "quality_score": jmd["quality_score"]["right"]
            }, 'file')
            upload_metadata(conn, host, secret_key, fi_id, ext_meta)

        # Write output_ids.json
        with open(os.path.join(dir, "clowder_ids.json"), 'w') as js:
            js.write(json.dumps(output_ids))

    elif args.type == "fullfield":
        print("Submission of Full Field Mosaic would happen now")
        return

        disp = "Full Field Stitched Mosaics"
        timestamp = dir.split("/")[-2]
        target_dsid = build_dataset_hierarchy(host, secret_key, clow_user, clow_pass, clowspace, disp,
                                              timestamp[:4], timestamp[5:7], leaf_ds_name=disp+' - '+timestamp)

        # TODO: Can each scan be in a separate folder in Clowder?

        output_ids = {}
        # First, upload NRMAC files
        for targ_file in ["fullfield_L1_ua-mac_%s_%s_nrmac.vrt" % (day, scan),
                          "fullfield_L1_ua-mac_%s_%s_nrmac.tif" % (day, scan)]:
            targ_path = os.path.join(dir, targ_file)
            if os.path.isfile(targ_path):
                file_id = upload_to_dataset(conn, host, clow_user, clow_pass, target_dsid, targ_path)
                output_ids[targ_file] = file_id

        # Second, upload main stitched files
        for targ_file in ["fullfield_L1_ua-mac_%s_%s.vrt" % (day, scan),
                          "fullfield_L1_ua-mac_%s_%s.tif" % (day, scan),
                          "fullfield_L1_ua-mac_%s_%s_thumb.tif" % (day, scan),
                          "fullfield_L1_ua-mac_%s_%s_10pct.tif" % (day, scan),
                          "fullfield_L1_ua-mac_%s_%s.png" % (day, scan)]:
            targ_path = os.path.join(dir, targ_file)
            if os.path.isfile(targ_path):
                file_id = upload_to_dataset(conn, host, clow_user, clow_pass, target_dsid, targ_path)
                output_ids[targ_file] = file_id

        # Third, upload trait CSV files
        for targ_file in ["fullfield_L1_ua-mac_%s_%s_canopycover_bety.csv" % (day, scan),
                          "fullfield_L1_ua-mac_%s_%s_canopycover_geo.csv" % (day, scan)]:
            targ_path = os.path.join(dir, targ_file)
            if os.path.isfile(targ_path):
                file_id = upload_to_dataset(conn, host, clow_user, clow_pass, target_dsid, targ_path)
                output_ids[targ_file] = file_id

        # Write output_ids.json
        with open(os.path.join(dir, "clowder_ids.json"), 'w') as js:
            js.write(json.dumps(output_ids))
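        # The clowder_ids.json written above maps each uploaded output filename to the
        # Clowder file id returned by upload_to_dataset(), e.g. (hypothetical ids):
        #   {"fullfield_L1_ua-mac_2017-06-01_scan1_nrmac.tif": "<clowder file id>", ...}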
        'perspective': perspective,
        'rotation_angle': rotation_angle,
        'zoom': zoom,
        'imager_stage_vertical_position': stage_position,
        'camera_gain': camera_gain,
        'camera_exposure': camera_exposure,
        'image_id': img_id,
        'imagedate': snap_details['timestamp'],
        'species': species,
        'genotype': genotype,
        'treatment': treatment,
        'sample_id': barcode['unique_id']
    }


conn = Connector({},
                 mounted_paths={"/home/clowder/sites": "/home/clowder/sites"})
experiment_root = sys.argv[1]
experiment_name = os.path.basename(experiment_root)
if os.path.exists(experiment_root):
    logger.debug("Searching for index files in %s" % experiment_root)
    md_file = os.path.join(experiment_root, experiment_name + "_metadata.json")
    csv_file = os.path.join(experiment_root, "SnapshotInfo.csv")

    if not os.path.isfile(md_file):
        logger.debug("%s not found" % md_file)
        sys.exit(1)
    if not os.path.isfile(csv_file):
        logger.debug("%s not found" % csv_file)
        sys.exit(1)

    logger.debug("Found index files; loading %s" % md_file)
                    default="https://terraref.ncsa.illinois.edu/clowder/")
parser.add_argument('-d',
                    '--daily',
                    help="only submit one dataset per day",
                    default=False,
                    action='store_true')
parser.add_argument('-t',
                    '--test',
                    help="only submit one dataset then exit",
                    default=False,
                    action='store_true')
args = parser.parse_args()

logging.basicConfig(filename="submit_%s.log" % args.input, level=logging.DEBUG)

CONN = Connector(None, mounted_paths={"/home/clowder/sites": args.sites})

logging.info("attempting to parse %s" % args.input)
sess = requests.Session()

if args.daily:
    seen_days = []
with open(args.input, 'r') as csv:
    i = 0
    for line in csv:
        ds_id, ds_name = line.replace("\n", "").split(",")
        if len(ds_id) > 0:
            if args.daily:
                day = ds_name.split(" - ")[1].split("__")[0]
                if day in seen_days:
                    continue