Example #1
def test_read_noaa():
    get_local_bucket(empty=True)

    data_filepath = get_datapath(
        filename="co_pocn25_surface-flask_1_ccgg_event.txt", data_type="NOAA")

    results = ObsSurface.read_file(filepath=data_filepath, data_type="NOAA")

    uuid = results["co_pocn25_surface-flask_1_ccgg_event.txt"][
        "co_pocn25_surface-flask_1_ccgg_event_co"]

    co_data = Datasource.load(uuid=uuid, shallow=False).data()

    assert len(co_data.keys()) == 95

    old_data = co_data["1990-12-02-12:23:00+00:00_1990-12-02-12:23:00+00:00"]

    assert old_data.time[0] == Timestamp("1990-12-02T12:23:00")
    assert old_data.time[-1] == Timestamp("1990-12-02T12:23:00")

    assert old_data["co"][0] == 141.61
    assert old_data["co"][-1] == 141.61

    assert old_data["co_repeatability"][0] == -999.99
    assert old_data["co_repeatability"][-1] == -999.99

    assert old_data["co_selection_flag"][0] == 0
    assert old_data["co_selection_flag"][-1] == 0

    obs = ObsSurface.load()

    assert list(obs._datasource_names.keys()
                )[0] == "co_pocn25_surface-flask_1_ccgg_event_co"
Example #2
def test_read_CRDS():
    get_local_bucket(empty=True)

    filepath = get_datapath(filename="bsd.picarro.1minute.248m.dat",
                            data_type="CRDS")

    results = ObsSurface.read_file(filepath=filepath, data_type="CRDS")

    keys = results["bsd.picarro.1minute.248m.dat"].keys()

    expected_keys = sorted([
        "bsd.picarro.1minute.248m_ch4",
        "bsd.picarro.1minute.248m_co",
        "bsd.picarro.1minute.248m_co2",
    ])
    assert sorted(keys) == expected_keys

    # Load up the assigned Datasources and check they contain the correct data
    data = results["bsd.picarro.1minute.248m.dat"]

    ch4_data = Datasource.load(
        uuid=data["bsd.picarro.1minute.248m_ch4"]).data()
    ch4_data = ch4_data["2014-01-30-10:52:30+00:00_2014-01-30-14:20:30+00:00"]

    assert ch4_data.time[0] == Timestamp("2014-01-30T10:52:30")
    assert ch4_data["ch4"][0] == 1960.24
    assert ch4_data["ch4"][-1] == 1952.24
    assert ch4_data["ch4_stdev"][-1] == 0.674
    assert ch4_data["ch4_n_meas"][-1] == 25.0

    obs = ObsSurface.load()

    assert sorted(obs._datasource_names.keys()) == expected_keys
Example #3
def test_save_footprint():
    bucket = get_local_bucket(empty=True)

    metadata = {"test": "testing123"}

    dir_path = os.path.dirname(__file__)
    test_data = "../data/emissions"
    filename = "WAO-20magl_EUROPE_201306_downsampled.nc"
    filepath = os.path.join(dir_path, test_data, filename)

    data = xarray.open_dataset(filepath)

    datasource = Datasource(name="test_name")
    datasource.add_data(metadata=metadata, data=data, data_type="footprint")
    datasource.save()

    prefix = f"{Datasource._datasource_root}/uuid/{datasource._uuid}"
    objs = get_object_names(bucket, prefix)

    datasource_2 = Datasource.load(bucket=bucket, key=objs[0])

    date_key = "2013-06-02-00:00:00+00:00_2013-06-30-00:00:00+00:00"

    data = datasource_2._data[date_key]

    assert float(data.pressure[0].values) == pytest.approx(1023.971)
    assert float(data.pressure[2].values) == pytest.approx(1009.940)
    assert float(data.pressure[-1].values) == pytest.approx(1021.303)
Example #4
def query_store():
    """ Create a dictionary that can be used to visualise the object store 

        Returns:
            dict: Dictionary of site, species, instrument and network metadata keyed by Datasource UUID
    """
    from collections import defaultdict
    from HUGS.Modules import Datasource, ObsSurface

    obs = ObsSurface.load()

    datasource_uuids = obs.datasources()
    datasources = (Datasource.load(uuid=uuid, shallow=True)
                   for uuid in datasource_uuids)

    data = defaultdict(dict)

    for d in datasources:
        metadata = d.metadata()
        result = {
            "site": metadata["site"],
            "species": metadata["species"],
            "instrument": metadata.get("instrument", "Unknown"),
            "network": metadata.get("network")
        }
        data[d.uuid()] = result

    return data
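
A brief usage sketch (not part of the original source): the returned dictionary maps each Datasource UUID to its basic metadata, so it can be printed or handed straight to a visualisation layer.

store_summary = query_store()
for uuid, info in store_summary.items():
    # Each entry holds the site, species, instrument and network for one Datasource
    print(f"{uuid}: {info['site']} {info['species']} ({info['instrument']})")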


Example #5
def assign_data(gas_data, lookup_results, overwrite):
    """ Create or get an existing Datasource for each gas in the file

        Args:
            gas_data (dict): Dictionary containing data and metadata for each species
            lookup_results (dict): Datasource lookup results keyed by species, each holding
                a Datasource name and a UUID (None if no existing Datasource was found)
            overwrite (bool): If True, overwrite existing data in the Datasource
        Returns:
            dict: Dictionary of the UUIDs of the Datasources the data has been assigned to, keyed by Datasource name
    """
    from HUGS.Modules import Datasource

    uuids = {}
    # TODO: add copying of attributes, or add attributes to the metadata at an earlier stage
    for species in gas_data:
        metadata = gas_data[species]["metadata"]
        data = gas_data[species]["data"]
        name = lookup_results[species]["name"]
        uuid = lookup_results[species]["uuid"]

        # If we have a UUID for this Datasource load the existing object
        # from the object store
        if uuid:
            datasource = Datasource.load(uuid=uuid)
        else:
            datasource = Datasource(name=name)

        # Add the dataframe to the datasource
        datasource.add_data(metadata=metadata, data=data, overwrite=overwrite)
        # Save Datasource to object store
        datasource.save()

        uuids[name] = datasource.uuid()

    return uuids
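
A hedged sketch of the inputs assign_data expects, inferred from the key lookups above; the names and values are placeholders, not real data.

# Shapes inferred from the accesses in assign_data; values are illustrative
gas_data = {
    "ch4": {
        "metadata": {"site": "bsd", "species": "ch4"},
        "data": ch4_dataset,  # placeholder: the parsed measurement data for this species
    },
}
lookup_results = {
    # uuid=None means no existing Datasource was found, so a new one is created
    "ch4": {"name": "bsd.picarro.1minute.248m_ch4", "uuid": None},
}

uuids = assign_data(gas_data=gas_data, lookup_results=lookup_results, overwrite=False)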
Example #6
def test_delete_Datasource():
    bucket = get_local_bucket(empty=True)

    data_filepath = get_datapath(filename="tta.co2.1minute.222m.min.dat",
                                 data_type="ICOS")

    ObsSurface.read_file(filepath=data_filepath, data_type="ICOS")

    obs = ObsSurface.load()

    datasources = obs.datasources()

    uuid = datasources[0]

    datasource = Datasource.load(uuid=uuid)

    data = datasource.data()["2011-12-07-01:38:00+00:00_2011-12-31-19:57:00+00:00"]

    assert data["co2"][0] == pytest.approx(397.334)
    assert data.time[0] == Timestamp("2011-12-07T01:38:00")

    data_keys = datasource.data_keys()

    key = data_keys[0]

    assert exists(bucket=bucket, key=key)

    obs.delete(uuid=uuid)

    assert uuid not in obs.datasources()

    assert not exists(bucket=bucket, key=key)
Example #7
def test_read_thames_barrier():
    get_local_bucket(empty=True)

    data_filepath = get_datapath(filename="thames_test_20190707.csv",
                                 data_type="THAMESBARRIER")

    results = ObsSurface.read_file(filepath=data_filepath,
                                   data_type="THAMESBARRIER")

    expected_keys = sorted([
        'thames_test_20190707_CH4', 'thames_test_20190707_CO2',
        'thames_test_20190707_CO'
    ])

    assert sorted(list(
        results["thames_test_20190707.csv"].keys())) == expected_keys

    uuid = results["thames_test_20190707.csv"]["thames_test_20190707_CO2"]

    data = Datasource.load(uuid=uuid, shallow=False).data()
    data = data["2019-07-01-00:39:55+00:00_2019-08-01-00:10:30+00:00"]

    assert data.time[0] == Timestamp("2019-07-01T00:39:55")
    assert data.time[-1] == Timestamp("2019-08-01T00:10:30")
    assert data["co2"][0] == pytest.approx(417.97344761)
    assert data["co2"][-1] == pytest.approx(417.80000653)
    assert data["co2_variability"][0] == 0
    assert data["co2_variability"][-1] == 0

    obs = ObsSurface.load()

    assert sorted(obs._datasource_names.keys()) == expected_keys
Example #8
    def set_rank(self, uuid, rank, daterange):
        """ Set the rank of a Datasource associated with this object.

            This function performs checks to ensure multiple ranks aren't set for
            overlapping dateranges.

            Passing a daterange and rank to this function will overwrite any current 
            daterange stored for that rank.

            Args:
                uuid (str): UUID of Datasource
                rank (int): Rank of data
                daterange (str, list): Daterange(s)
            Returns:
                None
        """
        from HUGS.Modules import Datasource
        from HUGS.Util import daterange_from_str

        if not 0 <= int(rank) <= 10:
            raise ValueError("Rank can only take values 0 (unranked) to 10, where 1 is the highest rank")

        if not isinstance(daterange, list):
            daterange = [daterange]

        try:
            rank_data = self._rank_data[uuid]
            # Check this source isn't ranked differently for the same dates
            for d in daterange:
                # Check we don't have any overlapping dateranges for other ranks
                daterange_obj = daterange_from_str(d)
                # Check the other dateranges for overlapping dates and raise error
                for existing_rank, existing_daterange in rank_data.items():
                    for e in existing_daterange:
                        e = daterange_from_str(e)

                        intersection = daterange_obj.intersection(e)
                        if len(intersection) > 0 and int(existing_rank) != int(rank):
                            raise ValueError(f"This datasource has already got the rank {existing_rank} for dates that overlap the ones given. \
                                                Overlapping dates are {intersection}")
        except KeyError:
            pass

        # Store the rank within the Datasource
        datasource = Datasource.load(uuid=uuid, shallow=True)
        datasource.set_rank(rank=rank, daterange=daterange)
        datasource.save()

        try:
            self._rank_data[uuid][rank].extend(daterange)
        except KeyError:
            self._rank_data[uuid][rank] = daterange
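
An illustrative call (not from the original source), using the daterange string format that appears throughout the tests in this listing; the UUID is a placeholder.

obs = ObsSurface.load()
# Rank this Datasource as primary (rank 1) for the whole of 2014
obs.set_rank(
    uuid="2e628682-094f-4ffb-949a-51e5a2e43a55",  # placeholder UUID
    rank=1,
    daterange="2014-01-01-00:00:00+00:00_2014-12-31-23:59:59+00:00",
)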
Example #9
def get_data(key_list):
    """ Gets data from the Datasources found by the search function

        Bypass loading the Datasource? Get both then we have metadata?

    """
    from HUGS.Modules import Datasource

    # This returns a list of lists of data
    # TODO: consider preprocessing the data before it comes raw out of the
    # object store, and restricting it to the requested daterange
    return [Datasource.load(key=key)._data for key in key_list]
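
A usage sketch: the keys would normally come from the search function; the key below is a placeholder built from the Datasource key pattern used elsewhere in this listing.

from HUGS.Modules import Datasource

# Placeholder key following the f"{Datasource._datasource_root}/uuid/{uuid}" pattern
key = f"{Datasource._datasource_root}/uuid/2e628682-094f-4ffb-949a-51e5a2e43a55"
all_data = get_data(key_list=[key])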
Example #10
def get_sources(args):
    """ Get the Datasources associated with the specified species at a specified site

        Args:
            args (dict): Dictionary containing site and species keys
        Returns:
            dict: Dictionary of unranked Datasource metadata keyed by source name
    """
    try:
        site = args["site"]
    except KeyError:
        # TODO - create a SiteError exception type to raise here
        raise KeyError("Site must be specified")

    try:
        species = args["species"]
    except KeyError:
        raise KeyError("Species must be specified")

    obs = ObsSurface.load()

    datasource_uuids = obs.datasources()
    # Shallow load the Datasources (only get their JSON metadata)
    datasources = [
        Datasource.load(uuid=uuid, shallow=True) for uuid in datasource_uuids
    ]

    matching_sources = [
        d for d in datasources
        if d.search_metadata(search_terms=[site, species], find_all=True)
    ]

    def name_str(d):
        return "_".join([d.species(), d.site(), d.inlet(), d.instrument()])

    unranked = {
        name_str(d): {
            "rank": d.rank(),
            "data_range": d.daterange_str(),
            "uuid": d.uuid()
        }
        for d in matching_sources
    }

    return unranked
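
An illustrative call, using site and species codes that appear in the tests in this listing; the returned keys follow the species_site_inlet_instrument pattern built by name_str above.

sources = get_sources(args={"site": "bsd", "species": "ch4"})
# e.g. {"ch4_bsd_248m_picarro": {"rank": ..., "data_range": ..., "uuid": ...}}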
Example #11
    def get_sources(self, site, species, data_type):
        """ Get the datasources for this site and species to allow a ranking to be set

            Args:
                site (str): Three letter site code
                species (str): Species name
                data_type (str): Must be a valid data type, e.g. CRDS, GC.
                    See the DataTypes class for all valid data types
            Returns:
                dict: Dictionary of datasource metadata
        """
        if len(site) != 3 or not valid_site(site):
            # raise InvalidSiteError(f"{site} is not a valid site code")
            raise ValueError(f"{site} is not a valid site code")

        obs = ObsSurface.load()
        datasource_uuids = obs.datasources()

        # Shallow load the Datasources (only get their JSON metadata)
        datasources = [
            Datasource.load(uuid=uuid, shallow=True)
            for uuid in datasource_uuids
        ]

        matching_sources = [
            d for d in datasources
            if d.search_metadata(search_terms=[site, species], find_all=True)
        ]

        def name_str(d):
            return "_".join([d.species(), d.site(), d.inlet(), d.instrument()])

        rank_info = {
            name_str(d): {
                "rank": d.rank(),
                "data_range": d.daterange_str(),
                "uuid": d.uuid()
            }
            for d in matching_sources
        }

        self._before_ranking = copy.deepcopy(rank_info)
        self._key_uuids = {key: rank_info[key]["uuid"] for key in rank_info}

        return rank_info
Example #12
def test_read_cranfield():
    get_local_bucket(empty=True)

    data_filepath = get_datapath(filename="THB_hourly_means_test.csv",
                                 data_type="Cranfield_CRDS")

    results = ObsSurface.read_file(filepath=data_filepath,
                                   data_type="CRANFIELD")

    expected_keys = sorted([
        "THB_hourly_means_test_ch4",
        "THB_hourly_means_test_co2",
        "THB_hourly_means_test_co",
    ])

    assert sorted(results["THB_hourly_means_test.csv"].keys()) == expected_keys

    uuid = results["THB_hourly_means_test.csv"]["THB_hourly_means_test_ch4"]

    ch4_data = Datasource.load(uuid=uuid, shallow=False).data()
    ch4_data = ch4_data["2018-05-05-00:00:00+00:00_2018-05-13-16:00:00+00:00"]

    assert ch4_data.time[0] == Timestamp("2018-05-05")
    assert ch4_data.time[-1] == Timestamp("2018-05-13T16:00:00")

    assert ch4_data["ch4"][0] == pytest.approx(2585.651)
    assert ch4_data["ch4"][-1] == pytest.approx(1999.018)

    assert ch4_data["ch4 variability"][0] == pytest.approx(75.50218)
    assert ch4_data["ch4 variability"][-1] == pytest.approx(6.48413)

    # Check obs has stored the keys correctly
    obs = ObsSurface.load()

    assert sorted(list(obs._datasource_names.keys())) == sorted([
        'THB_hourly_means_test_ch4', 'THB_hourly_means_test_co2',
        'THB_hourly_means_test_co'
    ])
Example #13
    def assign_data(self,
                    lookup_results,
                    source_name,
                    data,
                    metadata,
                    overwrite=False):
        """ Assign data to a new or existing Datasource

            Args:
                lookup_results (dict): Results of Datasource lookup
                source_name (str): Name of data source
                data (xarray.Dataset): Data
                metadata (dict): Dictionary of metadata
                overwrite (bool, default=False): Should existing data be overwritten
            Returns:
                dict: Dictionary of Datasource UUIDs keyed by source name
        """
        from HUGS.Modules import Datasource

        uuids = {}
        for key in lookup_results:
            uuid = lookup_results[key]["uuid"]
            name = metadata["name"]

            if uuid:
                datasource = Datasource.load(uuid=uuid)
            else:
                datasource = Datasource(name=name)

            datasource.add_data(metadata=metadata,
                                data=data,
                                data_type="footprint",
                                overwrite=overwrite)
            datasource.save()

            uuids[name] = datasource.uuid()

        return uuids
Example #14
    def delete(self, uuid):
        """ Delete a Datasource with the given UUID

            This function deletes both the data stored by this Datasource and the
            record of the Datasource itself in the object store.

            Args:
                uuid (str): UUID of Datasource
            Returns:
                None
        """
        from HUGS.ObjectStore import delete_object, get_bucket
        from HUGS.Modules import Datasource

        bucket = get_bucket()
        # Load the Datasource and get all its keys
        # iterate over these keys and delete them
        datasource = Datasource.load(uuid=uuid)

        data_keys = datasource.data_keys(return_all=True)

        for version in data_keys:
            key_data = data_keys[version]["keys"]

            for daterange in key_data:
                key = key_data[daterange]
                delete_object(bucket=bucket, key=key)

        # Then delete the Datasource itself
        key = f"{Datasource._datasource_root}/uuid/{uuid}"
        delete_object(bucket=bucket, key=key)

        # Finally, remove the Datasource from our internal records
        name = self._datasource_uuids[uuid]

        del self._datasource_names[name]
        del self._datasource_uuids[uuid]
Example #15
def test_read_icos():
    get_local_bucket(empty=True)

    data_filepath = get_datapath(filename="tta.co2.1minute.222m.min.dat",
                                 data_type="ICOS")

    results = ObsSurface.read_file(filepath=data_filepath, data_type="ICOS")

    assert list(results["tta.co2.1minute.222m.min.dat"].keys()
                )[0] == "tta.co2.1minute.222m.min_co2"

    uuid = results["tta.co2.1minute.222m.min.dat"][
        "tta.co2.1minute.222m.min_co2"]

    data = Datasource.load(uuid=uuid, shallow=False).data()

    assert sorted(list(data.keys())) == sorted([
        "2011-12-07-01:38:00+00:00_2011-12-31-19:57:00+00:00",
        "2011-06-01-05:54:00+00:00_2011-08-31-17:58:00+00:00",
        "2011-03-30-08:52:00+00:00_2011-05-31-20:59:00+00:00",
        "2011-09-01-11:20:00+00:00_2011-11-30-03:39:00+00:00",
        "2012-12-01-04:03:00+00:00_2012-12-31-15:41:00+00:00",
        "2012-06-01-11:15:00+00:00_2012-08-07-19:16:00+00:00",
        "2012-04-07-06:20:00+00:00_2012-05-31-18:00:00+00:00",
        "2012-09-05-02:15:00+00:00_2012-11-30-19:08:00+00:00",
        "2013-01-01-00:01:00+00:00_2013-01-17-18:06:00+00:00",
    ])

    co2_data = data["2012-12-01-04:03:00+00:00_2012-12-31-15:41:00+00:00"]

    assert co2_data.time[0] == Timestamp("2012-12-01T04:03:00")
    assert co2_data.time[-1] == Timestamp("2012-12-31T15:41:00")

    assert co2_data["co2"][0] == 397.765
    assert co2_data["co2"][-1] == 398.374

    assert co2_data["co2_variability"][0] == 0.057
    assert co2_data["co2_variability"][-1] == 0.063

    assert co2_data["co2_number_of_observations"][0] == 12
    assert co2_data["co2_number_of_observations"][-1] == 13

    del co2_data.attrs["File created"]

    assert co2_data.attrs == {
        "Conditions of use":
        "Ensure that you contact the data owner at the outset of your project.",
        "Source": "In situ measurements of air",
        "Conventions": "CF-1.6",
        "Processed by": "*****@*****.**",
        "species": "co2",
        "Calibration_scale": "unknown",
        "station_longitude": -2.98598,
        "station_latitude": 56.55511,
        "station_long_name": "Angus Tower, UK",
        "station_height_masl": 300.0,
    }

    obs = ObsSurface.load()

    assert list(
        obs._datasource_names.keys())[0] == "tta.co2.1minute.222m.min_co2"
Example #16
def search(
    locations,
    species=None,
    inlet=None,
    instrument=None,
    find_all=True,
    start_datetime=None,
    end_datetime=None,
):
    """ Search for gas data (optionally within a daterange)

        TODO - review this function - feel like it can be tidied and simplified

        Args:
            species (str or list): Terms to search for in Datasources
            locations (str or list): Where to search for the terms in species
            inlet (str, default=None): Inlet height such as 100m
            instrument (str, default=None): Instrument name such as picarro
            find_all (bool, default=True): Require all search terms to be satisfied
            start_datetime (datetime, default=None): Start datetime for search
            If None a start datetime of UNIX epoch (1970-01-01) is set
            end_datetime (datetime, default=None): End datetime for search
            If None an end datetime of the current datetime is set
        Returns:
            dict: List of keys of Datasources matching the search parameters
    """
    from collections import defaultdict
    from json import load
    from HUGS.Modules import Datasource, ObsSurface
    from HUGS.Util import (get_datetime_now, get_datetime_epoch,
                           create_daterange_str, timestamp_tzaware,
                           get_datapath)

    if not isinstance(species, list):
        species = [species]

    if not isinstance(locations, list):
        locations = [locations]

    # Allow passing of location names instead of codes
    site_codes_json = get_datapath(filename="site_codes.json")
    with open(site_codes_json, "r") as f:
        d = load(f)
        site_codes = d["name_code"]

    updated_locations = []
    # Check locations, if they're longer than three letters do a lookup
    for loc in locations:
        if len(loc) > 3:
            try:
                site_code = site_codes[loc.lower()]
                updated_locations.append(site_code)
            except KeyError:
                raise ValueError(f"Invalid site {loc} passed")
        else:
            updated_locations.append(loc)

    locations = updated_locations

    if start_datetime is None:
        start_datetime = get_datetime_epoch()
    if end_datetime is None:
        end_datetime = get_datetime_now()

    # Ensure passed datetimes are timezone aware
    start_datetime = timestamp_tzaware(start_datetime)
    end_datetime = timestamp_tzaware(end_datetime)

    # Here we want to load in the ObsSurface module for now
    obs = ObsSurface.load()
    datasource_uuids = obs.datasources()

    # Shallow load the Datasources so we can search their metadata
    datasources = [
        Datasource.load(uuid=uuid, shallow=True) for uuid in datasource_uuids
    ]

    # First we find the Datasources from locations we want to narrow down our search
    location_sources = defaultdict(list)
    # If we have locations to search
    for location in locations:
        for datasource in datasources:
            if datasource.search_metadata(search_terms=location):
                location_sources[location].append(datasource)

    # This is returned to the caller
    results = defaultdict(dict)

    # With both inlet and instrument specified we bypass the ranking system
    if inlet is not None and instrument is not None:
        for site, sources in location_sources.items():
            for sp in species:
                for datasource in sources:
                    # Just match the single source here
                    if datasource.search_metadata(
                            search_terms=[sp, site, inlet, instrument],
                            find_all=True):
                        daterange_str = create_daterange_str(
                            start=start_datetime, end=end_datetime)
                        # Get the data keys for the data in the matching daterange
                        in_date = datasource.in_daterange(
                            daterange=daterange_str)

                        data_date_str = strip_dates_keys(in_date)

                        key = f"{sp}_{site}_{inlet}_{instrument}".lower()

                        # Find the keys that match the correct data
                        results[key]["keys"] = {data_date_str: in_date}
                        results[key]["metadata"] = datasource.metadata()

        return results

    for location, sources in location_sources.items():
        # Loop over and look for the species
        species_data = defaultdict(list)
        for datasource in sources:
            for s in species:
                search_terms = [
                    x for x in (s, location, inlet, instrument)
                    if x is not None
                ]
                # Check the species and the daterange
                if datasource.search_metadata(search_terms=search_terms,
                                              find_all=True):
                    species_data[s].append(datasource)

        # For each location we want to find the highest ranking sources for the selected species
        for sp, sources in species_data.items():
            ranked_sources = {}

            # How to return all the sources if they're all 0?
            for source in sources:
                rank_data = source.get_rank(start_date=start_datetime,
                                            end_date=end_datetime)

                # With no rank set we get an empty dictionary
                if not rank_data:
                    ranked_sources[0] = 0
                    continue

                # Just get the highest ranked datasources and return them
                # Find the highest ranked data from this site
                highest_rank = sorted(rank_data.keys())[-1]

                if highest_rank == 0:
                    ranked_sources[0] = 0
                    continue

                ranked_sources[source.uuid()] = {
                    "rank": highest_rank,
                    "dateranges": rank_data[highest_rank],
                    "source": source
                }

            # If it's all zeroes we want to return all sources
            if list(ranked_sources) == [0]:
                for source in sources:
                    key = f"{source.species()}_{source.site()}_{source.inlet()}_{source.instrument()}".lower(
                    )

                    daterange_str = create_daterange_str(start=start_datetime,
                                                         end=end_datetime)
                    data_keys = source.in_daterange(daterange=daterange_str)

                    if not data_keys:
                        continue

                    # Get a key that covers the daterange of the actual data and not from epoch to now
                    # if no start/end datetimes are passed
                    data_date_str = strip_dates_keys(data_keys)

                    results[key]["keys"] = {data_date_str: data_keys}
                    results[key]["metadata"] = source.metadata()

                continue
            else:
                # TODO - find a cleaner way of doing this
                # We might have a zero rank, delete it as we have higher ranked data
                try:
                    del ranked_sources[0]
                except KeyError:
                    pass

            # Otherwise iterate over the sources that are ranked and extract the keys
            for uid in ranked_sources:
                source = ranked_sources[uid]["source"]
                source_dateranges = ranked_sources[uid]["dateranges"]

                key = f"{source.species()}_{source.site()}_{source.inlet()}_{source.instrument()}".lower(
                )

                data_keys = {}
                # Get the keys for each daterange
                for d in source_dateranges:
                    keys_in_date = source.in_daterange(daterange=d)
                    d = d.replace("+00:00", "")
                    if keys_in_date:
                        data_keys[d] = keys_in_date

                if not data_keys:
                    continue

                results[key]["keys"] = data_keys
                results[key]["metadata"] = source.metadata()

    return results
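
An illustrative search call (not part of the original source); the site and species values come from the tests in this listing and the result layout follows the results dictionary built above.

from datetime import datetime

results = search(
    locations="bsd",
    species=["ch4", "co2"],
    start_datetime=datetime(2014, 1, 1),
    end_datetime=datetime(2014, 2, 1),
)
# Each entry maps a key such as "ch4_bsd_248m_picarro" to the matching object
# store keys and the Datasource metadata
for key, entry in results.items():
    print(key, list(entry["keys"]), entry["metadata"].get("site"))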
Example #17
def test_read_GC():
    get_local_bucket(empty=True)

    data_filepath = get_datapath(filename="capegrim-medusa.18.C",
                                 data_type="GC")
    precision_filepath = get_datapath(
        filename="capegrim-medusa.18.precisions.C", data_type="GC")

    results = ObsSurface.read_file(filepath=(data_filepath,
                                             precision_filepath),
                                   data_type="GCWERKS")

    expected_keys = sorted([
        "capegrim-medusa.18_NF3",
        "capegrim-medusa.18_CF4",
        "capegrim-medusa.18_PFC-116",
        "capegrim-medusa.18_PFC-218",
        "capegrim-medusa.18_PFC-318",
        "capegrim-medusa.18_C4F10",
        "capegrim-medusa.18_C6F14",
        "capegrim-medusa.18_SF6",
        "capegrim-medusa.18_SO2F2",
        "capegrim-medusa.18_SF5CF3",
        "capegrim-medusa.18_HFC-23",
        "capegrim-medusa.18_HFC-32",
        "capegrim-medusa.18_HFC-125",
        "capegrim-medusa.18_HFC-134a",
        "capegrim-medusa.18_HFC-143a",
        "capegrim-medusa.18_HFC-152a",
        "capegrim-medusa.18_HFC-227ea",
        "capegrim-medusa.18_HFC-236fa",
        "capegrim-medusa.18_HFC-245fa",
        "capegrim-medusa.18_HFC-365mfc",
        "capegrim-medusa.18_HFC-4310mee",
        "capegrim-medusa.18_HCFC-22",
        "capegrim-medusa.18_HCFC-124",
        "capegrim-medusa.18_HCFC-132b",
        "capegrim-medusa.18_HCFC-133a",
        "capegrim-medusa.18_HCFC-141b",
        "capegrim-medusa.18_HCFC-142b",
        "capegrim-medusa.18_CFC-11",
        "capegrim-medusa.18_CFC-12",
        "capegrim-medusa.18_CFC-13",
        "capegrim-medusa.18_CFC-112",
        "capegrim-medusa.18_CFC-113",
        "capegrim-medusa.18_CFC-114",
        "capegrim-medusa.18_CFC-115",
        "capegrim-medusa.18_H-1211",
        "capegrim-medusa.18_H-1301",
        "capegrim-medusa.18_H-2402",
        "capegrim-medusa.18_CH3Cl",
        "capegrim-medusa.18_CH3Br",
        "capegrim-medusa.18_CH3I",
        "capegrim-medusa.18_CH2Cl2",
        "capegrim-medusa.18_CHCl3",
        "capegrim-medusa.18_CCl4",
        "capegrim-medusa.18_CH2Br2",
        "capegrim-medusa.18_CHBr3",
        "capegrim-medusa.18_CH3CCl3",
        "capegrim-medusa.18_TCE",
        "capegrim-medusa.18_PCE",
        "capegrim-medusa.18_ethyne",
        "capegrim-medusa.18_ethane",
        "capegrim-medusa.18_propane",
        "capegrim-medusa.18_c-propane",
        "capegrim-medusa.18_benzene",
        "capegrim-medusa.18_toluene",
        "capegrim-medusa.18_COS",
        "capegrim-medusa.18_desflurane",
    ])

    assert sorted(list(results["capegrim-medusa.18.C"].keys())) == expected_keys

    # Load in some data
    uuid = results["capegrim-medusa.18.C"]["capegrim-medusa.18_HFC-152a"]

    hfc152a_data = Datasource.load(uuid=uuid, shallow=False).data()
    hfc152a_data = hfc152a_data[
        "2018-01-01-02:24:00+00:00_2018-01-31-23:33:00+00:00"]

    assert hfc152a_data.time[0] == Timestamp("2018-01-01T02:24:00")
    assert hfc152a_data.time[-1] == Timestamp("2018-01-31T23:33:00")

    assert hfc152a_data["hfc152a"][0] == 4.409
    assert hfc152a_data["hfc152a"][-1] == 4.262

    assert hfc152a_data["hfc152a_repeatability"][0] == 0.03557
    assert hfc152a_data["hfc152a_repeatability"][-1] == 0.03271

    assert hfc152a_data["hfc152a_status_flag"][0] == 0
    assert hfc152a_data["hfc152a_status_flag"][-1] == 0

    assert hfc152a_data["hfc152a_integration_flag"][0] == 0
    assert hfc152a_data["hfc152a_integration_flag"][-1] == 0

    # Check we have the Datasource info saved

    obs = ObsSurface.load()

    assert sorted(obs._datasource_names.keys()) == expected_keys

    del hfc152a_data.attrs["File created"]

    assert hfc152a_data.attrs == {
        "data_owner": "Paul Krummel",
        "data_owner_email": "*****@*****.**",
        "inlet_height_magl": "75m_4",
        "comment":
        "Medusa measurements. Output from GCWerks. See Miller et al. (2008).",
        "Conditions of use":
        "Ensure that you contact the data owner at the outset of your project.",
        "Source": "In situ measurements of air",
        "Conventions": "CF-1.6",
        "Processed by": "*****@*****.**",
        "species": "hfc152a",
        "Calibration_scale": "SIO-05",
        "station_longitude": 144.689,
        "station_latitude": -40.683,
        "station_long_name": "Cape Grim, Tasmania",
        "station_height_masl": 94.0,
    }