def add_dataset(ctx, dataset):
    """
    Add a single dataset and return it with its newly assigned dataset ID.

    .. code-block:: python

        (Dataset){
            value     = 123,
            unit      = 'm^3',
            dimension = 'Volume',
            name      = 'Storage Capacity',
            type      = 'scalar', #(others are 'descriptor', 'array' and 'timeseries')
            metadata  = "{'measured_by':'John Doe'}", #Json encoded python dictionary
        }

    Args:
        dataset (Dataset): The dataset complex model (see above)

    Returns:
        Dataset: The new dataset object, complete with ID
    """
    user_id = ctx.in_header.user_id
    parsed_value = dataset.parse_value()
    metadata_dict = dataset.get_metadata_as_dict(user_id=user_id)

    new_dataset_i = data.add_dataset(dataset.type,
                                     parsed_value,
                                     dataset.unit,
                                     dataset.dimension,
                                     metadata_dict,
                                     dataset.name,
                                     user_id,
                                     flush=True)

    return Dataset(new_dataset_i)
def get_collection_datasets(ctx, collection_id):
    """
    Retrieve every dataset belonging to the specified collection.

    Args:
        collection_id (int): The collection whose datasets we want to retrieve

    Returns:
        List(Dataset): A list of dataset complex models, all of them in the
            collection specified
    """
    datasets_i = data.get_collection_datasets(collection_id,
                                              **ctx.in_header.__dict__)

    return [Dataset(dataset_i) for dataset_i in datasets_i]
def get_datasets(ctx, dataset_ids):
    """
    Fetch multiple datasets by their IDs.

    Args:
        dataset_ids (List(int)): A list of dataset IDs

    Returns:
        List(Dataset): The corresponding list of datasets. A subset will be
            returned if not all datasets are available.

    Raises:
        ResourceNotFoundError: If none of the requested datasets were found.
    """
    datasets_i = data.get_datasets(dataset_ids, **ctx.in_header.__dict__)

    return [Dataset(dataset_i) for dataset_i in datasets_i]
def get_dataset(ctx, dataset_id):
    """
    Fetch one dataset by its ID.

    Args:
        dataset_id (int): The ID of the requested dataset

    Returns:
        Dataset: The dataset complex model

    Raises:
        ResourceNotFoundError: If the dataset does not exist.
    """
    dataset_i = data.get_dataset(dataset_id, **ctx.in_header.__dict__)

    return Dataset(dataset_i)
def clone_dataset(ctx, dataset_id):
    """
    Make a copy of one dataset, identified by its ID.

    Args:
        dataset_id (int): The ID of the dataset to be cloned

    Returns:
        Dataset: The newly cloned dataset complex model

    Raises:
        ResourceNotFoundError: If the dataset does not exist.
    """
    cloned_i = data.clone_dataset(dataset_id, **ctx.in_header.__dict__)

    return Dataset(cloned_i)
def search_datasets(ctx, dataset_id, name, collection_name, data_type,
                    dimension, unit, scenario_id, metadata_name, metadata_val,
                    attr_id, type_id, unconnected, inc_metadata, inc_val,
                    page_start, page_size):
    """
    Search for datasets that satisfy the criteria specified.
    By default, returns a max of 2000 datasets. To return datasets from 2001
    onwards, set page_start to 2001.

    Args:
        dataset_id      (int)    : The ID of the dataset
        name            (string) : The name of the dataset
        collection_name (string) : Search for datasets in a collection with
                                   this name
        data_type       (string) : 'scalar', 'descriptor', 'array', 'timeseries'
        dimension       (string) : Datasets with this dimension
        unit            (string) : Datasets with this unit.
        scenario_id     (int)    : Datasets in this scenario
        metadata_name   (string) : Datasets that have this metadata
        metadata_val    (string) : Datasets that have this metadata value
        attr_id         (int)    : Datasets that are associated with this
                                   attribute via resource scenario & resource
                                   attribute
        type_id         (int)    : Datasets that are associated with this type
                                   via resource scenario -> resource attribute
                                   -> attribute -> type
        unconnected     (char)   : Datasets that are not in any scenarios
        inc_metadata    (char)   : (default 'N') Return metadata with retrieved
                                   datasets. 'Y' gives a performance hit.
        inc_val         (char)   : (default 'N') Include the value with the
                                   dataset. 'Y' gives a performance hit
        page_start      (int)    : Return datasets from this point
                                   (ex: from index 2001 of 10,000)
        page_size       (int)    : Return this number of datasets in one go.
                                   default is 2000.

    Returns:
        List(Dataset): The datasets matching all the specified criteria.
    """
    matching_datasets = data.search_datasets(dataset_id,
                                             name,
                                             collection_name,
                                             data_type,
                                             dimension,
                                             unit,
                                             scenario_id,
                                             metadata_name,
                                             metadata_val,
                                             attr_id,
                                             type_id,
                                             unconnected,
                                             inc_metadata,
                                             inc_val,
                                             page_start,
                                             page_size,
                                             **ctx.in_header.__dict__)

    # Wrap each DB-layer result in the Dataset complex model for the caller.
    return [Dataset(match) for match in matching_datasets]
def update_dataset(ctx, dataset):
    """
    Update a piece of data directly, rather than through a resource scenario.

    Args:
        dataset (Dataset): A complex model representing an existing dataset
            which is to be updated (must have an id)

    Returns:
        Dataset: The updated dataset
    """
    parsed_value = dataset.parse_value()
    # NOTE(review): unlike add_dataset, metadata is fetched here without a
    # user_id argument — confirm this asymmetry is intentional.
    metadata_dict = dataset.get_metadata_as_dict()

    updated_i = data.update_dataset(dataset.id,
                                    dataset.name,
                                    dataset.type,
                                    parsed_value,
                                    dataset.unit,
                                    dataset.dimension,
                                    metadata_dict,
                                    **ctx.in_header.__dict__)

    return Dataset(updated_i)
def get_scenario_data(ctx, scenario_id):
    """
    Retrieve all the data associated with the given scenario.

    Args:
        scenario_id (int): The ID of the scenario whose data is requested

    Returns:
        List(Dataset): The scenario's data, as Dataset complex models
    """
    scenario_data_i = scenario.get_scenario_data(scenario_id,
                                                 **ctx.in_header.__dict__)

    return [Dataset(d_i) for d_i in scenario_data_i]