def _process_incoming_data(data, user_id=None, source=None):
    """
        Parse a list of incoming datasets into dicts suitable for storage,
        keyed on each dataset's hash so that duplicate values collapse to a
        single entry.
    """
    datasets = {}

    for d in data:
        val = d.parse_value()

        if val is None:
            log.info("Cannot parse data (dataset_id=%s). "
                     "Value not available.", d)
            continue

        data_dict = {
            'data_type': d.type,
            'data_name': d.name,
            'data_units': d.unit,
            'created_by': user_id,
            'frequency': None,
            'start_time': None,
        }

        # Assign dimension if necessary
        if d.unit is not None and d.dimension in (None, 'dimensionless'):
            data_dict['data_dimen'] = hydra_units.get_unit_dimension(d.unit)
        else:
            data_dict['data_dimen'] = d.dimension

        db_val = _get_db_val(d.type, val)
        data_dict['value'] = db_val

        if d.metadata is not None:
            # Metadata may arrive either as a JSON string or as a dict.
            if isinstance(d.metadata, (str, unicode)):
                metadata_dict = json.loads(d.metadata)
            else:
                metadata_dict = d.metadata
        else:
            metadata_dict = {}

        # Stamp the metadata with the user and source unless the caller
        # has already supplied them (the check is case-insensitive).
        metadata_keys = [k.lower() for k in metadata_dict]
        if user_id is not None and 'user_id' not in metadata_keys:
            metadata_dict[u'user_id'] = unicode(user_id)
        if source is not None and 'source' not in metadata_keys:
            metadata_dict[u'source'] = unicode(source)

        data_dict['metadata'] = metadata_dict

        d.data_hash = generate_data_hash(data_dict)
        data_dict['data_hash'] = d.data_hash

        datasets[d.data_hash] = data_dict

    return datasets
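
# Illustrative usage sketch (not part of the original module).
# _process_incoming_data() expects objects exposing parse_value(), type,
# name, unit, dimension and metadata. The _IncomingData stub below is
# hypothetical and stands in for the complex-model objects normally
# received from the service layer.
class _IncomingData(object):
    def __init__(self, type, name, value,
                 unit=None, dimension=None, metadata=None):
        self.type = type
        self.name = name
        self.value = value
        self.unit = unit
        self.dimension = dimension
        self.metadata = metadata

    def parse_value(self):
        # The real objects parse their raw value according to data type;
        # this stub simply returns it unchanged.
        return self.value

# Example call (assumes the module-level log, hydra_units, _get_db_val and
# generate_data_hash used above are available):
#
#     incoming = [_IncomingData('scalar', 'Max Flow', '42.0',
#                               unit='m^3 s^-1')]
#     datasets = _process_incoming_data(incoming, user_id=1,
#                                       source='import script')
#
# The result is keyed on each dataset's hash, so identical values collapse
# to a single entry.
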
def add_dataset(data_type, val, units, dimension, metadata=None, name="",
                user_id=None, flush=False):
    """
        Data can exist without scenarios. This is the mechanism whereby
        single pieces of data can be added without doing it through a scenario.

        A typical use of this would be for setting default values on types.
    """
    if metadata is None:
        # Avoid a mutable default argument, which would be shared
        # across calls.
        metadata = {}

    d = Dataset()

    d.set_val(data_type, val)
    d.set_metadata(metadata)

    # Assign dimension if necessary
    if units is not None and dimension is None:
        dimension = hydra_units.get_unit_dimension(units)

    d.data_type = data_type
    d.data_units = units
    d.data_name = name
    d.data_dimen = dimension
    d.created_by = user_id
    d.data_hash = d.set_hash()

    try:
        existing_dataset = DBSession.query(Dataset).filter(
            Dataset.data_hash == d.data_hash).one()
        if existing_dataset.check_user(user_id):
            # The user may access the matching dataset, so reuse it.
            d = existing_dataset
        else:
            # A dataset with the same hash exists but is not visible to this
            # user; perturb the hash with a creation timestamp so a separate
            # row is inserted.
            d.set_metadata({'created_at': datetime.datetime.now()})
            d.set_hash()
            DBSession.add(d)
    except NoResultFound:
        DBSession.add(d)

    if flush:
        DBSession.flush()

    return d
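
# Illustrative usage sketch (not part of the original module): adding a
# standalone scalar dataset, e.g. as a default value on a template type.
# All argument values here are hypothetical; dimension is passed as None
# so it is derived from the units, as add_dataset() does above.
#
#     default_val = add_dataset('scalar',
#                               '10.5',
#                               'm^3 s^-1',
#                               None,
#                               metadata={'origin': 'template default'},
#                               name='Default Max Flow',
#                               user_id=1,
#                               flush=True)
#
# Because datasets are de-duplicated on their hash, a second call with
# identical arguments returns the existing row instead of inserting a copy.
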
def _update_resourcescenario(scenario, resource_scenario, dataset=None,
                             new=False, user_id=None, source=None):
    """
        Insert or update the value of a resource's attribute by first getting
        the resource, then parsing the input data, then assigning the value.

        Returns a ResourceScenario object.
    """
    if scenario is None:
        scenario = DBSession.query(Scenario).filter(
            Scenario.scenario_id == 1).one()

    ra_id = resource_scenario.resource_attr_id

    log.debug("Assigning resource attribute: %s", ra_id)

    try:
        r_scen_i = DBSession.query(ResourceScenario).filter(
            ResourceScenario.scenario_id == scenario.scenario_id,
            ResourceScenario.resource_attr_id == ra_id).one()
    except NoResultFound:
        r_scen_i = ResourceScenario()
        r_scen_i.resource_attr_id = resource_scenario.resource_attr_id
        r_scen_i.scenario_id = scenario.scenario_id

        DBSession.add(r_scen_i)

    if scenario.locked == 'Y':
        log.info("Scenario %s is locked", scenario.scenario_id)
        return r_scen_i

    if dataset is not None:
        r_scen_i.dataset = dataset
        return r_scen_i

    dataset = resource_scenario.value

    value = dataset.parse_value()

    log.info("Assigning %s to resource attribute: %s", value, ra_id)

    if value is None:
        log.info("Cannot set data on resource attribute %s", ra_id)
        return None

    metadata = dataset.get_metadata_as_dict(source=source, user_id=user_id)
    dimension = dataset.dimension
    data_unit = dataset.unit

    # Assign dimension if necessary.
    # It happens that the dimension is an empty string. We set it to
    # None to achieve consistency in the DB.
    if dimension is None or len(dimension) == 0:
        if data_unit is not None:
            dimension = hydra_units.get_unit_dimension(data_unit)
        else:
            dimension = None

    data_hash = dataset.get_hash(value, metadata)

    assign_value(r_scen_i,
                 dataset.type.lower(),
                 value,
                 data_unit,
                 dataset.name,
                 dimension,  # pass the normalised dimension computed above
                 metadata=metadata,
                 data_hash=data_hash,
                 user_id=user_id,
                 source=source)

    return r_scen_i
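
# Illustrative usage sketch (not part of the original module): applying a
# batch of incoming resource scenarios to one scenario. scenario_i and
# incoming_rscens are assumed to be supplied by the caller; locking is
# handled inside _update_resourcescenario(), so a locked scenario is
# returned unchanged.
#
#     for incoming_rscen in incoming_rscens:
#         updated = _update_resourcescenario(scenario_i,
#                                            incoming_rscen,
#                                            user_id=user_id,
#                                            source='client update')
#         if updated is None:
#             log.info("No value could be set for resource attr %s",
#                      incoming_rscen.resource_attr_id)
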