def delete_object(query_info: Dict[str, str],
                  session: scoped_session,
                  collection: bool = False) -> Optional[str]:
    """
    Delete the object from the database.

    :param query_info: Dict containing the id and @type of object that has
        to be deleted
    :param session: sqlalchemy session
    :param collection: True if the type_ is of a collection, False for any
        other class
    :return: the collection id when a collection was deleted, otherwise None

    Raises:
        InstanceNotFound: If no object with the given type_ and id_ exists.
    """
    type_ = query_info["@type"]
    id_ = query_info["id_"]
    database_class = get_database_class(type_)
    if collection:
        # Bug fix: Query.delete() returns the number of deleted rows and
        # never raises NoResultFound, so the original try/except was dead
        # code. Check the count explicitly to signal a missing collection.
        deleted_count = session.query(database_class).filter_by(
            collection_id=id_).delete()
        if deleted_count == 0:
            raise InstanceNotFound(type_=type_, id_=id_)
        try:
            session.commit()
        except InvalidRequestError:
            # Commit failed mid-transaction; undo the partial work.
            session.rollback()
        return id_
    else:
        try:
            object_ = (session.query(database_class).filter(
                database_class.id == id_).one())
        except NoResultFound:
            raise InstanceNotFound(type_=type_, id_=id_)
        session.delete(object_)
        try:
            session.commit()
        except InvalidRequestError:
            session.rollback()
def _update_statistics(
    session: scoped_session,
    table: type[Statistics | StatisticsShortTerm],
    stat_id: int,
    statistic: StatisticData,
) -> None:
    """Update an existing statistics row in the database.

    :param session: sqlalchemy scoped session
    :param table: statistics table to update (long or short term)
    :param stat_id: primary key of the row to update
    :param statistic: new values; keys absent from the dict become NULL
    """
    try:
        session.query(table).filter_by(id=stat_id).update(
            {
                table.mean: statistic.get("mean"),
                table.min: statistic.get("min"),
                table.max: statistic.get("max"),
                table.last_reset: statistic.get("last_reset"),
                table.state: statistic.get("state"),
                table.sum: statistic.get("sum"),
            },
            synchronize_session=False,
        )
    except SQLAlchemyError:
        # Bug fix: the original passed the builtin `id` function to the log
        # call instead of the row id being updated.
        _LOGGER.exception(
            "Unexpected exception when updating statistics %s:%s ",
            stat_id,
            statistic,
        )
def get_object(query_info: Dict[str, str],
               session: scoped_session,
               collection: bool = False) -> Dict[str, str]:
    """
    Fetch one object (or one collection) from the database.

    :param query_info: Dict containing the id and @type of the object to
        retrieve
    :param session: sqlalchemy session
    :param collection: True if the type_ is of a collection, False for any
        other class
    :return: dict of the object with its properties

    Raises:
        InstanceNotFound: If no matching object/collection exists.
    """
    type_ = query_info["@type"]
    id_ = query_info["id_"]
    database_class = get_database_class(type_)
    if collection:
        members = (session.query(
            database_class.members, database_class.member_type).filter(
                database_class.collection_id == id_).all())
        if not members:
            raise InstanceNotFound(type_=type_, id_=id_)
        return {
            "@type": query_info["@type"],
            "members": members,
        }
    try:
        row_dict = (session.query(database_class).filter(
            database_class.id == id_).one()).__dict__
    except NoResultFound:
        raise InstanceNotFound(type_=type_, id_=id_)
    # Deep-copy so the session-held object is never mutated, then strip
    # the SQLAlchemy bookkeeping keys before returning.
    object_template = copy.deepcopy(row_dict)
    object_template.pop("_sa_instance_state")
    object_template.pop("id")
    object_template["@type"] = query_info["@type"]
    return object_template
def get_single(type_: str, api_name: str, session: scoped_session,
               path: str = None) -> Dict[str, Any]:
    """Get instance of classes with single objects.

    :param type_: type of object to be retrieved
    :param api_name: api name specified while starting server
    :param session: sqlalchemy scoped session
    :param path: endpoint
    :return: response containing information about a single object

    Raises:
        ClassNotFound: If `type_` is not a known RDFClass.
        InstanceNotFound: If no instance of `type_` exists.
    """
    try:
        rdf_class = session.query(RDFClass).filter(
            RDFClass.name == type_).one()
    except NoResultFound:
        raise ClassNotFound(type_=type_)
    try:
        # The most recently inserted instance is treated as "the" single one.
        instance = session.query(Instance).filter(
            Instance.type_ == rdf_class.id).all()[-1]
    except (NoResultFound, IndexError, ValueError):
        raise InstanceNotFound(type_=rdf_class.name)
    object_ = get(instance.id, rdf_class.name, session=session,
                  api_name=api_name, path=path)
    endpoint = path if path is not None else type_
    object_["@id"] = "/{}/{}".format(api_name, endpoint)
    return object_
def _statistics_at_time( session: scoped_session, metadata_ids: set[int], table: type[Statistics | StatisticsShortTerm], start_time: datetime, ) -> list | None: """Return last known statics, earlier than start_time, for the metadata_ids.""" # Fetch metadata for the given (or all) statistic_ids if table == StatisticsShortTerm: base_query = QUERY_STATISTICS_SHORT_TERM else: base_query = QUERY_STATISTICS query = session.query(*base_query) most_recent_statistic_ids = (session.query( func.max(table.id).label("max_id"), ).filter( table.start < start_time).filter( table.metadata_id.in_(metadata_ids))) most_recent_statistic_ids = most_recent_statistic_ids.group_by( table.metadata_id) most_recent_statistic_ids = most_recent_statistic_ids.subquery() query = query.join( most_recent_statistic_ids, table.id == most_recent_statistic_ids.c.max_id, ) return execute(query)
def get_modification_table_diff(
        session: scoped_session,
        agent_job_id: str = None) -> List[Dict[str, Any]]:
    """
    Get modification table difference.

    :param session: sqlalchemy session.
    :param agent_job_id: Job id from the client.
    :return: List of all modifications done after job with
        job_id = agent_job_id.
    """
    if agent_job_id is None:
        # No reference job given - every recorded modification qualifies.
        modifications = session.query(Modification).order_by(
            Modification.job_id.asc()).all()
    else:
        # Only records newer than the one carrying agent_job_id qualify;
        # an unknown job id yields an empty diff.
        try:
            reference = session.query(Modification).filter(
                Modification.job_id == agent_job_id).one()
        except NoResultFound:
            return []
        modifications = session.query(Modification).filter(
            Modification.job_id > reference.job_id).order_by(
                Modification.job_id.asc()).all()
    # Build the response body.
    return [{
        "job_id": modification.job_id,
        "method": modification.method,
        "resource_url": modification.resource_url
    } for modification in modifications]
def update_single(object_: Dict[str, Any], session: scoped_session,
                  api_name: str, path: str = None) -> int:
    """Update instance of classes with single objects.

    :param object_: new object
    :param session: sqlalchemy scoped session
    :param api_name: api name specified while starting server
    :param path: endpoint
    :return: id of the updated object

    Raises:
        ClassNotFound: If the object's @type is not a known RDFClass.
        InstanceNotFound: If no instance of that class exists.
    """
    class_type = object_["@type"]
    try:
        rdf_class = session.query(RDFClass).filter(
            RDFClass.name == class_type).one()
    except NoResultFound:
        raise ClassNotFound(type_=class_type)
    try:
        # Single-object classes are updated via their latest instance.
        instance = session.query(Instance).filter(
            Instance.type_ == rdf_class.id).all()[-1]
    except (NoResultFound, IndexError, ValueError):
        raise InstanceNotFound(type_=rdf_class.name)
    return update(
        id_=instance.id,
        type_=class_type,
        object_=object_,
        session=session,
        api_name=api_name,
        path=path)
def get_collection(API_NAME: str, type_: str,
                   session: scoped_session) -> Dict[str, Any]:
    """Retrieve a type of collection from the database.

    :param API_NAME: api name specified while starting server
    :param type_: type of objects in the collection
    :param session: sqlalchemy scoped session
    :return: collection template listing every instance of `type_`

    Raises:
        ClassNotFound: If `type_` is not a known RDFClass.
    """
    collection_base = "/" + API_NAME + "/" + type_ + "Collection/"
    try:
        rdf_class = session.query(RDFClass).filter(
            RDFClass.name == type_).one()
    except NoResultFound:
        raise ClassNotFound(type_=type_)
    try:
        instances = session.query(Instance).filter(
            Instance.type_ == rdf_class.id).all()
    except NoResultFound:
        instances = list()
    # One member entry per stored instance.
    members = [{
        "@id": collection_base + str(instance_.id),
        "@type": type_
    } for instance_ in instances]
    collection_template = {
        "@id": collection_base,
        "@context": None,
        "@type": type_ + "Collection",
        "members": members
    }  # type: Dict[str, Any]
    return collection_template
def apply_filter(object_id: str, search_props: Dict[str, Any],
                 triples: Graph, session: scoped_session) -> bool:
    """Check whether objects has properties with query values or not.

    :param object_id: Id of the instance.
    :param search_props: Dictionary of query parameters with property id
        and values.
    :param triples: All triples.
    :param session: sqlalchemy scoped session.
    :return: True if the instance has properties with given values,
        False otherwise.
    """
    for prop, expected in search_props.items():
        if isinstance(expected, dict):
            # Nested property: follow the III triple and recurse into the
            # referenced instance.
            data = session.query(triples).filter(
                triples.GraphIII.subject == object_id,
                triples.GraphIII.predicate == prop).one()
            if not apply_filter(data.object_, expected, triples, session):
                return False
        else:
            # Terminal property: compare the stored terminal value.
            data = session.query(triples).filter(
                triples.GraphIIT.subject == object_id,
                triples.GraphIIT.predicate == prop).one()
            terminal = session.query(Terminal).filter(
                Terminal.id == data.object_).one()
            if terminal.value != expected:
                return False
    return True
def test_broadcast_block(
        fx_server: WSGIServer,
        fx_session: scoped_session,
        fx_other_session: Session,
        fx_other_server: WSGIServer,
        fx_user: User
):
    """A multicast block should arrive in the other node's session."""
    now = datetime.datetime.utcnow()
    node = Node(url=fx_server.url, last_connected_at=now)
    node2 = Node(url=fx_other_server.url,
                 last_connected_at=datetime.datetime.utcnow())
    block = Block.create(fx_user, [])
    fx_session.add_all([node, node2, block])
    fx_session.flush()
    # Sanity check: block exists locally but not on the other node yet.
    assert fx_session.query(Block).get(block.id)
    assert not fx_other_session.query(Block).get(block.id)
    multicast(
        serialized=block.serialize(
            use_bencode=False,
            include_suffix=True,
            include_moves=True,
            include_hash=True
        ),
        broadcast=broadcast_block,
    )
    # A successful broadcast bumps the peer's last_connected_at stamp and
    # does not duplicate the local block.
    assert node.last_connected_at > now
    assert fx_session.query(Block).count() == 1
    assert fx_other_session.query(Block).get(block.id)
def _find_duplicates(
        session: scoped_session,
        table: type[Statistics | StatisticsShortTerm]
) -> tuple[list[int], list[dict]]:
    """Find duplicated statistics.

    Returns a tuple of (ids of duplicate rows to purge, dicts describing
    duplicates whose content differs from the row that is kept).
    """
    # Mark every (metadata_id, start) pair that occurs more than once.
    subquery = (session.query(
        table.start,
        table.metadata_id,
        literal_column("1").label("is_duplicate"),
    ).group_by(table.metadata_id,
               table.start).having(func.count() > 1).subquery())
    # Fetch the duplicated rows themselves, newest id first within each
    # (metadata_id, start) group; cap the batch size per run.
    query = (session.query(table).outerjoin(
        subquery,
        (subquery.c.metadata_id == table.metadata_id)
        & (subquery.c.start == table.start),
    ).filter(subquery.c.is_duplicate == 1).order_by(
        table.metadata_id, table.start,
        table.id.desc()).limit(1000 * MAX_ROWS_TO_PURGE))
    duplicates = execute(query)
    original_as_dict = {}
    start = None
    metadata_id = None
    duplicate_ids: list[int] = []
    non_identical_duplicates_as_dict: list[dict] = []

    if not duplicates:
        return (duplicate_ids, non_identical_duplicates_as_dict)

    def columns_to_dict(
            duplicate: type[Statistics | StatisticsShortTerm]) -> dict:
        """Convert a SQLAlchemy row to dict."""
        dict_ = {}
        for key in duplicate.__mapper__.c.keys():
            dict_[key] = getattr(duplicate, key)
        return dict_

    def compare_statistic_rows(row1: dict, row2: dict) -> bool:
        """Compare two statistics rows, ignoring id and created."""
        ignore_keys = ["id", "created"]
        keys1 = set(row1).difference(ignore_keys)
        keys2 = set(row2).difference(ignore_keys)
        return keys1 == keys2 and all(row1[k] == row2[k] for k in keys1)

    for duplicate in duplicates:
        if start != duplicate.start or metadata_id != duplicate.metadata_id:
            # First row of a new group (highest id, due to the id.desc()
            # ordering) is the one that will be kept.
            original_as_dict = columns_to_dict(duplicate)
            start = duplicate.start
            metadata_id = duplicate.metadata_id
            continue
        # Every later row in the group is a duplicate to purge.
        duplicate_as_dict = columns_to_dict(duplicate)
        duplicate_ids.append(duplicate.id)
        if not compare_statistic_rows(original_as_dict, duplicate_as_dict):
            # Content differs from the kept row - report it for inspection.
            non_identical_duplicates_as_dict.append({
                "duplicate": duplicate_as_dict,
                "original": original_as_dict
            })

    return (duplicate_ids, non_identical_duplicates_as_dict)
def compile_hourly_statistics(instance: Recorder, session: scoped_session,
                              start: datetime) -> None:
    """Compile hourly statistics.

    Aggregates short-term statistics within [start_time, start_time + 1h)
    into one Statistics row per metadata_id.
    """
    start_time = start.replace(minute=0)
    end_time = start_time + timedelta(hours=1)
    # Get last hour's average, min, max
    summary = {}
    baked_query = instance.hass.data[STATISTICS_SHORT_TERM_BAKERY](
        lambda session: session.query(*QUERY_STATISTICS_SUMMARY_MEAN))
    baked_query += lambda q: q.filter(StatisticsShortTerm.start >= bindparam(
        "start_time"))
    baked_query += lambda q: q.filter(StatisticsShortTerm.start < bindparam(
        "end_time"))
    baked_query += lambda q: q.group_by(StatisticsShortTerm.metadata_id)
    baked_query += lambda q: q.order_by(StatisticsShortTerm.metadata_id)

    stats = execute(
        baked_query(session).params(start_time=start_time, end_time=end_time))

    if stats:
        for stat in stats:
            metadata_id, _mean, _min, _max = stat
            summary[metadata_id] = {
                "metadata_id": metadata_id,
                "mean": _mean,
                "min": _min,
                "max": _max,
            }

    # Get last hour's sum
    # NOTE(review): rownum == 1 assumes QUERY_STATISTICS_SUMMARY_SUM numbers
    # rows so the latest row per metadata_id is 1 - confirm against its
    # definition.
    subquery = (session.query(*QUERY_STATISTICS_SUMMARY_SUM).filter(
        StatisticsShortTerm.start >= bindparam("start_time")).filter(
            StatisticsShortTerm.start < bindparam("end_time")).subquery())
    query = (session.query(subquery).filter(subquery.c.rownum == 1).order_by(
        subquery.c.metadata_id))
    stats = execute(query.params(start_time=start_time, end_time=end_time))

    if stats:
        for stat in stats:
            metadata_id, start, last_reset, state, _sum, sum_increase, _ = stat
            # Merge sum-related fields into any mean/min/max entry already
            # collected for this metadata_id.
            summary[metadata_id] = {
                **summary.get(metadata_id, {}),
                **{
                    "metadata_id": metadata_id,
                    "last_reset": process_timestamp(last_reset),
                    "state": state,
                    "sum": _sum,
                    "sum_increase": sum_increase,
                },
            }

    # Insert one combined hourly Statistics row per metadata_id.
    for stat in summary.values():
        session.add(
            Statistics.from_stats(stat.pop("metadata_id"), start_time, stat))
def get_collection(API_NAME: str, type_: str, session: scoped_session,
                   path: str = None) -> Dict[str, Any]:
    """Retrieve a type of collection from the database.

    :param API_NAME: api name specified while starting server
    :param type_: type of object to be updated
    :param session: sqlalchemy scoped session
    :param path: endpoint
    :return: response containing all the objects of that particular type_

    Raises:
        ClassNotFound: If `type_` does not represt a valid/defined RDFClass.
    """
    # Both the collection id and each member id hang off the same base URL,
    # which depends on whether a custom endpoint path was supplied.
    if path is not None:
        base_url = "/{}/{}/".format(API_NAME, path)
    else:
        base_url = "/{}/{}Collection/".format(API_NAME, type_)
    collection_template = {
        "@id": base_url,
        "@context": None,
        "@type": "{}Collection".format(type_),
        "members": list()
    }  # type: Dict[str, Any]
    try:
        rdf_class = session.query(RDFClass).filter(
            RDFClass.name == type_).one()
    except NoResultFound:
        raise ClassNotFound(type_=type_)
    try:
        instances = session.query(Instance).filter(
            Instance.type_ == rdf_class.id).all()
    except NoResultFound:
        instances = list()
    for instance_ in instances:
        collection_template["members"].append({
            "@id": "{}{}".format(base_url, instance_.id),
            "@type": type_
        })
    return collection_template
def _update_or_add_metadata(
    hass: HomeAssistant,
    session: scoped_session,
    new_metadata: StatisticMetaData,
) -> int:
    """Get metadata_id for a statistic_id.

    If the statistic_id is previously unknown, add it. If it's already known,
    update metadata if needed.

    Updating metadata source is not possible.
    """
    statistic_id = new_metadata["statistic_id"]
    old_metadata_dict = get_metadata_with_session(hass, session,
                                                  [statistic_id], None)
    if not old_metadata_dict:
        # Unknown statistic_id: insert a fresh metadata row.
        unit = new_metadata["unit_of_measurement"]
        has_mean = new_metadata["has_mean"]
        has_sum = new_metadata["has_sum"]
        meta = StatisticsMeta.from_meta(DOMAIN, statistic_id, unit, has_mean,
                                        has_sum)
        session.add(meta)
        session.flush()  # Flush to get the metadata id assigned
        _LOGGER.debug(
            "Added new statistics metadata for %s, new_metadata: %s",
            statistic_id,
            new_metadata,
        )
        return meta.id  # type: ignore[no-any-return]

    metadata_id, old_metadata = old_metadata_dict[statistic_id]
    # Known statistic_id: only rewrite the row when one of the mutable
    # fields actually changed.
    if (old_metadata["has_mean"] != new_metadata["has_mean"]
            or old_metadata["has_sum"] != new_metadata["has_sum"]
            or old_metadata["unit_of_measurement"] !=
            new_metadata["unit_of_measurement"]):
        session.query(StatisticsMeta).filter_by(
            statistic_id=statistic_id).update(
                {
                    StatisticsMeta.has_mean: new_metadata["has_mean"],
                    StatisticsMeta.has_sum: new_metadata["has_sum"],
                    StatisticsMeta.unit_of_measurement:
                        new_metadata["unit_of_measurement"],
                },
                synchronize_session=False,
            )
        _LOGGER.debug(
            "Updated statistics metadata for %s, old_metadata: %s, "
            "new_metadata: %s",
            statistic_id,
            old_metadata,
            new_metadata,
        )

    return metadata_id
def test_post_node(fx_test_client: FlaskClient, fx_session: scoped_session):
    """Registering a reachable node via POST /nodes persists it."""
    url = 'http://test.neko'
    assert not fx_session.query(Node).first()
    with Mocker() as m:
        # The server pings the candidate node before accepting it.
        m.get(url + '/ping', text='pong')
        response = fx_test_client.post(
            '/nodes',
            data=json.dumps({'url': url}),
            content_type='application/json')
        assert response.status_code == 200
        assert json.loads(response.get_data())['result'] == 'success'
        stored = fx_session.query(Node).filter(Node.url == url).first()
        assert stored
        assert stored.last_connected_at
def test_post_node_connection_error(fx_test_client: FlaskClient,
                                    fx_session: scoped_session):
    """An unreachable node must be rejected and not persisted."""
    url = 'http://test.neko'
    assert not fx_session.query(Node).first()
    with Mocker() as m:
        # Simulate the ping failing at the transport level.
        m.get(url + '/ping', exc=ConnectionError)
        response = fx_test_client.post(
            '/nodes',
            data=json.dumps({'url': url}),
            content_type='application/json')
        assert response.status_code == 403
        payload = json.loads(response.get_data())
        assert payload['result'] == 'failed'
        assert payload['message'] == f'Connection to node {url} was failed.'
        assert not fx_session.query(Node).filter(Node.url == url).first()
def delete_multiple(id_: List[int], type_: str,
                    session: scoped_session) -> None:
    """
    To delete multiple rows in a single request.

    :param id_: comma-separated string of ids for objects to be deleted
    :param type_: type of object to be deleted
    :param session: sqlalchemy scoped session

    Raises:
        ClassNotFound: If `type_` does not represent a valid/defined
            RDFClass.
        InstanceNotFound: If any instance with type 'type_' and any id in
            'id_' list does not exist.
    """
    id_ = id_.split(',')
    try:
        rdf_class = session.query(RDFClass).filter(
            RDFClass.name == type_).one()
    except NoResultFound:
        raise ClassNotFound(type_=type_)
    instances = list()
    data_III = list()
    data_IAC = list()
    data_IIT = list()
    for index in id_:
        try:
            # Bug fix: the original used a Python `and` inside filter()
            # (`Instance.id == index and type_ == rdf_class.id`), which
            # SQLAlchemy silently collapses to the first clause, so the
            # type check never reached SQL. Pass both clauses to filter()
            # and compare the Instance.type_ column, not the type_ string.
            instance = session.query(Instance).filter(
                Instance.id == index,
                Instance.type_ == rdf_class.id).one()
            instances.append(instance)
        except NoResultFound:
            raise InstanceNotFound(type_=rdf_class.name, id_=index)
        # Collect every triple that references this instance as subject.
        data_IIT += session.query(triples).filter(
            triples.GraphIIT.subject == index).all()
        data_IAC += session.query(triples).filter(
            triples.GraphIAC.subject == index).all()
        data_III += session.query(triples).filter(
            triples.GraphIII.subject == index).all()
    data = data_III + data_IIT + data_IAC
    for item in data:
        session.delete(item)
    # IIT triples point at Terminal rows that must be removed as well.
    for triple in data_IIT:
        terminal = session.query(Terminal).filter(
            Terminal.id == triple.object_).one()
        session.delete(terminal)
    # III triples point at nested instances; delete them recursively.
    for triple in data_III:
        III_instance = session.query(Instance).filter(
            Instance.id == triple.object_).one()
        III_instance_type = session.query(RDFClass).filter(
            RDFClass.id == III_instance.type_).one()
        delete(III_instance.id, III_instance_type.name, session=session)
    for instance in instances:
        session.delete(instance)
    session.commit()
def delete_single(type_: str, session: scoped_session) -> None:
    """Delete instance of classes with single objects.

    Raises:
        ClassNotFound: If `type_` is not a known RDFClass.
        InstanceNotFound: If no instance of `type_` exists.
    """
    try:
        rdf_class = session.query(RDFClass).filter(
            RDFClass.name == type_).one()
    except NoResultFound:
        raise ClassNotFound(type_=type_)
    try:
        # The latest instance of the class is the single object.
        latest = session.query(Instance).filter(
            Instance.type_ == rdf_class.id).all()[-1]
    except (NoResultFound, IndexError, ValueError):
        raise InstanceNotFound(type_=rdf_class.name)
    return delete(latest.id, type_, session=session)
def insert_single(object_: Dict[str, Any], session: scoped_session) -> Any:
    """Insert instance of classes with single objects.

    Raises:
        ClassNotFound: If the object's @type is not a known RDFClass.
        InstanceExists: If an instance of that class already exists.
    """
    class_type = object_["@type"]
    try:
        rdf_class = session.query(RDFClass).filter(
            RDFClass.name == class_type).one()
    except NoResultFound:
        raise ClassNotFound(type_=class_type)
    try:
        session.query(Instance).filter(
            Instance.type_ == rdf_class.id).all()[-1]
    except (NoResultFound, IndexError, ValueError):
        # No existing instance - safe to insert.
        return insert(object_, session=session)
    raise InstanceExists(type_=rdf_class.name)
def _update_or_add_metadata(
    hass: HomeAssistant,
    session: scoped_session,
    statistic_id: str,
    new_metadata: StatisticMetaData,
) -> str:
    """Get metadata_id for a statistic_id, add if it doesn't exist."""
    old_metadata_dict = _get_metadata(hass, session, [statistic_id], None)
    if not old_metadata_dict:
        # Unknown statistic_id: insert a fresh metadata row, then look the
        # assigned id back up.
        unit = new_metadata["unit_of_measurement"]
        has_mean = new_metadata["has_mean"]
        has_sum = new_metadata["has_sum"]
        session.add(
            StatisticsMeta.from_meta(DOMAIN, statistic_id, unit, has_mean,
                                     has_sum))
        metadata_ids = _get_metadata_ids(hass, session, [statistic_id])
        _LOGGER.debug(
            "Added new statistics metadata for %s, new_metadata: %s",
            statistic_id,
            new_metadata,
        )
        return metadata_ids[0]

    metadata_id, old_metadata = next(iter(old_metadata_dict.items()))
    # Known statistic_id: only rewrite the row when one of the mutable
    # fields actually changed.
    if (old_metadata["has_mean"] != new_metadata["has_mean"]
            or old_metadata["has_sum"] != new_metadata["has_sum"]
            or old_metadata["unit_of_measurement"] !=
            new_metadata["unit_of_measurement"]):
        session.query(StatisticsMeta).filter_by(
            statistic_id=statistic_id).update(
                {
                    StatisticsMeta.has_mean: new_metadata["has_mean"],
                    StatisticsMeta.has_sum: new_metadata["has_sum"],
                    StatisticsMeta.unit_of_measurement:
                        new_metadata["unit_of_measurement"],
                },
                synchronize_session=False,
            )
        _LOGGER.debug(
            "Updated statistics metadata for %s, old_metadata: %s, "
            "new_metadata: %s",
            statistic_id,
            old_metadata,
            new_metadata,
        )

    return metadata_id
def insert_properties(properties: Set[str],
                      session: scoped_session) -> Optional[Any]:
    """Insert all the properties as defined in the APIDocumentation into DB."""
    # Only add properties whose name is not already present in the table.
    new_props = [
        BaseProperty(name=prop) for prop in properties
        if not session.query(exists().where(
            BaseProperty.name == prop)).scalar()
    ]
    session.add_all(new_props)
    session.commit()
    return None


# if __name__ == "__main__":
#     Session = sessionmaker(bind=engine)
#     session = Session()
#
#     doc = doc_gen("test", "test")
#     # Extract all classes with supportedProperty from both
#     classes = get_classes(doc.generate())
#
#     # Extract all properties from both
#     # import pdb; pdb.set_trace()
#     properties = get_all_properties(classes)
#     # Add all the classes
#     insert_classes(classes, session)
#     print("Classes inserted successfully")
#     # Add all the properties
#     insert_properties(properties, session)
#     print("Properties inserted successfully")
def broadcast_node_failed(fx_session: scoped_session,
                          fx_other_session: Session, error):
    """Broadcasting to an unreachable peer must not register the new node.

    :param fx_session: local node's session
    :param fx_other_session: other node's session
    :param error: transport exception to simulate on the peer
    """
    now = datetime.datetime.utcnow()
    node = Node(url='http://test.neko', last_connected_at=now)
    node2 = Node(url='http://other.neko',
                 last_connected_at=datetime.datetime.utcnow())
    fx_session.add(node)
    fx_session.commit()
    fx_other_session.add(node2)
    fx_other_session.commit()
    assert not fx_session.query(Node).filter(Node.url == node2.url).first()
    with Mocker() as m:
        m.post('http://test.neko', exc=error)
        # Bug fix: the original referenced the undefined name
        # `fx_other_server` (NameError); the node being broadcast is node2.
        broadcast_node(serialized={'url': node2.url})
    # The failed broadcast must neither add node2 nor touch the timestamp.
    assert not fx_session.query(Node).filter(Node.url == node2.url).first()
    assert node.last_connected_at == now
def test_get_new_novice_broadcasting(
        fx_test_client: FlaskClient,
        fx_user: User,
        fx_private_key: PrivateKey,
        fx_session: scoped_session,
):
    """Visiting /new after login should multicast the create_novice move."""
    with unittest.mock.patch('nekoyume.game.multicast') as m:
        fx_test_client.post('/login', data={
            'private_key': fx_private_key.to_hex(),
            'name': 'test_user',
        }, follow_redirects=True)
        res = fx_test_client.get('/new')
        assert res.status_code == 200
        # The /new view must have stored a create_novice move.
        move = fx_session.query(Move).filter(
            Move.name == 'create_novice',
        ).first()
        assert move
        serialized = move.serialize(
            use_bencode=False,
            include_signature=True,
            include_id=True,
        )
        assert m.called
        args = m.call_args[1]
        # The multicast payload must be the serialized move itself,
        # originating from the local node and using broadcast_move.
        assert serialized == args['serialized']
        my_node = args['my_node']
        assert isinstance(my_node, Node)
        assert my_node.url == 'http://localhost'
        broadcast = args['broadcast']
        assert isinstance(broadcast, typing.Callable)
        assert broadcast.__name__ == 'broadcast_move'
def get_all_filtered_instances(session: scoped_session,
                               search_params: Dict[str, Any],
                               type_: str,
                               collection: bool = False):
    """Get all the filtered instances of from the database
    based on given query parameters.

    :param session: sqlalchemy scoped session
    :param search_params: Query parameters
    :param type_: @type of object to be deleted
    :param collection: True if the type_ is of a collection, False for any
        other class
    :return: filtered instances

    Raises:
        InvalidSearchParameter: If a parameter does not match a column of
            the (possibly nested) database class.
    """
    database_class = get_database_class(type_)
    if collection:
        # Collections are matched on their distinct collection_id only.
        query = session.query(
            database_class.collection_id.label('id')).distinct()
    else:
        query = session.query(database_class)
    for param, param_value in search_params.items():
        # nested param
        if type(param_value) is dict:
            # Resolve which table the nested parameter lives in via the
            # foreign key whose column_name matches the parameter.
            foreign_keys = database_class.__table__.foreign_keys
            for fk in foreign_keys:
                if fk.info['column_name'] == param:
                    fk_table_name = fk.column.table.name
                    continue
            # NOTE(review): the `continue` above keeps scanning the
            # remaining foreign keys; `break` was likely intended. If no
            # foreign key matches, `fk_table_name` is unbound here -
            # confirm callers always pass valid nested params.
            nested_param_db_class = get_database_class(fk_table_name)
            # build query
            for attr, value in param_value.items():
                # NOTE(review): the join is re-applied for every attr of
                # the nested param - presumably redundant; verify.
                query = query.join(nested_param_db_class)
                try:
                    query = query.filter(
                        getattr(nested_param_db_class, attr) == value)
                except AttributeError:
                    raise InvalidSearchParameter(f'{param}[{attr}]')
        else:
            value = search_params[param]
            try:
                query = query.filter(
                    getattr(database_class, param) == value)
            except AttributeError:
                raise InvalidSearchParameter(f'{param}')
    filtered_instances = query.all()
    return filtered_instances
def export_history(app: MinimalManagerApp, sa_session: scoped_session,
                   job_manager: JobManager, store_directory, history_id,
                   job_id, include_hidden=False, include_deleted=False):
    """Export a history into a directory store and enqueue the export job.

    :param app: minimal manager application
    :param sa_session: sqlalchemy scoped session
    :param job_manager: manager used to enqueue the export job
    :param store_directory: destination directory for the export
    :param history_id: id of the history to export
    :param job_id: id of the job tracking the export
    :param include_hidden: include hidden datasets in the export
    :param include_deleted: include deleted datasets in the export
    """
    history = sa_session.query(model.History).get(history_id)
    export_store = model.store.DirectoryModelExportStore(
        store_directory, app=app, export_files="symlink")
    with export_store:
        export_store.export_history(
            history,
            include_hidden=include_hidden,
            include_deleted=include_deleted)
    # Reset the job to NEW so the manager picks it up for execution.
    job = sa_session.query(model.Job).get(job_id)
    job.state = model.Job.states.NEW
    sa_session.flush()
    job_manager.enqueue(job)
def test_broadcast_node(
        fx_server: WSGIServer,
        fx_session: scoped_session,
        fx_other_server: WSGIServer,
        fx_other_session: Session,
):
    """A successful node broadcast adds the peer and refreshes its stamp."""
    started_at = datetime.datetime.utcnow()
    local_node = Node(url=fx_server.url, last_connected_at=started_at)
    remote_node = Node(url=fx_other_server.url,
                       last_connected_at=datetime.datetime.utcnow())
    fx_session.add(local_node)
    fx_session.commit()
    fx_other_session.add(remote_node)
    fx_other_session.commit()
    # The remote node is not known locally before the broadcast.
    assert not fx_session.query(Node).filter(
        Node.url == remote_node.url).first()
    broadcast_node(serialized={'url': fx_other_server.url})
    # Afterwards it is registered and the peer's timestamp moved forward.
    assert fx_session.query(Node).filter(
        Node.url == remote_node.url).first()
    assert local_node.last_connected_at > started_at
def insert_single(object_: Dict[str, Any], session: scoped_session) -> Any:
    """Insert instance of classes with single objects.

    :param object_: object to be inserted
    :param session: sqlalchemy scoped session
    :return:

    Raises:
        ClassNotFound: If `type_` does not represt a valid/defined RDFClass.
        Instance: If an Instance of type `type_` already exists.
    """
    type_ = object_["@type"]
    database_class = get_database_class(type_)
    # A "single" class may hold at most one row; any existing row blocks
    # the insert.
    existing_rows = session.query(database_class).all()
    if existing_rows:
        raise InstanceExists(type_)
    return insert(object_, session=session)
def insert_classes(classes: List[Dict[str, Any]],
                   session: scoped_session) -> Optional[Any]:
    """Insert all the classes as defined in the APIDocumentation into DB."""
    def _is_new(name: str) -> bool:
        # Skip class names already present in the RDFClass table.
        return not session.query(exists().where(
            RDFClass.name == name)).scalar()

    # Classes may carry their name under "label" or "title"; collect the
    # "label" ones first, then the "title" ones.
    class_list = [
        RDFClass(name=class_["label"].strip('.')) for class_ in classes
        if "label" in class_ and _is_new(class_["label"].strip('.'))
    ]
    class_list += [
        RDFClass(name=class_["title"].strip('.')) for class_ in classes
        if "title" in class_ and _is_new(class_["title"].strip('.'))
    ]
    session.add_all(class_list)
    session.commit()
    return None
def _statistics_exists(
    session: scoped_session,
    table: type[Statistics | StatisticsShortTerm],
    metadata_id: int,
    start: datetime,
) -> int | None:
    """Return id if a statistics entry already exists."""
    row = (session.query(table.id).filter(
        (table.metadata_id == metadata_id) & (table.start == start)).first())
    if row is None:
        return None
    return row["id"]
def _get_metadata_ids(hass: HomeAssistant, session: scoped_session,
                      statistic_ids: list[str]) -> list[str]:
    """Resolve metadata_id for a list of statistic_ids."""
    baked_query = hass.data[STATISTICS_META_BAKERY](
        lambda session: session.query(*QUERY_STATISTIC_META))
    baked_query += lambda q: q.filter(
        StatisticsMeta.statistic_id.in_(bindparam("statistic_ids")))
    rows = execute(baked_query(session).params(statistic_ids=statistic_ids))
    if not rows:
        return []
    # Each row is (id, statistic_id, unit); only the id is needed.
    return [metadata_id for metadata_id, _, _ in rows]