def get_ordered_results(cls, qs: Select, order_by: str, order_direction: str) -> Select:
    # Only apply ordering when order_by names a real column on the model's
    # table; unknown field names are silently ignored.
    if order_by and order_direction and hasattr(cls.model_class.c, order_by):
        field = getattr(cls.model_class.c, order_by)
        if order_direction == "desc":
            qs = qs.order_by(field.desc())
        else:
            qs = qs.order_by(field)
    return qs
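# Usage sketch for get_ordered_results (a hedged example, not from the
# original source): the `users` table and `UserResource` class below are
# hypothetical, and SQLAlchemy 1.4+ select() syntax is assumed. In the real
# code the function is presumably bound as a classmethod; here `cls` is
# passed explicitly.
from sqlalchemy import Column, Integer, MetaData, String, Table, select

metadata = MetaData()
users = Table(
    "users", metadata,
    Column("id", Integer, primary_key=True),
    Column("name", String),
)

class UserResource:
    model_class = users  # exposes users.c for the hasattr() check

qs = select(users)
# Orders by users.name DESC; an unrecognized order_by leaves qs unchanged.
qs = get_ordered_results(UserResource, qs, "name", "desc")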
def _filter_storages(self, env: Environment, query: Select) -> Select:
    if not self._filters.storage_urls:
        return query
    return query.join(StoredDataBlockMetadata).filter(
        StoredDataBlockMetadata.storage_url.in_(
            self._filters.storage_urls)  # type: ignore
    )
def _filter_schemas(self, env: Environment, query: Select) -> Select:
    if not self._filters.schema_keys:
        return query
    return query.filter(
        DataBlockMetadata.nominal_schema_key.in_(
            [d.key for d in self.get_schemas(env)])  # type: ignore
    )
def _filter_unprocessed(
    self,
    env: Environment,
    query: Select,
) -> Select:
    if not self._filters.unprocessed_by_node_key:
        return query
    if self._filters.allow_cycle:
        # Only exclude blocks processed as INPUT
        filter_clause = and_(
            DataBlockLog.direction == Direction.INPUT,
            DataFunctionLog.node_key == self._filters.unprocessed_by_node_key,
        )
    else:
        # No block cycles allowed
        # Exclude blocks processed as INPUT and blocks outputted
        filter_clause = (
            DataFunctionLog.node_key == self._filters.unprocessed_by_node_key
        )
    already_processed_drs = (
        Query(DataBlockLog.data_block_id)
        .join(DataFunctionLog)
        .filter(filter_clause)
        .filter(DataBlockLog.invalidated == False)  # noqa
        .distinct()
    )
    return query.filter(
        not_(DataBlockMetadata.id.in_(already_processed_drs)))
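# A minimal, generic sketch of the anti-join pattern _filter_unprocessed
# relies on, with hypothetical `blocks` and `block_logs` tables standing in
# for DataBlockMetadata and DataBlockLog: ids already logged for a node are
# gathered in a subquery and excluded with not_(... .in_(...)).
from sqlalchemy import (Boolean, Column, Integer, MetaData, String, Table,
                        and_, not_, select)

md = MetaData()
blocks = Table("blocks", md, Column("id", Integer, primary_key=True))
block_logs = Table(
    "block_logs", md,
    Column("data_block_id", Integer),
    Column("node_key", String),
    Column("invalidated", Boolean),
)

already_processed = (
    select(block_logs.c.data_block_id)
    .where(and_(block_logs.c.node_key == "my_node",
                block_logs.c.invalidated == False))  # noqa: E712
    .distinct()
)
unprocessed = select(blocks).where(not_(blocks.c.id.in_(already_processed)))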
def _ignore_domains_filter(query: Select) -> Select:
    """Add a filter to ignore domains we do not fetch history for."""
    return query.filter(
        and_(*[
            ~States.entity_id.like(entity_domain)
            for entity_domain in IGNORE_DOMAINS_ENTITY_ID_LIKE
        ]))
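# Illustrative only: IGNORE_DOMAINS_ENTITY_ID_LIKE is defined elsewhere in
# the source project, and the patterns below are hypothetical stand-ins
# showing the shape the filter expects. Each entry becomes a negated LIKE,
# ANDed together, rendering roughly as:
#   WHERE states.entity_id NOT LIKE 'automation.%'
#     AND states.entity_id NOT LIKE 'script.%'
IGNORE_DOMAINS_ENTITY_ID_LIKE = ["automation.%", "script.%"]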
async def fetch_multiple_records_with_query(
    query: Select,
    offset: int = 0,
    limit: int = STD_NUMBER_OF_RESULT_AT_A_TIME,
) -> typing.Optional[typing.List[typing.Mapping]]:
    """Fetch multiple records from the database, based on the query provided."""
    if offset is None:
        raise ValueError("offset must be an integer.")
    query = query.limit(limit).offset(offset)
    return await database.fetch_all(query=query)
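# Usage sketch (hedged): paging through all rows with the helper above,
# assuming an encode/databases `database` instance and the hypothetical
# `users` table from the earlier example; the page size of 50 is arbitrary.
from sqlalchemy import select

async def iter_all_users():
    offset, page_size = 0, 50
    while True:
        rows = await fetch_multiple_records_with_query(
            select(users), offset=offset, limit=page_size)
        if not rows:
            break  # no more records
        ...  # process this page of rows
        offset += page_size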
def _filter_inputs(
    self,
    env: Environment,
    query: Select,
) -> Select:
    if not self._filters.node_keys:
        return query
    eligible_input_drs = (
        Query(DataBlockLog.data_block_id)
        .join(SnapLog)
        .filter(
            DataBlockLog.direction == Direction.OUTPUT,
            SnapLog.node_key.in_(self._filters.node_keys),
        )
        .filter(DataBlockLog.invalidated == False)  # noqa
        .distinct()
    )
    return query.filter(DataBlockMetadata.id.in_(eligible_input_drs))
def _insertToDb(dispIds, gridCompiledQueueItems, gridKeyIndexesByDispId,
                locationCompiledQueueItems, locationIndexByDispId, queueIds):
    """ Insert to DB

    This method provides the DB inserts and deletes after the data has
    been calculated.

    """
    startTime = datetime.now(pytz.utc)

    dispBaseTable = DispBase.__table__
    dispQueueTable = DispIndexerQueue.__table__
    gridKeyIndexTable = GridKeyIndex.__table__
    gridQueueTable = GridKeyCompilerQueue.__table__
    locationIndexTable = LocationIndex.__table__
    locationIndexCompilerQueueTable = LocationIndexCompilerQueue.__table__

    engine = CeleryDbConn.getDbEngine()
    conn = engine.connect()
    transaction = conn.begin()
    try:
        lockedDispIds = conn.execute(
            Select(whereclause=dispBaseTable.c.id.in_(dispIds),
                   columns=[dispBaseTable.c.id],
                   for_update=True))
        lockedDispIds = [o[0] for o in lockedDispIds]

        # Ensure that the Disps exist, otherwise we get an integrity error.
        gridKeyIndexes = []
        locationIndexes = []
        for dispId in lockedDispIds:
            gridKeyIndexes.extend(gridKeyIndexesByDispId[dispId])
            if dispId in locationIndexByDispId:
                locationIndexes.append(locationIndexByDispId[dispId])

        # Delete existing items in the location and grid index

        # Grid index
        conn.execute(
            gridKeyIndexTable.delete(gridKeyIndexTable.c.dispId.in_(dispIds)))

        # Location index
        conn.execute(
            locationIndexTable.delete(
                locationIndexTable.c.dispId.in_(dispIds)))

        # ---------------
        # Insert the Grid Key indexes
        if gridKeyIndexes:
            conn.execute(gridKeyIndexTable.insert(), gridKeyIndexes)

        # Directly insert into the Grid compiler queue.
        if gridCompiledQueueItems:
            conn.execute(gridQueueTable.insert(), [
                dict(coordSetId=i.coordSetId, gridKey=i.gridKey)
                for i in gridCompiledQueueItems
            ])

        # ---------------
        # Insert the Location indexes
        if locationIndexes:
            conn.execute(locationIndexTable.insert(), locationIndexes)

        # Directly insert into the Location compiler queue.
        if locationCompiledQueueItems:
            conn.execute(locationIndexCompilerQueueTable.insert(), [
                dict(modelSetId=i.modelSetId, indexBucket=i.indexBucket)
                for i in locationCompiledQueueItems
            ])

        # ---------------
        # Finally, delete the disp queue items
        conn.execute(
            dispQueueTable.delete(dispQueueTable.c.id.in_(queueIds)))

        transaction.commit()
        logger.debug("Committed %s GridKeyIndex in %s",
                     len(gridKeyIndexes),
                     (datetime.now(pytz.utc) - startTime))

    except Exception:
        # Roll back the open transaction before re-raising.
        transaction.rollback()
        raise

    finally:
        conn.close()
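# Note on the locking query: Select(columns=..., whereclause=...,
# for_update=True) is legacy SQLAlchemy 1.x constructor API. A sketch of the
# 1.4/2.0-style equivalent, reusing the dispBaseTable, dispIds and conn names
# from the function above:
from sqlalchemy import select

lockStmt = (select(dispBaseTable.c.id)
            .where(dispBaseTable.c.id.in_(dispIds))
            .with_for_update())
lockedDispIds = [row[0] for row in conn.execute(lockStmt)]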
def count(self, stmt: Select, filter_env: bool = True) -> int:
    stmt = select(func.count()).select_from(stmt.subquery())
    return self.execute(stmt).scalar_one()
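# Why count() wraps the statement: counting directly over a Select that
# carries joins, DISTINCT, or LIMIT can change its meaning, so the statement
# is nested as a subquery and counted from the outside. A standalone sketch
# with a hypothetical `items` table:
from sqlalchemy import Column, Integer, MetaData, Table, func, select

md = MetaData()
items = Table("items", md, Column("id", Integer, primary_key=True))

stmt = select(items).limit(10)
count_stmt = select(func.count()).select_from(stmt.subquery())
# Renders roughly as:
#   SELECT count(*) FROM (SELECT items.id FROM items LIMIT :n) AS anon_1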
async def select_by_id(cls, id_: str):
    return await db.fetch_one(query=Select([cls]).where(cls.id == id_))
async def select_by_passport_number(cls, number: str):
    return await db.fetch_one(
        query=Select([cls]).where(cls.passport_number == number))
def _filter_data_block(self, env: Environment, query: Select) -> Select:
    if not self._filters.data_block_id:
        return query
    return query.filter(
        DataBlockMetadata.id == self._filters.data_block_id)
def get_default_ordering(cls, qs: Select) -> Select:
    return qs.order_by("id")