async def test_async_hit_extraction_with_index(async_engine):
    """extract_hits() with indexed and Skip()/Take() path selectors."""
    result = EngineResultBody()
    selects = [
        "organization_resource.name.count()",
        "organization_resource.telecom[0]",
        "organization_resource.address.Skip(0).Take(0).line[0]",
    ]
    async_engine.extract_hits(selects, hits=[DATASET_1], container=result)

    name_length = len("Burgers University Medical Center")
    assert result[0][0] == name_length
    assert (
        result[0][1]
        == DATASET_1["_source"]["organization_resource"]["telecom"][0]
    )
    assert (
        result[0][2]
        == DATASET_1["_source"]["organization_resource"]["address"][1]["line"][0]
    )

    # Failed/missing case: the extracted cell falls back to None.
    result = EngineResultBody()
    selects = [
        "organization_resource.name.count()",
        "organization_resource.address.Skip(0).Take(1).line[0]",
    ]
    async_engine.extract_hits(selects, [DATASET_1], result)
    assert result[0][1] is None
def test_hit_extraction(engine):
    """Basic hit extraction into an EngineResultBody container."""
    result = EngineResultBody()
    selects = ["organization_resource.name", "organization_resource.address"]
    engine.extract_hits(selects, hits=[DATASET_1], container=result)
    assert result[0][0] == "Burgers University Medical Center"
def process_raw_result(self, rawresult, selects, query_type):
    """Normalize a raw Elasticsearch response into an EngineResult."""
    if query_type == EngineQueryType.COUNT:
        total = rawresult["count"]
        source_filters = []
    # keep compatibility across Elasticsearch versions
    # (``hits.total`` may be a dict or a plain int)
    elif isinstance(rawresult["hits"]["total"], dict):
        total = rawresult["hits"]["total"]["value"]
        source_filters = self._get_source_filters(selects)
    else:
        total = rawresult["hits"]["total"]
        source_filters = self._get_source_filters(selects)

    result = EngineResult(
        header=EngineResultHeader(total=total),
        body=EngineResultBody(),
        scroll_id=rawresult.get("_scroll_id"),
    )
    # extract primary data
    if query_type != EngineQueryType.COUNT:
        self.extract_hits(source_filters, rawresult["hits"]["hits"], result.body)

    return result
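# A minimal, self-contained sketch (not part of the engine; ``_normalize_total``
# is a hypothetical helper) of the total-count normalization the branches above
# perform: a count query returns only ``count``, newer Elasticsearch releases
# report ``hits.total`` as a dict, older ones as a plain integer.
def _normalize_total(rawresult, is_count_query=False):
    if is_count_query:
        return rawresult["count"]
    total = rawresult["hits"]["total"]
    return total["value"] if isinstance(total, dict) else total


assert _normalize_total({"count": 54}, is_count_query=True) == 54
assert _normalize_total({"hits": {"total": {"value": 54}}}) == 54
assert _normalize_total({"hits": {"total": 54}}) == 54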
def build_engine_result(lazy_maps):
    """Build an EngineResult from lazily mapped catalog brains."""
    total = 0
    body = EngineResultBody()
    for brain in lazy_maps:
        row = EngineResultRow()
        extractor = queryMultiAdapter((brain.getObject(),), IFhirResourceExtractor)
        assert extractor
        val = extractor()
        if IFhirResourceValue.providedBy(val):
            val = val.foreground_origin()
        row.append(val)
        body.append(row)
        total += 1

    result = EngineResult(header=EngineResultHeader(total=total), body=body)
    return result
async def process_raw_result(self, rawresult, selects, query_type):
    """Normalize a raw Elasticsearch response, scrolling until all hits are consumed."""
    if query_type == EngineQueryType.COUNT:
        total = rawresult["count"]
        source_filters = []
    # keep compatibility across Elasticsearch versions
    # (``hits.total`` may be a dict or a plain int)
    elif isinstance(rawresult["hits"]["total"], dict):
        total = rawresult["hits"]["total"]["value"]
        source_filters = self._get_source_filters(selects)
    else:
        total = rawresult["hits"]["total"]
        source_filters = self._get_source_filters(selects)

    result = EngineResult(
        header=EngineResultHeader(total=total), body=EngineResultBody()
    )
    # extract primary data
    if query_type != EngineQueryType.COUNT:
        self.extract_hits(source_filters, rawresult["hits"]["hits"], result.body)

    if "_scroll_id" in rawresult and result.header.total > len(
        rawresult["hits"]["hits"]
    ):
        # not every hit fits in the first page: drain the scroll cursor
        consumed = len(rawresult["hits"]["hits"])
        while result.header.total > consumed:
            # xxx: don't know yet whether ``from_``/``size`` would be a better solution
            raw_res = await self.connection.scroll(rawresult["_scroll_id"])
            if len(raw_res["hits"]["hits"]) == 0:
                break
            self.extract_hits(source_filters, raw_res["hits"]["hits"], result.body)
            consumed += len(raw_res["hits"]["hits"])
            if result.header.total <= consumed:
                break

    return result
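# A minimal, runnable sketch (``drain_scroll`` and ``fake_scroll`` are hypothetical,
# not engine API) of the scroll-draining pattern used above: keep requesting pages
# until the reported total is consumed or an empty page comes back.
import asyncio


async def drain_scroll(first_page, total, scroll):
    hits = list(first_page)
    while total > len(hits):
        page = await scroll()
        if not page:
            break
        hits.extend(page)
    return hits


async def _demo():
    pages = iter([["c", "d"], ["e"], []])

    async def fake_scroll():
        return next(pages)

    assert await drain_scroll(["a", "b"], 5, fake_scroll) == ["a", "b", "c", "d", "e"]


asyncio.run(_demo())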