def objectValues(self, spec=None):
    # Return the actual subobjects of the current object, lazily.
    # When 'spec' is given, restrict the result to objects whose
    # meta_type matches 'spec'.
    if spec is None:
        ids = self._tree.keys()
    else:
        ids = self.objectIds(spec)
    return LazyMap(self._getOb, ids)
def objectItems(self, spec=None):
    # Return lazy (id, subobject) pairs for the current object.
    # When 'spec' is given, restrict the result to objects whose
    # meta_type matches 'spec'.
    def pair(id, _getOb=self._getOb):
        # bind the getter at definition time, as the original lambda did
        return (id, _getOb(id))
    if spec is None:
        return LazyMap(pair, self._tree.keys())
    return LazyMap(pair, self.objectIds(spec))
def searchResults(self, REQUEST=None, check_perms=False, **kw):
    """Run a catalog search, optionally applying security filters.

    REQUEST may be a dict-like query; keyword arguments are merged on
    top of it.  When check_perms is true, the query is augmented with
    allowedRolesAndUsers and (unless inactive content is visible to the
    current user) an effectiveRange restriction.

    In DISABLE_MODE the call is delegated unchanged to the patched
    (original) catalog.  On a query error, DUAL_MODE falls back to the
    patched catalog; otherwise an empty LazyMap is returned.
    """
    mode = self.mode
    if mode == DISABLE_MODE:
        return self.patched.searchResults(REQUEST, **kw)
    if isinstance(REQUEST, dict):
        query = REQUEST.copy()
    else:
        query = {}
    query.update(kw)
    if check_perms:
        show_inactive = query.get('show_inactive', False)
        # presence of the key in REQUEST also enables inactive content
        if isinstance(REQUEST, dict) and not show_inactive:
            show_inactive = 'show_inactive' in REQUEST
        user = _getAuthenticatedUser(self.catalogtool)
        query['allowedRolesAndUsers'] = \
            self.catalogtool._listAllowedRolesAndUsers(user)
        if not show_inactive and not _checkPermission(
                AccessInactivePortalContent, self.catalogtool):
            query['effectiveRange'] = DateTime()
    # keep a pristine copy for error reporting; self.query may mutate it
    orig_query = query.copy()
    try:
        return self.query(query)
    except Exception:
        # was a bare `except:`, which also swallowed SystemExit and
        # KeyboardInterrupt; narrowed to Exception
        info("Error running Query: %s\n%s" % (
            repr(orig_query), traceback.format_exc()))
        if mode == DUAL_MODE:
            # fall back now...
            return self.patched.searchResults(REQUEST, **kw)
        else:
            return LazyMap(BrainFactory(self.catalog), [], 0)
def test_batch_lazy_map(self):
    # A LazyMap reporting more results than it holds must still batch
    # correctly on the last real page.
    def identity(key):
        return key

    lazy = LazyMap(identity, range(80, 90), actual_result_count=95)
    batch = Batch(lazy, size=10, start=80)
    self.assertEqual(list(batch),
                     [80, 81, 82, 83, 84, 85, 86, 87, 88, 89])
    self.assertEqual(batch.numpages, 10)
    self.assertEqual(batch.pagenumber, 9)
    self.assertEqual(batch.navlist, range(6, 11))
    self.assertEqual(batch.leapback, [])
    self.assertEqual(batch.prevlist, range(6, 9))
    self.assertEqual(batch.previous.length, 10)
    # only 5 of the 95 claimed results remain after this page
    self.assertEqual(batch.next.length, 5)
    self.assertEqual(batch.pageurl({}), 'b_start:int=80')
    self.assertListEqual(list(batch.prevurls({})), [
        (6, 'b_start:int=50'),
        (7, 'b_start:int=60'),
        (8, 'b_start:int=70'),
    ])
    self.assertListEqual(
        list(batch.nexturls({})),
        [(10, 'b_start:int=90')],
    )
def remap_result(result):
    """Remap brain elements in a lazy sequence to a new lazy-evaluated
    sequence containing item info dicts.
    """
    if isinstance(result, LazyCat):
        # likely an empty result
        return result
    brain_getter = result._func

    def to_info(rid):
        return iteminfo(brain_getter(rid))

    # LazyMap keeps its ids in _seq; fall back to the _data mapping keys
    rids = getattr(result, '_seq', result._data.keys())
    return LazyMap(to_info, rids)
def query(self, query):
    # Normalize and assemble the plone query, run it against
    # elasticsearch, and wrap the hits as lazily-built catalog brains.
    assembler = QueryAssembler(self.catalogtool)
    dquery, sort = assembler.normalize(query)
    equery = assembler(dquery)
    raw = self.conn.search(equery, self.catalogsid, self.catalogtype,
                           sort=sort, fields="_metadata")
    count = raw.count()
    wrapped = ResultWrapper(raw, count=count)
    return LazyMap(BrainFactory(self.catalog), wrapped, count)
def search(self, query, factory=None, **query_params):
    """Run *query* and map each hit through *factory*.

    @param query: dict
        The plone query
    @param factory: function(result: dict): any
        The factory that maps each elastic search result.
        By default, get the plone catalog brain.
    @param query_params:
        Parameters to pass to the search method
        'stored_fields': the list of fields to get from stored source
    @return: LazyMap
    """
    result = ElasticResult(self, query, **query_params)
    # falsy factory falls back to catalog brains, as before
    return LazyMap(factory or BrainFactory(self.catalog),
                   result, result.count)
def query(self, value=None, limit=None, merge=1):
    """ See IRecentItemsIndex.

    Returns the most recent items, newest first, optionally limited to
    *limit* entries.  With merge=1 the result is a LazyMap of catalog
    brains; with merge=0 it is a list of (date, rid, getitem) triples
    as expected by mergeResults().
    """
    # the catalog is the acquisition parent of this index
    catalog = aq_parent(aq_inner(self))
    if value is None and self.field_name is not None:
        # Query all values
        value = list(self._value2items.keys())
    elif value is not None and self.field_name is None:
        # Ignore value given if there is no classifier field
        value = None
    if isinstance(value, (types.TupleType, types.ListType)):
        # Query for multiple values: gather the (date, rid) pairs of
        # every requested classifier value, then sort by date
        results = []
        for fieldval in value:
            try:
                itempairs = self._value2items[fieldval].keys()
            except KeyError:
                # unknown classifier values are simply skipped
                pass
            else:
                results.extend(itempairs)
        results.sort()
        if merge:
            results = [rid for date, rid in results]
        else:
            # Create triples expected by mergeResults()
            results = [(date, rid, catalog.__getitem__)
                       for date, rid in results]
    else:
        # Query for single value
        try:
            items = self._value2items[value]
        except KeyError:
            results = []
        else:
            if merge:
                results = items.values()
            else:
                # Create triples expected by mergeResults()
                results = [(date, rid, catalog.__getitem__)
                           for date, rid in items.keys()]
    # entries are date-ascending; reverse so newest come first,
    # then apply the limit
    results.reverse()
    if limit is not None:
        results = results[:limit]
    if merge:
        return LazyMap(catalog.__getitem__, results, len(results))
    else:
        return results
def zcatalog_fhir_search(context, query_string=None, params=None,
                         bundle_response=False, bundle_as_dict=False):
    """Run a FHIR search through the ZCatalog-backed elasticsearch engine.

    Returns a LazyMap of catalog brains, or — when bundle_response is
    true — the engine result wrapped as a FHIR bundle (a dict when
    bundle_as_dict is set).
    """
    query_result = Search(
        context=context, query_string=query_string, params=params).build()
    query_copy = query_result._query.clone()
    if context.unrestricted is False:
        context.engine.build_security_query(query_copy)
    compiled = context.engine.dialect.compile(
        query=query_copy,
        calculate_field_index_name=context.engine.calculate_field_index_name,
        get_mapping=context.engine.get_mapping,
    )
    # strip paging/source controls — the lazy result handles batching
    for unwanted in ("from", "scroll", "_source", "size"):
        compiled.pop(unwanted, None)
    result = ElasticResult(
        context.engine.es_catalog, compiled, stored_fields="path.path")
    factory = BrainFactory(context.engine.es_catalog.catalog)
    lazy_maps = LazyMap(factory, result, result.count)
    if bundle_response is False:
        return lazy_maps
    engine_result = build_engine_result(lazy_maps)
    return context.engine.wrapped_with_bundle(
        engine_result, as_json=bundle_as_dict)
def _optimizedQuery(self, uid, indexname, relationship):
    """query reference catalog for object matching the info we are
    given, returns brains
    """
    if not uid:  # pragma: no cover
        return []
    _catalog = self._catalog
    indexes = _catalog.indexes
    # First get one or multiple record ids for the source/target uid index
    record_ids = indexes[indexname]._index.get(uid, None)
    if record_ids is None:
        return []
    if isinstance(record_ids, int):
        record_ids = [record_ids]
    else:
        record_ids = list(record_ids)
    # As a second step make sure we only get references of the right type.
    # The unindex holds data of the type: [(-311870037, 'relatesTo')]
    # The index holds data like: [('relatesTo', -311870037)]
    if relationship is None:
        matching = record_ids
    else:
        wanted = relationship
        if isinstance(wanted, str):
            wanted = set([wanted])
        rel_unindex_get = indexes['relationship']._unindex.get
        matching = set()
        for rid in record_ids:
            rels = rel_unindex_get(rid, None)
            if rels is None:
                rels = set()
            elif isinstance(rels, str):
                rels = set([rels])
            # keep the record if it carries any wanted relationship
            if not rels.isdisjoint(wanted):
                matching.add(rid)
    # Create brains
    return LazyMap(_catalog.__getitem__, list(matching), len(matching))
def _queryBrains(self, indexname):
    """Query the reference catalog for translation references of this
    object via *indexname*, returning brains as a LazyMap.
    """
    value = self.UID()
    if value is None:
        return []
    site = getSite()
    tool = getToolByName(site, REFERENCE_CATALOG)
    _catalog = tool._catalog
    indexes = _catalog.indexes
    # First get one or multiple record ids for the source/target uid index
    rids = indexes[indexname]._index.get(value, None)
    if rids is None:
        return []
    elif isinstance(rids, int):
        rids = [rids]
    else:
        rids = list(rids)
    # As a second step make sure we only get references of the right type
    # The unindex holds data of the type: [(-311870037, 'translationOf')]
    # The index holds data like: [('translationOf', -311870037)]
    # In a LinguaPlone site the index will have all content items indexed
    # so querying it is bound to be extremely slow
    rel_unindex_get = indexes['relationship']._unindex.get
    result_rids = set()
    for r in rids:
        rels = rel_unindex_get(r, None)
        if rels is None:
            # bug fix: previously fell through to `RELATIONSHIP in rels`
            # and raised TypeError for records missing from the unindex
            continue
        if isinstance(rels, str):
            if rels == RELATIONSHIP:
                result_rids.add(r)
        elif RELATIONSHIP in rels:
            result_rids.add(r)
    # Create brains
    brains = LazyMap(_catalog.__getitem__, list(result_rids),
                     len(result_rids))
    return brains
def eval(catalog, query, sortSpecs=(), withSortValues=_notPassed):
    '''evaluate *query* for *catalog*; sort according to *sortSpecs*.

    *sortSpecs* is a sequence of sort specifications.  A sort spec is
    either a ranking spec, an index name or a pair index name + sort
    direction ('asc/desc').

    If *withSortValues* is not passed, it is set to 'True' when
    *sortSpecs* contains a ranking spec; otherwise, it is set to
    'False'.

    If *withSortValues*, the catalog brains 'data_record_score_' is
    abused to communicate the sort value (a tuple with one component
    per sort spec).  'data_record_normalized_score_' is set to 'None'
    in this case.
    '''
    cat = catalog._catalog
    hits = _eval(query, cat)
    if not hits:
        return LazyCat(())
    sortSpecs, withSortValues = _normSortSpecs(
        sortSpecs, withSortValues, cat)
    if sortSpecs or withSortValues:
        hits = _sort(hits, sortSpecs, withSortValues)
    if hasattr(hits, 'keys'):
        # a TreeSet does not have '__getitem__'
        hits = hits.keys()
    return LazyMap(cat.__getitem__, hits)
def search(self, query):
    # Execute *query* against elasticsearch and expose the hits as
    # lazily-constructed catalog brains.
    hits = ElasticResult(self, query)
    return LazyMap(BrainFactory(self.catalog), hits, hits.count)
def values(self):
    # Lazily map every id stored in the tree to its subobject.
    getter = self._getOb
    return LazyMap(getter, self._tree.keys())
def items(self):
    # Lazily build (id, subobject) pairs for every id in the tree.
    def pair(id, _getOb=self._getOb):
        # bind the getter at definition time, as the original lambda did
        return (id, _getOb(id))
    return LazyMap(pair, self._tree.keys())
def not_indexed_results(self, catalog, index_name):
    """ call this for not indexed results """
    missing, length = self.missing_entries_for_index(catalog, index_name)
    getter = catalog._catalog.__getitem__
    return LazyMap(getter, missing.keys(), length)
def _createLMap(self, mapfunc, *seq):
    # Flatten all given sequences into one list and wrap it in a
    # LazyMap driven by *mapfunc*.
    from Products.ZCatalog.Lazy import LazyMap
    combined = [item for subseq in seq for item in subseq]
    return LazyMap(mapfunc, combined)
def results(self, name, term, columns, start, rows, sort_on, sort_order,
            filters, depth, facets):
    """Run a solr listing query named *name* and return a pair of
    (LazyMap of OGSolrDocument, facet counts dict).

    Returns [] when *name* is not a known listing filter.
    """
    if name not in FILTERS:
        return []
    # default: match everything; narrow down with the search term below
    query = '*:*'
    if term:
        # each whitespace-separated term must prefix-match one of the
        # searchable fields; terms are AND-combined
        pattern = (u'(Title:{term}* OR SearchableText:{term}*'
                   u' OR metadata:{term}*)')
        term_queries = [
            pattern.format(term=escape(safe_unicode(t)))
            for t in term.split()
        ]
        query = u' AND '.join(term_queries)
    filter_queries = []
    # Exclude searchroot
    context_uid = IUUID(self.context, None)
    if context_uid:
        filter_queries.append(u'-UID:%s' % context_uid)
    if 'trashed' not in filters:
        filter_queries.append(u'trashed:false')
    filter_queries.extend(FILTERS[name])
    # restrict to the subtree below the current context
    filter_queries.append(u'path_parent:{}'.format(
        escape('/'.join(self.context.getPhysicalPath()))))
    if depth > 0:
        # limit how deep below the context results may come from
        context_depth = get_path_depth(self.context)
        max_path_depth = context_depth + depth
        filter_queries.append(
            u'path_depth:[* TO {}]'.format(max_path_depth))
    for key, value in filters.items():
        if key not in FIELDS:
            continue
        # map the UI field name to its solr index name
        key = FIELDS[key][0]
        if key is None:
            continue
        if key in DATE_INDEXES:
            # It seems solr needs date filters unescaped, hence we
            # only escape the other filter values
            value = self.daterange_filter(value)
        elif isinstance(value, list):
            value = map(escape, value)
            value = u' OR '.join(value)
        else:
            value = escape(value)
        if value is not None:
            filter_queries.append(u'{}:({})'.format(escape(key), value))
    sort = sort_on
    if sort:
        if sort_order in ['descending', 'reverse']:
            sort += ' desc'
        else:
            sort += ' asc'
    params = {
        'fl': self.field_list(columns),
        'q.op': 'AND',
    }
    # NOTE(review): filter(None, map(...)) must yield a list here
    # (Python 2 semantics) — confirm before porting to Python 3
    facet_fields = filter(None, map(self.field_name_to_index, facets))
    if facet_fields:
        params["facet"] = "true"
        params["facet.mincount"] = 1
        params["facet.field"] = facet_fields
    solr = getUtility(ISolrSearch)
    resp = solr.search(query=query, filters=filter_queries, start=start,
                       rows=rows, sort=sort, **params)
    # We map the index names back to the field names for the facets
    facet_counts = {}
    for field in facets:
        index_name = self.field_name_to_index(field)
        if index_name is None or index_name not in resp.facets:
            continue
        facet_counts[field] = resp.facets[index_name]
    # pad with None so the batch start offset lines up with the docs
    return (LazyMap(
        OGSolrDocument,
        start * [None] + resp.docs,
        actual_result_count=resp.num_found,
    ), facet_counts)