def batch(self):
    """Return data rows for the order management table, batched.

    Rows are sorted ascending by the per-order sort key returned from
    ``_get_order_data`` (stable for equal keys), reversed when
    ``self.reverse`` is set, then wrapped in a ``Batch`` for the
    current page.
    """
    # use only selected fields in the table columns
    columns = self.order_management_columns()
    # Collect (data, sort_key) pairs for every registered order.
    decorated = [
        self._get_order_data(order_nr, columns)
        for order_nr in self.registry.getOrders()
    ]
    # Stable ascending sort on the key only (the data dicts are not
    # comparable) -- replaces the original O(n^2) insertion sort with
    # the identical ordering in O(n log n).
    decorated.sort(key=lambda pair: pair[1])
    orders = [data for data, _sort in decorated]
    if self.reverse:
        orders.reverse()
    pagesize = self.pagesize
    if NEW_BATCHING:
        return Batch(orders, size=pagesize,
                     start=self.pagenumber * pagesize)
    return Batch(orders, pagesize=pagesize, pagenumber=self.pagenumber)
def execute(self, query, secure=True, **kw):
    '''Search the portal catalog with *query* and return a Batch.

    ``batch_start``/``batch_step`` are popped from *query* and control
    the returned batch window.  When ``secure`` is false the
    unrestricted catalog search is used.  An optional ``sort`` keyword
    is translated into catalog ``sort_on``/``sort_order`` values.
    '''
    start = query.pop('batch_start', 0)
    step = query.pop('batch_step', 100)
    catalog = api.portal.get_tool(name='portal_catalog')
    if secure:
        search = catalog.searchResults
    else:
        search = catalog.unrestrictedSearchResults
    sort = kw.get('sort')
    if sort and isinstance(sort, basestring):
        # valid sort values:
        # - 'created': sort results ascending by creation date
        # - '-created': sort results descending by creation date
        # - 'title': sort results ascending by title
        if sort == 'title':
            # Titles sort via the dedicated case-normalized index.
            sort = 'sortable_title'
        if sort.startswith('-'):
            query['sort_order'] = 'descending'
            query['sort_on'] = sort[1:]
        else:
            query['sort_on'] = sort
    brains = search(query)
    return Batch(brains, step, start)
def get_subfolder_table(self):
    """Render the faceted table listing all contained subfolders."""
    brains = api.content.find(
        context=self.context,
        portal_type="ClassificationSubfolder",
    )
    table_view = SubFoldersFacetedTableView(self.context, self.request)
    # Batch size of 9999 effectively disables paging here.
    return table_view.render_table(Batch(brains, 9999))
def _makequery(self, query=None, batch=False, b_start=0, b_size=30,
               sort_on=None, sort_order=None, limit=0, brains=False,
               custom_query=None):
    """Run the base query unbatched, reorder by the request's manual
    ``sorting`` list of UIDs, then apply listing/batching as requested.
    """
    # Always fetch raw brains without batching so the full result set
    # can be re-sorted before a batch window is cut out of it.
    results = super(QueryBuilder, self)._makequery(
        query, batch=False, b_start=b_start, b_size=b_size,
        sort_on=sort_on, sort_order=sort_order, limit=limit,
        brains=True, custom_query=custom_query)
    sorting = self.request.form.get('sorting', '')
    # if sorting is None make it an empty list
    sorting = isinstance(sorting, basestring) and sorting.split(',') or []
    # apply the custom sorting to the resultset according to
    # our sorting list -- UIDs not listed sink to the end (rank 999)
    positions = {j: i for i, j in enumerate(sorting)}
    results = sorted(results,
                     key=lambda item: positions.get(item.UID, 999))
    if not brains:
        results = IContentListing(results)
    if batch:
        results = Batch(results, b_size, start=b_start)
    return results
def batch(self, batch=True, bsize=0, b_start=0):
    """Return table rows, optionally padded and wrapped in a Batch.

    Explicit arguments win over request form values, which win over
    context defaults.  When batching is disabled (or the effective
    page size is zero) the raw rows are returned instead.
    """
    request = self.request
    # NOTE(review): request form values are usually strings -- the
    # arithmetic below assumes ints; confirm callers coerce b_start.
    self.b_start = b_start or request.form.get('b_start') or 0
    perform_search = 'searchInTable' in request.form.keys()
    bsize = bsize or self.context.getBatchSize() or request.form.get(
        'bsize') or 0
    # Batching only makes sense with a positive page size.
    batch = batch and bsize > 0
    if not batch:
        self._rows = self.rows(search=perform_search)
        return self._rows
    self._rows = self.rows(batch=batch, bsize=bsize,
                           b_start=self.b_start, search=perform_search)
    # replicating foo elements to reach total size: pad with None on
    # both sides so the Batch sees a sequence of the full result length
    # with only the current window actually populated.
    self._rows = [None] * self.b_start + self._rows + [None] * (
        self.result_length - self.b_start - bsize)
    return Batch(self._rows, bsize, start=self.b_start,
                 end=self.b_start + bsize, orphan=0, overlap=0,
                 pagerange=7)
def datasets(self):
    """Return a batch of decorated datasets for this organization."""
    data = [
        self.decorate_dataset(dataset)
        for dataset in self.organization_data()
    ]  # noqa
    # BUGFIX: request form values arrive as strings; coerce to int so
    # Batch's paging arithmetic works (the int defaults pass through
    # int() unchanged).
    b_size = int(self.request.get("b_size", 20))
    b_start = int(self.request.get("b_start", 0))
    return Batch(data, b_size, b_start)
def _makequery(self, query=None, batch=False, b_start=0, b_size=30,
               sort_on=None, sort_order=None, limit=0, brains=False):
    """Parse the (form)query and return using multi-adapter"""
    parsedquery = queryparser.parseFormquery(self.context, query,
                                             sort_on, sort_order)
    # Let registered utilities rewrite individual index queries (and
    # possibly rename the index they apply to).
    index_modifiers = getUtilitiesFor(IParsedQueryIndexModifier)
    for name, modifier in index_modifiers:
        if name in parsedquery:
            new_name, query = modifier(parsedquery[name])
            parsedquery[name] = query
            # if a new index name has been returned, we need to replace
            # the native ones
            if name != new_name:
                del parsedquery[name]
                parsedquery[new_name] = query

    # Check for valid indexes
    catalog = getToolByName(self.context, 'portal_catalog')
    valid_indexes = [
        index for index in parsedquery if index in catalog.indexes()
    ]

    # We'll ignore any invalid index, but will return an empty set if none
    # of the indexes are valid.
    if not valid_indexes:
        logger.warning(
            "Using empty query because there are no valid indexes used.")
        parsedquery = {}

    if not parsedquery:
        # Nothing searchable: return the empty shape the caller expects.
        if brains:
            return []
        else:
            return IContentListing([])

    if batch:
        parsedquery['b_start'] = b_start
        parsedquery['b_size'] = b_size
    elif limit:
        parsedquery['sort_limit'] = limit

    if 'path' not in parsedquery:
        # Default to searching the whole site.
        parsedquery['path'] = {'query': ''}

    results = catalog(**parsedquery)
    if not brains:
        results = IContentListing(results)
    if batch:
        results = Batch(results, b_size, start=b_start)
    return results
def batch(self):
    """Build the current page of product dicts for the product listing.

    Full data (pricing, image) is only computed for items inside the
    visible page window; every other item gets a lightweight dict so
    its object is never woken up.
    """
    catalog = getToolByName(self.context, 'portal_catalog')
    portal_properties = getToolByName(self.context, 'portal_properties')
    use_view_action = portal_properties.site_properties.getProperty(
        'typesUseViewActionInListings', ())
    props = portal_properties.pcommerce_properties
    columns = int(props.getProperty('columns', 3))
    width = int(props.getProperty('thumb_width', 0))
    # Thumb traversal path: scaled when a width is configured.
    width = width and 'image/thumb?width=%s' % width or 'image_thumb'
    results = catalog(object_provides=IProduct.__identifier__,
                      path={
                          'query': '/'.join(self.context.getPhysicalPath()),
                          'depth': 1
                      },
                      sort_on='getObjPositionInParent')
    items = []
    i = 0
    # Page window: 5 rows of ``columns`` products each.
    start = (self.page - 1) * (columns * 5)
    end = start + columns * 5
    for item in results:
        url = item.getURL()
        if item.portal_type in use_view_action:
            url += '/view'
        if start <= i < end:
            # Inside the visible window: wake the object for pricing
            # and image data.  (``object`` shadows the builtin here.)
            object = item.getObject()
            col = i % columns + 1
            adapter = IPricing(object)
            image = None
            if object.getImage():
                image = {
                    'caption': object.getImageCaption(),
                    'thumb': '%s/%s' % (item.getURL(), width)
                }
            item = {
                'uid': item.UID,
                'class': 'col%s' % col,
                'title': item.Title,
                'description': item.Description,
                'price': CurrencyAware(adapter.getPrice()),
                'base_price': CurrencyAware(adapter.getBasePrice()),
                'offer': adapter.getPrice() < adapter.getBasePrice(),
                'image': image,
                'url': url
            }
        else:
            item = {
                'uid': item.UID,
                'title': item.Title,
                'description': item.Description,
                'url': url
            }
        i += 1
        items.append(item)
    # NOTE(review): the third positional Batch argument is normally the
    # start *index* -- passing the 1-based page number here looks
    # suspicious; verify against the Batch implementation in use.
    return Batch(items, columns * 5, self.page, 5)
def datasets(self):
    """Return a batch of decorated datasets parsed from organization XML.

    Falls back to an empty list when ``organization_data`` returned
    nothing xpath-capable.
    """
    data = self.organization_data()
    try:
        datasets = data.xpath("//dataset")
        datasets = [self.decorate_dataset(dataset) for dataset in datasets]
        # BUGFIX: request form values arrive as strings; coerce to int
        # so Batch's paging arithmetic works.
        b_size = int(self.request.get("b_size", 20))
        b_start = int(self.request.get("b_start", 0))
        return Batch(datasets, b_size, b_start)
    except AttributeError:
        # data had no .xpath (no organization data available)
        return []
def execute(self, query, secure=True, **kw):
    """Search the catalog with *query* and return the results batched.

    ``batch_start``/``batch_step`` are popped from *query* to control
    the batch window; ``secure=False`` bypasses security filtering.
    """
    b_start = query.pop('batch_start', 0)
    b_step = query.pop('batch_step', 100)
    catalog = api.portal.get_tool(name='portal_catalog')
    search = (catalog.searchResults if secure
              else catalog.unrestrictedSearchResults)
    return Batch(search(query), b_step, b_start)
def results(self, batch=True, b_start=0, b_size=None, sort_on=None,
            limit=None, brains=False, custom_query=None):
    """Return collection results reordered by the stored ``sorting`` list."""
    items = super(SortableCollection, self).results(
        batch, b_start, b_size, sort_on, limit, brains, custom_query)
    # Map each UID in the manual ordering to its rank; unlisted items
    # sink to the end (rank 999).
    rank = dict((uid, idx) for idx, uid in enumerate(self.sorting))
    items = sorted(items, key=lambda item: rank.get(item.uuid(), 999))
    if batch:
        items = Batch(items, b_size, start=b_start)
    return items
def update(self):
    """Batch Apple News articles; on POST, push updates for selected UIDs.

    Builds ``self.batch`` from all catalogued articles, then (POST only,
    CSRF-verified) calls ``update_article`` for each requested UID and
    reports successes/failures via status messages.
    """
    uids = []
    request = self.request
    # BUGFIX: form values arrive as strings; Batch needs an int start.
    b_start = int(request.form.get('b_start', 0))
    catalog = getToolByName(self.context, 'portal_catalog')
    self.batch = Batch(
        catalog(has_apple_news=True,
                sort_on='Date',
                sort_order='descending'),
        start=b_start,
        size=50
    )
    if request.method.lower() == 'post':
        messages = []
        # CSRF protection: reject unauthenticated form posts.
        authenticator = getMultiAdapter(
            (self.context, request), name=u"authenticator"
        )
        if not authenticator.verify():
            raise Unauthorized
        uids = request.get('uids', [])
        if not uids:
            return
        count = 0
        brains = catalog(has_apple_news=True, UID=uids)
        for b in brains:
            obj = b.getObject()
            adapter = IAppleNewsActions(obj, alternate=None)
            if adapter is not None:
                try:
                    adapter.update_article()
                    count += 1
                except AppleNewsError as e:
                    log(u'Handled Apple News Error in bulk update '
                        u'{}: {}'.format(e, e.data))
                    # 409 means the article changed upstream.
                    if e.code == 409:
                        messages.append(
                            u'Unable to update article "{}" '.format(
                                safe_unicode(b.Title)
                            ) + u'because there are conflicting changes '
                            u'in Apple News Publisher'
                        )
                    else:
                        messages.append(
                            u'Unable to update article "{}" '.format(
                                safe_unicode(b.Title)
                            ) + u'check logs for details.'
                        )
        msg_adapter = IStatusMessage(self.request)
        msg_adapter.add(
            u'Updated {} Apple News article with {} errors'.format(
                count, len(brains) - count
            ),
            type=u"info"
        )
        for msg in messages:
            msg_adapter.add(msg, type=u'error')
def update(self):
    """Build a lazily-filtered order batch for the selected date range."""
    b_start = int(self.request.get('b_start', 0))
    orders = LazyFilteredOrders(
        storage.get_storage(),
        self.startDate(),
        self.endDate(),
        csv=False,
    )
    # Only expose a batch when the range actually contains orders.
    if len(orders) > 0:
        self.orders_exist = True
        self.batch = Batch(orders, size=50, start=b_start)
    super(OrderControlPanelView, self).update()
def search(self):
    """Search datasets under this context and return a paging dict.

    Reads batching parameters from the request form, forces the query
    to IDataset objects below this context, and returns a dict with
    ``total``/``length``/``b_start``/``b_size`` plus one reduced
    metadata record per brain in ``results``.
    """
    kw = self.request.form
    b_start = int(kw.pop('b_start', 0))
    # BUGFIX: coerce BEFORE min() -- the form value is a string, and
    # min(str, 50) is unordered (always 50 on Py2, TypeError on Py3).
    # The 50 cap keeps clients from requesting unbounded pages.
    b_size = min(int(kw.pop('b_size', 50)), 50)
    # remove path and portal_type
    # TODO: go through catalog indices and restrict parameters to index
    #       names
    kw.pop('path', None)
    kw.pop('portal_type', None)
    kw.pop('object_provides', None)
    kw.pop('used', None)
    kw.pop('_merge', None)
    pc = getToolByName(self.context, 'portal_catalog')
    kw.update({
        'object_provides': 'org.bccvl.site.content.interfaces.IDataset',
        'path': '/'.join(self.context.getPhysicalPath()),
    })
    batch = Batch(pc.searchResults(**kw), b_size, b_start)
    result = {
        'total': batch.sequence_length,
        'length': batch.length,
        'b_start': b_start,
        'b_size': b_size,
        'results': []
    }
    # TODO: could add next/prev links to make batch nav easier
    for brain in batch:
        result['results'].append({
            'url': brain.getURL(),
            'uuid': brain.UID,
            'id': brain.getId,
            'BCCDataGenre': brain.BCCDataGenre,
            'BCCResolution': brain.BCCResolution,
            'Description': brain.Description,
            'Title': brain.Title,
            'job_state': brain.job_state,
        })
    return result
def listings(self, b_start=None, b_size=None):
    """get a page of listings"""
    if b_size is None:
        b_size = self.batch_size
    if b_start is None:
        # Derive the offset from the 1-based ``page`` attribute when set.
        b_start = (getattr(self, 'page', 1) - 1) * b_size
    content_filter = {}
    is_collection = self.context.portal_type == 'Collection'
    if not is_collection:
        # Plain contexts get a fixed upcoming-events query; Collections
        # bring their own criteria and only receive the extras below.
        content_filter = {
            'portal_type': 'Event',
            'sort_on': 'start',
            'sort_order': 'ascending',
            'review_state': 'published',
        }
    text = self.request.get('SearchableText')
    if text:
        content_filter['SearchableText'] = text
    search_all = self.request.get('SearchAll')
    if search_all == 'yes':
        # Include past events too.
        content_filter['start'] = {
            'query': DateTime('1900/01/01'),
            'range': 'min'
        }
    elif search_all == 'no' or not is_collection:
        # Only events starting now or later.
        content_filter['start'] = {'query': DateTime(), 'range': 'min'}
    start = self.request.get('start')
    if start:
        # Explicit request values override the defaults above.
        content_filter['start'] = start
    end = self.request.get('end')
    if end:
        content_filter['end'] = end
    if is_collection:
        batch = self.context.results(batch=True, b_start=b_start,
                                     b_size=b_size, brains=True,
                                     custom_query=content_filter)
    else:
        catalog = getToolByName(self.context, 'portal_catalog')
        items = catalog(**content_filter)
        batch = Batch(items, b_size, b_start)
    return batch
def listRenderedContainedElements(self, portal_types=(),
                                  widgets_to_render=(), b_size=30,
                                  b_start=0):
    """ Get the contained elements, rendered for display.
        If p_portal_types is specified, only return elements having
        the required portal_type.
        If p_widgets_to_render is specified, only render given
        fields/widgets. """
    lister = IListContainedDexterityObjectsForDisplay(self.context)
    contained = lister.listContainedObjects(
        portal_types, widgets_to_render, b_start=b_start, b_size=b_size)
    # orphan=1 folds a trailing single leftover item into the last page.
    return Batch(contained, b_size, b_start, orphan=1)
def __call__(self, query, batch=False, b_start=0, b_size=30,
             sort_on=None, sort_order=None, limit=0, brains=False,
             custom_query=None):
    """Run the base query, optionally re-sorting by configured id or
    title patterns; batching is deferred until after the re-sort.
    """
    order_by_id = self.order_by_id
    order_by_title = self.order_by_title
    _batch = batch
    _sort = False
    if order_by_id or order_by_title:
        # Custom sorting must see the full result set, so suppress the
        # base class's batching and remember that it was requested.
        batch = False
        _sort = True
    # Get default results
    results = super(QueryBuilder, self).__call__(
        query,
        batch=batch,
        b_start=b_start,
        b_size=b_size,
        sort_on=sort_on,
        sort_order=sort_order,
        limit=limit,
        brains=brains,
        custom_query=custom_query
    )
    if _sort:
        if order_by_id:
            results = sorted(
                results,
                key=lambda x: self.id_order(order_by_id, x),
            )
        elif order_by_title:
            # precompiling regexes
            order_by_title = [
                re.compile(x) for x in order_by_title
            ]
            results = sorted(
                results,
                key=lambda x: self.title_order(order_by_title, x),
            )
    if _batch:
        # Apply the batching that was suppressed above.
        return Batch(results, b_size, start=b_start)
    return results
def update(self):
    """Filter stored orders by the selected date range and batch them.

    Also records bookkeeping used by the control panel template:
    first/last order dates and the start/end indexes of the filtered
    window within the full (newest-first) order list.
    """
    orders = list(_fetch_orders(storage.get_storage(), key=(), csv=False))
    # Newest first.
    orders.sort(key=lambda o: o.get('date_sort', ''), reverse=True)
    start = int(self.request.get('b_start', 0))
    if len(orders) > 0:
        self.orders_exist = True
        self.most_recent_order_date = orders[0]['date']
        self.first_order_date = orders[len(orders) - 1]['date']
        # default in case date selection integrity check fails
        # this could happen if end date < start date
        self.end_index = 0
        self.start_index = len(orders) - 1
        selected_start = self.startDate()
        selected_end = self.endDate()
        if self.check_date_integrity():
            filtered_orders = []
            index_list = []
            count = -1
            for order in orders:
                count += 1
                if order['datetime'].date() > selected_end:
                    # Too new; keep scanning (list is newest-first).
                    continue
                if order['datetime'].date() < selected_start:
                    # Older than the range; everything after is, too.
                    break
                filtered_orders.append(order)
                index_list.append(count)
            if len(index_list) > 0:
                # it is possible no orders are found
                # even if the date range is correct
                self.end_index = min(index_list)
                self.start_index = max(index_list)
            orders = filtered_orders
    self.batch = Batch(orders, size=50, start=start)
    super(OrderControlPanelView, self).update()
def results(self, batch=True, b_start=0, b_size=None, sort_on=None,
            limit=None, brains=False, custom_query=None):
    """Collection results reordered by the behavior's ``sorting`` UIDs."""
    base = super(SortableCollectionBehavior, self).results(
        batch, b_start, b_size, sort_on, limit, brains, custom_query)
    # UIDs missing from the manual ordering sink to the end (rank 999).
    order = {uid: pos for pos, uid in enumerate(self.sorting)}
    ordered = sorted(base, key=lambda it: order.get(it.uuid(), 999))
    if not batch:
        return ordered
    # Without an explicit page size, show everything in one page.
    page_size = b_size or self.item_count
    return Batch(ordered, page_size, start=b_start)
def nearest(self):
    """Return a batch of Place brains nearest to this context.

    Each brain is copied into a ``NeighborBrain`` that carries two
    synthetic columns, ``distance`` and ``center``, computed per brain.
    Returns an empty list when the context is not georeferenced.
    """
    catalog = getToolByName(self.context, 'portal_catalog')

    class NeighborBrain(AbstractCatalogBrain, NoBrainer):
        pass

    # Extend the catalog record schema with the synthetic columns so
    # NeighborBrain instances can hold them alongside real metadata.
    cschema = catalog._catalog.schema
    scopy = cschema.copy()
    scopy['data_record_id_'] = len(cschema.keys())
    scopy['data_record_score_'] = len(cschema.keys()) + 1
    scopy['data_record_normalized_score_'] = len(cschema.keys()) + 2
    scopy['distance'] = len(cschema.keys()) + 3
    scopy['center'] = len(cschema.keys()) + 4
    NeighborBrain.__record_schema__ = scopy
    try:
        g = IGeoreferenced(self.context)
    except Exception:
        # BUGFIX: was a bare ``except:`` -- narrowed so SystemExit /
        # KeyboardInterrupt still propagate.  Adaptation failure simply
        # means there is nothing to be "near" to.
        return []

    def gen():
        # Lazily decorate each nearby Place brain with distance/center.
        for brain in catalog(
            geolocation={'query': (g.bounds, 10), 'range': 'nearest'},
            portal_type={'query': ['Place']},
            sort_index='geolocation',
        ):
            if brain.getId == self.context.getId():
                # skip self
                continue
            neighbor = NeighborBrain().__of__(catalog)
            for k in brain.__record_schema__.keys():
                neighbor[k] = brain[k]
            neighbor['distance'] = self.distance(brain)
            neighbor['center'] = self.center(brain)
            yield neighbor

    b_size = 20
    b_start = self.request.get('b_start', 0)
    batch = Batch(list(gen()), b_size, int(b_start), orphan=0)
    return batch
def batch(self):
    """Return every result in one page; 99999 effectively disables paging."""
    all_rows = self.results
    return Batch(all_rows, 99999, start=0)
def _makequery(self, query=None, batch=False, b_start=0, b_size=30,
               sort_on=None, sort_order=None, limit=0, brains=False,
               custom_query=None):
    """Parse the (form)query and return using multi-adapter"""
    # Give registered query modifiers a chance to rewrite the raw
    # query, in deterministic (name-sorted) order.
    query_modifiers = getUtilitiesFor(IQueryModifier)
    for name, modifier in sorted(query_modifiers, key=itemgetter(0)):
        query = modifier(query)

    parsedquery = queryparser.parseFormquery(self.context, query,
                                             sort_on, sort_order)

    index_modifiers = getUtilitiesFor(IParsedQueryIndexModifier)
    for name, modifier in index_modifiers:
        if name in parsedquery:
            new_name, query = modifier(parsedquery[name])
            parsedquery[name] = query
            # if a new index name has been returned, we need to replace
            # the native ones
            if name != new_name:
                del parsedquery[name]
                parsedquery[new_name] = query

    # Check for valid indexes
    catalog = getToolByName(self.context, 'portal_catalog')
    valid_indexes = [
        index for index in parsedquery if index in catalog.indexes()
    ]

    # We'll ignore any invalid index, but will return an empty set if none
    # of the indexes are valid.
    if not valid_indexes:
        logger.warning(
            "Using empty query because there are no valid indexes used.")
        parsedquery = {}
    empty_query = not parsedquery  # store emptiness

    if batch:
        parsedquery['b_start'] = b_start
        parsedquery['b_size'] = b_size
    elif limit:
        parsedquery['sort_limit'] = limit

    if 'path' not in parsedquery:
        # Default to searching the whole site.
        parsedquery['path'] = {'query': ''}

    if isinstance(custom_query, dict) and custom_query:
        # Update the parsed query with an extra query dictionary. This may
        # override the parsed query. The custom_query is a dictonary of
        # index names and their associated query values.
        parsedquery.update(custom_query)
        empty_query = False

    # filter bad term and operator in query
    parsedquery = self.filter_query(parsedquery)

    results = []
    if not empty_query:
        results = catalog(**parsedquery)
        # Clamp the advertised result count to the requested limit.
        if getattr(results, 'actual_result_count', False) and limit\
                and results.actual_result_count > limit:
            results.actual_result_count = limit

    # Optional field collapsing: keep only the first result per value
    # of ``collapse_on`` (request parameter wins over the context
    # attribute).
    collapse_on = self.request.get(
        'collapse_on', getattr(self.context, 'collapse_on', None))
    if collapse_on is not None:
        fc = FieldCollapser(query={'collapse_on': collapse_on})
        # LazyFilter drops collapsed duplicates lazily; the identity
        # LazyMap preserves the lazy-result protocol and length info.
        results = LazyMap(lambda x: x,
                          LazyFilter(results, test=fc.collapse),
                          length=results._len,
                          actual_result_count=results.actual_result_count)

    if not brains:
        results = IContentListing(results)
    if batch:
        results = Batch(results, b_size, start=b_start)
    return results
def results(self):
    """All items matching our tags in one page (paging disabled)."""
    tagged_items = self.adapted.get_items(self.tags)
    return Batch(tagged_items, 99999, start=0)
def batch(self):
    """Return the page of notices selected by the ``page`` request param."""
    size = 10
    current_page = int(self.request.get('page', '0'))
    offset = current_page * size
    return Batch(self.notices, start=offset, size=size)
def _makequery(self, query=None, batch=False, b_start=0, b_size=30,
               sort_on=None, sort_order=None, limit=0, brains=False,
               custom_query=None):
    """Parse the (form)query and return using multi-adapter.

    BUGFIX: ``custom_query`` previously defaulted to a shared mutable
    ``{}``; the default is now ``None`` (matching the sibling
    implementations) with identical behavior, since ``None`` simply
    fails the ``isinstance(..., dict)`` check below.
    """
    parsedquery = queryparser.parseFormquery(self.context, query,
                                             sort_on, sort_order)
    index_modifiers = getUtilitiesFor(IParsedQueryIndexModifier)
    for name, modifier in index_modifiers:
        if name in parsedquery:
            new_name, query = modifier(parsedquery[name])
            parsedquery[name] = query
            # if a new index name has been returned, we need to replace
            # the native ones
            if name != new_name:
                del parsedquery[name]
                parsedquery[new_name] = query

    # Check for valid indexes
    catalog = getToolByName(self.context, 'portal_catalog')
    valid_indexes = [
        index for index in parsedquery if index in catalog.indexes()
    ]

    # We'll ignore any invalid index, but will return an empty set if none
    # of the indexes are valid.
    if not valid_indexes:
        logger.warning(
            "Using empty query because there are no valid indexes used.")
        parsedquery = {}

    if not parsedquery:
        if brains:
            return []
        else:
            return IContentListing([])

    if batch:
        parsedquery['b_start'] = b_start
        parsedquery['b_size'] = b_size
    elif limit:
        parsedquery['sort_limit'] = limit

    if 'path' not in parsedquery:
        parsedquery['path'] = {'query': ''}

    if isinstance(custom_query, dict):
        # Update the parsed query with an extra query dictionary. This may
        # override the parsed query. The custom_query is a dictonary of
        # index names and their associated query values.
        parsedquery.update(custom_query)

    results = catalog(**parsedquery)
    # Clamp the advertised result count to the requested limit.
    if getattr(results, 'actual_result_count', False) and limit\
            and results.actual_result_count > limit:
        results.actual_result_count = limit
    if not brains:
        results = IContentListing(results)
    if batch:
        results = Batch(results, b_size, start=b_start)
    return results
def table_data(self):
    """Return DataTables-formatted JSON for the tumour listing.

    Reads DataTables request parameters (paging, ordering, search),
    queries the catalog, and serializes one record per result row.
    Fixes applied: ``search_words > 1`` compared a list against an int
    (now ``len(search_words) > 1``); the user-search-filter loop
    indexed a list with a string (guaranteed TypeError when reached);
    bare ``except:`` around DateTime parsing narrowed.
    """
    indexes = api.portal.get_tool('portal_catalog').indexes()
    b_size = int(self.request.form.get('length', 10))
    b_start = int(self.request.form.get('start', 0))
    # Resolve the DataTables column number to a catalog sort index,
    # falling back to 'created' for non-index columns.
    sort_on = self.request.get('order[0][column]', 0)
    sort_on = self.request.get('columns[{0}][data]'.format(sort_on),
                               'created')
    sort_on = sort_on in indexes and sort_on or 'created'
    sort_order = self.request.get('order[0][dir]', 'desc')
    sort_order = sort_order == 'asc' and 'ascending' or 'descending'
    sort_limit = self.request.get('sort_limit', 300)
    searchable_text = self.request.form.get('search[value]', '')
    searchable_text = safe_unicode(searchable_text)
    search_field = self.request.get('search_field', '')
    search_date = self.request.get('search_date', '')
    date_from = self.request.get('date_from', '')
    date_to = self.request.get('date_to', '')

    query = dict()
    query['b_size'] = b_size
    query['b_start'] = b_start
    query['sort_on'] = sort_on
    query['sort_order'] = sort_order
    query['sort_limit'] = sort_limit
    query['portal_type'] = 'Tumour'
    query['path'] = {
        'query': '/'.join(self.context.getPhysicalPath()),
        'depth': -1
    }
    if search_field in ('task_no', 'sequencing_filename'):
        # These fields have dedicated catalog indexes.
        query[search_field] = searchable_text
    else:
        query['SearchableText'] = searchable_text
    if search_field in utils.progress_steps:
        query['steps'] = search_field
    elif search_field in utils.review_states:
        query['review_state'] = search_field
    else:
        search_words = searchable_text.lower().split()
        # BUGFIX: was ``search_words > 1`` (list vs int).
        if len(search_words) > 1:
            operators = ('and', 'or', 'not', '(', ')')
            # Only auto-OR the words when the user did not write an
            # explicit boolean expression.
            if not any(map(lambda val: val in search_words, operators)):
                searchable_text = u' OR '.join(search_words)
                query['SearchableText'] = searchable_text
    if search_date in ('sampling_time', 'received_time',
                       'separation_time', 'extraction_time',
                       'library_time', 'template_time',
                       'sequencing_time', 'created', 'modified'):
        # Unparseable bounds fall back to the epoch / "now".
        try:
            start_date = DateTime(date_from)
        except Exception:
            start_date = DateTime('1970-01-01')
        try:
            end_date = DateTime(date_to)
        except Exception:
            end_date = DateTime()
        query[search_date] = {
            'query': sorted([start_date, end_date]),
            'range': 'min:max'
        }
    try:
        user_search_filter = api.portal.get_registry_record(
            'gene.tumour.interfaces.IGeneTumourSettings.'
            'user_search_filter')
    except Exception as e:
        user_search_filter = []
        logger.warn(e)
    if not isinstance(user_search_filter, (list, tuple)):
        user_search_filter = []
    current_user = api.user.get_current()
    for group in user_search_filter:
        users = api.user.get_users(groupname=group)
        if current_user in users:
            # BUGFIX: was ``user_search_filter[group]`` -- indexing a
            # list with a string (TypeError).  NOTE(review): this text
            # is appended AFTER the query dict was built, so it never
            # reaches the catalog query -- confirm intended behaviour.
            searchable_text += ' '
            searchable_text += safe_unicode(group)
    results = api.content.find(**query)
    results = IContentListing(results)
    results = Batch(results, size=b_size, start=b_start)
    can_review = api.user.has_permission('Review portal content',
                                         user=current_user,
                                         obj=self.context)
    can_changenote = api.user.has_permission('gene.tumour: Change Note',
                                             user=current_user,
                                             obj=self.context)
    rows = []
    for item in results:
        obj = item.getObject()
        record = {}
        for name, value in utils.fields():
            if value.__class__.__name__ in ('NamedBlobImage',
                                            'NamedBlobFile'):
                # Blobs are rendered as [display-url, download-url].
                record[name] = [
                    self.display_url(obj, name),
                    self.download_url(obj, name)
                ]
            else:
                record[name] = getattr(obj, name, None)
        record['created'] = api.portal.get_localized_time(
            obj.created(), long_format=True)
        record['modified'] = api.portal.get_localized_time(
            obj.modified(), long_format=True)
        record['report'] = getattr(obj.aq_explicit, 'report', None)
        record['url'] = obj.absolute_url_path()
        state = api.content.get_state(obj)
        record['review_state'] = translate(
            _(state.title()), context=api.portal.getRequest())
        if record['result'] in result_dict:
            record['result'] = translate(
                _(result_dict[record['result']]),
                context=api.portal.getRequest())
        if record['treatment_situation']:
            record['treatment_situation'] = u','.join([
                translate(_(situation_dict[item]),
                          context=api.portal.getRequest())
                for item in record['treatment_situation']
                if item in situation_dict
            ])
        record['can_versions'] = can_review
        record['can_changenote'] = can_changenote
        # DataTables row metadata.
        record['DT_RowId'] = obj.UID()
        record['DT_RowClass'] = '{0} {1}'.format(obj.steps, state)
        record['DT_RowData'] = dict()
        record['DT_RowData']['id'] = obj.id
        record['DT_RowData']['uuid'] = obj.UID()
        record['DT_RowData']['gid'] = obj.gid
        record['DT_RowData']['steps'] = obj.steps
        record['DT_RowData']['url'] = obj.absolute_url()
        record['DT_RowData']['result'] = getattr(obj, 'result', None)
        record['DT_RowData']['library_barcode'] = getattr(
            obj, 'library_barcode', None)
        record['steps'] = translate(_(obj.steps), context=self.request)
        rows.append(record)
    table_data = dict()
    table_data['draw'] = int(self.request.form.get('draw', 1))
    table_data['recordsTotal'] = len(results)
    table_data['recordsFiltered'] = len(results)
    table_data['data'] = rows
    self.request.response.setHeader(
        'Content-Disposition',
        'attachment; filename={0}'.format('filename.json'))
    self.request.response.setHeader('Content-Type', 'application/json')
    return json.dumps(table_data, default=date_handler)
def _makequery(self, query=None, batch=False, b_start=0, b_size=30,
               sort_on=None, sort_order=None, limit=0, brains=False,
               custom_query=None):
    """Parse the (form)query and return using multi-adapter"""
    # Catalog assumes we want to limit the results to b_start+b_size.
    # We would like to limit the search too, however we don't know for
    # sure what to limit it to, since we need an unknown number of raw
    # results to fill up one *filtered* page.  We use a combination of
    # hints to guess:
    # - data about how many filtered pages the user has clicked through
    # - the final length, if the user clicked on the last page
    # - a look-ahead param on the collection: the max number of
    #   unfiltered results that may be needed to make up a filtered page

    # Need to do this here as it removes these from the query before
    # the checksum is computed.
    fc_ends = self._get_hint_and_remove(custom_query, 'fc_ends', str, '')
    fc_len = self._get_hint_and_remove(custom_query, "fc_len", int)
    fc_check = self._get_hint_and_remove(custom_query, 'fc_check', str)
    # Checksum ties the hints to this exact query; stale hints from a
    # different query/sort/page-size must be discarded.
    checksum = hashlib.md5(
        json.dumps((query, custom_query, sort_on, sort_order, b_size),
                   sort_keys=True)).hexdigest()
    if fc_check != checksum:
        fc_ends = ''
        fc_len = None
    # fc_ends is "end-offset per filtered page"; keep only entries at
    # or before the requested window.
    fc_ends = enumerate([int(i) for i in fc_ends.split(',') if i])
    fc_ends = [(page, i) for page, i in fc_ends
               if page * b_size <= b_start + b_size]
    if not fc_ends:
        nearest_page, nearest_end = 0, 0
    else:
        nearest_page, nearest_end = max(fc_ends)
    max_unfiltered_pagesize = getattr(self.context,
                                      'max_unfiltered_page_size', 1000)
    # How many more filtered pages past the nearest known point we need,
    # each costing up to max_unfiltered_pagesize raw results.
    additional_pages = int(floor(float(b_start) / b_size - nearest_page))
    safe_start = nearest_end
    safe_limit = additional_pages * max_unfiltered_pagesize
    results = super(QueryBuilder, self)._makequery(
        query, batch=False, b_start=safe_start, b_size=safe_limit,
        sort_on=sort_on, sort_order=sort_order, limit=limit,
        brains=True, custom_query=custom_query)

    # Merge collapse_on settings: context attribute plus any passed in
    # via custom_query (consumed here so it never reaches the catalog).
    collapse_on = getattr(self.context, 'collapse_on', set())
    if custom_query is not None and 'collapse_on' in custom_query:
        custom_collapse_on = custom_query.get('collapse_on')
        if hasattr(custom_collapse_on, '__iter__'):
            collapse_on.update(custom_collapse_on)
        elif type(custom_collapse_on) in [str, unicode]:
            collapse_on.add(custom_collapse_on)
        del custom_query['collapse_on']

    merge_fields = getattr(self.context, 'merge_fields', None)
    if merge_fields is None and custom_query is not None:
        merge_fields = custom_query.get('merge_fields', set())
    elif merge_fields is None:
        merge_fields = set()

    if collapse_on:
        fc = FieldCollapser(collapse_on=collapse_on,
                            merge_fields=merge_fields)
        results = LazyFilterLen(results, test=fc.collapse, fc_len=fc_len)
        if not batch:
            # HACK: collectionfilter iterates the results to work out
            # all the values, but merging only completes once iteration
            # reaches the end.  With Batch=False we iterate up-front so
            # the merge -- and hence len(results) -- is accurate.
            list(results)
        else:
            # Put this into the request so it ends up in the batch links.
            self.request.form['fc_ends'] = ','.join(
                [str(i) for i in results.fc_ends(b_start, b_size)])
            # we might have hit the end
            if getattr(results, 'fc_len', None) is not None:
                self.request.form['fc_len'] = results.fc_len
            # Ensures stale fc_len/fc_ends hints are ignored after the
            # query changes.
            self.request.form['fc_check'] = checksum
    # Same iterate-first HACK as above for the non-collapsed path, so
    # len(results) is accurate even without merging.
    if not batch:
        list(results)
    if not brains:
        results = IContentListing(results)
    if batch:
        results = Batch(results, b_size, start=b_start)
    return results
def _makequery(
    self,
    query=None,
    batch=False,
    b_start=0,
    b_size=30,
    sort_on=None,
    sort_order=None,
    limit=0,
    brains=False,
    custom_query=None,
):
    """Parse the (form)query and return using multi-adapter"""
    # Give registered query modifiers a chance to rewrite the raw
    # query, in deterministic (name-sorted) order.
    query_modifiers = getUtilitiesFor(IQueryModifier)
    for name, modifier in sorted(query_modifiers, key=itemgetter(0)):
        query = modifier(query)

    parsedquery = queryparser.parseFormquery(self.context, query,
                                             sort_on, sort_order)

    index_modifiers = getUtilitiesFor(IParsedQueryIndexModifier)
    for name, modifier in index_modifiers:
        if name in parsedquery:
            new_name, query = modifier(parsedquery[name])
            parsedquery[name] = query
            # if a new index name has been returned, we need to replace
            # the native ones
            if name != new_name:
                del parsedquery[name]
                parsedquery[new_name] = query

    # Check for valid indexes
    catalog = getToolByName(self.context, "portal_catalog")
    valid_indexes = [
        index for index in parsedquery if index in catalog.indexes()
    ]

    # We'll ignore any invalid index, but will return an empty set if none
    # of the indexes are valid.
    if not valid_indexes:
        logger.warning(
            "Using empty query because there are no valid indexes used.")
        parsedquery = {}
    empty_query = not parsedquery  # store emptiness

    if batch:
        parsedquery["b_start"] = b_start
        parsedquery["b_size"] = b_size
    elif limit:
        parsedquery["sort_limit"] = limit

    if "path" not in parsedquery:
        parsedquery["path"] = {"query": ""}

    if isinstance(custom_query, dict) and custom_query:
        # Update the parsed query with an extra query dictionary. This may
        # override the parsed query. The custom_query is a dictonary of
        # index names and their associated query values.
        parsedquery.update(custom_query)
        empty_query = False

    # filter bad term and operator in query
    parsedquery = self.filter_query(parsedquery)

    results = []
    # RER.SOLRPUSH PATCH: route the query to Solr when the parsed query
    # carries a truthy ``searchWithSolr`` marker (always removed before
    # hitting the catalog).
    search_with_solr = False
    if "searchWithSolr" in parsedquery:
        if parsedquery["searchWithSolr"]["query"]:
            search_with_solr = True
        del parsedquery["searchWithSolr"]
    if not empty_query:
        if search_with_solr:
            if "SearchableText" in parsedquery:
                # Solr does its own wildcarding; strip any trailing '*'.
                if isinstance(parsedquery["SearchableText"], dict):
                    parsedquery["SearchableText"]["query"] = parsedquery[
                        "SearchableText"]["query"].rstrip("*")
                else:
                    parsedquery["SearchableText"] = parsedquery[
                        "SearchableText"].rstrip("*")
            results = SolrResponse(data=solr_search(
                **self.clean_query_for_solr(query=parsedquery)))
        else:
            results = catalog(**parsedquery)
        # Clamp the advertised result count to the requested limit.
        if (getattr(results, "actual_result_count", False) and limit  # noqa
                and results.actual_result_count > limit  # noqa
                ):
            results.actual_result_count = limit

    # Solr responses are not wrapped in IContentListing.
    if not brains and not search_with_solr:
        results = IContentListing(results)
    if batch:
        results = Batch(results, b_size, start=b_start)
    return results