def _typecatComponentBrains(self,
                            uid=None,
                            types=(),
                            meta_type=(),
                            start=0,
                            limit=None,
                            sort='name',
                            dir='ASC',
                            name=None,
                            keys=()):
    """Query the typed per-meta_type component catalog for brains.

    Returns a ``(brains, total)`` pair on success, or ``(None, 0)``
    when no typed catalog/field spec exists for ``meta_type`` or when
    ``sort`` is not an index of that catalog — callers are expected to
    fall back to the slower generic search in those cases.
    """
    target = self._getObject(uid)
    field_spec = get_component_field_spec(meta_type)
    if field_spec is None:
        return None, 0
    catalog = field_spec.get_catalog(target, meta_type)

    sort_spec = ()
    if sort:
        # The typed catalog can only sort on its own indexes; signal
        # the caller to use the slow path otherwise.
        if sort not in catalog._catalog.indexes:
            return None, 0
        sort_spec = ((sort, dir), )

    terms = [Generic('path', uid)]
    if name:
        # Substring match of ``name`` against every searchable field.
        globs = [MatchGlob(field, '*%s*' % name)
                 for field in field_spec.fields]
        terms.append(Or(*globs))

    result = catalog.evalAdvancedQuery(And(*terms), sort_spec)
    count = len(result)
    if limit is None:
        page = result[start:]
    else:
        page = result[start:start + limit]
    return page, count
Beispiel #2
0
    def _buildQuery(self, types, paths, depth, query, filterPermissions):
        """Assemble a single AdvancedQuery ``And`` from the optional
        base query plus path, type and permission restrictions.
        """
        terms = []
        if query is not None:
            terms.append(query)

        # Path restriction: default to this context's own physical path.
        search_paths = paths or ('/'.join(self.context.getPhysicalPath()), )
        path_query = {'query': search_paths}
        if depth is not None:
            path_query['depth'] = depth
        terms.append(Generic('path', path_query))

        # Type restriction: one Eq per interface; only wrap in Or when
        # there is more than one type to avoid unnecessary nesting.
        if not isinstance(types, (tuple, list)):
            types = (types, )
        type_terms = [Eq('objectImplements', dottedname(t)) for t in types]
        if type_terms:
            terms.append(type_terms[0] if len(type_terms) == 1
                         else Or(*type_terms))

        # Permission restriction for the current user's roles/groups.
        if filterPermissions:
            terms.append(In('allowedRolesAndUsers',
                            allowedRolesAndGroups(self.context)))

        return And(*terms)
Beispiel #3
0
 def findMatchingOrganizers(self, organizerClass, organizerPath, userFilter):
     """Build a path query matching organizers of ``organizerClass``
     whose uid matches ``userFilter`` (case-insensitively) under
     ``organizerPath``.  Returns None when the filter is not a valid
     regex or nothing matches.
     """
     pattern = '(?i)^%s.*%s.*' % (organizerPath, userFilter)
     if not self.validRegex(pattern):
         return None
     organizer_query = (
         Eq('objectImplements',
            'Products.ZenModel.%s.%s' % (organizerClass, organizerClass))
         & MatchRegexp('uid', pattern))
     matches = ICatalogTool(self._dmd).search(query=organizer_query)
     match_paths = [brain.getPath() for brain in matches]
     if match_paths:
         return Generic('path', {'query': match_paths})
Beispiel #4
0
    def _componentSearch(self, uid=None, types=(), meta_type=(), start=0,
                         limit=None, sort='name', dir='ASC', name=None, keys=()):
        """Search the device's componentSearch catalog for components
        under ``uid`` and return a sorted, paged SearchResults of
        IInfo-wrapped components (with metric data preloaded for the
        returned page).
        """
        descending = (dir == 'DESC')
        if isinstance(types, basestring):
            types = (types,)
        if isinstance(meta_type, basestring):
            meta_type = (meta_type,)

        terms = []
        if meta_type:
            terms.append(Or(*[Eq('meta_type', mt) for mt in meta_type]))
        terms.append(Generic('getAllPaths', uid))

        obj = self._getObject(uid)
        catalog = obj.device().componentSearch
        # Older devices may predate the path index; create it on demand.
        if 'getAllPaths' not in catalog.indexes():
            obj.device()._createComponentSearchPathIndex()
        brains = catalog.evalAdvancedQuery(And(*terms))

        # Wrap each brain's object in its IInfo adapter.
        comps = [IInfo(unbrain(b)) for b in brains]

        # Apply the optional name filter.
        if name is not None:
            comps = self._filterComponents(comps, keys, name)

        total = len(comps)
        hash_ = str(total)

        def sort_key(info):
            # Normalize list-valued, callable and IInfo-valued
            # attributes down to a plain sortable value.
            value = getattr(info, sort)
            if value:
                if isinstance(value, list):
                    value = value[0]
                if callable(value):
                    value = value()
                if IInfo.providedBy(value):
                    value = value.name
            return value

        ordered = sorted(comps, key=sort_key, reverse=descending)

        # Page the results.
        if limit is None:
            page = ordered[start:]
        else:
            page = ordered[start:start + limit]

        # Preload metric (rrd) data for the visible page only.
        self.bulkLoadMetricData(page)

        return SearchResults(iter(page), total, hash_, False)
Beispiel #5
0
    def _componentSearch(self,
                         uid=None,
                         types=(),
                         meta_type=(),
                         start=0,
                         limit=None,
                         sort='name',
                         dir='ASC',
                         name=None,
                         keys=()):
        """Search the device's componentSearch catalog for components
        under ``uid`` and return a sorted, paged SearchResults of
        IInfo-wrapped components.
        """
        reverse = dir == 'DESC'
        if isinstance(types, basestring):
            types = (types, )
        if isinstance(meta_type, basestring):
            meta_type = (meta_type, )
        querySet = []
        if meta_type:
            querySet.append(Or(*(Eq('meta_type', t) for t in meta_type)))
        querySet.append(Generic('getAllPaths', uid))
        query = And(*querySet)
        obj = self._getObject(uid)
        # Lazily create the componentSearch catalog on devices that
        # predate it.
        if getattr(aq_base(obj.device()), 'componentSearch', None) is None:
            obj.device()._create_componentSearch()

        cat = obj.device().componentSearch
        if 'getAllPaths' not in cat.indexes():
            obj.device()._createComponentSearchPathIndex()
        brains = cat.evalAdvancedQuery(query)

        # unbrain the results (py2 map returns a list)
        comps = map(IInfo, map(unbrain, brains))

        # filter the components.  (This previously unbrained and
        # wrapped the same brains a second time and recomputed
        # total/hash_ twice; reuse the list built above instead.)
        if name is not None:
            comps = self._filterComponents(comps, keys, name)

        total = len(comps)
        hash_ = str(total)

        # sort the components on the requested attribute
        sortedResults = list(
            sorted(comps, key=lambda x: getattr(x, sort), reverse=reverse))

        # limit the search results to the specified range
        if limit is None:
            pagedResult = sortedResults[start:]
        else:
            pagedResult = sortedResults[start:start + limit]

        return SearchResults(iter(pagedResult), total, hash_, False)
Beispiel #6
0
    def __call__(self, result=None, specification=None, **kwargs):
        """Catalog-backed reference lookup.

        Combines the request's ``base_query``/``search_query`` with a
        search term matched against the requested search fields and
        returns the matching catalog brains.  Fields without an index
        are filtered in python after the catalog search; optionally
        falls back to ``base_query`` alone when ``force_all`` is set.
        """
        searchTerm = _c(self.request.get('searchTerm', '')).lower()
        force_all = self.request.get('force_all', 'false')
        searchFields = 'search_fields' in self.request \
            and json.loads(_u(self.request.get('search_fields', '[]'))) \
            or ('Title',)
        # lookup objects from ZODB
        catalog_name = _c(self.request.get('catalog_name', 'portal_catalog'))
        catalog = getToolByName(self.context, catalog_name)

        # json.loads does unicode conversion, which will fail in the catalog
        # search for some cases. So we need to convert the strings to utf8
        # see: https://github.com/senaite/bika.lims/issues/443
        base_query = json.loads(self.request['base_query'])
        search_query = json.loads(self.request.get('search_query', "{}"))
        base_query = self.to_utf8(base_query)
        search_query = self.to_utf8(search_query)

        # first with all queries
        contentFilter = dict((k, v) for k, v in base_query.items())
        contentFilter.update(search_query)

        # Sorted by? (by default, Title)
        sort_on = self.request.get('sidx', 'Title')
        if sort_on == 'Title':
            sort_on = 'sortable_title'
        if sort_on:
            # Check if is an index and if is sortable. Otherwise, assume the
            # sorting must be done manually
            index = catalog.Indexes.get(sort_on, None)
            if index and index.meta_type in ['FieldIndex', 'DateIndex']:
                contentFilter['sort_on'] = sort_on
                # Sort order?
                sort_order = self.request.get('sord', 'asc')
                if (sort_order in ['desc', 'reverse', 'rev', 'descending']):
                    contentFilter['sort_order'] = 'descending'
                else:
                    contentFilter['sort_order'] = 'ascending'

        # Can do a search for indexes?
        criterias = []
        fields_wo_index = []
        if searchTerm:
            for field_name in searchFields:
                index = catalog.Indexes.get(field_name, None)
                if not index:
                    fields_wo_index.append(field_name)
                    continue
                # BUG FIX: these tests previously read ``in ('ZCTextIndex')``
                # (no comma), which is a *substring* test against a string,
                # not tuple membership — e.g. 'TextIndex' matched too.
                if index.meta_type == 'ZCTextIndex':
                    if searchTerm.isspace():
                        # searchTerm != ' ' added because of
                        # https://github.com/plone/Products.CMFPlone/issues
                        # /1537
                        searchTerm = ''
                        continue
                    else:
                        temp_st = searchTerm + '*'
                        criterias.append(MatchRegexp(field_name, temp_st))
                elif index.meta_type == 'FieldIndex':
                    criterias.append(MatchRegexp(field_name, searchTerm))
                elif index.meta_type == 'DateIndex':
                    msg = "Unhandled DateIndex search on '%s'" % field_name
                    from bika.lims import logger
                    logger.warn(msg)
                else:
                    criterias.append(Generic(field_name, searchTerm))

        if criterias:
            # Advanced search
            advanced_query = catalog.makeAdvancedQuery(contentFilter)
            aq_or = Or()
            for criteria in criterias:
                aq_or.addSubquery(criteria)
            advanced_query &= aq_or
            brains = catalog.evalAdvancedQuery(advanced_query)
        else:
            brains = catalog(contentFilter)

        # Post-filter in python on the fields that have no catalog index.
        if brains and searchTerm and fields_wo_index:
            _brains = []
            for brain in brains:
                for field_name in fields_wo_index:
                    value = getattr(brain, field_name, None)
                    if not value:
                        instance = brain.getObject()
                        schema = instance.Schema()
                        if field_name in schema:
                            value = schema[field_name].get(instance)
                    if callable(value):
                        value = value()
                    if value and value.lower().find(searchTerm) > -1:
                        _brains.append(brain)
                        break
            brains = _brains

        # Then just base_query alone ("show all if no match")
        if not brains and force_all.lower() == 'true':
            if search_query:
                brains = catalog(base_query)
                if brains and searchTerm:
                    _brains = [
                        p for p in brains
                        if p.Title.lower().find(searchTerm) > -1
                    ]
                    if _brains:
                        brains = _brains

        return brains
Beispiel #7
0
    def _process_request(self):
        """Scan the request for listing parameters and configure this
        view's state (review_state, contentFilter, sort, paging and the
        AdvancedQuery ``self.And``/``self.Or`` term lists).

        Per-index request variables named ``<form_id>_<index_name>`` are
        AND-ed; a ``<form_id>_filter`` value is OR-ed against all filter
        indexes.
        """
        # Use this function from a template that is using bika_listing_table
        # in such a way that the table_only request var will be used to
        # in-place-update the table.
        form_id = self.form_id
        form = self.request.form
        workflow = getToolByName(self.context, 'portal_workflow')
        catalog = getToolByName(self.context, self.catalog)

        # Some ajax calls duplicate form values?  I have not figured out why!
        if self.request.form:
            for key, value in self.request.form.items():
                if isinstance(value, list):
                    self.request.form[key] = self.request.form[key][0]

        # If table_only specifies another form_id, then we abort.
        # this way, a single table among many can request a redraw,
        # and only its content will be rendered.
        if form_id not in self.request.get('table_only', form_id):
            return ''

        ## review_state_selector - value can be specified in request
        selected_state = self.request.get("%s_review_state" % form_id,
                                          'default')
        # get review_state id=selected_state
        states = [r for r in self.review_states if r['id'] == selected_state]
        self.review_state = states and states[0] or self.review_states[0]
        # set selected review_state ('default'?) to request
        self.request['review_state'] = self.review_state['id']

        # contentFilter is expected in every self.review_state.
        for k, v in self.review_state['contentFilter'].items():
            self.contentFilter[k] = v
        # sort on
        self.sort_on = self.request.get(form_id + '_sort_on', None)
        # manual_sort_on: only sort the current batch of items
        # this is a compromise for sorting without column indexes
        self.manual_sort_on = None
        if self.sort_on \
           and self.sort_on in self.columns.keys() \
           and self.columns[self.sort_on].get('index', None):
            idx = self.columns[self.sort_on].get('index', self.sort_on)
            self.contentFilter['sort_on'] = idx
        else:
            if self.sort_on:
                self.manual_sort_on = self.sort_on
                if 'sort_on' in self.contentFilter:
                    del self.contentFilter['sort_on']

        # sort order
        self.sort_order = self.request.get(form_id + '_sort_order', '')
        if self.sort_order:
            self.contentFilter['sort_order'] = self.sort_order
        else:
            if 'sort_order' not in self.contentFilter:
                self.sort_order = 'ascending'
                self.contentFilter['sort_order'] = 'ascending'
                self.request.set(form_id+'_sort_order', 'ascending')
            else:
                self.sort_order = self.contentFilter['sort_order']
        if self.manual_sort_on:
            del self.contentFilter['sort_order']

        # pagesize
        pagesize = self.request.get(form_id + '_pagesize', self.pagesize)
        if type(pagesize) in (list, tuple):
            pagesize = pagesize[0]
        try:
            pagesize = int(pagesize)
        except (ValueError, TypeError):
            # non-numeric request value: fall back to a sane default
            pagesize = self.pagesize = 10
        self.pagesize = pagesize
        # Plone's batching wants this variable:
        self.request.set('pagesize', self.pagesize)
        # and we want to make our choice remembered in bika_listing also
        self.request.set(self.form_id + '_pagesize', self.pagesize)

        # pagenumber
        self.pagenumber = int(self.request.get(form_id + '_pagenumber', self.pagenumber))
        # Plone's batching wants this variable:
        self.request.set('pagenumber', self.pagenumber)

        # index filters.
        self.And = []
        self.Or = []
        for k, v in self.columns.items():
            if 'index' not in v \
               or v['index'] == 'review_state' \
               or v['index'] in self.filter_indexes:
                continue
            self.filter_indexes.append(v['index'])

        # any request variable named ${form_id}_{index_name}
        # will pass its value to that index in self.contentFilter.
        # all conditions using ${form_id}_{index_name} are searched with AND
        for index in self.filter_indexes:
            idx = catalog.Indexes.get(index, None)
            if not idx:
                logger.debug("index named '%s' not found in %s.  "
                             "(Perhaps the index is still empty)." %
                            (index, self.catalog))
                continue
            request_key = "%s_%s" % (form_id, index)
            value = self.request.get(request_key, '')
            if len(value) > 1:
                if idx.meta_type in ('ZCTextIndex', 'FieldIndex'):
                    self.And.append(MatchRegexp(index, value))
                elif idx.meta_type == 'DateIndex':
                    logger.info("Unhandled DateIndex search on '%s'"%index)
                    continue
                else:
                    self.Or.append(Generic(index, value))

        # if there's a ${form_id}_filter in request, then all indexes
        # are searched for its value.
        # ${form_id}_filter is searched with OR against all indexes
        request_key = "%s_filter" % form_id
        value = self.request.get(request_key, '')
        if type(value) in (list, tuple):
            value = value[0]
        if len(value) > 1:
            for index in self.filter_indexes:
                idx = catalog.Indexes.get(index, None)
                if not idx:
                    logger.debug("index named '%s' not found in %s.  "
                                 "(Perhaps the index is still empty)." %
                                 (index, self.catalog))
                    continue
                if idx.meta_type in ('ZCTextIndex', 'FieldIndex'):
                    self.Or.append(MatchRegexp(index, value))
                    # https://github.com/bikalabs/Bika-LIMS/issues/1069
                    vals = value.split('-')
                    if len(vals) > 2:
                        valroot = vals[0]
                        for i in range(1, len(vals)):
                            valroot = '%s-%s' % (valroot, vals[i])
                            self.Or.append(MatchRegexp(index, valroot+'-*'))
                elif idx.meta_type == 'DateIndex':
                    if type(value) in (list, tuple):
                        value = value[0]
                    if value.find(":") > -1:
                        try:
                            lohi = [DateTime(x) for x in value.split(":")]
                        except Exception:
                            # BUG FIX: previously fell through and used an
                            # unbound (or stale, from a prior iteration)
                            # ``lohi`` below; skip this index instead.
                            logger.info("Error (And, DateIndex='%s', term='%s')"%(index,value))
                            continue
                        self.Or.append(Between(index, lohi[0], lohi[1]))
                    else:
                        try:
                            self.Or.append(Eq(index, DateTime(value)))
                        except Exception:
                            logger.info("Error (Or, DateIndex='%s', term='%s')"%(index,value))
                else:
                    self.Or.append(Generic(index, value))
            self.Or.append(MatchRegexp('review_state', value))

        # get toggle_cols cookie value
        # and modify self.columns[]['toggle'] to match.
        toggle_cols = self.get_toggle_cols()
        for col in self.columns.keys():
            self.columns[col]['toggle'] = col in toggle_cols
    def _componentSearch(self,
                         uid=None,
                         types=(),
                         meta_type=(),
                         start=0,
                         limit=None,
                         sort='name',
                         dir='ASC',
                         name=None,
                         keys=()):
        """Search for components under ``uid``.

        Tries the fast per-meta_type typed catalog first (via
        ``_typecatComponentBrains``); otherwise falls back to the
        device's componentSearch catalog, wrapping, filtering, sorting
        and paging the results, then preloading metric data and
        (optionally) worst-event severities for the returned page.
        """
        reverse = dir == 'DESC'
        if isinstance(meta_type, basestring) and get_component_field_spec(
                meta_type) is not None:
            brains, total = self._typecatComponentBrains(
                uid, types, meta_type, start, limit, sort, dir, name, keys)
            if brains is not None:
                return self._typecatComponentPostProcess(
                    brains, total, sort, reverse)
        if isinstance(meta_type, basestring):
            meta_type = (meta_type, )
        if isinstance(types, basestring):
            types = (types, )
        querySet = []
        if meta_type:
            querySet.append(Or(*(Eq('meta_type', t) for t in meta_type)))
        querySet.append(Generic('getAllPaths', uid))
        query = And(*querySet)
        obj = self._getObject(uid)

        cat = obj.device().componentSearch
        if 'getAllPaths' not in cat.indexes():
            obj.device()._createComponentSearchPathIndex()
        brains = cat.evalAdvancedQuery(query)

        # unbrain the results; skip stale/broken catalog entries rather
        # than failing the whole search.
        comps = []
        for brain in brains:
            try:
                comps.append(IInfo(unbrain(brain)))
            except Exception:
                # was a bare ``except:`` — don't swallow SystemExit /
                # KeyboardInterrupt
                log.warn(
                    'There is broken component "{}" in componentSearch catalog on {} device.'
                    .format(brain.id,
                            obj.device().id))

        # filter the components
        if name is not None:
            comps = self._filterComponents(comps, keys, name)

        total = len(comps)
        hash_ = str(total)

        def componentSortKey(parent):
            # Normalize list-valued, callable and IInfo-valued
            # attributes, then zero-pad embedded numbers so e.g.
            # eth2 sorts before eth10.
            val = getattr(parent, sort)
            if val:
                if isinstance(val, list):
                    val = val[0]
                if callable(val):
                    val = val()
                if IInfo.providedBy(val):
                    val = val.name
            return pad_numeric_values_for_indexing(val)

        # sort the components
        sortedResults = list(
            sorted(comps, key=componentSortKey, reverse=reverse))

        # limit the search results to the specified range
        if limit is None:
            pagedResult = sortedResults[start:]
        else:
            pagedResult = sortedResults[start:start + limit]

        # fetch any rrd data necessary
        self.bulkLoadMetricData(pagedResult)

        # Do one big lookup of component events and add to the result objects
        showSeverityIcon = self.context.dmd.UserInterfaceSettings.getInterfaceSettings(
        ).get('showEventSeverityIcons')
        if showSeverityIcon:
            uuids = [r.uuid for r in pagedResult]
            zep = getFacade('zep')
            severities = zep.getWorstSeverity(uuids)
            for r in pagedResult:
                r.setWorstEventSeverity(severities[r.uuid])

        return SearchResults(iter(pagedResult), total, hash_, False)
Beispiel #9
0
    def _process_request(self):
        """Scan request for parameters and configure class attributes
        accordingly.  Setup AdvancedQuery or catalog contentFilter.

        Request parameters:
        <form_id>_limit_from:       index of the first item to display
        <form_id>_rows_only:        returns only the rows
        <form_id>_sort_on:          list items are sorted on this key
        <form_id>_manual_sort_on:   no index - sort with python
        <form_id>_pagesize:         number of items
        <form_id>_filter:           A string, will be regex matched against
                                    indexes in <form_id>_filter_indexes
        <form_id>_filter_indexes:   list of index names which will be searched
                                    for the value of <form_id>_filter

        <form_id>_<index_name>:     Any index name can be used after <form_id>_.

            any request variable named ${form_id}_{index_name} will pass its
            value to that index in self.contentFilter.

            All conditions using ${form_id}_{index_name} are searched with AND.

            The parameter value will be matched with regexp if a FieldIndex or
            TextIndex.  Else, AdvancedQuery.Generic is used.
        """
        form_id = self.form_id
        form = self.request.form
        workflow = getToolByName(self.context, 'portal_workflow')
        catalog = getToolByName(self.context, self.catalog)

        # Some ajax calls duplicate form values?  I have not figured out why!
        if self.request.form:
            for key, value in self.request.form.items():
                if isinstance(value, list):
                    self.request.form[key] = self.request.form[key][0]

        # If table_only specifies another form_id, then we abort.
        # this way, a single table among many can request a redraw,
        # and only its content will be rendered.
        # NOTE(review): the ``or`` aborts when EITHER table_only or
        # rows_only names a different form — confirm this is intended.
        if form_id not in self.request.get('table_only', form_id) \
            or form_id not in self.request.get('rows_only', form_id):
            return ''

        self.rows_only = self.request.get('rows_only','') == form_id
        self.limit_from = int(self.request.get(form_id + '_limit_from',0))

        # contentFilter is allowed in every self.review_state.
        for k, v in self.review_state.get('contentFilter', {}).items():
            self.contentFilter[k] = v

        # sort on
        self.sort_on = self.sort_on \
            if hasattr(self, 'sort_on') and self.sort_on \
            else None
        self.sort_on = self.request.get(form_id + '_sort_on', self.sort_on)
        self.sort_order = self.request.get(form_id + '_sort_order', 'ascending')
        self.manual_sort_on = self.request.get(form_id + '_manual_sort_on', None)

        if self.sort_on:
            if self.sort_on in self.columns.keys():
               if self.columns[self.sort_on].get('index', None):
                   self.request.set(form_id+'_sort_on', self.sort_on)
                   # The column can be sorted directly using an index
                   idx = self.columns[self.sort_on]['index']
                   self.sort_on = idx
                   # Don't sort manually!
                   self.manual_sort_on = None
               else:
                   # The column must be manually sorted using python
                   self.manual_sort_on = self.sort_on
            else:
                # We cannot sort for a column that doesn't exist!
                msg = "{}: sort_on is '{}', not a valid column".format(
                    self, self.sort_on)
                logger.error(msg)
                self.sort_on = None

        if self.manual_sort_on:
            self.manual_sort_on = self.manual_sort_on[0] \
                                if type(self.manual_sort_on) in (list, tuple) \
                                else self.manual_sort_on
            if self.manual_sort_on not in self.columns.keys():
                # We cannot sort for a column that doesn't exist!
                msg = "{}: manual_sort_on is '{}', not a valid column".format(
                    self, self.manual_sort_on)
                logger.error(msg)
                self.manual_sort_on = None

        if self.sort_on or self.manual_sort_on:
            # By default, if sort_on is set, sort the items ASC
            # Trick to allow 'descending' keyword instead of 'reverse'
            self.sort_order = 'reverse' if self.sort_order \
                                        and self.sort_order[0] in ['d','r'] \
                                        else 'ascending'
        else:
            # By default, sort on created
            self.sort_order = 'reverse'
            self.sort_on = 'created'

        self.contentFilter['sort_order'] = self.sort_order
        if self.sort_on:
            self.contentFilter['sort_on'] = self.sort_on

        # pagesize
        pagesize = self.request.get(form_id + '_pagesize', self.pagesize)
        if type(pagesize) in (list, tuple):
            pagesize = pagesize[0]
        try:
            pagesize = int(pagesize)
        except (ValueError, TypeError):
            # non-numeric request value: fall back to a sane default
            pagesize = self.pagesize = 10
        self.pagesize = pagesize
        # Plone's batching wants this variable:
        self.request.set('pagesize', self.pagesize)
        # and we want to make our choice remembered in bika_listing also
        self.request.set(self.form_id + '_pagesize', self.pagesize)

        # index filters.
        self.And = []
        self.Or = []
        for k, v in self.columns.items():
            if 'index' not in v \
               or v['index'] == 'review_state' \
               or v['index'] in self.filter_indexes:
                continue
            self.filter_indexes.append(v['index'])

        # any request variable named ${form_id}_{index_name}
        # will pass its value to that index in self.contentFilter.
        # all conditions using ${form_id}_{index_name} are searched with AND
        for index in self.filter_indexes:
            idx = catalog.Indexes.get(index, None)
            if not idx:
                logger.debug("index named '%s' not found in %s.  "
                             "(Perhaps the index is still empty)." %
                            (index, self.catalog))
                continue
            request_key = "%s_%s" % (form_id, index)
            value = self.request.get(request_key, '')
            if len(value) > 1:
                if idx.meta_type in ('ZCTextIndex', 'FieldIndex'):
                    self.And.append(MatchRegexp(index, value))
                elif idx.meta_type == 'DateIndex':
                    logger.info("Unhandled DateIndex search on '%s'"%index)
                    continue
                else:
                    self.Or.append(Generic(index, value))

        # if there's a ${form_id}_filter in request, then all indexes
        # are searched for its value.
        # ${form_id}_filter is searched with OR against all indexes
        request_key = "%s_filter" % form_id
        value = self.request.get(request_key, '')
        if type(value) in (list, tuple):
            value = value[0]
        if len(value) > 1:
            for index in self.filter_indexes:
                idx = catalog.Indexes.get(index, None)
                if not idx:
                    logger.debug("index named '%s' not found in %s.  "
                                 "(Perhaps the index is still empty)." %
                                 (index, self.catalog))
                    continue
                if idx.meta_type in ('ZCTextIndex', 'FieldIndex'):
                    self.Or.append(MatchRegexp(index, value))
                    self.expand_all_categories = True
                    # https://github.com/bikalabs/Bika-LIMS/issues/1069
                    vals = value.split('-')
                    if len(vals) > 2:
                        valroot = vals[0]
                        for i in range(1, len(vals)):
                            valroot = '%s-%s' % (valroot, vals[i])
                            self.Or.append(MatchRegexp(index, valroot+'-*'))
                            self.expand_all_categories = True
                elif idx.meta_type == 'DateIndex':
                    if type(value) in (list, tuple):
                        value = value[0]
                    if value.find(":") > -1:
                        try:
                            lohi = [DateTime(x) for x in value.split(":")]
                        except Exception:
                            # BUG FIX: previously fell through and used an
                            # unbound (or stale, from a prior iteration)
                            # ``lohi`` below; skip this index instead.
                            logger.info("Error (And, DateIndex='%s', term='%s')"%(index,value))
                            continue
                        self.Or.append(Between(index, lohi[0], lohi[1]))
                        self.expand_all_categories = True
                    else:
                        try:
                            self.Or.append(Eq(index, DateTime(value)))
                            self.expand_all_categories = True
                        except Exception:
                            logger.info("Error (Or, DateIndex='%s', term='%s')"%(index,value))
                else:
                    self.Or.append(Generic(index, value))
                    self.expand_all_categories = True
            self.Or.append(MatchRegexp('review_state', value))

        # get toggle_cols cookie value
        # and modify self.columns[]['toggle'] to match.
        toggle_cols = self.get_toggle_cols()
        for col in self.columns.keys():
            self.columns[col]['toggle'] = col in toggle_cols
# Beispiel #10
# 0
    def run(self, dmd):
        """Run the report, returning an Availability object for each device.

        Devices are selected through the global catalog using the configured
        class/location/system/group/device filters; per-device downtime is
        accumulated from matching ZEP events clipped to the report window.
        """
        # Note: we don't handle overlapping "down" events, so down
        # time could get double-counted.
        __pychecker__ = 'no-local'
        now = time.time()
        zep = getFacade("zep", dmd)

        path = '/zport/dmd/'

        # All path filters are ANDed together; each organizer filter narrows
        # the device set with a path-prefix query under /zport/dmd/.
        pathFilterList = [Generic('path', {'query': path})]

        if self.DeviceClass:
            pathFilterList.append(
                Generic(
                    'path',
                    {'query': ''.join([path, 'Devices', self.DeviceClass])}))
        if self.Location:
            pathFilterList.append(
                Generic(
                    'path',
                    {'query': ''.join([path, 'Locations', self.Location])}))
        if self.System:
            pathFilterList.append(
                Generic('path',
                        {'query': ''.join([path, 'Systems', self.System])}))
        if self.DeviceGroup:
            pathFilterList.append(
                Generic(
                    'path',
                    {'query': ''.join([path, 'Groups', self.DeviceGroup])}))
        if self.device:
            # exact match on either the device name or its id
            pathFilterList.append(
                Or(Eq('name', self.device), Eq('id', self.device)))

        results = ICatalogTool(dmd.Devices).search(
            types='Products.ZenModel.Device.Device',
            query=And(*pathFilterList))

        if not results.total:
            return []

        deviceList = {}
        tag_uuids = []
        # deviceId -> accumulated downtime (ms); seeded with 0 below so
        # devices without any matching events still appear in the report
        accumulator = defaultdict(int)
        for brain in results:
            try:
                obj = brain.getObject()
                deviceList[obj.id] = obj
                tag_uuids.append(brain.uuid)
                accumulator[obj.id] = 0
            except Exception:
                log.warn("Unable to unbrain at path %s", brain.getPath())

        endDate = self.endDate or Availability.getDefaultAvailabilityEnd()
        endDate = min(endDate, now)
        startDate = self.startDate
        if not startDate:
            startDate = Availability.getDefaultAvailabilityStart(dmd)

        # convert start and end date to integer milliseconds for defining filters
        startDate = int(startDate * 1000)
        endDate = int(endDate * 1000)
        total_report_window = endDate - startDate

        create_filter_args = {
            'operator':
            zep.AND,
            'severity':
            _severityGreaterThanOrEqual(self.severity),
            # normalize the event class to a trailing-slash prefix
            'event_class':
            self.eventClass +
            ('/' if not self.eventClass.endswith('/') else '')
        }

        if self.agent:
            create_filter_args['agent'] = self.agent
        if self.monitor is not None:
            create_filter_args['monitor'] = self.monitor

        # add filters on details
        filter_details = {}
        if self.DevicePriority is not None:
            filter_details[
                'zenoss.device.priority'] = "%d:" % self.DevicePriority
        if self.prodState:
            filter_details[
                'zenoss.device.production_state'] = "%d:" % self.prodState
        if filter_details:
            create_filter_args['details'] = filter_details

        create_filter_args['tags'] = tag_uuids

        # query zep for matching event summaries
        # 1. get all open events that:
        #    - first_seen < endDate
        #    (only need to check active events)
        # 2. get all closed events that:
        #    - first_seen < endDate
        #    - status_change > startDate
        #    (must get both active and archived events)

        # 1. get open events
        create_filter_args['first_seen'] = (0, endDate)
        create_filter_args['status'] = OPEN_EVENT_STATUSES
        event_filter = zep.createEventFilter(**create_filter_args)
        open_events = zep.getEventSummariesGenerator(event_filter)

        # 2. get closed events
        # NOTE: create_filter_args is mutated in place, so the open-event
        # filter above must be created before status_change is added here.
        create_filter_args['status_change'] = (startDate + 1, )
        create_filter_args['status'] = CLOSED_EVENT_STATUSES
        event_filter = zep.createEventFilter(**create_filter_args)
        closed_events = zep.getEventSummariesGenerator(event_filter)
        # must also get events from archive
        closed_events_from_archive = zep.getEventSummariesGenerator(
            event_filter, archive=True)

        def eventDowntime(evt):
            """Return the event's downtime in ms clipped to the report
            window, or 0 for zero-length events."""
            first = evt['first_seen_time']
            # if event is still open, downtime persists til end of report window
            if evt['status'] not in CLOSED_EVENT_STATUSES:
                last = endDate
            else:
                last = evt['status_change_time']

            # discard any events that have no elapsed time
            if first == last:
                return 0

            # clip first and last within report time window
            first = max(first, startDate)
            last = min(last, endDate)

            return (last - first)

        def eventElementIdentifier(evt):
            """Return the element id the event's first occurrence refers to."""
            return evt['occurrence'][0]['actor'].get('element_identifier')

        for evt in open_events:
            dt = eventDowntime(evt)
            if dt == 0:
                continue
            accumulator[eventElementIdentifier(evt)] += dt

        # track summary uuids so the archive pass below does not double-count
        # events present in both the summary and the archive
        summary_closed_event_uuids = {}
        for evt in closed_events:
            summary_closed_event_uuids[evt['uuid']] = True
            dt = eventDowntime(evt)
            if dt == 0:
                continue
            accumulator[eventElementIdentifier(evt)] += dt

        for evt in closed_events_from_archive:
            if evt['uuid'] in summary_closed_event_uuids:
                continue
            dt = eventDowntime(evt)
            if dt == 0:
                continue
            accumulator[eventElementIdentifier(evt)] += dt

        availabilityReport = []
        for deviceId, downtime in sorted(accumulator.items()):
            device = deviceList.get(deviceId, None)
            if device:
                sysname = device.getSystemNamesString()
                link = device.getDeviceLink()
                title = device.titleOrId()
                availabilityReport.append(
                    Availability(deviceId, '', downtime, total_report_window,
                                 sysname, link, title))
                # drop this persistent object's loaded state (ZODB
                # _p_invalidate) to keep memory bounded over large reports
                device._p_invalidate()
        return availabilityReport
    def run(self, dmd):
        """Run the report, returning an AvailabilityColl object for each device.

        Devices are selected through the global catalog using the configured
        class/location/system/group/device-name filters; per-device downtime
        is accumulated from matching ZEP events clipped to the report window.
        A debug trace is written to $ZENHOME/log/AvailabilityRep2.out.
        """
        # Note: we don't handle overlapping "down" events, so down
        # time could get double-counted.
        __pychecker__ = 'no-local'
        rf2Filename = zenhome + '/log/AvailabilityRep2.out'
        # Context manager guarantees the trace file is closed on every exit
        # path; the original leaked the handle on the early return and on
        # exceptions.
        with open(rf2Filename, 'w') as rf2:
            rf2.write('CReport - in run\n')
            now = time.time()
            zep = getFacade("zep", dmd)

            rf2.write('CReport - start of run \n')
            path = '/zport/dmd/'
            # All path filters are ANDed; the class filter doubles as the
            # base /zport/dmd/Devices prefix when DeviceClass is unset.
            pathFilterList = [
                Generic(
                    'path',
                    {'query': ''.join([path, 'Devices', self.DeviceClass or ''])})
            ]
            if self.Location:
                pathFilterList.append(
                    Generic(
                        'path',
                        {'query': ''.join([path, 'Locations', self.Location])}))
            if self.System:
                pathFilterList.append(
                    Generic('path',
                            {'query': ''.join([path, 'Systems', self.System])}))
            if self.DeviceGroup:
                pathFilterList.append(
                    Generic(
                        'path',
                        {'query': ''.join([path, 'Groups', self.DeviceGroup])}))
            if self.device:
                # Case-insensitive substring match on device name or id
                # (regex variant of the exact-match filter used elsewhere).
                pathFilterList.append(
                    Or(MatchRegexp('name', '(?i).*%s.*' % self.device),
                       MatchRegexp('id', '(?i).*%s.*' % self.device)))

            results = ICatalogTool(dmd.Devices).search(
                types='Products.ZenModel.Device.Device',
                query=And(*pathFilterList))
            rf2.write('pathFilterList is %s \n' % (pathFilterList))
            rf2.write('results is %s \n' % (results))

            if not results.total:
                return []

            deviceList = {}
            tag_uuids = []
            # deviceId -> accumulated downtime (ms); seeded with 0 so devices
            # without any matching events still appear in the report
            accumulator = defaultdict(int)
            for brain in results:
                try:
                    obj = brain.getObject()
                    deviceList[obj.id] = obj
                    tag_uuids.append(brain.uuid)
                    accumulator[obj.id] = 0
                    # reuse obj instead of a second brain.getObject() call
                    rf2.write("obj is %s uuid is %s \n" % (obj, brain.uuid))
                except Exception:
                    log.warn("Unable to unbrain at path %s", brain.getPath())
                    # BUG FIX: file.write() takes a single string; the
                    # original passed (fmt, arg), which raised TypeError.
                    rf2.write("Unable to unbrain at path %s\n" % brain.getPath())

            endDate = self.endDate or AvailabilityColl.getDefaultAvailabilityEnd()
            endDate = min(endDate, now)
            startDate = self.startDate
            if not startDate:
                startDate = AvailabilityColl.getDefaultAvailabilityStart(dmd)

            # convert start and end date to integer milliseconds for defining filters
            startDate = int(startDate * 1000)
            endDate = int(endDate * 1000)
            total_report_window = endDate - startDate

            create_filter_args = {
                'operator':
                zep.AND,
                'severity':
                _severityGreaterThanOrEqual(self.severity),
                # normalize the event class to a trailing-slash prefix
                'event_class':
                self.eventClass +
                ('/' if not self.eventClass.endswith('/') else '')
            }

            if self.agent:
                create_filter_args['agent'] = self.agent
            if self.monitor is not None:
                create_filter_args['monitor'] = self.monitor

            rf2.write(
                'device is %s, component is %s,  location is %s  priority is %s, prodState is %s \n'
                % (self.device, self.component, self.Location, self.DevicePriority,
                   self.prodState))
            # add filters on details
            filter_details = {}
            if self.DevicePriority is not None:
                filter_details[
                    'zenoss.device.priority'] = "%d:" % self.DevicePriority
            if self.prodState:
                filter_details[
                    'zenoss.device.production_state'] = "%d:" % self.prodState
            if filter_details:
                create_filter_args['details'] = filter_details

            create_filter_args['tags'] = tag_uuids

            # query zep for matching event summaries
            # 1. get all open events that:
            #    - first_seen < endDate
            #    (only need to check active events)
            # 2. get all closed events that:
            #    - first_seen < endDate
            #    - status_change > startDate
            #    (must get both active and archived events)

            # 1. get open events
            create_filter_args['first_seen'] = (0, endDate)
            create_filter_args['status'] = OPEN_EVENT_STATUSES
            rf2.write(' create_filter_args dictionary for open events is %s \n' %
                      (create_filter_args))
            event_filter = zep.createEventFilter(**create_filter_args)
            open_events = zep.getEventSummariesGenerator(event_filter)

            # 2. get closed events
            # NOTE: create_filter_args is mutated in place, so the open-event
            # filter above must be created before status_change is added here.
            create_filter_args['status_change'] = (startDate + 1, )
            create_filter_args['status'] = CLOSED_EVENT_STATUSES
            rf2.write(' create_filter_args dictionary for closed events is %s \n' %
                      (create_filter_args))
            event_filter = zep.createEventFilter(**create_filter_args)
            closed_events = zep.getEventSummariesGenerator(event_filter)
            # must also get events from archive
            # NOTE(review): unlike the sibling report, archive results are not
            # de-duplicated against the summary results here, so an event in
            # both can be counted twice -- preserved as-is; confirm intent.
            closed_events_from_archive = zep.getEventSummariesGenerator(
                event_filter, archive=True)

            # Don't put print / log statements in the next block
            #  Previous block uses a Python generator function to deliver
            #  events asynchronously
            for evtsumm in chain(open_events, closed_events,
                                 closed_events_from_archive):
                first = evtsumm['first_seen_time']
                # if event is still open, downtime persists til end of report window
                if evtsumm['status'] not in CLOSED_EVENT_STATUSES:
                    last = endDate
                else:
                    last = evtsumm['status_change_time']

                # discard any events that have no elapsed time
                if first == last:
                    continue

                # clip first and last within report time window
                first = max(first, startDate)
                last = min(last, endDate)

                evt = evtsumm['occurrence'][0]
                evt_actor = evt['actor']
                device = evt_actor.get('element_identifier')
                accumulator[device] += (last - first)

            availabilityReport = []
            for deviceId, downtime in sorted(accumulator.items()):
                device = deviceList.get(deviceId, None)
                if device:
                    sysname = device.getSystemNamesString()
                    groupname = ', '.join(device.getDeviceGroupNames())
                    loc = device.getLocationName()
                    dclass = device.getDeviceClassPath()
                    link = device.getDeviceLink()
                    title = device.titleOrId()
                    availabilityReport.append(
                        AvailabilityColl(deviceId, '', downtime,
                                         total_report_window, groupname, sysname,
                                         loc, dclass, link, title))
                    # drop this persistent object's loaded state (ZODB
                    # _p_invalidate) to keep memory bounded over large reports
                    device._p_invalidate()

            return availabilityReport
    def _process_request(self):
        """Apply listing state from the request and cookies.

        Handles review_state selection, contentFilter assembly, sorting,
        batching (pagesize/pagenumber), per-index search terms (collected
        into self.And / self.Or) and column toggles.  Mutates
        self.contentFilter, self.And, self.Or, self.columns and several
        request variables.  Returns '' without processing when a table_only
        redraw was requested for a different form_id.
        """
        # Use this function from a template that is using bika_listing_table
        # in such a way that the table_only request var will be used to
        # in-place-update the table.
        form_id = self.form_id
        catalog = getToolByName(self.context, self.catalog)

        # If table_only specifies another form_id, then we abort.
        # this way, a single table among many can request a redraw,
        # and only its content will be rendered.
        if form_id not in self.request.get('table_only', form_id):
            return ''

        ## review_state_selector
        cookie = json.loads(self.request.get("review_state", '{}'))
        cookie_key = "%s%s" % (self.context.portal_type, form_id)
        # first check POST
        selected_state = self.request.get("%s_review_state" % form_id, '')
        if not selected_state:
            # then check cookie (the fallback is the literal 'default')
            selected_state = cookie.get(cookie_key, 'default')
        # get review_state id=selected_state
        states = [r for r in self.review_states if r['id'] == selected_state]
        review_state = states and states[0] or self.review_states[0]
        # NOTE(review): selected_state is always truthy at this point (the
        # cookie fallback above is 'default'), so this branch never fires.
        if not selected_state:
            selected_state = self.review_states[0]['id']

        # set request and cookie to currently selected state id
        self.review_state = cookie[cookie_key] = selected_state
        cookie = json.dumps(cookie)
        self.request['review_state'] = cookie
        self.request.response.setCookie('review_state', cookie, path="/")

        # contentFilter is expected in every review_state.
        for k, v in review_state['contentFilter'].items():
            self.contentFilter[k] = v

        # sort on
        sort_on = self.request.get(form_id + '_sort_on', '')
        # manual_sort_on: only sort the current batch of items
        # this is a compromise for sorting without column indexes
        self.manual_sort_on = None
        if sort_on \
           and sort_on in self.columns \
           and self.columns[sort_on].get('index', None):
            idx = self.columns[sort_on].get('index', sort_on)
            self.contentFilter['sort_on'] = idx
        elif sort_on:
            # no catalog index for this column: sort the batch in memory
            self.manual_sort_on = sort_on
            self.contentFilter.pop('sort_on', None)

        # sort order
        self.sort_order = self.request.get(form_id + '_sort_order', '')
        if self.sort_order:
            self.contentFilter['sort_order'] = self.sort_order
        elif 'sort_order' not in self.contentFilter:
            self.sort_order = 'ascending'
            self.contentFilter['sort_order'] = 'ascending'
            self.request.set(form_id + '_sort_order', 'ascending')
        else:
            self.sort_order = self.contentFilter['sort_order']
        if self.manual_sort_on:
            del self.contentFilter['sort_order']

        # pagesize
        pagesize = self.request.get(form_id + '_pagesize', self.pagesize)
        if isinstance(pagesize, (list, tuple)):
            pagesize = pagesize[0]
        try:
            pagesize = int(pagesize)
        except (TypeError, ValueError):
            # non-numeric request value: keep the configured default
            pagesize = self.pagesize
        self.pagesize = pagesize
        # Plone's batching wants this variable:
        self.request.set('pagesize', self.pagesize)

        # pagenumber
        self.pagenumber = int(
            self.request.get(form_id + '_pagenumber', self.pagenumber))
        # Plone's batching wants this variable:
        self.request.set('pagenumber', self.pagenumber)

        # index filters.
        self.And = []
        self.Or = []
        # collect indexes referenced by columns (skipping review_state and
        # duplicates) so they participate in the filtering below
        for k, v in self.columns.items():
            if 'index' not in v \
               or v['index'] == 'review_state' \
               or v['index'] in self.filter_indexes:
                continue
            self.filter_indexes.append(v['index'])

        # any request variable named ${form_id}_{index_name}
        # will pass its value to that index in self.contentFilter.
        # all conditions using ${form_id}_{index_name} are searched with AND
        for index in self.filter_indexes:
            idx = catalog.Indexes.get(index, None)
            if not idx:
                logger.debug("index named '%s' not found in %s.  "
                             "(Perhaps the index is still empty)." %
                             (index, self.catalog))
                continue
            request_key = "%s_%s" % (form_id, index)
            value = self.request.get(request_key, '')
            if len(value) > 1:
                if idx.meta_type in ('ZCTextIndex', 'FieldIndex'):
                    self.And.append(MatchRegexp(index, value))
                elif idx.meta_type == 'DateIndex':
                    logger.info("Unhandled DateIndex search on '%s'" % index)
                    continue
                else:
                    # NOTE(review): appended to self.Or, not self.And --
                    # inconsistent with the AND contract above, but preserved
                    # as-is; confirm intent before changing.
                    self.Or.append(Generic(index, value))

        # if there's a ${form_id}_filter in request, then all indexes
        # are searched for its value.
        # ${form_id}_filter is searched with OR against all indexes
        request_key = "%s_filter" % form_id
        value = self.request.get(request_key, '')
        if len(value) > 1:
            for index in self.filter_indexes:
                idx = catalog.Indexes.get(index, None)
                if not idx:
                    logger.debug("index named '%s' not found in %s.  "
                                 "(Perhaps the index is still empty)." %
                                 (index, self.catalog))
                    continue
                if idx.meta_type in ('ZCTextIndex', 'FieldIndex'):
                    self.Or.append(MatchRegexp(index, value))
                elif idx.meta_type == 'DateIndex':
                    if ":" in value:
                        try:
                            lohi = [DateTime(x) for x in value.split(":")]
                            # BUG FIX: this append used to sit outside the
                            # try, so a failed parse raised NameError on
                            # the unbound 'lohi'.
                            self.Or.append(Between(index, lohi[0], lohi[1]))
                        except Exception:
                            logger.info(
                                "Error (And, DateIndex='%s', term='%s')" %
                                (index, value))
                    else:
                        try:
                            self.Or.append(Eq(index, DateTime(value)))
                        except Exception:
                            logger.info(
                                "Error (Or, DateIndex='%s', term='%s')" %
                                (index, value))
                else:
                    self.Or.append(Generic(index, value))
            self.Or.append(MatchRegexp('review_state', value))

        # get toggle_cols cookie value
        # and modify self.columns[]['toggle'] to match.
        toggle_cols = self.get_toggle_cols()
        for col in self.columns:
            self.columns[col]['toggle'] = col in toggle_cols
    def contentFilterAQ(self):
        '''
        Parse request and generate AdvancedQuery query
        '''
        portal_state = getMultiAdapter((self.context, self.request),
                                       name="plone_portal_state")
        member = portal_state.member()

        def tokens_to_values(tokens, vocab):
            # Translate request tokens into vocabulary values; unknown
            # tokens are silently dropped.
            return [vocab.getTermByToken(tok).value
                    for tok in tokens if tok in vocab.by_token]

        query_parts = []

        # full-text filter
        text = self.request.get('datasets.filter.text')
        if text:
            query_parts.append(Eq('SearchableText', text))

        # genre filter; without a selection, restrict to the full known set
        genre = self.request.get('datasets.filter.genre')
        genre_vocab = self.dstools.genre_vocab
        if genre:
            query_parts.append(
                In('BCCDataGenre', tokens_to_values(genre, genre_vocab)))
        else:
            # if nothing selcted use all values in vocab
            query_parts.append(
                In('BCCDataGenre',
                   ('DataGenreSpeciesOccurrence', 'DataGenreSpeciesAbsence',
                    'DataGenreSpeciesAbundance', 'DataGenreE', 'DataGenreCC',
                    'DataGenreFC', 'DataGenreTraits', 'DataGenreSDMModel')))

        # the remaining vocabulary-backed facets all follow the same
        # token -> value translation pattern
        facet_specs = (
            ('datasets.filter.resolution', 'BCCResolution',
             self.dstools.resolution_vocab),
            ('datasets.filter.layer', 'BCCEnviroLayer',
             self.dstools.layer_vocab),
            ('datasets.filter.emsc', 'BCCEmissionScenario',
             self.dstools.emsc_vocab),
            ('datasets.filter.gcm', 'BCCGlobalClimateModel',
             self.dstools.gcm_vocab),
        )
        for request_key, index_name, vocab in facet_specs:
            selected = self.request.get(request_key)
            if selected:
                query_parts.append(
                    In(index_name, tokens_to_values(selected, vocab)))

        # TODO: year

        # FIXME: source filter is incomplete
        source = self.request.get('datasets.filter.source')
        if source:
            for token in source:
                if token == 'user':
                    query_parts.append(Eq('Creator', member.getId()))
                elif token == 'admin':
                    query_parts.append(Eq('Creator', 'BCCVL'))
                elif token == 'shared':
                    query_parts.append(
                        Not(In('Creator', (member.getId(), 'BCCVL'))))
                # FIXME: missing: shared, ala

        # add path filter
        if self.path:
            query_parts.append(
                Generic('path', {
                    'query': self.path,
                    'depth': -1
                }))
        # add additional query filters
        query_parts.append(Eq('object_provides', IDataset.__identifier__))
        return And(*query_parts)