def test_device_classes_devices(self):
        """ Check devices under device classes are the same """
        failed_device_classes = []
        for dc in DEVICE_CLASSES:
            dc_object = dmd.unrestrictedTraverse(dc)

            # Devices under device class in global catalog
            global_catalog = ICatalogTool(dc_object)
            global_catalog_brains = global_catalog.search('Products.ZenModel.Device.Device')
            global_catalog_results = set([ brain.getPath() for brain in global_catalog_brains.results ])

            # Devices under device class in model catalog
            model_catalog = IModelCatalogTool(dc_object)
            model_catalog_brains = model_catalog.search('Products.ZenModel.Device.Device', limit=10000)
            model_catalog_results = set([ brain.getPath() for brain in model_catalog_brains.results ])

            result = "FAILED"
            if len(global_catalog_results - model_catalog_results) == 0 and  \
               len(model_catalog_results-global_catalog_results) ==0:
               result = "PASSED"
            else:
                failed_device_classes.append(dc)

        if not failed_device_classes:
            print "TEST PASSED: All devices found in the same device classes for both catalogs!!"
        else:
            print "TEST FAILED: The following device classes have different devices in the catalogs:"
            for failed in failed_device_classes:
                print "\t{0}".format(failed)

        return len(failed_device_classes) == 0
    def deleteNode(self, uid):
        """
        Deletes a node from the tree.

        B{NOTE}: You cannot delete the root node of a tree

        @type  uid: string
        @param uid: Unique identifier of the node we wish to delete
        @rtype:   DirectResponse
        @return:  B{Properties}:
             - msg: (string) Status message
        """
        # make sure we are not deleting a root node
        if not self._canDeleteUid(uid):
            raise Exception('You cannot delete the root node')
        facade = self._getFacade()
        node = facade._getObject(uid)

        # Audit first so it can display details like "name" while they exist.
        # Trac #29148: When we delete a DeviceClass we also delete its devices
        #     and child device classes and their devices, so audit them all.
        if isinstance(node, DeviceClass):
            childBrains = ICatalogTool(node).search((
                'Products.ZenModel.DeviceClass.DeviceClass',
                'Products.ZenModel.Device.Device',
            ))
            for child in childBrains:
                audit(['UI', getDisplayType(child), 'Delete'], child.getPath())
        else:
            audit(['UI', getDisplayType(node), 'Delete'], node)

        facade.deleteNode(uid)
        msg = "Deleted node '%s'" % uid
        return DirectResponse.succeed(msg=msg)
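A hypothetical sketch of exercising the deleteNode() endpoint above; the router instance and the uids below are illustrative, not taken from a real install.

# Hypothetical usage of deleteNode(); `router` and the uids are illustrative.
response = router.deleteNode(uid='/zport/dmd/Devices/Server/Linux/devices/testdevice')

# Deleting a root node is rejected before the facade is ever called:
try:
    router.deleteNode(uid='/zport/dmd/Devices')
except Exception as e:
    print e  # You cannot delete the root node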
Example #3
def filteredDevices(context, args, *types):
    path = '/zport/dmd'

    deviceFilter = args.get('deviceFilter', '') or ''
    deviceClass = args.get('deviceClass', '') or ''
    extraquery = args.get('extraquery', '')
    filter = []
    if deviceFilter:
        filter.append(MatchGlob('name','*%s*' % deviceFilter) | MatchGlob('id','*%s*' % deviceFilter))
    if deviceClass:
        organizer = (''.join([path,deviceClass]),)
    else:
        organizer = (''.join([path, args.get('organizer', '/Devices') or '/Devices']),)

    if not types:
        types = 'Products.ZenModel.Device.Device'

    if extraquery:
        filter.extend(extraquery)

    query = And(*filter) if filter else None

    results = ICatalogTool(context).search(types, paths=organizer,
        query=query)

    for brain in results:
        try:
            yield brain.getObject()
        except Exception:
            log.warn("Unable to unbrain at path %s", brain.getPath())
    def validate_templates(self):
        """ Check that both catalogs return same data for templates """
        global_catalog = ICatalogTool(dmd.Devices)
        model_catalog = IModelCatalogTool(dmd.Devices)

        # get template nodes from global catalog
        global_catalog_brains = global_catalog.search(types=('Products.ZenModel.RRDTemplate.RRDTemplate',))
        global_catalog_templates = set([ brain.getPath() for brain in global_catalog_brains ])

        # get template nodes from model catalog
        model_catalog_brains = model_catalog.search(types=('Products.ZenModel.RRDTemplate.RRDTemplate',))
        model_catalog_templates = set([ brain.getPath() for brain in model_catalog_brains ])

        # compare results
        if model_catalog_templates == global_catalog_templates:
            failed_templates = []
            for template in global_catalog_templates:
                template_object = dmd.unrestrictedTraverse(template)
                query = Eq('id', template_object.id)

                gc_brains = global_catalog.search(types=('Products.ZenModel.RRDTemplate.RRDTemplate',), query=query)
                gc_templates = set([ brain.getPath() for brain in gc_brains ])

                mc_brains = model_catalog.search(types=('Products.ZenModel.RRDTemplate.RRDTemplate',), query=query)
                mc_templates = set([ brain.getPath() for brain in mc_brains ])

                if gc_templates != mc_templates:
                    failed_templates.append(template)

            if failed_templates:
                print "TEST FAILED: Inconsistent results from catalogs for templates:"
                for failure in failed_templates:
                    print "\t{0}".format(failure)
            else:
                print "TEST PASSED: Both catalogs returned same results!!"
                return True

        else:
            print "TEST FAILED: Inconsistent results from catalogs:"
            print "\t{0}".format("Templates found in global catalog and not in model catalog: {0}".format(global_catalog_templates - model_catalog_templates))
            print "\t{0}".format("Templates found in model catalog and not in global catalog: {0}".format(model_catalog_templates - global_catalog_templates))

        return False
Example #5
    def callHomeData(self, dmd):
        self.dmd = dmd
        self._catalog = ICatalogTool(self.dmd)
        stats = (self.server_key, self.google_key, self.version,
                 self.all_versions, self.event_classes, self.event_count,
                 self.reports, self.templates, self.systems, self.groups,
                 self.locations, self.total_collectors, self.zenpacks,
                 self.user_count, self.product_count, self.product_name)
        return chain.from_iterable(map(lambda fn: fn(), stats))
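The closing chain.from_iterable(map(...)) line flattens the (key, value) pairs yielded by each stat method into a single stream; a stand-in illustration with made-up stat callables:

# Illustration of the chain.from_iterable(map(...)) pattern above; the stat
# functions and values below are made up.
from itertools import chain

def _version():
    yield 'version', '6.x'

def _device_count():
    yield 'device_count', 42

stats = (_version, _device_count)
print list(chain.from_iterable(map(lambda fn: fn(), stats)))
# [('version', '6.x'), ('device_count', 42)]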
    def validate_mib_counts(self):
        """ """
        mib_organizers = get_mib_organizers(dmd)
        failed_counts = []
        global_catalog = ICatalogTool(dmd)
        model_catalog = IModelCatalogTool(dmd)
        for organizer in mib_organizers:
            global_catalog_count = global_catalog.count(("Products.ZenModel.MibModule.MibModule",), organizer)
            model_catalog_count = model_catalog.count(("Products.ZenModel.MibModule.MibModule",), organizer)
            if global_catalog_count != model_catalog_count:
                failed_counts.append(organizer)

        if not failed_counts:
            print "TEST PASSED: All mib organizers have the same count in both catalogs!!"
        else:
            print "TEST FAILED: The following mib organizers have different counts in the catalogs:"
            for failed in failed_counts:
                print "\t{0}".format(failed)
        return len(failed_counts) == 0
    def cutover(self, dmd):
        jmgr = getattr(dmd, 'JobManager', None)
        if jmgr:
            log.info("Removing old job records")
            for ob in ICatalogTool(dmd.JobManager).search():
                try:
                    if ob.getPath() != '/zport/dmd/JobManager':
                        dmd.global_catalog._catalog.uncatalogObject(
                            ob.getPath())
                except Exception:
                    log.warn("Error removing %s", ob.getPath())
            log.info("Removing old job relationship")
            if hasattr(jmgr, 'jobs'):
                jmgr._delOb('jobs')
Example #8
    def callHomeData(self, dmd):
        self._dmd = dmd
        self._catalog = ICatalogTool(self._dmd)
        stats = self._process_devices()
        for key, value in stats.items():
            yield key, value
Example #9
    def run(self, dmd):
        """Run the report, returning an Availability object for each device"""
        # Note: we don't handle overlapping "down" events, so downtime
        # could get double-counted.
        __pychecker__ = 'no-local'
        now = time.time()
        zep = getFacade("zep", dmd)

        path = '/zport/dmd/'

        pathFilterList = [Generic('path', {'query': path})]

        if self.DeviceClass:
            pathFilterList.append(
                Generic(
                    'path',
                    {'query': ''.join([path, 'Devices', self.DeviceClass])}))
        if self.Location:
            pathFilterList.append(
                Generic(
                    'path',
                    {'query': ''.join([path, 'Locations', self.Location])}))
        if self.System:
            pathFilterList.append(
                Generic('path',
                        {'query': ''.join([path, 'Systems', self.System])}))
        if self.DeviceGroup:
            pathFilterList.append(
                Generic(
                    'path',
                    {'query': ''.join([path, 'Groups', self.DeviceGroup])}))
        if self.device:
            pathFilterList.append(
                Or(Eq('name', self.device), Eq('id', self.device)))

        results = ICatalogTool(dmd.Devices).search(
            types='Products.ZenModel.Device.Device',
            query=And(*pathFilterList))

        if not results.total:
            return []

        deviceList = {}
        tag_uuids = []
        accumulator = defaultdict(int)
        for brain in results:
            try:
                obj = brain.getObject()
                deviceList[obj.id] = obj
                tag_uuids.append(brain.uuid)
                accumulator[obj.id] = 0
            except Exception:
                log.warn("Unable to unbrain at path %s", brain.getPath())

        endDate = self.endDate or Availability.getDefaultAvailabilityEnd()
        endDate = min(endDate, now)
        startDate = self.startDate
        if not startDate:
            startDate = Availability.getDefaultAvailabilityStart(dmd)

        # convert start and end date to integer milliseconds for defining filters
        startDate = int(startDate * 1000)
        endDate = int(endDate * 1000)
        total_report_window = endDate - startDate

        create_filter_args = {
            'operator': zep.AND,
            'severity': _severityGreaterThanOrEqual(self.severity),
            'event_class': self.eventClass + ('/' if not self.eventClass.endswith('/') else ''),
        }

        if self.agent:
            create_filter_args['agent'] = self.agent
        if self.monitor is not None:
            create_filter_args['monitor'] = self.monitor

        # add filters on details
        filter_details = {}
        if self.DevicePriority is not None:
            filter_details['zenoss.device.priority'] = "%d:" % self.DevicePriority
        if self.prodState:
            filter_details['zenoss.device.production_state'] = "%d:" % self.prodState
        if filter_details:
            create_filter_args['details'] = filter_details

        create_filter_args['tags'] = tag_uuids

        # query zep for matching event summaries
        # 1. get all open events that:
        #    - first_seen < endDate
        #    (only need to check active events)
        # 2. get all closed events that:
        #    - first_seen < endDate
        #    - status_change > startDate
        #    (must get both active and archived events)

        # 1. get open events
        create_filter_args['first_seen'] = (0, endDate)
        create_filter_args['status'] = OPEN_EVENT_STATUSES
        event_filter = zep.createEventFilter(**create_filter_args)
        open_events = zep.getEventSummariesGenerator(event_filter)

        # 2. get closed events
        create_filter_args['status_change'] = (startDate + 1, )
        create_filter_args['status'] = CLOSED_EVENT_STATUSES
        event_filter = zep.createEventFilter(**create_filter_args)
        closed_events = zep.getEventSummariesGenerator(event_filter)
        # must also get events from archive
        closed_events_from_archive = zep.getEventSummariesGenerator(
            event_filter, archive=True)

        def eventDowntime(evt):
            first = evt['first_seen_time']
            # if event is still open, downtime persists until the end of the report window
            if evt['status'] not in CLOSED_EVENT_STATUSES:
                last = endDate
            else:
                last = evt['status_change_time']

            # discard any events that have no elapsed time
            if first == last:
                return 0

            # clip first and last within report time window
            first = max(first, startDate)
            last = min(last, endDate)

            return (last - first)

        def eventElementIdentifier(evt):
            return evt['occurrence'][0]['actor'].get('element_identifier')

        for evt in open_events:
            dt = eventDowntime(evt)
            if dt == 0:
                continue
            accumulator[eventElementIdentifier(evt)] += dt

        summary_closed_event_uuids = {}
        for evt in closed_events:
            summary_closed_event_uuids[evt['uuid']] = True
            dt = eventDowntime(evt)
            if dt == 0:
                continue
            accumulator[eventElementIdentifier(evt)] += dt

        for evt in closed_events_from_archive:
            if evt['uuid'] in summary_closed_event_uuids:
                continue
            dt = eventDowntime(evt)
            if dt == 0:
                continue
            accumulator[eventElementIdentifier(evt)] += dt

        availabilityReport = []
        for deviceId, downtime in sorted(accumulator.items()):
            device = deviceList.get(deviceId, None)
            if device:
                sysname = device.getSystemNamesString()
                link = device.getDeviceLink()
                title = device.titleOrId()
                availabilityReport.append(
                    Availability(deviceId, '', downtime, total_report_window,
                                 sysname, link, title))
                device._p_invalidate()
        return availabilityReport
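The window clipping in eventDowntime() above is easiest to check with concrete numbers; the millisecond timestamps below are made up:

# Illustrative check of the eventDowntime() clipping; values are made-up
# millisecond timestamps, not real event data.
startDate, endDate = 1000, 5000   # report window
first, last = 500, 6000           # event opened before and closed after the window
first = max(first, startDate)     # clip to window start -> 1000
last = min(last, endDate)         # clip to window end   -> 5000
assert last - first == 4000       # only the overlap counts as downtime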
    def run(self, dmd):
        """Run the report, returning an Availability object for each device"""
        # Note: we don't handle overlapping "down" events, so downtime
        # could get double-counted.
        __pychecker__ = 'no-local'
        rf2Filename = zenhome + '/log/AvailabilityRep2.out'
        rf2 = open(rf2Filename, 'w')
        rf2.write('CReport - in run\n')
        now = time.time()
        zep = getFacade("zep", dmd)

        rf2.write('CReport - start of run \n')
        path = '/zport/dmd/'
        pathFilterList = [
            Generic(
                'path',
                {'query': ''.join([path, 'Devices', self.DeviceClass or ''])})
        ]
        if self.Location:
            pathFilterList.append(
                Generic(
                    'path',
                    {'query': ''.join([path, 'Locations', self.Location])}))
        if self.System:
            pathFilterList.append(
                Generic('path',
                        {'query': ''.join([path, 'Systems', self.System])}))
        if self.DeviceGroup:
            pathFilterList.append(
                Generic(
                    'path',
                    {'query': ''.join([path, 'Groups', self.DeviceGroup])}))
        if self.device:
            #pathFilterList.append(Or(Eq('name', self.device), Eq('id', self.device)))
            # For regex match of device name
            pathFilterList.append(
                Or(MatchRegexp('name', '(?i).*%s.*' % self.device),
                   MatchRegexp('id', '(?i).*%s.*' % self.device)))

        results = ICatalogTool(dmd.Devices).search(
            types='Products.ZenModel.Device.Device',
            query=And(*pathFilterList))
        rf2.write('pathFilterList is %s \n' % (pathFilterList))
        rf2.write('results is %s \n' % (results))

        if not results.total:
            return []

        deviceList = {}
        tag_uuids = []
        accumulator = defaultdict(int)
        for brain in results:
            try:
                obj = brain.getObject()
                deviceList[obj.id] = obj
                tag_uuids.append(brain.uuid)
                accumulator[obj.id] = 0
                rf2.write("obj is %s uuid is %s \n" %
                          (brain.getObject(), brain.uuid))
            except Exception:
                log.warn("Unable to unbrain at path %s", brain.getPath())
                rf2.write("Unable to unbrain at path %s", brain.getPath())

        endDate = self.endDate or AvailabilityColl.getDefaultAvailabilityEnd()
        endDate = min(endDate, now)
        startDate = self.startDate
        if not startDate:
            startDate = AvailabilityColl.getDefaultAvailabilityStart(dmd)

        # convert start and end date to integer milliseconds for defining filters
        startDate = int(startDate * 1000)
        endDate = int(endDate * 1000)
        total_report_window = endDate - startDate

        create_filter_args = {
            'operator': zep.AND,
            'severity': _severityGreaterThanOrEqual(self.severity),
            'event_class': self.eventClass + ('/' if not self.eventClass.endswith('/') else ''),
        }

        if self.agent:
            create_filter_args['agent'] = self.agent
        if self.monitor is not None:
            create_filter_args['monitor'] = self.monitor

        rf2.write(
            'device is %s, component is %s,  location is %s  priority is %s, prodState is %s \n'
            % (self.device, self.component, self.Location, self.DevicePriority,
               self.prodState))
        # add filters on details
        filter_details = {}
        if self.DevicePriority is not None:
            filter_details['zenoss.device.priority'] = "%d:" % self.DevicePriority
        if self.prodState:
            filter_details['zenoss.device.production_state'] = "%d:" % self.prodState
        if filter_details:
            create_filter_args['details'] = filter_details

        create_filter_args['tags'] = tag_uuids

        # query zep for matching event summaries
        # 1. get all open events that:
        #    - first_seen < endDate
        #    (only need to check active events)
        # 2. get all closed events that:
        #    - first_seen < endDate
        #    - status_change > startDate
        #    (must get both active and archived events)

        # 1. get open events
        create_filter_args['first_seen'] = (0, endDate)
        create_filter_args['status'] = OPEN_EVENT_STATUSES
        rf2.write(' create_filter_args dictionary for open events is %s \n' %
                  (create_filter_args))
        event_filter = zep.createEventFilter(**create_filter_args)
        open_events = zep.getEventSummariesGenerator(event_filter)

        # 2. get closed events
        create_filter_args['status_change'] = (startDate + 1, )
        create_filter_args['status'] = CLOSED_EVENT_STATUSES
        rf2.write(' create_filter_args dictionary for closed events is %s \n' %
                  (create_filter_args))
        event_filter = zep.createEventFilter(**create_filter_args)
        closed_events = zep.getEventSummariesGenerator(event_filter)
        # must also get events from archive
        closed_events_from_archive = zep.getEventSummariesGenerator(
            event_filter, archive=True)

        # Don't put print / log statements in the next block: the event
        # summaries above come from Python generators and are delivered
        # only as this loop consumes them.
        for evtsumm in chain(open_events, closed_events,
                             closed_events_from_archive):
            first = evtsumm['first_seen_time']
            # if event is still open, downtime persists until the end of the report window
            if evtsumm['status'] not in CLOSED_EVENT_STATUSES:
                last = endDate
            else:
                last = evtsumm['status_change_time']

            # discard any events that have no elapsed time
            if first == last:
                continue

            # clip first and last within report time window
            first = max(first, startDate)
            last = min(last, endDate)

            evt = evtsumm['occurrence'][0]
            evt_actor = evt['actor']
            device = evt_actor.get('element_identifier')
            accumulator[device] += (last - first)

        availabilityReport = []
        for deviceId, downtime in sorted(accumulator.items()):
            device = deviceList.get(deviceId, None)
            if device:
                sysname = device.getSystemNamesString()
                groupname = ', '.join(device.getDeviceGroupNames())
                loc = device.getLocationName()
                dclass = device.getDeviceClassPath()
                link = device.getDeviceLink()
                title = device.titleOrId()
                availabilityReport.append(
                    AvailabilityColl(deviceId, '', downtime,
                                     total_report_window, groupname, sysname,
                                     loc, dclass, link, title))
                device._p_invalidate()


        rf2.close()
        return availabilityReport