class RemoveCycleTimeGraph(Migrate.Step):
    # Strips per-daemon cycle-time artifacts (graph points, thresholds,
    # datapoints) from the collector PerformanceConf template.
    version = Migrate.Version(4, 1, 70)

    def cutover(self, dmd):
        """Remove cycle-time graph points, thresholds, and datapoints."""
        template = dmd.Monitors.rrdTemplates.PerformanceConf
        # remove selected graph points from definition if present
        graph = template.graphDefs._getOb('Cycle Times', None)
        if graph:
            graphPoints = ('zenperfsnmp', 'zenping', 'zenstatus')
            graph.manage_deleteGraphPoints(graphPoints)
        # remove thresholds if present
        # NOTE(review): not guarded like the graph above; presumably
        # manage_deleteRRDThresholds tolerates missing ids -- confirm.
        thresholdIds = ('zenperfsnmp cycle time', 'zenping cycle time',
                        'zenprocess cycle time')
        template.manage_deleteRRDThresholds(thresholdIds)
        # remove cycleTime datapoints from all datasources except zenmodeler
        for ds in template.datasources():
            if ds.id != 'zenmodeler' and ds.datapoints._getOb(
                    'cycleTime', False):
                ds.manage_deleteRRDDataPoints(['cycleTime'])
class SendOrganizersToZing(Migrate.Step):
    """Update all organizers' catalog entries so they can be sent to Zing."""

    version = Migrate.Version(300, 0, 13)

    def cutover(self, dmd):
        """Reindex every device/group/system/location/component-group organizer."""
        try:
            orgs = (
                dmd.Devices.getSubOrganizers() +
                dmd.Groups.getSubOrganizers() +
                dmd.Systems.getSubOrganizers() +
                dmd.Locations.getSubOrganizers() +
                dmd.ComponentGroups.getSubOrganizers()
            )
        except Exception as e:
            logging.error("error getting list of organizers: %s", e)
            return
        for org in orgs:
            try:
                IModelCatalogTool(org).update(org)
            except Exception as e:
                # BUG FIX: the original caught `e` but never logged it,
                # hiding the actual failure reason.
                logging.error("error updating catalog for organizer %s: %s",
                              org.getOrganizerName(), e)
class SetMariaDbTimeouts51x(Migrate.Step):
    """
    Setting MariaDB timeout defaults.

    This does the same as setMariaDbTimeouts.py, but the version on this one
    is higher to account for upgrading systems newer than 5.0.
    """

    version = Migrate.Version(111, 0, 0)

    def cutover(self, dmd):
        """Lower wait_timeout from 86400 to 7200 in every mariadb* /etc/my.cnf."""
        try:
            ctx = sm.ServiceContext()
        except sm.ServiceMigrationError:
            log.info("Couldn't generate service context, skipping.")
            return
        marialist = ['mariadb', 'mariadb-events', 'mariadb-model']
        marias = filter(lambda s: s.name in marialist, ctx.services)
        # Lazy %-style logger args instead of eager interpolation.
        log.info("Found %i mariadb* services.", len(marias))
        commit = False
        for maria in marias:
            for cnf in maria.configFiles:
                if cnf.name != '/etc/my.cnf':
                    continue
                lines = cnf.content.split('\n')
                for i in range(len(lines)):
                    if lines[i] == 'wait_timeout = 86400':
                        # BUG FIX: message read "to 7200 for in %s %s".
                        log.info("Set wait_timeout to 7200 in %s %s",
                                 maria.name, cnf.name)
                        lines[i] = 'wait_timeout = 7200'
                        cnf.content = '\n'.join(lines)
                        commit = True
        if commit:
            ctx.commit()
class AddConfigTimeDataPoints(Migrate.Step):
    """Add a configTime GAUGE datapoint to the zeneventlog/zenwin datasources
    of the PerformanceConf template and graph it on 'Config Time'."""

    version = Migrate.Version(2, 5, 0)

    def cutover(self, dmd):
        # get the PerformanceConf template
        template = dmd.Monitors.rrdTemplates._getOb('PerformanceConf', None)
        if not template:
            # No collector performance template exists.
            return
        # add configTime to the zeneventlog and zenwin data sources
        dpNames = []
        for ds in template.datasources():
            if ds.id not in ('zeneventlog', 'zenwin'):
                continue
            # don't try and add configTime if it already exists
            if 'configTime' in ds.datapoints.objectIds():
                continue
            newDp = ds.manage_addRRDDataPoint('configTime')
            newDp.rrdtype = 'GAUGE'
            newDp.rrdmin = 0
            dpNames.append("%s_configTime" % ds.id)
        # add the new datapoints to the config time graph
        # BUG FIX: pass a default so a missing graph yields None instead of
        # _getOb raising AttributeError; the guard below was unreachable.
        graph = template.graphDefs._getOb("Config Time", None)
        if not graph:
            # No Graph Definition in the template
            return
        graph.manage_addDataPointGraphPoints(dpNames)
        # Fix up all of the graph points we just added.
        for gp in graph.graphPoints():
            if isinstance(gp, DataPointGraphPoint):
                collectorName = gp.dpName.split('_', 1)[0]
                gp.legend = collectorName
class StatusMonitorMenus(Migrate.Step):
    """Add the 'Templates' menu item to both monitor list menus."""

    version = Migrate.Version(2, 1, 0)

    def cutover(self, dmd):
        # Both menus receive an identical "Templates" entry; build it once
        # and hand each menu its own copy.
        templates_item = {
            'action': 'performanceTemplates',
            'description': 'Templates',
            'id': 'performanceTemplates',
            'ordering': 16.0,
            'permissions': ('View Device', ),
        }
        dmd.buildMenus({
            'PerformanceMonitor_list': [dict(templates_item)],
            'StatusMonitor_list': [dict(templates_item)],
        })
class RemoveWinModelerGraphPoints(Migrate.Step):
    """Remove the zenwinmodeler graph point, threshold, and datasource
    from the PerformanceConf template."""

    version = Migrate.Version(2, 2, 0)

    def deleteLastObjectOnPath(self, dmd, path):
        """Traverse a '/'-separated path from dmd and delete the leaf object."""
        segments = path.split('/')
        container = dmd
        for segment in segments[:-1]:
            container = container._getOb(segment)
        container._delObject(segments[-1])

    def cutover(self, dmd):
        base = 'Monitors/rrdTemplates/PerformanceConf/'
        suffixes = (
            'graphDefs/Cycle Times/graphPoints/zenwinmodeler',
            'thresholds/zenwinmodeler cycle time',
            'datasources/zenwinmodeler',
        )
        for suffix in suffixes:
            try:
                self.deleteLastObjectOnPath(dmd, base + suffix)
            except AttributeError:
                # Object already absent -- nothing to delete.
                pass
class AddServiceRunLevels(Migrate.Step):
    """
    Add the emergency shutdown and startup levels to service definitions.
    See ZEN-23931, ZEN-27299.
    """

    version = Migrate.Version(109, 0, 0)

    def cutover(self, dmd):
        try:
            ctx = sm.ServiceContext()
        except sm.ServiceMigrationError:
            log.info("Couldn't generate service context, skipping.")
            return
        changed = False
        for service in ctx.services:
            # Services not in the table default to (0, 0).
            shutdownLevel, startLevel = serviceRunLevels.get(
                service.name, (0, 0))
            if service.emergencyShutdownLevel != shutdownLevel:
                previous = service.emergencyShutdownLevel
                service.emergencyShutdownLevel = shutdownLevel
                changed = True
                log.info(
                    'Change emergency shutdown level of %s from %d to %d.',
                    service.name, previous, shutdownLevel)
            if service.startLevel != startLevel:
                previous = service.startLevel
                service.startLevel = startLevel
                changed = True
                log.info('Change start level of %s from %d to %d.',
                         service.name, previous, startLevel)
        if changed:
            ctx.commit()
        else:
            log.info('Nothing to change in this migration step.')
class MigrateLocalhostPerf(Migrate.Step):
    """Convert the DistributedCollector localhost hub and monitor to be
    Control Center-backed (5.x)."""

    version = Migrate.Version(5, 0, 70)

    def cutover(self, dmd):
        try:
            from ZenPacks.zenoss.DistributedCollector import \
                addHubRoot, addLocalhostHub, convertLocalhostHubToCC, convertAllMonitors, \
                convertMonitor, convertLocalhostMonitorToCC
        except ImportError:
            log.info("Distributed collector module not found, skipping")
        else:
            log.info("Migrating localhost hub + monitor")
            # Assume localhost hub + collector exist
            root = dmd.Monitors
            hubRoot = root.get('Hub')
            # No DC-style Performance dmd path yet. Shouldn't really ever hit
            # this, because zenpack install should take care of this. If hit,
            # then exit.
            if not hubRoot:
                return
            localhostHub = hubRoot.get('localhost')
            if not localhostHub:
                log.info("Unable to find localhost hub, creating now")
                localhostHub = addLocalhostHub(hubRoot)
            # BUG FIX: getattr without a default raised AttributeError on
            # hubs created before the ccBacked flag existed -- exactly the
            # hubs this step must convert.
            if not getattr(localhostHub, 'ccBacked', False):
                log.info("Upgrading localhost hub to be control center-backed")
                # convert already DC-upgraded hub to be 5.x-compat
                convertLocalhostHubToCC(localhostHub)
            localhostMonitor = root.Performance.get('localhost')
            # ROBUSTNESS: the monitor may be absent entirely; same getattr
            # default fix as the hub above.
            if localhostMonitor is not None and \
                    not getattr(localhostMonitor, 'ccBacked', False):
                log.info(
                    "Upgrading localhost monitor to be control center-backed")
                convertLocalhostMonitorToCC(localhostMonitor)
class ChangeMemcachedStartup(Migrate.Step):
    "Change memcached startup to respect config file and update config file"

    version = Migrate.Version(108, 0, 0)

    def _update_config(self, config):
        """Rewrite USER/CACHESIZE/OPTIONS lines in /etc/sysconfig/memcached
        content and return the updated text."""
        # NOTE(review): the USER pattern and replacement appear redacted
        # ("******"), and '"******"' is not a valid regex (multiple repeat),
        # so re.sub would raise at runtime -- confirm the intended values
        # against upstream history before relying on this step.
        USER_RE = r'USER="******"'
        CACHESIZE_RE = r'CACHESIZE="{{[\w.]+}}"'
        OPTIONS_RE = r'OPTIONS="\w*"'
        USER = '******'
        CACHESIZE = 'CACHESIZE="{{percentScale .RAMCommitment 0.9 | bytesToMB}}"'
        OPTIONS = 'OPTIONS="-v -R 4096"'
        # Positional args: count=0 (replace all), flags=re.MULTILINE.
        config = re.sub(USER_RE, USER, config, 0, re.MULTILINE)
        config = re.sub(CACHESIZE_RE, CACHESIZE, config, 0, re.MULTILINE)
        config = re.sub(OPTIONS_RE, OPTIONS, config, 0, re.MULTILINE)
        return config

    def cutover(self, dmd):
        try:
            ctx = sm.ServiceContext()
        except sm.ServiceMigrationError:
            log.info("Couldn't generate service context, skipping.")
            return
        memcached_services = filter(lambda cf: cf.name == 'memcached',
                                    ctx.services)
        for service in memcached_services:
            # Startup now delegates to the zenmemcached wrapper script.
            service.startup = "${ZENHOME:-/opt/zenoss}/bin/zenmemcached"
            # Rewrite both the original and current copies of the config.
            cfs = filter(lambda cf: cf.name == "/etc/sysconfig/memcached",
                         service.originalConfigs + service.configFiles)
            for cf in cfs:
                cf.content = self._update_config(cf.content)
        ctx.commit()
class UpgradeMultiPathIndices(Migrate.Step):
    """Swap the device 'path' index for a multi-path index and the template
    'getPhysicalPath' index for an extended path index, reindexing both."""

    version = Migrate.Version(3, 0, 0)

    def cutover(self, dmd):
        device_catalog = dmd.Devices.deviceSearch
        path_index = device_catalog._catalog.indexes['path']
        # Multi-path indexes carry an _index_parents attribute; its absence
        # means the old single-path index is still installed.
        if getattr(path_index, '_index_parents', None) is None:
            device_catalog.delIndex('path')
            device_catalog._catalog.addIndex('path', makeMultiPathIndex('path'))
            # grab each device and manually reindex it
            log.info(
                'Reindexing devices. This may take a while ...'
            )
            for device in dmd.Devices.getSubDevices_recursive():
                device_catalog.catalog_object(device, idxs=('path',))

        template_catalog = dmd.searchRRDTemplates
        phys_index = template_catalog._catalog.indexes['getPhysicalPath']
        if phys_index.__class__.__name__ != 'ExtendedPathIndex':
            # Snapshot the brains before dropping the index, then rebuild.
            brains = template_catalog()
            template_catalog.delIndex('getPhysicalPath')
            template_catalog._catalog.addIndex(
                'getPhysicalPath', makePathIndex('getPhysicalPath'))
            for brain in brains:
                template_catalog.catalog_object(
                    brain.getObject(), idxs=('getPhysicalPath',))
class UpdateZeneventserverHealthCheck(Migrate.Step):
    "Change 'answering' healthcheck to only care about successful curl."

    version = Migrate.Version(111, 0, 0)

    def cutover(self, dmd):
        try:
            ctx = sm.ServiceContext()
        except sm.ServiceMigrationError:
            log.info("Couldn't generate service context, skipping.")
            return
        zeps = [s for s in ctx.services if s.name == "zeneventserver"]
        log.info("Found %i services named 'zeneventserver'." % len(zeps))
        for zep in zeps:
            answering = [hc for hc in zep.healthChecks
                         if hc.name == "answering"]
            log.info("Found %i 'answering' healthchecks." % len(answering))
            for check in answering:
                # -f makes curl exit non-zero on HTTP errors; no body parsing.
                check.script = "curl -f -s http://localhost:8084/zeneventserver/api/1.0/heartbeats/"
                log.info("Updated 'answering' healthcheck.")
        ctx.commit()
class UpdateZenactiondConfig(Migrate.Step):
    "Add strip-email-body-tags option"

    version = Migrate.Version(300, 0, 5)

    def cutover(self, dmd):
        """Append a commented strip-email-body-tags stanza to zenactiond.conf."""
        try:
            ctx = sm.ServiceContext()
        except sm.ServiceMigrationError:
            log.info("Couldn't generate service context, skipping.")
            return
        # ROBUSTNESS: the original indexed [0] unconditionally and raised
        # IndexError when the service or its config file was missing.
        zenactionds = filter(lambda s: s.name == "zenactiond", ctx.services)
        if not zenactionds:
            log.info("Couldn't find zenactiond service, skipping.")
            return
        zenactiond = zenactionds[0]
        update = ('\n# Strip HTML/XML tags from plaintext'
                  '\n# email notifications?'
                  '\n#strip-email-body-tags True\n#\n')
        cfgs = filter(lambda f: f.name == "/opt/zenoss/etc/zenactiond.conf",
                      zenactiond.originalConfigs)
        if not cfgs:
            log.info("Couldn't find zenactiond.conf, skipping.")
            return
        cfg = cfgs[0]
        # Only append if the option is not already mentioned (idempotent).
        if cfg.content.find("strip-email-body-tags") < 0:
            cfg.content += update
            log.info("Added strip-email-body-tags option")
        ctx.commit()
class AddMemcachedService(Migrate.Step):
    """
    Update memcache healthcheck, and add the service to Core
    """

    version = Migrate.Version(5, 0, 70)

    def cutover(self, dmd):
        try:
            ctx = sm.ServiceContext()
        except sm.ServiceMigrationError:
            log.info("Couldn't generate service context, skipping.")
            return
        # If the service lacks memcached, add it now.
        existing = filter(lambda s: s.name == "memcached", ctx.services)
        log.info("Found %i services named 'memcached'." % len(existing))
        if not existing:
            log.info("No memcached found; creating new service.")
            new_memcached = default_memcached_service()
            # Deploy under the top-level Infrastructure folder.
            infrastructure = ctx.findServices('^[^/]+/Infrastructure$')[0]
            ctx.deployService(json.dumps(new_memcached), infrastructure)
            ctx.commit()
class AddTriggerPermToZenManager(Migrate.Step):
    """Grant trigger/notification management permissions to the ZenManager role."""

    version = Migrate.Version(200, 0, 0)

    def addPermissions(self, obj, permission, roles=None, acquire=0):
        """Register `permission` on obj (if new) and grant it to `roles`,
        preserving any roles that already hold it.

        Kept consistent with UpdateDefineCommandsEditPermission.addPermissions.
        """
        if not roles:
            roles = []
        # CONSISTENCY/IDIOM: `permission not in` (was `not permission in`),
        # matching the sibling migration step's identical helper.
        if permission not in obj.possible_permissions():
            obj.__ac_permissions__ = (obj.__ac_permissions__
                                      + ((permission, (), roles), ))
        for permissionDir in obj.rolesOfPermission(permission):
            if permissionDir['selected']:
                if permissionDir['name'] not in roles:
                    roles.append(permissionDir['name'])
        obj.manage_permission(permission, roles, acquire)

    def cutover(self, dmd):
        zport = dmd.zport
        self.addPermissions(zport, MANAGE_TRIGGER, [ZEN_MANAGER_ROLE], 1)
        self.addPermissions(zport, UPDATE_TRIGGER, [ZEN_MANAGER_ROLE], 1)
        self.addPermissions(zport, UPDATE_NOTIFICATION, [ZEN_MANAGER_ROLE], 1)
        self.addPermissions(zport, MANAGE_NOTIFICATION_SUBSCRIPTIONS,
                            [ZEN_MANAGER_ROLE], 1)
class ChangeDeviceProdStatePermission(Migrate.Step):
    """
    The ZEN_CHANGE_DEVICE_PRODSTATE (Change Device Production State)
    permission has been added to Zenoss 2.5. This migrate script is used to
    update the appropriate menu item permissions for upgraded systems.
    """

    version = Migrate.Version(2, 5, 0)

    def cutover(self, dmd):
        zport = dmd.zport
        # Register the permission on zport if it is not yet known.
        if ZEN_CHANGE_DEVICE_PRODSTATE not in zport.possible_permissions():
            new_entry = (ZEN_CHANGE_DEVICE_PRODSTATE, (),
                         [ZEN_MANAGER_ROLE, OWNER_ROLE, MANAGER_ROLE])
            zport.__ac_permissions__ = zport.__ac_permissions__ + (new_entry, )

        # Point the three production-state menu items at the new permission.
        menus = dmd.zenMenus
        prodstate_perm = (ZEN_CHANGE_DEVICE_PRODSTATE,)
        menus.Device_list.zenMenuItems.setProductionState.permissions = \
            prodstate_perm
        menus.DeviceGrid_list.zenMenuItems.setProductionState_grid.permissions = \
            prodstate_perm
        menus.Edit.zenMenuItems.setProductionState.permissions = prodstate_perm
class ConvertRunsToCommands(Migrate.Step):
    """Merge the new command definitions into the Zope and mariadb services."""

    version = Migrate.Version(5, 0, 70)

    def _merge_commands(self, ctx, name, newCommands):
        """Merge `newCommands` into the single service named `name`, if present.

        DRY: the original repeated this stanza verbatim for three services.
        """
        matches = filter(lambda s: s.name == name, ctx.services)
        log.info("Found %i services named '%s'." % (len(matches), name))
        if len(matches) == 1:
            temp = commandListDict(matches[0].commands)
            temp.update(newCommands)
            matches[0].commands = commandDictList(temp)
            log.info("Updated %s command list." % name)

    def cutover(self, dmd):
        try:
            ctx = sm.ServiceContext()
        except sm.ServiceMigrationError:
            log.info("Couldn't generate service context, skipping.")
            return
        # Update each service's command list in turn.
        self._merge_commands(ctx, "Zope", zopeCommands)
        self._merge_commands(ctx, "mariadb-model", mariadbModelCommands)
        self._merge_commands(ctx, "mariadb", mariadbCommands)
        # Commit our changes.
        ctx.commit()
class CollectorIpv6Commands(Migrate.Step):
    """Point the default user commands at IPv6-capable invocations and wrap
    them in dcsh so they execute on the device's collector."""

    version = Migrate.Version(4, 0, 0)

    def cutover(self, dmd):
        # New IPv6-friendly command strings, keyed by user-command id.
        replacements = (
            ('ping',
             "${device/pingCommand} -c2 ${device/manageIp}"),
            ('traceroute',
             "${device/tracerouteCommand} -q 1 -w 2 ${device/manageIp}"),
            ('snmpwalk',
             "snmpwalk -${device/zSnmpVer} -c${device/zSnmpCommunity} ${device/snmpwalkPrefix}${here/manageIp} system"),
        )
        for name, new_command in replacements:
            try:
                dmd.userCommands._getOb(name).command = new_command
            except AttributeError:
                pass

        # make all of the default commands work over different collectors
        for commandName in ('ping', 'traceroute', 'DNS forward',
                            'DNS reverse', 'snmpwalk'):
            try:
                cmd = dmd.userCommands._getOb(commandName)
                if not cmd.command.startswith('dcsh'):
                    cmd.command = (
                        'dcsh --collector=${device/getPerformanceServerName} -n "%s"'
                        % (cmd.command))
            except AttributeError:
                pass
class FixCentralQueryHealthCheckTypo(Migrate.Step):
    "Fix CentralQuery healthcheck typo"

    version = Migrate.Version(5, 0, 70)

    def cutover(self, dmd):
        try:
            ctx = sm.ServiceContext()
        except sm.ServiceMigrationError:
            log.info("Couldn't generate service context, skipping.")
            return
        centralqueries = [s for s in ctx.services if s.name == "CentralQuery"]
        if not centralqueries:
            log.info("Couldn't find CentralQuery service, skipping.")
            return
        log.info("Found CentralQuery service.")
        # Locate the health check typos.
        commit = False
        for service in centralqueries:
            # "anwering" is the misspelling shipped in earlier releases.
            misnamed = [hc for hc in service.healthChecks
                        if hc.name == "anwering"]
            if misnamed:
                log.info("Found %i healthcheck typo in service: %s"
                         % (len(misnamed), service.name))
                for healthCheck in misnamed:
                    healthCheck.name = "answering"
                    log.info("Updated healthcheck name.")
                commit = True
        if commit:
            log.info("Committing changes.")
            ctx.commit()
class UpdateZentrapConfigs(Migrate.Step):
    "Add zentrap.filter.conf and alter zentrap.conf."

    version = Migrate.Version(5, 0, 70)

    def _readConfig(self, basename):
        """Return the text of the bundled config-files/<basename>.

        BUG FIX: the original called open(...).read() inline, leaking the
        file handle; use a context manager to close it deterministically.
        """
        path = os.path.join(
            os.path.dirname(__file__), "config-files", basename)
        with open(path, 'r') as f:
            return f.read()

    def cutover(self, dmd):
        try:
            ctx = sm.ServiceContext()
        except sm.ServiceMigrationError:
            log.info("Couldn't generate service context, skipping.")
            return
        zentraps = filter(lambda s: s.name == "zentrap", ctx.services)
        log.info("Found %i services named 'zentrap'." % len(zentraps))
        cfFilter = sm.ConfigFile(
            name="/opt/zenoss/etc/zentrap.filter.conf",
            filename="/opt/zenoss/etc/zentrap.filter.conf",
            owner="zenoss:zenoss",
            permissions="0664",
            content=self._readConfig("zentrap.filter.conf")
        )
        for zentrap in zentraps:
            # First update zentrap.conf.
            cf = filter(lambda f: f.name == "/opt/zenoss/etc/zentrap.conf",
                        zentrap.originalConfigs)[0]
            cf.content = self._readConfig("zentrap.conf")
            log.info("Updated '/opt/zenoss/etc/zentrap.conf' contents.")
            # Now add zentrap.filter.conf.
            zentrap.originalConfigs.append(cfFilter)
            log.info("Added '%s'." % cfFilter.name)
        # Commit our changes.
        ctx.commit()
class AddTagToImages(Migrate.Step):
    "Add tag latest to all Images that do not have a tag"

    version = Migrate.Version(108, 0, 0)

    def cutover(self, dmd):
        try:
            ctx = sm.ServiceContext()
        except sm.ServiceMigrationError:
            log.info("Couldn't generate service context, skipping.")
            return
        changed = False
        for service in ctx.services:
            # A tagged image id looks like ".../name:tag".
            tagged = re.search("\/\S*:\w*", service.imageID)
            if service.imageID and tagged is None:
                log.info("Updated image id for %s" % service.name)
                service.imageID += ":latest"
                changed = True
        if changed:
            log.info("committing context with updated imageID")
            ctx.commit()
class MoveExportPackMenuItem(Migrate.Step):
    """Move the 'Export ZenPack' dialog item from the ZenPack menu to More."""

    version = Migrate.Version(2, 2, 0)

    def cutover(self, dmd):
        menus = dmd.zenMenus
        # Remove "Export ZenPack" item from the ZenPack menu.
        zenpack_menu = menus._getOb('ZenPack', None)
        if zenpack_menu:
            if zenpack_menu.zenMenuItems._getOb('exportZenPack', None):
                zenpack_menu.zenMenuItems._delObject('exportZenPack')
        # Add to the More menu
        more_menu = menus._getOb('More', None)
        if more_menu:
            more_menu.manage_addZenMenuItem(
                id='exportZenPack',
                description='Export ZenPack...',
                action='dialog_exportPack',
                permissions=('Manage DMD',),
                isdialog=True,
                isglobal=True,
                ordering=1.01,
                allowed_classes=('ZenPack',))
class UpdateDefineCommandsEditPermission(Migrate.Step):
    """Grant ZEN_DEFINE_COMMANDS_EDIT to Manager and CZ admin roles (ZEN-30566)."""

    version = Migrate.Version(300, 0, 6)

    def addPermissions(self, obj, permission, roles=None, acquire=0):
        """Register `permission` on obj (if new) and grant it to `roles`,
        preserving any roles that already hold it."""
        if not roles:
            roles = []
        if permission not in obj.possible_permissions():
            obj.__ac_permissions__ = (obj.__ac_permissions__
                                      + ((permission, (), roles), ))
        # Keep roles that already have the permission selected.
        for entry in obj.rolesOfPermission(permission):
            if entry['selected'] and entry['name'] not in roles:
                roles.append(entry['name'])
        obj.manage_permission(permission, roles, acquire)

    def cutover(self, dmd):
        zport = dmd.zport
        # Put CZ_ADMIN_ROLE to ZEN_DEFINE_COMMANDS_EDIT permission ZEN-30566.
        self.addPermissions(zport, ZEN_DEFINE_COMMANDS_EDIT,
                            [MANAGER_ROLE, CZ_ADMIN_ROLE], 1)
class EnableNginxPagespeed(Migrate.Step):
    '''
    Turn pagespeed in zproxy-nginx.conf
    '''

    version = Migrate.Version(108, 0, 0)

    def cutover(self, dmd):
        try:
            ctx = sm.ServiceContext()
        except sm.ServiceMigrationError:
            log.info("Couldn't generate service context, skipping")
            return
        commit = False
        zproxy = ctx.getTopService()
        log.info("Top-level service is '{}'.".format(zproxy.name))
        target = '/opt/zenoss/zproxy/conf/zproxy-nginx.conf'
        for config_file in (zproxy.originalConfigs + zproxy.configFiles):
            if config_file.name != target:
                continue
            text = config_file.content
            if re.search("pagespeed on", text) is not None:
                # Already enabled; nothing to do for this copy.
                continue
            text = re.sub('pagespeed off', "pagespeed on", text)
            log.info("Turning pagespeed on for {} and {}".format(
                config_file.name, zproxy.name))
            config_file.content = text
            commit = True
        if commit:
            ctx.commit()
class AddZHardDiskMapMatchProperty(Migrate.Step):
    """
    The property zHardDiskMapMatch is defined on /Devices/Server/Linux, but
    it needs to be copied to /Devices/Server/SSH/Linux
    """

    version = Migrate.Version(4, 2, 70)

    def cutover(self, dmd):
        try:
            log.debug(
                'Adding %s property to /Devices/Server/SSH/Linux if necessary',
                HARD_DISK_MAP_MATCH_PROPERTY)
            sshLinux = _getSshLinux(dmd)
            # Only copy when the target class lacks the property value.
            if sshLinux and not sshLinux.getProperty(
                    HARD_DISK_MAP_MATCH_PROPERTY):
                hdRegex = dmd.Devices.Server.Linux.getProperty(
                    HARD_DISK_MAP_MATCH_PROPERTY)
                sshLinux.setZenProperty(HARD_DISK_MAP_MATCH_PROPERTY, hdRegex)
        except Exception as e:
            # MODERNIZED: 'except Exception, e' -> 'as e' (Py2.6+/Py3 valid).
            # Best-effort step: log and continue rather than fail migration.
            log.warn(
                'Exception trying to add %s property to /Devices/Server/SSH/Linux: %s',
                HARD_DISK_MAP_MATCH_PROPERTY, e)
class Layer2Catalog(Migrate.Step):
    # NOTE(review): the layer2 catalog creation/reindex logic was disabled
    # and survives only inside the docstring below; cutover is a no-op kept
    # so the step still registers at this version.
    version = Migrate.Version(2, 3, 0)

    def cutover(self, dmd):
        """
        # Add the catalog
        try:
            getattr(dmd.ZenLinkManager, 'layer2_catalog')
        except AttributeError:
            #_create_layer2_catalog(dmd.ZenLinkManager)

            # Reindex the interfaces
            print "Indexing interfaces. This may take a while."

            def _update(i):
                if i % 5000 == 0 and i > 0:
                    print i,
                elif i % 1000 == 0:
                    print '.',
                sys.stdout.flush()

            for i, iface in enumerate(
                    dmd.Devices.getSubComponents("IpInterface")):
                iface.index_object()
                _update(i)
        """
        pass
class FixZauthHealthCheck(Migrate.Step):
    """
    Use different curl request to prevent `authentication failed` spam
    in audit.log
    """

    version = Migrate.Version(108, 0, 0)

    def cutover(self, dmd):
        try:
            ctx = sm.ServiceContext()
        except sm.ServiceMigrationError:
            log.info("Couldn't generate service context, skipping.")
            return
        for zauth in [s for s in ctx.services if s.name == "Zauth"]:
            for check in [hc for hc in zauth.healthChecks
                          if hc.name == "answering"]:
                # Unauthenticated probe: just verify the redirect target.
                check.script = "curl -o /dev/null -w '%{redirect_url}' -s http://localhost:9180/zport/dmd | grep -q acl_users"
                log.info("Updated 'answering' healthcheck.")
        ctx.commit()
class NotificationSubscriptions(Migrate.Step):
    """Create the NotificationSubscriptions manager under dmd if missing."""

    version = Migrate.Version(4, 0, 0)

    def cutover(self, dmd):
        # Only add the manager when its root attribute is absent from dmd.
        already_present = hasattr(dmd, NotificationSubscriptionManager.root)
        if not already_present:
            manage_addNotificationSubscriptionManager(dmd)
class AddTriggerNotificationsForCommands(Migrate.Step):
    # Converts legacy ZenEventManager event commands into trigger +
    # command-notification pairs via the triggers facade.
    version = Migrate.Version(4, 0, 0)

    def __init__(self):
        Migrate.Step.__init__(self)
        # Triggers and notification managers must exist first.
        import addTriggers, addNotificationSubscriptions
        self.dependencies = [
            addTriggers.triggers,
            addNotificationSubscriptions.notificationSubscriptions
        ]

    def _parseCommand(self, command):
        """Translate the command's legacy 'where' clause into a python
        trigger rule source; raise TriggerRuleSourceError on failure."""
        python_statement = toPython(command.genMeta(), command.where)
        # if the parser failed to parse the where clause, the python statement
        # will be empty.
        if not python_statement:
            raise TriggerRuleSourceError(command.where)
        log.debug('Parsing From: "%s"' % command.where)
        log.debug(' To : "%s"' % python_statement)
        return python_statement

    def _parseContent(self, content):
        """Convert a legacy %-style format string to a TALES expression."""
        return talesifyLegacyFormatString(content)

    def _createTrigger(self, command):
        """Create (or fetch an existing) trigger named after the command."""
        log.debug('Creating trigger for: %s' % command.id)
        new_rule_source = self._parseCommand(command)
        trigger_id = command.id
        for t in self.existing_triggers:
            if trigger_id == t['name']:
                log.debug('Trigger already exists, not creating.')
                return self.triggers_facade.getTrigger(t['uuid'])
        trigger_uuid = self.triggers_facade.addTrigger(trigger_id)
        trigger = self.triggers_facade.getTrigger(trigger_uuid)
        trigger['enabled'] = command.enabled
        trigger['rule']['source'] = new_rule_source
        self.triggers_facade.updateTrigger(**trigger)
        return trigger

    def _createNotification(self, command, trigger):
        """Create a 'command' notification subscribed to `trigger`,
        unless one with the same id already exists."""
        log.debug('Creating notification for: %s (%s)' % (command.id,
                                                          'command'))
        notification_id = command.id
        for n in self.existing_notifications:
            if notification_id == n.id:
                log.debug('Notification already exists, not creating.')
                return
        self.triggers_facade.addNotification(notification_id, 'command')
        notification_obj = self.dmd.NotificationSubscriptions.findChild(
            notification_id)
        notification_obj.enabled = command.enabled
        notification_obj.send_clear = True
        notification_obj.send_initial_occurrence = True
        notification_obj.delay_seconds = command.delay
        notification_obj.repeat_seconds = command.repeatTime
        notification_obj.subscriptions = [trigger['uuid']]
        notification_obj.content['body_content_type'] = 'text'
        notification_obj.content['body_format'] = self._parseContent(
            command.command)
        notification_obj.content['clear_body_format'] = self._parseContent(
            command.clearCommand)
        # commands do not have recipients.
        log.debug('Not adding recipients since commands dont have recipients.')
        # old event commands didn't have a concept of active windows
        log.debug(
            'Not trying to migrate windows since legacy event commands did not have them.'
        )
        self.triggers_facade.updateNotificationSubscriptions(notification_obj)

    def cutover(self, dmd):
        """Convert each event command with a 'where' clause into a trigger
        plus command notification; parse failures are logged, not fatal."""
        self.dmd = dmd
        self.triggers_facade = Zuul.getFacade('triggers', dmd)
        self.existing_triggers = self.triggers_facade.getTriggers()
        self.existing_notifications = self.triggers_facade.getNotifications()
        # action rules are being removed, make sure they haven't been yet.
        commands = dmd.ZenEventManager.commands.objectValues()
        failed = False
        for command in commands:
            if not command.where:
                continue
            try:
                trigger = self._createTrigger(command)
                self._createNotification(command, trigger)
                log.info('Done processing event command: %s.' % command.id)
            except TriggerRuleSourceError, e:
                failed = True
                log.warn('Unable to parse existing event command: %s' %
                         command.id)
            except PythonConversionException, e:
                log.debug("Failed conversion: %s", e)
                log.warn("Unable to convert existing event command: %s" %
                         command.id)
                failed = True
class CollectJVMMetrics(Migrate.Step):
    """Update the CentralQuery and MetricConsumer supervisor configs so the
    services report JVM metrics."""

    version = Migrate.Version(300, 0, 9)

    def cutover(self, dmd):
        try:
            ctx = sm.ServiceContext()
        except sm.ServiceMigrationError:
            log.info("Couldn't generate service context. Skipping.")
            return
        centralquery = self._getService(ctx, "CentralQuery")
        self._updateConfig(
            centralquery,
            "/opt/zenoss/etc/central-query/central-query_supervisor.conf",
            _CentralQueryConfigContent)
        metricconsumer = self._getService(ctx, "MetricConsumer")
        self._updateConfig(
            metricconsumer,
            "/opt/zenoss/etc/metric-consumer-app/metric-consumer-app_supervisor.conf",
            _MetricConsumerConfigContent)
        ctx.commit()

    def _getService(self, ctx, name):
        """Return the first service named `name`, or None."""
        return next(
            iter(filter(lambda s: s.name == name, ctx.services)), None
        )

    def _getConfigs(self, service, name):
        """Return the first config file named `name` on `service`, or None.

        We don't care about original configs.
        """
        return next(
            iter(filter(lambda s: s.name == name, service.configFiles)), None
        )

    def _updateServiceCommand(self, service, command):
        """Set the service startup command if it differs."""
        if service.startup == command:
            log.info("%s startup command already updated", service.name)
        else:
            service.startup = command
            log.info("Updated startup command for %s", service.name)

    def _addConfig(self, service, configFile, content):
        """Append `configFile` with `content` to the service if absent."""
        config = self._getConfigs(service, configFile)
        if config:
            log.info("Service %s configFiles already has %s",
                     service.name, config.name)
        else:
            new_config = sm.ConfigFile(
                name=configFile,
                filename=configFile,
                owner="zenoss:zenoss",
                permissions="0664",
                content=content
            )
            service.configFiles.append(new_config)
            log.info("Added %s config to service %s configFiles",
                     new_config.name, service.name)

    def _updateConfig(self, service, configFile, content):
        """Replace `configFile`'s content on `service` when it differs."""
        # ROBUSTNESS: guard against a missing service or config file; the
        # original dereferenced both unconditionally.
        if service is None:
            log.info("Service not found; skipping %s", configFile)
            return
        config = self._getConfigs(service, configFile)
        if config is None:
            log.info("Config file %s not found in service %s",
                     configFile, service.name)
            return
        if config.content == content:
            # BUG FIX: the original wrapped this call in a second log.info,
            # which then logged the inner call's None return value.
            log.info("Config file %s content in service %s already matches desired state",
                     configFile, service.name)
        else:
            config.content = content
            log.info("Updated %s config file in service %s to add JVM metrics",
                     configFile, service.name)
class AddGroupManager(Migrate.Step):
    """Rebuild the user-group menus and install the ZODB group manager plugin."""

    version = Migrate.Version(2, 1, 0)

    GROUP_ID = 'groupManager'

    def cutover(self, dmd):
        def menu_item(action, description, item_id, ordering):
            # Every item here is a dialog gated on ZEN_MANAGE_DMD.
            return {
                'action': action,
                'description': description,
                'id': item_id,
                'isdialog': True,
                'ordering': ordering,
                'permissions': (ZEN_MANAGE_DMD, )
            }

        dmd.buildMenus({
            'Group_list': [
                menu_item('dialog_addUserGroup', 'Add New Group...',
                          'addUserGroup', 90.1),
                menu_item('dialog_deleteUserGroups', 'Delete Groups...',
                          'deleteUserGroups', 90.0),
                menu_item('dialog_addUserToGroup', 'Add User...',
                          'addUserToGroups', 80.1),
            ],
            'GroupUser_list': [
                menu_item('dialog_addUserToAGroup', 'Add User...',
                          'addUserToAGroup', 80.1),
                menu_item('dialog_deleteUsersFromGroup', 'Delete Users...',
                          'deleteUsersFromGroup', 80.0),
            ]
        })

        # Drop the superseded menu items if earlier versions created them.
        group_list = dmd.zenMenus.Group_list
        for stale in ('addUserToGroup', 'deleteUsersFromGroup'):
            if hasattr(group_list.zenMenuItems, stale):
                group_list.manage_deleteZenMenuItem(stale)
        if hasattr(dmd.zenMenus.GroupUser_list.zenMenuItems, 'addUserToGroup'):
            dmd.zenMenus.GroupUser_list.manage_deleteZenMenuItem(
                'addUserToGroup')

        self._addGroupManager(dmd.zport)

    def _addGroupManager(self, zport):
        """Activate a ZODBGroupManager plugin on zport's acl_users if absent."""
        acl = zport.acl_users
        if not hasattr(acl, self.GROUP_ID):
            plugins.ZODBGroupManager.addZODBGroupManager(acl, self.GROUP_ID)
            acl.groupManager.manage_activateInterfaces([
                'IGroupsPlugin',
            ])