def update_rally_checkfiles(self):
    """Install the rally runner script and the cron job that invokes it.

    No-op unless rally checks are enabled for this unit.
    """
    if not self.is_rally_enabled:
        return
    # Copy run_rally.py into the unit's scripts directory, keeping its
    # execute bit.
    rally_script = os.path.join(hookenv.charm_dir(), 'files', 'run_rally.py')
    host.rsync(rally_script, self.scripts_dir, options=['--executability'])
    # Render the list of OpenStack tests rally should run into the rally
    # user's home directory.
    ostestsfile = os.path.join('/home', self._rallyuser, 'ostests.txt')
    render(source='ostests.txt.j2',
           target=ostestsfile,
           context=self._get_rally_checks_context(),
           owner=self._rallyuser,
           group=self._rallyuser)
    proxy_settings = hookenv.env_proxy_settings()
    if proxy_settings:
        # Expose the Juju model proxy settings as VAR=value lines in the
        # cron file so the rally run inherits them.
        content = '\n'.join([
            '{}={}'.format(proxy_var, proxy_var_val)
            for proxy_var, proxy_var_val in proxy_settings.items()
        ])
    else:
        content = ''
    context = {
        'schedule': self.rally_cron_schedule,
        'user': self._rallyuser,
        'cmd': os.path.join(self.scripts_dir, 'run_rally.py'),
    }
    # timeout(1) sends SIGTERM after 780s and escalates to SIGKILL at 840s
    # so a hung rally run cannot pile up behind the next scheduled one.
    content += '\n#\n{schedule} {user} timeout -k 840s -s SIGTERM 780s {cmd}'.format(
        **context)
    with open(self.rally_cron_file, 'w') as fd:
        fd.write('# Juju generated - DO NOT EDIT\n{}\n\n'.format(content))
def migrate_to_mount(new_path):
    """Invoked when new mountpoint appears. This function safely migrates
    MySQL data from local disk to persistent storage (only if needed)
    """
    old_path = '/var/lib/mysql'
    if os.path.islink(old_path):
        hookenv.log('{} is already a symlink, skipping migration'.format(
            old_path))
        return True
    # Refuse to clobber pre-existing data on the new mount; the operator
    # must inspect and migrate it by hand. 'lost+found' alone is fine.
    leftovers = [entry for entry in os.listdir(new_path)
                 if entry != 'lost+found']
    if leftovers:
        raise RuntimeError('Persistent storage contains old data. '
                           'Please investigate and migrate data manually '
                           'to: {}'.format(new_path))
    os.chmod(new_path, 0o700)
    if os.path.isdir('/etc/apparmor.d/local'):
        render('apparmor.j2', '/etc/apparmor.d/local/usr.sbin.mysqld',
               context={'path': os.path.join(new_path, '')})
        host.service_reload('apparmor')
    host.service_stop('mysql')
    # Trailing slashes make rsync copy directory *contents* rather than
    # the directory itself.
    host.rsync(os.path.join(old_path, ''),
               os.path.join(new_path, ''),
               options=['--archive'])
    shutil.rmtree(old_path)
    os.symlink(new_path, old_path)
    host.service_start('mysql')
def install_hugepages():
    """ Configure hugepages """
    hugepage_config = config('hugepages')
    if not hugepage_config:
        return
    # TODO: defaults to 2M - this should probably be configurable
    # and support multiple pool sizes - e.g. 2M and 1G.
    hugepage_size = 2048
    if hugepage_config.endswith('%'):
        import psutil
        mem = psutil.virtual_memory()
        fraction = float(hugepage_config.strip('%')) / 100
        hugepages = int((mem.total * fraction) / hugepage_size)
    else:
        hugepages = int(hugepage_config)
    mnt_point = '/run/hugepages/kvm'
    hugepage_support(
        'nova',
        mnt_point=mnt_point,
        group='root',
        nr_hugepages=hugepages,
        mount=False,
        set_shmmax=True,
    )
    # mountpoint(1) exits non-zero when the path is not mounted yet.
    if subprocess.call(['mountpoint', mnt_point]):
        fstab_mount(mnt_point)
    rsync(charm_dir() + '/files/qemu-hugefsdir',
          '/etc/init.d/qemu-hugefsdir')
    subprocess.check_call('/etc/init.d/qemu-hugefsdir')
    subprocess.check_call(['update-rc.d', 'qemu-hugefsdir', 'defaults'])
def render_checks(self, creds):
    """Write the keystone credentials file and register the contrail
    alarms NRPE check."""
    render(source='keystone.yaml', target=self.oscreds, context=creds,
           owner='nagios', group='nagios')
    nrpe = NRPE()
    if not os.path.exists(self.plugins_dir):
        os.makedirs(self.plugins_dir)
    # Ship the charm's plugin scripts, preserving their execute bits.
    source_dir = os.path.join(hookenv.charm_dir(), 'files', 'plugins/')
    host.rsync(source_dir, self.plugins_dir, options=['--executability'])
    check_script = os.path.join(self.plugins_dir,
                                'check_contrail_alarms.py')
    nrpe.add_check(
        shortname='contrail_alarms',
        description='Check Contrail alarms',
        check_cmd=check_script,
    )
    nrpe.write()
def update_nrpe_config():
    """Install the ceph status plugin, its cron collector and NRPE check."""
    # python-dbus is used by check_upstart_job
    apt_install(['python-dbus', 'lockfile-progs'])
    log('Refreshing nagios checks')
    charm_root = os.getenv('CHARM_DIR')
    if os.path.isdir(NAGIOS_PLUGINS):
        rsync(os.path.join(charm_root, 'files', 'nagios',
                           'check_ceph_status.py'),
              os.path.join(NAGIOS_PLUGINS, 'check_ceph_status.py'))
    script = os.path.join(SCRIPTS_DIR, 'collect_ceph_status.sh')
    rsync(os.path.join(charm_root, 'files', 'nagios',
                       'collect_ceph_status.sh'),
          script)
    # Collect ceph status every five minutes as root.
    write_file(STATUS_CRONFILE,
               "{} root {}\n".format('*/5 * * * *', script))
    # Find out if nrpe set nagios_hostname
    hostname = nrpe.get_nagios_hostname()
    current_unit = nrpe.get_nagios_unit_name()
    nrpe_setup = nrpe.NRPE(hostname=hostname)
    check_cmd = ('check_ceph_status.py -f {} --degraded_thresh {}'
                 ' --misplaced_thresh {}'
                 ' --recovery_rate {}').format(
                     STATUS_FILE,
                     config('nagios_degraded_thresh'),
                     config('nagios_misplaced_thresh'),
                     config('nagios_recovery_rate'))
    # NOTE(review): the config key spelling 'nodeepscub' looks like a typo
    # but must match this charm's config.yaml - confirm before renaming.
    if config('nagios_ignore_nodeepscub'):
        check_cmd = check_cmd + ' --ignore_nodeepscrub'
    nrpe_setup.add_check(
        shortname="ceph",
        description='Check Ceph health {{{}}}'.format(current_unit),
        check_cmd=check_cmd)
    nrpe_setup.write()
def install():
    """Stage the bundled package repos and apt config, then install VSM."""
    juju_log('**********install.real')
    base = charm_dir()
    # Copy the bundled repositories and apt configuration into place.
    for src, dst in (
            ('/packages/vsm-dep-repo', '/opt'),
            ('/packages/vsmrepo', '/opt'),
            ('/files/apt.conf', '/etc/apt'),
            ('/files/vsm.list', '/etc/apt/sources.list.d'),
            ('/files/vsm-dep.list', '/etc/apt/sources.list.d')):
        rsync(base + src, dst)
    apt_update()
    apt_install(VSM_PACKAGES)
    juju_log('**********finished to install vsm vsm-dashboard python-vsmclient')
    add_source(config('ceph-source'), config('ceph-key'))
    apt_update(fatal=True)
    apt_install(packages=PRE_INSTALL_PACKAGES, fatal=True)
def update_nrpe_config():
    """Refresh the ceph NRPE check and its status-collection cron job."""
    # python-dbus is used by check_upstart_job
    apt_install('python-dbus')
    log('Refreshing nagios checks')
    charm_root = os.getenv('CHARM_DIR')
    if os.path.isdir(NAGIOS_PLUGINS):
        rsync(os.path.join(charm_root, 'files', 'nagios',
                           'check_ceph_status.py'),
              os.path.join(NAGIOS_PLUGINS, 'check_ceph_status.py'))
    script = os.path.join(SCRIPTS_DIR, 'collect_ceph_status.sh')
    rsync(os.path.join(charm_root, 'files', 'nagios',
                       'collect_ceph_status.sh'),
          script)
    # Run the collector every five minutes as root.
    write_file(STATUS_CRONFILE,
               "{} root {}\n".format('*/5 * * * *', script))
    # Find out if nrpe set nagios_hostname
    hostname = nrpe.get_nagios_hostname()
    current_unit = nrpe.get_nagios_unit_name()
    nrpe_setup = nrpe.NRPE(hostname=hostname)
    nrpe_setup.add_check(
        shortname="ceph",
        description='Check Ceph health {%s}' % current_unit,
        check_cmd='check_ceph_status.py -f {}'.format(STATUS_FILE))
    nrpe_setup.write()
def migrate(self, src_dir, subdir):
    """Copy the contents of src_dir into <mountpoint>/<subdir>.

    Data is staged in a '.migrating' directory first, so a failed copy
    can never masquerade as a completed migration.
    """
    assert self.needs_remount()
    assert subdir, 'Can only migrate to a subdirectory on a mount'

    config = hookenv.config()
    config['live_mountpoint'] = self.mountpoint

    if self.mountpoint is None:
        hookenv.log('External storage AND DATA gone.'
                    'Reverting to original local storage', WARNING)
        return

    dst_dir = os.path.join(self.mountpoint, subdir)
    if os.path.exists(dst_dir):
        hookenv.log('{} already exists. Not migrating data.'.format(
            dst_dir))
        return

    # A trailing slash makes rsync copy src_dir's contents rather than
    # the directory itself.
    if not src_dir.endswith('/'):
        src_dir += '/'

    staging = dst_dir + '.migrating'
    hookenv.log('Migrating data from {} to {}'.format(src_dir, staging))
    host.rsync(src_dir, staging, flags='-av')

    # Atomic rename marks the migration as complete.
    hookenv.log('Moving {} to {}'.format(staging, dst_dir))
    os.rename(staging, dst_dir)

    assert not self.needs_remount()
def install_ntpmon():
    """
    Install package dependencies, source files, and startup configuration.
    """
    install_dir = layer.options.get('ntpmon', 'install-dir')
    service_name = layer.options.get('ntpmon', 'service-name')
    using_systemd = host.init_is_systemd()
    if install_dir:
        log('installing ntpmon')
        host.mkdir(os.path.dirname(install_dir))
        host.rsync('src/', '{}/'.format(install_dir))

        if service_name:
            if using_systemd:
                systemd_config = '/etc/systemd/system/' + service_name + '.service'
                log('installing systemd service: {}'.format(service_name))
                with open(systemd_config, 'w') as conffile:
                    conffile.write(templating.render(
                        'src/' + service_name + '.systemd',
                        layer.options.get('ntpmon')))
                # BUG FIX: 'daemon-reload' is a systemctl subcommand; there
                # is no 'systemd' executable, so the old call always failed.
                subprocess.call(['systemctl', 'daemon-reload'])
            else:
                upstart_config = '/etc/init/' + service_name + '.conf'
                log('installing upstart service: {}'.format(service_name))
                with open(upstart_config, 'w') as conffile:
                    conffile.write(templating.render(
                        'src/' + service_name + '.upstart',
                        layer.options.get('ntpmon')))
    set_flag('ntpmon.installed')
    clear_flag('ntpmon.configured')
def update_nrpe_config():
    """Deploy the ceph nagios check, its cron collector and NRPE config."""
    # python-dbus is used by check_upstart_job
    apt_install('python-dbus')
    log('Refreshing nagios checks')

    def charm_file(name):
        # Path to a file shipped in the charm's files/nagios directory.
        return os.path.join(os.getenv('CHARM_DIR'), 'files', 'nagios', name)

    if os.path.isdir(NAGIOS_PLUGINS):
        rsync(charm_file('check_ceph_status.py'),
              os.path.join(NAGIOS_PLUGINS, 'check_ceph_status.py'))

    script = os.path.join(SCRIPTS_DIR, 'collect_ceph_status.sh')
    rsync(charm_file('collect_ceph_status.sh'), script)
    # Run the collector every five minutes as root.
    write_file(STATUS_CRONFILE,
               "{} root {}\n".format('*/5 * * * *', script))

    # Find out if nrpe set nagios_hostname
    hostname = nrpe.get_nagios_hostname()
    current_unit = nrpe.get_nagios_unit_name()
    nrpe_setup = nrpe.NRPE(hostname=hostname)
    nrpe_setup.add_check(
        shortname="ceph",
        description='Check Ceph health {%s}' % current_unit,
        check_cmd='check_ceph_status.py -f {}'.format(STATUS_FILE)
    )
    nrpe_setup.write()
def install_charm_files(service_name):
    """ Install files shipped with charm """
    # Make sure every nagios directory exists before copying anything in.
    for nag_dir in ('/etc/nagios/nrpe.d/',
                    '/usr/local/lib/nagios/plugins',
                    '/var/lib/nagios/export/'):
        if not os.path.exists(nag_dir):
            host.mkdir(nag_dir, perms=0o755)
    charm_file_dir = os.path.join(hookenv.charm_dir(), 'files')
    charm_plugin_dir = os.path.join(charm_file_dir, 'plugins')
    pkg_plugin_dir = '/usr/lib/nagios/plugins/'
    local_plugin_dir = '/usr/local/lib/nagios/plugins/'
    shutil.copy2(os.path.join(charm_file_dir, 'nagios_plugin.py'),
                 pkg_plugin_dir + '/nagios_plugin.py')
    shutil.copy2(os.path.join(charm_file_dir, 'default_rsync'),
                 '/etc/default/rsync')
    shutil.copy2(os.path.join(charm_file_dir, 'rsyncd.conf'),
                 '/etc/rsyncd.conf')
    # Preserve execute bits on the plugin scripts.
    host.rsync(charm_plugin_dir, '/usr/local/lib/nagios/',
               options=['--executability'])
    link_target = local_plugin_dir + 'nagios_plugin.py'
    if not os.path.exists(link_target):
        os.symlink(pkg_plugin_dir + 'nagios_plugin.py', link_target)
def render_nimsoft_robot_config():
    """Create the nimbus.conf config file.

    Renders the appropriate template for the Nimbus Robot
    """
    # The v5 template is compatible with all versions < 6
    cfg_original_hash = file_hash(NIMBUS_ROBOT_CONFIG)
    # Map template context keys to their charm config option names.
    option_map = (
        ('hub', 'hub'),
        ('domain', 'domain'),
        ('hubip', 'hubip'),
        ('hub_robot_name', 'hub-robot-name'),
        ('secondary_domain', 'secondary-domain'),
        ('secondary_hubip', 'secondary-hubip'),
        ('secondary_hub', 'secondary-hub'),
        ('secondary_hub_robot_name', 'secondary-hub-robot-name'),
    )
    context = {key: config(option) for key, option in option_map}
    context['private_address'] = unit_private_ip()
    context['hostname'] = os.uname()[1]
    render('robot.cfg', NIMBUS_ROBOT_CONFIG, context=context)
    cfg_new_hash = file_hash(NIMBUS_ROBOT_CONFIG)
    rsync(charm_dir() + '/files/request_linux_prod.cfg',
          '/opt/nimsoft/request.cfg')
    # Install the nimbus service
    rsync(charm_dir() + '/files/nimbus.service',
          '/lib/systemd/system/nimbus.service')
    # Only bounce the service when the rendered config actually changed.
    if cfg_original_hash != cfg_new_hash:
        service('restart', 'nimbus')
    status.active('nimbus ready.')
def install_files():
    """Copy thirdparty scripts into the service's parts directory and
    install their package prerequisites."""
    # this part lifted from haproxy charm hooks.py
    src = os.path.join(os.environ["CHARM_DIR"], "files/thirdparty/")
    dst = '/srv/{}/parts/'.format(CHARM_NAME)
    maint('Copying scripts from %s to %s' % (src, dst))
    host.mkdir(dst, perms=0o755)
    for entry in glob.glob(os.path.join(src, "*")):
        host.rsync(entry, os.path.join(dst, os.path.basename(entry)))
    # Template files may have changed in an upgrade, so we need to rewrite
    # them
    config_changed()
    # Package prerequisites for k8s-kpi-scripts/thirdparty/*
    apt_install([
        'python-configparser',
        'python-prometheus-client',
        'python-cssselect',
        'python-yaml',
        'python-urllib3',
        'python3-dev',
        'python-click',
        'python-swiftclient',
        'python-keystoneclient',
    ])
def install():
    """Install VSM from the package repos bundled with the charm."""
    juju_log('**********install.real')
    root = charm_dir()
    # Local package repositories.
    rsync(root + '/packages/vsm-dep-repo', '/opt')
    rsync(root + '/packages/vsmrepo', '/opt')
    # Apt configuration pointing at those repositories.
    rsync(root + '/files/apt.conf', '/etc/apt')
    rsync(root + '/files/vsm.list', '/etc/apt/sources.list.d')
    rsync(root + '/files/vsm-dep.list', '/etc/apt/sources.list.d')
    apt_update()
    apt_install(VSM_PACKAGES)
    juju_log('**********finished to install vsm')
    add_source(config('ceph-source'), config('ceph-key'))
    apt_update(fatal=True)
    apt_install(packages=PRE_INSTALL_PACKAGES, fatal=True)
def update_nrpe_checks():
    """Install the RabbitMQ nagios plugin and register its NRPE check.

    Generates (and persists) a password for the nagios vhost/user on
    first run; subsequent runs reuse the stored password.
    """
    if os.path.isdir(NAGIOS_PLUGINS):
        rsync(os.path.join(os.getenv('CHARM_DIR'), 'scripts',
                           'check_rabbitmq.py'),
              os.path.join(NAGIOS_PLUGINS, 'check_rabbitmq.py'))
    user = '******'
    vhost = 'nagios'
    password_file = os.path.join(RABBIT_DIR, '%s.passwd' % user)
    if os.path.exists(password_file):
        password = open(password_file).read().strip()
    else:
        cmd = ['pwgen', '64', '1']
        # BUG FIX: check_output() returns bytes; decode so the password is
        # text in both branches (the read-back branch yields str), and
        # write the file in text mode to match.
        password = subprocess.check_output(cmd).decode('utf-8').strip()
        with open(password_file, 'w') as out:
            out.write(password)
    rabbit.create_vhost(vhost)
    rabbit.create_user(user, password)
    rabbit.grant_permissions(user, vhost)
    nrpe_compat = NRPE()
    nrpe_compat.add_check(
        shortname=rabbit.RABBIT_USER,
        description='Check RabbitMQ',
        check_cmd='{}/check_rabbitmq.py --user {} --password {} --vhost {}'
                  ''.format(NAGIOS_PLUGINS, user, password, vhost)
    )
    nrpe_compat.write()
def update_nrpe_config():
    """Refresh all ceph NRPE checks and the status-collection cron job.

    Installs the plugin and collector scripts, then registers the main
    ceph health check plus optional additional checks and an OSD-count
    check, driven by charm config.
    """
    # python-dbus is used by check_upstart_job
    apt_install(['python-dbus', 'lockfile-progs'])
    log('Refreshing nagios checks')
    if os.path.isdir(NAGIOS_PLUGINS):
        rsync(
            os.path.join(os.getenv('CHARM_DIR'), 'files', 'nagios',
                         'check_ceph_status.py'),
            os.path.join(NAGIOS_PLUGINS, 'check_ceph_status.py'))
    script = os.path.join(SCRIPTS_DIR, 'collect_ceph_status.sh')
    rsync(
        os.path.join(os.getenv('CHARM_DIR'), 'files', 'nagios',
                     'collect_ceph_status.sh'),
        script)
    # Collect ceph status every five minutes as root.
    cronjob = "{} root {}\n".format('*/5 * * * *', script)
    write_file(STATUS_CRONFILE, cronjob)
    # Find out if nrpe set nagios_hostname
    hostname = nrpe.get_nagios_hostname()
    current_unit = nrpe.get_nagios_unit_name()
    nrpe_setup = nrpe.NRPE(hostname=hostname)
    check_cmd = 'check_ceph_status.py -f {} --degraded_thresh {}' \
        ' --misplaced_thresh {}' \
        ' --recovery_rate {}'.format(STATUS_FILE,
                                     config('nagios_degraded_thresh'),
                                     config('nagios_misplaced_thresh'),
                                     config('nagios_recovery_rate'))
    if config('nagios_raise_nodeepscrub'):
        check_cmd = check_cmd + ' --raise_nodeepscrub'
    nrpe_setup.add_check(
        shortname="ceph",
        description='Check Ceph health {{{}}}'.format(current_unit),
        check_cmd=check_cmd)
    if config('nagios_additional_checks'):
        additional_critical = config('nagios_additional_checks_critical')
        # The option holds a Python dict literal mapping check name to a
        # status substring; a parse error here deliberately fails the hook.
        x = ast.literal_eval(config('nagios_additional_checks'))

        for key, value in x.items():
            name = "ceph-{}".format(key.replace(" ", ""))
            log("Adding check {}".format(name))
            # The escaped quotes survive NRPE command parsing so the
            # matched substring may itself contain spaces.
            check_cmd = 'check_ceph_status.py -f {}' \
                ' --additional_check \\\"{}\\\"' \
                ' {}'.format(STATUS_FILE, value,
                             "--additional_check_critical"
                             if additional_critical is True else "")
            nrpe_setup.add_check(
                shortname=name,
                description='Additional Ceph checks {{{}}}'.format(
                    current_unit),
                check_cmd=check_cmd)
    if config('nagios_check_num_osds'):
        check_cmd = 'check_ceph_status.py -f {} --check_num_osds'.format(
            STATUS_FILE)
        nrpe_setup.add_check(
            shortname='ceph_num_osds',
            description='Check whether all OSDs are up and in',
            check_cmd=check_cmd)
    nrpe_setup.write()
def install():
    """Copy the charm's scripts into the parts directory and mark the
    install complete."""
    # this part lifted from haproxy charm hooks.py
    source_dir = os.path.join(os.environ["CHARM_DIR"], "scripts")
    target_dir = '/srv/charmbuild-kpi-import/parts/'
    maint('Copying scripts from %s to %s' % (source_dir, target_dir))
    host.mkdir(target_dir, perms=0o755)
    for entry in glob.glob(os.path.join(source_dir, "*")):
        host.rsync(entry, os.path.join(target_dir,
                                       os.path.basename(entry)))
    set_flag('charmbuild.installed')
def rsync_nrpe_checks(plugins_dir):
    """Copy the charm's NRPE plugin scripts into plugins_dir."""
    if not os.path.exists(plugins_dir):
        os.makedirs(plugins_dir)
    source = os.path.join(charm_dir(), 'files', 'plugins/')
    # --executability preserves the scripts' execute bits.
    rsync(source, plugins_dir, options=['--executability'])
def agent_changed(rid=None, unit=None):
    """Handle an agent relation change.

    Once the shared-db and amqp relations are complete, installs the
    peer's SSH key, records its address in /etc/hosts, rewrites the
    server manifest from the charm copy, and restarts the VSM agent
    services. Only runs while the manifest still contains the
    'token-tenant' placeholder (i.e. it has not been configured yet).
    """
    if 'shared-db' not in CONFIGS.complete_contexts():
        juju_log('shared-db relation incomplete. Peer not ready?')
        return
    if 'amqp' not in CONFIGS.complete_contexts():
        juju_log('amqp relation incomplete. Peer not ready?')
        return
    with open('/etc/manifest/server.manifest') as server_manifest:
        # Placeholder still present => manifest not yet configured.
        flag = 'token-tenant' in server_manifest.read()
    if flag:
        rel_settings = relation_get(rid=rid, unit=unit)
        key = rel_settings.get('ssh_public_key')
        juju_log("**********key is %s" % str(key))
        if not key:
            juju_log('peer did not publish key?')
            return
        ssh_controller_key_add(key, rid=rid, unit=unit)
        # NOTE(review): this local 'host' shadows any module-level name
        # 'host' (e.g. charmhelpers host) within this function.
        host = unit_get('private-address')
        hostname = get_hostname(host)
        hostaddress = get_host_ip(host)
        juju_log("**********host is %s" % host)
        juju_log("**********hostname is %s" % hostname)
        juju_log("**********hostaddress is %s" % hostaddress)
        # NOTE(review): appends on every invocation; duplicate /etc/hosts
        # entries are not filtered.
        with open('/etc/hosts', 'a') as hosts:
            hosts.write('%s %s' % (hostaddress, hostname) + '\n')
        token_tenant = rel_settings.get('token_tenant')
        juju_log("**********token_tenant is %s" % token_tenant)
        # Start from the pristine manifest shipped with the charm, then
        # substitute the controller address and token placeholders.
        rsync(
            charm_dir() + '/files/server.manifest',
            '/etc/manifest/server.manifest'
        )
        c_hostaddress = rel_settings.get('hostaddress')
        juju_log("**********controller_hostaddress is %s" % c_hostaddress)
        subprocess.check_call(['sudo', 'sed', '-i',
                               's/^controller_ip/%s/g' % c_hostaddress,
                               '/etc/manifest/server.manifest'])
        subprocess.check_call(['sudo', 'sed', '-i',
                               's/token-tenant/%s/g' % token_tenant,
                               '/etc/manifest/server.manifest'])
        # Bounce both agent services so they pick up the new manifest.
        subprocess.check_call(['sudo', 'service', 'vsm-agent', 'stop'])
        subprocess.check_call(['sudo', 'service', 'vsm-agent', 'start'])
        subprocess.check_call(['sudo', 'service', 'vsm-physical', 'stop'])
        subprocess.check_call(['sudo', 'service', 'vsm-physical', 'start'])
        juju_log("**********start vsm-agent")
        juju_log("**********start vsm-physical")
def setup_ocf_files():
    """Install the bundled OCF resource agent files.
    """
    # TODO (thedac) Eventually we want to package the OCF files.
    # Bundle with the charm until then.
    mkdir('/usr/lib/ocf/resource.d/ceph')
    mkdir('/usr/lib/ocf/resource.d/maas')
    # Xenial corosync is not creating this directory
    mkdir('/etc/corosync/uidgid.d')
    # Bundled file -> install destination.
    agents = (
        ('files/ocf/ceph/rbd', '/usr/lib/ocf/resource.d/ceph/rbd'),
        ('files/ocf/maas/dns', '/usr/lib/ocf/resource.d/maas/dns'),
        ('files/ocf/maas/maas_dns.py', '/usr/lib/heartbeat/maas_dns.py'),
        ('files/ocf/maas/maasclient/', '/usr/lib/heartbeat/maasclient/'),
        ('files/ocf/maas/maas_stonith_plugin.py',
         '/usr/lib/stonith/plugins/external/maas'),
    )
    for src, dst in agents:
        rsync(src, dst)
def setup_ocf_files():
    """Install the bundled OCF resource agent files.
    """
    # TODO (thedac) Eventually we want to package the OCF files.
    # Bundle with the charm until then.
    for directory in ('/usr/lib/ocf/resource.d/ceph',
                      '/usr/lib/ocf/resource.d/maas',
                      # Xenial corosync is not creating this directory
                      '/etc/corosync/uidgid.d'):
        mkdir(directory)
    # Bundled file -> install destination.
    for src, dst in (
            ('ocf/ceph/rbd', '/usr/lib/ocf/resource.d/ceph/rbd'),
            ('ocf/maas/dns', '/usr/lib/ocf/resource.d/maas/dns'),
            ('ocf/maas/maas_dns.py', '/usr/lib/heartbeat/maas_dns.py'),
            ('ocf/maas/maasclient/', '/usr/lib/heartbeat/maasclient/'),
            ('ocf/maas/maas_stonith_plugin.py',
             '/usr/lib/stonith/plugins/external/maas')):
        rsync(src, dst)
def render_nimsoft_robot_config():
    """ Create the required config files.

    Renders the appropriate template for the Nimbus Robot
    """
    # The v5 template is compatible with all versions < 6
    cfg_original_hash = file_hash(ROBOT_CONFIG)
    # Config option names are the context keys with '_' -> '-'.
    context = {
        key: config(key.replace('_', '-'))
        for key in ('hub', 'domain', 'hubip', 'hub_robot_name',
                    'secondary_domain', 'secondary_hubip',
                    'secondary_hub', 'secondary_hub_robot_name',
                    'aa_profile_mode')
    }
    context['private_address'] = unit_private_ip()
    context['hostname'] = os.uname()[1]
    # Render robot.cfg
    render(ROBOT_CONFIG, ROBOT_CONFIG_PATH, context=context)
    cfg_new_hash = file_hash(ROBOT_CONFIG)
    # Render request.cfg
    render(DIST_REQ, DIST_REQ_PATH, context=context)
    # Install the nimbus service
    rsync(charm_dir() + '/files/nimbus.service',
          '/lib/systemd/system/nimbus.service')
    # Render AppArmor profile
    render(NIMBUS_AA_PROFILE, NIMBUS_AA_PROFILE_PATH, context=context)
    # Set AppArmor context
    NimbusAppArmorContext().setup_aa_profile()
    # Only restart when robot.cfg actually changed.
    if cfg_original_hash != cfg_new_hash:
        service('restart', 'nimbus')
    hookenv.status_set('active', 'ready')
def config_vsm_controller():
    """Configure the VSM controller once all required relations are ready.

    Rewrites the cluster manifest with the keystone network, restarts the
    VSM services, patches the dashboard settings and enables HTTPS.
    """
    if 'shared-db' in CONFIGS.complete_contexts() and \
            'amqp' in CONFIGS.complete_contexts() and \
            'identity-service' in CONFIGS.complete_contexts():
        juju_log("**********config vsm controller")
        # Extract the keystone host from identity_uri
        # (scheme://host:port/...).
        service_host = auth_token_config('identity_uri').split('/')[2].split(':')[0]
        # Derive the keystone /24 network for the cluster manifest.
        net = '.'.join(service_host.split('.')[0:3]) + ".0\/24"
        subprocess.check_call(['sudo', 'sed', '-i',
                               's/^192.168.*/%s/g' % net,
                               '/etc/manifest/cluster.manifest'])
        subprocess.check_call(['sudo', 'service', 'vsm-api', 'restart'])
        subprocess.check_call(['sudo', 'service', 'vsm-scheduler', 'restart'])
        subprocess.check_call(['sudo', 'service', 'vsm-conductor', 'restart'])
        # TODO fix the hardcode of vsm-dashboard.
        subprocess.check_call(['sudo', 'sed', '-i',
                               "s/'service'/'services'/g",
                               '/usr/share/vsm-dashboard/vsm_dashboard/api/vsm.py'])
        keystone_vsm_service_password = auth_token_config('admin_password')
        local_settings = "/usr/share/vsm-dashboard/vsm_dashboard/local/local_settings.py"
        etc_local_settings = "/etc/vsm-dashboard/local_settings"
        rsync(
            charm_dir() + '/files/local_settings.template',
            local_settings
        )
        # NOTE(review): the sed expression below appears redacted
        # ('******') in this copy - as written, applying '%' to a string
        # with no '%s' placeholder raises TypeError; the original most
        # likely interpolated keystone_vsm_service_password here. Confirm
        # against the charm's VCS history before changing.
        subprocess.check_call(['sudo', 'sed', '-i',
                               's/^KEYSTONE_VSM_SERVICE_PASSWORD =*.*/KEYSTONE_VSM_SERVICE_PASSWORD = "******"/g' % keystone_vsm_service_password,
                               local_settings])
        subprocess.check_call(['sudo', 'sed', '-i',
                               's/^OPENSTACK_HOST =*.*/OPENSTACK_HOST = "%s"/g' % service_host,
                               local_settings])
        subprocess.check_call(['sudo', 'sed', '-i',
                               's/^OPENSTACK_KEYSTONE_DEFAULT_ROLE =*.*/OPENSTACK_KEYSTONE_DEFAULT_ROLE = "_member_"/g',
                               local_settings])
        # Replace the packaged settings with a symlink to the rendered one.
        subprocess.check_call(['sudo', 'rm', '-rf', etc_local_settings])
        subprocess.check_call(['sudo', 'ln', '-sf',
                               local_settings,
                               etc_local_settings])
        # Enable HTTPS via the bundled helper script.
        rsync(
            charm_dir() + '/scripts/https',
            '/tmp/https'
        )
        subprocess.check_call(['sudo', 'bash', '/tmp/https'])
        subprocess.check_call(['sudo', 'service', 'apache2', 'restart'])
        open_port('443')
        open_port('80')
def install_hugepages():
    """ Configure hugepages """
    hugepage_config = config('hugepages')
    if not hugepage_config:
        return
    mnt_point = '/run/hugepages/kvm'
    hugepage_support(
        'nova',
        mnt_point=mnt_point,
        group='root',
        nr_hugepages=get_hugepage_number(),
        mount=False,
        set_shmmax=True,
    )
    # Remove hugepages entry if present due to Bug #1518771
    Fstab.remove_by_mountpoint(mnt_point)
    # mountpoint(1) exits non-zero when nothing is mounted there yet;
    # restarting qemu-kvm (re)creates the mount.
    if subprocess.call(['mountpoint', mnt_point]):
        service_restart('qemu-kvm')
    rsync(charm_dir() + '/files/qemu-hugefsdir',
          '/etc/init.d/qemu-hugefsdir')
    subprocess.check_call('/etc/init.d/qemu-hugefsdir')
    subprocess.check_call(['update-rc.d', 'qemu-hugefsdir', 'defaults'])
def test_rsyncs_a_path(self, log, check_output):
    """host.rsync strips whitespace from rsync's output and calls rsync
    with the default flags."""
    src = '/from/this/path/foo'
    dest = '/to/this/path/bar'
    check_output.return_value = b' some output '  # Spaces will be stripped

    result = host.rsync(src, dest)

    self.assertEqual(result, 'some output')
    expected_cmd = [
        '/usr/bin/rsync', '-r', '--delete', '--executability',
        '/from/this/path/foo',
        '/to/this/path/bar'
    ]
    check_output.assert_called_with(expected_cmd)
def install_hugepages():
    """ Configure hugepages """
    hugepage_config = config('hugepages')
    if not hugepage_config:
        return
    mnt_point = '/run/hugepages/kvm'
    hugepage_support(
        'nova',
        mnt_point=mnt_point,
        group='root',
        nr_hugepages=get_hugepage_number(),
        mount=False,
        set_shmmax=True,
    )
    # mountpoint(1) returns non-zero when the path is not yet mounted.
    if subprocess.call(['mountpoint', mnt_point]):
        fstab_mount(mnt_point)
    rsync(charm_dir() + '/files/qemu-hugefsdir',
          '/etc/init.d/qemu-hugefsdir')
    subprocess.check_call('/etc/init.d/qemu-hugefsdir')
    subprocess.check_call(['update-rc.d', 'qemu-hugefsdir', 'defaults'])
def install_charm_files(service_name):
    """Install files shipped with charm."""
    # Create any missing nagios directories first.
    for nag_dir in ("/etc/nagios/nrpe.d/",
                    "/usr/local/lib/nagios/plugins",
                    "/var/lib/nagios/export/"):
        if not os.path.exists(nag_dir):
            Path(nag_dir).mkdir(mode=0o755, parents=True)

    charm_file_dir = os.path.join(hookenv.charm_dir(), "files")
    charm_plugin_dir = os.path.join(charm_file_dir, "plugins")

    # Distro packages keep nagios plugins in different locations.
    if OS_RELEASE_CTXT['ID'] == 'ubuntu':
        pkg_plugin_dir = "/usr/lib/nagios/plugins/"
    else:
        pkg_plugin_dir = "/usr/lib64/nagios/plugins/"
    local_plugin_dir = "/usr/local/lib/nagios/plugins/"

    # Install the plugin helper modules next to the distro plugins.
    for helper in ("nagios_plugin.py", "nagios_plugin3.py"):
        shutil.copy2(os.path.join(charm_file_dir, helper),
                     pkg_plugin_dir + helper)
    shutil.copy2(os.path.join(charm_file_dir, "default_rsync"),
                 "/etc/default/rsync")
    shutil.copy2(os.path.join(charm_file_dir, "rsyncd.conf"),
                 "/etc/rsyncd.conf")
    host.mkdir("/etc/rsync-juju.d", perms=0o755)
    # Preserve execute bits on the plugin scripts.
    host.rsync(charm_plugin_dir, "/usr/local/lib/nagios/",
               options=["--executability"])

    for nagios_plugin in ("nagios_plugin.py", "nagios_plugin3.py"):
        if not os.path.exists(local_plugin_dir + nagios_plugin):
            os.symlink(pkg_plugin_dir + nagios_plugin,
                       local_plugin_dir + nagios_plugin)
def update_nrpe_config():
    """Install swift-storage NRPE plugins, sudoers entry, and checks."""
    # python-dbus is used by check_upstart_job
    apt_install('python-dbus')
    log('Refreshing nrpe checks')
    if not os.path.exists(NAGIOS_PLUGINS):
        mkpath(NAGIOS_PLUGINS)
    charm_root = os.getenv('CHARM_DIR')
    for plugin in ('check_swift_storage.py', 'check_swift_service'):
        rsync(os.path.join(charm_root, 'files', 'nrpe-external-master',
                           plugin),
              os.path.join(NAGIOS_PLUGINS, plugin))
    rsync(os.path.join(charm_root, 'files', 'sudo', 'swift-storage'),
          os.path.join(SUDOERS_D, 'swift-storage'))
    # Find out if nrpe set nagios_hostname
    hostname = nrpe.get_nagios_hostname()
    current_unit = nrpe.get_nagios_unit_name()
    nrpe_setup = nrpe.NRPE(hostname=hostname)
    # check the rings and replication
    nrpe_setup.add_check(
        shortname='swift_storage',
        description='Check swift storage ring hashes and replication'
                    ' {%s}' % current_unit,
        check_cmd='check_swift_storage.py {}'.format(
            config('nagios-check-params'))
    )
    nrpe.add_init_service_checks(nrpe_setup, SWIFT_SVCS, current_unit)
    nrpe_setup.write()
def update_nrpe_config():
    """Install check_ntpmon and register its NRPE checks.

    When every known sub-check is requested, a single combined check is
    registered instead of one per sub-check to reduce Nagios noise.
    """
    # python-dbus is used by check_upstart_job
    # python-psutil is used by check_ntpmon
    fetch.apt_install(['python-dbus', 'python-psutil'])
    wanted = hookenv.config('nagios_ntpmon_checks').split(" ")
    if os.path.isdir(NAGIOS_PLUGINS):
        host.rsync(
            os.path.join(os.getenv('CHARM_DIR'), 'files', 'nagios',
                         'check_ntpmon.py'),
            os.path.join(NAGIOS_PLUGINS, 'check_ntpmon.py'))

    hostname = nrpe.get_nagios_hostname()
    current_unit = nrpe.get_nagios_unit_name()
    nrpe_setup = nrpe.NRPE(hostname=hostname)
    nrpe.add_init_service_checks(nrpe_setup, ['ntp'], current_unit)

    allchecks = set(['offset', 'peers', 'reachability', 'sync'])

    # remove any previously-created ntpmon checks
    nrpe_setup.remove_check(shortname="ntpmon")
    for name in allchecks:
        nrpe_setup.remove_check(shortname="ntpmon_%s" % name)

    if set(wanted) == allchecks:
        # All sub-checks requested: combine into one check.
        nrpe_setup.add_check(
            shortname="ntpmon",
            description='Check NTPmon {}'.format(current_unit),
            check_cmd='check_ntpmon.py')
    else:
        for nc in wanted:
            if len(nc) > 0:
                nrpe_setup.add_check(
                    shortname="ntpmon_%s" % nc,
                    description='Check NTPmon %s {%s}' % (nc, current_unit),
                    check_cmd='check_ntpmon.py --check %s' % nc)
    nrpe_setup.write()
def install_hugepages():
    """ Configure hugepages """
    if not config('hugepages'):
        return
    mnt_point = '/run/hugepages/kvm'
    hugepage_support(
        'nova',
        mnt_point=mnt_point,
        group='root',
        nr_hugepages=get_hugepage_number(),
        mount=False,
        set_shmmax=True,
    )
    # Remove hugepages entry if present due to Bug #1518771
    Fstab.remove_by_mountpoint(mnt_point)
    # Non-zero exit from mountpoint(1) means nothing is mounted there;
    # a qemu-kvm restart re-establishes the mount.
    if subprocess.call(['mountpoint', mnt_point]):
        service_restart('qemu-kvm')
    rsync(charm_dir() + '/files/qemu-hugefsdir',
          '/etc/init.d/qemu-hugefsdir')
    subprocess.check_call('/etc/init.d/qemu-hugefsdir')
    subprocess.check_call(['update-rc.d', 'qemu-hugefsdir', 'defaults'])
def main():
    """Install RAID and bonding nagios checks and register them with NRPE."""
    cmd = subprocess.Popen(['lsmod'],
                           stdout=subprocess.PIPE,
                           stderr=subprocess.PIPE)
    stdout, stederr = cmd.communicate()
    # BUG FIX: previously 'megaraid' was only assigned inside the regex
    # match branch, raising UnboundLocalError further down whenever no
    # megaraid module was loaded.
    megaraid = bool(re.compile('(megaraid).*').findall(
        stdout.decode('utf-8')))
    if os.path.isdir(NAGIOS_PLUGINS):
        rsync(
            os.path.join(os.getenv('CHARM_DIR'), 'files', 'nagios',
                         'check_lsi_raid'),
            os.path.join(NAGIOS_PLUGINS, 'check_lsi_raid'))
        rsync(
            os.path.join(os.getenv('CHARM_DIR'), 'files', 'nagios',
                         'check_bond'),
            os.path.join(NAGIOS_PLUGINS, 'check_bond'))
    if os.path.isdir(SUDOERS_DIR):
        rsync(
            os.path.join(os.getenv('CHARM_DIR'), 'files', 'nagios',
                         'nagios_sudoers'),
            os.path.join(SUDOERS_DIR, 'nagios_sudoers'))
    hostname = nrpe.get_nagios_hostname()
    current_unit = nrpe.get_nagios_unit_name()
    nrpe_setup = nrpe.NRPE(hostname=hostname)
    # Install megaraid tools
    # And add megaraid nagios check
    if megaraid:
        install_packages(['storcli', 'libfile-which-perl'])
        nrpe_setup.add_check(shortname='lsi-raid',
                             description='LSI Raid Check {%s}' % current_unit,
                             check_cmd=(os.path.join(NAGIOS_PLUGINS,
                                                     'check_lsi_raid')))
    # Install checks for the network bonds
    if os.path.isfile('/proc/net/bonding/bond0') and \
            os.path.isfile('/proc/net/bonding/bond1'):
        nrpe_setup.add_check(
            shortname='bond0',
            description='Bond0 check {%s}' % current_unit,
            check_cmd=(os.path.join(NAGIOS_PLUGINS, 'check_bond') +
                       ' -i bond0 -p eth2'))
        # BUG FIX: this check was registered as shortname='bond0' too,
        # silently replacing the bond0 check instead of adding bond1.
        nrpe_setup.add_check(
            shortname='bond1',
            description='Bond1 check {%s}' % current_unit,
            check_cmd=(os.path.join(NAGIOS_PLUGINS, 'check_bond') +
                       ' -i bond1 -p eth3'))
    nrpe_setup.write()
    reactive.set_state('raidcheck_installed')
    hookenv.status_set('active', 'Unit is ready')
def update_nrpe_checks():
    """Install RabbitMQ nagios plugins and register the NRPE checks.

    Creates a per-unit nagios vhost/user, manages the optional stats
    cron job, and adds queue-threshold checks when configured.
    """
    if os.path.isdir(NAGIOS_PLUGINS):
        rsync(os.path.join(os.getenv('CHARM_DIR'), 'scripts',
                           'check_rabbitmq.py'),
              os.path.join(NAGIOS_PLUGINS, 'check_rabbitmq.py'))
        rsync(os.path.join(os.getenv('CHARM_DIR'), 'scripts',
                           'check_rabbitmq_queues.py'),
              os.path.join(NAGIOS_PLUGINS, 'check_rabbitmq_queues.py'))
    if config('stats_cron_schedule'):
        script = os.path.join(SCRIPTS_DIR, 'collect_rabbitmq_stats.sh')
        # CRONJOB_CMD wraps the collector in a timeout per charm config.
        cronjob = CRONJOB_CMD.format(schedule=config('stats_cron_schedule'),
                                     timeout=config('cron-timeout'),
                                     command=script)
        rsync(os.path.join(charm_dir(), 'scripts',
                           'collect_rabbitmq_stats.sh'), script)
        write_file(STATS_CRONFILE, cronjob)
    elif os.path.isfile(STATS_CRONFILE):
        # Schedule removed from config: clean up the stale cron file.
        os.remove(STATS_CRONFILE)
    # Find out if nrpe set nagios_hostname
    hostname = nrpe.get_nagios_hostname()
    myunit = nrpe.get_nagios_unit_name()
    # create unique user and vhost for each unit
    current_unit = local_unit().replace('/', '-')
    # NOTE(review): the user literal appears redacted ('******') in this
    # copy; it presumably contained a '%s' naming pattern - confirm
    # against VCS history.
    user = '******' % current_unit
    vhost = 'nagios-%s' % current_unit
    password = rabbit.get_rabbit_password(user, local=True)
    rabbit.create_vhost(vhost)
    rabbit.create_user(user, password)
    rabbit.grant_permissions(user, vhost)
    nrpe_compat = nrpe.NRPE(hostname=hostname)
    nrpe_compat.add_check(
        shortname=rabbit.RABBIT_USER,
        description='Check RabbitMQ {%s}' % myunit,
        check_cmd='{}/check_rabbitmq.py --user {} --password {} --vhost {}'
                  ''.format(NAGIOS_PLUGINS, user, password, vhost)
    )
    if config('queue_thresholds'):
        cmd = ""
        # If value of queue_thresholds is incorrect we want the hook to fail
        for item in yaml.safe_load(config('queue_thresholds')):
            cmd += ' -c "{}" "{}" {} {}'.format(*item)
        nrpe_compat.add_check(
            shortname=rabbit.RABBIT_USER + '_queue',
            description='Check RabbitMQ Queues',
            check_cmd='{}/check_rabbitmq_queues.py{} {}'.format(
                NAGIOS_PLUGINS, cmd, STATS_DATAFILE)
        )
    nrpe_compat.write()
def update_nrpe_checks():
    """Install RabbitMQ nagios plugins and register the NRPE checks.

    Creates a per-unit nagios vhost/user, manages the optional stats
    cron job, and adds queue-threshold checks when configured.
    """
    if os.path.isdir(NAGIOS_PLUGINS):
        rsync(os.path.join(os.getenv('CHARM_DIR'), 'scripts',
                           'check_rabbitmq.py'),
              os.path.join(NAGIOS_PLUGINS, 'check_rabbitmq.py'))
        rsync(os.path.join(os.getenv('CHARM_DIR'), 'scripts',
                           'check_rabbitmq_queues.py'),
              os.path.join(NAGIOS_PLUGINS, 'check_rabbitmq_queues.py'))
    if config('stats_cron_schedule'):
        script = os.path.join(SCRIPTS_DIR, 'collect_rabbitmq_stats.sh')
        cronjob = "{} root {}\n".format(config('stats_cron_schedule'),
                                        script)
        rsync(os.path.join(charm_dir(), 'scripts',
                           'collect_rabbitmq_stats.sh'), script)
        write_file(STATS_CRONFILE, cronjob)
    elif os.path.isfile(STATS_CRONFILE):
        # Schedule removed from config: clean up the stale cron file.
        os.remove(STATS_CRONFILE)
    # Find out if nrpe set nagios_hostname
    hostname = nrpe.get_nagios_hostname()
    myunit = nrpe.get_nagios_unit_name()
    # create unique user and vhost for each unit
    current_unit = local_unit().replace('/', '-')
    # NOTE(review): the user literal appears redacted ('******') in this
    # copy; it presumably contained a '%s' naming pattern - confirm
    # against VCS history.
    user = '******' % current_unit
    vhost = 'nagios-%s' % current_unit
    password = rabbit.get_rabbit_password(user, local=True)
    rabbit.create_vhost(vhost)
    rabbit.create_user(user, password)
    rabbit.grant_permissions(user, vhost)
    nrpe_compat = nrpe.NRPE(hostname=hostname)
    nrpe_compat.add_check(
        shortname=rabbit.RABBIT_USER,
        description='Check RabbitMQ {%s}' % myunit,
        check_cmd='{}/check_rabbitmq.py --user {} --password {} --vhost {}'
                  ''.format(NAGIOS_PLUGINS, user, password, vhost)
    )
    if config('queue_thresholds'):
        cmd = ""
        # If value of queue_thresholds is incorrect we want the hook to fail
        for item in yaml.safe_load(config('queue_thresholds')):
            cmd += ' -c "{}" "{}" {} {}'.format(*item)
        nrpe_compat.add_check(
            shortname=rabbit.RABBIT_USER + '_queue',
            description='Check RabbitMQ Queues',
            check_cmd='{}/check_rabbitmq_queues.py{} {}'.format(
                NAGIOS_PLUGINS, cmd, STATS_DATAFILE)
        )
    nrpe_compat.write()
def install_ntpmon():
    """Install package dependencies, source files, and startup configuration.

    Syncs the ntpmon sources into place, installs the init configuration
    appropriate to the init system in use (systemd or upstart), and flips
    the reactive states so configuration is (re)applied on the next hook.
    """
    hookenv.log('installing ntpmon dependencies')
    apt_install(['python3-psutil'])
    hookenv.log('installing ntpmon')
    host.rsync('src/', ntpmon_dir)
    if host.init_is_systemd():
        hookenv.log('installing ntpmon systemd configuration')
        host.rsync('src/' + service_name + '.systemd', systemd_config)
        # BUG FIX: 'systemd' is not an executable command; 'systemctl' is
        # the CLI used to make systemd re-read unit files after a new
        # unit configuration has been installed.
        subprocess.call(['systemctl', 'daemon-reload'])
    else:
        hookenv.log('installing ntpmon upstart configuration')
        host.rsync('src/' + service_name + '.upstart', upstart_config)
    # Mark installation complete and force reconfiguration.
    set_state('ntpmon.installed')
    remove_state('ntpmon.configured')
def deploy_scripts():
    """Install the charm's auto_lint helper script into USR_LIB."""
    source_script = path.join(getenv("CHARM_DIR"), "scripts", "auto_lint.py")
    destination = path.join(USR_LIB, "auto_lint.py")
    rsync(source_script, destination)
def update_nrpe_config():
    """Refresh NRPE monitoring for this swift-storage unit.

    Installs the swift check plugins and sudoers snippet, then registers
    NRPE checks for ring/replication health, the object/container/account
    server APIs, and (optionally) replicator log health, plus the standard
    init-service checks for the swift services.
    """
    # python-dbus is used by check_upstart_job
    apt_install('python-dbus')
    log('Refreshing nrpe checks')
    if not os.path.exists(NAGIOS_PLUGINS):
        mkpath(NAGIOS_PLUGINS)
    # Sync each check plugin shipped with the charm into the nagios
    # plugins directory.
    rsync(
        os.path.join(os.getenv('CHARM_DIR'), 'files', 'nrpe-external-master',
                     'check_swift_storage.py'),
        os.path.join(NAGIOS_PLUGINS, 'check_swift_storage.py'))
    rsync(
        os.path.join(os.getenv('CHARM_DIR'), 'files', 'nrpe-external-master',
                     'check_timed_logs.pl'),
        os.path.join(NAGIOS_PLUGINS, 'check_timed_logs.pl'))
    rsync(
        os.path.join(os.getenv('CHARM_DIR'), 'files', 'nrpe-external-master',
                     'check_swift_replicator_logs.sh'),
        os.path.join(NAGIOS_PLUGINS, 'check_swift_replicator_logs.sh'))
    rsync(
        os.path.join(os.getenv('CHARM_DIR'), 'files', 'nrpe-external-master',
                     'check_swift_service'),
        os.path.join(NAGIOS_PLUGINS, 'check_swift_service'))
    # Sudoers entry so the nagios user may run the swift checks that
    # need elevated privileges.
    rsync(
        os.path.join(os.getenv('CHARM_DIR'), 'files', 'sudo',
                     'swift-storage'),
        os.path.join(SUDOERS_D, 'swift-storage'))
    # Find out if nrpe set nagios_hostname
    hostname = nrpe.get_nagios_hostname()
    current_unit = nrpe.get_nagios_unit_name()
    nrpe_setup = nrpe.NRPE(hostname=hostname)
    # check the rings and replication
    nrpe_setup.add_check(
        shortname='swift_storage',
        description='Check swift storage ring hashes and replication'
                    ' {%s}' % current_unit,
        check_cmd='check_swift_storage.py {}'.format(
            config('nagios-check-params')))
    object_port = config('object-server-port')
    container_port = config('container-server-port')
    account_port = config('account-server-port')
    # HTTP availability probes against each swift server's recon
    # endpoint on its configured port.
    nrpe_setup.add_check(
        shortname="swift-object-server-api",
        description="Check Swift Object Server API availability",
        check_cmd="/usr/lib/nagios/plugins/check_http \ -I localhost -u /recon/version -p {} \ -e \"OK\"".format(object_port))
    nrpe_setup.add_check(
        shortname="swift-container-server-api",
        description="Check Swift Container Server API availability",
        check_cmd="/usr/lib/nagios/plugins/check_http \ -I localhost -u /recon/version -p {} \ -e \"OK\"".format(container_port))
    nrpe_setup.add_check(
        shortname="swift-account-server-api",
        description="Check Swift Account Server API availability",
        check_cmd="/usr/lib/nagios/plugins/check_http \ -I localhost -u /recon/version -p {} \ -e \"OK\"".format(account_port))
    if config('nagios-replication-check-params'):
        nrpe_setup.add_check(
            shortname='swift_replicator_health',
            description='Check swift object replicator log reporting',
            check_cmd='check_swift_replicator_logs.sh {}'.format(
                config('nagios-replication-check-params')))
    else:
        # Config cleared: drop the previously registered check.
        nrpe_setup.remove_check(shortname='swift_replicator_health')
    nrpe.add_init_service_checks(nrpe_setup, SWIFT_SVCS, current_unit)
    # Persist all accumulated check definitions to the nrpe config.
    nrpe_setup.write()
def update_nrpe_checks():
    """Refresh NRPE monitoring for this RabbitMQ unit (SSL-aware).

    Installs the nagios check plugins (only when the plugins directory
    exists), manages the stats-collection cron file, provisions a
    per-unit nagios monitoring user/vhost, and registers non-SSL and/or
    SSL checks depending on the ``ssl`` config, plus optional queue and
    cluster checks.
    """
    if os.path.isdir(NAGIOS_PLUGINS):
        rsync(os.path.join(charm_dir(), 'scripts', 'check_rabbitmq.py'),
              os.path.join(NAGIOS_PLUGINS, 'check_rabbitmq.py'))
        rsync(os.path.join(charm_dir(), 'scripts',
                           'check_rabbitmq_queues.py'),
              os.path.join(NAGIOS_PLUGINS, 'check_rabbitmq_queues.py'))
        if config('management_plugin'):
            # Cluster check only works via the management plugin's API.
            rsync(os.path.join(charm_dir(), 'scripts',
                               'check_rabbitmq_cluster.py'),
                  os.path.join(NAGIOS_PLUGINS, 'check_rabbitmq_cluster.py'))
    if config('stats_cron_schedule'):
        # Install the stats collector and a cron entry driving it.
        script = os.path.join(SCRIPTS_DIR, 'collect_rabbitmq_stats.sh')
        cronjob = CRONJOB_CMD.format(schedule=config('stats_cron_schedule'),
                                     timeout=config('cron-timeout'),
                                     command=script)
        rsync(os.path.join(charm_dir(), 'scripts',
                           'collect_rabbitmq_stats.sh'), script)
        write_file(STATS_CRONFILE, cronjob)
    elif os.path.isfile(STATS_CRONFILE):
        # Schedule was unset: clean up any previously installed cron file.
        os.remove(STATS_CRONFILE)
    # Find out if nrpe set nagios_hostname
    hostname = nrpe.get_nagios_hostname()
    myunit = nrpe.get_nagios_unit_name()
    # create unique user and vhost for each unit
    current_unit = local_unit().replace('/', '-')
    # NOTE(review): the user template below appears redacted ('******');
    # presumably a 'nagios-%s'-style format string — confirm upstream.
    user = '******' % current_unit
    vhost = 'nagios-%s' % current_unit
    password = rabbit.get_rabbit_password(user, local=True)
    # Order matters: vhost must exist before permissions are granted;
    # the user is tagged 'monitoring' for read-only monitoring access.
    rabbit.create_vhost(vhost)
    rabbit.create_user(user, password, ['monitoring'])
    rabbit.grant_permissions(user, vhost)
    nrpe_compat = nrpe.NRPE(hostname=hostname)
    # 'on' means both listeners are active, so both branches may run.
    if config('ssl') in ['off', 'on']:
        cmd = ('{plugins_dir}/check_rabbitmq.py --user {user} '
               '--password {password} --vhost {vhost}')
        cmd = cmd.format(plugins_dir=NAGIOS_PLUGINS, user=user,
                         password=password, vhost=vhost)
        nrpe_compat.add_check(
            shortname=rabbit.RABBIT_USER,
            description='Check RabbitMQ {%s}' % myunit,
            check_cmd=cmd
        )
    if config('ssl') in ['only', 'on']:
        log('Adding rabbitmq SSL check', level=DEBUG)
        cmd = ('{plugins_dir}/check_rabbitmq.py --user {user} '
               '--password {password} --vhost {vhost} '
               '--ssl --ssl-ca {ssl_ca} --port {port}')
        cmd = cmd.format(plugins_dir=NAGIOS_PLUGINS, user=user,
                         password=password, port=int(config('ssl_port')),
                         vhost=vhost, ssl_ca=SSL_CA_FILE)
        nrpe_compat.add_check(
            shortname=rabbit.RABBIT_USER + "_ssl",
            description='Check RabbitMQ (SSL) {%s}' % myunit,
            check_cmd=cmd
        )
    if config('queue_thresholds'):
        cmd = ""
        # If value of queue_thresholds is incorrect we want the hook to fail
        for item in yaml.safe_load(config('queue_thresholds')):
            cmd += ' -c "{}" "{}" {} {}'.format(*item)
        nrpe_compat.add_check(
            shortname=rabbit.RABBIT_USER + '_queue',
            description='Check RabbitMQ Queues',
            check_cmd='{}/check_rabbitmq_queues.py{} {}'.format(
                NAGIOS_PLUGINS, cmd, STATS_DATAFILE)
        )
    if config('management_plugin'):
        # add NRPE check
        nrpe_compat.add_check(
            shortname=rabbit.RABBIT_USER + '_cluster',
            description='Check RabbitMQ Cluster',
            check_cmd='{}/check_rabbitmq_cluster.py --port {} --user {} --password {}'.format(
                NAGIOS_PLUGINS,
                rabbit.get_managment_port(),
                user,
                password
            )
        )
    # Persist all accumulated check definitions to the nrpe config.
    nrpe_compat.write()
def update_nrpe_checks():
    """Refresh NRPE monitoring for this RabbitMQ unit (SSL-aware).

    Installs the nagios check plugins (only when the plugins directory
    exists), manages the stats-collection cron file, provisions a
    per-unit nagios monitoring user/vhost, and registers non-SSL and/or
    SSL checks depending on the ``ssl`` config, plus optional queue and
    cluster checks.
    """
    if os.path.isdir(NAGIOS_PLUGINS):
        rsync(os.path.join(charm_dir(), 'scripts', 'check_rabbitmq.py'),
              os.path.join(NAGIOS_PLUGINS, 'check_rabbitmq.py'))
        rsync(os.path.join(charm_dir(), 'scripts',
                           'check_rabbitmq_queues.py'),
              os.path.join(NAGIOS_PLUGINS, 'check_rabbitmq_queues.py'))
        if config('management_plugin'):
            # Cluster check only works via the management plugin's API.
            rsync(os.path.join(charm_dir(), 'scripts',
                               'check_rabbitmq_cluster.py'),
                  os.path.join(NAGIOS_PLUGINS, 'check_rabbitmq_cluster.py'))
    if config('stats_cron_schedule'):
        # Install the stats collector and a cron entry driving it.
        script = os.path.join(SCRIPTS_DIR, 'collect_rabbitmq_stats.sh')
        cronjob = CRONJOB_CMD.format(schedule=config('stats_cron_schedule'),
                                     timeout=config('cron-timeout'),
                                     command=script)
        rsync(os.path.join(charm_dir(), 'scripts',
                           'collect_rabbitmq_stats.sh'), script)
        write_file(STATS_CRONFILE, cronjob)
    elif os.path.isfile(STATS_CRONFILE):
        # Schedule was unset: clean up any previously installed cron file.
        os.remove(STATS_CRONFILE)
    # BUG FIX: a second, verbatim copy of the management_plugin rsync
    # block used to live here (unguarded by the NAGIOS_PLUGINS existence
    # check above); it was redundant and has been removed.
    # Find out if nrpe set nagios_hostname
    hostname = nrpe.get_nagios_hostname()
    myunit = nrpe.get_nagios_unit_name()
    # create unique user and vhost for each unit
    current_unit = local_unit().replace('/', '-')
    # NOTE(review): the user template below appears redacted ('******');
    # presumably a 'nagios-%s'-style format string — confirm upstream.
    user = '******' % current_unit
    vhost = 'nagios-%s' % current_unit
    password = rabbit.get_rabbit_password(user, local=True)
    # Order matters: vhost must exist before permissions are granted;
    # the user is tagged 'monitoring' for read-only monitoring access.
    rabbit.create_vhost(vhost)
    rabbit.create_user(user, password, ['monitoring'])
    rabbit.grant_permissions(user, vhost)
    nrpe_compat = nrpe.NRPE(hostname=hostname)
    # 'on' means both listeners are active, so both branches may run.
    if config('ssl') in ['off', 'on']:
        cmd = ('{plugins_dir}/check_rabbitmq.py --user {user} '
               '--password {password} --vhost {vhost}')
        cmd = cmd.format(plugins_dir=NAGIOS_PLUGINS, user=user,
                         password=password, vhost=vhost)
        nrpe_compat.add_check(
            shortname=rabbit.RABBIT_USER,
            description='Check RabbitMQ {%s}' % myunit,
            check_cmd=cmd
        )
    if config('ssl') in ['only', 'on']:
        log('Adding rabbitmq SSL check', level=DEBUG)
        cmd = ('{plugins_dir}/check_rabbitmq.py --user {user} '
               '--password {password} --vhost {vhost} '
               '--ssl --ssl-ca {ssl_ca} --port {port}')
        cmd = cmd.format(plugins_dir=NAGIOS_PLUGINS, user=user,
                         password=password, port=int(config('ssl_port')),
                         vhost=vhost, ssl_ca=SSL_CA_FILE)
        nrpe_compat.add_check(
            shortname=rabbit.RABBIT_USER + "_ssl",
            description='Check RabbitMQ (SSL) {%s}' % myunit,
            check_cmd=cmd
        )
    if config('queue_thresholds'):
        cmd = ""
        # If value of queue_thresholds is incorrect we want the hook to fail
        for item in yaml.safe_load(config('queue_thresholds')):
            cmd += ' -c "{}" "{}" {} {}'.format(*item)
        nrpe_compat.add_check(
            shortname=rabbit.RABBIT_USER + '_queue',
            description='Check RabbitMQ Queues',
            check_cmd='{}/check_rabbitmq_queues.py{} {}'.format(
                NAGIOS_PLUGINS, cmd, STATS_DATAFILE)
        )
    if config('management_plugin'):
        # add NRPE check
        nrpe_compat.add_check(
            shortname=rabbit.RABBIT_USER + '_cluster',
            description='Check RabbitMQ Cluster',
            check_cmd='{}/check_rabbitmq_cluster.py --port {} --user {} --password {}'.format(
                NAGIOS_PLUGINS,
                rabbit.get_managment_port(),
                user,
                password
            )
        )
    # Persist all accumulated check definitions to the nrpe config.
    nrpe_compat.write()
def update_plugins(self):
    """Sync the charm's bundled plugin files into the plugins directory."""
    bundled_plugins = os.path.join(hookenv.charm_dir(), 'files', 'plugins/')
    host.rsync(bundled_plugins, self.plugins_dir,
               options=['--executability'])
def update_nrpe_checks():
    """Refresh NRPE monitoring for this RabbitMQ unit (multi-vhost).

    Installs the nagios check plugins (only when the plugins directory
    exists), manages the stats-collection cron file, provisions a single
    per-unit monitoring user, and registers non-SSL and/or SSL checks for
    the unit's own vhost plus every vhost listed in ``check-vhosts``,
    along with optional queue and cluster checks.
    """
    if os.path.isdir(NAGIOS_PLUGINS):
        rsync(os.path.join(charm_dir(), 'files', 'check_rabbitmq.py'),
              os.path.join(NAGIOS_PLUGINS, 'check_rabbitmq.py'))
        rsync(os.path.join(charm_dir(), 'files',
                           'check_rabbitmq_queues.py'),
              os.path.join(NAGIOS_PLUGINS, 'check_rabbitmq_queues.py'))
        if config('management_plugin'):
            # Cluster check only works via the management plugin's API.
            rsync(
                os.path.join(charm_dir(), 'files',
                             'check_rabbitmq_cluster.py'),
                os.path.join(NAGIOS_PLUGINS, 'check_rabbitmq_cluster.py'))
    if config('stats_cron_schedule'):
        # Install the stats collector and a cron entry driving it.
        script = os.path.join(SCRIPTS_DIR, 'collect_rabbitmq_stats.sh')
        cronjob = CRONJOB_CMD.format(schedule=config('stats_cron_schedule'),
                                     timeout=config('cron-timeout'),
                                     command=script)
        rsync(os.path.join(charm_dir(), 'files',
                           'collect_rabbitmq_stats.sh'), script)
        write_file(STATS_CRONFILE, cronjob)
    elif os.path.isfile(STATS_CRONFILE):
        # Schedule was unset: clean up any previously installed cron file.
        os.remove(STATS_CRONFILE)
    # Find out if nrpe set nagios_hostname
    hostname = nrpe.get_nagios_hostname()
    myunit = nrpe.get_nagios_unit_name()
    # create unique user and vhost for each unit
    current_unit = local_unit().replace('/', '-')
    # NOTE(review): the user template below appears redacted ('******');
    # presumably a 'nagios-{}'-style format string — confirm upstream.
    user = '******'.format(current_unit)
    # The unit's own vhost (named after the user) is always checked;
    # extra vhosts from config are appended below.
    vhosts = [{'vhost': user, 'shortname': rabbit.RABBIT_USER}]
    password = rabbit.get_rabbit_password(user, local=True)
    nrpe_compat = nrpe.NRPE(hostname=hostname)
    # 'monitoring' tag grants read-only monitoring access.
    rabbit.create_user(user, password, ['monitoring'])
    if config('check-vhosts'):
        # Space-separated list; empty tokens are skipped.
        for other_vhost in config('check-vhosts').split(' '):
            if other_vhost:
                item = {
                    'vhost': other_vhost,
                    'shortname': 'rabbit_{}'.format(other_vhost)
                }
                vhosts.append(item)
    for vhost in vhosts:
        # Each vhost must exist and be accessible before its check runs.
        rabbit.create_vhost(vhost['vhost'])
        rabbit.grant_permissions(user, vhost['vhost'])
        # 'on' means both listeners are active, so both branches may run.
        if config('ssl') in ['off', 'on']:
            cmd = ('{}/check_rabbitmq.py --user {} --password {} '
                   '--vhost {}'.format(NAGIOS_PLUGINS, user, password,
                                       vhost['vhost']))
            log('Adding rabbitmq non-SSL check for {}'.format(
                vhost['vhost']), level=DEBUG)
            description = 'Check RabbitMQ {} {}'.format(myunit,
                                                        vhost['vhost'])
            nrpe_compat.add_check(shortname=vhost['shortname'],
                                  description=description,
                                  check_cmd=cmd)
        if config('ssl') in ['only', 'on']:
            cmd = ('{}/check_rabbitmq.py --user {} --password {} '
                   '--vhost {} --ssl --ssl-ca {} --port {}'.format(
                       NAGIOS_PLUGINS, user, password, vhost['vhost'],
                       SSL_CA_FILE, int(config('ssl_port'))))
            log('Adding rabbitmq SSL check for {}'.format(vhost['vhost']),
                level=DEBUG)
            description = 'Check RabbitMQ (SSL) {} {}'.format(
                myunit, vhost['vhost'])
            nrpe_compat.add_check(shortname=vhost['shortname'] + "_ssl",
                                  description=description,
                                  check_cmd=cmd)
    if config('queue_thresholds'):
        cmd = ""
        # If value of queue_thresholds is incorrect we want the hook to fail
        for item in yaml.safe_load(config('queue_thresholds')):
            cmd += ' -c "{}" "{}" {} {}'.format(*item)
        nrpe_compat.add_check(
            shortname=rabbit.RABBIT_USER + '_queue',
            description='Check RabbitMQ Queues',
            check_cmd='{}/check_rabbitmq_queues.py{} {}'.format(
                NAGIOS_PLUGINS, cmd, STATS_DATAFILE))
    if config('management_plugin'):
        # add NRPE check
        _check_cmd = (
            '{}/check_rabbitmq_cluster.py --port {} --user {} --password {}'.
            format(NAGIOS_PLUGINS, rabbit.get_managment_port(), user,
                   password))
        nrpe_compat.add_check(shortname=rabbit.RABBIT_USER + '_cluster',
                              description='Check RabbitMQ Cluster',
                              check_cmd=_check_cmd)
    # Persist all accumulated check definitions to the nrpe config.
    nrpe_compat.write()