def get_hostname(address, fqdn=True):
    """Resolve the hostname for an IP address, or pass a hostname through.

    :param address: an IP address or an existing hostname.
    :param fqdn: when True return the fully-qualified name (sans trailing
                 dot); otherwise return only the short (first-label) name.
    :returns: hostname string, or None if reverse resolution fails.
    """
    if is_ip(address):
        # dnspython is installed on demand the first time it is needed.
        try:
            import dns.reversename
        except ImportError:
            apt_install('python-dnspython')
            import dns.reversename

        rev = dns.reversename.from_address(address)
        result = ns_query(rev)
        if not result:
            return None
    else:
        result = address

    if not fqdn:
        return result.split('.')[0]
    # Strip a trailing '.' left by DNS answers.
    return result[:-1] if result.endswith('.') else result
def install():
    """Install hook: stage local VSM package repos and install packages.

    Copies the charm-bundled apt repositories and apt configuration into
    place, then installs the VSM packages followed by the Ceph
    pre-install packages from the configured source.
    """
    juju_log('**********install.real')
    # Stage the charm-bundled debian repositories under /opt.
    rsync(
        charm_dir() + '/packages/vsm-dep-repo',
        '/opt'
    )
    rsync(
        charm_dir() + '/packages/vsmrepo',
        '/opt'
    )
    # Apt configuration pointing at the staged local repos.
    rsync(
        charm_dir() + '/files/apt.conf',
        '/etc/apt'
    )
    rsync(
        charm_dir() + '/files/vsm.list',
        '/etc/apt/sources.list.d'
    )
    rsync(
        charm_dir() + '/files/vsm-dep.list',
        '/etc/apt/sources.list.d'
    )
    apt_update()
    apt_install(VSM_PACKAGES)
    juju_log('**********finished to install vsm vsm-dashboard python-vsmclient')
    # Configure the Ceph package source and install prerequisites.
    add_source(config('ceph-source'), config('ceph-key'))
    apt_update(fatal=True)
    apt_install(packages=PRE_INSTALL_PACKAGES, fatal=True)
def do_openstack_upgrade(configs):
    """
    Perform an upgrade of cinder.  Takes care of upgrading packages,
    rewriting configs + database migration and potentially any other
    post-upgrade actions.

    :param configs: The charms main OSConfigRenderer object.
    """
    new_src = config('openstack-origin')
    new_os_rel = get_os_codename_install_source(new_src)
    juju_log('Performing OpenStack upgrade to %s.' % (new_os_rel))

    configure_installation_source(new_src)
    # Keep existing config files; fall back to package defaults otherwise.
    dpkg_opts = [
        '--option', 'Dpkg::Options::=--force-confnew',
        '--option', 'Dpkg::Options::=--force-confdef',
    ]
    apt_update()
    apt_install(packages=determine_packages(), options=dpkg_opts, fatal=True)

    # set CONFIGS to load templates from new release and regenerate config
    configs.set_release(openstack_release=new_os_rel)
    configs.write_all()

    # Only the elected leader runs the (cluster-wide) database migration.
    if eligible_leader(CLUSTER_RES):
        migrate_database()
def install():
    """Install hook: configure the package source and install Ceph.

    Optionally tunes network adapters when 'autotune' is set, and installs
    the charm's udev rules.
    """
    add_source(config('source'), config('key'))
    apt_update(fatal=True)
    apt_install(packages=ceph.determine_packages(), fatal=True)
    if config('autotune'):
        tune_network_adapters()
    install_udev_rules()
def get_cidr_from_iface(interface):
    '''Determine the network CIDR routed via an interface.

    Shells out to ohai to inspect the host's network configuration.

    :param interface: interface name (e.g. 'eth0'); falsy values yield None.
    :returns: CIDR string (route destination) or None if it cannot be
              determined.
    '''
    if not interface:
        return None
    apt_install('ohai')
    try:
        os_info = subprocess.check_output(['ohai', '-l', 'fatal'])
    except (OSError, subprocess.CalledProcessError):
        # FIX: check_output raises CalledProcessError on a non-zero exit
        # status, which the original only-OSError handler let propagate.
        log('Unable to get operating system information')
        return None
    try:
        os_info_json = json.loads(os_info)
    except ValueError:
        log('Unable to determine network')
        return None
    device = os_info_json['network']['interfaces'].get(interface)
    if device is not None:
        if device.get('routes'):
            routes = device['routes']
            # Return the destination of the first route carrying a scope.
            for net in routes:
                if 'scope' in net:
                    return net.get('destination')
        else:
            return None
    else:
        return None
def add_ovsbridge_linuxbridge(name, bridge):
    ''' Add linux bridge to the named openvswitch bridge
    :param name: Name of ovs bridge to be added to Linux bridge
    :param bridge: Name of Linux bridge to be added to ovs bridge
    :returns: True if veth is added between ovs bridge and linux bridge,
    False otherwise'''
    # netifaces is installed on demand, matching the running Python major
    # version.
    try:
        import netifaces
    except ImportError:
        if six.PY2:
            apt_install('python-netifaces', fatal=True)
        else:
            apt_install('python3-netifaces', fatal=True)
        import netifaces

    # The veth pair is named after both bridges so it can be recognised.
    ovsbridge_port = "veth-" + name
    linuxbridge_port = "veth-" + bridge
    log('Adding linuxbridge {} to ovsbridge {}'.format(bridge, name),
        level=INFO)
    interfaces = netifaces.interfaces()
    for interface in interfaces:
        if interface == ovsbridge_port or interface == linuxbridge_port:
            # Idempotency: nothing to do if either veth end already exists.
            log('Interface {} already exists'.format(interface), level=INFO)
            return

    # NOTE(review): the local name 'config' shadows the charmhelpers
    # config() function within this with-block.
    with open('/etc/network/interfaces.d/{}.cfg'.format(
            linuxbridge_port), 'w') as config:
        config.write(BRIDGE_TEMPLATE.format(linuxbridge_port=linuxbridge_port,
                                            ovsbridge_port=ovsbridge_port,
                                            bridge=bridge))

    subprocess.check_call(["ifup", linuxbridge_port])
    add_bridge_port(name, linuxbridge_port)
def config_changed():
    """config-changed hook: validate settings and (re)configure the OSD.

    Exits the hook with status 1 on an invalid 'osd-format' value.
    """
    # Determine whether vaultlocker is required and install
    if use_vaultlocker():
        # 'installed' is True when nothing remains to be installed.
        installed = len(filter_installed_packages(['vaultlocker'])) == 0
        if not installed:
            apt_install('vaultlocker', fatal=True)

    # Check if an upgrade was requested
    check_for_upgrade()

    # Pre-flight checks
    if config('osd-format') not in ceph.DISK_FORMATS:
        log('Invalid OSD disk format configuration specified', level=ERROR)
        sys.exit(1)

    if config('prefer-ipv6'):
        assert_charm_supports_ipv6()

    sysctl_dict = config('sysctl')
    if sysctl_dict:
        create_sysctl(sysctl_dict, '/etc/sysctl.d/50-ceph-osd-charm.conf')

    # Free an ephemeral mountpoint so its device can be used as an OSD.
    e_mountpoint = config('ephemeral-unmount')
    if e_mountpoint and ceph.filesystem_mounted(e_mountpoint):
        umount(e_mountpoint)
    prepare_disks_and_activate()
    install_apparmor_profile()
    add_to_updatedb_prunepath(STORAGE_MOUNT_PATH)
def install():
    """Install the DHCP server packages and flag completion via state."""
    hookenv.log('Installing isc-dhcp')
    fetch.apt_update()
    packages = ['isc-dhcp-server', 'iptables-persistent']
    fetch.apt_install(fetch.filter_installed_packages(packages))
    set_state('dhcp-server.installed')
def upgrade_charm():
    """upgrade-charm hook: refresh packages and re-fire relation hooks.

    Re-runs the joined/changed handlers for every existing relation so
    that relation data is re-published under the new charm revision.
    """
    apt_install(filter_installed_packages(determine_packages()),
                fatal=True)
    for r_id in relation_ids('amqp'):
        amqp_joined(relation_id=r_id)
    for r_id in relation_ids('identity-service'):
        identity_joined(rid=r_id)
    for r_id in relation_ids('cloud-compute'):
        for unit in related_units(r_id):
            compute_changed(r_id, unit)
    for r_id in relation_ids('shared-db'):
        db_joined(relation_id=r_id)

    # Re-run leader DB initialisation for both supported database backends;
    # postgresql skips ACL checks and cell restarts.
    rels = ['shared-db', 'pgsql-nova-db']
    for rname in rels:
        for rid in relation_ids(rname):
            for unit in related_units(rid):
                if rname == 'pgsql-nova-db':
                    leader_init_db_if_ready(skip_acl_check=True,
                                            skip_cells_restarts=True,
                                            db_rid=rid, unit=unit)
                else:
                    leader_init_db_if_ready(db_rid=rid, unit=unit)

    update_nrpe_config()
    update_nova_consoleauth_config()
def install():
    """Basic Ceph client installation.

    Ensures /etc/ceph exists and installs the ceph-common package.
    """
    ceph_dir = "/etc/ceph"
    if not os.path.exists(ceph_dir):
        os.mkdir(ceph_dir)

    apt_install('ceph-common', fatal=True)
def pre_install(self):
    """Install the build/runtime dependencies required before deploying Hue.

    Reads the pinned Hue version from the jujuresources config (currently
    only fetched; not otherwise used here) and apt-installs the compiler,
    SASL/Kerberos/LDAP/XML/SSL development headers, and Python tooling
    that the Hue build needs.
    """
    hue_version = jujuresources.config_get("hue-version")
    packages = [
        "ant",
        "g++",
        "libsasl2-modules-gssapi-mit",
        "libtidy-0.99-0",
        "python2.7-dev",
        "maven",
        "python-dev",
        "python-simplejson",
        "python-setuptools",
        "make",
        "libsasl2-dev",
        "libmysqlclient-dev",
        "libkrb5-dev",
        "libxml2-dev",
        "libxslt-dev",
        "libxslt1-dev",
        "libsqlite3-dev",
        "libssl-dev",
        "libldap2-dev",
        "python-pip"
    ]
    fetch.apt_install(packages)
def install():
    """Install bind9 (online or from bundled offline packages) and
    configure it to forward to the nameserver found in /etc/resolv.conf.

    Opens port 53 on TCP and UDP when done.
    """
    if config()['offline'] is False:
        apt_update(fatal=True)
        apt_install(packages=[
            'bind9',
            'dnsutils',
        ], fatal=True)
    else:
        log("Installing offline debian packages")
        install_packages('files/bind')
        # Run a second time to work around flaky first-pass installs.
        install_packages('files/bind')
        log("Installing Python packages")
        pip_install('files/bind/pip')

    # use the nameserver in /etc/resolv.conf as a forwarder ...
    import DNS
    DNS.ParseResolvConf("/etc/resolv.conf")
    nameserver = DNS.defaults['server'][0]
    log('Setting dns to be forwarder to :'+nameserver)

    import jinja2
    templateLoader = jinja2.FileSystemLoader(
        searchpath=os.environ['CHARM_DIR']
    )
    # use Jinja2 template to enable bind forwarding
    templateEnv = jinja2.Environment(loader=templateLoader);
    template = templateEnv.get_template(
        'contrib/bind/templates/named.conf.options.jinja2')
    output_from_parsed_template = template.render(forwarder=nameserver)
    # Save the rendered forwarder configuration.
    # NOTE(review): file opened in binary mode but written a str; this is
    # Python-2 only behaviour — confirm before porting to Python 3.
    with open("/etc/bind/named.conf.options", "wb") as fh:
        fh.write(output_from_parsed_template)

    # Keep a directory for zone backups.
    if not os.path.exists('/etc/bind/zone-backup'):
        os.makedirs('/etc/bind/zone-backup')

    open_port(53, "TCP")
    open_port(53, "UDP")
def do_openstack_upgrade(configs):
    """
    Perform an upgrade.  Takes care of upgrading packages, rewriting
    configs, database migrations and potentially any other post-upgrade
    actions.

    :param configs: The charms main OSConfigRenderer object.
    """
    # cur_os_rel is only consumed by the commented-out stamp call below.
    cur_os_rel = os_release('neutron-common')
    new_src = config('openstack-origin')
    new_os_rel = get_os_codename_install_source(new_src)
    log('Performing OpenStack upgrade to %s.' % (new_os_rel))

    configure_installation_source(new_src)
    dpkg_opts = [
        '--option', 'Dpkg::Options::=--force-confnew',
        '--option', 'Dpkg::Options::=--force-confdef',
    ]
    apt_update(fatal=True)
    apt_upgrade(options=dpkg_opts, fatal=True, dist=True)
    pkgs = determine_packages(new_os_rel)
    # Sort packages just to make unit tests easier
    pkgs.sort()
    apt_install(packages=pkgs,
                options=dpkg_opts,
                fatal=True)

    # set CONFIGS to load templates from new release
    configs.set_release(openstack_release=new_os_rel)

    # Before kilo it's nova-cloud-controllers job
    if is_elected_leader(CLUSTER_RES):
        #stamp_neutron_database(cur_os_rel)
        migrate_neutron_database()
def radosgw_relation(relid=None, unit=None):
    """Provide radosgw units with keys and broker responses once the
    mon cluster is in quorum and OSDs are available.

    :param relid: relation id to publish on (current relation if None).
    :param unit: remote unit to read settings from (remote_unit() if None).
    """
    # Install radosgw for admin tools
    apt_install(packages=filter_installed_packages(['radosgw']))
    if not unit:
        unit = remote_unit()
    # NOTE: radosgw needs some usage OSD storage, so defer key
    # provision until OSD units are detected.
    if ready():
        log('mon cluster in quorum and osds related '
            '- providing radosgw with keys')
        public_addr = get_public_addr()
        data = {
            'fsid': config('fsid'),
            'radosgw_key': ceph.get_radosgw_key(),
            'auth': 'cephx',
            'ceph-public-address': public_addr,
        }
        settings = relation_get(rid=relid, unit=unit)
        """Process broker request(s)."""
        if 'broker_req' in settings:
            # Only the ceph leader answers broker requests; the response is
            # keyed per remote unit.
            if ceph.is_leader():
                rsp = process_requests(settings['broker_req'])
                unit_id = unit.replace('/', '-')
                unit_response_key = 'broker-rsp-' + unit_id
                data[unit_response_key] = rsp
            else:
                log("Not leader - ignoring broker request", level=DEBUG)
        # NOTE(review): reconstructed nesting — relation_set is taken to run
        # for every ready() pass, not only when a broker_req is present;
        # confirm against the charm's history.
        relation_set(relation_id=relid, relation_settings=data)
    else:
        log('FSID or admin key not provided, please configure them')
def setup_mysql(db):
    """Switch wallabag over to the related MySQL database.

    Installs the MySQL client bits, resets the app, runs the mysql setup
    and flips the reactive states from sqlite to mysql.
    """
    apt_install(['php5-mysql', 'mysql-client'])
    reset_wallabag()
    setup('mysql', db)
    # Swap reactive states: drop sqlite/mysql-available, mark mysql in use.
    remove_state('mysql.available')
    remove_state('wallabag.connected.sqlite')
    set_state('wallabag.connected.mysql')
def install():
    """
    Install jenkins-job-builder from a archive, remote git repository or a
    locally bundled copy shipped with the charm.  Any locally bundled copy
    overrides 'jjb-install-source' setting.
    """
    if not os.path.isdir(CONFIG_DIR):
        os.mkdir(CONFIG_DIR)
    src = config('jjb-install-source')
    tarball = os.path.join(charm_dir(), 'files', TARBALL)
    if os.path.isfile(tarball):
        # Bundled tarball wins over any configured source.
        log('Installing jenkins-job-builder from bundled file: %s.' %
            tarball)
        install_from_file(tarball)
    elif src.startswith('git://'):
        log('Installing jenkins-job-builder from remote git: %s.' % src)
        install_from_git(src)
    elif src == 'distro':
        log('Installing jenkins-job-builder from Ubuntu archive.')
        # The distro package only exists from Ubuntu 13.04 onwards.
        if lsb_release()['DISTRIB_CODENAME'] in ['precise', 'quantal']:
            m = ('jenkins-job-builder package only available in Ubuntu 13.04 '
                 'and later.')
            raise Exception(m)
        apt_update(fatal=True)
        apt_install(['jenkins-job-builder', 'python-pbr'], fatal=True)
    else:
        m = ('Must specify a git url as install source or bundled source with '
             'the charm.')
        log(m, ERROR)
        raise Exception(m)
def update_nrpe_config():
    """Install and register the Nagios/NRPE checks for Ceph health.

    Syncs the check and collector scripts into place, installs a cron job
    that refreshes the status file every five minutes, and (re)writes the
    NRPE check definition.
    """
    # python-dbus is used by check_upstart_job
    apt_install('python-dbus')
    log('Refreshing nagios checks')
    if os.path.isdir(NAGIOS_PLUGINS):
        rsync(os.path.join(os.getenv('CHARM_DIR'), 'files', 'nagios',
                           'check_ceph_status.py'),
              os.path.join(NAGIOS_PLUGINS, 'check_ceph_status.py'))

    script = os.path.join(SCRIPTS_DIR, 'collect_ceph_status.sh')
    rsync(os.path.join(os.getenv('CHARM_DIR'), 'files', 'nagios',
                       'collect_ceph_status.sh'),
          script)
    # Run the collector every five minutes as root.
    cronjob = "{} root {}\n".format('*/5 * * * *', script)
    write_file(STATUS_CRONFILE, cronjob)

    # Find out if nrpe set nagios_hostname
    hostname = nrpe.get_nagios_hostname()
    current_unit = nrpe.get_nagios_unit_name()
    nrpe_setup = nrpe.NRPE(hostname=hostname)
    nrpe_setup.add_check(
        shortname="ceph",
        description='Check Ceph health {%s}' % current_unit,
        check_cmd='check_ceph_status.py -f {}'.format(STATUS_FILE)
    )
    nrpe_setup.write()
def update_jenkins():
    """Refresh Jenkins configuration and jobs from the config repo.

    No-op unless a jenkins-configurator relation exists and the Jenkins
    install directory is present.  On the master, re-renders config and
    jobs, runs any repo-provided setup.d scripts, and installs packages
    the repo declares as dependencies.
    """
    if not relation_ids('jenkins-configurator'):
        return

    # if jenkins lib does not exist, skip it
    if not os.path.isdir(JENKINS_PATH):
        log(('*** Jenkins does not exist. Not in jenkins relation, '
             'skipping ***'))
        return

    log("*** Updating jenkins.")
    if not is_jenkins_slave():
        log('Running on master, updating config and jobs.')
        _update_jenkins_config()
        _update_jenkins_jobs()

    # run repo setup scripts.
    setupd = os.path.join(common.CI_CONFIG_DIR, 'setup.d')
    if os.path.isdir(setupd):
        # run-parts executes every script in the directory, stopping on
        # the first failure.
        cmd = ["run-parts", "--exit-on-error", setupd]
        log('Running repo setup.')
        subprocess.check_call(cmd)

    # install any packages that the repo says we need as dependencies.
    pkgs = required_packages()
    if pkgs:
        opts = []
        if config('force-package-install'):
            opts = [
                '--option', 'Dpkg::Options::=--force-confnew',
                '--option', 'Dpkg::Options::=--force-confdef',
            ]
        apt_install(pkgs, options=opts, fatal=True)
def radosgw_relation(relid=None, unit=None):
    """Provide a related radosgw unit with keys once the mon cluster is in
    quorum, answering any broker request carried on the relation.

    :param relid: relation id to publish on (current relation if None).
    :param unit: remote unit to read settings from (remote_unit() if None).
    """
    # Install radosgw for admin tools
    apt_install(packages=filter_installed_packages(['radosgw']))
    if not unit:
        unit = remote_unit()
    """Process broker request(s)."""
    if ceph.is_quorum():
        settings = relation_get(rid=relid, unit=unit)
        if 'broker_req' in settings:
            # Only the ceph leader answers broker requests.
            if not ceph.is_leader():
                log("Not leader - ignoring broker request", level=DEBUG)
            else:
                rsp = process_requests(settings['broker_req'])
                # Response is keyed per remote unit.
                unit_id = unit.replace('/', '-')
                unit_response_key = 'broker-rsp-' + unit_id
                log('mon cluster in quorum - providing radosgw with keys')
                public_addr = get_public_addr()
                data = {
                    'fsid': leader_get('fsid'),
                    'radosgw_key': ceph.get_radosgw_key(),
                    'auth': config('auth-supported'),
                    'ceph-public-address': public_addr,
                    unit_response_key: rsp,
                }
                relation_set(relation_id=relid, relation_settings=data)
    else:
        log('mon cluster not in quorum - deferring key provision')
def upgrade_monitor():
    """Upgrade the ceph-mon packages to the configured 'release-version'.

    Stops the monitor daemon(s), upgrades packages from the configured
    source, restarts the daemon(s) and updates workload status.  On any
    failure the unit is set to 'blocked' and the hook exits with status 1.
    """
    current_version = ceph.get_version()
    status_set("maintenance", "Upgrading monitor")
    log("Current ceph version is {}".format(current_version))
    new_version = config('release-version')
    log("Upgrading to: {}".format(new_version))

    try:
        add_source(config('source'), config('key'))
        apt_update(fatal=True)
    except subprocess.CalledProcessError as err:
        # FIX: CalledProcessError has no reliable .message attribute
        # (removed on Python 3, deprecated on Python 2); str(err) is safe.
        log("Adding the ceph source failed with message: {}".format(
            str(err)))
        status_set("blocked", "Upgrade to {} failed".format(new_version))
        sys.exit(1)
    try:
        # Stop all local monitors before swapping packages.
        if ceph.systemd():
            for mon_id in ceph.get_local_mon_ids():
                service_stop('ceph-mon@{}'.format(mon_id))
        else:
            service_stop('ceph-mon-all')
        apt_install(packages=ceph.PACKAGES, fatal=True)
        if ceph.systemd():
            for mon_id in ceph.get_local_mon_ids():
                service_start('ceph-mon@{}'.format(mon_id))
        else:
            service_start('ceph-mon-all')
        status_set("active", "")
    except subprocess.CalledProcessError as err:
        log("Stopping ceph and upgrading packages failed "
            "with message: {}".format(str(err)))
        status_set("blocked", "Upgrade to {} failed".format(new_version))
        sys.exit(1)
def install_queued():
    '''Installs queued deb packages.

    Removes the apt.queued_installs state and sets the apt.installed state.

    On failure, sets the unit's workload state to 'blocked' and returns
    False. Package installs remain queued.

    On success, sets the apt.installed.{packagename} state for each
    installed package and returns True.
    '''
    store = unitdata.kv()
    # Queue entries are keyed 'apt.install_queue.<package>' -> options;
    # sorting by options lets groupby() batch packages sharing options.
    queue = sorted((options, package)
                   for package, options
                   in store.getrange('apt.install_queue.', strip=True).items())

    installed = set()
    for options, batch in itertools.groupby(queue, lambda x: x[0]):
        packages = [b[1] for b in batch]
        try:
            status_set(None, 'Installing {}'.format(','.join(packages)))
            fetch.apt_install(packages, options, fatal=True)
            # Dequeue only what actually installed.
            store.unsetrange(packages, prefix='apt.install_queue.')
            installed.update(packages)
        except subprocess.CalledProcessError:
            status_set('blocked',
                       'Unable to install packages {}'
                       .format(','.join(packages)))
            return False  # Without setting reactive state.

    for package in installed:
        reactive.set_state('apt.installed.{}'.format(package))
    reactive.remove_state('apt.queued_installs')

    return True
def install(self):
    """Customise the installation, configure the source and then call the
    parent install() method to install the packages

    Adds the 6wind virt-mq PPA before installing self.packages, keeping
    any locally modified config files (dpkg force-confnew).
    """
    fetch.add_source('ppa:6wind/virt-mq-ppa')
    fetch.apt_install(self.packages,
                      options=['--option=Dpkg::Options::=--force-confnew'])
def config_changed():
    """config-changed hook for nova-cloud-controller.

    Handles ipv6 setup, git/apt based upgrades, console access packages,
    and re-fires the relations whose data depends on charm config.
    """
    # neutron-server runs if < juno. Neutron-server creates mysql tables
    # which will subsequently cause db migrations to fail if >= juno.
    # Disable neutron-server if >= juno
    if os_release('nova-common') >= 'juno':
        with open('/etc/init/neutron-server.override', 'wb') as out:
            out.write('manual\n')
    if config('prefer-ipv6'):
        status_set('maintenance', 'configuring ipv6')
        setup_ipv6()
        sync_db_with_multi_ipv6_addresses(config('database'),
                                          config('database-user'),
                                          relation_prefix='nova')

    global CONFIGS
    if git_install_requested():
        status_set('maintenance', 'Running Git install')
        if config_value_changed('openstack-origin-git'):
            git_install(config('openstack-origin-git'))
    elif not config('action-managed-upgrade'):
        if openstack_upgrade_available('nova-common'):
            status_set('maintenance', 'Running openstack upgrade')
            CONFIGS = do_openstack_upgrade(CONFIGS)
            # Notify neutron-api so it restarts against the new release.
            [neutron_api_relation_joined(rid=rid, remote_restart=True)
                for rid in relation_ids('neutron-api')]
    # NOTE(jamespage): Force re-fire of shared-db joined hook
    # to ensure that nova_api database is setup if required.
    [db_joined(relation_id=r_id)
        for r_id in relation_ids('shared-db')]

    save_script_rc()
    configure_https()
    CONFIGS.write_all()

    # Install/refresh console access packages when a console protocol is
    # configured (skipped for git-based installs).
    if console_attributes('protocol'):
        if not git_install_requested():
            status_set('maintenance', 'Configuring guest console access')
            apt_update()
            packages = console_attributes('packages') or []
            filtered = filter_installed_packages(packages)
            if filtered:
                apt_install(filtered, fatal=True)
        [compute_joined(rid=rid)
            for rid in relation_ids('cloud-compute')]

    for r_id in relation_ids('identity-service'):
        identity_joined(rid=r_id)
    for rid in relation_ids('zeromq-configuration'):
        zeromq_configuration_relation_joined(rid)
    [cluster_joined(rid) for rid in relation_ids('cluster')]
    update_nrpe_config()

    # If the region value has changed, notify the cloud-compute relations
    # to ensure the value is propagated to the compute nodes.
    if config_value_changed('region'):
        for rid in relation_ids('cloud-compute'):
            for unit in related_units(rid):
                compute_changed(rid, unit)

    update_nova_consoleauth_config()
def install_packages(servicename):
    """Install the neutron packages needed for the ONOS ML2 plugin.

    On kilo or later the networking-onos driver is pip-installed first.

    :param servicename: not used by the body; kept for caller
        compatibility.
    """
    if os_release('neutron-common') >= 'kilo':
        # NOTE(review): shelling out to pip via os.popen is best-effort
        # (failures are not detected); output is echoed to the juju log.
        output = os.popen('pip install networking-onos')
        # FIX: parenthesised print works on both Python 2 and Python 3;
        # the bare print statement is a SyntaxError on Python 3.
        print(output.read())
    pkgs = ['neutron-common', 'neutron-plugin-ml2']
    pkgs = filter_installed_packages(pkgs)
    apt_install(pkgs, fatal=True)
def configure_lxd_host():
    """Apply per-Ubuntu-release host configuration for LXD.

    Wily and later: set the LXD trust password and listen address (plus
    xenial kernel extras).  Vivid: load the overlay kernel module and
    persist it in /etc/modules.
    """
    ubuntu_release = lsb_release()['DISTRIB_CODENAME'].lower()
    # NOTE(review): these are lexicographic string comparisons of
    # codenames; they hold for vivid..xenial-era series but not in
    # general (e.g. 'artful' < 'vivid') — confirm supported series.
    if ubuntu_release > "vivid":
        log('>= Wily deployment - configuring LXD trust password and address',
            level=INFO)
        cmd = ['lxc', 'config', 'set',
               'core.trust_password', lxd_trust_password()]
        check_call(cmd)
        cmd = ['lxc', 'config', 'set',
               'core.https_address', '[::]']
        check_call(cmd)
        if ubuntu_release == 'xenial':
            # Extra kernel modules (zfs etc.) for the running kernel.
            apt_install('linux-image-extra-%s' % os.uname()[2],
                        fatal=True)
        if ubuntu_release >= 'xenial':
            modprobe('netlink_diag')
    elif ubuntu_release == "vivid":
        log('Vivid deployment - loading overlay kernel module', level=INFO)
        cmd = ['modprobe', 'overlay']
        check_call(cmd)
        # Persist the module across reboots.
        # NOTE(review): appends without a leading newline; if /etc/modules
        # lacks a trailing newline the entry fuses with the last line.
        with open('/etc/modules', 'r+') as modules:
            if 'overlay' not in modules.read():
                modules.write('overlay')
def config_changed():
    '''
    This hook is run when a config parameter is changed.
    It also runs on node reboot.
    '''
    charm_config = config()
    if charm_config.changed('lcm-ssh-key'):
        if add_lcm_key():
            log("PLUMgrid LCM Key added")
    if charm_config.changed('fabric-interfaces'):
        if not fabric_interface_changed():
            log("Fabric interface already set")
    # Any change to the package sources/builds triggers a full
    # stop/upgrade/reload cycle of the PLUMgrid services.
    if (charm_config.changed('install_sources') or
            charm_config.changed('plumgrid-build') or
            charm_config.changed('install_keys') or
            charm_config.changed('iovisor-build')):
        stop_pg()
        status_set('maintenance', 'Upgrading apt packages')
        if charm_config.changed('install_sources'):
            configure_pg_sources()
        configure_sources(update=True)
        pkgs = determine_packages()
        for pkg in pkgs:
            # --force-yes permits packages from the unauthenticated
            # PLUMgrid repo.
            apt_install(pkg, options=['--force-yes'], fatal=True)
        remove_iovisor()
        load_iovisor()
    ensure_mtu()
    CONFIGS.write_all()
def do_openstack_upgrade(configs):
    """
    Perform an upgrade.  Takes care of upgrading packages, rewriting
    configs, database migrations and potentially any other post-upgrade
    actions.

    :param configs: The charms main OSConfigRenderer object.
    """
    new_src = config('openstack-origin')
    new_os_rel = get_os_codename_install_source(new_src)
    log('Performing OpenStack upgrade to %s.' % (new_os_rel))

    configure_installation_source(new_src)
    dpkg_opts = [
        '--option', 'Dpkg::Options::=--force-confnew',
        '--option', 'Dpkg::Options::=--force-confdef',
    ]
    apt_update(fatal=True)
    apt_upgrade(options=dpkg_opts, fatal=True, dist=True)
    # Invalidate the cached os_release so later calls see the new release.
    reset_os_release()
    apt_install(determine_packages(), fatal=True)
    # Purge packages that are no longer needed on the new release.
    remove_old_packages()

    # set CONFIGS to load templates from new release
    configs.set_release(openstack_release=new_os_rel)
def upgrade_charm():
    """upgrade-charm hook: re-render ceph.conf, top up packages, and
    refresh monitor filesystem, keys and relation data."""
    emit_cephconf()
    missing_pkgs = filter_installed_packages(ceph.PACKAGES)
    apt_install(packages=missing_pkgs, fatal=True)
    install_upstart_scripts()
    ceph.update_monfs()
    upgrade_keys()
    mon_relation_joined()
def resolve_hostname_to_ip(hostname):
    """Resolve hostname to IP

    @param hostname: hostname to be resolved
    @returns IP address or None if resolution was not possible via DNS
    """
    # dnspython is installed on demand the first time it is needed.
    try:
        import dns.resolver
    except ImportError:
        apt_install(filter_installed_packages(['python-dnspython']),
                    fatal=True)
        import dns.resolver

    if config('prefer-ipv6'):
        # Already an address - nothing to resolve.
        if is_ipv6(hostname):
            return hostname

        query_type = 'AAAA'
    elif is_ip(hostname):
        return hostname
    else:
        query_type = 'A'

    # This may throw an NXDOMAIN exception; in which case
    # things are badly broken so just let it kill the hook
    answers = dns.resolver.query(hostname, query_type)
    if answers:
        return answers[0].address
def config_changed():
    """config-changed hook for neutron-api.

    Raises an Exception (and blocks the unit) if the config would disable
    L3HA/DVR while such routers still exist.
    """
    # if we are paused, delay doing any config changed hooks.
    # It is forced on the resume.
    if is_unit_paused_set():
        log("Unit is pause or upgrading. Skipping config_changed", "WARN")
        return

    # If neutron is ready to be queried then check for incompatability
    # between existing neutron objects and charm settings
    if neutron_ready():
        if l3ha_router_present() and not get_l3ha():
            e = ('Cannot disable Router HA while ha enabled routers exist.'
                 ' Please remove any ha routers')
            status_set('blocked', e)
            raise Exception(e)
        if dvr_router_present() and not get_dvr():
            e = ('Cannot disable dvr while dvr enabled routers exist. Please'
                 ' remove any distributed routers')
            log(e, level=ERROR)
            status_set('blocked', e)
            raise Exception(e)
    if config('prefer-ipv6'):
        status_set('maintenance', 'configuring ipv6')
        setup_ipv6()
        sync_db_with_multi_ipv6_addresses(config('database'),
                                          config('database-user'))

    global CONFIGS
    if not config('action-managed-upgrade'):
        if openstack_upgrade_available('neutron-common'):
            status_set('maintenance', 'Running openstack upgrade')
            do_openstack_upgrade(CONFIGS)

    additional_install_locations(
        config('neutron-plugin'),
        config('openstack-origin')
    )
    status_set('maintenance', 'Installing apt packages')
    apt_install(filter_installed_packages(
        determine_packages(config('openstack-origin'))), fatal=True)
    packages_removed = remove_old_packages()
    configure_https()
    update_nrpe_config()
    CONFIGS.write_all()
    # A package purge can leave services running stale code; bounce them.
    if packages_removed and not is_unit_paused_set():
        log("Package purge detected, restarting services", "INFO")
        for s in services():
            service_restart(s)
    # Re-fire the relation hooks whose data can depend on charm config.
    for r_id in relation_ids('neutron-api'):
        neutron_api_relation_joined(rid=r_id)
    for r_id in relation_ids('neutron-plugin-api'):
        neutron_plugin_api_relation_joined(rid=r_id)
    for r_id in relation_ids('amqp'):
        amqp_joined(relation_id=r_id)
    for r_id in relation_ids('identity-service'):
        identity_joined(rid=r_id)
    for r_id in relation_ids('ha'):
        ha_joined(relation_id=r_id)
    [cluster_joined(rid) for rid in relation_ids('cluster')]
def do_openstack_upgrade(configs=None):
    """Perform an upgrade of cinder. Takes care of upgrading
    packages, rewriting configs + database migration and
    potentially any other post-upgrade actions.

    :param configs: The charms main OSConfigRenderer object.
        NOTE: the passed-in value is discarded; configs is re-created via
        register_configs() after the package upgrade (see LP 1726527).
    """
    new_src = config('openstack-origin')
    new_os_rel = get_os_codename_install_source(new_src)
    juju_log('Performing OpenStack upgrade to %s.' % (new_os_rel))

    configure_installation_source(new_src)
    dpkg_opts = [
        '--option', 'Dpkg::Options::=--force-confnew',
        '--option', 'Dpkg::Options::=--force-confdef',
    ]
    apt_update()
    apt_upgrade(options=dpkg_opts, fatal=True, dist=True)
    # Invalidate the cached os_release so later calls see the new release.
    reset_os_release()
    apt_install(determine_packages(), fatal=True)

    # NOTE(hopem): must do this after packages have been upgraded so that
    # we ensure that correct configs are selected for the target release.
    # See LP 1726527.
    configs = register_configs()

    # set CONFIGS to load templates from new release and regenerate config
    configs.set_release(openstack_release=new_os_rel)
    configs.write_all()

    if run_in_apache():
        disable_package_apache_site()

    # Stop/start services and migrate DB if leader
    [service_stop(s) for s in services()]
    if is_elected_leader(CLUSTER_RES):
        migrate_database(upgrade=True)
    if not is_unit_paused_set():
        [service_start(s) for s in services()]
def config_changed():
    """config-changed hook for openstack-dashboard (horizon)."""
    if config('prefer-ipv6'):
        setup_ipv6()
        localhost = 'ip6-localhost'
    else:
        localhost = 'localhost'

    # icehouse with offline compression disabled needs lesscpy at runtime.
    if (os_release('openstack-dashboard') == 'icehouse' and
            config('offline-compression') in ['no', 'False']):
        apt_install(filter_installed_packages(['python-lesscpy']),
                    fatal=True)

    # Ensure default role changes are propagated to keystone
    for relid in relation_ids('identity-service'):
        keystone_joined(relid)
    enable_ssl()

    if git_install_requested():
        if config_value_changed('openstack-origin-git'):
            git_install(config('openstack-origin-git'))
    elif not config('action-managed-upgrade'):
        if openstack_upgrade_available('openstack-dashboard'):
            status_set('maintenance', 'Upgrading to new OpenStack release')
            do_openstack_upgrade(configs=CONFIGS)

    # Environment for the nagios/script-rc helpers.
    # NOTE(review): ports 70 and 433 (not 80/443) are what this charm has
    # historically exported here — confirm before "fixing".
    env_vars = {
        'OPENSTACK_URL_HORIZON':
        "http://{}:70{}|Login+-+OpenStack".format(localhost,
                                                  config('webroot')),
        'OPENSTACK_SERVICE_HORIZON': "apache2",
        'OPENSTACK_PORT_HORIZON_SSL': 433,
        'OPENSTACK_PORT_HORIZON': 70
    }
    save_script_rc(**env_vars)
    update_nrpe_config()
    CONFIGS.write_all()
    open_port(80)
    open_port(443)
    if git_install_requested():
        git_post_install_late(config('openstack-origin-git'))
def install_from_file(tarball):
    """Install jenkins-job-builder from a charm-bundled tarball.

    Unpacks the tarball under /tmp, pip-installs its requirements from
    the charm's bundled wheel/sdist directory (offline), then runs
    setup.py install.
    """
    log('*** Installing from local tarball: %s.' % tarball)
    outdir = os.path.join('/tmp', 'jenkins-job-builder')
    _clean_tmp_dir(outdir)
    apt_install(filter_installed_packages(['python-pip']), fatal=True)

    # Unpack next to (and then into) the staging directory.
    os.chdir(os.path.dirname(outdir))
    subprocess.check_call(['tar', 'xfz', tarball])
    os.chdir(outdir)

    # Offline install of requirements from the charm-bundled deps dir.
    deps = os.path.join(charm_dir(), 'files', LOCAL_PIP_DEPS)
    subprocess.check_call([
        'pip', 'install', '--no-index',
        '--find-links=file://%s' % deps,
        '-r', 'requirements.txt',
    ])
    subprocess.check_call(['python', './setup.py', 'install'])
    log('*** Installed from local tarball.')
def pip_execute(*args, **kwargs):
    """Overriden pip_execute() to stop sys.path being changed.

    The act of importing main from the pip module seems to cause add wheels
    from the /usr/share/python-wheels which are installed by various tools.
    This function ensures that sys.path remains the same after the call is
    executed.
    """
    try:
        # FIX: take a *copy* of sys.path. The original saved only a
        # reference, so if pip mutated the list in place the "restore"
        # below was a no-op.
        _path = list(sys.path)
        try:
            from pip import main as _pip_execute
        except ImportError:
            apt_update()
            apt_install('python-pip')
            from pip import main as _pip_execute
        _pip_execute(*args, **kwargs)
    finally:
        sys.path = _path
def patch_murano_dashboard_template_fix():
    """Patch the installed murano-dashboard with templates from upstream git.

    Clones the murano-dashboard repo at a pinned tag and copies its
    templates directory over the installed package.  No-op if the target
    templates directory already exists.
    """
    import os
    import shutil, errno
    # GitPython is installed on demand via the python-git package.
    try:
        from git import Repo
    except ImportError:
        from charmhelpers.fetch import apt_install
        apt_install("python-git")
        from git import Repo

    def git_download(repo, branch, dst):
        # Clone 'repo' into 'dst' and check out 'branch' (skip if present).
        if os.path.exists(dst):
            log("Directory {0} already exists".format(dst))
            return
        log("Starting to download from git {0}".format(repo))
        repo = Repo.clone_from(repo, dst)
        repo.git.checkout(branch)

    def copy_dir(src, dst):
        # Recursive copy; falls back to file copy for non-directories.
        try:
            log("Copying directory {0} to {1}".format(src, dst))
            shutil.copytree(src, dst)
        except OSError as exc:  # python >2.5
            if exc.errno == errno.ENOTDIR:
                shutil.copy(src, dst)
            else:
                raise

    murano_dashboard_repo = \
        "https://github.com/openstack/murano-dashboard.git"
    # Pinned tag containing the template fix.
    murano_dashboard_branch = "2.0.0.0rc1"
    src = "/tmp/murano_dashboard"
    dst = "/usr/lib/python2.7/dist-packages/muranodashboard"
    src_copy_dir = src + "/muranodashboard/templates/"
    dst_copy_dir = dst + "/templates"
    if os.path.exists(dst_copy_dir):
        # Already patched - nothing to do.
        log("Murano-dashboard directory {0} already exist".format(
            dst_copy_dir))
        return
    git_download(murano_dashboard_repo, murano_dashboard_branch, src)
    copy_dir(src_copy_dir, dst_copy_dir)
    # Clean up the temporary clone.
    shutil.rmtree(src)
def install_ntpmon():
    """
    Install package dependencies, source files, and startup configuration.
    """
    hookenv.log('installing ntpmon dependencies')
    apt_install(['python3-psutil'])

    hookenv.log('installing ntpmon')
    host.rsync('src/', ntpmon_dir)

    if host.init_is_systemd():
        hookenv.log('installing ntpmon systemd configuration')
        host.rsync('src/' + service_name + '.systemd', systemd_config)
        # FIX: the reload command is 'systemctl daemon-reload'; there is
        # no 'systemd' CLI entry point, so the original call always failed.
        subprocess.call(['systemctl', 'daemon-reload'])
    else:
        hookenv.log('installing ntpmon upstart configuration')
        host.rsync('src/' + service_name + '.upstart', upstart_config)
    set_state('ntpmon.installed')
    remove_state('ntpmon.configured')
def _remove_docker_network_bridge():
    ''' By default docker uses the docker0 bridge for container networking.
    This method removes the default docker bridge, and reconfigures the
    DOCKER_OPTS to use the SDN networking bridge. '''
    status_set('maintenance',
               'Reconfiguring container runtime network bridge.')
    host.service_stop('docker')
    apt_install(['bridge-utils'], fatal=True)

    # 'ifconfig docker0 down' is unreliable here; use the native linux
    # networking commands to mark the bridge as inactive, then delete it.
    check_call(['ip', 'link', 'set', 'docker0', 'down'])
    check_call(['brctl', 'delbr', 'docker0'])

    # Render the config and restart docker.
    recycle_daemon()
def install_plugin(ip, ver, venv):
    """ Install TrilioVault DataMover package

    :param ip: address of the TrilioVault package repository host.
    :param ver: unused in this body — TODO confirm intended use.
    :param venv: unused in this body — TODO confirm intended use.
    :returns: True on successful installation, False otherwise.
    """
    # The repo is served over plain HTTP on port 8085, hence the
    # --allow-unauthenticated install option below.
    add_source('deb http://{}:8085 deb-repo/'.format(ip))

    try:
        apt_update()
        apt_install(['tvault-contego'],
                    options=['--allow-unauthenticated'],
                    fatal=True)
        log("TrilioVault DataMover package installation passed")

        status_set('maintenance', 'Starting...')
        return True
    except Exception as e:
        # Best-effort: any failure is logged and reported via the
        # False return rather than crashing the hook.
        log("TrilioVault Datamover package installation failed")
        log("With exception --{}".format(e))
        return False
def install_percona_xtradb_cluster():
    '''Attempt PXC install based on seeding of passwords for users'''
    if pxc_installed():
        log('MySQL already installed, skipping')
        return

    root_pw = root_password()
    sst_pw = sst_password()
    # Both passwords must be seeded before we can install.
    if not (root_pw and sst_pw):
        log('Passwords not seeded, unable to install MySQL at this'
            ' point so deferring installation')
        return

    configure_mysql_root_password(root_pw)
    apt_install(determine_packages(), fatal=True)
    configure_sstuser(sst_pw)
    if config('harden') and 'mysql' in config('harden'):
        run_mysql_checks()
def install():
    """Install OpenDaylight from a tarball URL or the distro package,
    create its user/directories, and start the controller service.
    """
    if config.get("install-sources"):
        configure_sources(update=True, sources_var="install-sources",
                          keys_var="install-keys")

    # install packages
    apt_install(PACKAGES, fatal=True)

    install_url = config["install-url"]
    if install_url:
        # install opendaylight from tarball

        # this extracts the archive too
        install_remote(install_url, dest="/opt")
        # The extracted dirname. Look at what's on disk instead of mangling,
        # so the distribution tar.gz's name doesn't matter.
        install_dir_name = [
            f for f in os.listdir("/opt")
            if f.startswith("distribution-karaf")][0]
        if not os.path.exists("/opt/opendaylight-karaf"):
            os.symlink(install_dir_name, "/opt/opendaylight-karaf")
    else:
        apt_install([KARAF_PACKAGE], fatal=True)
        install_dir_name = "opendaylight-karaf"

    if init_is_systemd():
        shutil.copy("files/odl-controller.service", "/lib/systemd/system")
        service('enable', 'odl-controller')
    else:
        shutil.copy("files/odl-controller.conf", "/etc/init")

    adduser("opendaylight", system_user=True)
    # FIX: 0o755 instead of the Python-2-only literal 0755 (same value;
    # the old spelling is a SyntaxError on Python 3).
    mkdir("/home/opendaylight", owner="opendaylight", group="opendaylight",
          perms=0o755)
    check_call(
        ["chown", "-R", "opendaylight:opendaylight",
         os.path.join("/opt", install_dir_name)])
    mkdir("/var/log/opendaylight", owner="opendaylight",
          group="opendaylight", perms=0o755)

    # install features
    write_mvn_config()
    service_start("odl-controller")
def install():
    """Install hacluster dependencies appropriate to the Ubuntu release."""
    pkgs = copy.deepcopy(PACKAGES)
    ubuntu_release = lsb_release()['DISTRIB_CODENAME'].lower()
    if CompareHostReleases(ubuntu_release) < 'xenial':
        # use libnagios on anything older than Xenial
        pkgs.remove('libmonitoring-plugin-perl')
        pkgs.append('libnagios-plugin-perl')

        # Pre-xenial releases ship the Python 2 netaddr.
        pkgs.remove('python3-netaddr')
        pkgs.append('python-netaddr')
    elif CompareHostReleases(ubuntu_release) >= 'bionic':
        pkgs.append('python3-libmaas')

    # NOTE(dosaboy): we currently disallow upgrades due to bug #1382842. This
    # should be removed once the pacemaker package is fixed.
    status_set('maintenance', 'Installing apt packages')
    apt_install(filter_installed_packages(pkgs), fatal=True)
    setup_ocf_files()
def install():
    """Configure the installation source and install packages."""
    status_set('maintenance', 'Executing pre-install')
    execd_preinstall()

    origin = config()['openstack-origin']
    # On precise the 'distro' pocket is too old; pin to the folsom cloud
    # archive instead.
    if lsb_release()['DISTRIB_CODENAME'] == 'precise' and origin == 'distro':
        origin = 'cloud:precise-folsom'
    configure_installation_source(origin)

    status_set('maintenance', 'Installing apt packages')
    apt_update()
    apt_install(determine_packages(), fatal=True)

    if run_in_apache():
        disable_package_apache_site()

    status_set('maintenance', 'Git install')
    git_install(config('openstack-origin-git'))
def install():
    """Install neutron-api packages and open service ports."""
    status_set('maintenance', 'Executing pre-install')
    execd_preinstall()

    origin = config('openstack-origin')
    configure_installation_source(origin)
    plugin = config('neutron-plugin')
    additional_install_locations(plugin, origin)
    add_source(config('extra-source'), config('extra-key'))

    status_set('maintenance', 'Installing apt packages')
    apt_update(fatal=True)
    apt_install(determine_packages(origin), fatal=True)

    for port in determine_ports():
        open_port(port)

    if plugin == 'midonet':
        mkdir('/etc/neutron/plugins/midonet', owner='neutron',
              group='neutron', perms=0o755, force=False)
def __init__(self, templates_dir, openstack_release):
    """Initialise the renderer for *templates_dir* and release.

    Raises OSConfigException when the templates directory is missing and
    installs the distro jinja2 package if it could not be imported.
    """
    if not os.path.isdir(templates_dir):
        log('Could not locate templates dir %s' % templates_dir,
            level=ERROR)
        raise OSConfigException

    self.templates_dir = templates_dir
    self.openstack_release = openstack_release
    self.templates = {}
    self._tmpl_env = None

    if None in [Environment, ChoiceLoader, FileSystemLoader]:
        # if this code is running, the object is created pre-install hook.
        # jinja2 shouldn't get touched until the module is reloaded on next
        # hook execution, with proper jinja2 bits successfully imported.
        jinja_pkg = 'python-jinja2' if six.PY2 else 'python3-jinja2'
        apt_install(jinja_pkg)
def install():
    """Install the dashboard packages, upgrading precise's python-six."""
    execd_preinstall()
    configure_installation_source(config('openstack-origin'))

    apt_update(fatal=True)
    pkgs = determine_packages()
    if os_release('openstack-dashboard') < 'icehouse':
        pkgs += ['nodejs', 'node-less']
    if lsb_release()['DISTRIB_CODENAME'] == 'precise':
        # Explicitly upgrade python-six Bug#1420708
        apt_install('python-six', fatal=True)

    missing = filter_installed_packages(pkgs)
    if missing:
        status_set('maintenance', 'Installing packages')
        apt_install(missing, fatal=True)

    status_set('maintenance', 'Git install')
    git_install(config('openstack-origin-git'))
    patch_murano_dashboard_template_fix()
def install():
    """Configure APT source.

    The many permutations of package source syntaxes in use does not allow us
    to simply call `add-apt-repository` on the unit and we need to make use
    of `charmhelpers.fetch.add_source` for this to be universally useful.
    """
    configured_source = hookenv.config().get('source', 'distro')
    source, key = os_utils.get_source_and_pgp_key(configured_source)
    fetch.add_source(source, key)
    fetch.apt_update(fatal=True)

    # The ``magpie`` charm is used as principle for functional tests with some
    # subordinate charms. Install the ``openstack-release`` package when
    # available to allow the functional test code to determine installed UCA
    # versions.
    wanted = fetch.filter_installed_packages(['openstack-release'])
    fetch.apt_install(wanted, fatal=False, quiet=True)
    set_state('charm.installed')
def install_from_nvidia_apt():
    ''' Install cuda docker from the nvidia apt repository. '''
    status_set('maintenance', 'Installing docker-engine from Nvidia PPA.')

    # Get the server and key in the apt-key management tool.
    add_apt_key("9DC858229FC7DD38854AE2D88D81803C0EBFCD88")

    # Install key for nvidia-docker. This key changes frequently
    # ([expires: 2019-09-20]) so we should do what the official docs say and
    # not try to get it through its fingerprint.
    add_apt_key_url("https://nvidia.github.io/nvidia-container-runtime/gpgkey")

    # Get the package architecture (amd64), not the machine hardware (x86_64)
    pkg_arch = arch()
    # Get the lsb information as a dictionary.
    lsb_info = host.lsb_release()
    codename = lsb_info['DISTRIB_CODENAME']
    release = lsb_info['DISTRIB_RELEASE']
    distro_id = str(lsb_info['DISTRIB_ID']).lower()

    docker_url = "https://download.docker.com/linux/ubuntu"
    nvidia_url = 'https://nvidia.github.io'
    sources = ['deb [arch={0}] {1} {2} {3}'.format(
        pkg_arch, docker_url, codename, 'stable')]
    for component in ['libnvidia-container',
                      'nvidia-container-runtime',
                      'nvidia-docker']:
        sources.append('deb {0}/{1}/ubuntu{2}/{3} /'.format(
            nvidia_url, component, release, pkg_arch))
    write_docker_sources(sources)

    install_cuda_drivers_repo(pkg_arch, release, distro_id)

    apt_update(fatal=True)

    # actually install the required packages docker-ce nvidia-docker2
    apt_install(
        ['cuda-drivers',
         hookenv.config('docker-ce-package'),
         hookenv.config('nvidia-docker-package'),
         hookenv.config('nvidia-container-runtime-package')],
        fatal=True)
def configure_nvidia():
    """Set up the Nvidia apt repositories and install the Nvidia packages."""
    status_set('maintenance', 'Installing Nvidia drivers.')

    lsb = host.lsb_release()
    release = '{}{}'.format(lsb['DISTRIB_ID'].lower(),
                            lsb['DISTRIB_RELEASE'])
    proxies = {
        "http": config('http_proxy'),
        "https": config('https_proxy'),
    }

    # nvidia-container-runtime repo + key
    ncr_gpg_key = requests.get(
        'https://nvidia.github.io/nvidia-container-runtime/gpgkey',
        proxies=proxies).text
    import_key(ncr_gpg_key)
    ncr_lines = [
        'deb '
        'https://nvidia.github.io/libnvidia-container/{}/$(ARCH) /\n'.format(
            release),
        'deb '
        'https://nvidia.github.io/nvidia-container-runtime/{}/$(ARCH) /\n'.
        format(release),
    ]
    with open('/etc/apt/sources.list.d/nvidia-container-runtime.list',
              'w') as f:
        f.writelines(ncr_lines)

    # CUDA repo + key (repo path uses the release without the dot)
    cuda_release = release.replace('.', '')
    cuda_gpg_key = requests.get(
        'https://developer.download.nvidia.com/'
        'compute/cuda/repos/{}/x86_64/7fa2af80.pub'.format(cuda_release),
        proxies=proxies).text
    import_key(cuda_gpg_key)
    with open('/etc/apt/sources.list.d/cuda.list', 'w') as f:
        f.write('deb '
                'http://developer.download.nvidia.com/'
                'compute/cuda/repos/{}/x86_64 /\n'.format(cuda_release))

    apt_update()
    apt_install(NVIDIA_PACKAGES, fatal=True)

    set_state('containerd.nvidia.ready')
    config_changed()
def _do_openstack_upgrade(new_src):
    """Upgrade nova packages to the release pointed at by ``new_src``.

    :param new_src: new OpenStack installation source (e.g. a cloud archive
                    pocket) to configure before upgrading.
    :returns: the freshly registered configs object for the new release.
    """
    enable_policy_rcd()
    new_os_rel = get_os_codename_install_source(new_src)
    log('Performing OpenStack upgrade to %s.' % (new_os_rel))
    configure_installation_source(new_src)
    # Keep package-shipped config files on conflict; templates are
    # regenerated below anyway.
    dpkg_opts = [
        '--option', 'Dpkg::Options::=--force-confnew',
        '--option', 'Dpkg::Options::=--force-confdef',
    ]
    apt_update(fatal=True)
    apt_upgrade(options=dpkg_opts, fatal=True, dist=True)
    apt_install(determine_packages(), fatal=True)
    disable_policy_rcd()
    # NOTE(jamespage) upgrade with existing config files as the
    # havana->icehouse migration enables new service_plugins which
    # create issues with db upgrades
    reset_os_release()
    configs = register_configs(release=new_os_rel)
    configs.write_all()
    # From mitaka onwards a separate nova_api database is required; if it
    # isn't set up yet, defer restarts/migrations until it is.
    if new_os_rel >= 'mitaka' and not database_setup(prefix='novaapi'):
        # NOTE: Defer service restarts and database migrations for now
        # as nova_api database is not yet created
        if (relation_ids('cluster') and is_elected_leader(CLUSTER_RES)):
            # NOTE: reset dbsync state so that migration will complete
            # when the nova_api database is setup.
            peer_store('dbsync_state', None)
        return configs
    if is_elected_leader(CLUSTER_RES):
        status_set('maintenance', 'Running nova db migration')
        migrate_nova_database()
    if not is_unit_paused_set():
        [service_start(s) for s in services()]
    return configs
def setup_gitreview(path, repo, host):
    """ Configure .gitreview so that when user clones repo the default
    git-review target is their CIaaS not upstream openstack.

    :param repo: <project>/<os-project>
    :param host: hostname/address of Gerrit git repository

    Returns list of commands to executed in the git repo to apply these
    changes.
    """
    commands = []
    git_review_cfg = '.gitreview'
    target = os.path.join(path, git_review_cfg)
    log("Configuring %s" % (target))
    if not os.path.exists(target):
        log("%s not found in %s repo" % (target, repo), level=INFO)
        commands.append(['git', 'add', git_review_cfg])

    # See https://bugs.launchpad.net/canonical-ci/+bug/1354923 for explanation
    # of why we are doing this here.
    try:
        import jinja2  # NOQA
    except ImportError:
        apt_install(filter_installed_packages(['python-jinja2']), fatal=True)
    finally:
        from jinja2 import Template

    templates_dir = os.path.join(charm_dir(), TEMPLATES)
    with open(os.path.join(templates_dir, git_review_cfg), 'r') as fd:
        template = Template(fd.read())
    rendered = template.render(repo=repo, host=host, port=SSH_PORT)
    with open(target, 'w') as fd:
        fd.write(rendered)

    commit_msg = str("Configured git-review to point to '%s'" % (host))
    commands.append(['git', 'commit', '-a', '-m', commit_msg])
    return commands
def reinstall_paste_ini():
    ''' Re-install glance-{api,registry}-paste.ini file from packages

    Existing glance-{api,registry}-paste.ini file will be removed and
    the original files provided by the packages will be re-installed.
    This will only ever be performed once per unit.
    '''
    db = kv()
    if db.get(PASTE_INI_MARKER):
        # Already done on this unit.
        return
    for ini_file in (GLANCE_REGISTRY_PASTE, GLANCE_API_PASTE):
        if os.path.exists(ini_file):
            os.remove(ini_file)
    apt_install(packages=['glance-api', 'glance-registry'],
                options=REINSTALL_OPTIONS,
                fatal=True)
    db.set(PASTE_INI_MARKER, True)
    db.flush()
def update_nrpe_config():
    """Refresh NRPE checks: init-service checks plus a netns file check.

    Drops a cron job that writes the netns status file every 5 minutes;
    the registered nagios check then just reads that file.
    """
    # python-dbus is used by check_upstart_job
    apt_install('python-dbus')
    hostname = nrpe.get_nagios_hostname()
    current_unit = nrpe.get_nagios_unit_name()
    nrpe_setup = nrpe.NRPE(hostname=hostname)
    nrpe.add_init_service_checks(nrpe_setup, services(), current_unit)
    cronpath = '/etc/cron.d/nagios-netns-check'
    cron_template = ('*/5 * * * * root '
                     '/usr/local/lib/nagios/plugins/check_netns.sh '
                     '> /var/lib/nagios/netns-check.txt\n')
    # FIX: use a context manager so the cron file is closed even if
    # write() raises (original open/write/close leaked on error).
    with open(cronpath, 'w') as f:
        f.write(cron_template)
    nrpe_setup.add_check(
        shortname="netns",
        description='Network Namespace check {%s}' % current_unit,
        check_cmd='check_status_file.py -f /var/lib/nagios/netns-check.txt')
    nrpe_setup.write()
def install():
    """Install swift proxy packages and prepare the ring/www directories."""
    status_set('maintenance', 'Executing pre-install')
    execd_preinstall()

    src = config('openstack-origin')
    if src != 'distro':
        openstack.configure_installation_source(src)

    status_set('maintenance', 'Installing apt packages')
    apt_update(fatal=True)
    release = openstack.get_os_codename_install_source(src)
    apt_install(determine_packages(release), fatal=True)
    apt_install(extra_pkgs, fatal=True)

    ensure_swift_dir()
    # configure a directory on webserver for distributing rings.
    ensure_www_dir_permissions(get_www_dir())

    # call the policy overrides handler which will install any policy
    # overrides
    policyd.maybe_do_policyd_overrides(openstack.os_release('swift-proxy'),
                                       'swift')
def install_prerequsites(self):
    """Install HPCC prerequisite packages for this Ubuntu release.

    Reads the package list from ``dependencies/<codename>/community.yaml``
    and, depending on the ``hpcc-type`` config (``EE``/``LN``), extends it
    with the enterprise or internal package list before installing.
    """
    hookenv.status_set('maintenance', 'Installing prerequisites')
    charm_dir = hookenv.charm_dir()
    prereq_dir = os.path.join(charm_dir, 'dependencies',
                              self._distrib_codename())
    with open(os.path.join(prereq_dir, 'community.yaml')) as fp:
        workload = yaml.safe_load(fp)
    packages = workload['packages']
    addition_file = ""
    hpcc_type = self.config['hpcc-type']
    if hpcc_type == "EE":
        addition_file = os.path.join(prereq_dir, "enterprise.yaml")
    elif hpcc_type == "LN":
        addition_file = os.path.join(prereq_dir, "internal.yaml")
    if addition_file:
        with open(addition_file) as fp:
            workload = yaml.safe_load(fp)
        packages.extend(workload['packages'])
    fetch.apt_install(fetch.filter_installed_packages(packages))

def _distrib_codename(self):
    """Return the Ubuntu release codename (e.g. ``xenial``).

    FIX: ``platform.linux_distribution()`` was removed in Python 3.8, so
    fall back to parsing /etc/os-release when it is unavailable.
    """
    try:
        return platform.linux_distribution()[2]
    except AttributeError:
        with open('/etc/os-release') as fp:
            info = dict(line.rstrip().split('=', 1)
                        for line in fp if '=' in line)
        return info.get('VERSION_CODENAME', '').strip('"')
def upgrade_charm():
    """Handle charm upgrade: refresh packages, configs and relations."""
    status_set('maintenance', 'Installing apt packages')
    apt_install(filter_installed_packages(determine_packages()))

    if run_in_apache():
        disable_unused_apache_sites()

    CONFIGS.write_all()

    # See LP bug 1519035
    leader_init_db_if_ready()

    update_nrpe_config()

    if is_elected_leader(CLUSTER_RES):
        log('Cluster leader - ensuring endpoint configuration is up to '
            'date', level=DEBUG)
        update_all_identity_relation_units()
def update_nrpe_config():
    """Register the ceph-osd process check with NRPE."""
    # python3-dbus is used by check_upstart_job
    apt_install('python3-dbus')
    hostname = nrpe.get_nagios_hostname()
    current_unit = nrpe.get_nagios_unit_name()

    # create systemd or upstart check
    if init_is_systemd():
        pipeline = ('xargs -I_@ /usr/local/lib/nagios/plugins/'
                    'check_systemd.py ceph-osd@_@')
    else:
        pipeline = 'xargs -I@ status ceph-osd id=@ && exit 0 || exit 2'
    cmd = '/bin/cat /var/lib/ceph/osd/ceph-*/whoami |' + pipeline

    nrpe_setup = nrpe.NRPE(hostname=hostname)
    nrpe_setup.add_check(
        shortname='ceph-osd',
        description='process check {%s}' % current_unit,
        check_cmd=cmd)
    nrpe_setup.write()
def install():
    """Install cplane controller packages plus JBoss/JDK and Oracle bits."""
    apt_update(fatal=True)
    pkgs = determine_packages()
    apt_install(pkgs, fatal=True)
    prepare_env()
    flush_upgrade_type()
    download_cplane_packages()
    install_jboss()
    install_jdk()
    # FIX: append the marker directly instead of the unchecked
    # os.system("echo ... >> /etc/hosts") shell-out; the written line is
    # identical ('#Added by cplane' plus newline).
    with open('/etc/hosts', 'a') as hosts:
        hosts.write('#Added by cplane\n')
    if config('jboss-db-on-host'):
        install_oracle()
        configure_oracle()
    else:
        install_oracle_client()
        configure_oracle_client()
    cplane_installer()
    # NOTE: the config key 'intall-reboot-scripts' is misspelled but must
    # match the charm's config.yaml; do not "fix" the spelling here.
    if config('intall-reboot-scripts') == 'y':
        install_reboot_scripts()
def install_maas_cli(self):
    """Ensure maas-cli is installed

    Fallback to MAAS stable PPA when needed.
    """
    apt.init()
    cache = apt.Cache()
    try:
        pkg = cache['maas-cli']
    except KeyError:
        # Package index doesn't know maas-cli: add the stable PPA,
        # refresh the index and retry once.
        subprocess.check_call(['add-apt-repository', '-y', MAAS_STABLE_PPA])
        subprocess.check_call(['apt-get', 'update'])
        self.install_maas_cli()
        return

    if not pkg.current_ver:
        apt_install('maas-cli', fatal=True)
def install_packages():
    """Install etckeeper plus the configured DVCS, then verify install."""
    # for storing the packages to be installed
    packages = ['etckeeper']
    log("Determining DVCS packages")
    packages.append(config['dvcs'])

    log("Ensuring etckeeper and DVCS is installed")
    apt_install(filter_installed_packages(packages))

    log("Checking if packages were installed")
    still_missing = filter_installed_packages(packages)
    if still_missing:
        log("Failed to install packages {}".format(still_missing))
        status_set("error", "Package installation failed")
    else:
        log("Packages successfully installed")
        status_set("active", "etckeeper installed")
def ensure_snapd_min_version(min_version):
    """Make sure the installed snapd is at least *min_version*.

    Installs snapd from -proposed when the current version is too old and
    raises UnsatisfiedMinimumVersionError if it still isn't new enough.
    """
    installed = _get_snapd_version()
    if installed >= LooseVersion(min_version):
        return

    from charmhelpers.fetch import add_source, apt_update, apt_install
    # Temporary until LP:1735344 lands
    add_source("distro-proposed", fail_invalid=True)
    distro = get_series()
    # disable proposed by default, needs to explicit
    write_file(
        "/etc/apt/preferences.d/proposed",
        PREFERENCES.format(distro),
    )
    apt_update()
    # explicitly install snapd from proposed
    apt_install("snapd/{}-proposed".format(distro))

    installed = _get_snapd_version()
    if installed < LooseVersion(min_version):
        hookenv.log("Failed to install snapd >= {}".format(min_version),
                    ERROR)
        raise UnsatisfiedMinimumVersionError(min_version, installed)