def do_openstack_upgrade(configs):
    """Perform an upgrade of cinder.

    Takes care of upgrading packages, rewriting configs + database
    migration and potentially any other post-upgrade actions.

    :param configs: The charms main OSConfigRenderer object.
    """
    new_src = config("openstack-origin")
    new_os_rel = get_os_codename_install_source(new_src)
    log("Performing OpenStack upgrade to %s." % (new_os_rel))
    configure_installation_source(new_src)
    # Non-interactive conffile handling for the dist-upgrade.
    dpkg_opts = ["--option", "Dpkg::Options::=--force-confnew",
                 "--option", "Dpkg::Options::=--force-confdef"]
    apt_update()
    apt_upgrade(options=dpkg_opts, fatal=True, dist=True)
    apt_install(determine_packages(), fatal=True)
    # set CONFIGS to load templates from new release and regenerate config
    configs.set_release(openstack_release=new_os_rel)
    configs.write_all()
    # Stop services, migrate the DB on the elected leader only, restart.
    [service_stop(s) for s in services()]
    if is_elected_leader(CLUSTER_RES):
        migrate_database()
    [service_start(s) for s in services()]
def install():
    """Install hook: configure package sources, install packages,
    open service ports and apply any policy overrides.
    """
    status_set('maintenance', 'Executing pre-install')
    execd_preinstall()
    openstack_origin = config('openstack-origin')
    configure_installation_source(openstack_origin)
    neutron_plugin = config('neutron-plugin')
    # Plugin-specific package locations plus any user-supplied extras.
    additional_install_locations(neutron_plugin, openstack_origin)
    add_source(config('extra-source'), config('extra-key'))
    status_set('maintenance', 'Installing apt packages')
    apt_update(fatal=True)
    packages = determine_packages(openstack_origin)
    apt_install(packages, fatal=True)
    for port in determine_ports():
        open_port(port)
    if neutron_plugin == 'midonet':
        mkdir('/etc/neutron/plugins/midonet', owner='neutron',
              group='neutron', perms=0o755, force=False)
    # call the policy overrides handler which will install any policy
    # overrides
    maybe_do_policyd_overrides(
        os_release('neutron-server'),
        'neutron',
        restart_handler=lambda: service_restart('neutron-server'))
def install():
    """Install hook: install packages, copy bundled helper files to
    /usr/bin, open ports and pause services until relations are joined.
    """
    hookenv.status_set('maintenance', 'Executing pre-install')
    execd.execd_preinstall()
    ch_utils.configure_installation_source(hookenv.config('openstack-origin'))
    hookenv.status_set('maintenance', 'Installing apt packages')
    ch_fetch.apt_update()
    ch_fetch.apt_install(ncc_utils.determine_packages(), fatal=True)
    ncc_utils.disable_package_apache_site()
    ncc_utils.stop_deprecated_services()
    # Copy any plain files shipped in the charm's files/ dir to /usr/bin.
    _files = os.path.join(hookenv.charm_dir(), 'files')
    if os.path.isdir(_files):
        for f in os.listdir(_files):
            f = os.path.join(_files, f)
            if os.path.isfile(f):
                hookenv.log('Installing %s to /usr/bin' % f)
                shutil.copy2(f, '/usr/bin')
    for port in ncc_utils.determine_ports():
        hookenv.open_port(port)
    msg = 'Disabling services into db relation joined'
    hookenv.log(msg)
    hookenv.status_set('maintenance', msg)
    # Pause services unless the unit is administratively paused already.
    if not ch_utils.is_unit_paused_set():
        for svc in ncc_utils.services():
            ch_host.service_pause(svc)
    else:
        hookenv.log('Unit is in paused state, not issuing stop/pause '
                    'to all services')
def install():
    """
    Install jenkins-job-builder from a archive, remote git repository or a
    locally bundled copy shipped with the charm.  Any locally bundled copy
    overrides 'jjb-install-source' setting.
    """
    if not os.path.isdir(CONFIG_DIR):
        os.mkdir(CONFIG_DIR)
    src = config('jjb-install-source')
    tarball = os.path.join(charm_dir(), 'files', TARBALL)
    # Bundled tarball takes precedence over the configured source.
    if os.path.isfile(tarball):
        log('Installing jenkins-job-builder from bundled file: %s.' % tarball)
        install_from_file(tarball)
    elif src.startswith('git://'):
        log('Installing jenkins-job-builder from remote git: %s.' % src)
        install_from_git(src)
    elif src == 'distro':
        log('Installing jenkins-job-builder from Ubuntu archive.')
        if lsb_release()['DISTRIB_CODENAME'] in ['precise', 'quantal']:
            m = ('jenkins-job-builder package only available in Ubuntu 13.04 '
                 'and later.')
            raise Exception(m)
        apt_update(fatal=True)
        apt_install(['jenkins-job-builder', 'python-pbr'], fatal=True)
    else:
        m = ('Must specify a git url as install source or bundled source with '
             'the charm.')
        log(m, ERROR)
        raise Exception(m)
def install():
    """Install and configure bind9.

    Installs bind9 from the archive, or from bundled offline packages
    when the 'offline' config option is set, then renders
    /etc/bind/named.conf.options so bind forwards to the nameserver
    found in /etc/resolv.conf, and opens port 53 on TCP and UDP.
    """
    if config()['offline'] is False:
        apt_update(fatal=True)
        apt_install(packages=['bind9', 'dnsutils'], fatal=True)
    else:
        log("Installing offline debian packages")
        install_packages('files/bind')
        # rerun because the first pass of the installer is buggy
        install_packages('files/bind')
        log("Installing Python packages")
        pip_install('files/bind/pip')

    # Use the nameserver in /etc/resolv.conf as a forwarder.
    import DNS
    DNS.ParseResolvConf("/etc/resolv.conf")
    nameserver = DNS.defaults['server'][0]
    log('Setting dns to be forwarder to :' + nameserver)

    # Use a Jinja2 template to enable bind forwarding.
    import jinja2
    template_loader = jinja2.FileSystemLoader(
        searchpath=os.environ['CHARM_DIR'])
    template_env = jinja2.Environment(loader=template_loader)
    template = template_env.get_template(
        'contrib/bind/templates/named.conf.options.jinja2')
    rendered = template.render(forwarder=nameserver)
    # BUG FIX: the rendered template is a str, so the target file must be
    # opened in text mode ("w"); "wb" raises TypeError on Python 3.
    with open("/etc/bind/named.conf.options", "w") as fh:
        fh.write(rendered)

    if not os.path.exists('/etc/bind/zone-backup'):
        os.makedirs('/etc/bind/zone-backup')

    open_port(53, "TCP")
    open_port(53, "UDP")
def configure_source(self):
    """Configure TrilioVault specific package sources
    """
    # Write the configured source line verbatim into a dedicated apt
    # sources fragment, then refresh the package indexes.
    with open("/etc/apt/sources.list.d/trilio-gemfury-sources.list",
              "w") as tsources:
        tsources.write(ch_core.hookenv.config("triliovault-pkg-source"))
    fetch.apt_update(fatal=True)
def install_iptables_persistent():
    """Install iptables-persistent and nmap (skipping any already
    installed) and flag the dependencies as ready.
    """
    hookenv.log('Installing iptables-persistent')
    fetch.apt_update()
    fetch.apt_install(fetch.filter_installed_packages(
        ['iptables-persistent', 'nmap']
    ))
    set_state('dependencies.installed')
def install_puppet_agent(): """ Install puppet-agent """ # Download and install trusty puppet deb hookenv.status_set('maintenance', 'Configuring trusty puppet apt sources') aufh = ArchiveUrlFetchHandler() aufh.download(TRUSTY_PUPPET_DEB_URL, TRUSTY_PUPPET_DEB_TEMP) dpkg_trusty_puppet_deb = 'dpkg -i %s' % TRUSTY_PUPPET_DEB_TEMP call(dpkg_trusty_puppet_deb.split(), shell=False) apt_update() #Clean up rm_trusty_puppet_deb = 'rm %s' % TRUSTY_PUPPET_DEB_TEMP call(rm_trusty_puppet_deb.split(), shell=False) # Install puppet-agent hookenv.status_set('maintenance', 'Installing puppet-agent %s' % PUPPET_VERSION) apt_install(PUPPET_AGENT_PKGS) render_puppet_conf(PUPPET_CONF_CTXT) hookenv.status_set('active', 'Puppet-agent: %s installed.' % PUPPET_VERSION)
def install():
    """Install hook: configure the OpenStack source, install packages
    and prepare swift storage directories.
    """
    execd_preinstall()
    configure_installation_source(config("openstack-origin"))
    apt_update()
    apt_install(PACKAGES, fatal=True)
    setup_storage()
    ensure_swift_directories()
def install():
    """Install hook: install keystone either as a snap (when requested
    by the origin) or from apt packages, and keep the service stopped
    until it is configured.
    """
    status_set('maintenance', 'Executing pre-install')
    execd_preinstall()
    configure_installation_source(config('openstack-origin'))
    status_set('maintenance', 'Installing apt packages')
    apt_update()
    apt_install(determine_packages(), fatal=True)

    if snap_install_requested():
        status_set('maintenance', 'Installing keystone snap')
        # NOTE(thedac) Setting devmode until LP#1719636 is fixed
        install_os_snaps(
            get_snaps_install_info_from_origin(
                ['keystone'],
                config('openstack-origin'),
                mode='devmode'))
        post_snap_install()
        service_stop('snap.keystone.*')
    else:
        # unconfigured keystone service will prevent start of haproxy in some
        # circumstances. make sure haproxy runs. LP #1648396
        service_stop('keystone')
        service_start('haproxy')
        if run_in_apache():
            disable_unused_apache_sites()
            service_pause('keystone')
def upgrade_charm():
    """Upgrade hook: migrate legacy config location, refresh nodejs and
    meteor/meteorite, and restart the service if it was running.
    """
    hookenv.log('Upgrading Meteor')
    # Older charm versions kept config under BASE_DIR; move it to the
    # current charm-dir location if the new file does not exist yet.
    OLD_METEOR_CONFIG = BASE_DIR + '/.juju-config'
    NEW_METEOR_CONFIG = os.path.join(hookenv.charm_dir(),
                                     hookenv.Config.CONFIG_FILE_NAME)
    if (os.path.exists(OLD_METEOR_CONFIG) and
            not os.path.exists(NEW_METEOR_CONFIG)):
        hookenv.log('Moving config from {} to {}'.format(
            OLD_METEOR_CONFIG, NEW_METEOR_CONFIG))
        shutil.move(OLD_METEOR_CONFIG, NEW_METEOR_CONFIG)
    config = hookenv.config()
    # Meteor install script relies on HOME pointing at the service user.
    os.environ['HOME'] = os.path.expanduser('~' + USER)
    hookenv.log('Upgrading nodejs')
    fetch.apt_update()
    fetch.apt_install(PACKAGES)
    hookenv.log('Upgrading meteor/meteorite')
    subprocess.check_call(DOWNLOAD_CMD.split())
    subprocess.check_call(INSTALL_CMD.split())
    subprocess.check_call('npm install -g meteorite'.split())
    init_dependencies(config)
    if host.service_running(SERVICE):
        start()
def install():
    """Install hook: create the service user, install nodejs and
    meteor/meteorite, initialize the app and write its upstart config.
    """
    config = hookenv.config()
    host.adduser(USER, password='')
    host.mkdir(BASE_DIR, owner=USER, group=USER)
    # Meteor install script needs this
    os.environ['HOME'] = os.path.expanduser('~' + USER)
    hookenv.log('Installing dependencies')
    fetch.add_source(NODEJS_REPO)
    fetch.apt_update()
    fetch.apt_install(PACKAGES)
    hookenv.log('Installing Meteor')
    subprocess.check_call(DOWNLOAD_CMD.split())
    subprocess.check_call(INSTALL_CMD.split())
    subprocess.check_call('npm install -g meteorite'.split())
    init_code(config)
    init_bundle(config)
    init_dependencies(config)
    hookenv.open_port(config['port'])
    # Everything under BASE_DIR must belong to the service user.
    subprocess.check_call(
        ['chown', '-R', '{user}:{user}'.format(user=USER), BASE_DIR])
    config['mongo_url'] = ''
    write_upstart(config)
def install():
    """Install hook: configure sources and install plugin packages,
    blocking the unit if the configured plugin is invalid.
    """
    status_set('maintenance', 'Executing pre-install')
    execd_preinstall()
    src = config('openstack-origin')
    # On precise the 'distro' origin maps to folsom via the cloud archive.
    if (lsb_release()['DISTRIB_CODENAME'] == 'precise' and
            src == 'distro'):
        src = 'cloud:precise-folsom'
    configure_installation_source(src)
    status_set('maintenance', 'Installing apt packages')
    apt_update(fatal=True)
    apt_install('python-six', fatal=True)  # Force upgrade
    if valid_plugin():
        apt_install(filter_installed_packages(get_early_packages()),
                    fatal=True)
        apt_install(filter_installed_packages(get_packages()),
                    fatal=True)
        status_set('maintenance', 'Git install')
        git_install(config('openstack-origin-git'))
    else:
        message = 'Please provide a valid plugin config'
        log(message, level=ERROR)
        status_set('blocked', message)
        sys.exit(1)
    # Legacy HA for Icehouse
    update_legacy_ha_files()
def install_from_upstream_apt():
    """
    Install docker from the apt repository. This is a pyton adaptation of
    the shell script found at https://get.docker.com/.

    :return: None
    """
    status_set('maintenance', 'Installing docker-ce from upstream PPA.')
    key_url = 'https://download.docker.com/linux/ubuntu/gpg'
    add_apt_key_url(key_url)

    # The url to the server that contains the docker apt packages.
    apt_url = 'https://download.docker.com/linux/ubuntu'
    # Get the lsb information as a dictionary.
    lsb = host.lsb_release()
    # The codename for the release.
    code = lsb['DISTRIB_CODENAME']
    # Repo can be: stable, edge or test.
    repo = 'stable'
    # E.g.
    # deb [arch=amd64] https://download.docker.com/linux/ubuntu bionic stable
    debs = list()
    debs.append('deb [arch={}] {} {} {}'.format(arch(), apt_url, code, repo))
    write_docker_sources(debs)
    apt_update(fatal=True)
    # Install Docker via apt.
    apt_install(docker_packages['upstream'], fatal=True)
def install_packages():
    """Install juju, deployment tooling and charm-tools from the
    ppa:juju/stable archive, skipping anything already installed.
    """
    hookenv.status_set('maintenance', 'Installing packages')
    fetch.add_source('ppa:juju/stable')
    fetch.apt_update()
    packages = ['juju', 'juju-core', 'juju-deployer', 'git',
                'python-yaml', 'python-jujuclient', 'charm-tools']
    fetch.apt_install(fetch.filter_installed_packages(packages))
def install():
    """Install hook: install keystone as a snap or from apt, keep the
    service stopped until configured, and set up unison SSH users for
    peer replication.
    """
    status_set('maintenance', 'Executing pre-install')
    execd_preinstall()
    configure_installation_source(config('openstack-origin'))
    status_set('maintenance', 'Installing apt packages')
    apt_update()
    apt_install(determine_packages(), fatal=True)

    if snap_install_requested():
        status_set('maintenance', 'Installing keystone snap')
        # NOTE(thedac) Setting devmode until LP#1719636 is fixed
        install_os_snaps(
            get_snaps_install_info_from_origin(['keystone'],
                                               config('openstack-origin'),
                                               mode='devmode'))
        post_snap_install()
        service_stop('snap.keystone.*')
    else:
        # unconfigured keystone service will prevent start of haproxy in some
        # circumstances. make sure haproxy runs. LP #1648396
        service_stop('keystone')
        service_start('haproxy')
        if run_in_apache():
            disable_unused_apache_sites()
            service_pause('keystone')
    unison.ensure_user(user=SSH_USER, group=SSH_USER)
    unison.ensure_user(user=SSH_USER, group=KEYSTONE_USER)
def config_changed():
    '''
    This hook is run when a config parameter is changed.
    It also runs on node reboot.
    '''
    charm_config = config()
    # Re-install packages when any of the source/build options change.
    if (charm_config.changed('install_sources') or
            charm_config.changed('plumgrid-build') or
            charm_config.changed('networking-build') or
            charm_config.changed('install_keys')):
        status_set('maintenance', 'Upgrading apt packages')
        if charm_config.changed('install_sources'):
            configure_pg_sources()
        configure_sources()
        apt_update()
        pkgs = determine_packages()
        for pkg in pkgs:
            apt_install(pkg, options=['--force-yes'], fatal=True)
        service_stop('neutron-server')
    if (charm_config.changed('networking-plumgrid-version') or
            charm_config.changed('pip-proxy')):
        ensure_files()
        service_stop('neutron-server')
    CONFIGS.write_all()
    # Make sure neutron-server ends up running after any of the above.
    if not service_running('neutron-server'):
        service_start('neutron-server')
def do_openstack_upgrade(configs):
    """
    Perform an upgrade.  Takes care of upgrading packages, rewriting
    configs, database migrations and potentially any other post-upgrade
    actions.

    :param configs: The charms main OSConfigRenderer object.
    """
    cur_os_rel = os_release('neutron-common')
    new_src = config('openstack-origin')
    new_os_rel = get_os_codename_install_source(new_src)
    log('Performing OpenStack upgrade to %s.' % (new_os_rel))
    configure_installation_source(new_src)
    # Non-interactive conffile handling for the dist-upgrade.
    dpkg_opts = [
        '--option', 'Dpkg::Options::=--force-confnew',
        '--option', 'Dpkg::Options::=--force-confdef',
    ]
    apt_update(fatal=True)
    apt_upgrade(options=dpkg_opts, fatal=True, dist=True)
    pkgs = determine_packages(new_os_rel)
    # Sort packages just to make unit tests easier
    pkgs.sort()
    apt_install(packages=pkgs,
                options=dpkg_opts,
                fatal=True)
    # set CONFIGS to load templates from new release
    configs.set_release(openstack_release=new_os_rel)
    # Before kilo it's nova-cloud-controllers job
    # NOTE: release codenames compare alphabetically, matching OpenStack's
    # release ordering, so '>= kilo' works as a version check here.
    if is_elected_leader(CLUSTER_RES) and new_os_rel >= 'kilo':
        stamp_neutron_database(cur_os_rel)
        migrate_neutron_database()
def do_openstack_upgrade(configs):
    """Perform an upgrade of glance.

    Takes care of upgrading packages, rewriting configs + database
    migration and potentially any other post-upgrade actions.

    :param configs: The charms main OSConfigRenderer object.
    """
    new_src = config('openstack-origin')
    new_os_rel = get_os_codename_install_source(new_src)
    log('Performing OpenStack upgrade to %s.' % (new_os_rel))
    configure_installation_source(new_src)
    # Non-interactive conffile handling for the dist-upgrade.
    dpkg_opts = [
        '--option', 'Dpkg::Options::=--force-confnew',
        '--option', 'Dpkg::Options::=--force-confdef',
    ]
    apt_update()
    apt_upgrade(options=dpkg_opts, fatal=True, dist=True)
    apt_install(determine_packages(), fatal=True)

    # set CONFIGS to load templates from new release and regenerate config
    configs.set_release(openstack_release=new_os_rel)
    configs.write_all()

    [service_stop(s) for s in services()]
    if is_elected_leader(CLUSTER_RES):
        migrate_database()
    # Don't start services if the unit is supposed to be paused.
    if not is_unit_paused_set():
        [service_start(s) for s in services()]
def upgrade_monitor():
    """Upgrade the ceph monitor on this unit to the configured release.

    Adds the configured package source, stops the monitor daemon(s),
    upgrades the ceph packages and restarts the daemon(s).  On any
    failure the unit is blocked and the hook exits with status 1.
    """
    current_version = ceph.get_version()
    status_set("maintenance", "Upgrading monitor")
    log("Current ceph version is {}".format(current_version))
    new_version = config('release-version')
    log("Upgrading to: {}".format(new_version))

    try:
        add_source(config('source'), config('key'))
        apt_update(fatal=True)
    except subprocess.CalledProcessError as err:
        # BUG FIX: exceptions have no ``.message`` attribute on Python 3
        # (it would raise AttributeError here); use str(err) instead.
        log("Adding the ceph source failed with message: {}".format(
            str(err)))
        status_set("blocked", "Upgrade to {} failed".format(new_version))
        sys.exit(1)
    try:
        # Stop the monitor(s): one unit per mon id under systemd,
        # otherwise the legacy ceph-mon-all service.
        if ceph.systemd():
            for mon_id in ceph.get_local_mon_ids():
                service_stop('ceph-mon@{}'.format(mon_id))
        else:
            service_stop('ceph-mon-all')
        apt_install(packages=ceph.PACKAGES, fatal=True)
        if ceph.systemd():
            for mon_id in ceph.get_local_mon_ids():
                service_start('ceph-mon@{}'.format(mon_id))
        else:
            service_start('ceph-mon-all')
        status_set("active", "")
    except subprocess.CalledProcessError as err:
        # BUG FIX: same ``.message`` issue as above.
        log("Stopping ceph and upgrading packages failed "
            "with message: {}".format(str(err)))
        status_set("blocked", "Upgrade to {} failed".format(new_version))
        sys.exit(1)
def install():
    """Install hook: configure sources, install apt packages and a git
    source tree, open ports and optionally install etcd from a URL.
    """
    status_set('maintenance', 'Executing pre-install')
    execd_preinstall()
    openstack_origin = config('openstack-origin')
    configure_installation_source(openstack_origin)
    neutron_plugin = config('neutron-plugin')
    additional_install_locations(neutron_plugin, openstack_origin)
    add_source(config('extra-source'), config('extra-key'))
    status_set('maintenance', 'Installing apt packages')
    apt_update(fatal=True)
    packages = determine_packages(openstack_origin)
    apt_install(packages, fatal=True)
    status_set('maintenance', 'Git install')
    git_install(config('openstack-origin-git'))
    [open_port(port) for port in determine_ports()]
    if neutron_plugin == 'midonet':
        mkdir('/etc/neutron/plugins/midonet', owner='neutron',
              group='neutron', perms=0o755, force=False)
    etcd_package_url = config('etcd-package-url')
    if etcd_package_url and etcd_package_url.startswith('http'):
        # NOTE(review): dpkg -i is given only the URL's basename, which
        # assumes wget saved the file into the current working directory
        # -- confirm this holds in the hook environment.
        check_call([
            "wget",
            etcd_package_url
        ])
        check_call([
            "dpkg",
            "-i",
            etcd_package_url.split('/')[-1]
        ])
def _aci_install(relation_id=None):
    """Install ACI / OpFlex packages from the configured repository,
    optionally including group-based-policy packages.

    :param relation_id: Unused; accepted for hook-call compatibility.
    """
    log("Installing ACI packages")
    pkgs = ['python-apicapi', 'neutron-ml2-driver-apic',
            'group-based-policy', 'python-group-based-policy-client',
            'neutron-opflex-agent']
    gbp_pkgs = ['group-based-policy', 'python-group-based-policy-client']
    # Keep existing conffiles during upgrades.
    opt = ['--option=Dpkg::Options::=--force-confdef',
           '--option=Dpkg::Options::=--force-confold']
    conf = config()
    if 'aci-repo-key' in conf.keys():
        fetch.add_source(conf['aci-repo'], key=conf['aci-repo-key'])
    else:
        # Without a key the repo cannot be verified by apt.
        fetch.add_source(conf['aci-repo'])
        opt.append('--allow-unauthenticated')
    fetch.apt_update(fatal=True)
    fetch.apt_upgrade(fatal=True)
    for pkg in pkgs:
        fetch.apt_install(pkg, options=opt, fatal=True)
    if conf['use-gbp']:
        for pkg in gbp_pkgs:
            fetch.apt_install(pkg, options=opt, fatal=True)
def install():
    """Install hook: install keystone from snap, apt or git, pausing the
    service until configured, and set up unison SSH users.
    """
    status_set('maintenance', 'Executing pre-install')
    execd_preinstall()
    configure_installation_source(config('openstack-origin'))
    status_set('maintenance', 'Installing apt packages')
    apt_update()
    apt_install(determine_packages(), fatal=True)

    if snap_install_requested():
        status_set('maintenance', 'Installing keystone snap')
        snap_install('keystone', '--edge', '--classic')
        service_pause('snap.keystone.uwsgi')
        service_pause('snap.keystone.nginx')
    else:
        if run_in_apache():
            disable_unused_apache_sites()
            if not git_install_requested():
                service_pause('keystone')

    status_set('maintenance', 'Git install')
    git_install(config('openstack-origin-git'))

    unison.ensure_user(user=SSH_USER, group='juju_keystone')
    # NOTE(coreycb): can just use group='keystone' once snap has drop
    # privs support
    if snap_install_requested():
        unison.ensure_user(user=SSH_USER, group='root')
    else:
        unison.ensure_user(user=SSH_USER, group='keystone')
def install_haproxy():
    """Install haproxy when the haproxy.installed flag is not set.

    Adds the charm's configured PPA, refreshes the indexes, installs the
    haproxy package and raises the 'haproxy.installed' state.
    """
    hookenv.status_set("maintenance", "Installing HAProxy")
    fetch.add_source(ph.ppa)
    fetch.apt_update()
    # BUG FIX: charmhelpers.fetch exposes apt_install(), not install();
    # the original call would fail with AttributeError.
    fetch.apt_install("haproxy")
    set_state("haproxy.installed")
def do_openstack_upgrade(configs):
    """Perform an upgrade of heat.

    Takes care of upgrading packages, rewriting configs and potentially
    any other post-upgrade actions.

    :param configs: The charms main OSConfigRenderer object.
    """
    new_src = config('openstack-origin')
    new_os_rel = get_os_codename_install_source(new_src)
    log('Performing OpenStack upgrade to %s.' % (new_os_rel))
    configure_installation_source(new_src)
    # Non-interactive conffile handling for the dist-upgrade.
    dpkg_opts = [
        '--option', 'Dpkg::Options::=--force-confnew',
        '--option', 'Dpkg::Options::=--force-confdef',
    ]
    apt_update()
    apt_upgrade(options=dpkg_opts, fatal=True, dist=True)
    packages = BASE_PACKAGES + BASE_SERVICES
    apt_install(packages=packages, options=dpkg_opts, fatal=True)

    # set CONFIGS to load templates from new release and regenerate config
    configs.set_release(openstack_release=new_os_rel)
    configs.write_all()
    migrate_database()
def config_changed():
    """Handle config-changed: apply upgrades, regenerate configuration
    and re-fire dependent relation hooks.
    """
    # neutron-server runs if < juno. Neutron-server creates mysql tables
    # which will subsequently cause db migrations to fail if >= juno.
    # Disable neutron-server if >= juno
    if os_release('nova-common') >= 'juno':
        # BUG FIX: the override content is a str, so the file must be
        # opened in text mode ('w'); 'wb' raises TypeError on Python 3.
        with open('/etc/init/neutron-server.override', 'w') as out:
            out.write('manual\n')
    if config('prefer-ipv6'):
        status_set('maintenance', 'configuring ipv6')
        setup_ipv6()
        sync_db_with_multi_ipv6_addresses(config('database'),
                                          config('database-user'),
                                          relation_prefix='nova')

    global CONFIGS
    if git_install_requested():
        status_set('maintenance', 'Running Git install')
        if config_value_changed('openstack-origin-git'):
            git_install(config('openstack-origin-git'))
    elif not config('action-managed-upgrade'):
        if openstack_upgrade_available('nova-common'):
            status_set('maintenance', 'Running openstack upgrade')
            CONFIGS = do_openstack_upgrade(CONFIGS)
            [neutron_api_relation_joined(rid=rid, remote_restart=True)
                for rid in relation_ids('neutron-api')]
            # NOTE(jamespage): Force re-fire of shared-db joined hook
            # to ensure that nova_api database is setup if required.
            [db_joined(relation_id=r_id)
                for r_id in relation_ids('shared-db')]
    save_script_rc()
    configure_https()
    CONFIGS.write_all()
    if console_attributes('protocol'):
        if not git_install_requested():
            status_set('maintenance', 'Configuring guest console access')
            apt_update()
            packages = console_attributes('packages') or []
            filtered = filter_installed_packages(packages)
            if filtered:
                apt_install(filtered, fatal=True)

    [compute_joined(rid=rid) for rid in relation_ids('cloud-compute')]
    for r_id in relation_ids('identity-service'):
        identity_joined(rid=r_id)
    for rid in relation_ids('zeromq-configuration'):
        zeromq_configuration_relation_joined(rid)
    [cluster_joined(rid) for rid in relation_ids('cluster')]
    update_nrpe_config()

    # If the region value has changed, notify the cloud-compute relations
    # to ensure the value is propagated to the compute nodes.
    if config_value_changed('region'):
        for rid in relation_ids('cloud-compute'):
            for unit in related_units(rid):
                compute_changed(rid, unit)

    update_nova_consoleauth_config()
def install():
    """Install hook for swift-proxy.

    Configures the package source, installs packages, initializes the
    swift rings and prepares the www directory used to distribute rings
    to storage nodes.
    """
    execd_preinstall()
    src = config('openstack-origin')
    if src != 'distro':
        openstack.configure_installation_source(src)
    apt_update(fatal=True)
    rel = openstack.get_os_codename_install_source(src)

    pkgs = determine_packages(rel)
    apt_install(pkgs, fatal=True)
    apt_install(extra_pkgs, fatal=True)
    ensure_swift_dir()
    # initialize new storage rings.
    # BUG FIX: dict.iteritems() and the bare 0755 octal literal are
    # Python-2-only; items() and 0o755 work on Python 2.6+ and 3.
    for _name, ring_path in SWIFT_RINGS.items():
        initialize_ring(ring_path,
                        config('partition-power'),
                        config('replicas'),
                        config('min-hours'))

    # configure a directory on webserver for distributing rings.
    if not os.path.isdir(WWW_DIR):
        os.mkdir(WWW_DIR, 0o755)
    uid, gid = swift_user()
    os.chown(WWW_DIR, uid, gid)
def install():
    """Install hook: configure sources, upgrade existing packages and
    install the charm payload, then wait for the controller relation.
    """
    status_set('maintenance', 'Installing...')
    configure_sources(True, "install-sources", "install-keys")
    apt_update(fatal=True)
    apt_upgrade(fatal=True, dist=False)
    apt_install(PACKAGES, fatal=True)
    status_set("blocked", "Missing relation to contrail-controller")
def install():
    """Install the docker daemon, and supporting tooling."""
    # Often when building layer-docker based subordinates, you dont need
    # to incur the overhead of installing docker. This tuneable layer
    # option allows you to disable the exec of that install routine, and
    # instead short circuit immediately to docker.available, so you can
    # charm away!
    layer_opts = layer.options('docker')
    if layer_opts['skip-install']:
        set_state('docker.available')
        set_state('docker.ready')
        return

    status_set('maintenance', 'Installing AUFS and other tools')
    kernel_release = check_output(['uname', '-r']).rstrip()
    packages = [
        'aufs-tools',
        'git',
        # BUG FIX: check_output() returns bytes on Python 3; without
        # decoding the package name renders as "linux-image-extra-b'...'".
        'linux-image-extra-{0}'.format(kernel_release.decode('utf-8')),
    ]
    apt_update()
    apt_install(packages)
    # Install docker-engine from apt.
    install_from_apt()

    opts = DockerOpts()
    render('docker.defaults', '/etc/default/docker', {'opts': opts.to_s()})

    status_set('active', 'Docker installed, cycling for extensions')
    set_state('docker.ready')

    # Make with the adding of the users to the groups
    check_call(['usermod', '-aG', 'docker', 'ubuntu'])
def install(): """ Install the docker daemon, and supporting tooling. :return: None or False """ # Switching runtimes causes a reinstall so remove any holds that exist. unhold_all() status_set('maintenance', 'Installing AUFS and other tools.') kernel_release = check_output(['uname', '-r']).rstrip() packages = [ 'aufs-tools', 'git', 'linux-image-extra-{}'.format(kernel_release.decode('utf-8')), ] apt_update() apt_install(packages) # Install docker-engine from apt. runtime = determine_apt_source() remove_state('nvidia-docker.supported') remove_state('nvidia-docker.installed') if runtime == 'upstream': install_from_upstream_apt() elif runtime == 'nvidia': set_state('nvidia-docker.supported') install_from_nvidia_apt() set_state('nvidia-docker.installed') elif runtime == 'apt': install_from_archive_apt() elif runtime == 'custom': if not install_from_custom_apt(): return False # If install fails, stop. else: hookenv.log('Unknown runtime {}'.format(runtime)) return False charm_config = check_for_juju_https_proxy(config) validate_config(charm_config) opts = DockerOpts() render('docker.defaults', '/etc/default/docker', { 'opts': opts.to_s(), 'docker_runtime': runtime }) render('docker.systemd', '/lib/systemd/system/docker.service', charm_config) reload_system_daemons() hold_all() hookenv.log( 'Holding docker-engine and docker.io packages at current revision.') host.service_restart('docker') hookenv.log('Docker installed, setting "docker.ready" state.') set_state('docker.ready') # Make with the adding of the users to the groups check_call(['usermod', '-aG', 'docker', 'ubuntu'])
def config_changed():
    """Handle config-changed for ceph-osd: install vaultlocker when
    needed, validate config, then prepare and activate disks.
    """
    # Determine whether vaultlocker is required and install
    if use_vaultlocker():
        installed = len(filter_installed_packages(['vaultlocker'])) == 0
        if not installed:
            add_source('ppa:openstack-charmers/vaultlocker')
            apt_update(fatal=True)
            apt_install('vaultlocker', fatal=True)

    # Check if an upgrade was requested
    check_for_upgrade()

    # Pre-flight checks
    if config('osd-format') not in ceph.DISK_FORMATS:
        log('Invalid OSD disk format configuration specified',
            level=ERROR)
        sys.exit(1)

    if config('prefer-ipv6'):
        assert_charm_supports_ipv6()

    sysctl_dict = config('sysctl')
    if sysctl_dict:
        create_sysctl(sysctl_dict, '/etc/sysctl.d/50-ceph-osd-charm.conf')

    # Unmount any configured ephemeral mountpoint before using the disk.
    e_mountpoint = config('ephemeral-unmount')
    if e_mountpoint and ceph.filesystem_mounted(e_mountpoint):
        umount(e_mountpoint)
    prepare_disks_and_activate()
    install_apparmor_profile()
    add_to_updatedb_prunepath(STORAGE_MOUNT_PATH)
def install_deps(self):
    """Add the mono-project apt source and install this charm's
    dependency packages (self.deps).
    """
    # NOTE(review): the '{series}' placeholder is never substituted --
    # this literal string is passed to add_source as-is; confirm whether
    # .format(series=...) was intended here.
    fetch.add_source(
        "deb https://download.mono-project.com/repo/ubuntu "
        "stable-{series} main",
        key="3FA7E0328081BFF6A14DA29AA6A19B38D3D831EF")
    # Remember which source was configured for later reference.
    self.kv.set('mono-source', 'mono-project')
    fetch.apt_update()
    fetch.apt_install(self.deps)
def do_openstack_upgrade(configs):
    """
    Perform an upgrade.  Takes care of upgrading packages, rewriting
    configs, database migrations and potentially any other post-upgrade
    actions.
    """
    new_src = config('openstack-origin')
    new_os_rel = get_os_codename_install_source(new_src)
    log('Performing OpenStack upgrade to %s.' % (new_os_rel))
    configure_installation_source(new_src)

    # NOTE(jamespage):
    # Write-out new openstack release configuration files prior to
    # upgrading to avoid having to restart services immediately after
    # upgrade.
    configs = register_configs(new_os_rel)
    configs.write_all()

    # Non-interactive conffile handling for the dist-upgrade.
    dpkg_opts = [
        '--option', 'Dpkg::Options::=--force-confnew',
        '--option', 'Dpkg::Options::=--force-confdef',
    ]
    apt_update(fatal=True)
    apt_upgrade(options=dpkg_opts, fatal=True, dist=True)

    # The cached version of os_release will now be invalid as the pkg
    # version should have changed during the upgrade.
    reset_os_release()

    apt_install(get_early_packages(), fatal=True)
    apt_install(get_packages(), fatal=True)
def storage_backend(rel_id=None):
    """Install navicli and publish the VNX subordinate configuration on
    the storage-backend relation.

    :param rel_id: optional relation id to set the data on.
    :raises ValueError: if the navicli source or key config is invalid.
    """
    # REQUIRED: add navicli source and key
    navicli_source = config_get('navicli_source')
    navicli_key = config_get('navicli_source_key')
    juju_log('storage_backend: navicli_source=%s navicli_key=%s'
             % (navicli_source, navicli_key))
    if not valid_source(navicli_source) or not valid_key(navicli_key):
        # BUG FIX: a bare ``raise`` with no active exception only produces
        # RuntimeError('No active exception to re-raise'); raise an
        # explicit, descriptive exception instead.
        raise ValueError('Invalid navicli source or key configuration')
    # add_source(navicli_source, navicli_key)
    # NOTE(review): '-type d' looks wrong for locating a .deb *file* --
    # confirm whether '-type f' was intended.
    os.system(
        'find /var/lib/juju -type d -name "navicli_7.33.2.0.51-amd64.deb" '
        '-exec sudo dpkg -i {} \\;'
    )

    # update and install packages
    apt_update()
    # Non-interactive conffile handling during install.
    dpkg_opts = [
        '--option', 'Dpkg::Options::=--force-confnew',
        '--option', 'Dpkg::Options::=--force-confdef',
    ]
    apt_install(packages=PACKAGES, options=dpkg_opts, fatal=True)
    relation_set(relation_id=rel_id,
                 backend_name=service_name(),
                 subordinate_configuration=json.dumps(
                     VNXSubordinateContext()()))
def install_packages():
    """Install the TrilioVault datamover API (dmapi) service.

    Adds the TrilioVault gemfury repository and the configured OpenStack
    origin, installs dmapi, places its systemd unit and starts it.
    """
    # Add TrilioVault repository to install required package
    # and add queens repo to install nova libraries
    if not add_user():
        log("Adding dmapi user failed!")
        return
    # BUG FIX: os.system('sudo echo "{}" > file') is shell-injection
    # prone and the output redirection is performed by the *unprivileged*
    # shell, not by sudo; write the sources file directly instead.
    with open('/etc/apt/sources.list.d/trilio-gemfury-sources.list',
              'w') as sources:
        sources.write('{}\n'.format(config('triliovault-pkg-source')))
    new_src = config('openstack-origin')
    configure_installation_source(new_src)
    if config('python-version') == 2:
        dmapi_pkg = 'dmapi'
    else:
        dmapi_pkg = 'python3-dmapi'
    apt_update()
    dmapi.install()
    # Placing the service file
    os.system('sudo cp files/trilio/tvault-datamover-api.service '
              '/etc/systemd/system/')
    chownr('/var/log/dmapi', DMAPI_USR, DMAPI_GRP)
    mkdir('/var/cache/dmapi', DMAPI_USR, DMAPI_GRP,
          perms=0o755)  # 0o755 == 493, the original literal value
    os.system('sudo systemctl enable tvault-datamover-api')
    service_restart('tvault-datamover-api')
    application_version_set(get_new_version(dmapi_pkg))
    reactive.set_state('charm.installed')
def install():
    """Install hook: install packages and git payload, copy bundled
    files to /usr/bin, open ports and keep services stopped until the
    db relation is joined.
    """
    status_set('maintenance', 'Executing pre-install')
    execd_preinstall()
    configure_installation_source(config('openstack-origin'))

    status_set('maintenance', 'Installing apt packages')
    apt_update()
    apt_install(determine_packages(), fatal=True)

    if placement_api_enabled():
        disable_package_apache_site()

    git_install(config('openstack-origin-git'))

    # Copy any plain files shipped in the charm's files/ dir to /usr/bin.
    _files = os.path.join(charm_dir(), 'files')
    if os.path.isdir(_files):
        for f in os.listdir(_files):
            f = os.path.join(_files, f)
            if os.path.isfile(f):
                log('Installing %s to /usr/bin' % f)
                shutil.copy2(f, '/usr/bin')

    [open_port(port) for port in determine_ports()]

    msg = 'Disabling services into db relation joined'
    log(msg)
    status_set('maintenance', msg)
    disable_services()
    cmd_all_services('stop')
def do_openstack_pkg_upgrade(self):
    """Upgrade OpenStack packages and snaps

    :returns: None
    """
    new_src = self.config[self.source_config_key]
    new_os_rel = os_utils.get_os_codename_install_source(new_src)
    hookenv.log('Performing OpenStack upgrade to %s.' % (new_os_rel))

    # TODO(jamespage): Deal with deb->snap->deb migrations
    if os_utils.snap_install_requested() and self.all_snaps:
        os_utils.install_os_snaps(
            snaps=os_utils.get_snaps_install_info_from_origin(
                self.all_snaps,
                self.config[self.source_config_key],
                mode=self.snap_mode),
            refresh=True)

    os_utils.configure_installation_source(new_src)
    fetch.apt_update()

    # Non-interactive conffile handling for the dist-upgrade.
    dpkg_opts = [
        '--option', 'Dpkg::Options::=--force-confnew',
        '--option', 'Dpkg::Options::=--force-confdef',
    ]
    fetch.apt_upgrade(
        options=dpkg_opts,
        fatal=True,
        dist=True)
    fetch.apt_install(
        packages=self.all_packages,
        options=dpkg_opts,
        fatal=True)
    self.remove_obsolete_packages()
    # Record the release the charm is now running.
    self.release = new_os_rel
def do_openstack_upgrade(configs):
    """
    Perform an upgrade.  Takes care of upgrading packages, rewriting
    configs, database migrations and potentially any other post-upgrade
    actions.

    :param configs: The charms main OSConfigRenderer object.
    """
    new_src = config('openstack-origin')
    new_os_rel = get_os_codename_install_source(new_src)
    log('Performing OpenStack upgrade to %s.' % (new_os_rel))
    configure_installation_source(new_src)
    # Non-interactive conffile handling for the dist-upgrade.
    dpkg_opts = [
        '--option', 'Dpkg::Options::=--force-confnew',
        '--option', 'Dpkg::Options::=--force-confdef',
    ]
    apt_update(fatal=True)
    apt_upgrade(options=dpkg_opts, fatal=True, dist=True)
    # Invalidate the cached release; package versions changed above.
    reset_os_release()
    apt_install(determine_packages(), fatal=True)
    remove_old_packages()

    # set CONFIGS to load templates from new release
    configs.set_release(openstack_release=new_os_rel)
def do_openstack_upgrade(configs):
    """Perform an upgrade of glance.

    Takes care of upgrading packages, rewriting configs + database
    migration and potentially any other post-upgrade actions.

    :param configs: The charms main OSConfigRenderer object.
    """
    new_src = config('openstack-origin')
    new_os_rel = get_os_codename_install_source(new_src)
    log('Performing OpenStack upgrade to %s.' % (new_os_rel))
    configure_installation_source(new_src)
    # Non-interactive conffile handling for the dist-upgrade.
    dpkg_opts = [
        '--option', 'Dpkg::Options::=--force-confnew',
        '--option', 'Dpkg::Options::=--force-confdef',
    ]
    apt_update()
    apt_upgrade(options=dpkg_opts, fatal=True, dist=True)
    # Invalidate the cached release; package versions changed above.
    reset_os_release()
    apt_install(determine_packages(), fatal=True)

    # set CONFIGS to load templates from new release and regenerate config
    configs.set_release(openstack_release=new_os_rel)
    configs.write_all()

    [service_stop(s) for s in services()]
    if is_elected_leader(CLUSTER_RES):
        migrate_database()
    # Don't start services if the unit is supposed to be paused.
    if not is_unit_paused_set():
        [service_start(s) for s in services()]
def config_changed():
    """Handle a change to the charm configuration.

    Re-renders configs, performs an OpenStack upgrade when one is
    available (and not action-managed), and re-fires the relations that
    depend on config-derived values.
    """
    # neutron-server runs if < juno. Neutron-server creates mysql tables
    # which will subsequently cause db migrations to fail if >= juno.
    # Disable neutron-server if >= juno
    # NOTE(review): lexicographic comparison of codenames happens to order
    # these releases correctly, but CompareOpenStackReleases is the safe
    # idiom -- confirm before extending this check.
    if os_release('nova-common') >= 'juno':
        # BUG FIX: mode was 'wb' while writing a str, which raises
        # TypeError under Python 3; text mode is correct on both 2 and 3.
        with open('/etc/init/neutron-server.override', 'w') as out:
            out.write('manual\n')
    if config('prefer-ipv6'):
        status_set('maintenance', 'configuring ipv6')
        setup_ipv6()
        sync_db_with_multi_ipv6_addresses(config('database'),
                                          config('database-user'),
                                          relation_prefix='nova')
    global CONFIGS
    if git_install_requested():
        status_set('maintenance', 'Running Git install')
        if config_value_changed('openstack-origin-git'):
            git_install(config('openstack-origin-git'))
    elif not config('action-managed-upgrade'):
        if openstack_upgrade_available('nova-common'):
            status_set('maintenance', 'Running openstack upgrade')
            CONFIGS = do_openstack_upgrade(CONFIGS)
            # Idiom fix: for-loops instead of side-effect comprehensions.
            for rid in relation_ids('neutron-api'):
                neutron_api_relation_joined(rid=rid, remote_restart=True)
            # NOTE(jamespage): Force re-fire of shared-db joined hook
            # to ensure that nova_api database is setup if required.
            for r_id in relation_ids('shared-db'):
                db_joined(relation_id=r_id)
    save_script_rc()
    configure_https()
    CONFIGS.write_all()
    if console_attributes('protocol'):
        if not git_install_requested():
            status_set('maintenance', 'Configuring guest console access')
            apt_update()
            packages = console_attributes('packages') or []
            filtered = filter_installed_packages(packages)
            if filtered:
                apt_install(filtered, fatal=True)
    for rid in relation_ids('cloud-compute'):
        compute_joined(rid=rid)
    for r_id in relation_ids('identity-service'):
        identity_joined(rid=r_id)
    for rid in relation_ids('zeromq-configuration'):
        zeromq_configuration_relation_joined(rid)
    for rid in relation_ids('cluster'):
        cluster_joined(rid)
    update_nrpe_config()
    # If the region value has changed, notify the cloud-compute relations
    # to ensure the value is propagated to the compute nodes.
    if config_value_changed('region'):
        for rid in relation_ids('cloud-compute'):
            for unit in related_units(rid):
                compute_changed(rid, unit)
    update_nova_consoleauth_config()
def do_openstack_upgrade(configs):
    """Upgrade to the configured OpenStack release.

    Handles the package upgrade and re-targets the config renderer at
    the new release's templates.

    :param configs: The charms main OSConfigRenderer object.
    """
    source = config('openstack-origin')
    target = get_os_codename_install_source(source)
    log('Performing OpenStack upgrade to %s.' % (target))

    configure_installation_source(source)
    apt_options = [
        '--option', 'Dpkg::Options::=--force-confnew',
        '--option', 'Dpkg::Options::=--force-confdef',
    ]
    apt_update(fatal=True)
    apt_upgrade(options=apt_options, fatal=True, dist=True)

    # set CONFIGS to load templates from new release
    configs.set_release(openstack_release=target)
def install():
    """Install keystone from the configured origin.

    Installs either the keystone snap or the deb packages depending on
    the configured origin, then stops/pauses services that should not
    run until the charm has been configured, and applies any policy
    overrides.
    """
    status_set('maintenance', 'Executing pre-install')
    execd_preinstall()
    configure_installation_source(config('openstack-origin'))
    status_set('maintenance', 'Installing apt packages')
    apt_update()
    apt_install(determine_packages(), fatal=True)

    if snap_install_requested():
        status_set('maintenance', 'Installing keystone snap')
        # NOTE(thedac) Setting devmode until LP#1719636 is fixed
        install_os_snaps(
            get_snaps_install_info_from_origin(
                ['keystone'],
                config('openstack-origin'),
                mode='devmode'))
        post_snap_install()
        # Keep the snap's own service down; the charm manages start-up.
        service_stop('snap.keystone.*')
    else:
        # unconfigured keystone service will prevent start of haproxy in some
        # circumstances. make sure haproxy runs. LP #1648396
        service_stop('keystone')
        service_start('haproxy')
        if run_in_apache():
            # keystone is served via apache2; its standalone service and
            # any unused apache sites must not be active.
            disable_unused_apache_sites()
            service_pause('keystone')

    # call the policy overrides handler which will install any policy overrides
    maybe_do_policyd_overrides(
        os_release('keystone'),
        'keystone',
        restart_handler=lambda: service_restart('apache2'))
def do_openstack_upgrade(configs):
    """Perform an upgrade of neutron.

    Takes care of upgrading packages, rewriting configs, database
    migrations and potentially any other post-upgrade actions.

    :param configs: The charms main OSConfigRenderer object.
    """
    new_src = config('openstack-origin')
    new_os_rel = get_os_codename_install_source(new_src)
    log('Performing OpenStack upgrade to %s.' % (new_os_rel))

    configure_installation_source(new_src)
    dpkg_opts = [
        '--option', 'Dpkg::Options::=--force-confnew',
        '--option', 'Dpkg::Options::=--force-confdef',
    ]
    apt_update(fatal=True)
    apt_upgrade(options=dpkg_opts, fatal=True, dist=True)
    pkgs = determine_packages(new_os_rel)
    # Sort packages just to make unit tests easier
    pkgs.sort()
    apt_install(packages=pkgs, options=dpkg_opts, fatal=True)

    # set CONFIGS to load templates from new release
    configs.set_release(openstack_release=new_os_rel)
    # Before kilo it's nova-cloud-controllers job
    # (removed: commented-out stamp_neutron_database() call and the
    # cur_os_rel local that existed only to feed it)
    if is_elected_leader(CLUSTER_RES):
        migrate_neutron_database()
def upgrade_charm():
    """Handle charm upgrade.

    Refreshes the apt source, migrates legacy password files to their
    replicated location and repairs a historic filename typo.
    """
    pre_install_hooks()
    add_source(config('source'), config('key'))
    apt_update(fatal=True)

    # Ensure older passwd files in /var/lib/juju are moved to
    # /var/lib/charm/<service> which will end up replicated if clustered
    for f in [
        f for f in os.listdir('/var/lib/juju')
        if os.path.isfile(os.path.join('/var/lib/juju', f))
    ]:
        if f.endswith('.passwd'):
            s = os.path.join('/var/lib/juju', f)
            d = os.path.join('/var/lib/charm/{}'.format(service_name()), f)
            log('upgrade_charm: Migrating stored passwd'
                ' from %s to %s.' % (s, d))
            shutil.move(s, d)
    if is_elected_leader('res_rabbitmq_vip'):
        rabbit.migrate_passwords_to_peer_relation()

    # explicitly update buggy file name naigos.passwd
    # BUG FIX: the paths were relative ('var/lib/rabbitmq'), so they
    # resolved against the hook's CWD and the rename never matched the
    # real file; use absolute paths.
    old = os.path.join('/var/lib/rabbitmq', 'naigos.passwd')
    if os.path.isfile(old):
        new = os.path.join('/var/lib/rabbitmq', 'nagios.passwd')
        shutil.move(old, new)
def install():
    """Install the DHCP server packages and flag completion."""
    hookenv.log('Installing isc-dhcp')
    fetch.apt_update()
    required = ['isc-dhcp-server', 'iptables-persistent']
    # Only install what is not already present.
    fetch.apt_install(fetch.filter_installed_packages(required))
    set_state('dhcp-server.installed')
def install():
    """Stage bundled repos/apt config on the unit and install VSM.

    Copies the repositories and apt source definitions shipped with the
    charm into place, installs the VSM packages and then the ceph
    pre-install packages from the configured source.
    """
    juju_log('**********install.real')
    base = charm_dir()
    # (relative charm path, destination) pairs synced onto the unit.
    payloads = [
        ('/packages/vsm-dep-repo', '/opt'),
        ('/packages/vsmrepo', '/opt'),
        ('/files/apt.conf', '/etc/apt'),
        ('/files/vsm.list', '/etc/apt/sources.list.d'),
        ('/files/vsm-dep.list', '/etc/apt/sources.list.d'),
    ]
    for rel_src, dest in payloads:
        rsync(base + rel_src, dest)
    apt_update()
    apt_install(VSM_PACKAGES)
    juju_log('**********finished to install vsm vsm-dashboard python-vsmclient')
    add_source(config('ceph-source'), config('ceph-key'))
    apt_update(fatal=True)
    apt_install(packages=PRE_INSTALL_PACKAGES, fatal=True)
def install():
    """Install neutron-api from the configured origin.

    Configures installation sources (including plugin-specific ones),
    installs packages, runs the git install and opens the API ports.
    """
    status_set('maintenance', 'Executing pre-install')
    execd_preinstall()
    openstack_origin = config('openstack-origin')
    configure_installation_source(openstack_origin)
    neutron_plugin = config('neutron-plugin')
    additional_install_locations(neutron_plugin, openstack_origin)

    add_source(config('extra-source'), config('extra-key'))
    status_set('maintenance', 'Installing apt packages')
    apt_update(fatal=True)
    packages = determine_packages(openstack_origin)
    apt_install(packages, fatal=True)

    status_set('maintenance', 'Git install')
    git_install(config('openstack-origin-git'))

    # Idiom fix: plain for-loop instead of a list comprehension executed
    # purely for its side effect.
    for port in determine_ports():
        open_port(port)

    if neutron_plugin == 'midonet':
        mkdir('/etc/neutron/plugins/midonet', owner='neutron',
              group='neutron', perms=0o755, force=False)
def install():
    """Install the neutron gateway from the configured origin (git flavour)."""
    status_set('maintenance', 'Executing pre-install')
    execd_preinstall()

    origin = config('openstack-origin')
    # On precise, 'distro' neutron is too old; pin the icehouse cloud
    # archive instead.
    if (lsb_release()['DISTRIB_CODENAME'] == 'precise' and
            origin == 'distro'):
        origin = 'cloud:precise-icehouse'
    configure_installation_source(origin)

    status_set('maintenance', 'Installing apt packages')
    apt_update(fatal=True)
    apt_install('python-six', fatal=True)  # Force upgrade

    # Guard clause: refuse to continue with an invalid plugin config.
    if not valid_plugin():
        msg = 'Please provide a valid plugin config'
        log(msg, level=ERROR)
        status_set('blocked', msg)
        sys.exit(1)

    apt_install(filter_installed_packages(get_early_packages()),
                fatal=True)
    apt_install(filter_installed_packages(get_packages()),
                fatal=True)
    status_set('maintenance', 'Git install')
    git_install(config('openstack-origin-git'))

    # Legacy HA for Icehouse
    update_legacy_ha_files()

    # Install systemd overrides to remove service startup race between
    # n-gateway and n-cloud-controller services.
    install_systemd_override()
def install_couchpotato():
    """Install CouchPotato: service user, dependencies, git checkout,
    systemd unit, and open its port.
    """
    hookenv.status_set('maintenance', 'creating user')
    host.adduser(cp.user, password="", shell='/bin/False',
                 home_dir=cp.home_dir)
    hookenv.status_set('maintenance', 'installing dependencies')
    fetch.apt_update()
    fetch.apt_install(['git', 'python2.7', 'python-openssl', 'python-lxml'])
    hookenv.status_set('maintenance', 'cloning repository')
    # A stale checkout would make `git clone` fail; start fresh.
    if os.path.isdir(cp.install_dir):
        shutil.rmtree(cp.install_dir)
    # SECURITY/idiom fix: pass an argv list with shell=False instead of a
    # concatenated command string with shell=True, which was vulnerable
    # to shell metacharacters in cp.install_dir.
    subprocess.check_call([
        'git', 'clone',
        'https://github.com/CouchPotato/CouchPotatoServer.git',
        cp.install_dir,
    ])
    host.chownr(cp.home_dir, owner=cp.user, group=cp.user)
    context = {'couchpath': cp.executable, 'couchuser': cp.user}
    templating.render(cp.service_name,
                      '/etc/systemd/system/{}'.format(cp.service_name),
                      context)
    cp.enable()
    hookenv.open_port(cp.charm_config['port'], 'TCP')
    set_state('couchpotato.installed')
    hookenv.status_set('maintenance', 'installation complete')
def install(self):
    """Install the arista packages if any of them are missing.

    Adds the configured apt source and installs only when at least one
    of ``self.packages`` is not yet present on the unit.
    """
    log('Starting arista installation')
    # Idiom fix: test the missing-package list directly instead of the
    # double negative `installed = len(...) == 0; if not installed`.
    missing = filter_installed_packages(self.packages)
    if missing:
        add_source(config('source'))
        apt_update(fatal=True)
        # NOTE(review): only the first listed package is installed
        # explicitly; presumably it pulls the others in as dependencies
        # -- confirm against the package metadata.
        apt_install(self.packages[0], fatal=True)
def install_packages():
    """Install the TrilioVault data-mover API (dmapi) service.

    Validates the configured TrilioVault IP, adds the TrilioVault and
    queens cloud-archive repositories, installs the dmapi package, puts
    its systemd unit in place and starts it.
    """
    # Add TrilioVault repository to install required package
    # and add queens repo to install nova libraries
    if not validate_ip(config('triliovault-ip')):
        log("Invalid IP address !")
        reactive.status_set(
            'blocked',
            'Invalid IP address, please provide correct IP address')
        return
    if not add_user():
        log("Adding dmapi user failed!")
        return
    add_source('deb http://{}:8085 deb-repo/'.format(config('triliovault-ip')))
    # NOTE(review): the os.system() calls below run through a shell and
    # their return codes are ignored, so failures here are silent --
    # consider subprocess.check_call with argv lists if that best-effort
    # behaviour is not intentional.
    os.system('sudo add-apt-repository cloud-archive:queens')
    apt_update()
    dmapi.install()
    # NOTE(review): repo is unauthenticated, hence --allow-unauthenticated;
    # confirm the TrilioVault repo really ships no signing key.
    apt_install(['dmapi'], options=['--allow-unauthenticated'], fatal=True)
    # Placing the service file
    os.system('sudo cp files/trilio/tvault-datamover-api.service '
              '/etc/systemd/system/')
    chownr('/var/log/dmapi', DMAPI_USR, DMAPI_GRP)
    os.system('sudo systemctl enable tvault-datamover-api')
    service_restart('tvault-datamover-api')
    reactive.set_state('charm.installed')
def install():
    """Configure the package source and install the ceph packages."""
    source = config('source')
    key = config('key')
    add_source(source, key)
    apt_update(fatal=True)
    pkgs = ceph.determine_packages()
    apt_install(packages=pkgs, fatal=True)
    # Optional network tuning, driven by charm config.
    if config('autotune'):
        tune_network_adapters()
    install_udev_rules()
def do_openstack_upgrade(configs):
    """Perform an upgrade of cinder.

    Upgrades packages and regenerates all configuration for the target
    release; the elected leader additionally runs the database migration.

    :param configs: The charms main OSConfigRenderer object.
    """
    source = config('openstack-origin')
    target = get_os_codename_install_source(source)
    juju_log('Performing OpenStack upgrade to %s.' % (target))

    configure_installation_source(source)
    apt_options = [
        '--option', 'Dpkg::Options::=--force-confnew',
        '--option', 'Dpkg::Options::=--force-confdef',
    ]
    apt_update()
    apt_install(packages=determine_packages(),
                options=apt_options,
                fatal=True)

    # set CONFIGS to load templates from new release and regenerate config
    configs.set_release(openstack_release=target)
    configs.write_all()

    # Only one unit should drive the schema migration.
    if eligible_leader(CLUSTER_RES):
        migrate_database()
def install():
    """Install the neutron gateway packages from the configured origin."""
    status_set('maintenance', 'Executing pre-install')
    execd_preinstall()
    src = config('openstack-origin')
    # On precise, 'distro' neutron is too old; use the icehouse cloud
    # archive instead.
    if (lsb_release()['DISTRIB_CODENAME'] == 'precise'
            and src == 'distro'):
        src = 'cloud:precise-icehouse'
    configure_installation_source(src)
    status_set('maintenance', 'Installing apt packages')
    apt_update(fatal=True)
    apt_install('python-six', fatal=True)  # Force upgrade
    if valid_plugin():
        apt_install(filter_installed_packages(get_early_packages()),
                    fatal=True)
        apt_install(filter_installed_packages(get_packages()),
                    fatal=True)
    else:
        message = 'Please provide a valid plugin config'
        log(message, level=ERROR)
        status_set('blocked', message)
        sys.exit(1)

    # Legacy HA for Icehouse
    update_legacy_ha_files()

    # Install systemd overrides to remove service startup race between
    # n-gateway and n-cloud-controller services.
    install_systemd_override()
def do_openstack_upgrade(configs):
    """Upgrade to the configured OpenStack release.

    Upgrades packages, clears the cached release codename, regenerates
    all configuration and restarts services (unless the unit is paused).

    :param configs: The charms main OSConfigRenderer object.
    """
    # NOTE(jamespage) horrible hack to make utils forget a cached value
    import charmhelpers.contrib.openstack.utils as utils
    utils.os_rel = None
    new_src = config('openstack-origin')
    new_os_rel = get_os_codename_install_source(new_src)
    log('Performing OpenStack upgrade to %s.' % (new_os_rel))
    configure_installation_source(new_src)
    apt_update(fatal=True)
    dpkg_opts = [
        '--option', 'Dpkg::Options::=--force-confnew',
        '--option', 'Dpkg::Options::=--force-confdef',
    ]
    apt_upgrade(options=dpkg_opts, fatal=True, dist=True)
    # NOTE(review): reset_os_release() looks like it clears the same (or a
    # newer) cache as the utils.os_rel hack above -- confirm whether the
    # hack is still needed.
    reset_os_release()
    apt_install(determine_packages(), fatal=True)
    remove_old_packages()
    configs.set_release(openstack_release=new_os_rel)
    configs.write_all()
    # Don't restart services if the unit is supposed to be paused.
    if not is_unit_paused_set():
        for s in services():
            service_restart(s)
def do_openstack_upgrade(configs):
    """Run an OpenStack release upgrade for the ceilometer agent.

    Upgrades the packages, removes obsolete ones and re-targets the
    config renderer at the new release's templates.

    :param configs: The charms main OSConfigRenderer object.
    """
    source = config('openstack-origin')
    target = get_os_codename_install_source(source)
    log('Performing OpenStack upgrade to %s.' % (target))

    configure_installation_source(source)
    apt_options = [
        '--option', 'Dpkg::Options::=--force-confnew',
        '--option', 'Dpkg::Options::=--force-confdef',
    ]
    apt_update(fatal=True)
    apt_upgrade(options=apt_options, fatal=True, dist=True)
    # Drop the cached release codename so later lookups see the new one.
    reset_os_release()
    apt_install(packages=CEILOMETER_AGENT_PACKAGES,
                options=apt_options,
                fatal=True)
    # Call apt_install a 2nd time to allow packages which are enabled
    # for specific OpenStack version to be installed . This is because
    # Openstack version for a subordinate should be derived from the
    # version of an installed package rather than relying on
    # openstack-origin which would not be present in a subordinate.
    apt_install(get_packages(), fatal=True)
    remove_old_packages()

    # set CONFIGS to load templates from new release
    configs.set_release(openstack_release=target)
def install():
    """Install and configure isc-dhcp-server.

    Finds the interface attached to the configured dhcp-network, renders
    the DHCP server configuration for it (configuring this host as a NAT
    gateway when the default gateway is on another network) and restarts
    the service.
    """
    hookenv.log('Installing isc-dhcp')
    fetch.apt_update()
    fetch.apt_install(fetch.filter_installed_packages(['isc-dhcp-server']))
    hookenv.log('Configuring isc-dhcp')
    dhcp_network = netaddr.IPNetwork(config()["dhcp-network"])
    dhcp_range = config()["dhcp-range"]
    dns = ", ".join(get_dns())
    dhcp_if = None
    public_ifs = []
    dhcp_netmask = None
    dhcp_addr = None
    dhcp_broadcast = None
    for interface in netifaces.interfaces():
        af_inet = netifaces.ifaddresses(interface).get(AF_INET)
        # Only interfaces with an IPv4 broadcast address are candidates.
        if af_inet and af_inet[0].get('broadcast'):
            # Perf/readability fix: reuse the af_inet mapping we already
            # fetched instead of re-querying netifaces.ifaddresses()
            # three more times per interface.
            addr = af_inet[0]['addr']
            if netaddr.IPAddress(addr) in dhcp_network:
                dhcp_if = interface
                dhcp_addr = addr
                dhcp_netmask = af_inet[0]['netmask']
                dhcp_broadcast = af_inet[0]['broadcast']
            else:
                public_ifs.append(interface)
    if not dhcp_if:
        hookenv.status_set(
            'blocked',
            'Cannot find interface that is connected to network {}.'.format(
                dhcp_network))
        return
    # If we are serving dhcp on a different network than the default gateway;
    # then configure the host as NATted gateway. Else, use host's gateway
    # for dhcp clients.
    gateway_if, gateway_ip = get_gateway()
    if gateway_if != dhcp_if:
        # Consistency fix: use hookenv.log like the rest of this hook
        # rather than bare print.
        hookenv.log('Default gateway is not on dhcp network, '
                    'configuring host as gateway.')
        gateway_ip = dhcp_addr
        configure_as_gateway(dhcp_if, public_ifs)
    templating.render(
        source='isc-dhcp-server',
        target='/etc/default/isc-dhcp-server',
        context={
            'interfaces': dhcp_if
        }
    )
    templating.render(
        source='dhcpd.conf',
        target='/etc/dhcp/dhcpd.conf',
        context={
            'subnet': dhcp_network.ip,
            'netmask': dhcp_netmask,
            # This is either the host itself or the host's gateway
            'routers': gateway_ip,
            'broadcast_address': dhcp_broadcast,
            # We just use the host's DNS settings
            'domain_name_servers': dns,
            'dhcp_range': dhcp_range,
        }
    )
    host.service_restart('isc-dhcp-server')
    # TODO: We should crash if start failed
    hookenv.status_set('active', 'Ready ({})'.format(get_pub_ip()))
    set_state('dhcp-server.installed')