Example #1
def emit_cephconf():
    networks = get_networks('ceph-public-network')
    public_network = ', '.join(networks)

    networks = get_networks('ceph-cluster-network')
    cluster_network = ', '.join(networks)

    cephcontext = {
        'auth_supported': config('auth-supported'),
        'mon_hosts': ' '.join(get_mon_hosts()),
        'fsid': leader_get('fsid'),
        'old_auth': cmp_pkgrevno('ceph', "0.51") < 0,
        'osd_journal_size': config('osd-journal-size'),
        'use_syslog': str(config('use-syslog')).lower(),
        'ceph_public_network': public_network,
        'ceph_cluster_network': cluster_network,
        'loglevel': config('loglevel'),
    }

    if config('prefer-ipv6'):
        dynamic_ipv6_address = get_ipv6_addr()[0]
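        # When no explicit public/cluster network is configured, fall back to the unit's IPv6 address.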
        if not public_network:
            cephcontext['public_addr'] = dynamic_ipv6_address
        if not cluster_network:
            cephcontext['cluster_addr'] = dynamic_ipv6_address

    # Install ceph.conf as an alternative to support
    # co-existence with other charms that write this file
    charm_ceph_conf = "/var/lib/charm/{}/ceph.conf".format(service_name())
    mkdir(os.path.dirname(charm_ceph_conf), owner=ceph.ceph_user(),
          group=ceph.ceph_user())
    render('ceph.conf', charm_ceph_conf, cephcontext, perms=0o644)
    install_alternative('ceph.conf', '/etc/ceph/ceph.conf',
                        charm_ceph_conf, 100)
Example #2
def install_dockerbeat():
    ''' Installs dockerbeat from resources, with a fallback option
    to try to fetch over the network, for 1.25.5 hosts'''

    try:
        bin_path = resource_get('dockerbeat')
    except NotImplementedError:
        # Attempt to fetch and install from configured uri with validation
        bin_path = download_from_upstream()

    full_beat_path = '/usr/local/bin/dockerbeat'

    if not bin_path:
        status_set('blocked', 'Missing dockerbeat binary')
        return

    install(bin_path, full_beat_path)
    os.chmod(full_beat_path, 0o755)

    codename = lsb_release()['DISTRIB_CODENAME']

    # render the appropriate init system's configuration
    if codename == 'trusty':
        render('upstart', '/etc/init/dockerbeat.conf', {})
    else:
        render('systemd', '/etc/systemd/system/dockerbeat.service', {})

    set_state('dockerbeat.installed')
Example #3
def render_plugin_config():
    with ensure_user_and_perms(PATHS):
        render(source='errbot_plugins_config.py.j2',
               target=PLUGINS_CONFIG_PATH,
               owner='errbot',
               perms=0o744,
               context=hookenv.config())
Example #4
def migrate_to_mount(new_path):
    """Invoked when new mountpoint appears. This function safely migrates
    MySQL data from local disk to persistent storage (only if needed)
    """
    old_path = '/var/lib/mysql'
    if os.path.islink(old_path):
        hookenv.log('{} is already a symlink, skipping migration'.format(
            old_path))
        return True
    # Ensure our new mountpoint is empty. Otherwise error and allow
    # users to investigate and migrate manually
    files = os.listdir(new_path)
    try:
        files.remove('lost+found')
    except ValueError:
        pass
    if files:
        raise RuntimeError('Persistent storage contains old data. '
                           'Please investigate and migrate data manually '
                           'to: {}'.format(new_path))
    os.chmod(new_path, 0o700)
    if os.path.isdir('/etc/apparmor.d/local'):
        render('apparmor.j2', '/etc/apparmor.d/local/usr.sbin.mysqld',
               context={'path': os.path.join(new_path, '')})
        host.service_reload('apparmor')
    host.service_stop('mysql')
    host.rsync(os.path.join(old_path, ''),  # Ensure we have trailing slashes
               os.path.join(new_path, ''),
               options=['--archive'])
    shutil.rmtree(old_path)
    os.symlink(new_path, old_path)
    host.service_start('mysql')
Example #5
def setup():
    unit_data = kv()
    if not unit_data.get('gogs.db'):
        hookenv.status_set('blocked', 'need relation to postgresql')
        return

    secret_key = unit_data.get('gogs.secret_key')
    if not secret_key:
        secret_key = base64.b64encode(os.urandom(32)).decode('utf-8')
        unit_data.set('gogs.secret_key', secret_key)

    conf = hookenv.config()
    if not conf.get('host'):
        conf['host'] = hookenv.unit_public_ip()

    root = unit_data.get('gogs.root', '')
    if root and not root.endswith('/'):
        root = root + '/'

    install_context = get_install_context()

    render(source='app.ini',
           target="/opt/gogs/custom/conf/app.ini",
           perms=0o644,
           context={
               'conf': conf,
               'db': unit_data.get('gogs.db'),
               'secret_key': secret_key,
               'root': root,
               'home': install_context['home'],
               'user': install_context['user'],
           })
    restart_service()
    hookenv.status_set('active', 'ready')
Example #6
def render_config():
    hookenv.status_set('maintenance',
                       'Generating errbot configuration file')

    config_ctx = hookenv.config()
    config_ctx['data_path'] = DATA_PATH
    config_ctx['plugin_path'] = PLUGIN_PATH
    config_ctx['log_path'] = LOG_PATH

    upstart_ctx = {
        'venv_path': VENV_PATH,
        'user': '******',
        'group': 'errbot',
        'working_dir': BASE_PATH,
        'config_path': CONFIG_PATH,
    }

    with ensure_user_and_perms(PATHS):
        render(source='errbot_config.py.j2',
               target=CONFIG_PATH,
               owner='errbot',
               perms=0o744,
               context=config_ctx)
        render(source='errbot_upstart.j2',
               target=UPSTART_PATH,
               owner='root',
               perms=0o744,
               context=upstart_ctx)

    set_state('errbot.available')
Example #7
    def __call__(self, manager, service_name, event_name):
        service = manager.get_service(service_name)
        context = {}
        for ctx in service.get('required_data', []):
            context.update(ctx)
        templating.render(self.source, self.target, context,
                          self.owner, self.group, self.perms)
Example #8
def install():
    conf = hookenv.config()
    context = get_install_context()
    gogs_bdist = hookenv.resource_get('bdist')
    check_call(["tar", "xzf", gogs_bdist], cwd="/opt")

    # Create gogs user & group
    add_group(context['group'])
    adduser(context['user'], system_user=True)

    for dir in ('.ssh', 'repositories', 'data', 'logs'):
        os.makedirs(
            os.path.join(context['home'], dir), mode=0o700, exist_ok=True)
    os.makedirs(os.path.join(context['home'], 'custom', 'conf'),
                mode=0o755, exist_ok=True)
    chownr(context['home'], context['user'], context['group'], True, True)

    render(source='upstart',
           target="/etc/init/gogs.conf",
           perms=0o644,
           context=context)
    render(source='gogs.service',
           target="/lib/systemd/system/gogs.service",
           perms=0o644,
           context=context)
    hookenv.status_set('maintenance', 'installation complete')
Example #9
def render_gearman_logging_conf():
    gearman_logging_conf = os.path.join(ZUUL_CONF_DIR, 'gearman-logging.conf')
    context = {
        'gearman_log': os.path.join(ZUUL_LOG_DIR, 'gearman-server.log')
    }
    render('gearman-logging.conf', gearman_logging_conf, context, ZUUL_USER,
           ZUUL_USER)
Example #10
def main(cluster_data={}):

    # Grab the boilerplate config entries
    cluster_data['unit_name'] = environ['JUJU_UNIT_NAME'].replace('/', '')
    cluster_data['private_address'] = private_address
    cluster_data['public_address'] = public_address
    cluster_data['cluster_state'] = 'new'

    if not leader_status:
        cluster_data['cluster_state'] = hookenv.relation_get('cluster-state')
        leader_address = hookenv.relation_get('leader-address')

        # do self registration
        if not db.get('registered'):
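            # Register with the leader over etcd's legacy ports: 4001 (client API) and 7001 (peer).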
            cmd = "/opt/etcd/etcdctl -C http://{}:4001 member add {}" \
                  " http://{}:7001".format(leader_address,
                                           cluster_data['unit_name'],
                                           private_address)
            print(cmd)
            check_call(shlex.split(cmd))
            db.set('registered', True)

    # introspect the cluster, and form the cluster string.
    # https://github.com/coreos/etcd/blob/master/Documentation/configuration.md#-initial-cluster

    templating.render('etcd.conf.jinja2', '/etc/init/etcd.conf',
                      cluster_data, owner='root', group='root')

    host.service('restart', 'etcd')
Example #11
def main(cluster_data={}):

    # Grab the boilerplate config entries
    cluster_data['unit_name'] = environ['JUJU_UNIT_NAME'].replace('/', '')
    cluster_data['private_address'] = private_address
    cluster_data['public_address'] = public_address
    cluster_data['cluster_state'] = 'new'

    if not leader_status:
        cluster_data['cluster_state'] = hookenv.relation_get('cluster-state')
        leader_address = hookenv.relation_get('leader-address')

        # do self registration
        if not db.get('registered'):
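            # Register with the leader via etcd's current default ports: 2379 (client API) and 2380 (peer).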
            cmd = "etcdctl -C http://{}:2379 member add {}" \
                  " http://{}:2380".format(leader_address,
                                           cluster_data['unit_name'],
                                           private_address)
            print(cmd)
            check_call(shlex.split(cmd))
            db.set('registered', True)

    # introspect the cluster, and form the cluster string.
    # https://github.com/coreos/etcd/blob/master/Documentation/configuration.md#-initial-cluster

    templating.render('etcd.default.jinja2', '/etc/default/etcd',
                      cluster_data, owner='root', group='root')

    host.service('stop', 'etcd')
    check_output(['rm', '-Rf', '/var/lib/etcd/default'])
    host.service('start', 'etcd')
    if leader_status:
        status_set('active', 'Etcd leader running')
    else:
        status_set('active', 'Etcd follower running')
Example #12
def run_rancherserver():
    # Render the template
    cfg = config()
    render('docker-compose.yml', 'files/rancherserver/docker-compose.yml', cfg)

    comp = Compose('files/rancherserver')
    comp.up()
Example #13
def config_with_reverseproxy(reverseproxy):
    services = reverseproxy.services()
    cfg = hookenv.config()

    for service in services:
        service_dir = '/var/lib/tor/%s' % (service['service_name'])
        if not os.path.isdir(service_dir):
            check_call(['install', '-d', service_dir, '-o', 'debian-tor', '-m', '700'])

    bridges = []
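    # The 'bridges' config option is parsed as a comma-separated list of "<address> <fingerprint>" entries.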
    for bridge in cfg.get('bridges', '').split(','):
        fields = bridge.split()
        if len(fields) > 1:
            addr, fp = fields[:2]
            bridges.append({'addr': addr, 'fingerprint': fp})

    render(source='torrc',
        target='/etc/tor/torrc',
        owner='root',
        perms=0o644,
        context={
            'cfg': cfg,
            'services': services,
            'bridges': bridges,
            'public_address': hookenv.unit_public_ip(),
            'private_address': hookenv.unit_private_ip(),
        })
    remove_state('reverseproxy.available')
    set_state('tor.start')
Example #14
def render_zuul_conf():
    gearman_start = "false"
    if is_service_enabled("gearman"):
        gearman_start = "true"
    context = {
        'gearman_host': config('gearman-server'),
        'gearman_port': config('gearman-port'),
        'gearman_internal': gearman_start,
        'gearman_log': os.path.join(ZUUL_CONF_DIR, 'gearman-logging.conf'),
        'gerrit_server': config('gerrit-server'),
        'gerrit_port': '29418',
        'gerrit_username': config('username'),
        'gerrit_sshkey': os.path.join(ZUUL_SSH_DIR, ZUUL_SSH_PRIVATE_FILE),
        'zuul_layout': os.path.join(ZUUL_CONF_DIR, 'layout.yaml'),
        'zuul_logging': os.path.join(ZUUL_CONF_DIR, 'logging.conf'),
        'zuul_pidfile': os.path.join(ZUUL_RUN_DIR, 'zuul.pid'),
        'zuul_state_dir': ZUUL_STATE_DIR,
        'zuul_status_url': config('status-url'),
        'zuul_git_dir': ZUUL_GIT_DIR,
        'zuul_url': config('zuul-url'),
        'zuul_smtp_server': config('zuul-smtp-server'),
        'zuul_smtp_from': config('zuul-smtp-from'),
        'zuul_smtp_to': config('zuul-smtp-to'),
        'merger_git_user_email': config('git-user-email'),
        'merger_git_user_name': config('git-user-name'),
        'merger_pidfile': os.path.join(ZUUL_MERGER_RUN_DIR, 'merger.pid')
    }
    zuul_conf = os.path.join(ZUUL_CONF_DIR, 'zuul.conf')
    render('zuul.conf', zuul_conf, context, ZUUL_USER, ZUUL_USER)
Example #15
    def render_configs(self, configs):
        with self.restart_on_change():
            for conf in configs:
                render(source=os.path.basename(conf),
                       template_loader=get_loader('templates/', self.release),
                       target=conf,
                       context=self.adapter_instance)
Example #16
def prepare_end_user_package():
    """ Prepare the tarball package for clients to use to connet to the
        swarm cluster using the default client credentials. """

    # If we are a follower, we dont have keys and need to fetch them
    # from leader-data, which triggered `leadership.set.client_cert`
    # So it better be there!
    if not path.exists("swarm_credentials"):
        makedirs("swarm_credentials")
        with open("swarm_credentials/key.pem", "w+") as fp:
            fp.write(leader_get("client_key"))
        with open("swarm_credentials/cert.pem", "w+") as fp:
            fp.write(leader_get("client_cert"))
        with open("swarm_credentials/ca.pem", "w+") as fp:
            fp.write(leader_get("certificate_authority"))

    # Render the client package script
    template_vars = {"public_address": unit_get("public-address")}
    render("enable.sh", "./swarm_credentials/enable.sh", template_vars)

    # clear out any stale credentials package
    if path.exists("swarm_credentials.tar"):
        remove("swarm_credentials.tar")

    cmd = "tar cvfz swarm_credentials.tar.gz swarm_credentials"
    subprocess.check_call(split(cmd))
    copyfile("swarm_credentials.tar.gz", "/home/ubuntu/swarm_credentials.tar.gz")
    set_state("client.credentials.placed")
Example #17
def configure():
    dist_dir = node_dist_dir()
    user = get_user()

    if is_systemd():
        conf_path = SYSTEMD_PATH
        template_type = 'systemd'
    else:
        conf_path = UPSTART_PATH
        template_type = 'upstart'

    with maintenance_status('Generating {} configuration'.format(
                                template_type),
                            'upstart configuration generated'):
        config_ctx = hookenv.config()
        config_ctx['working_dir'] = dist_dir
        config_ctx['user'] = user
        config_ctx['npm_cache_path'] = get_cache(dist_dir, user)
        config_ctx['bin_path'] = get_bin_path(dist_dir)
        config_ctx['enable_failover'] = str(
            config_ctx['enable_failover']).lower()
        config_ctx['local_registry_or_host_uri'] = get_local_registry_or_host(
            uri=True)

        render(source='npm-offline-registry_{}.j2'.format(template_type),
               target=conf_path,
               owner='root',
               perms=0o744,
               context=config_ctx)
        set_state('npm-offline-registry.available')
Example #18
def install():
    ''' Install the docker daemon, and supporting tooling '''
    # Often when building layer-docker based subordinates, you don't need to
    # incur the overhead of installing docker. This tuneable layer option
    # allows you to disable the exec of that install routine, and instead short
    # circuit immediately to docker.available, so you can charm away!
    layer_opts = layer.options('docker')
    if layer_opts['skip-install']:
        set_state('docker.available')
        set_state('docker.ready')
        return

    status_set('maintenance', 'Installing AUFS and other tools')
    kernel_release = check_output(['uname', '-r']).rstrip()
    packages = [
        'aufs-tools',
        'git',
        'linux-image-extra-{0}'.format(kernel_release),
    ]
    apt_update()
    apt_install(packages)
    # Install docker-engine from apt.
    install_from_apt()

    opts = DockerOpts()
    render('docker.defaults', '/etc/default/docker', {'opts': opts.to_s()})

    status_set('active', 'Docker installed, cycling for extensions')
    set_state('docker.ready')

    # Make with the adding of the users to the groups
    check_call(['usermod', '-aG', 'docker', 'ubuntu'])
Example #19
def render_systemd_conf():
    """Render fiche systemd conf
    """

    if config('fqdn'):
        server_name = config('fqdn')
    else:
        server_name = unit_public_ip()

    # Systemd vars
    SYSTEMD_CTXT = {
        'fiche_server_address': server_name,
        'fiche_server_port': config('fiche-server-port'),
        'slug_size': config('slug-size'),
        'buffer_size': config('buffer-size')
    }

    if os.path.exists('/etc/systemd/system/fiche.service'):
        os.remove('/etc/systemd/system/fiche.service')

    # Render systemd template
    render(source="fiche.service.tmpl",
           target="/etc/systemd/system/fiche.service",
           perms=0o644,
           owner="root",
           context=SYSTEMD_CTXT)

    # Open fiche server port
    open_port(config('fiche-server-port'))

    # Set 'fiche.systemd.configured'
    set_state('fiche.systemd.configured')
Example #20
def setup_cron_job(cron_spec, directories_list):
    cron_path = os.path.join(os.sep,
                             "etc",
                             "cron.d",
                             "backup")
    context = {'directories': directories_list}
    backend = Backend()

    # Overwrite the file if needed
    try:
        context['cron_spec'] = cron_spec
        context['backend'] = backend.get_backend()
        context['configdir'] = CONFIG_DIR
        # Add --vault flag if vault is related
        if relation_ids('vault'):
            context['vault'] = True
        else:
            context['vault'] = False

        render('backup_cron',
               cron_path,
               context,
               perms=0o644)
    except IOError as err:
        log("Error creating cron file: {}".format(err.message),
            level='error')
Example #21
def create_addon(template, context):
    '''Create an addon from a template'''
    source = 'addons/' + template
    target = '/etc/kubernetes/addons/' + template
    render(source, target, context)
    cmd = ['kubectl', 'apply', '-f', target]
    check_call(cmd)
Example #22
def enable_client_tls():
    """
    Copy the TLS certificates in place and generate mount points for the swarm
    manager to mount the certs. This enables client-side TLS security on the
    TCP service.
    """
    if not path.exists("/etc/docker"):
        makedirs("/etc/docker")

    kv = unitdata.kv()
    cert = kv.get("tls.server.certificate")
    with open("/etc/docker/server.pem", "w+") as f:
        f.write(cert)
    with open("/etc/docker/ca.pem", "w+") as f:
        f.write(leader_get("certificate_authority"))

    # shenanigans
    keypath = "easy-rsa/easyrsa3/pki/private/{}.key"
    server = getenv("JUJU_UNIT_NAME").replace("/", "_")
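    # The easyrsa key may be named after either the unit name or its public address, so try both.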
    if path.exists(keypath.format(server)):
        copyfile(keypath.format(server), "/etc/docker/server-key.pem")
    else:
        copyfile(keypath.format(unit_get("public-address")), "/etc/docker/server-key.pem")

    opts = DockerOpts()
    config_dir = "/etc/docker"
    cert_path = "{}/server.pem".format(config_dir)
    ca_path = "{}/ca.pem".format(config_dir)
    key_path = "{}/server-key.pem".format(config_dir)
    opts.add("tlscert", cert_path)
    opts.add("tlscacert", ca_path)
    opts.add("tlskey", key_path)
    opts.add("tlsverify", None)
    render("docker.defaults", "/etc/default/docker", {"opts": opts.to_s()})
Example #23
def create_image():
    status_set("maintenance", "creating image")
    render(source="image.yaml", target="/tmp/image.yaml", owner="openvim", perms=0o664, context={})
    cmd = 'openvim image-create /tmp/image.yaml'
    image_uuid = sh_as_openvim(cmd).split()[0]
    image_uuid = str(image_uuid, 'utf-8')
    return image_uuid
Example #24
def create_flavor():
    status_set("maintenance", "creating flavor")
    render(source="flavor.yaml", target="/tmp/flavor.yaml", owner="openvim", perms=0o664, context={})
    cmd = 'openvim flavor-create /tmp/flavor.yaml'
    flavor_uuid = sh_as_openvim(cmd).split()[0]
    flavor_uuid = str(flavor_uuid, 'utf-8')
    return flavor_uuid
Example #25
def configure_site(site, template, **kwargs):
    """ configures vhost

    Arguments:
    site: Site name
    template: template to process in templates/<template.conf>
    **kwargs: additional dict items to append to template variables exposed
              through the site.toml
    """
    hookenv.status_set('maintenance', 'Configuring site {}'.format(site))

    config = hookenv.config()
    context = load_site()
    context['host'] = config['host']
    context['port'] = config['port']
    context.update(**kwargs)
    conf_path = '/etc/nginx/sites-enabled/{}'.format(site)
    if os.path.exists(conf_path):
        os.remove(conf_path)
    render(source=template,
           target=conf_path,
           context=context)
    hookenv.log('Wrote vhost config {} to {}'.format(context, template),
                'info')

    host.service_reload('nginx')
Example #26
def _install_mattermost():
    # Backup existing installation if it exists
    backup_path = None
    if os.path.isdir('/opt/mattermost'):
        backup_path = "/opt/mattermost.back{}".format(str(datetime.datetime.now()))
        shutil.move('/opt/mattermost', backup_path)
    # Create mattermost user & group if not exists
    if not group_exists('mattermost'):
        add_group("mattermost")
    if not user_exists('mattermost'):
        adduser("mattermost", system_user=True)
    # Get and unpack resource
    mattermost_bdist = resource_get('bdist')
    extract_tarfile(mattermost_bdist, destpath="/opt")

    # Render systemd template
    render(source="mattermost.service.tmpl",
           target="/etc/systemd/system/mattermost.service",
           perms=0o644,
           owner="root",
           context={})
    check_call(['systemctl', 'daemon-reload'])
    if backup_path:
        shutil.move(
            '{}/config/config.json'.format(backup_path),
            '/opt/mattermost/config/config.json')
        shutil.move(
            '{}/data'.format(backup_path),
            '/opt/mattermost/')
    # Create dirs that don't exist yet
    for folder in ("data", "logs", "config"):
        os.makedirs("/opt/mattermost/{}".format(folder),
                    mode=0o700,
                    exist_ok=True)
    chownr("/opt/mattermost", "mattermost", "mattermost", chowntopdir=True)
Example #27
def render_config(clustered=False, hosts=[]):
    if not os.path.exists(os.path.dirname(resolve_cnf_file())):
        os.makedirs(os.path.dirname(resolve_cnf_file()))

    context = {
        'cluster_name': 'juju_cluster',
        'private_address': get_host_ip(),
        'clustered': clustered,
        'cluster_hosts': ",".join(hosts),
        'sst_method': config('sst-method'),
        'sst_password': config('sst-password'),
        'innodb_file_per_table': config('innodb-file-per-table'),
        'table_open_cache': config('table-open-cache'),
        'lp1366997_workaround': config('lp1366997-workaround'),
    }

    if config('prefer-ipv6'):
        # NOTE(hopem): this is a kludge to get percona working with ipv6.
        # See lp 1380747 for more info. This is intended as a stop gap until
        # percona package is fixed to support ipv6.
        context['bind_address'] = '::'
        context['wsrep_provider_options'] = 'gmcast.listen_addr=tcp://:::4567;'
        context['ipv6'] = True
    else:
        context['ipv6'] = False

    context.update(PerconaClusterHelper().parse_config())
    render(os.path.basename(resolve_cnf_file()),
           resolve_cnf_file(), context, perms=0o444)
Example #28
def create_virbr_network():
    status_set("maintenance", "creating virbr0 network")
    render(source="net-virbr0.yaml", target="/tmp/net-virbr0.yaml", owner="openvim", perms=0o664, context={})
    cmd = 'openvim net-create /tmp/net-virbr0.yaml'
    net_virbr0_uuid = sh_as_openvim(cmd).split()[0]
    net_virbr0_uuid = str(net_virbr0_uuid, 'utf-8')
    return net_virbr0_uuid
Example #29
def create_tenant():
    status_set("maintenance", "creating tenant")
    render(source="tenant.yaml", target="/tmp/tenant.yaml", owner="openvim", perms=0o664, context={})
    cmd = 'openvim tenant-create /tmp/tenant.yaml'
    tenant_uuid = sh_as_openvim(cmd).split()[0]
    tenant_uuid = str(tenant_uuid, 'utf-8')
    return tenant_uuid
Example #30
def write_plugin_config():
    ctx = {}
    ctx.update(contrail_api_ctx())
    ctx.update(identity_admin_ctx())
    render("ContrailPlugin.ini",
           "/etc/neutron/plugins/opencontrail/ContrailPlugin.ini",
           ctx, "root", "neutron", 0440)
Example #31
def configure_tenant_config_script():
    conf = {'tenant_config_script': hookenv.config()['tenant-config']}
    templating.render('tenant_config_script.sh',
                      '/etc/zuul/tenant_config.sh',
                      context=conf,
                      perms=0o755,
                      group='zuul',
                      owner='zuul')
    reactive.clear_flag('zuul.configured')
Example #32
def install():
    """
    Install the docker daemon, and supporting tooling.

    :return: None or False
    """
    # Switching runtimes causes a reinstall so remove any holds that exist.
    unhold_all()

    status_set('maintenance', 'Installing AUFS and other tools.')
    kernel_release = check_output(['uname', '-r']).rstrip()
    packages = [
        'aufs-tools',
        'git',
        'linux-image-extra-{}'.format(kernel_release.decode('utf-8')),
    ]
    apt_update()
    apt_install(packages)

    # Install docker-engine from apt.
    runtime = determine_apt_source()
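    # determine_apt_source() is expected to return one of 'upstream', 'nvidia', 'apt' or 'custom';
    # anything else aborts the install below.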
    remove_state('nvidia-docker.supported')
    remove_state('nvidia-docker.installed')
    if runtime == 'upstream':
        install_from_upstream_apt()
    elif runtime == 'nvidia':
        set_state('nvidia-docker.supported')
        install_from_nvidia_apt()
        set_state('nvidia-docker.installed')
    elif runtime == 'apt':
        install_from_archive_apt()
    elif runtime == 'custom':
        if not install_from_custom_apt():
            return False  # If install fails, stop.
    else:
        hookenv.log('Unknown runtime {}'.format(runtime))
        return False

    validate_config()
    opts = DockerOpts()
    render('docker.defaults', '/etc/default/docker', {
        'opts': opts.to_s(),
        'docker_runtime': runtime
    })
    render('docker.systemd', '/lib/systemd/system/docker.service', config())
    reload_system_daemons()

    hold_all()
    hookenv.log(
        'Holding docker-engine and docker.io packages at current revision.')

    host.service_restart('docker')
    hookenv.log('Docker installed, setting "docker.ready" state.')
    set_state('docker.ready')

    # Make with the adding of the users to the groups
    check_call(['usermod', '-aG', 'docker', 'ubuntu'])
Example #33
def render_nrpe_check_config(checkctxt):
    """ Write nrpe check definition """
    # Only render if we actually have cmd parameters
    if checkctxt['cmd_params']:
        render(
            'nrpe_command.tmpl',
            '/etc/nagios/nrpe.d/{}.cfg'.format(checkctxt['cmd_name']),
            checkctxt
        )
Example #34
    def install(self):
        super(charms_openstack.charm.OpenStackAPICharm, self).install()
        channel = self.config.get("channel", "stable")

        subprocess.check_call(
            ["sudo", "snap", "install",
             "--channel=%s" % channel,
             "wsgate"])

        if not ch_core.host.group_exists(WSGATE_GROUP):
            ch_core.host.add_group(
                WSGATE_GROUP, system_group=True)
        # Create the user
        if not ch_core.host.user_exists(WSGATE_USER):
            ch_core.host.adduser(
                WSGATE_USER, shell="/usr/sbin/nologin",
                system_user=True, primary_group=WSGATE_GROUP,
                home_dir=WSGATE_HOME)

        # Create the directory
        if not os.path.exists(WSGATE_HOME):
            ch_core.host.mkdir(
                WSGATE_HOME,
                owner=WSGATE_USER,
                group=WSGATE_GROUP,
                perms=0o755)
        
        if not os.path.exists(WSGATE_CFG_DIR):
            ch_core.host.mkdir(
                WSGATE_CFG_DIR,
                owner=WSGATE_USER,
                group=WSGATE_GROUP,
                perms=0o755)

        ch_core.host.chownr(
            WSGATE_HOME,
            owner=WSGATE_USER,
            group=WSGATE_GROUP,
            chowntopdir=True)

        # Systemd File
        render(
            source="wsgate.service",
            target=self.systemd_file,
            context={
                "username": WSGATE_USER,
                "cfg_file": WSGATE_CFG,
            },
            owner='root',
            perms=0o644,
        )
        cmd = ["/usr/bin/systemctl", "daemon-reload"]
        subprocess.check_output(cmd, stderr=subprocess.STDOUT)

        cmd = ["/usr/bin/systemctl", "enable", self.name]
        subprocess.check_output(cmd, stderr=subprocess.STDOUT)
Example #35
def write_vrouter_config():
    ctx = {}
    ctx.update(control_node_ctx())
    ctx.update(contrail_discovery_ctx())
    ctx.update(neutron_metadata_ctx())
    ctx.update(network_ctx())
    ctx.update(vrouter_ctx())
    ctx.update(vrouter_vgw_ctx())
    render("contrail-vrouter-agent.conf",
           "/etc/contrail/contrail-vrouter-agent.conf", ctx, perms=0440)
Example #36
def write_configs():
    ctx = _get_context()

    os.makedirs('/opt/cni/bin')
    os.makedirs('/etc/cni/net.d')
    os.makedirs('/var/lib/contrail/ports/vm')
    os.makedirs('/var/log/contrail/cni/')

    render("kube_cni.conf", "/etc/etc/10-contrail.conf", ctx, "root",
           "contrail", 0o440)
Example #37
def configure_limeds_mongodb(hostname, port):
    templating.render(
        source='org.ibcn.limeds.mongodb.MongoStorage.cfg',
        target='/opt/limeds/run/config/org.ibcn.limeds.mongodb.MongoStorage.cfg',
        context={
            'hostname': hostname,
            'port': port,
            'database_name': 'demo'
        })
Example #38
def emit_cephconf():
    # Install ceph.conf as an alternative to support
    # co-existence with other charms that write this file
    charm_ceph_conf = "/var/lib/charm/{}/ceph.conf".format(service_name())
    mkdir(os.path.dirname(charm_ceph_conf),
          owner=ceph.ceph_user(),
          group=ceph.ceph_user())
    render('ceph.conf', charm_ceph_conf, get_ceph_context(), perms=0o644)
    install_alternative('ceph.conf', '/etc/ceph/ceph.conf', charm_ceph_conf,
                        100)
Example #39
def setup_app(mysql):
    render(source='mysql_configure.php',
           target='/var/www/phpappv1/mysql_configure.php',
           owner='www-data',
           perms=0o775,
           context={
               'db': mysql,
           })
    set_state('apache.start')
    status_set('maintenance', 'Starting apache')
Example #40
def render_conf(allowed_cidrs):
    templating.render('munin-node.conf',
                      MUNIN_NODE_CONF, {
                          'allowed_cidrs': allowed_cidrs,
                          'log_level': hookenv.config('log-level'),
                          'log_file': hookenv.config('log-file'),
                          'plugin_timeout': hookenv.config('plugin-timeout')
                      },
                      perms=0o644)
    hookenv.log("%s rendered" % MUNIN_NODE_CONF)
Example #41
def render_api_upstart_template():
    flags = hookenv.config()['feature-flags'].replace(' ', '')
    flags = [x for x in flags.split(',') if x != '']
    templating.render(source='upstart.conf',
                      target='/etc/init/h_api.conf',
                      context={
                          'tengu_dir': TENGU_DIR,
                          'user': USER,
                          'flags': flags
                      })
Example #42
def ftb_config_server():
    # Configure server.properties
    render(
        'server.properties',
        os.path.join(FTB_HOME, 'server.properties'),
        conf,
        owner=FTB_USER,
        group=FTB_USER,
        perms=0o644
    )
Example #43
def template_tenant_config():
    conf = {'config': hookenv.config().get('zuul-config')}
    templating.render('main.yaml',
                      '/etc/zuul/main.yaml',
                      context=conf,
                      perms=0o650,
                      group='zuul',
                      owner='zuul')
    if reactive.helpers.any_file_changed(['/etc/zuul/main.yaml']):
        reactive.set_flag('zuul.reload_config')
Example #44
def render_service(service_name, context):
    '''Render the systemd service by name.'''
    unit_directory = '/lib/systemd/system'
    source = '{0}.service'.format(service_name)
    target = os.path.join(unit_directory, '{0}.service'.format(service_name))
    render(source, target, context)
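    # Also render the matching /etc/default/<service_name> environment file.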
    conf_directory = '/etc/default'
    source = '{0}.defaults'.format(service_name)
    target = os.path.join(conf_directory, service_name)
    render(source, target, context)
Example #45
    def render_puppet_conf(self):
        ''' Render puppet.conf
        '''
        if os.path.exists(self.puppet_conf_path()):
            os.remove(self.puppet_conf_path())
        render(source=self.puppet_conf,
               target=self.puppet_conf_path(),
               owner='root',
               perms=0o644,
               context=self.puppet_conf_ctxt)
Example #46
def hookup_the_beats():
    """Configure and enable the monitoring
    to export metrics to the monitoring cluster.
    """
    status_set('maintenance', 'Configuring monitoring ...')
    kibana_host_port = \
        charms.leadership.leader_get('monitoring_kibana_host_port')
    elastic_user_password = \
        charms.leadership.leader_get('monitoring_elastic_user_password')
    monitoring_es_servers = \
        charms.leadership.leader_get('monitoring_es_servers').split(",")

    ctr = 0
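    # Poll Kibana for up to ~100 seconds; bail out if it never becomes available.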
    while requests.get(f"http://{kibana_host_port}").status_code != 200 and\
            ctr <= 100:
        if ctr == 100:
            return
        status_set('waiting', "Waiting on kibana to become available ...")
        sleep(1)
        ctr += 1

    ctxt = {
        'monitoring_kibana_host_port': kibana_host_port,
        'monitoring_elastic_user_password': elastic_user_password,
        'monitoring_es_servers': monitoring_es_servers,
    }

    # Render the metricbeat config, enable the elasticsearch module
    # enable the systemd service, start the service, setup the dashboards.
    render('metricbeat.yml.j2', '/etc/metricbeat/metricbeat.yml', ctxt)

    sp.call(["metricbeat", "modules", "enable", "elasticsearch"])

    sp.call(['systemctl', 'daemon-reload'])

    sp.call(["systemctl", "enable", "metricbeat.service"])

    if is_leader():
        sp.call(["metricbeat", "setup", "--dashboards"])

    # Render the filebeat config, enable the elasticsearch module,
    # enable the systemd service and setup the dashboards.
    render('filebeat.yml.j2', '/etc/filebeat/filebeat.yml', ctxt)

    sp.call(["filebeat", "modules", "enable", "elasticsearch"])

    sp.call(['systemctl', 'daemon-reload'])

    sp.call(['systemctl', 'enable', 'filebeat.service'])

    if is_leader():
        sp.call(["filebeat", "setup"])

    set_flag('elasticsearch.external.monitoring.cluster.configured')
    es_active_status()
Example #47
def install_administrative_scripts():
    scripts_dir = helpers.scripts_dir()
    logs_dir = helpers.logs_dir()
    helpers.makedirs(scripts_dir, mode=0o755)

    # The database backup script. Most of this is redundant now.
    source = os.path.join(hookenv.charm_dir(), "scripts", "pgbackup.py")
    destination = os.path.join(scripts_dir, "dump-pg-db")
    with open(source, "r") as f:
        helpers.write(destination, f.read(), mode=0o755)

    backups_dir = helpers.backups_dir()
    helpers.makedirs(backups_dir, mode=0o750, user="******", group="postgres")

    # Generate a wrapper that invokes the backup script for each
    # database.
    data = dict(
        logs_dir=logs_dir,
        scripts_dir=scripts_dir,
        # backups_dir probably should be deprecated in favour of
        # a juju storage mount.
        backups_dir=backups_dir,
    )
    destination = os.path.join(helpers.scripts_dir(), "pg_backup_job")
    templating.render(
        "pg_backup_job.tmpl",
        destination,
        data,
        owner="root",
        group="postgres",
        perms=0o755,
    )

    # Install the reaper scripts.
    script = "pgkillidle.py"
    source = os.path.join(hookenv.charm_dir(), "scripts", script)
    destination = os.path.join(scripts_dir, script)
    if reactive.helpers.any_file_changed([source]) or not os.path.exists(destination):
        with open(source, "r") as f:
            helpers.write(destination, f.read(), mode=0o755)

    if not os.path.exists(logs_dir):
        helpers.makedirs(logs_dir, mode=0o755, user="******", group="postgres")
        # Create the backups.log file used by the backup wrapper if it
        # does not exist, in order to trigger spurious alerts when a
        # unit is installed, per Bug #1329816.
        helpers.write(
            helpers.backups_log_path(),
            "",
            mode=0o644,
            user="******",
            group="postgres",
        )

    reactive.set_state("postgresql.cluster.support-scripts")
Example #48
    def bootstrap(self):
        """Generate Jenkins' initial config."""
        hookenv.log("Bootstrapping initial Jenkins configuration")

        config = hookenv.config()
        context = {"master_executors": config["master-executors"]}
        templating.render(
            "jenkins-config.xml", paths.CONFIG_FILE, context,
            owner="jenkins", group="nogroup")

        hookenv.open_port(PORT)
Example #49
def setup_app(mysql):
    render(source='mysql_configure.php',
           target='/var/www/proto-adminer/mysql_configure.php',
           owner='www-data',
           perms=0o775,
           context={
               'db': mysql,
           })
    log("in setup function")
    set_flag('apache.start')
    status_set('maintenance', 'Setting up application')
Example #50
def install_openvim_service():
    status_set("maintenance", "installing openvim service")
    if not os.path.exists('/etc/systemd/system'):
        os.makedirs('/etc/systemd/system')
    render(
        source="openvim.service",
        target="/etc/systemd/system/openvim.service",
        owner="root",
        perms=0o644,
        context={}
    )
Example #51
def config_ports():
    api_port = conf.get('api_port')
    rpc_port = conf.get('rpc_port')
    open_port(api_port, protocol='TCP')  # Port to the HTTP API endpoint
    open_port(rpc_port, protocol='TCP')  # Port to the RPC endpoint
    templating.render(source='influxdb.conf',
                      target='/etc/influxdb/influxdb.conf',
                      context={
                          'api_port': api_port,
                          'rpc_port': rpc_port
                      })
Example #52
def config_changed():
    context = {
        'hostname': HOST,
        'user': USER,
        'rootdir': API_DIR,
        'port': config()['port']
    }
    close_port(config().previous('port'))
    open_port(config()['port'])
    render('http.conf', '/etc/nginx/sites-enabled/sojobo.conf', context)
    service_restart('nginx')
Example #53
def create_virbr_network():
    status_set("maintenance", "Creating virbr0 network")
    render(source="net-virbr0.yaml",
           target="/tmp/net-virbr0.yaml",
           owner="openvim",
           perms=0o664,
           context={})
    cmd = 'openvim net-create /tmp/net-virbr0.yaml'
    net_virbr0_uuid = sh_as_openvim(cmd).split()[0]
    net_virbr0_uuid = str(net_virbr0_uuid, 'utf-8')
    return net_virbr0_uuid
Example #54
    def write(self, config_path):
        data = self.config_data.get(config_path, None)
        if data:
            log("writing config file: %s , data: %s" %
                (config_path, str(data)),
                level='DEBUG')

            render(os.path.basename(config_path),
                   config_path,
                   data,
                   perms=0o644)
Example #55
def create_image():
    status_set("maintenance", "Creating image")
    render(source="image.yaml",
           target="/tmp/image.yaml",
           owner="openvim",
           perms=0o664,
           context={})
    cmd = 'openvim image-create /tmp/image.yaml'
    image_uuid = sh_as_openvim(cmd).split()[0]
    image_uuid = str(image_uuid, 'utf-8')
    return image_uuid
Example #56
def install():
    hookenv.log('Installing gitlab-runner')
    ubuntu.apt_install(ubuntu.filter_installed_packages(['docker.io']))
    docker.run_container()
    hookenv.log('Started gitlab-runner container')
    templating.render(source='update-gitlab-runner.sh',
                      target='/etc/cron.daily/update-gitlab-runner.sh',
                      context={},
                      perms=0o755,
                      owner='root',
                      group='root')
Example #57
    def setup_systemd(self):
        context = {'user': self.user,
                   'group': self.user,
                   'mono': self.mono_path,
                   'radarr': self.executable
                   }
        templating.render(source="{}.j2".format(self.service_name),
                          target=self.service_file,
                          context=context)
        host.service('enable', self.service_name)
        host.service('start', self.service_name)
Example #58
def mongo_render_config():

    mongo = endpoint_from_flag('endpoint.mongogdb.available')

    render('db-config.j2', '/var/www/webapp/mongo-config.html', {
        'gdb_host': mongo.host(),
        'gdb_port': mongo.port(),
    })
    status_set('maintenance', 'Rendering config file')
    set_flag('mongodb.configured')
    set_flag('restart-app')
Example #59
def render_conf_file(conf_parameters):
    render(source='rethinkdb.conf',
           target='/etc/rethinkdb/instances.d/rethinkd.conf',
           context={
               'port': conf_parameters[0],
               'driver_port': conf_parameters[1],
               'cluster_port': conf_parameters[2],
               'rethinkdb_data': conf_parameters[3],
               'admin_console': conf_parameters[4],
               'clustering': conf_parameters[5]
           })
Example #60
def create_flavor():
    status_set("maintenance", "Creating flavor")
    render(source="flavor.yaml",
           target="/tmp/flavor.yaml",
           owner="openvim",
           perms=0o664,
           context={})
    cmd = 'openvim flavor-create /tmp/flavor.yaml'
    flavor_uuid = sh_as_openvim(cmd).split()[0]
    flavor_uuid = str(flavor_uuid, 'utf-8')
    return flavor_uuid