Example #1
def create_role(name, user=None, tenant=None):
    """ creates a role if it doesn't already exist. grants role to user """
    import manager
    manager = manager.KeystoneManager(endpoint=get_local_endpoint(),
                                      token=get_admin_token())
    roles = [r._info for r in manager.api.roles.list()]
    if not roles or name not in [r['name'] for r in roles]:
        manager.api.roles.create(name=name)
        utils.juju_log('INFO', "Created new role '%s'" % name)
    else:
        utils.juju_log('INFO', "A role named '%s' already exists" % name)

    if not user and not tenant:
        return

    # NOTE(adam_g): Keystone client requires id's for add_user_role, not names
    user_id = manager.resolve_user_id(user)
    role_id = manager.resolve_role_id(name)
    tenant_id = manager.resolve_tenant_id(tenant)

    if None in [user_id, role_id, tenant_id]:
        error_out("Could not resolve [%s, %s, %s]" %
                   (user_id, role_id, tenant_id))

    grant_role(user, name, tenant)
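The grant_role() helper called on the last line is not shown in this example. A minimal sketch of what it might look like, assuming the same charm-local manager module, get_local_endpoint()/get_admin_token() helpers and the keystoneclient v2 roles.add_user_role() call (these names are assumptions, not the charm's actual implementation):

def grant_role(user, role, tenant):
    """Grant an existing role to a user for a given tenant (sketch)."""
    import manager
    manager = manager.KeystoneManager(endpoint=get_local_endpoint(),
                                      token=get_admin_token())
    # keystoneclient expects ids, not names
    user_id = manager.resolve_user_id(user)
    role_id = manager.resolve_role_id(role)
    tenant_id = manager.resolve_tenant_id(tenant)
    manager.api.roles.add_user_role(user=user_id, role=role_id,
                                    tenant=tenant_id)
    utils.juju_log('INFO', "Granted role '%s' to user '%s' on tenant '%s'"
                   % (role, user, tenant))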
Example #2
def create_pool(service, name, replicas=2):
    ''' Create a new RADOS pool '''
    if pool_exists(service, name):
        utils.juju_log('WARNING',
                       "Ceph pool {} already exists, "
                       "skipping creation".format(name))
        return

    osds = get_osds(service)
    if osds:
        pgnum = (len(osds) * 100 / replicas)
    else:
        # NOTE(james-page): Default to 200 for older ceph versions
        # which don't support OSD query from cli
        pgnum = 200

    cmd = [
        'ceph', '--id', service,
        'osd', 'pool', 'create',
        name, str(pgnum)
    ]
    subprocess.check_call(cmd)
    cmd = [
        'ceph', '--id', service,
        'osd', 'pool', 'set', name,
        'size', str(replicas)
    ]
    subprocess.check_call(cmd)
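The pgnum heuristic above follows the common rule of thumb of roughly 100 placement groups per OSD divided by the replica count: with 6 OSDs and replicas=3, for example, pgnum = (6 * 100) / 3 = 200. A hypothetical invocation (the service and pool names are illustrative):

create_pool(service='cinder', name='cinder', replicas=3)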
Example #3
def ha_relation_changed():
    clustered = utils.relation_get("clustered")
    if clustered and cluster.is_leader(LEADER_RES):
        utils.juju_log("INFO", "Cluster configured, notifying other services")
        # Tell all related services to start using the VIP
        for r_id in utils.relation_ids("shared-db"):
            utils.relation_set(rid=r_id, db_host=utils.config_get("vip"))
Example #4
def cluster_changed():
    unison.ssh_authorized_peers(user=rabbit.SSH_USER,
                                group='rabbit',
                                peer_interface='cluster',
                                ensure_local_user=True)
    rabbit.synchronize_service_credentials()

    if utils.is_relation_made('ha'):
        utils.juju_log('INFO',
                       'hacluster relation is present, skipping native '
                       'rabbitmq cluster config.')
        return
    l_unit_no = os.getenv('JUJU_UNIT_NAME').split('/')[1]
    r_unit_no = os.getenv('JUJU_REMOTE_UNIT').split('/')[1]
    if l_unit_no < r_unit_no:
        utils.juju_log('INFO', 'cluster_joined: Relation lesser.')
        return

    cookie = utils.relation_get('cookie')
    if cookie is None:
        utils.juju_log('INFO',
                       'cluster_joined: cookie not yet set.')
        return

    if open(rabbit.COOKIE_PATH, 'r').read().strip() == cookie:
        utils.juju_log('INFO', 'Cookie already synchronized with peer.')
    else:
        utils.juju_log('INFO', 'Synchronizing erlang cookie from peer.')
        rabbit.service('stop')
        with open(rabbit.COOKIE_PATH, 'wb') as out:
            out.write(cookie)
        rabbit.service('start')

    # cluster with other nodes
    rabbit.cluster_with()
Example #5
def configure_pki_tokens(config):
    '''Configure PKI token signing, if enabled.'''
    if config['enable-pki'] not in ['True', 'true']:
        update_config_block('signing', token_format='UUID')
    else:
        utils.juju_log('INFO', 'TODO: PKI Support, setting to UUID for now.')
        update_config_block('signing', token_format='UUID')
Example #6
    def get_allowed_units(database, username):
        allowed_units = set()
        for relid in hookenv.relation_ids('shared-db'):
            for unit in hookenv.related_units(relid):
                attr = "%s_%s" % (database, 'hostname')
                hosts = hookenv.relation_get(attribute=attr, unit=unit,
                                             rid=relid)
                if not hosts:
                    hosts = [hookenv.relation_get(attribute='private-address',
                                                  unit=unit, rid=relid)]
                else:
                    # hostname can be json-encoded list of hostnames
                    try:
                        hosts = json.loads(hosts)
                    except ValueError:
                        pass

                if not isinstance(hosts, list):
                    hosts = [hosts]

                if hosts:
                    for host in hosts:
                        utils.juju_log('INFO', "Checking host '%s' grant" %
                                       (host))
                        if grant_exists(database, username, host):
                            if unit not in allowed_units:
                                allowed_units.add(unit)
                else:
                    utils.juju_log('INFO', "No hosts found for grant check")

        return allowed_units
Example #7
def disable_https(port_maps, namespace):
    '''
    Ensure HTTPS reverse proxying is disabled for the given port mappings

    port_maps: dict: of ext -> int port mappings
    namespace: str: name of charm
    '''
    juju_log('INFO', 'Ensuring HTTPS disabled for {}'.format(port_maps))

    if (not os.path.exists('/etc/apache2') or
        not os.path.exists(os.path.join('/etc/apache2/ssl', namespace))):
        return

    http_restart = False
    for ext_port in port_maps.keys():
        if os.path.exists(os.path.join(APACHE_SITE_DIR,
                                       "{}_{}".format(namespace,
                                                      ext_port))):
            juju_log('INFO',
                     "Disabling HTTPS reverse proxy"
                     " for {} {}.".format(namespace,
                                          ext_port))
            if (RELOAD_CHECK in
                subprocess.check_output(['a2dissite',
                                         '{}_{}'.format(namespace,
                                                        ext_port)])):
                http_restart = True

    if http_restart:
        restart(['apache2'])
Example #8
def ssh_authorized_peers(peer_interface,
                         user,
                         group=None,
                         ensure_local_user=False):
    """
    Main setup function, should be called from both peer -changed and -joined
    hooks with the same parameters.
    """
    if ensure_local_user:
        ensure_user(user, group)
    priv_key, pub_key = get_keypair(user)
    hook = os.path.basename(sys.argv[0])
    if hook == '%s-relation-joined' % peer_interface:
        utils.relation_set(ssh_pub_key=pub_key)
        print 'joined'
    elif hook == '%s-relation-changed' % peer_interface:
        hosts = []
        keys = []
        for r_id in utils.relation_ids(peer_interface):
            for unit in utils.relation_list(r_id):
                settings = utils.relation_get_dict(relation_id=r_id,
                                                   remote_unit=unit)
                if 'ssh_pub_key' in settings:
                    keys.append(settings['ssh_pub_key'])
                    hosts.append(settings['private-address'])
                else:
                    utils.juju_log('INFO',
                                   'ssh_authorized_peers(): ssh_pub_key '\
                                   'missing for unit %s, skipping.' % unit)
        write_authorized_keys(user, keys)
        write_known_hosts(user, hosts)
        authed_hosts = ':'.join(hosts)
        utils.relation_set(ssh_authorized_hosts=authed_hosts)
Example #9
def config_changed():
    unison.ensure_user(user=rabbit.SSH_USER, group='rabbit')
    ensure_unison_rabbit_permissions()

    if utils.config_get('management_plugin') is True:
        rabbit.enable_plugin(MAN_PLUGIN)
        utils.open_port(55672)
    else:
        # rabbit.disable_plugin(MAN_PLUGIN)
        utils.close_port(55672)

    if utils.config_get('ssl_enabled') is True:
        ssl_key = utils.config_get('ssl_key')
        ssl_cert = utils.config_get('ssl_cert')
        ssl_port = utils.config_get('ssl_port')
        if None in [ssl_key, ssl_cert, ssl_port]:
            utils.juju_log('ERROR',
                           'Please provide ssl_key, ssl_cert and ssl_port'
                           ' config when enabling SSL support')
            sys.exit(1)
        else:
            rabbit.enable_ssl(ssl_key, ssl_cert, ssl_port)
            utils.open_port(ssl_port)
    else:
        if os.path.exists(rabbit.RABBITMQ_CONF):
            os.remove(rabbit.RABBITMQ_CONF)
        utils.close_port(utils.config_get('ssl_port'))

    if cluster.eligible_leader('res_rabbitmq_vip'):
        utils.restart('rabbitmq-server')

    update_nrpe_checks()
Example #10
def cluster_changed():
    unison.ssh_authorized_peers(user=rabbit.SSH_USER,
                                group='rabbit',
                                peer_interface='cluster',
                                ensure_local_user=True)
    rabbit.synchronize_service_credentials()

    if utils.is_relation_made('ha'):
        utils.juju_log(
            'INFO', 'hacluster relation is present, skipping native '
            'rabbitmq cluster config.')
        return
    l_unit_no = os.getenv('JUJU_UNIT_NAME').split('/')[1]
    r_unit_no = os.getenv('JUJU_REMOTE_UNIT').split('/')[1]
    if l_unit_no < r_unit_no:
        utils.juju_log('INFO', 'cluster_joined: Relation lesser.')
        return

    cookie = utils.relation_get('cookie')
    if cookie is None:
        utils.juju_log('INFO', 'cluster_joined: cookie not yet set.')
        return

    if open(rabbit.COOKIE_PATH, 'r').read().strip() == cookie:
        utils.juju_log('INFO', 'Cookie already synchronized with peer.')
    else:
        utils.juju_log('INFO', 'Synchronizing erlang cookie from peer.')
        rabbit.service('stop')
        with open(rabbit.COOKIE_PATH, 'wb') as out:
            out.write(cookie)
        rabbit.service('start')

    # cluster with other nodes
    rabbit.cluster_with()
Example #11
def ha_relation_changed():
    clustered = utils.relation_get('clustered')
    if (clustered and cluster.is_leader(LEADER_RES)):
        utils.juju_log('INFO', 'Cluster configured, notifying other services')
        # Tell all related services to start using the VIP
        for r_id in utils.relation_ids('shared-db'):
            utils.relation_set(rid=r_id, db_host=utils.config_get('vip'))
Example #12
def create_pool(service, name, replicas=2):
    ''' Create a new RADOS pool '''
    if pool_exists(service, name):
        utils.juju_log('WARNING',
                       "Ceph pool {} already exists, "
                       "skipping creation".format(name))
        return

    osds = get_osds(service)
    if osds:
        pgnum = (len(osds) * 100 / replicas)
    else:
        # NOTE(james-page): Default to 200 for older ceph versions
        # which don't support OSD query from cli
        pgnum = 200

    cmd = [
        'ceph', '--id', service,
        'osd', 'pool', 'create',
        name, str(pgnum)
    ]
    subprocess.check_call(cmd)
    cmd = [
        'ceph', '--id', service,
        'osd', 'pool', 'set', name,
        'size', str(replicas)
    ]
    subprocess.check_call(cmd)
Example #13
def get_keypair(user):
    home_dir = get_homedir(user)
    ssh_dir = os.path.join(home_dir, '.ssh')
    if not os.path.isdir(ssh_dir):
        os.mkdir(ssh_dir)

    priv_key = os.path.join(ssh_dir, 'id_rsa')
    if not os.path.isfile(priv_key):
        utils.juju_log('INFO', 'Generating new ssh key for user %s.' % user)
        cmd = [
            'ssh-keygen', '-q', '-N', '', '-t', 'rsa', '-b', '2048', '-f',
            priv_key
        ]
        subprocess.check_call(cmd)

    pub_key = '%s.pub' % priv_key
    if not os.path.isfile(pub_key):
        utils.juju_log('INFO', 'Generating missing ssh public key @ %s.' %
                       pub_key)
        cmd = ['ssh-keygen', '-y', '-f', priv_key]
        p = subprocess.check_output(cmd).strip()
        with open(pub_key, 'wb') as out:
            out.write(p)
    subprocess.check_call(['chown', '-R', user, ssh_dir])
    return open(priv_key, 'r').read().strip(), \
           open(pub_key, 'r').read().strip()
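A hypothetical caller, mirroring ssh_authorized_peers() in Example #8, might publish the public half of the keypair on the peer relation (the user name below is illustrative):

priv_key, pub_key = get_keypair('juju_rabbitmq')
utils.relation_set(ssh_pub_key=pub_key)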
Example #14
def ssh_authorized_peers(peer_interface, user, group=None, ensure_local_user=False):
    """
    Main setup function, should be called from both peer -changed and -joined
    hooks with the same parameters.
    """
    if ensure_local_user:
        ensure_user(user, group)
    priv_key, pub_key = get_keypair(user)
    hook = os.path.basename(sys.argv[0])
    if hook == '%s-relation-joined' % peer_interface:
        utils.relation_set(ssh_pub_key=pub_key)
        print 'joined'
    elif hook == '%s-relation-changed' % peer_interface:
        hosts = []
        keys = []
        for r_id in utils.relation_ids(peer_interface):
            for unit in utils.relation_list(r_id):
                settings = utils.relation_get_dict(relation_id=r_id,
                                                   remote_unit=unit)
                if 'ssh_pub_key' in settings:
                    keys.append(settings['ssh_pub_key'])
                    hosts.append(settings['private-address'])
                else:
                    utils.juju_log('INFO',
                                   'ssh_authorized_peers(): ssh_pub_key '\
                                   'missing for unit %s, skipping.' % unit)
        write_authorized_keys(user, keys)
        write_known_hosts(user, hosts)
        authed_hosts = ':'.join(hosts)
        utils.relation_set(ssh_authorized_hosts=authed_hosts)
Example #15
def cluster_changed():
    if utils.is_relation_made('ha'):
        utils.juju_log('INFO',
                       'hacluster relation is present, skipping native '\
                       'rabbitmq cluster config.')
        return
    l_unit_no = os.getenv('JUJU_UNIT_NAME').split('/')[1]
    r_unit_no = os.getenv('JUJU_REMOTE_UNIT').split('/')[1]
    if l_unit_no < r_unit_no:
        utils.juju_log('INFO', 'cluster_joined: Relation lesser.')
        return

    remote_host = utils.relation_get('host')
    cookie = utils.relation_get('cookie')
    if None in [remote_host, cookie]:
        utils.juju_log('INFO',
                       'cluster_joined: remote_host|cookie not yet set.')
        return

    if open(rabbit.COOKIE_PATH, 'r').read().strip() == cookie:
        utils.juju_log('INFO', 'Cookie already synchronized with peer.')
        return

    utils.juju_log('INFO', 'Synchronizing erlang cookie from peer.')
    rabbit.service('stop')
    with open(rabbit.COOKIE_PATH, 'wb') as out:
        out.write(cookie)
    rabbit.service('start')
    rabbit.cluster_with(remote_host)
Example #16
def config_changed():
    unison.ensure_user(user=rabbit.SSH_USER, group='rabbit')
    ensure_unison_rabbit_permissions()

    if utils.config_get('management_plugin') is True:
        rabbit.enable_plugin(MAN_PLUGIN)
        utils.open_port(55672)
    else:
        # rabbit.disable_plugin(MAN_PLUGIN)
        utils.close_port(55672)

    if utils.config_get('ssl_enabled') is True:
        ssl_key = utils.config_get('ssl_key')
        ssl_cert = utils.config_get('ssl_cert')
        ssl_port = utils.config_get('ssl_port')
        if None in [ssl_key, ssl_cert, ssl_port]:
            utils.juju_log(
                'ERROR', 'Please provide ssl_key, ssl_cert and ssl_port'
                ' config when enabling SSL support')
            sys.exit(1)
        else:
            rabbit.enable_ssl(ssl_key, ssl_cert, ssl_port)
            utils.open_port(ssl_port)
    else:
        if os.path.exists(rabbit.RABBITMQ_CONF):
            os.remove(rabbit.RABBITMQ_CONF)
        utils.close_port(utils.config_get('ssl_port'))

    if cluster.eligible_leader('res_rabbitmq_vip'):
        utils.restart('rabbitmq-server')

    update_nrpe_checks()
Example #17
def ha_relation_changed():
    clustered = utils.relation_get('clustered')
    if (clustered and cluster.is_leader(LEADER_RES)):
        utils.juju_log('INFO', 'Cluster configured, notifying other services')
        # Tell all related services to start using the VIP
        for r_id in utils.relation_ids('shared-db'):
            utils.relation_set(rid=r_id,
                               db_host=utils.config_get('vip'))
Example #18
def get_homedir(user):
    try:
        user = pwd.getpwnam(user)
        return user.pw_dir
    except KeyError:
        utils.juju_log('INFO',
                       'Could not get homedir for user %s: does the user '
                       'exist?' % user)
        sys.exit(1)
Example #19
def vhost_exists(vhost):
    cmd = [RABBITMQ_CTL, 'list_vhosts']
    out = subprocess.check_output(cmd)
    for line in out.split('\n')[1:]:
        if line == vhost:
            utils.juju_log('INFO', 'vhost (%s) already exists.' % vhost)
            return True
    return False
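A minimal companion sketch (not part of the example) showing how vhost_exists() is typically used to guard vhost creation; 'add_vhost' is a standard rabbitmqctl subcommand:

def create_vhost(vhost):
    if vhost_exists(vhost):
        return
    subprocess.check_call([RABBITMQ_CTL, 'add_vhost', vhost])
    utils.juju_log('INFO', 'Created new vhost (%s).' % vhost)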
Example #20
def write_authorized_keys(user, keys):
    home_dir = get_homedir(user)
    ssh_dir = os.path.join(home_dir, '.ssh')
    auth_keys = os.path.join(ssh_dir, 'authorized_keys')
    utils.juju_log('INFO', 'Syncing authorized_keys @ %s.' % auth_keys)
    with open(auth_keys, 'wb') as out:
        for k in keys:
            out.write('%s\n' % k)
Example #21
def get_homedir(user):
    try:
        user = pwd.getpwnam(user)
        return user.pw_dir
    except KeyError:
        utils.juju_log('INFO',
                       'Could not get homedir for user %s: does the user '
                       'exist?' % user)
        sys.exit(1)
Example #22
def write_authorized_keys(user, keys):
    home_dir = get_homedir(user)
    ssh_dir = os.path.join(home_dir, '.ssh')
    auth_keys = os.path.join(ssh_dir, 'authorized_keys')
    utils.juju_log('INFO', 'Syncing authorized_keys @ %s.' % auth_keys)
    with open(auth_keys, 'wb') as out:
        for k in keys:
            out.write('%s\n' % k)
Example #23
def vhost_exists(vhost):
    cmd = [RABBITMQ_CTL, 'list_vhosts']
    out = subprocess.check_output(cmd)
    for line in out.split('\n')[1:]:
        if line == vhost:
            utils.juju_log('INFO', 'vhost (%s) already exists.' % vhost)
            return True
    return False
Example #24
def create_key_file(service, key):
    # create a file containing the key
    keyfile = keyfile_path(service)
    if os.path.exists(keyfile):
        utils.juju_log('INFO', 'ceph: Keyfile exists at %s.' % keyfile)
        return
    fd = open(keyfile, 'w')
    fd.write(key)
    fd.close()
    utils.juju_log('INFO', 'ceph: Created new keyfile at %s.' % keyfile)
Example #25
def create_key_file(service, key):
    # create a file containing the key
    keyfile = keyfile_path(service)
    if os.path.exists(keyfile):
        utils.juju_log('INFO', 'ceph: Keyfile exists at %s.' % keyfile)
        return
    fd = open(keyfile, 'w')
    fd.write(key)
    fd.close()
    utils.juju_log('INFO', 'ceph: Created new keyfile at %s.' % keyfile)
Example #26
def upgrade_charm():
    # Ensure all required packages are installed
    utils.install(*packages)
    cluster_changed()
    if cluster.eligible_leader(CLUSTER_RES):
        utils.juju_log('INFO',
                       'Cluster leader - ensuring endpoint configuration'
                       ' is up to date')
        ensure_initial_admin(config)
Example #27
def get_ca_cert():
    ca_cert = None
    juju_log('INFO',
             "Inspecting identity-service relations for CA SSL certificate.")
    for r_id in relation_ids('identity-service'):
        for unit in relation_list(r_id):
            if not ca_cert:
                ca_cert = relation_get('ca_cert',
                                       rid=r_id, unit=unit)
    return ca_cert
Example #28
def eligible_leader(resource):
    if is_clustered():
        if not is_leader(resource):
            juju_log('INFO', 'Deferring action to CRM leader.')
            return False
    else:
        peers = peer_units()
        if peers and not oldest_peer(peers):
            juju_log('INFO', 'Deferring action to oldest service unit.')
            return False
    return True
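Hooks typically use eligible_leader() as a guard so that one-time, cluster-wide work is performed by a single unit only. A minimal sketch, with an illustrative resource name:

def ha_relation_joined():
    if not eligible_leader('res_ks_vip'):  # hypothetical CRM resource name
        juju_log('INFO', 'Not the leader, deferring configuration.')
        return
    # ... leader-only work (e.g. registering endpoints) goes here ...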
Example #29
def ha_changed():
    if not cluster.is_clustered():
        return
    vip = utils.config_get('vip')
    utils.juju_log(
        'INFO', 'ha_changed(): We are now HA clustered. '
        'Advertising our VIP (%s) to all AMQP clients.' % vip)
    # need to re-authenticate all clients since node-name changed.
    for rid in utils.relation_ids('amqp'):
        for unit in utils.relation_list(rid):
            amqp_changed(relation_id=rid, remote_unit=unit)
Example #30
def create_keyring(service, key):
    keyring = keyring_path(service)
    if os.path.exists(keyring):
        utils.juju_log('INFO', 'ceph: Keyring exists at %s.' % keyring)
        return
    cmd = [
        'ceph-authtool', keyring, '--create-keyring',
        '--name=client.%s' % service,
        '--add-key=%s' % key
    ]
    execute(cmd)
    utils.juju_log('INFO', 'ceph: Created new ring at %s.' % keyring)
Example #31
def ha_changed():
    if not cluster.is_clustered():
        return
    vip = utils.config_get('vip')
    utils.juju_log('INFO', 'ha_changed(): We are now HA clustered. '
                   'Advertising our VIP (%s) to all AMQP clients.' %
                   vip)
    # need to re-authenticate all clients since node-name changed.
    for rid in utils.relation_ids('amqp'):
        for unit in utils.relation_list(rid):
            amqp_changed(relation_id=rid, remote_unit=unit)
Example #32
def eligible_leader(resource):
    if is_clustered():
        if not is_leader(resource):
            juju_log('INFO', 'Deferring action to CRM leader.')
            return False
    else:
        peers = peer_units()
        if peers and not oldest_peer(peers):
            juju_log('INFO', 'Deferring action to oldest service unit.')
            return False
    return True
Example #33
def synchronize_service_credentials():
    '''
    Broadcast service credentials to peers or consume those that have been
    broadcasted by peer, depending on hook context.
    '''
    if (not cluster.eligible_leader(CLUSTER_RES) or
        not os.path.isfile(SERVICE_PASSWD_PATH)):
        return
    utils.juju_log('INFO', 'Synchronizing service passwords to all peers.')
    unison.sync_to_peers(peer_interface='cluster',
                         paths=[SERVICE_PASSWD_PATH], user=SSH_USER,
                         verbose=True)
Example #34
def cluster_departed():
    if utils.is_relation_made('ha'):
        utils.juju_log('INFO',
                       'hacluster relation is present, skipping native '
                       'rabbitmq cluster config.')
        return
    l_unit_no = os.getenv('JUJU_UNIT_NAME').split('/')[1]
    r_unit_no = os.getenv('JUJU_REMOTE_UNIT').split('/')[1]
    if l_unit_no < r_unit_no:
        utils.juju_log('INFO', 'cluster_joined: Relation lesser.')
        return
    rabbit.break_cluster()
Example #35
def create_tenant(name):
    """ creates a tenant if it does not already exist """
    import manager
    manager = manager.KeystoneManager(endpoint=get_local_endpoint(),
                                      token=get_admin_token())
    tenants = [t._info for t in manager.api.tenants.list()]
    if not tenants or name not in [t['name'] for t in tenants]:
        manager.api.tenants.create(tenant_name=name,
                                   description='Created by Juju')
        utils.juju_log('INFO', "Created new tenant: %s" % name)
        return
    utils.juju_log('INFO', "Tenant '%s' already exists." % name)
Example #36
def create_keyring(service, key):
    keyring = keyring_path(service)
    if os.path.exists(keyring):
        utils.juju_log('INFO', 'ceph: Keyring exists at %s.' % keyring)
        return
    cmd = [
        'ceph-authtool',
        keyring,
        '--create-keyring',
        '--name=client.%s' % service,
        '--add-key=%s' % key]
    execute(cmd)
    utils.juju_log('INFO', 'ceph: Created new ring at %s.' % keyring)
Example #37
def cluster_departed():
    if utils.is_relation_made('ha'):
        utils.juju_log(
            'INFO', 'hacluster relation is present, skipping native '
            'rabbitmq cluster config.')
        return
    l_unit_no = os.getenv('JUJU_UNIT_NAME').split('/')[1]
    r_unit_no = os.getenv('JUJU_REMOTE_UNIT').split('/')[1]
    if l_unit_no < r_unit_no:
        utils.juju_log('INFO', 'cluster_joined: Relation lesser.')
        return
    rabbit.break_cluster()
Example #38
def _run_as_user(user):
    try:
        user = pwd.getpwnam(user)
    except KeyError:
        utils.juju_log('INFO', 'Invalid user: %s' % user)
        sys.exit(1)
    uid, gid = user.pw_uid, user.pw_gid
    os.environ['HOME'] = user.pw_dir

    def _inner():
        os.setgid(gid)
        os.setuid(uid)
    return _inner
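The closure returned by _run_as_user() is meant to run in a child process so that privileges are dropped before the command executes. A minimal sketch of a run_as_user() wrapper using subprocess's preexec_fn (the wrapper itself is an assumption here, though sync_to_peers() in Example #50 calls one just like it):

def run_as_user(user, cmd):
    # preexec_fn runs in the child between fork() and exec(), so setgid/
    # setuid only affect the launched command
    return subprocess.check_output(cmd, preexec_fn=_run_as_user(user),
                                   cwd='/')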
Example #39
def write_known_hosts(user, hosts):
    home_dir = get_homedir(user)
    ssh_dir = os.path.join(home_dir, '.ssh')
    known_hosts = os.path.join(ssh_dir, 'known_hosts')
    khosts = []
    for host in hosts:
        cmd = ['ssh-keyscan', '-H', '-t', 'rsa', host]
        remote_key = subprocess.check_output(cmd).strip()
        khosts.append(remote_key)
    utils.juju_log('INFO', 'Syncing known_hosts @ %s.' % known_hosts)
    with open(known_hosts, 'wb') as out:
        for host in khosts:
            out.write('%s\n' % host)
Example #40
def update_user_password(username, password):
    import manager
    manager = manager.KeystoneManager(endpoint=get_local_endpoint(),
                                      token=get_admin_token())
    utils.juju_log('INFO', "Updating password for user '%s'" % username)

    user_id = manager.resolve_user_id(username)
    if user_id is None:
        error_out("Could not resolve user id for '%s'" % username)

    manager.api.users.update_password(user=user_id, password=password)
    utils.juju_log('INFO', "Successfully updated password for user '%s'" % \
                   username)
Example #41
def write_known_hosts(user, hosts):
    home_dir = get_homedir(user)
    ssh_dir = os.path.join(home_dir, '.ssh')
    known_hosts = os.path.join(ssh_dir, 'known_hosts')
    khosts = []
    for host in hosts:
        cmd = ['ssh-keyscan', '-H', '-t', 'rsa', host]
        remote_key = subprocess.check_output(cmd).strip()
        khosts.append(remote_key)
    utils.juju_log('INFO', 'Syncing known_hosts @ %s.' % known_hosts)
    with open(known_hosts, 'wb') as out:
        for host in khosts:
            out.write('%s\n' % host)
Example #42
def upgrade_charm():
    pre_install_hooks()
    # Ensure older passwd files in /var/lib/juju are moved to
    # /var/lib/rabbitmq which will end up replicated if clustered.
    for f in [f for f in os.listdir('/var/lib/juju')
              if os.path.isfile(os.path.join('/var/lib/juju', f))]:
        if f.endswith('.passwd'):
            s = os.path.join('/var/lib/juju', f)
            d = os.path.join('/var/lib/rabbitmq', f)
            utils.juju_log('INFO',
                           'upgrade_charm: Migrating stored passwd'
                           ' from %s to %s.' % (s, d))
            shutil.move(s, d)
Example #43
def config_changed():
    unison.ensure_user(user=SSH_USER, group='keystone')
    execute("chmod -R g+wrx /var/lib/keystone/")

    # Determine whether or not we should do an upgrade, based on the
    # keystone release available from the configured installation source.
    available = get_os_codename_install_source(config['openstack-origin'])
    installed = get_os_codename_package('keystone')

    if (available and
        get_os_version_codename(available) > \
            get_os_version_codename(installed)):
        # TODO: fixup this call to work like utils.install()
        do_openstack_upgrade(config['openstack-origin'], ' '.join(packages))
        # Ensure keystone group permissions
        execute("chmod -R g+wrx /var/lib/keystone/")

    env_vars = {'OPENSTACK_SERVICE_KEYSTONE': 'keystone',
                'OPENSTACK_PORT_ADMIN': cluster.determine_api_port(
                    config['admin-port']),
                'OPENSTACK_PORT_PUBLIC': cluster.determine_api_port(
                    config['service-port'])}
    save_script_rc(**env_vars)

    set_admin_token(config['admin-token'])

    if cluster.eligible_leader(CLUSTER_RES):
        utils.juju_log('INFO',
                       'Cluster leader - ensuring endpoint configuration'
                       ' is up to date')
        ensure_initial_admin(config)

    update_config_block('logger_root', level=config['log-level'],
                        file='/etc/keystone/logging.conf')
    if get_os_version_package('keystone') >= '2013.1':
        # PKI introduced in Grizzly
        configure_pki_tokens(config)

    if config_dirty():
        utils.restart('keystone')

    if cluster.eligible_leader(CLUSTER_RES):
        utils.juju_log('INFO',
                       'Firing identity_changed hook'
                       ' for all related services.')
        # HTTPS may have been set - so fire all identity relations
        # again
        for r_id in utils.relation_ids('identity-service'):
            for unit in utils.relation_list(r_id):
                identity_changed(relation_id=r_id,
                                 remote_unit=unit)
Example #44
def generate_admin_token(config):
    """ generate and add an admin token """
    import manager
    manager = manager.KeystoneManager(endpoint=get_local_endpoint(),
                                      token='ADMIN')
    if config["admin-token"] == "None":
        import random
        token = random.randrange(1000000000000, 9999999999999)
    else:
        return config["admin-token"]
    manager.api.add_token(token, config["admin-user"],
                          "admin", config["token-expiry"])
    utils.juju_log('INFO', "Generated and added new random admin token.")
    return token
Example #45
def ensure_user(user, group=None):
    # need to ensure a bash shell'd user exists.
    try:
        pwd.getpwnam(user)
    except KeyError:
        utils.juju_log('INFO', 'Creating new user %s.%s.' % (user, group))
        cmd = ['adduser', '--system', '--shell', '/bin/bash', user]
        if group:
            try:
                grp.getgrnam(group)
            except KeyError:
                subprocess.check_call(['addgroup', group])
            cmd += ['--ingroup', group]
        subprocess.check_call(cmd)
Example #46
def break_cluster():
    try:
        cmd = [RABBITMQ_CTL, 'stop_app']
        subprocess.check_call(cmd)
        cmd = [RABBITMQ_CTL, 'reset']
        subprocess.check_call(cmd)
        cmd = [RABBITMQ_CTL, 'start_app']
        subprocess.check_call(cmd)
        utils.juju_log('INFO', 'Cluster successfully broken.')
        return
    except:
        # error, no nodes available for clustering
        utils.juju_log('ERROR', 'Error breaking rabbit cluster')
        sys.exit(1)
Example #47
def ensure_user(user, group=None):
    # need to ensure a bash shell'd user exists.
    try:
        pwd.getpwnam(user)
    except KeyError:
        utils.juju_log('INFO', 'Creating new user %s.%s.' % (user, group))
        cmd = ['adduser', '--system', '--shell', '/bin/bash', user]
        if group:
            try:
                grp.getgrnam(group)
            except KeyError:
                subprocess.check_call(['addgroup', group])
            cmd += ['--ingroup', group]
        subprocess.check_call(cmd)
Example #48
def _run_as_user(user):
    try:
        user = pwd.getpwnam(user)
    except KeyError:
        utils.juju_log('INFO', 'Invalid user: %s' % user)
        sys.exit(1)
    uid, gid = user.pw_uid, user.pw_gid
    os.environ['HOME'] = user.pw_dir

    def _inner():
        os.setgid(gid)
        os.setuid(uid)

    return _inner
Example #49
def upgrade_charm():
    pre_install_hooks()
    # Ensure older passwd files in /var/lib/juju are moved to
    # /var/lib/rabbitmq which will end up replicated if clustered.
    for f in [
            f for f in os.listdir('/var/lib/juju')
            if os.path.isfile(os.path.join('/var/lib/juju', f))
    ]:
        if f.endswith('.passwd'):
            s = os.path.join('/var/lib/juju', f)
            d = os.path.join('/var/lib/rabbitmq', f)
            utils.juju_log(
                'INFO', 'upgrade_charm: Migrating stored passwd'
                ' from %s to %s.' % (s, d))
            shutil.move(s, d)
Example #50
def sync_to_peers(peer_interface, user, paths=[], verbose=False):
    base_cmd = [
        'unison', '-auto', '-batch=true', '-confirmbigdel=false',
        '-fastcheck=true', '-group=false', '-owner=false', '-prefer=newer',
        '-times=true'
    ]
    if not verbose:
        base_cmd.append('-silent')

    hosts = []
    for r_id in (utils.relation_ids(peer_interface) or []):
        for unit in utils.relation_list(r_id):
            settings = utils.relation_get_dict(relation_id=r_id,
                                               remote_unit=unit)
            try:
                authed_hosts = settings['ssh_authorized_hosts'].split(':')
            except KeyError:
                print 'unison sync_to_peers: peer has not authorized *any* '\
                      'hosts yet.'
                return

            unit_hostname = utils.unit_get('private-address')
            add_host = None
            for authed_host in authed_hosts:
                if unit_hostname == authed_host:
                    add_host = settings['private-address']
            if add_host:
                hosts.append(settings['private-address'])
            else:
                print 'unison sync_to_peers: peer (%s) has not authorized '\
                      '*this* host yet, skipping.' %\
                       settings['private-address']

    for path in paths:
        # removing trailing slash from directory paths, unison
        # doesn't like these.
        if path.endswith('/'):
            path = path[:(len(path) - 1)]
        for host in hosts:
            try:
                cmd = base_cmd + [path, 'ssh://%s@%s/%s' % (user, host, path)]
                utils.juju_log(
                    'INFO', 'Syncing local path %s to %s@%s:%s' %
                    (path, user, host, path))
                run_as_user(user, cmd)
            except:
                # it may fail for permissions on some files
                pass
Example #51
def amqp_changed(relation_id=None, remote_unit=None):
    if not cluster.eligible_leader('res_rabbitmq_vip'):
        msg = 'amqp_changed(): Deferring amqp_changed to eligible_leader.'
        utils.juju_log('INFO', msg)
        return

    relation_settings = {}
    settings = hookenv.relation_get(rid=relation_id, unit=remote_unit)

    singleset = set(['username', 'vhost'])

    if singleset.issubset(settings):
        if None in [settings['username'], settings['vhost']]:
            utils.juju_log('INFO', 'amqp_changed(): Relation not ready.')
            return

        relation_settings['password'] = configure_amqp(
            username=settings['username'], vhost=settings['vhost'])
    else:
        queues = {}
        for k, v in settings.iteritems():
            amqp = k.split('_')[0]
            x = '_'.join(k.split('_')[1:])
            if amqp not in queues:
                queues[amqp] = {}
            queues[amqp][x] = v
        relation_settings = {}
        for amqp in queues:
            if singleset.issubset(queues[amqp]):
                relation_settings['_'.join([amqp,
                                            'password'])] = configure_amqp(
                                                queues[amqp]['username'],
                                                queues[amqp]['vhost'])

    relation_settings['hostname'] = utils.unit_get('private-address')

    if cluster.is_clustered():
        relation_settings['clustered'] = 'true'
        if utils.is_relation_made('ha'):
            # active/passive settings
            relation_settings['vip'] = utils.config_get('vip')

    if relation_id:
        relation_settings['rid'] = relation_id
    utils.relation_set(**relation_settings)

    # sync new creds to all peers
    rabbit.synchronize_service_credentials()
Example #52
def cluster_with():
    vers = rabbit_version()
    if vers >= '3.0.1-1':
        cluster_cmd = 'join_cluster'
        # pass each argument to rabbitmqctl separately; a single quoted
        # string would be rejected as an unknown subcommand
        cmd = [
            RABBITMQ_CTL, 'set_policy', 'HA', '^(?!amq\.).*',
            '{"ha-mode": "all"}'
        ]
        subprocess.check_call(cmd)
    else:
        cluster_cmd = 'cluster'
    out = subprocess.check_output([RABBITMQ_CTL, 'cluster_status'])
    current_host = subprocess.check_output(['hostname']).strip()

    # check all peers and try to cluster with them
    available_nodes = []
    first_hostname = utils.relation_get('host')
    available_nodes.append(first_hostname)

    for r_id in (utils.relation_ids('cluster') or []):
        for unit in (utils.relation_list(r_id) or []):
            address = utils.relation_get('private_address',
                                         rid=r_id,
                                         unit=unit)
            if address is not None:
                node = get_hostname(address, fqdn=False)
                if current_host != node:
                    available_nodes.append(node)

    # iterate over all the nodes, join to the first available
    for node in available_nodes:
        utils.juju_log('INFO',
                       'Clustering with remote rabbit host (%s).' % node)
        for line in out.split('\n'):
            if re.search(node, line):
                utils.juju_log('INFO',
                               'Host already clustered with %s.' % node)
                return

            try:
                cmd = [RABBITMQ_CTL, 'stop_app']
                subprocess.check_call(cmd)
                cmd = [RABBITMQ_CTL, cluster_cmd, 'rabbit@%s' % node]
                subprocess.check_call(cmd)
                cmd = [RABBITMQ_CTL, 'start_app']
                subprocess.check_call(cmd)
                utils.juju_log('INFO', 'Host clustered with %s.' % node)
                return
            except:
                # continue to the next node
                pass

    # error, no nodes available for clustering
    utils.juju_log('ERROR', 'No nodes available for clustering')
    sys.exit(1)
Example #53
def ensure_ceph_storage(service,
                        pool,
                        rbd_img,
                        sizemb,
                        mount_point,
                        blk_device,
                        fstype,
                        system_services=[],
                        rbd_pool_replicas=2):
    """
    To be called from the current cluster leader.
    Ensures given pool and RBD image exists, is mapped to a block device,
    and the device is formatted and mounted at the given mount_point.

    If formatting a device for the first time, data existing at mount_point
    will be migrated to the RBD device before being remounted.

    All services listed in system_services will be stopped prior to data
    migration and restarted when complete.
    """
    # Ensure pool, RBD image, RBD mappings are in place.
    if not pool_exists(service, pool):
        utils.juju_log('INFO', 'ceph: Creating new pool %s.' % pool)
        create_pool(service, pool, replicas=rbd_pool_replicas)

    if not rbd_exists(service, pool, rbd_img):
        utils.juju_log('INFO', 'ceph: Creating RBD image (%s).' % rbd_img)
        create_rbd_image(service, pool, rbd_img, sizemb)

    if not image_mapped(rbd_img):
        utils.juju_log('INFO', 'ceph: Mapping RBD Image as a Block Device.')
        map_block_storage(service, pool, rbd_img)

    # make file system
    # TODO: What happens if for whatever reason this is run again and
    # the data is already in the rbd device and/or is mounted??
    # When it is mounted already, it will fail to make the fs
    # XXX: This is really sketchy!  Need to at least add an fstab entry
    #      otherwise this hook will blow away existing data if its executed
    #      after a reboot.
    if not filesystem_mounted(mount_point):
        make_filesystem(blk_device, fstype)

        for svc in system_services:
            if utils.running(svc):
                utils.juju_log(
                    'INFO', 'Stopping services %s prior to migrating '
                    'data' % svc)
                utils.stop(svc)

        place_data_on_ceph(service, blk_device, mount_point, fstype)

        for svc in system_services:
            utils.start(svc)
Example #54
def synchronize_service_credentials():
    '''
    Broadcast service credentials to peers or consume those that have been
    broadcasted by peer, depending on hook context.
    '''
    if not os.path.isdir(LIB_PATH):
        return
    peers = cluster.peer_units()
    if peers and not cluster.oldest_peer(peers):
        utils.juju_log('INFO', 'Deferring action to oldest service unit.')
        return

    utils.juju_log('INFO', 'Synchronizing service passwords to all peers.')
    try:
        unison.sync_to_peers(peer_interface='cluster',
                             paths=[LIB_PATH],
                             user=SSH_USER,
                             verbose=True)
    except Exception:
        # to skip files without perms safely
        pass
Example #55
def ceph_changed():
    utils.juju_log('INFO', 'Start Ceph Relation Changed')
    auth = utils.relation_get('auth')
    key = utils.relation_get('key')
    use_syslog = utils.relation_get('use_syslog')
    if None in [auth, key]:
        utils.juju_log('INFO', 'Missing key or auth in relation')
        return

    ceph.configure(service=SERVICE_NAME, key=key, auth=auth,
                   use_syslog=use_syslog)

    sizemb = int(utils.config_get('block-size')) * 1024
    rbd_img = utils.config_get('rbd-name')
    blk_device = '/dev/rbd/%s/%s' % (POOL_NAME, rbd_img)
    rbd_pool_rep_count = utils.config_get('ceph-osd-replication-count')
    mount_point = '/srv/juju/volumes/%s-%s' % (SERVICE_NAME, UNIT_ID)
    ceph.ensure_ceph_storage(service=SERVICE_NAME, pool=POOL_NAME,
                             rbd_img=rbd_img, sizemb=sizemb,
                             fstype='ext4', mount_point=mount_point,
                             blk_device=blk_device,
                             system_services=['mysql'],
                             rbd_pool_replicas=rbd_pool_rep_count)

    mount.mount()
    host.service_start('jetty')
    utils.juju_log('INFO', 'Finish Ceph Relation Changed')
Example #56
def ensure_ceph_storage(service,
                        pool,
                        rbd_img,
                        sizemb,
                        mount_point,
                        blk_device,
                        fstype,
                        system_services=[],
                        rbd_pool_replicas=2):
    """
    Ensures given pool and RBD image exists, is mapped to a block device,
    and the device is formatted.
    """
    # Ensure pool, RBD image, RBD mappings are in place.
    if not pool_exists(service, pool):
        utils.juju_log('INFO', 'ceph: Creating new pool %s.' % pool)
        create_pool(service, pool, replicas=rbd_pool_replicas)

    if not rbd_exists(service, pool, rbd_img):
        utils.juju_log('INFO', 'ceph: Creating RBD image (%s).' % rbd_img)
        create_rbd_image(service, pool, rbd_img, sizemb)

    if not image_mapped(rbd_img):
        utils.juju_log('INFO', 'ceph: Mapping RBD Image as a Block Device.')
        map_block_storage(service, pool, rbd_img)
    if not filesystem_mounted(mount_point):
        make_filesystem(blk_device, fstype)
Example #57
def cluster_joined():
    unison.ssh_authorized_peers(user=rabbit.SSH_USER,
                                group='rabbit',
                                peer_interface='cluster',
                                ensure_local_user=True)
    if utils.is_relation_made('ha'):
        utils.juju_log(
            'INFO', 'hacluster relation is present, skipping native '
            'rabbitmq cluster config.')
        return
    l_unit_no = os.getenv('JUJU_UNIT_NAME').split('/')[1]
    r_unit_no = os.getenv('JUJU_REMOTE_UNIT').split('/')[1]
    if l_unit_no > r_unit_no:
        utils.juju_log('INFO', 'cluster_joined: Relation greater.')
        return
    rabbit.COOKIE_PATH = '/var/lib/rabbitmq/.erlang.cookie'
    if not os.path.isfile(rabbit.COOKIE_PATH):
        utils.juju_log('ERROR',
                       'erlang cookie missing from %s' % rabbit.COOKIE_PATH)
        return
    cookie = open(rabbit.COOKIE_PATH, 'r').read().strip()

    # add parent host to the relation
    local_hostname = subprocess.check_output(['hostname']).strip()
    utils.relation_set(cookie=cookie, host=local_hostname)
Example #58
def set_node_name(name):
    # update or append RABBITMQ_NODENAME to environment config.
    # rabbitmq.conf.d is not present on all releases, so use or create
    # rabbitmq-env.conf instead.
    if not os.path.isfile(ENV_CONF):
        utils.juju_log('INFO', '%s does not exist, creating.' % ENV_CONF)
        with open(ENV_CONF, 'wb') as out:
            out.write('RABBITMQ_NODENAME=%s\n' % name)
        return

    out = []
    f = False
    for line in open(ENV_CONF).readlines():
        if line.strip().startswith('RABBITMQ_NODENAME'):
            f = True
            line = 'RABBITMQ_NODENAME=%s\n' % name
        out.append(line)
    if not f:
        out.append('RABBITMQ_NODENAME=%s\n' % name)
    utils.juju_log('INFO',
                   'Updating %s, RABBITMQ_NODENAME=%s' % (ENV_CONF, name))
    with open(ENV_CONF, 'wb') as conf:
        conf.write(''.join(out))
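A hypothetical caller, following RabbitMQ's rabbit@<hostname> node-naming convention:

local_hostname = subprocess.check_output(['hostname']).strip()
set_node_name('rabbit@%s' % local_hostname)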
Example #59
def make_filesystem(blk_device, fstype='ext4'):
    count = 0
    e_noent = os.errno.ENOENT
    while not os.path.exists(blk_device):
        if count >= 10:
            utils.juju_log('ERROR', 'ceph: gave up waiting on block '
                                    'device %s' % blk_device)
            raise IOError(e_noent, os.strerror(e_noent), blk_device)
        utils.juju_log('INFO', 'ceph: waiting for block device %s '
                               'to appear' % blk_device)
        count += 1
        time.sleep(1)
    else:
        utils.juju_log('INFO', 'ceph: Formatting block device %s '
                               'as filesystem %s.' % (blk_device, fstype))
        execute(['mkfs', '-t', fstype, blk_device])
Example #60
def create_user(user, password, admin=False):
    exists, is_admin = user_exists(user)

    if not exists:
        cmd = [RABBITMQ_CTL, 'add_user', user, password]
        subprocess.check_call(cmd)
        utils.juju_log('INFO', 'Created new user (%s).' % user)

    if admin == is_admin:
        return

    if admin:
        cmd = [RABBITMQ_CTL, 'set_user_tags', user, 'administrator']
        utils.juju_log('INFO', 'Granting user (%s) admin access.' % user)
    else:
        cmd = [RABBITMQ_CTL, 'set_user_tags', user]
        utils.juju_log('INFO', 'Revoking user (%s) admin access.' % user)