def handle_requests():
    db = endpoint_from_name('database')
    users = unitdata.kv().get('charm.users', {})
    root_password = unitdata.kv().get('charm.root-password')
    connection = mysql.connector.connect(user='******',
                                         password=root_password,
                                         host='mariadb')
    cursor = None
    try:
        cursor = connection.cursor()
        for request in db.new_requests:
            # determine db_name, username, and password for request,
            # generating each if needed
            if request.application_name not in users:
                users[request.application_name] = (host.pwgen(20),
                                                   host.pwgen(20))
            username, password = users[request.application_name]
            db_name = request.database_name or request.application_name

            # create the database and grant the user access
            layer.mariadb_k8s.create_database(cursor, db_name)
            if not layer.mariadb_k8s.grant_exists(cursor, db_name, username,
                                                  request.address):
                layer.mariadb_k8s.create_grant(cursor, db_name, username,
                                               password, request.address)

            # fulfill this request
            request.provide_database(db_name, username, password)
        connection.commit()
    finally:
        if cursor:
            cursor.close()
        connection.close()
Example #2
    def test_pwgen(self):
        pw = host.pwgen()
        self.assertGreaterEqual(len(pw), 35, 'Password is too short')

        pw = host.pwgen(10)
        self.assertEqual(len(pw), 10, 'Password incorrect length')

        pw2 = host.pwgen(10)
        self.assertNotEqual(pw, pw2, 'Duplicated password')
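
The tests above exercise charmhelpers' host.pwgen, which is not itself shown on this page. As context only, here is a minimal stand-in with the same call shape; the character set and the default-length behaviour are assumptions, not the library's exact implementation.

import random
import string


def pwgen(length=None):
    """Sketch of a pwgen-style helper: return a random alphanumeric
    password of the requested length, or of a random length of at least
    35 characters when none is given (matching what test_pwgen expects)."""
    if length is None:
        length = random.randint(35, 45)  # assumed default range
    alphabet = string.ascii_letters + string.digits
    rng = random.SystemRandom()  # draw from OS entropy, not the default PRNG
    return ''.join(rng.choice(alphabet) for _ in range(length))
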
Example #3
    def setup_token_store(self):
        """Set up the token store for barbican to use, create a pin and
        user_pin and store those details locally so that they can be used when
        Barbican connects.

        Performs as needed:

        softhsm2-util --init-token --free --label "barbican_token" --pin <pin>
                      --so-pin <so-pin>

        The <pin> and <so-pin> are generated randomly and saved to a
        configuration file.

        If the <pin> and <so-pin> configuration file doesn't exist, then the
        token directory is deleted and re-initialised.

        Thus, when upgrading the charm, it checks whether it has already been
        run on this host and, if so, does not re-initialise the token store;
        otherwise the token store is re-initialised.

        The configuration file for the softhsm2 library is also written.
        """
        # see if the <pin> and <so_pin> exist?
        pin, so_pin = read_pins_from_store()
        if pin is not None:
            # return as the token store is already set up
            return
        # see if the token directory exists - if so, delete it.
        if os.path.exists(TOKEN_STORE):
            if os.path.isdir(TOKEN_STORE):
                shutil.rmtree(TOKEN_STORE)
            else:
                os.remove(TOKEN_STORE)
        os.makedirs(TOKEN_STORE)
        # We need the token store to be 1777 so that whoever creates a token
        # can also gain access to it - the token will be created by the
        # barbican user.
        os.chmod(TOKEN_STORE, 0o1777)
        # now create the token store
        pin = ch_core_host.pwgen(PIN_LENGTH)
        so_pin = ch_core_host.pwgen(PIN_LENGTH)
        write_pins_to_store(pin, so_pin)
        cmd = [
            'sudo', '-u', 'barbican',
            SOFTHSM2_UTIL_CMD,
            '--init-token', '--free',
            '--label', BARBICAN_TOKEN_LABEL,
            '--pin', pin,
            '--so-pin', so_pin]
        subprocess.check_call(cmd)
        hookenv.log("Initialised token store.")
Example #4
def leader_elected():
    if is_leader():
        if not leader_get('heat-domain-admin-passwd'):
            try:
                leader_set({'heat-domain-admin-passwd': pwgen(32)})
            except subprocess.CalledProcessError as e:
                log('leader_set: heat-domain-admin-password failed: {}'
                    .format(str(e)), level=WARNING)
        if not leader_get('heat-auth-encryption-key'):
            try:
                leader_set({'heat-auth-encryption-key': pwgen(32)})
            except subprocess.CalledProcessError as e:
                log('leader_set: heat-auth-encryption-key failed: {}'
                    .format(str(e)), level=WARNING)
Example #5
    def setup_token_store(self):
        """Set up the token store for barbican to use, create a pin and
        user_pin and store those details locally so that they can be used when
        Barbican connects.

        Performs as needed:

        softhsm2-util --init-token --free --label "barbican_token" --pin <pin>
                      --so-pin <so-pin>

        The <pin> and <so-pin> are generated randomly and saved to a
        configuration file.

        If the <pin> and <so-pin> configuration file doesn't exist, then the
        token directory is deleted and re-initialised.

        Thus, when upgrading the charm, it checks whether it has already been
        run on this host and, if so, does not re-initialise the token store;
        otherwise the token store is re-initialised.

        The configuration file for the softhsm2 library is also written.
        """
        # see if the <pin> and <so_pin> exist?
        pin, so_pin = read_pins_from_store()
        if pin is not None:
            # return as the token store is already set up
            return
        # see if the token directory exists - if so, delete it.
        if os.path.exists(TOKEN_STORE):
            if os.path.isdir(TOKEN_STORE):
                shutil.rmtree(TOKEN_STORE)
            else:
                os.remove(TOKEN_STORE)
        os.makedirs(TOKEN_STORE)
        # We need the token store to be 1777 so that whoever creates a token
        # can also gain access to it - the token will be created by the
        # barbican user.
        os.chmod(TOKEN_STORE, 0o1777)
        # now create the token store
        pin = ch_core_host.pwgen(PIN_LENGTH)
        so_pin = ch_core_host.pwgen(PIN_LENGTH)
        write_pins_to_store(pin, so_pin)
        cmd = [
            'sudo', '-u', 'barbican', SOFTHSM2_UTIL_CMD, '--init-token',
            '--free', '--label', BARBICAN_TOKEN_LABEL, '--pin', pin,
            '--so-pin', so_pin
        ]
        subprocess.check_call(cmd)
        hookenv.log("Initialised token store.")
Example #6
    def get_mysql_password_on_disk(self, username=None, password=None):
        """Retrieve, generate or store a mysql password for the provided
        username on disk."""
        if username:
            template = self.user_passwd_file_template
            passwd_file = template.format(username)
        else:
            passwd_file = self.root_passwd_file_template

        _password = None
        if os.path.exists(passwd_file):
            log("Using existing password file '%s'" % passwd_file, level=DEBUG)
            with open(passwd_file, 'r') as passwd:
                _password = passwd.read().strip()
        else:
            log("Generating new password file '%s'" % passwd_file, level=DEBUG)
            if not os.path.isdir(os.path.dirname(passwd_file)):
                # NOTE: need to ensure this is not mysql root dir (which needs
                # to be mysql readable)
                mkdir(os.path.dirname(passwd_file), owner='root', group='root',
                      perms=0o770)
                # Force permissions - for some reason the chmod in makedirs
                # fails
                os.chmod(os.path.dirname(passwd_file), 0o770)

            _password = password or pwgen(length=32)
            write_file(passwd_file, _password, owner='root', group='root',
                       perms=0o660)

        return _password
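
Several snippets on this page (this one, get_rabbit_password_on_disk, get_encryption_key) share the same read-or-generate-on-disk pattern. The following is a self-contained sketch of that pattern only; the path handling and permission choices are illustrative and omit the charm-specific ownership handling shown above.

import os
import secrets
import string


def read_or_generate_password(path, length=32):
    """Return the password stored at `path`, generating one with
    restrictive permissions on first use so later calls read back the
    same value."""
    if os.path.exists(path):
        with open(path) as f:
            return f.read().strip()
    os.makedirs(os.path.dirname(path) or '.', mode=0o700, exist_ok=True)
    alphabet = string.ascii_letters + string.digits
    password = ''.join(secrets.choice(alphabet) for _ in range(length))
    # Create the file 0o600 so the secret is never world-readable.
    fd = os.open(path, os.O_WRONLY | os.O_CREAT | os.O_EXCL, 0o600)
    with os.fdopen(fd, 'w') as f:
        f.write(password)
    return password
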
Example #7
def create_monitoring_stanza(service_name="haproxy_monitoring"):
    config_data = config_get()
    if config_data['enable_monitoring'] is False:
        return None
    monitoring_password = get_monitoring_password()
    if config_data['monitoring_password'] != "changeme":
        monitoring_password = config_data['monitoring_password']
    elif (monitoring_password is None
          and config_data['monitoring_password'] == "changeme"):
        monitoring_password = pwgen(length=20)
    monitoring_config = []
    monitoring_config.append("mode http")
    monitoring_config.append("acl allowed_cidr src %s" %
                             config_data['monitoring_allowed_cidr'])
    monitoring_config.append("block unless allowed_cidr")
    monitoring_config.append("stats enable")
    monitoring_config.append("stats uri /")
    monitoring_config.append("stats realm Haproxy\ Statistics")
    monitoring_config.append(
        "stats auth %s:%s" %
        (config_data['monitoring_username'], monitoring_password))
    monitoring_config.append("stats refresh %d" %
                             config_data['monitoring_stats_refresh'])
    return create_listen_stanza(service_name, "0.0.0.0",
                                config_data['monitoring_port'],
                                monitoring_config)
def get_rabbit_password_on_disk(username, password=None, local=False):
    ''' Retrieve, generate or store a rabbit password for
    the provided username on disk'''
    if local:
        _passwd_file = _local_named_passwd.format(service_name(), username)
    else:
        _passwd_file = _named_passwd.format(service_name(), username)

    _password = None
    if os.path.exists(_passwd_file):
        with open(_passwd_file, 'r') as passwd:
            _password = passwd.read().strip()
    else:
        mkdir(os.path.dirname(_passwd_file),
              owner=RABBIT_USER,
              group=RABBIT_USER,
              perms=0o775)
        os.chmod(os.path.dirname(_passwd_file), 0o775)
        _password = password or pwgen(length=64)
        write_file(_passwd_file,
                   _password,
                   owner=RABBIT_USER,
                   group=RABBIT_USER,
                   perms=0o660)

    return _password
def contrail_ifmap_joined():
    if is_leader():
        creds = leader_get("ifmap-creds")
        creds = json.loads(creds) if creds else {}

        # prune credentials because we can't remove them directly lp #1469731
        creds = {
            rid: {
                unit: units[unit]
                for unit, units in ((unit, creds[rid])
                                    for unit in related_units(rid))
                if unit in units
            }
            for rid in relation_ids("contrail-ifmap") if rid in creds
        }

        rid = relation_id()
        if rid not in creds:
            creds[rid] = {}
        cs = creds[rid]
        unit = remote_unit()
        if unit in cs:
            return
        # generate new credentials for unit
        cs[unit] = {"username": unit, "password": pwgen(32)}
        leader_set({"ifmap-creds": json.dumps(creds)})
        write_ifmap_config()
        service_restart("supervisor-config")
        relation_set(creds=json.dumps(cs))
Example #10
def lxd_trust_password():
    db = kv()
    password = db.get('lxd-password')
    if not password:
        password = db.set('lxd-password', pwgen(PW_LENGTH))
        db.flush()
    return password
def master_joined(interface='master'):
    cluster_id = get_cluster_id()
    if not is_clustered():
        log("Not clustered yet", level=DEBUG)
        return
    relation_settings = {}
    leader_settings = leader_get()
    if is_leader():
        if not leader_settings.get('async-rep-password'):
            # Replication password cannot be longer than 32 characters
            leader_set({'async-rep-password': pwgen(32)})
            return
        configure_master()
        master_address, master_file, master_position = (
            get_master_status(interface))
        if leader_settings.get('master-address') != master_address:
            leader_settings['master-address'] = master_address
            leader_settings['master-file'] = master_file
            leader_settings['master-position'] = master_position
        leader_set(leader_settings)
        relation_settings = {'leader': True}
    else:
        relation_settings = {'leader': False}
    relation_settings['cluster_id'] = cluster_id
    relation_settings['master_address'] = leader_settings['master-address']
    relation_settings['master_file'] = leader_settings['master-file']
    relation_settings['master_password'] = \
        leader_settings['async-rep-password']
    relation_settings['master_position'] = leader_settings['master-position']
    log("Setting master relation: '{}'".format(relation_settings), level=INFO)
    for rid in relation_ids(interface):
        relation_set(relation_id=rid, relation_settings=relation_settings)
Example #12
def obtain_munge_key(*args):
    # get flags
    munge_key = hookenv.config().get('munge_key')
    # Generate a munge key if it has not been provided via charm config
    if not munge_key:
        munge_key = host.pwgen(length=4096)
    leadership.leader_set(munge_key=munge_key)
Example #13
    def get_mysql_password_on_disk(self, username=None, password=None):
        """Retrieve, generate or store a mysql password for the provided
        username on disk."""
        if username:
            template = self.user_passwd_file_template
            passwd_file = template.format(username)
        else:
            passwd_file = self.root_passwd_file_template

        _password = None
        if os.path.exists(passwd_file):
            log("Using existing password file '%s'" % passwd_file, level=DEBUG)
            with open(passwd_file, 'r') as passwd:
                _password = passwd.read().strip()
        else:
            log("Generating new password file '%s'" % passwd_file, level=DEBUG)
            if not os.path.isdir(os.path.dirname(passwd_file)):
                # NOTE: need to ensure this is not mysql root dir (which needs
                # to be mysql readable)
                mkdir(os.path.dirname(passwd_file), owner='root', group='root',
                      perms=0o770)
                # Force permissions - for some reason the chmod in makedirs
                # fails
                os.chmod(os.path.dirname(passwd_file), 0o770)

            _password = password or pwgen(length=32)
            write_file(passwd_file, _password, owner='root', group='root',
                       perms=0o660)

        return _password
Example #14
    def on_changed(self, event):
        if not self.cluster.is_ag_ready or not self.ha.is_ha_cluster_ready:
            logger.warning('Deferring DB on_changed() until the AG and '
                           'the HA cluster are ready.')
            event.defer()
            return
        if not self.cluster.is_primary_replica:
            logger.warning('Unit is not the SQL Server primary replica. '
                           'Skipping DB on_changed().')
            return
        rel_data = self.db_rel_data(event)
        if not rel_data:
            logging.info("The db relation data is not available yet.")
            return

        logging.info("Handling db request.")
        db_user_password = host.pwgen(32)
        db_client = self.cluster.mssql_db_client()
        db_client.create_database(db_name=rel_data['database'],
                                  ag_name=self.cluster.AG_NAME)
        db_client.create_login(name=rel_data['username'],
                               password=db_user_password)
        db_client.grant_access(db_name=rel_data['database'],
                               db_user_name=rel_data['username'])
        # Notify the secondary replicas, so they can sync the new SQL logins
        # from the primary replica.
        self.cluster.set_unit_rel_nonce()

        rel = self.model.get_relation(event.relation.name, event.relation.id)
        # advertise on app
        rel.data[self.app]['db_host'] = self.ha.bind_address
        rel.data[self.app]['password'] = db_user_password
        # advertise on unit
        rel.data[self.unit]['db_host'] = self.ha.bind_address
        rel.data[self.unit]['password'] = db_user_password
Example #15
def setup_uaac_client(service_name):
    uaactx = contexts.UAARelation()
    orchctx = contexts.OrchestratorRelation()
    secretctx = contexts.StoredContext(utils.secrets_file, {
        'ui_secret': host.pwgen(20),
    })
    domain = orchctx[orchctx.name][0]['domain']
    uaa_secret = uaactx[uaactx.name][0]['admin_client_secret']
    ui_secret = secretctx['ui_secret']

    shell("uaac target http://uaa.%s" % domain)
    shell("uaac token client get admin -s %s" % uaa_secret)
    client_needs_setup = bool(call(". %s/.boilerplate && uaac client get admin_ui_client" % home, shell=True))
    if client_needs_setup:
        authorities = yaml.safe_load(shell('uaac client get admin'))['authorities']
        if 'scim.write' not in authorities:
            authorities += ' scim.write'
            shell('uaac client update admin --authorities "%s"' % authorities)
            shell("uaac token client get admin -s %s" % uaa_secret)
        shell('uaac group add admin_ui.admin')
        shell('uaac member add admin_ui.admin admin')
        shell('uaac client add admin_ui_client'
              ' --authorities cloud_controller.admin,cloud_controller.read,cloud_controller.write,openid,scim.read'
              ' --authorized_grant_types authorization_code,client_credentials,refresh_token'
              ' --autoapprove true'
              ' --scope admin_ui.admin,admin_ui.user,openid'
              ' -s %s' % ui_secret)
Example #16
def create_repo(git):
    username = git.get_remote('username')
    service = remote_service_name()
    repo_path = os.path.join(repo_root(), service+'.git')

    host.add_group(username)
    host.adduser(username, password=host.pwgen(32), shell='/usr/bin/git-shell')

    ssh_public_key = git.get_remote('ssh-public-key')
    dotssh_dir = '/home/{}/.ssh/'.format(username)
    host.mkdir(dotssh_dir, username, username, 0o700)
    host.write_file(dotssh_dir + 'authorized_keys',
                    ssh_public_key.encode('utf-8'),
                    username, username, 0o400)

    host.mkdir(repo_path, group=username, perms=0o770)
    subprocess.check_call(['git', 'init', '--bare', '--shared=group', repo_path])

    # Create server-side hook that will inform
    # clients whenever changes are committed.
    create_git_hooks(repo_path, username)

    # Make the repo owned by <username>.
    chown_repo(repo_path, username)

    # TODO(axw) read and publish all host keys.
    ssh_host_keys = [open(SSH_HOST_RSA_KEY).read()]
    git.configure(repo_path, ssh_host_keys)
    set_state('git.repo.created')
    status_set('active', '')
Example #17
def lxd_trust_password():
    db = kv()
    password = db.get('lxd-password')
    if not password:
        password = db.set('lxd-password', pwgen(PW_LENGTH))
        db.flush()
    return password
Example #18
    def get_admin_password(self) -> str:
        try:
            return self.leader_data["admin_password"]
        except KeyError:
            pw = host.pwgen(40)
            self.leader_data["admin_password"] = pw
            return pw
Example #19
def leader_elected():
    if is_leader():
        if not leader_get('heat-domain-admin-passwd'):
            try:
                leader_set({'heat-domain-admin-passwd': pwgen(32)})
            except subprocess.CalledProcessError as e:
                log('leader_set: heat-domain-admin-password failed: {}'.format(
                    str(e)),
                    level=WARNING)
        if not leader_get('heat-auth-encryption-key'):
            try:
                leader_set({'heat-auth-encryption-key': pwgen(32)})
            except subprocess.CalledProcessError as e:
                log('leader_set: heat-auth-encryption-key failed: {}'.format(
                    str(e)),
                    level=WARNING)
Example #20
def create_monitoring_stanza(service_name="haproxy_monitoring"):
    config_data = config_get()
    if config_data['enable_monitoring'] is False:
        return None
    monitoring_password = get_monitoring_password()
    if config_data['monitoring_password'] != "changeme":
        monitoring_password = config_data['monitoring_password']
    elif (monitoring_password is None and
          config_data['monitoring_password'] == "changeme"):
        monitoring_password = pwgen(length=20)
    monitoring_config = []
    monitoring_config.append("mode http")
    monitoring_config.append("acl allowed_cidr src %s" %
                             config_data['monitoring_allowed_cidr'])
    monitoring_config.append("block unless allowed_cidr")
    monitoring_config.append("stats enable")
    monitoring_config.append("stats uri /")
    monitoring_config.append("stats realm Haproxy\ Statistics")
    monitoring_config.append("stats auth %s:%s" %
                             (config_data['monitoring_username'],
                              monitoring_password))
    monitoring_config.append("stats refresh %d" %
                             config_data['monitoring_stats_refresh'])
    return create_listen_stanza(service_name,
                                "0.0.0.0",
                                config_data['monitoring_port'],
                                monitoring_config)
def master_joined(interface='master'):
    cluster_id = get_cluster_id()
    if not is_clustered():
        log("Not clustered yet", level=DEBUG)
        return
    relation_settings = {}
    leader_settings = leader_get()
    if is_leader():
        if not leader_settings.get('async-rep-password'):
            # Replication password cannot be longer than 32 characters
            leader_set({'async-rep-password': pwgen(32)})
            return
        configure_master()
        master_address, master_file, master_position = (
            get_master_status(interface))
        if leader_settings.get('master-address') != master_address:
            leader_settings['master-address'] = master_address
            leader_settings['master-file'] = master_file
            leader_settings['master-position'] = master_position
        leader_set(leader_settings)
        relation_settings = {'leader': True}
    else:
        relation_settings = {'leader': False}
    relation_settings['cluster_id'] = cluster_id
    relation_settings['master_address'] = leader_settings['master-address']
    relation_settings['master_file'] = leader_settings['master-file']
    relation_settings['master_password'] = \
        leader_settings['async-rep-password']
    relation_settings['master_position'] = leader_settings['master-position']
    log("Setting master relation: '{}'".format(relation_settings), level=INFO)
    for rid in relation_ids(interface):
        relation_set(relation_id=rid, relation_settings=relation_settings)
def contrail_ifmap_joined():
    if is_leader():
        creds = leader_get("ifmap-creds")
        creds = json.loads(creds) if creds else {}

        # prune credentials because we can't remove them directly lp #1469731
        creds = { rid: { unit: units[unit]
                         for unit, units in
                         ((unit, creds[rid]) for unit in related_units(rid))
                         if unit in units }
                  for rid in relation_ids("contrail-ifmap")
                  if rid in creds }

        rid = relation_id()
        if rid not in creds:
            creds[rid] = {}
        cs = creds[rid]
        unit = remote_unit()
        if unit in cs:
            return
        # generate new credentials for unit
        cs[unit] = { "username": unit, "password": pwgen(32) }
        leader_set({"ifmap-creds": json.dumps(creds)})
        write_ifmap_config()
        service_restart("supervisor-config")
        relation_set(creds=json.dumps(cs))
Example #23
def reset_default_password():
    # We need a big timeout here, as the cassandra user actually
    # springs into existence some time after Cassandra has started
    # up and is accepting connections.
    with cassandra.connect('cassandra', 'cassandra', timeout=180) as session:
        # But before we close this security hole, we need to use these
        # credentials to create a different admin account.
        helpers.status_set('maintenance', 'Creating initial superuser account')
        username, password = '******', host.pwgen()
        pwhash = cassandra.encrypt_password(password)
        cassandra.ensure_user(session, username, pwhash, superuser=True)
        leadership.leader_set(username=username, password=password)
        helpers.status_set('maintenance', 'Changing default admin password')
        cassandra.query(session, 'ALTER USER cassandra WITH PASSWORD %s',
                        ConsistencyLevel.ALL, (host.pwgen(),))
    hookenv.leader_set(default_admin_password_changed=True)
    def set_master_cert(self):
        master_key_password = host.pwgen(32)
        master_cert_key_password = host.pwgen(32)
        db_client = self.mssql_db_client()
        db_client.create_master_encryption_key(master_key_password)
        cert, cert_key = db_client.create_master_cert(master_cert_key_password)
        self.set_app_rel_data({
            'master_key_password': master_key_password,
            'master_cert': b64encode(cert).decode(),
            'master_cert_key': b64encode(cert_key).decode(),
            'master_cert_key_password': master_cert_key_password,
        })
        self.state.master_cert_configured = True
Example #25
def initial_setup():
    status_set('maintenance', 'Initial setup of slurm-controller')
    # Disable slurmd on controller
    service_pause(SLURMD_SERVICE)
    # Setup munge key
    munge_key = pwgen(length=4096)
    config().update({'munge_key': munge_key})
    render_munge_key(config=config())
Example #26
    def __call__(self):
        ''' Provide all configuration for Horizon '''
        ctxt = {
            'compress_offline':
            bool_from_string(config('offline-compression')),
            'debug':
            bool_from_string(config('debug')),
            'customization_module':
            config('customization-module'),
            'default_role':
            config('default-role'),
            "webroot":
            config('webroot') or '/',
            "ubuntu_theme":
            bool_from_string(config('ubuntu-theme')),
            "default_theme":
            config('default-theme'),
            "custom_theme":
            config('custom-theme'),
            "secret":
            config('secret') or pwgen(),
            'support_profile':
            config('profile') if config('profile') in ['cisco'] else None,
            "neutron_network_dvr":
            config("neutron-network-dvr"),
            "neutron_network_l3ha":
            config("neutron-network-l3ha"),
            "neutron_network_lb":
            config("neutron-network-lb"),
            "neutron_network_firewall":
            config("neutron-network-firewall"),
            "neutron_network_vpn":
            config("neutron-network-vpn"),
            "cinder_backup":
            config("cinder-backup"),
            "allow_password_autocompletion":
            config("allow-password-autocompletion"),
            "password_retrieve":
            config("password-retrieve"),
            'default_domain':
            config('default-domain'),
            'multi_domain':
            False if config('default-domain') else True,
            "default_create_volume":
            config("default-create-volume"),
            'image_formats':
            config('image-formats'),
            'api_result_limit':
            config('api-result-limit') or 1000,
            'enable_fip_topology_check':
            config('enable-fip-topology-check'),
            'session_timeout':
            config('session-timeout'),
            'dropdown_max_items':
            config('dropdown-max-items')
        }

        return ctxt
Example #27
def check_adminuser():
    """
    CREATE TABLE `user` (
    `id` INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL
    , `version` INTEGER NOT NULL
    , `login` TEXT NOT NULL
    , `email` TEXT NOT NULL
    , `name` TEXT NULL
    , `password` TEXT NULL
    , `salt` TEXT NULL
    , `rands` TEXT NULL
    , `company` TEXT NULL
    , `org_id` INTEGER NOT NULL
    , `is_admin` INTEGER NOT NULL
    , `email_verified` INTEGER NULL
    , `theme` TEXT NULL
    , `created` DATETIME NOT NULL
    , `updated` DATETIME NOT NULL
    );
    INSERT INTO "user" VALUES(1,0,'admin','*****@*****.**','BootStack Team','309bc4e78bc60d02dc0371d9e9fa6bf9a809d5dc25c745b9e3f85c3ed49c6feccd4ffc96d1db922f4297663a209e93f7f2b6','LZeJ3nSdrC','hseJcLcnPN','',1,1,0,'light','2016-01-22 12:00:08','2016-01-22 12:02:13');
    """
    config = hookenv.config()
    passwd = config.get('admin_password', False)
    if not passwd:
        passwd = host.pwgen(16)
        kv = unitdata.kv()
        kv.set('grafana.admin_password', passwd)

    try:
        stmt = "UPDATE user SET email=?, name='BootStack Team'"
        stmt += ", password=?, theme='light'"
        stmt += " WHERE id = ?"

        conn = sqlite3.connect('/var/lib/grafana/grafana.db', timeout=30)
        cur = conn.cursor()
        query = cur.execute('SELECT id, login, salt FROM user')
        for row in query.fetchall():
            if row[1] == 'admin':
                nagios_context = config.get('nagios_context', False)
                if not nagios_context:
                    nagios_context = 'UNKNOWN'
                email = '*****@*****.**' % nagios_context
                hpasswd = hpwgen(passwd, row[2])
                if hpasswd:
                    cur.execute(stmt, (email, hpasswd, row[0]))
                    conn.commit()
                    hookenv.log('[*] admin password updated on database')
                else:
                    hookenv.log(
                        'Could not update user table: hpwgen func failed')
                break
        conn.close()
    except sqlite3.OperationalError as e:
        hookenv.log('check_adminuser::sqlite3.OperationError: {}'.format(e))
        return
Example #28
def obtain_munge_key(*args):
    # get flags
    munge_key = hookenv.config().get('munge_key')
    # Generate a munge key if it has not been provided via charm config
    if not munge_key:
        hookenv.log(
            'obtain_munge_key(): No key in charm config, generating new key')
        munge_key = host.pwgen(length=4096)
    else:
        hookenv.log('obtain_munge_key(): Using key from charm config')
    leadership.leader_set(munge_key=munge_key)
    def __call__(self):
        ''' Provide all configuration for Horizon '''
        ctxt = {
            'compress_offline': config('offline-compression') in ['yes', True],
            'debug': config('debug') in ['yes', True],
            'default_role': config('default-role'),
            "webroot": config('webroot'),
            "ubuntu_theme": config('ubuntu-theme') in ['yes', True],
            "secret": config('secret') or pwgen()
        }
        return ctxt
Example #30
def check_adminuser():
    """
    CREATE TABLE `user` (
    `id` INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL
    , `version` INTEGER NOT NULL
    , `login` TEXT NOT NULL
    , `email` TEXT NOT NULL
    , `name` TEXT NULL
    , `password` TEXT NULL
    , `salt` TEXT NULL
    , `rands` TEXT NULL
    , `company` TEXT NULL
    , `org_id` INTEGER NOT NULL
    , `is_admin` INTEGER NOT NULL
    , `email_verified` INTEGER NULL
    , `theme` TEXT NULL
    , `created` DATETIME NOT NULL
    , `updated` DATETIME NOT NULL
    );
    INSERT INTO "user" VALUES(1,0,'admin','*****@*****.**','BootStack Team','309bc4e78bc60d02dc0371d9e9fa6bf9a809d5dc25c745b9e3f85c3ed49c6feccd4ffc96d1db922f4297663a209e93f7f2b6','LZeJ3nSdrC','hseJcLcnPN','',1,1,0,'light','2016-01-22 12:00:08','2016-01-22 12:02:13');
    """
    config = hookenv.config()
    passwd = config.get('admin_password', False)
    if not passwd:
        passwd = host.pwgen(16)
        kv = unitdata.kv()
        kv.set('grafana.admin_password', passwd)

    try:
        stmt = "UPDATE user SET email=?, name='BootStack Team'"
        stmt += ", password=?, theme='light'"
        stmt += " WHERE id = ?"

        conn = sqlite3.connect('/var/lib/grafana/grafana.db', timeout=30)
        cur = conn.cursor()
        query = cur.execute('SELECT id, login, salt FROM user')
        for row in query.fetchall():
            if row[1] == 'admin':
                nagios_context = config.get('nagios_context', False)
                if not nagios_context:
                    nagios_context = 'UNKNOWN'
                email = '*****@*****.**' % nagios_context
                hpasswd = hpwgen(passwd, row[2])
                if hpasswd:
                    cur.execute(stmt, (email, hpasswd, row[0]))
                    conn.commit()
                    hookenv.log('[*] admin password updated on database')
                else:
                    hookenv.log('Could not update user table: hpwgen func failed')
                break
        conn.close()
    except sqlite3.OperationalError as e:
        hookenv.log('check_adminuser::sqlite3.OperationError: {}'.format(e))
        return
Example #31
def update_app_kv_hashes():
    try:
        app_kv = vault_kv.VaultAppKV()
        if app_kv.any_changed():
            if hookenv.is_leader():
                # force hooks to run on non-leader units
                hookenv.leader_set({"vault-kv-nonce": host.pwgen(8)})
            # Update the local unit hashes at successful exit
            app_kv.update_hashes()
    except vault_kv.VaultNotReady:
        return
Example #32
    def _admin_data(self):
        """Get a named tuple holding configuration data for the admin user."""
        config = hookenv.config()
        username = config["username"]
        password = config["password"]

        if not password:
            password = host.pwgen(length=15)
            # Save the password to the local state, so it can be accessed
            # by the Credentials class.
            config["_generated-password"] = password

        return _User(username, password)
Example #33
    def get_swift_key(self):
        """Get Swift Key.

        Generate or get existing swift key

        :returns: Key for authenticating against swift
        :rtype: String
        """
        if not self.peers.swift_key and self.unit.is_leader():
            # If the leader create and set the swift key
            _swift_key = ch_host.pwgen()
            self.peers.set_swift_key(_swift_key)
        return self.peers.swift_key
Example #34
    def _admin_data(self):
        """Get a named tuple holding configuration data for the admin user."""
        config = hookenv.config()
        username = config["username"]
        password = config["password"]

        if not password:
            password = host.pwgen(length=15)
            # Save the password to the local state, so it can be accessed
            # by the Credentials class.
            config["_generated-password"] = password

        return _User(username, password)
Example #35
def _publish_database_relation(relid, superuser):
    # The Cassandra service needs to provide a common set of credentials
    # to a client unit. The leader creates these, if none of the other
    # units are found to have published them already (a previously elected
    # leader may have done this). The leader then tickles the other units,
    # firing a hook and giving them the opportunity to copy and publish
    # these credentials.
    username, password = _client_credentials(relid)
    if username is None:
        if hookenv.is_leader():
            # Credentials not set. The leader must generate them. We use
            # the service name so that database permissions remain valid
            # even after the relation is dropped and recreated, or the
            # juju environment rebuilt and the database restored from
            # backups.
            service_name = helpers.get_service_name(relid)
            if not service_name:
                # Per Bug #1555261, we might not yet have related units,
                # so no way to calculate the remote service name and thus
                # the user.
                return  # Try again later.
            username = '******'.format(helpers.get_service_name(relid))
            if superuser:
                username += '_admin'
            password = host.pwgen()
            pwhash = helpers.encrypt_password(password)
            with helpers.connect() as session:
                helpers.ensure_user(session, username, pwhash, superuser)
            # Wake the peers, if any.
            helpers.leader_ping()
        else:
            return  # No credentials yet. Nothing to do.

    # Publish the information the client needs on the relation where
    # they can find it.
    #  - authentication credentials
    #  - address and port
    #  - cluster_name, so clients can differentiate multiple clusters
    #  - datacenter + rack, so clients know what names they can use
    #    when altering keyspace replication settings.
    config = hookenv.config()
    hookenv.relation_set(relid,
                         username=username, password=password,
                         host=helpers.rpc_broadcast_ip_address(),
                         native_transport_port=config['native_transport_port'],
                         rpc_port=config['rpc_port'],
                         cluster_name=config['cluster_name'],
                         datacenter=config['datacenter'],
                         rack=config['rack'])
def setup_munge_key():
    if controller.is_active_controller():
        munge_key = hookenv.config().get('munge_key')
        # Generate a munge key if it has not been provided via charm config
        # and update persistent configuration for future use on active
        # controller only. The munge key must be the same on all nodes in
        # a given cluster so it will be provided to a backup controller
        # and nodes via leader data or relations
        if not munge_key:
            munge_key = host.pwgen(length=4096)
        hookenv.leader_set(munge_key=munge_key)
        # a leader does not receive leadership change events, moreover,
        # no flags are set automatically so this has to be done here
        # for other handles in the same execution to be triggered
        initial_setup()
    def setup_pacemaker_mssql_login(self):
        if self.state.pacemaker_login_ready:
            logger.info('The pacemaker login is already configured.')
            return
        login_password = host.pwgen(32)
        self.cluster.mssql_db_client().create_login(
            name=self.PACEMAKER_LOGIN_NAME,
            password=login_password,
            server_roles=['sysadmin'])
        with open(self.PACEMAKER_LOGIN_CREDS_FILE, 'w') as f:
            f.write('{}\n{}\n'.format(self.PACEMAKER_LOGIN_NAME,
                                      login_password))
        os.chown(self.PACEMAKER_LOGIN_CREDS_FILE, 0, 0)
        os.chmod(self.PACEMAKER_LOGIN_CREDS_FILE, 0o400)
        self.state.pacemaker_login_ready = True
Example #38
    def get_shared_secrets(self):
        secret_context = StoredContext('uaa-secrets.yml', {
            'login_client_secret': host.pwgen(20),
            'admin_client_secret': host.pwgen(20),
            'cc_client_secret': host.pwgen(20),
            'cc_token_secret': host.pwgen(20),
            'service_broker_client_secret': host.pwgen(20),
            'servicesmgmt_client_secret': host.pwgen(20),
        })
        return secret_context
Example #39
def get_encryption_key():
    encryption_path = os.path.join(HEAT_PATH, 'encryption-key')
    if os.path.isfile(encryption_path):
        with open(encryption_path, 'r') as enc:
            encryption = enc.read()
    else:
        # create encryption key and store it
        if not os.path.isdir(HEAT_PATH):
            os.makedirs(HEAT_PATH)
        encryption = config("encryption-key")
        if not encryption:
            # generate random key
            encryption = pwgen(16)
        with open(encryption_path, 'w') as enc:
            enc.write(encryption)
    return encryption
Example #40
def get_encryption_key():
    encryption_path = os.path.join(HEAT_PATH, 'encryption-key')
    if os.path.isfile(encryption_path):
        with open(encryption_path, 'r') as enc:
            encryption = enc.read()
    else:
        # create encryption key and store it
        if not os.path.isdir(HEAT_PATH):
            os.makedirs(HEAT_PATH)
        encryption = config("encryption-key")
        if not encryption:
            # generate random key
            encryption = pwgen(16)
        with open(encryption_path, 'w') as enc:
            enc.write(encryption)
    return encryption
    def __call__(self):
        ''' Provide all configuration for Horizon '''
        projects_yaml = git_default_repos(config('openstack-origin-git'))
        ctxt = {
            'compress_offline':
            bool_from_string(config('offline-compression')),
            'debug':
            bool_from_string(config('debug')),
            'customization_module':
            config('customization-module'),
            'default_role':
            config('default-role'),
            "webroot":
            config('webroot') or '/',
            "ubuntu_theme":
            bool_from_string(config('ubuntu-theme')),
            "default_theme":
            config('default-theme'),
            "secret":
            config('secret') or pwgen(),
            'support_profile':
            config('profile') if config('profile') in ['cisco'] else None,
            "neutron_network_dvr":
            config("neutron-network-dvr"),
            "neutron_network_l3ha":
            config("neutron-network-l3ha"),
            "neutron_network_lb":
            config("neutron-network-lb"),
            "neutron_network_firewall":
            config("neutron-network-firewall"),
            "neutron_network_vpn":
            config("neutron-network-vpn"),
            "cinder_backup":
            config("cinder-backup"),
            "password_retrieve":
            config("password-retrieve"),
            'virtualenv':
            git_pip_venv_dir(projects_yaml)
            if config('openstack-origin-git') else None,
            'default_domain':
            config('default_domain')
            if config('default_domain') else 'default',
            'multi_domain':
            False if config('default_domain') else True
        }

        return ctxt
Example #42
    def connect_and_post():
        log('Connecting swift client...')
        swift_connection = client.Connection(
            authurl=auth_url, user='******',
            key=keystone_ctxt['admin_password'],
            tenant_name=keystone_ctxt['admin_tenant_name'],
            auth_version='2.0')

        account_stats = swift_connection.head_account()
        if 'x-account-meta-temp-url-key' in account_stats:
            log("Temp URL key was already posted.")
            return account_stats['x-account-meta-temp-url-key']

        temp_url_key = pwgen(length=64)
        swift_connection.post_account(headers={'x-account-meta-temp-url-key':
                                               temp_url_key})
        return temp_url_key
Example #43
    def connect_and_post():
        log('Connecting swift client...')
        swift_connection = client.Connection(
            authurl=auth_url, user='******',
            key=keystone_ctxt['admin_password'],
            tenant_name=keystone_ctxt['admin_tenant_name'],
            auth_version='2.0')

        account_stats = swift_connection.head_account()
        if 'x-account-meta-temp-url-key' in account_stats:
            log("Temp URL key was already posted.")
            return account_stats['x-account-meta-temp-url-key']

        temp_url_key = pwgen(length=64)
        swift_connection.post_account(headers={'x-account-meta-temp-url-key':
                                               temp_url_key})
        return temp_url_key
Example #44
def reset_default_password():
    if hookenv.leader_get('default_admin_password_changed'):
        hookenv.log('Default admin password already changed')
        return

    # Cassandra ships with well known credentials, rather than
    # providing a tool to reset credentials. This is a huge security
    # hole we must close.
    try:
        # We need a big timeout here, as the cassandra user actually
        # springs into existence some time after Cassandra has started
        # up and is accepting connections.
        with helpers.connect('cassandra',
                             'cassandra',
                             timeout=120,
                             auth_timeout=120) as session:
            # But before we close this security hole, we need to use these
            # credentials to create a different admin account for the
            # leader, allowing it to create accounts for other nodes as they
            # join. The alternative is restarting Cassandra without
            # authentication, which this charm will likely need to do in the
            # future when we allow Cassandra services to be related together.
            helpers.status_set('maintenance',
                               'Creating initial superuser account')
            username, password = helpers.superuser_credentials()
            pwhash = helpers.encrypt_password(password)
            helpers.ensure_user(session, username, pwhash, superuser=True)
            helpers.set_unit_superusers([hookenv.local_unit()])

            helpers.status_set('maintenance',
                               'Changing default admin password')
            helpers.query(session, 'ALTER USER cassandra WITH PASSWORD %s',
                          cassandra.ConsistencyLevel.ALL, (host.pwgen(), ))
    except cassandra.AuthenticationFailed:
        hookenv.log('Default superuser account already reset')
        try:
            with helpers.connect():
                hookenv.log("Leader's superuser account already created")
        except cassandra.AuthenticationFailed:
            # We have no known superuser credentials. Create the account
            # the hard, slow way. This will be the normal method
            # of creating the service's initial account when we allow
            # services to be related together.
            helpers.create_unit_superuser_hard()

    hookenv.leader_set(default_admin_password_changed=True)
def get_rabbit_password(username, password=None, local=False):
    ''' Retrieve, generate or store a rabbit password for
    the provided username using peer relation cluster'''
    if local:
        return get_rabbit_password_on_disk(username, password, local)
    else:
        migrate_passwords_to_peer_relation()
        _key = '{}.passwd'.format(username)
        try:
            _password = peer_retrieve(_key)
            if _password is None:
                _password = password or pwgen(length=64)
                peer_store(_key, _password)
        except ValueError:
            # cluster relation is not yet started, use on-disk
            _password = get_rabbit_password_on_disk(username, password)
        return _password
Example #46
    def __call__(self):
        ''' Provide all configuration for Horizon '''
        ctxt = {
            'compress_offline': config('offline-compression') in ['yes', True],
            'debug': config('debug') in ['yes', True],
            'default_role': config('default-role'),
            "webroot": config('webroot'),
            "ubuntu_theme": config('ubuntu-theme') in ['yes', True],
            "secret": config('secret') or pwgen(),
            'support_profile': config('profile')
            if config('profile') in ['cisco'] else None,
            "neutron_network_lb": config("neutron-network-lb"),
            "neutron_network_firewall": config("neutron-network-firewall"),
            "neutron_network_vpn": config("neutron-network-vpn"),
        }

        return ctxt
def _get_password(key):
    '''Retrieve named password

    This function will ensure that a consistent named password
    is used across all units in the pxc cluster; the lead unit
    will generate or use the root-password configuration option
    to seed this value into the deployment.

    Once set, it cannot be changed.

    @returns: str: named password or None if unable to retrieve
                    at this point in time
    '''
    _password = leader_get(key)
    if not _password and is_leader():
        _password = config(key) or pwgen()
        leader_set({key: _password})
    return _password
def _get_password(key):
    '''Retrieve named password

    This function will ensure that a consistent named password
    is used across all units in the pxc cluster; the lead unit
    will generate or use the root-password configuration option
    to seed this value into the deployment.

    Once set, it cannot be changed.

    @returns: str: named password or None if unable to retrieve
                    at this point in time
    '''
    _password = leader_get(key)
    if not _password and is_leader():
        _password = config(key) or pwgen()
        leader_set({key: _password})
    return _password
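
The docstring above describes the leader-seeded pattern: exactly one unit generates the secret and every other unit reads it back once leader data has propagated. Below is a self-contained sketch of that control flow in which the Juju leader_get/leader_set calls are stubbed with a plain dict, purely for illustration.

import secrets

# Stand-ins for Juju leader storage, for illustration only.
_leader_data = {}


def leader_get(key):
    return _leader_data.get(key)


def leader_set(settings):
    _leader_data.update(settings)


def get_named_password(key, unit_is_leader, configured=None):
    """Return the shared password for `key`, seeding it exactly once on
    the leader; non-leaders get None until leader data has propagated."""
    password = leader_get(key)
    if not password and unit_is_leader:
        password = configured or secrets.token_urlsafe(24)
        leader_set({key: password})
    return password

In the real charm, leader_get/leader_set come from charmhelpers, and the None case on non-leader units is simply retried in a later hook invocation.
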
Example #49
def _publish_database_relation(relid, superuser):
    # The Cassandra service needs to provide a common set of credentials
    # to a client unit. The leader creates these, if none of the other
    # units are found to have published them already (a previously elected
    # leader may have done this). The leader then tickles the other units,
    # firing a hook and giving them the opportunity to copy and publish
    # these credentials.
    username, password = _client_credentials(relid)
    if username is None:
        if hookenv.is_leader():
            # Credentials not set. The leader must generate them. We use
            # the service name so that database permissions remain valid
            # even after the relation is dropped and recreated, or the
            # juju environment rebuilt and the database restored from
            # backups.
            username = '******'.format(helpers.get_service_name(relid))
            if superuser:
                username += '_admin'
            password = host.pwgen()
            pwhash = helpers.encrypt_password(password)
            with helpers.connect() as session:
                helpers.ensure_user(session, username, pwhash, superuser)
            # Wake the peers, if any.
            helpers.leader_ping()
        else:
            return  # No credentials yet. Nothing to do.

    # Publish the information the client needs on the relation where
    # they can find it.
    #  - authentication credentials
    #  - address and port
    #  - cluster_name, so clients can differentiate multiple clusters
    #  - datacenter + rack, so clients know what names they can use
    #    when altering keyspace replication settings.
    config = hookenv.config()
    hookenv.relation_set(relid,
                         username=username,
                         password=password,
                         host=hookenv.unit_public_ip(),
                         native_transport_port=config['native_transport_port'],
                         rpc_port=config['rpc_port'],
                         cluster_name=config['cluster_name'],
                         datacenter=config['datacenter'],
                         rack=config['rack'])
Example #50
def reset_default_password():
    if hookenv.leader_get('default_admin_password_changed'):
        hookenv.log('Default admin password already changed')
        return

    # Cassandra ships with well known credentials, rather than
    # providing a tool to reset credentials. This is a huge security
    # hole we must close.
    try:
        # We need a big timeout here, as the cassandra user actually
        # springs into existence some time after Cassandra has started
        # up and is accepting connections.
        with helpers.connect('cassandra', 'cassandra',
                             timeout=120, auth_timeout=120) as session:
            # But before we close this security hole, we need to use these
            # credentials to create a different admin account for the
            # leader, allowing it to create accounts for other nodes as they
            # join. The alternative is restarting Cassandra without
            # authentication, which this charm will likely need to do in the
            # future when we allow Cassandra services to be related together.
            helpers.status_set('maintenance',
                               'Creating initial superuser account')
            username, password = helpers.superuser_credentials()
            pwhash = helpers.encrypt_password(password)
            helpers.ensure_user(session, username, pwhash, superuser=True)
            helpers.set_unit_superusers([hookenv.local_unit()])

            helpers.status_set('maintenance',
                               'Changing default admin password')
            helpers.query(session, 'ALTER USER cassandra WITH PASSWORD %s',
                          cassandra.ConsistencyLevel.ALL, (host.pwgen(),))
    except cassandra.AuthenticationFailed:
        hookenv.log('Default superuser account already reset')
        try:
            with helpers.connect():
                hookenv.log("Leader's superuser account already created")
        except cassandra.AuthenticationFailed:
            # We have no known superuser credentials. Create the account
            # the hard, slow way. This will be the normal method
            # of creating the service's initial account when we allow
            # services to be related together.
            helpers.create_unit_superuser_hard()

    hookenv.leader_set(default_admin_password_changed=True)
Example #51
def superuser_credentials():
    '''Return (username, password) to connect to the Cassandra superuser.

    The credentials are persisted in the root user's cqlshrc file,
    making them easily accessible to the command line tools.
    '''
    cqlshrc_path = get_cqlshrc_path()
    cqlshrc = configparser.ConfigParser(interpolation=None)
    cqlshrc.read([cqlshrc_path])

    try:
        section = cqlshrc['authentication']
        return section['username'], section['password']
    except KeyError:
        hookenv.log('Generating superuser credentials into {}'.format(
            cqlshrc_path))

    config = hookenv.config()

    username = superuser_username()
    password = host.pwgen()

    hookenv.log('Generated username {}'.format(username))

    # We set items separately, rather than together, so that we have a
    # defined order for the ConfigParser to preserve and the tests to
    # rely on.
    cqlshrc.setdefault('authentication', {})
    cqlshrc['authentication']['username'] = username
    cqlshrc['authentication']['password'] = password
    cqlshrc.setdefault('connection', {})
    cqlshrc['connection']['hostname'] = hookenv.unit_public_ip()
    if get_cassandra_version().startswith('2.0'):
        cqlshrc['connection']['port'] = str(config['rpc_port'])
    else:
        cqlshrc['connection']['port'] = str(config['native_transport_port'])

    ini = io.StringIO()
    cqlshrc.write(ini)
    host.mkdir(os.path.dirname(cqlshrc_path), perms=0o700)
    host.write_file(cqlshrc_path, ini.getvalue().encode('UTF-8'), perms=0o400)

    return username, password
def get_rabbit_password_on_disk(username, password=None, local=False):
    ''' Retrieve, generate or store a rabbit password for
    the provided username on disk'''
    if local:
        _passwd_file = _local_named_passwd.format(service_name(), username)
    else:
        _passwd_file = _named_passwd.format(service_name(), username)

    _password = None
    if os.path.exists(_passwd_file):
        with open(_passwd_file, 'r') as passwd:
            _password = passwd.read().strip()
    else:
        mkdir(os.path.dirname(_passwd_file), owner=RABBIT_USER,
              group=RABBIT_USER, perms=0o775)
        os.chmod(os.path.dirname(_passwd_file), 0o775)
        _password = password or pwgen(length=64)
        write_file(_passwd_file, _password, owner=RABBIT_USER,
                   group=RABBIT_USER, perms=0o660)

    return _password
Example #53
def db_relation_master(rel):
    """The master generates credentials and negotiates resources."""
    master = rel.local
    # Pick one remote unit as representative. They should all converge.
    for remote in rel.values():
        break

    # Use the requested database name, the existing database name, or
    # fall back to the remote service name. We no longer use the
    # relation id for the database name or usernames, as when a
    # database dump is restored into a new Juju environment we
    # are more likely to have matching service names than relation ids
    # and less likely to have to perform manual permission and ownership
    # cleanups.
    if "database" in remote:
        master["database"] = remote["database"]
    elif "database" not in master:
        master["database"] = remote.service

    superuser, replication = _credential_types(rel)

    if "user" not in master:
        user = postgresql.username(remote.service, superuser=superuser, replication=replication)
        password = host.pwgen()
        master["user"] = user
        master["password"] = password

        # schema_user has never been documented and is deprecated.
        if not superuser:
            master["schema_user"] = user
            master["schema_password"] = password

    hookenv.log("** Master providing {} ({}/{})".format(rel, master["database"], master["user"]))

    # Reflect these settings back so the client knows when they have
    # taken effect.
    if not replication:
        master["roles"] = remote.get("roles")
        master["extensions"] = remote.get("extensions")
Example #54
def setup_uaac_client(service_name):
    uaactx = contexts.UAARelation()
    orchctx = contexts.OrchestratorRelation()
    secretctx = contexts.StoredContext(utils.secrets_file, {
        'ui_secret': host.pwgen(20),
    })
    domain = orchctx.get_first('domain')
    uaa_secret = uaactx.get_first('admin_client_secret')
    ui_secret = secretctx['ui_secret']

    try:
        uaac("target http://uaa.%s" % domain)
    except CalledProcessError:
        log('FAILED UAA Target, try again.')
        sleep(0.5)
        uaac("target http://uaa.%s" % domain)

    uaac("token client get admin -s %s" % uaa_secret)

    client_needs_setup = bool(call("%s client get admin_ui_client" % ucmd,
                                   shell=True, env=uaac_env))
    if client_needs_setup:
        authorities = yaml.safe_load(uaac('client get admin'))['authorities']
        if 'scim.write' not in authorities:
            authorities += ' scim.write'
            uaac('client update admin --authorities "%s"' % authorities)
            uaac("token client get admin -s %s" % uaa_secret)
        uaac('group add admin_ui.admin')
        uaac('member add admin_ui.admin admin')
        uaac('client add admin_ui_client'
             ' --authorities cloud_controller.admin,cloud_controller.read,'
             'cloud_controller.write,openid,scim.read'
             ' --authorized_grant_types authorization_code,'
             'client_credentials,refresh_token'
             ' --autoapprove true'
             ' --scope admin_ui.admin,admin_ui.user,openid'
             ' -s %s' % ui_secret)
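The try/except around the first uaac target call above is a retry-once guard for a flaky CLI. A generic, hedged sketch of that pattern with subprocess follows; the attempt count, delay, and example command are illustrative.

import subprocess
import time


def run_with_retry(cmd, attempts=2, delay=0.5):
    """Run a shell command, retrying briefly on a non-zero exit status."""
    for attempt in range(1, attempts + 1):
        try:
            return subprocess.check_output(cmd, shell=True, text=True)
        except subprocess.CalledProcessError:
            if attempt == attempts:
                raise
            time.sleep(delay)


# Illustrative usage (the URL is a placeholder):
# run_with_retry('uaac target http://uaa.example.com')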
Example #55
def publish_credentials(rel, superuser):
    pub = rel.to_publish_raw
    config = cassandra.config()
    if config['authenticator'].lower() == 'allowallauthenticator':
        if 'username' in pub:
            del pub['username']
            del pub['password']
        return
    if 'username' in pub:
        hookenv.log("Credentials for {} ({}) already published".format(rel.application_name, rel.relation_id))
        return
    hookenv.log("Publishing credentials for {} ({})".format(rel.application_name, rel.relation_id))
    assert rel.application_name, 'charms.reactive Relation failed to provide application_name property'
    username = '******'.format(rel.application_name)
    if superuser:
        username += '_admin'
    password = host.pwgen()
    pwhash = cassandra.encrypt_password(password)
    with cassandra.connect() as session:
        cassandra.ensure_user(session, username, pwhash, superuser)
    pub['username'] = username
    pub['password'] = password
    # Notify peers there are new credentials to be found.
    leadership.leader_set(client_rel_source=hookenv.local_unit(), client_rel_ping=str(time.time()))
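Example #56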
    def __call__(self):
        ''' Provide all configuration for Horizon '''
        ctxt = {
            'compress_offline':
                bool_from_string(config('offline-compression')),
            'debug': bool_from_string(config('debug')),
            'customization_module': config('customization-module'),
            'default_role': config('default-role'),
            "webroot": config('webroot') or '/',
            "ubuntu_theme": bool_from_string(config('ubuntu-theme')),
            "default_theme": config('default-theme'),
            "custom_theme": config('custom-theme'),
            "secret": config('secret') or pwgen(),
            'support_profile': config('profile')
            if config('profile') in ['cisco'] else None,
            "neutron_network_dvr": config("neutron-network-dvr"),
            "neutron_network_l3ha": config("neutron-network-l3ha"),
            "neutron_network_lb": config("neutron-network-lb"),
            "neutron_network_firewall": config("neutron-network-firewall"),
            "neutron_network_vpn": config("neutron-network-vpn"),
            "cinder_backup": config("cinder-backup"),
            "allow_password_autocompletion":
            config("allow-password-autocompletion"),
            "password_retrieve": config("password-retrieve"),
            'default_domain': config('default-domain'),
            'multi_domain': False if config('default-domain') else True,
            "default_create_volume": config("default-create-volume"),
            'image_formats': config('image-formats'),
            'api_result_limit': config('api-result-limit') or 1000,
            'enable_fip_topology_check': config('enable-fip-topology-check'),
            'session_timeout': config('session-timeout'),
            'dropdown_max_items': config('dropdown-max-items'),
            'enable_consistency_groups': config('enable-consistency-groups'),
        }

        return ctxt
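Note that config('secret') or pwgen() above yields a new value on every render when no secret is configured, unless Horizon or the charm stabilises it elsewhere. A hedged sketch of one way to keep a generated secret stable by caching it on disk; the cache path and token length are invented for illustration.

import os
import secrets

SECRET_CACHE = '/var/lib/openstack-dashboard/.charm-secret'  # hypothetical path


def stable_secret(configured=None):
    """Prefer the operator-supplied secret, else reuse or create a cached one."""
    if configured:
        return configured
    if os.path.exists(SECRET_CACHE):
        with open(SECRET_CACHE) as fh:
            return fh.read().strip()
    secret = secrets.token_hex(32)
    os.makedirs(os.path.dirname(SECRET_CACHE), mode=0o700, exist_ok=True)
    with open(SECRET_CACHE, 'w') as fh:
        fh.write(secret)
    os.chmod(SECRET_CACHE, 0o600)
    return secret
Example #57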
    def __call__(self):
        ''' Provide all configuration for Horizon '''
        projects_yaml = git_default_repos(config('openstack-origin-git'))
        ctxt = {
            'compress_offline': config('offline-compression') in ['yes', True],
            'debug': config('debug') in ['yes', True],
            'default_role': config('default-role'),
            "webroot": config('webroot'),
            "ubuntu_theme": config('ubuntu-theme') in ['yes', True],
            "default_theme": config('default-theme'),
            "secret": config('secret') or pwgen(),
            'support_profile': config('profile')
            if config('profile') in ['cisco'] else None,
            "neutron_network_dvr": config("neutron-network-dvr"),
            "neutron_network_l3ha": config("neutron-network-l3ha"),
            "neutron_network_lb": config("neutron-network-lb"),
            "neutron_network_firewall": config("neutron-network-firewall"),
            "neutron_network_vpn": config("neutron-network-vpn"),
            "cinder_backup": config("cinder-backup"),
            'virtualenv': git_pip_venv_dir(projects_yaml)
            if config('openstack-origin-git') else None,
        }

        return ctxt
Example #58
def ensure_user(user, group=None):
    adduser(user, pwgen())
    if group:
        add_user_to_group(user, group)
Example #59
def upgrade_charm():
    workloadstatus.status_set('maintenance', 'Upgrading charm')

    rels = context.Relations()

    # The master is now appointed by the leader.
    if hookenv.is_leader():
        master = replication.get_master()
        if not master:
            master = hookenv.local_unit()
            if rels.peer:
                for peer_relinfo in rels.peer.values():
                    if peer_relinfo.get('state') == 'master':
                        master = peer_relinfo.unit
                        break
            hookenv.log('Discovered {} is the master'.format(master))
            leadership.leader_set(master=master)

    # The name of this crontab has changed. It will get regenerated.
    if os.path.exists('/etc/cron.d/postgresql'):
        hookenv.log('Removing old crontab')
        os.unlink('/etc/cron.d/postgresql')

    # Older usernames were generated from the relation id, and really
    # old ones contained random components. This made it problematic
    # to restore a database into a fresh environment, because the new
    # usernames would not match the old usernames and none of the
    # database permissions would match. We now generate
    # usernames using just the client service name, so restoring a
    # database into a fresh environment will work provided the service
    # names match. We want to update the old usernames in upgraded
    # services to the new format to improve their disaster recovery
    # story.
    for relname, superuser in [('db', False), ('db-admin', True)]:
        for client_rel in rels[relname].values():
            hookenv.log('Migrating database users for {}'.format(client_rel))
            password = client_rel.local.get('password', host.pwgen())
            old_username = client_rel.local.get('user')
            new_username = postgresql.username(client_rel.service,
                                               superuser, False)
            if old_username and old_username != new_username:
                migrate_user(old_username, new_username, password, superuser)
                client_rel.local['user'] = new_username
                client_rel.local['password'] = password

            old_username = client_rel.local.get('schema_user')
            if old_username and old_username != new_username:
                migrate_user(old_username, new_username, password, superuser)
                client_rel.local['schema_user'] = new_username
                client_rel.local['schema_password'] = password

    # Admin relations used to get 'all' published as the database name,
    # which was bogus.
    for client_rel in rels['db-admin'].values():
        if client_rel.local.get('database') == 'all':
            client_rel.local['database'] = client_rel.service

    # Reconfigure PostgreSQL and republish client relations.
    reactive.remove_state('postgresql.cluster.configured')
    reactive.remove_state('postgresql.client.published')

    # Don't recreate the cluster.
    reactive.set_state('postgresql.cluster.created')

    # Set the postgresql.replication.cloned flag, so we don't rebuild
    # standbys when upgrading the charm from a pre-reactive version.
    reactive.set_state('postgresql.replication.cloned')
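The migration above relies on usernames being derived purely from the client service name. A hedged sketch of such a deterministic naming scheme follows; the prefix and suffixes are invented for illustration and may not match the charm's postgresql.username().

USERNAME_PREFIX = 'db_'  # illustrative prefix; the real charm may differ


def service_username(service_name, superuser=False, replication=False):
    """Derive a stable role name from the client service name alone."""
    username = USERNAME_PREFIX + service_name
    if superuser:
        username += '_admin'
    elif replication:
        username += '_repl'
    return username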