Example #1
def configure_cinder_backup():
    """Configure cinder-backup-swift-proxy."""
    keystone_ip = zaza_model.get_app_ips(
        'swift-keystone')[0]
    swift_ip = zaza_model.get_app_ips(
        'swift-proxy')[0]
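    # get_application_config returns option metadata dicts; the actual
    # setting is stored under the 'value' key.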
    auth_ver = (zaza_model.get_application_config('swift-keystone')
                .get('preferred-api-version').get('value'))
    if auth_ver == 2:
        auth_url = 'http://{}:5000/v2.0'.format(keystone_ip)
        endpoint_url = 'http://{}:8080/v1/AUTH_'.format(swift_ip)
    else:
        auth_url = 'http://{}:5000/v3'.format(keystone_ip)
        endpoint_url = 'http://{}:8080/v1/AUTH'.format(swift_ip)
    cinder_backup_swift_proxy_conf = {
        'endpoint-url': endpoint_url,
        'auth-url': auth_url
    }
    juju_service = 'cinder-backup-swift-proxy'
    zaza_model.set_application_config(juju_service,
                                      cinder_backup_swift_proxy_conf)
    zaza_model.wait_for_agent_status()
    zaza_model.wait_for_application_states()
    _singleton = zaza.openstack.charm_tests.test_utils.OpenStackBaseTest()
    _singleton.setUpClass()
    with _singleton.config_change(cinder_backup_swift_proxy_conf,
                                  cinder_backup_swift_proxy_conf):
        # wait for configuration to be applied then return
        pass
Example #2
 def test_02_remove_check(self):
     """Verify swap check is removed."""
     model.set_application_config(self.application_name, {"swap": ""})
     model.block_until_all_units_idle()
     cmd = "cat /etc/nagios/nrpe.d/check_swap.cfg"
     result = model.run_on_unit(self.lead_unit_name, cmd)
     # run_on_unit returns the exit code as a string.
     self.assertTrue(result.get("Code") != "0")
Example #3
def setup_ceph_proxy():
    """
    Configure ceph proxy with ceph metadata.

    Fetches admin_keyring and FSID from ceph-mon and
    uses those to configure ceph-proxy.
    """
    raw_admin_keyring = model.run_on_leader(
        "ceph-mon", 'cat /etc/ceph/ceph.client.admin.keyring')["Stdout"]
    admin_keyring = [
        line for line in raw_admin_keyring.split("\n") if "key" in line
    ][0].split(' = ')[-1].rstrip()
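    # The cluster FSID is stored in ceph-mon's Juju leader settings.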
    fsid = model.run_on_leader("ceph-mon", "leader-get fsid")["Stdout"]
    cluster_ips = model.get_app_ips("ceph-mon")

    proxy_config = {
        'auth-supported': 'cephx',
        'admin-key': admin_keyring,
        'fsid': fsid,
        'monitor-hosts': ' '.join(cluster_ips)
    }

    logging.debug('Config: {}'.format(proxy_config))

    model.set_application_config("ceph-proxy", proxy_config)
Example #4
def nfs_setup():
    """Run setup for testing Trilio.

    Setup for testing Trilio is currently part of functional
    tests.
    """
    logging.info("Configuring NFS Server")
    nfs_server_ip = zaza_model.get_app_ips("nfs-server-test-fixture")[0]
    trilio_wlm_unit = zaza_model.get_first_unit_name("trilio-wlm")

    nfs_shares_conf = {"nfs-shares": "{}:/srv/testing".format(nfs_server_ip)}
    logging.info("NFS share config: {}".format(nfs_shares_conf))
    _trilio_services = ["trilio-wlm", "trilio-data-mover"]

    conf_changed = False
    for juju_service in _trilio_services:
        app_config = zaza_model.get_application_config(juju_service)
        if (app_config["nfs-shares"].get("value")
                != nfs_shares_conf["nfs-shares"]):
            logging.info("Updating nfs-shares config option")
            zaza_model.set_application_config(juju_service, nfs_shares_conf)
            conf_changed = True

    if conf_changed:
        zaza_model.wait_for_agent_status()
        # NOTE(jamespage): wlm-api service must be running in order
        #                  to execute the setup actions
        zaza_model.block_until_service_status(
            unit_name=trilio_wlm_unit,
            services=["wlm-api"],
            target_status="active",
        )
Example #5
def basic_setup():
    """Run setup for testing Trilio.

    Setup for testing Trilio is currently part of functional
    tests.
    """
    logging.info("Configuring NFS Server")
    nfs_server_ip = zaza_model.get_app_ips("nfs-server-test-fixture")[0]
    trilio_wlm_unit = zaza_model.get_first_unit_name("trilio-wlm")

    nfs_shares_conf = {"nfs-shares": "{}:/srv/testing".format(nfs_server_ip)}
    _trilio_services = ["trilio-wlm", "trilio-data-mover"]

    conf_changed = False
    for juju_service in _trilio_services:
        app_config = zaza_model.get_application_config(juju_service)
        if (app_config["nfs-shares"].get("value")
                != nfs_shares_conf["nfs-shares"]):
            zaza_model.set_application_config(juju_service, nfs_shares_conf)
            conf_changed = True

    if conf_changed:
        zaza_model.wait_for_agent_status()
        # NOTE(jamespage): wlm-api service must be running in order
        #                  to execute the setup actions
        zaza_model.block_until_service_status(
            unit_name=trilio_wlm_unit,
            services=["wlm-api"],
            target_status="active",
        )

    logging.info("Executing create-cloud-admin-trust")
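    # The Keystone admin password is read from keystone's leader settings.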
    password = juju_utils.leader_get("keystone", "admin_passwd")

    generic_utils.assertActionRanOK(
        zaza_model.run_action_on_leader(
            "trilio-wlm",
            "create-cloud-admin-trust",
            raise_on_failure=True,
            action_params={"password": password},
        )
    )

    logging.info("Executing create-license")
    test_license = os.environ.get("TEST_TRILIO_LICENSE")
    if test_license and os.path.exists(test_license):
        zaza_model.attach_resource("trilio-wlm",
                                   resource_name='license',
                                   resource_path=test_license)
        generic_utils.assertActionRanOK(
            zaza_model.run_action_on_leader(
                "trilio-wlm", "create-license",
                raise_on_failure=True
            )
        )

    else:
        logging.error("Unable to find Trilio License file")
Example #6
 def set_new_config(self):
     """Change the application's charm config."""
     logging.info("Triggering deferred restart via config change")
     config_key, new_value = self.get_new_config()
     logging.info("Setting {}: {}".format(config_key, new_value))
     model.set_application_config(
         self.application_name,
         {config_key: new_value})
     return new_value
Example #7
def s3_setup():
    """Run setup of s3 options for Trilio."""
    session = openstack_utils.get_overcloud_keystone_session()
    ks_client = openstack_utils.get_keystone_session_client(session)

    # Get token data so we can glean our user_id and project_id
    token_data = ks_client.tokens.get_token_data(session.get_token())
    project_id = token_data['token']['project']['id']
    user_id = token_data['token']['user']['id']

    # Store URL to service providing S3 compatible API
    for entry in token_data['token']['catalog']:
        if entry['type'] == 's3':
            for endpoint in entry['endpoints']:
                if endpoint['interface'] == 'public':
                    s3_region = endpoint['region']
                    s3_endpoint = endpoint['url']

    # Create AWS compatible application credentials in Keystone
    ec2_creds = ks_client.ec2.create(user_id, project_id)
    cacert = openstack_utils.get_cacert()
    kwargs = {
        'region_name': s3_region,
        'aws_access_key_id': ec2_creds.access,
        'aws_secret_access_key': ec2_creds.secret,
        'endpoint_url': s3_endpoint,
        'verify': cacert,
    }
    s3 = boto3.resource('s3', **kwargs)

    # Create bucket
    bucket_name = 'zaza-trilio'
    logging.info("Creating bucket: {}".format(bucket_name))
    bucket = s3.Bucket(bucket_name)
    bucket.create()

    s3_config = {
        'tv-s3-secret-key': ec2_creds.secret,
        'tv-s3-access-key': ec2_creds.access,
        'tv-s3-region-name': s3_region,
        'tv-s3-bucket': bucket_name,
        'tv-s3-endpoint-url': s3_endpoint
    }
    for app in ['trilio-wlm', 'trilio-data-mover']:
        logging.info("Setting s3 config for {}".format(app))
        zaza_model.set_application_config(app, s3_config)
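    # trilio-wlm is expected to be blocked ('application not trusted') at
    # this point, so adjust the target deploy status before waiting.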
    test_config = lifecycle_utils.get_charm_config(fatal=False)
    states = test_config.get('target_deploy_status', {})
    states['trilio-wlm'] = {
        'workload-status': 'blocked',
        'workload-status-message': 'application not trusted'
    }
    zaza_model.wait_for_application_states(states=states, timeout=7200)
    zaza_model.block_until_all_units_idle()
Example #8
def configure_external_s3_backend():
    """Set up Ceph-radosgw as an external S3 backend for Glance."""
    logging.info("Creating a test S3 user and credentials for Glance")
    username, displayname = "zaza-glance-test", "Zaza Glance Test User"
    cmd = "radosgw-admin user create --uid='{}' --display-name='{}'".format(
        username, displayname)
    results = model.run_on_leader("ceph-mon", cmd)
    stdout = json.loads(results["stdout"])
    keys = stdout["keys"][0]
    access_key, secret_key = keys["access_key"], keys["secret_key"]

    logging.info("Getting S3 endpoint URL of Radosgw from Keystone")
    keystone_auth = openstack_utils.get_overcloud_auth()
    keystone_client = openstack_utils.get_keystone_client(keystone_auth)
    endpoint_url = keystone_client.session.get_endpoint(
        service_type="s3",
        interface="public",
        region="RegionOne",
    )

    logging.info("Creating a test S3 bucket for Glance")
    bucket_name = "zaza-glance-s3-test"
    s3_client = boto3.client(
        "s3",
        endpoint_url=endpoint_url,
        aws_access_key_id=access_key,
        aws_secret_access_key=secret_key,
    )
    s3_client.create_bucket(Bucket=bucket_name)

    logging.info("Updating Glance configs with S3 endpoint information")
    model.set_application_config(
        "glance",
        {
            "s3-store-host": endpoint_url,
            "s3-store-access-key": access_key,
            "s3-store-secret-key": secret_key,
            "s3-store-bucket": bucket_name,
        },
    )
    model.wait_for_agent_status()

    logging.info("Waiting for units to reach target states")
    model.wait_for_application_states(
        states={
            "glance": {
                "workload-status": "active",
                "workload-status-message": "Unit is ready",
            }
        })
    model.block_until_all_units_idle()
Example #9
def set_origin(application, origin='openstack-origin', pocket='distro'):
    """Set the configuration option for origin source.

    :param application: Name of the application to configure
    :type application: str
    :param origin: The configuration setting variable name for changing origin
                   source. (openstack-origin or source)
    :type origin: str
    :param pocket: Origin source cloud pocket.
                   i.e. 'distro' or 'cloud:xenial-newton'
    :type pocket: str
    :returns: None
    :rtype: None
    """
    logging.info("Setting {} on {} to {}".format(
        origin, application, pocket))
    model.set_application_config(application, {origin: pocket})
Example #10
def configure_s3_backend():
    """Inject S3 parameters from Swift for Gnocchi config."""
    session = openstack_utils.get_overcloud_keystone_session()
    ks_client = openstack_utils.get_keystone_session_client(session)

    logging.info('Retrieving S3 connection data from Swift')
    token_data = ks_client.tokens.get_token_data(session.get_token())
    project_id = token_data['token']['project']['id']
    user_id = token_data['token']['user']['id']

    # Store URL to service providing S3 compatible API
    for entry in token_data['token']['catalog']:
        if entry['type'] == 's3':
            for endpoint in entry['endpoints']:
                if endpoint['interface'] == 'public':
                    s3_region = endpoint['region']
                    s3_endpoint = endpoint['url']

    # Create AWS compatible application credentials in Keystone
    ec2_creds = ks_client.ec2.create(user_id, project_id)

    logging.info('Changing Gnocchi charm config to connect to S3')
    model.set_application_config(
        'gnocchi', {
            's3-endpoint-url': s3_endpoint,
            's3-region-name': s3_region,
            's3-access-key-id': ec2_creds.access,
            's3-secret-access-key': ec2_creds.secret
        })
    logging.info('Waiting for units to execute config-changed hook')
    model.wait_for_agent_status()
    logging.info('Waiting for units to reach target states')
    model.wait_for_application_states(
        states={
            'gnocchi': {
                'workload-status': 'active',
                'workload-status-message': 'Unit is ready'
            },
            'ceilometer': {
                'workload-status': 'blocked',
                'workload-status-message':
                    'Run the ceilometer-upgrade action on the leader '
                    'to initialize ceilometer and gnocchi'
            }
        })
    model.block_until_all_units_idle()
Example #11
def setup_ceph_proxy():
    """Configure ceph-proxy with key, FSID and monitors from ceph-mon."""
    raw_admin_keyring = model.run_on_leader(
        "ceph-mon", 'cat /etc/ceph/ceph.client.admin.keyring')["Stdout"]
    # NOTE: assumes "key = <secret>" is the last '=' entry in the keyring.
    admin_keyring = raw_admin_keyring.split(' = ')[-1].rstrip()
    fsid = model.run_on_leader("ceph-mon", "leader-get fsid")["Stdout"]
    cluster_ips = model.get_app_ips("ceph-mon")

    proxy_config = {
        'auth-supported': 'cephx',
        'admin-key': admin_keyring,
        'fsid': fsid,
        'monitor-hosts': ' '.join(cluster_ips)
    }

    logging.debug('Config: {}'.format(proxy_config))

    model.set_application_config("ceph-proxy", proxy_config)
Example #12
    def test_200_haproxy_stats_config(self):
        """Verify that the HAProxy stats are properly setup."""
        logging.info('Checking dashboard HAProxy settings...')
        unit = zaza_model.get_unit_from_name(
            zaza_model.get_lead_unit_name(self.application_name))
        logging.debug("... dashboard_ip is:{}".format(
            zaza_model.get_unit_public_address(unit)))
        conf = '/etc/haproxy/haproxy.cfg'
        port = '8888'
        set_alternate = {
            'haproxy-expose-stats': 'True',
        }
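        # haproxy-expose-stats switches the stats listener from 127.0.0.1 to
        # 0.0.0.0, making it reachable externally.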

        request = urllib.request.Request('http://{}:{}'.format(
            zaza_model.get_unit_public_address(unit), port))

        output = str(generic_utils.get_file_contents(unit, conf))

        password = None
        for line in output.split('\n'):
            if "stats auth" in line:
                password = line.split(':')[1]
                break
        else:
            raise ValueError("'stats auth' not found in output")
        base64string = base64.b64encode(
            bytes('{}:{}'.format('admin', password), 'ascii'))
        request.add_header("Authorization",
                           "Basic {}".format(base64string.decode('utf-8')))

        # Expect default config to not be available externally.
        expected = 'bind 127.0.0.1:{}'.format(port)
        self.assertIn(expected, output)
        with self.assertRaises(urllib.error.URLError):
            _do_request(request)

        zaza_model.set_application_config(self.application_name, set_alternate)
        zaza_model.block_until_all_units_idle(model_name=self.model_name)

        # Once exposed, expect HAProxy stats to be available externally
        output = str(generic_utils.get_file_contents(unit, conf))
        expected = 'bind 0.0.0.0:{}'.format(port)
        html = _do_request(request).read().decode(encoding='utf-8')
        self.assertIn(expected, output)
        self.assertIn('Statistics Report for HAProxy', html,
                      "HAProxy stats check failed")
Example #13
 def test_05_netlinks(self):
     """Check netlinks checks are applied."""
     netlinks = "- eth0 mtu:9000 speed:10000"
     model.set_application_config(self.application_name,
                                  {"netlinks": netlinks})
     model.block_until_all_units_idle()
     cmd = "cat /etc/nagios/nrpe.d/check_netlinks_eth0.cfg"
     line = ("command[check_netlinks_eth0]=/usr/local/lib/nagios/plugins/"
             "check_netlinks.py -i eth0 -m 9000 -s 10000")
     result = model.run_on_unit(self.lead_unit_name, cmd)
     code = result.get("Code")
     if code != "0":
         logging.warning("Unable to find nrpe check at "
                         "/etc/nagios/nrpe.d/check_netlinks_eth0.cfg")
         raise model.CommandRunFailed(cmd, result)
     content = result.get("Stdout")
     self.assertTrue(line in content)
Example #14
def apply_certs(application_name):
    """Read certs from disk and apply them to running charms.

    :param application_name: Name of application, used to derive path to store
                             artefacts.
    :type application_name: str
    """
    APP_CERT_DIR = os.path.join(CERT_DIR, application_name)
    ssl_options = [('ssl_ca', os.path.join(CERT_DIR, 'cacert.pem')),
                   ('ssl_cert', os.path.join(APP_CERT_DIR, 'cert.pem')),
                   ('ssl_key', os.path.join(APP_CERT_DIR, 'cert.key'))]
    charm_config = {}
    for (charm_option, ssl_file) in ssl_options:
        with open(ssl_file, 'rb') as f:
            ssl_data = f.read()
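        # The certificate material is passed to the charm base64-encoded.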
        charm_config[charm_option] = base64.b64encode(ssl_data).decode('utf8')
    model.set_application_config(application_name, configuration=charm_config)
Example #15
def main(argv):
    cli_utils.setup_logging()
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--target_release",
        default='auto', help="Openstack release name to upgrade to or 'auto' "
                             "to have script upgrade based on the lowest value "
                             "across all services")
    options = parser.parse_args()
    target_release = cli_utils.parse_arg(options, 'target_release')
    principle_services = mojo_utils.get_principle_applications()
    current_versions = openstack_utils.get_current_os_versions(
        principle_services)
    if target_release == 'auto':
        # If in auto mode find the lowest value openstack release across all
        # services and make sure all services are upgraded to one release
        # higher than the lowest
        lowest_release = mojo_os_utils.get_lowest_os_version(current_versions)
        target_release = mojo_os_utils.next_release(lowest_release)[1]
    # Get a list of services that need upgrading
    needs_upgrade = get_upgrade_targets(target_release, current_versions)
    for application in openstack_utils.UPGRADE_SERVICES:
        if application['name'] not in principle_services:
            continue
        if application['name'] not in needs_upgrade:
            logging.info('Not upgrading {}, it is at {} or higher'.format(
                application['name'],
                target_release)
            )
            continue
        logging.info('Upgrading {} to {}'.format(application['name'],
                                                 target_release))
        # Update required relations
        update_relations(application['name'], target_release)
        ubuntu_version = mojo_utils.get_ubuntu_version(application['name'])
        config = {application['type']['origin_setting']:
                  "cloud:{}-{}/proposed"
                  .format(ubuntu_version, target_release)}
        model.set_application_config(application['name'], config)
        # NOTE: For liberty->mitaka upgrade ceilometer-agent gets stuck at
        # 'Services not running that should be: memcached' after nova-compute
        # upgrade, and test would wait forever. Therefore we upgrade
        # ceilometer-agent immediately after nova-compute.
        if application['name'] == 'nova-compute':
            model.set_application_config('ceilometer-agent', config)
        mojo_utils.juju_wait_finished()
Example #16
def main(argv):
    parser = argparse.ArgumentParser()
    parser.add_argument("--service")
    parser.add_argument("--kv")
    parser.add_argument("--wait")
    options = parser.parse_args()
    application = cli_utils.parse_arg(options, 'service')
    key, value = cli_utils.parse_arg(options, 'kv').split("=")
    wait = cli_utils.parse_arg(options, 'wait')
    print("Wait: {}".format(wait))
    if wait is not None:
        wait = wait == 'True'
    print("Application: {}".format(application))
    print("Option: {}={}".format(key, value))
    print("Wait: {}".format(wait))
    model.set_application_config(application, {key: value})
    if wait:
        mojo_utils.juju_wait_finished()
Example #17
def main(argv):
    cli_utils.setup_logging()
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--target_release",
        default='auto', help="Openstack release name to upgrade to or 'auto' "
                             "to have script upgrade based on the lowest value "
                             "across all services")
    options = parser.parse_args()
    target_release = cli_utils.parse_arg(options, 'target_release')
    principle_services = mojo_utils.get_principle_applications()
    current_versions = openstack_utils.get_current_os_versions(
        principle_services)
    if target_release == 'auto':
        # If in auto mode find the lowest value openstack release across all
        # services and make sure all services are upgraded to one release
        # higher than the lowest
        lowest_release = mojo_os_utils.get_lowest_os_version(current_versions)
        target_release = mojo_os_utils.next_release(lowest_release)[1]
    # Get a list of services that need upgrading
    needs_upgrade = get_upgrade_targets(target_release, current_versions)
    add_new_charms(target_release)
    mojo_utils.juju_wait_finished()
    for application in openstack_utils.UPGRADE_SERVICES:
        if application['name'] not in principle_services:
            continue
        if application['name'] not in needs_upgrade:
            logging.info('Not upgrading {}, it is at {} or higher'.format(
                application['name'],
                target_release)
            )
            continue
        logging.info('Upgrading {} to {}'.format(application['name'],
                                                 target_release))
        # Update required relations
        update_relations(application['name'], target_release)
        ubuntu_version = mojo_utils.get_ubuntu_version(application['name'])
        config = {application['type']['origin_setting']:
                  "cloud:{}-{}/proposed"
                  .format(ubuntu_version, target_release)}
        model.set_application_config(application['name'], config)
        mojo_utils.juju_wait_finished()
Example #18
def configure_cinder_backup():
    """Configure cinder-backup-swift."""
    keystone_ip = zaza_model.get_app_ips('swift-keystone')[0]
    swift_ip = zaza_model.get_app_ips('swift-proxy')[0]
    auth_ver = (zaza_model.get_application_config('swift-keystone').get(
        'preferred-api-version').get('value'))
    if auth_ver == 2:
        auth_url = 'http://{}:5000/v2.0'.format(keystone_ip)
        endpoint_url = 'http://{}:8080/v1/AUTH_'.format(swift_ip)
    else:
        auth_url = 'http://{}:5000/v3'.format(keystone_ip)
        endpoint_url = 'http://{}:8080/v1/AUTH'.format(swift_ip)
    cinder_backup_swift_conf = {
        'endpoint-url': endpoint_url,
        'auth-url': auth_url
    }
    juju_service = 'cinder-backup-swift'
    zaza_model.set_application_config(juju_service, cinder_backup_swift_conf)
    zaza_model.wait_for_application_states()
    time.sleep(300)
Example #19
    def test_upload_external_cert(self):
        """Verify that the external CA is uploaded correctly."""
        logging.info('Changing value for trusted-external-ca-cert.')
        ca_cert_option = 'trusted-external-ca-cert'
        ppk, cert = utilities.cert.generate_cert('gnocchi_test.ci.local')
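        # The CA certificate is passed to the charm base64-encoded.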
        b64_cert = base64.b64encode(cert).decode()
        config = {
            ca_cert_option: b64_cert,
        }
        model.set_application_config('gnocchi', config)
        model.block_until_all_units_idle()

        files = [
            '/usr/local/share/ca-certificates/gnocchi-external.crt',
            '/etc/ssl/certs/gnocchi-external.pem',
        ]

        for file in files:
            logging.info("Validating that {} is created.".format(file))
            model.block_until_file_has_contents('gnocchi', file, 'CERTIFICATE')
            logging.info("Found {} successfully.".format(file))
Example #20
def add_new_charms(target_release):
    """Deploy, configure and relate charms new in the target release."""
    for charm, charm_data in NEW_CHARMS.get(target_release, {}).items():
        logging.info("Adding charm {}".format(charm))
        deploy_cmd = ['juju', 'deploy', '-n', str(charm_data['num_units'])]
        if charm_data['origin_config']:
            deploy_cmd = deploy_cmd + [
                '--config', '{}={}'.format(
                    charm_data['origin_config'], "cloud:{}-{}/proposed".format(
                        charm_data['lts_premier'], target_release))
            ]
        deploy_cmd = deploy_cmd + [charm_data['charm_source'], charm]
        subprocess.check_call(deploy_cmd)

        if charm_data['config']:
            model.set_application_config(charm, charm_data['config'])

        rel_cmd = ['juju', 'add-relation']
        for relation in charm_data['relations']:
            logging.info("Adding charm relation {} {}".format(
                relation[0], relation[1]))
            subprocess.check_call(rel_cmd + [relation[0], relation[1]])
Example #21
    def test_deferred_restarts(self):
        """Run deferred restart tests."""
        app_config = model.get_application_config(self.application_name)
        auto_restart_config_key = 'enable-auto-restarts'
        if auto_restart_config_key not in app_config:
            raise unittest.SkipTest("Deferred restarts not implemented")

        # Ensure auto restarts are off.
        policy_file = '/etc/policy-rc.d/charm-{}.policy'.format(
            self.application_name)
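        # With auto restarts disabled the charm is expected to write a
        # policy-rc.d file; its presence is used to confirm the change.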
        if app_config[auto_restart_config_key]['value']:
            logging.info("Turning off auto restarts")
            model.set_application_config(
                self.application_name, {auto_restart_config_key: 'False'})
            logging.info("Waiting for {} to appear on units of {}".format(
                policy_file,
                self.application_name))
            model.block_until_file_has_contents(
                self.application_name,
                policy_file,
                'policy_requestor_name')
            # The block_until_file_has_contents call ensures the change we are
            # waiting for has happened; now just wait for any hooks to finish.
            logging.info("Waiting for units to be idle")
            model.block_until_all_units_idle()
        else:
            logging.info("Auto restarts already disabled")

        self.run_tests()

        # Finished so turn auto-restarts back on.
        logging.info("Turning on auto restarts")
        model.set_application_config(
            self.application_name, {auto_restart_config_key: 'True'})
        model.block_until_file_missing(
            self.application_name,
            policy_file)
        model.block_until_all_units_idle()
        self.check_clear_hooks()
Example #22
 def _set_config(self, state):
     """Set use-policyd-override to the given boolean state."""
     s = "True" if state else "False"
     config = {"use-policyd-override": s}
     logging.info("Setting config to {}".format(config))
     zaza_model.set_application_config(self.application_name, config)
     zaza_model.wait_for_agent_status()
Example #23
    def config_change(self,
                      default_config,
                      alternate_config,
                      application_name=None):
        """Run change config tests.

        Change config to `alternate_config`, wait for idle workload status,
        yield, return config to `default_config` and wait for idle workload
        status before return from function.

        Example usage:
            with self.config_change({'preferred-api-version': '2'},
                                    {'preferred-api-version': '3'}):
                do_something()

        :param default_config: Dict of charm settings to set on completion
        :type default_config: dict
        :param alternate_config: Dict of charm settings to change to
        :type alternate_config: dict
        :param application_name: String application name for use when called
                                 by a charm under test other than the object's
                                 application.
        :type application_name: str
        """
        if not application_name:
            application_name = self.application_name

        # we need to compare config values to what is already applied before
        # attempting to set them.  otherwise the model will behave differently
        # than we would expect while waiting for completion of the change
        app_config = self.config_current(application_name,
                                         keys=alternate_config.keys())

        if all(item in app_config.items()
               for item in alternate_config.items()):
            logging.debug('alternate_config equals what is already applied '
                          'config')
            yield
            if default_config == alternate_config:
                logging.debug('default_config also equals what is already '
                              'applied config')
                return
            logging.debug('alternate_config already set, and default_config '
                          'needs to be applied before return')
        else:
            logging.debug(
                'Changing charm setting to {}'.format(alternate_config))
            model.set_application_config(
                application_name,
                self._stringed_value_config(alternate_config),
                model_name=self.model_name)

            logging.debug('Waiting for units to execute config-changed hook')
            model.wait_for_agent_status(model_name=self.model_name)

            logging.debug('Waiting for units to reach target states')
            model.wait_for_application_states(model_name=self.model_name,
                                              states=self.test_config.get(
                                                  'target_deploy_status', {}))
            # TODO: Optimize with a block on a specific application until idle.
            model.block_until_all_units_idle()

            yield

        logging.debug('Restoring charm setting to {}'.format(default_config))
        model.set_application_config(
            application_name,
            self._stringed_value_config(default_config),
            model_name=self.model_name)

        logging.debug('Waiting for units to reach target states')
        model.wait_for_application_states(model_name=self.model_name,
                                          states=self.test_config.get(
                                              'target_deploy_status', {}))
        # TODO: Optimize with a block on a specific application until idle.
        model.block_until_all_units_idle()
Example #24
    def test_blocked_when_non_pristine_disk_appears(self):
        """Test blocked state with non-pristine disk.

        Validate that charm goes into blocked state when it is presented with
        new block devices that have foreign data on them.
        Instances used in UOSCI has a flavour with ephemeral storage in
        addition to the bootable instance storage.  The ephemeral storage
        device is partitioned, formatted and mounted early in the boot process
        by cloud-init.
        As long as the device is mounted the charm will not attempt to use it.
        If we unmount it and trigger the config-changed hook the block device
        will appear as a new and previously untouched device for the charm.
        One of the first steps of device eligibility checks should be to make
        sure we are seeing a pristine and empty device before doing any
        further processing.
        As the ephemeral device will have data on it we can use it to validate
        that these checks work as intended.
        """
        logging.info('Checking behaviour when non-pristine disks appear...')
        logging.info('Configuring ephemeral-unmount...')
        alternate_conf = {
            'ephemeral-unmount': '/mnt',
            'osd-devices': '/dev/vdb'
        }
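        # Unmounting the ephemeral device exposes /dev/vdb to the charm as
        # a new, non-pristine block device.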
        juju_service = 'ceph-osd'
        zaza_model.set_application_config(juju_service, alternate_conf)
        ceph_osd_states = {
            'ceph-osd': {
                'workload-status': 'blocked',
                'workload-status-message': 'Non-pristine'
            }
        }
        zaza_model.wait_for_application_states(states=ceph_osd_states)
        logging.info('Units now in blocked state, running zap-disk action...')
        unit_names = ['ceph-osd/0', 'ceph-osd/1', 'ceph-osd/2']
        for unit_name in unit_names:
            zap_disk_params = {
                'devices': '/dev/vdb',
                'i-really-mean-it': True,
            }
            action_obj = zaza_model.run_action(unit_name=unit_name,
                                               action_name='zap-disk',
                                               action_params=zap_disk_params)
            logging.debug('Result of action: {}'.format(action_obj))

        logging.info('Running add-disk action...')
        for unit_name in unit_names:
            add_disk_params = {
                'osd-devices': '/dev/vdb',
            }
            action_obj = zaza_model.run_action(unit_name=unit_name,
                                               action_name='add-disk',
                                               action_params=add_disk_params)
            logging.debug('Result of action: {}'.format(action_obj))

        logging.info('Wait for idle/ready status...')
        zaza_model.wait_for_application_states()

        logging.info('OK')

        set_default = {
            'ephemeral-unmount': '',
            'osd-devices': '/dev/vdb',
        }

        current_release = zaza_openstack.get_os_release()
        bionic_train = zaza_openstack.get_os_release('bionic_train')
        if current_release < bionic_train:
            set_default['osd-devices'] = '/dev/vdb /srv/ceph'

        logging.info('Restoring to default configuration...')
        zaza_model.set_application_config(juju_service, set_default)

        zaza_model.wait_for_application_states()
Example #25
        allocation_pools = subnet['allocation_pools']
        self.cidr = subnet['cidr']
        self.highest_assigned = netaddr.IPAddress(allocation_pools[0]['end'])
        # XXX look away now, nothing to see here, move along.
        #     If there is less than 30 free ips in the network after the top
        #     dhcp ip then eat into the top of the dhcp range
        available_ips = []
        for element in list(netaddr.IPNetwork(self.cidr)):
            if element == netaddr.IPAddress(self.highest_assigned) or \
                    available_ips:
                available_ips.append(element)
        if len(available_ips) < 30:
            self.highest_assigned = self.highest_assigned - 30

    def get_next(self):
        next_ip = self.highest_assigned + 1
        if next_ip in list(netaddr.IPNetwork(self.cidr)):
            self.highest_assigned = self.highest_assigned + 1
            return next_ip
        else:
            raise Exception("vip pool exhausted")


cli_utils.setup_logging()
vp = VipPool()
juju_status = juju_utils.get_full_juju_status()
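# Assign the next free address from the pool to every application that
# exposes a 'vip' config option.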
for application in juju_status.applications.keys():
    if 'vip' in model.get_application_config(application).keys():
        model.set_application_config(application, {'vip': vp.get_next()})
mojo_utils.juju_wait_finished()
Example #26
    def config_change(self, default_config, alternate_config):
        """Run change config tests.

        Change config to `alternate_config`, wait for idle workload status,
        yield, return config to `default_config` and wait for idle workload
        status before return from function.

        Example usage:
            with self.config_change({'preferred-api-version': '2'},
                                    {'preferred-api-version': '3'}):
                do_something()

        :param default_config: Dict of charm settings to set on completion
        :type default_config: dict
        :param alternate_config: Dict of charm settings to change to
        :type alternate_config: dict
        """
        # we need to compare config values to what is already applied before
        # attempting to set them.  otherwise the model will behave differently
        # than we would expect while waiting for completion of the change
        _app_config = model.get_application_config(self.application_name)
        app_config = {}
        # convert the more elaborate config structure from libjuju to something
        # we can compare to what the caller supplies to this function
        for k in alternate_config.keys():
            # note that all values are converted to strings because attempting
            # to set any config with other types leads to a Traceback
            app_config[k] = str(_app_config.get(k, {}).get('value', ''))
        if all(item in app_config.items()
               for item in alternate_config.items()):
            logging.debug('alternate_config equals what is already applied '
                          'config')
            yield
            if default_config == alternate_config:
                logging.debug('default_config also equals what is already '
                              'applied config')
                return
            logging.debug('alternate_config already set, and default_config '
                          'needs to be applied before return')
        else:
            logging.debug(
                'Changing charm setting to {}'.format(alternate_config))
            model.set_application_config(self.application_name,
                                         alternate_config,
                                         model_name=self.model_name)

            logging.debug('Waiting for units to execute config-changed hook')
            model.wait_for_agent_status(model_name=self.model_name)

            logging.debug('Waiting for units to reach target states')
            model.wait_for_application_states(model_name=self.model_name,
                                              states=self.test_config.get(
                                                  'target_deploy_status', {}))
            # TODO: Optimize with a block on a specific application until idle.
            model.block_until_all_units_idle()

            yield

        logging.debug('Restoring charm setting to {}'.format(default_config))
        model.set_application_config(self.application_name,
                                     default_config,
                                     model_name=self.model_name)

        logging.debug('Waiting for units to reach target states')
        model.wait_for_application_states(model_name=self.model_name,
                                          states=self.test_config.get(
                                              'target_deploy_status', {}))
        # TODO: Optimize with a block on a specific application until idle.
        model.block_until_all_units_idle()
Example #27
    def test_03_user_monitor(self):
        """Verify user monitors are applied."""
        user_monitors = {
            "version": "0.3",
            "monitors": {
                "local": {
                    "procrunning": {
                        "rsync": {
                            "max": 1,
                            "executable": "rsync",
                            "name": "RSYNc Running",
                            "min": 1,
                        },
                        "jujud": {
                            "max": 1,
                            "executable": "jujud",
                            "name": "Juju Running",
                            "min": 1,
                        },
                    }
                },
                "remote": {
                    "tcp": {
                        "ssh": {
                            "warning": 2,
                            "critical": 10,
                            "name": "SSH Running",
                            "timeout": 12,
                            "port": 22,
                            "string": "SSH.*",
                            "expect": None,
                        }
                    }
                },
            },
        }
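        # The monitors definition is passed to the charm as a YAML document.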
        model.set_application_config(self.application_name,
                                     {"monitors": yaml.dump(user_monitors)})
        model.block_until_all_units_idle()

        local_nrpe_checks = {
            "check_proc_jujud_user.cfg":
            "command[check_proc_jujud_user]=/usr/lib/nagios/plugins/"
            "check_procs -w 1 -c 1 -C jujud",
            "check_proc_rsync_user.cfg":
            "command[check_proc_rsync_user]=/usr/lib/nagios/plugins/"
            "check_procs -w 1 -c 1 -C rsync",
        }

        for nrpe_check in local_nrpe_checks:
            logging.info(
                "Checking content of '{}' nrpe check".format(nrpe_check))
            cmd = "cat /etc/nagios/nrpe.d/" + nrpe_check
            result = model.run_on_unit(self.lead_unit_name, cmd)
            code = result.get("Code")
            if code != "0":
                logging.warning(
                    "Unable to find nrpe check {} at /etc/nagios/nrpe.d/".
                    format(nrpe_check))
                raise model.CommandRunFailed(cmd, result)
            content = result.get("Stdout")
            self.assertTrue(local_nrpe_checks[nrpe_check] in content)

        remote_nrpe_checks = {
            "check_tcp_H_HOSTADDRESS__E_p22_s_SSH____eNone_w2_c10_t12_t10.cfg":
            "/usr/lib/nagios/plugins/check_tcp -H $HOSTADDRESS$ "
            "-E -p 22 -s 'SSH.*' -e None -w 2 -c 10 -t 12 -t 10"
        }
        for nrpe_check in remote_nrpe_checks:
            logging.info(
                "Checking content of '{}' nrpe command in nagios unit".format(
                    nrpe_check))
            cmd = "cat /etc/nagios3/conf.d/commands/" + nrpe_check
            nagios_lead_unit_name = model.get_lead_unit_name(
                "nagios", model_name=self.model_name)
            result = model.run_on_unit(nagios_lead_unit_name, cmd)
            code = result.get("Code")
            if code != "0":
                logging.warning(
                    "Unable to find nrpe command {} at "
                    "/etc/nagios3/conf.d/commands/ in nagios unit".format(
                        nrpe_check))
                raise model.CommandRunFailed(cmd, result)
            content = result.get("Stdout")
            self.assertTrue(remote_nrpe_checks[nrpe_check] in content)
Example #28
def configure_gateway_ext_port(novaclient,
                               neutronclient,
                               dvr_mode=None,
                               net_id=None):
    """Configure the neutron-gateway external port.

    :param novaclient: Authenticated novaclient
    :type novaclient: novaclient.Client object
    :param neutronclient: Authenticated neutronclient
    :type neutronclient: neutronclient.Client object
    :param dvr_mode: Using DVR mode or not
    :type dvr_mode: boolean
    :param net_id: Network ID
    :type net_id: string
    :returns: Nothing: This function is executed for its side effect
    :rtype: None
    """

    if dvr_mode:
        uuids = get_ovs_uuids()
    else:
        uuids = get_gateway_uuids()

    deprecated_extnet_mode = deprecated_external_networking(dvr_mode)

    config_key = 'data-port'
    if deprecated_extnet_mode:
        config_key = 'ext-port'

    if not net_id:
        net_id = get_admin_net(neutronclient)['id']

    for uuid in uuids:
        server = novaclient.servers.get(uuid)
        ext_port_name = "{}_ext-port".format(server.name)
        for port in neutronclient.list_ports(device_id=server.id)['ports']:
            if port['name'] == ext_port_name:
                logging.warning('Neutron Gateway already has additional port')
                break
        else:
            logging.info('Attaching additional port to instance, '
                         'connected to net id: {}'.format(net_id))
            body_value = {
                "port": {
                    "admin_state_up": True,
                    "name": ext_port_name,
                    "network_id": net_id,
                    "port_security_enabled": False,
                }
            }
            port = neutronclient.create_port(body=body_value)
            server.interface_attach(port_id=port['port']['id'],
                                    net_id=None,
                                    fixed_ip=None)
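    # Build the data-port/ext-port value: a space-delimited list of MACs,
    # each prefixed with 'br-ex:' when the data-port option is in use.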
    ext_br_macs = []
    for port in neutronclient.list_ports(network_id=net_id)['ports']:
        if 'ext-port' in port['name']:
            if deprecated_extnet_mode:
                ext_br_macs.append(port['mac_address'])
            else:
                ext_br_macs.append('br-ex:{}'.format(port['mac_address']))
    ext_br_macs.sort()
    ext_br_macs_str = ' '.join(ext_br_macs)
    if dvr_mode:
        application_name = 'neutron-openvswitch'
    else:
        application_name = 'neutron-gateway'

    if ext_br_macs:
        logging.info('Setting {} on {} external port to {}'.format(
            config_key, application_name, ext_br_macs_str))
        current_data_port = get_application_config_option(
            application_name, config_key)
        if current_data_port == ext_br_macs_str:
            logging.info('Config already set to value')
            return
        model.set_application_config(
            lifecycle_utils.get_juju_model(),
            application_name,
            configuration={config_key: ext_br_macs_str})
        juju_wait.wait(wait_for_workload=True)
Example #29
    def test_blocked_when_non_pristine_disk_appears(self):
        """Test blocked state with non-pristine disk.

        Validate that charm goes into blocked state when it is presented with
        new block devices that have foreign data on them.
        Instances used in UOSCI has a flavour with ephemeral storage in
        addition to the bootable instance storage.  The ephemeral storage
        device is partitioned, formatted and mounted early in the boot process
        by cloud-init.
        As long as the device is mounted the charm will not attempt to use it.
        If we unmount it and trigger the config-changed hook the block device
        will appear as a new and previously untouched device for the charm.
        One of the first steps of device eligibility checks should be to make
        sure we are seeing a pristine and empty device before doing any
        further processing.
        As the ephemeral device will have data on it we can use it to validate
        that these checks work as intended.
        """
        current_release = zaza_openstack.get_os_release()
        focal_ussuri = zaza_openstack.get_os_release('focal_ussuri')
        if current_release >= focal_ussuri:
            # NOTE(ajkavanagh) - focal (on ServerStack) is broken for /dev/vdb
            # and so this test can't pass: LP#1842751 discusses the issue, but
            # basically the snapd daemon along with lxcfs results in /dev/vdb
            # being mounted in the lxcfs process namespace.  If the charm
            # 'tries' to umount it, it can (as root), but the mount is still
            # 'held' by lxcfs and thus nothing else can be done with it.  This
            # is only a problem in serverstack with images with a default
            # /dev/vdb ephemeral
            logging.warning(
                "Skipping pristine disk test for focal and higher")
            return
        logging.info('Checking behaviour when non-pristine disks appear...')
        logging.info('Configuring ephemeral-unmount...')
        alternate_conf = {
            'ephemeral-unmount': '/mnt',
            'osd-devices': '/dev/vdb'
        }
        juju_service = 'ceph-osd'
        zaza_model.set_application_config(juju_service, alternate_conf)
        ceph_osd_states = {
            'ceph-osd': {
                'workload-status': 'blocked',
                'workload-status-message': 'Non-pristine'
            }
        }
        zaza_model.wait_for_application_states(states=ceph_osd_states)
        logging.info('Units now in blocked state, running zap-disk action...')
        unit_names = ['ceph-osd/0', 'ceph-osd/1', 'ceph-osd/2']
        for unit_name in unit_names:
            zap_disk_params = {
                'devices': '/dev/vdb',
                'i-really-mean-it': True,
            }
            action_obj = zaza_model.run_action(
                unit_name=unit_name,
                action_name='zap-disk',
                action_params=zap_disk_params
            )
            logging.debug('Result of action: {}'.format(action_obj))

        logging.info('Running add-disk action...')
        for unit_name in unit_names:
            add_disk_params = {
                'osd-devices': '/dev/vdb',
            }
            action_obj = zaza_model.run_action(
                unit_name=unit_name,
                action_name='add-disk',
                action_params=add_disk_params
            )
            logging.debug('Result of action: {}'.format(action_obj))

        logging.info('Wait for idle/ready status...')
        zaza_model.wait_for_application_states()

        logging.info('OK')

        set_default = {
            'ephemeral-unmount': '',
            'osd-devices': '/dev/vdb',
        }

        current_release = zaza_openstack.get_os_release()
        bionic_train = zaza_openstack.get_os_release('bionic_train')
        if current_release < bionic_train:
            set_default['osd-devices'] = '/dev/vdb /srv/ceph'

        logging.info('Restoring to default configuration...')
        zaza_model.set_application_config(juju_service, set_default)

        zaza_model.wait_for_application_states()