def test_100_cold_start_bootstrap(self):
        """Bootstrap a non-leader node.

        After bootstrapping a non-leader node, notify bootstrapped on the
        leader node.
        """
        _machines = sorted(
            juju_utils.get_machine_uuids_for_application(self.application))
        # Stop Nodes
        # Avoid hitting an update-status hook
        logging.info("Wait till model is idle ...")
        zaza.model.block_until_all_units_idle()
        logging.info("Stopping instances: {}".format(_machines))
        for uuid in _machines:
            self.nova_client.servers.stop(uuid)
        logging.info("Wait till all machines are shutoff ...")
        for uuid in _machines:
            openstack_utils.resource_reaches_status(self.nova_client.servers,
                                                    uuid,
                                                    expected_status='SHUTOFF',
                                                    stop_after_attempt=16)

        # Start nodes
        _machines.sort(reverse=True)
        logging.info("Starting instances: {}".format(_machines))
        for uuid in _machines:
            self.nova_client.servers.start(uuid)

        for unit in zaza.model.get_units(self.application):
            zaza.model.block_until_unit_wl_status(unit.entity_id,
                                                  'unknown',
                                                  negate_match=True)

        logging.info("Wait till model is idle ...")
        # XXX If a hook was executing on a unit when it was powered off
        #     it comes back in an error state.
        try:
            zaza.model.block_until_all_units_idle()
        except zaza.model.UnitError:
            self.resolve_update_status_errors()
            zaza.model.block_until_all_units_idle()

        logging.info("Wait for application states ...")
        for unit in zaza.model.get_units(self.application):
            try:
                zaza.model.run_on_unit(unit.entity_id, "hooks/update-status")
            except zaza.model.UnitError:
                self.resolve_update_status_errors()
                zaza.model.run_on_unit(unit.entity_id, "hooks/update-status")
        states = {
            "percona-cluster": {
                "workload-status": "blocked",
                "workload-status-message": "MySQL is down"
            }
        }
        zaza.model.wait_for_application_states(states=states)

        # Update which node is the leader and which are not
        _leader, _non_leaders = generic_utils.get_leaders_and_non_leaders(
            self.application_name)
        # We want to test the worst possible scenario which is the
        # non-leader with the highest sequence number. We will use the leader
        # for the notify-bootstrapped after. They just need to be different
        # units.
        logging.info("Execute bootstrap-pxc action after cold boot ...")
        zaza.model.run_action(_non_leaders[0],
                              "bootstrap-pxc",
                              action_params={})
        logging.info("Wait for application states ...")
        for unit in zaza.model.get_units(self.application):
            zaza.model.run_on_unit(unit.entity_id, "hooks/update-status")
        states = {
            "percona-cluster": {
                "workload-status": "waiting",
                "workload-status-message": "Unit waiting for cluster bootstrap"
            }
        }
        zaza.model.wait_for_application_states(states=states)
        logging.info("Execute notify-bootstrapped action after cold boot on "
                     "the leader node ...")
        zaza.model.run_action_on_leader(self.application,
                                        "notify-bootstrapped",
                                        action_params={})
        logging.info("Wait for application states ...")
        for unit in zaza.model.get_units(self.application):
            zaza.model.run_on_unit(unit.entity_id, "hooks/update-status")
        test_config = lifecycle_utils.get_charm_config(fatal=False)
        zaza.model.wait_for_application_states(
            states=test_config.get("target_deploy_status", {}))
# 示例#2 (example separator)
# 0
    def test_410_heat_stack_create_delete(self):
        """Create stack, confirm nova compute resource, delete stack."""
        # Verify new image name
        glance_images = list(self.glance_client.images.list())
        self.assertEqual(glance_images[0].name, IMAGE_NAME,
                         "glance image create failed or unexpected")

        # Pick the template flavour matching the deployed OpenStack release.
        logging.info('Creating heat stack...')
        template_name = 'hot_hello_world.yaml'
        queens = openstack_utils.get_os_release('xenial_queens')
        if openstack_utils.get_os_release() < queens:
            os_release = 'icehouse'
        else:
            os_release = 'queens'

        # Get location of template files in charm-heat
        bundle_dir = charm_lifecycle_utils.BUNDLE_DIR
        if bundle_dir.endswith("/"):
            bundle_dir = bundle_dir[:-1]

        template_path = os.path.abspath(
            os.path.join(os.path.dirname(bundle_dir),
                         TEMPLATES_PATH, os_release, template_name))
        template_url = urlparse.urlparse(template_path, scheme='file').geturl()
        logging.info('template url: {}'.format(template_url))

        http_client = self.heat_client.http_client
        t_files, template = template_utils.get_template_contents(
            template_url, http_client)
        env_files, env = template_utils.process_environment_and_files(
            env_path=None)

        # Assemble the stack creation request; merge template and
        # environment file maps.
        stack_files = dict(t_files)
        stack_files.update(env_files)
        fields = {
            'stack_name': STACK_NAME,
            'timeout_mins': '15',
            'disable_rollback': False,
            'parameters': {
                'admin_pass': '******',
                'key_name': nova_utils.KEYPAIR_NAME,
                'image': IMAGE_NAME
            },
            'template': template,
            'files': stack_files,
            'environment': env
        }

        # Create the stack
        try:
            stack = self.heat_client.stacks.create(**fields)
            logging.info('Stack data: {}'.format(stack))
            stack_id = stack['stack']['id']
            logging.info('Creating new stack, ID: {}'.format(stack_id))
        except Exception as e:
            # Generally, an api or cloud config error if this is hit.
            self.fail('Failed to create heat stack: {}'.format(e))

        # Confirm stack reaches COMPLETE status.
        # /!\ Heat stacks reach a COMPLETE status even when nova cannot
        # find resources (a valid hypervisor) to fit the instance, in
        # which case the heat stack self-deletes!  Confirm anyway...
        openstack_utils.resource_reaches_status(self.heat_client.stacks,
                                                stack_id,
                                                expected_status="COMPLETE",
                                                msg="Stack status wait")
        # List stack
        logging.info('All stacks: {}'.format(
            list(self.heat_client.stacks.list())))

        # Get stack information
        try:
            stack = self.heat_client.stacks.get(STACK_NAME)
        except Exception as e:
            # Generally, a resource availability issue if this is hit.
            self.fail('Failed to get heat stack: {}'.format(e))

        # Confirm stack name.
        logging.info('Expected, actual stack name: {}, '
                     '{}'.format(STACK_NAME, stack.stack_name))
        self.assertEqual(stack.stack_name, STACK_NAME,
                         'Stack name mismatch, '
                         '{} != {}'.format(STACK_NAME, stack.stack_name))

        # Confirm existence of a heat-generated nova compute resource
        logging.info('Confirming heat stack resource status...')
        compute_resource = self.heat_client.resources.get(STACK_NAME,
                                                          RESOURCE_TYPE)
        server_id = compute_resource.physical_resource_id
        self.assertTrue(server_id, "Stack failed to spawn a compute resource.")

        # Confirm nova instance reaches ACTIVE status
        openstack_utils.resource_reaches_status(self.nova_client.servers,
                                                server_id,
                                                expected_status="ACTIVE",
                                                msg="nova instance")
        logging.info('Nova instance reached ACTIVE status')

        # Delete stack
        logging.info('Deleting heat stack...')
        openstack_utils.delete_resource(self.heat_client.stacks,
                                        STACK_NAME, msg="heat stack")
# 示例#3 (example separator)
# 0
def launch_instance(instance_key,
                    use_boot_volume=False,
                    vm_name=None,
                    private_network_name=None,
                    image_name=None,
                    flavor_name=None,
                    external_network_name=None,
                    meta=None,
                    userdata=None):
    """Launch a guest, wait for it to come up and verify connectivity.

    :param instance_key: Key to collect associated config data with.
    :type instance_key: str
    :param use_boot_volume: Whether to boot guest from a shared volume.
    :type use_boot_volume: boolean
    :param vm_name: Name to give guest.
    :type vm_name: str
    :param private_network_name: Name of private network to attach guest to.
    :type private_network_name: str
    :param image_name: Image name to use with guest.
    :type image_name: str
    :param flavor_name: Flavor name to use with guest.
    :type flavor_name: str
    :param external_network_name: External network to create floating ip from
                                  for guest.
    :type external_network_name: str
    :param meta: A dict of arbitrary key/value metadata to store for this
                 server. Both keys and values must be <=255 characters.
    :type meta: dict
    :param userdata: Configuration to use upon launch, used by cloud-init.
    :type userdata: str
    :returns: the created instance
    :rtype: novaclient.Server
    """
    session = openstack_utils.get_overcloud_keystone_session()
    nova_client = openstack_utils.get_nova_session_client(session)
    neutron_client = openstack_utils.get_neutron_session_client(session)

    # Resolve resources, falling back to the per-key test defaults.
    test_cfg = boot_tests[instance_key]
    vm_name = vm_name or time.strftime("%Y%m%d%H%M%S")
    image = nova_client.glance.find_image(
        image_name or test_cfg['image_name'])
    flavor = nova_client.flavors.find(
        name=flavor_name or test_cfg['flavor_name'])
    network = neutron_client.find_resource(
        "network", private_network_name or "private")
    nics = [{'net-id': network.get('id')}]
    meta = meta or {}
    external_network_name = external_network_name or "ext_net"

    bdmv2 = None
    if use_boot_volume:
        # Boot from a volume created from the image rather than from the
        # image directly.
        bdmv2 = [{
            'boot_index': '0',
            'uuid': image.id,
            'source_type': 'image',
            'volume_size': flavor.disk,
            'destination_type': 'volume',
            'delete_on_termination': True
        }]
        image = None

    # Launch instance.
    logging.info('Launching instance {}'.format(vm_name))
    instance = nova_client.servers.create(name=vm_name,
                                          image=image,
                                          block_device_mapping_v2=bdmv2,
                                          flavor=flavor,
                                          key_name=nova_utils.KEYPAIR_NAME,
                                          meta=meta,
                                          nics=nics,
                                          userdata=userdata)

    # Test Instance is ready.
    logging.info('Checking instance is active')
    openstack_utils.resource_reaches_status(nova_client.servers,
                                            instance.id,
                                            expected_status='ACTIVE',
                                            stop_after_attempt=16)

    logging.info('Checking cloud init is complete')
    openstack_utils.cloud_init_complete(nova_client, instance.id,
                                        test_cfg['bootstring'])
    port = openstack_utils.get_ports_from_device_id(neutron_client,
                                                    instance.id)[0]
    logging.info('Assigning floating ip.')
    ip = openstack_utils.create_floating_ip(neutron_client,
                                            external_network_name,
                                            port=port)['floating_ip_address']
    logging.info('Assigned floating IP {} to {}'.format(ip, vm_name))
    # Retry the ping with exponential backoff; give up after 8 attempts.
    try:
        for attempt in Retrying(stop=stop_after_attempt(8),
                                wait=wait_exponential(multiplier=1,
                                                      min=2,
                                                      max=60)):
            with attempt:
                try:
                    openstack_utils.ping_response(ip)
                except subprocess.CalledProcessError as e:
                    logging.error('Pinging {} failed with {}'.format(
                        ip, e.returncode))
                    logging.error('stdout: {}'.format(e.stdout))
                    logging.error('stderr: {}'.format(e.stderr))
                    raise
    except RetryError:
        raise openstack_exceptions.NovaGuestNoPingResponse()

    # Check ssh'ing to instance.
    logging.info('Testing ssh access.')
    openstack_utils.ssh_test(username=test_cfg['username'],
                             ip=ip,
                             vm_name=vm_name,
                             password=test_cfg.get('password'),
                             privkey=openstack_utils.get_private_key(
                                 nova_utils.KEYPAIR_NAME))
    return instance
# 示例#4 (example separator)
# 0
    def test_100_reboot_cluster_from_complete_outage(self):
        """Reboot cluster from complete outage.

        After a cold start, reboot cluster from complete outage: power off
        every machine backing the application, power them back on, clear any
        hook errors caused by the outage, then run the
        reboot-cluster-from-complete-outage action on each unit until one
        succeeds.
        """
        _machines = sorted(
            juju_utils.get_machine_uuids_for_application(self.application))
        # Stop Nodes
        # Avoid hitting an update-status hook
        logging.info("Wait till model is idle ...")
        zaza.model.block_until_all_units_idle()
        logging.info("Stopping instances: {}".format(_machines))
        for uuid in _machines:
            self.nova_client.servers.stop(uuid)
        logging.info("Wait till all machines are shutoff ...")
        for uuid in _machines:
            openstack_utils.resource_reaches_status(self.nova_client.servers,
                                                    uuid,
                                                    expected_status='SHUTOFF',
                                                    stop_after_attempt=16)

        # Start nodes
        _machines.sort(reverse=True)
        logging.info("Starting instances: {}".format(_machines))
        for uuid in _machines:
            self.nova_client.servers.start(uuid)

        # BUG FIX: the original message said "are in state 'unkown'" (typo,
        # and the opposite of what the code waits for). negate_match=True
        # blocks until the unit workload status is no longer 'unknown'.
        logging.info(
            "Wait till all {} units have left state 'unknown' ...".format(
                self.application))
        for unit in zaza.model.get_units(self.application):
            zaza.model.block_until_unit_wl_status(unit.entity_id,
                                                  'unknown',
                                                  negate_match=True)

        logging.info("Wait till model is idle ...")
        # A hook that was mid-flight when a machine was powered off can leave
        # its unit in an error state; resolve and retry once.
        try:
            zaza.model.block_until_all_units_idle()
        except zaza.model.UnitError:
            self.resolve_update_status_errors()
            zaza.model.block_until_all_units_idle()

        logging.info("Clear error hooks after reboot ...")
        for unit in zaza.model.get_units(self.application):
            try:
                zaza.model.run_on_unit(unit.entity_id, "hooks/update-status")
            except zaza.model.UnitError:
                self.resolve_update_status_errors()
                zaza.model.run_on_unit(unit.entity_id, "hooks/update-status")

        logging.info(
            "Wait till all {} units are in state 'blocked' ...".format(
                self.application))
        for unit in zaza.model.get_units(self.application):
            zaza.model.block_until_unit_wl_status(unit.entity_id, 'blocked')

        logging.info("Execute reboot-cluster-from-complete-outage "
                     "action after cold boot ...")
        # We do not know which unit has the most up to date data
        # run reboot-cluster-from-complete-outage until we get a success.
        for unit in zaza.model.get_units(self.application):
            action = zaza.model.run_action(
                unit.entity_id,
                "reboot-cluster-from-complete-outage",
                action_params={})
            if "Success" in action.data.get("results", {}).get("outcome", ""):
                break
            logging.info(action.data.get("results", {}).get("output", ""))

        assert "Success" in action.data["results"]["outcome"], (
            "Reboot cluster from complete outage action failed: {}".format(
                action.data))
        logging.info("Wait for application states ...")
        for unit in zaza.model.get_units(self.application):
            zaza.model.run_on_unit(unit.entity_id, "hooks/update-status")
        test_config = lifecycle_utils.get_charm_config(fatal=False)
        zaza.model.wait_for_application_states(
            states=test_config.get("target_deploy_status", {}))
# 示例#5 (example separator)
# 0
    def test_manila_share(self):
        """Test that Manila + Ganesha shares can be accessed on two instances.

        1. create a share
        2. Spawn two servers
        3. mount it on both
        4. write a file on one
        5. read it on the other
        6. profit
        """
        # Create a share
        share = self.manila_client.shares.create(share_type='cephfsnfstype',
                                                 name='cephnfsshare1',
                                                 share_proto="nfs",
                                                 size=1)

        # Spawn Servers
        instance_1, instance_2 = self.launch_guests(
            userdata=self.INSTANCE_USERDATA)

        ip_1 = neutron_tests.floating_ips_from_instance(instance_1)[0]
        ip_2 = neutron_tests.floating_ips_from_instance(instance_2)[0]

        # Wait for the created share to become available before it gets used.
        openstack_utils.resource_reaches_status(
            self.manila_client.shares,
            share.id,
            wait_iteration_max_time=120,
            stop_after_attempt=2,
            expected_status="available",
            msg="Waiting for a share to become available")

        # Grant both guests read/write access by their floating IPs.
        for guest_ip in (ip_1, ip_2):
            share.allow(access_type='ip', access=guest_ip, access_level='rw')

        # Mount Share
        username = guest.boot_tests['bionic']['username']
        password = guest.boot_tests['bionic'].get('password')
        privkey = openstack_utils.get_private_key(nova_utils.KEYPAIR_NAME)

        def verify_setup(stdin, stdout, stderr):
            # The remote command must exit 0; log its stderr otherwise.
            status = stdout.channel.recv_exit_status()
            if status:
                logging.info("{}".format(stderr.readlines()[0].strip()))
            self.assertEqual(status, 0)

        mount_path = share.export_locations[0]

        def ssh_with_retry(ip, instance_name, command):
            # Run the remote command, retrying with exponential backoff.
            for attempt in Retrying(stop=stop_after_attempt(3),
                                    wait=wait_exponential(multiplier=1,
                                                          min=2,
                                                          max=10)):
                with attempt:
                    openstack_utils.ssh_command(
                        username,
                        ip,
                        instance_name,
                        command,
                        password=password,
                        privkey=privkey,
                        verify=verify_setup)

        # Mount the share on instance_1 and write a marker file.
        ssh_with_retry(
            ip_1,
            'instance-1',
            'sudo mkdir -p /mnt/ceph && '
            'sudo mount -t nfs -o nfsvers=4.1,proto=tcp '
            '{} /mnt/ceph && '
            'echo "test" | sudo tee /mnt/ceph/test'.format(mount_path))

        # Mount the same share on instance_2.
        ssh_with_retry(
            ip_2,
            'instance-2',
            'sudo mkdir -p /mnt/ceph && '
            'sudo /bin/mount -t nfs -o nfsvers=4.1,proto=tcp '
            '{} /mnt/ceph'.format(mount_path))

        def verify(stdin, stdout, stderr):
            # The read must succeed and return exactly the marker content.
            status = stdout.channel.recv_exit_status()
            if status:
                logging.info("{}".format(stderr.readlines()[0].strip()))
            self.assertEqual(status, 0)
            out = "".join(iter(stdout.readline, ""))
            self.assertEqual(out, "test\n")

        openstack_utils.ssh_command(username,
                                    ip_2,
                                    'instance-2',
                                    'sudo cat /mnt/ceph/test',
                                    password=password,
                                    privkey=privkey,
                                    verify=verify)
    def test_100_reboot_cluster_from_complete_outage(self):
        """Reboot cluster from complete outage.

        After a cold start, reboot cluster from complete outage.
        """
        machine_ids = sorted(
            juju_utils.get_machine_uuids_for_application(self.application))
        # Let any in-flight hooks (e.g. update-status) finish before the
        # machines are powered off.
        logging.debug("Wait till model is idle ...")
        zaza.model.block_until_all_units_idle()
        logging.info("Stopping instances: {}".format(machine_ids))
        for machine_id in machine_ids:
            self.nova_client.servers.stop(machine_id)
        logging.debug("Wait till all machines are shutoff ...")
        for machine_id in machine_ids:
            openstack_utils.resource_reaches_status(
                self.nova_client.servers,
                machine_id,
                expected_status='SHUTOFF',
                stop_after_attempt=16)

        # Power the machines back on, highest uuid first.
        machine_ids.sort(reverse=True)
        logging.info("Starting instances: {}".format(machine_ids))
        for machine_id in machine_ids:
            self.nova_client.servers.start(machine_id)

        # Block until every unit has left the 'unknown' workload state.
        for unit in zaza.model.get_units(self.application):
            zaza.model.block_until_unit_wl_status(
                unit.entity_id, 'unknown', negate_match=True)

        logging.debug("Wait till model is idle ...")
        try:
            zaza.model.block_until_all_units_idle()
        except zaza.model.UnitError:
            self.resolve_update_status_errors()
            zaza.model.block_until_all_units_idle()

        logging.debug("Wait for application states ...")
        for unit in zaza.model.get_units(self.application):
            try:
                zaza.model.run_on_unit(unit.entity_id, "hooks/update-status")
            except zaza.model.UnitError:
                self.resolve_update_status_errors()
                zaza.model.run_on_unit(unit.entity_id, "hooks/update-status")
        zaza.model.wait_for_application_states(states={
            self.application: {
                "workload-status": "blocked",
                "workload-status-message":
                "MySQL InnoDB Cluster not healthy: None"
            }
        })

        logging.info("Execute reboot-cluster-from-complete-outage "
                     "action after cold boot ...")
        action = zaza.model.run_action_on_leader(
            self.application,
            "reboot-cluster-from-complete-outage",
            action_params={})
        assert "Success" in action.data["results"]["outcome"], (
            "Reboot cluster from complete outage action failed: {}".format(
                action.data))
        logging.debug("Wait for application states ...")
        for unit in zaza.model.get_units(self.application):
            zaza.model.run_on_unit(unit.entity_id, "hooks/update-status")
        test_config = lifecycle_utils.get_charm_config(fatal=False)
        zaza.model.wait_for_application_states(
            states=test_config.get("target_deploy_status", {}))