Example #1
def add_domain(system_service, sd_name, url):
    target_server = sdk4.types.OpenStackImageProvider(
        name=sd_name,
        description=sd_name,
        url=url,
        requires_authentication=False)

    try:
        providers_service = system_service.openstack_image_providers_service()
        providers_service.add(target_server)
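        # get() below stashes the matched provider service in this list so
        # the result can be handed back out of the polling closure.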
        glance = []

        def get():
            providers = [
                provider for provider in providers_service.list()
                if provider.name == sd_name
            ]
            if not providers:
                return False
            instance = providers_service.provider_service(providers.pop().id)
            if instance:
                glance.append(instance)
                return True
            else:
                return False

        # Poll until the freshly added provider becomes queryable;
        # NotFoundError is tolerated while the engine is still registering it.
        assertions.assert_true_within_short(
            func=get, allowed_exceptions=[sdk4.NotFoundError])
    except (AssertionError, sdk4.NotFoundError):
        # NotFoundError if the add call itself failed.
        # AssertionError if the add call succeeded but we could not verify
        # that the glance provider was actually added.
        return None

    return glance.pop()
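
A minimal usage sketch for the helper above; the provider name and URL are hypothetical, not taken from the suite:

# Hypothetical name/URL for illustration; add_domain() returns the provider
# service on success, or None if the addition could not be verified.
glance = add_domain(system_service, 'ost-glance', 'http://glance.example.com:9292')
assert glance is not None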
Example #2
def _start_he_vm(ansible_host):
    logging.info('Starting the engine VM...')
    ansible_host.shell('hosted-engine --vm-start')
    logging.info('Waiting for the engine VM to be UP...')
    assertions.assert_true_within_short(
        lambda: he_utils.engine_vm_is_up(ansible_host))
    logging.info('Engine VM is UP.')
Example #3
def test_hotplug_disk(assert_vm_is_alive, engine_api):
    engine = engine_api.system_service()
    disk_attachments_service = test_utils.get_disk_attachments_service(
        engine, VM0_NAME)
    disk_attachment = disk_attachments_service.add(
        types.DiskAttachment(
            disk=types.Disk(
                name=DISK0_NAME,
                provisioned_size=2 * GB,
                format=types.DiskFormat.COW,
                storage_domains=[
                    types.StorageDomain(name=SD_NFS_NAME),
                ],
                status=None,
                sparse=True,
            ),
            interface=types.DiskInterface.VIRTIO,
            bootable=False,
            active=True))

    disks_service = engine.disks_service()
    disk_service = disks_service.disk_service(disk_attachment.disk.id)
    attachment_service = disk_attachments_service.attachment_service(
        disk_attachment.id)

    # Hotplug completes asynchronously: wait for the attachment to report
    # active and for the disk to reach OK before probing the guest.
    assertions.assert_true_within_short(
        lambda: attachment_service.get().active is True)
    assertions.assert_true_within_short(
        lambda: disk_service.get().status == types.DiskStatus.OK)
    assert_vm_is_alive(VM0_NAME)
Example #4
def migrate_vm(all_hosts_hostnames, ansible_by_hostname, system_service):
    vm_service = test_utils.get_vm_service(system_service, VM0_NAME)
    vm_id = vm_service.get().id
    hosts_service = system_service.hosts_service()

    def _current_running_host():
        host_id = vm_service.get().host.id
        host = hosts_service.list(search='id={}'.format(host_id))[0]
        return host.name

    src_host = _current_running_host()
    dst_host = next(iter(all_hosts_hostnames - {src_host}))

    print('source host: {}'.format(src_host))
    print('destination host: {}'.format(dst_host))

    assert_finished_within_long(vm_service.migrate,
                                system_service,
                                host=Host(name=dst_host))

    # Verify that VDSM cleaned up the VM on the source host
    def vm_is_not_on_host():
        ansible_src_host = ansible_by_hostname(src_host)
        out = ansible_src_host.shell('vdsm-client Host getVMList')["stdout"]
        vms = json.loads(out)
        return vm_id not in [vm["vmId"] for vm in vms]

    assertions.assert_true_within_short(vm_is_not_on_host)

    # Once migration completes, the VM should report UP again, now on the
    # destination host.
    assertions.assert_true_within_short(
        lambda: vm_service.get().status == VmStatus.UP)

    assert _current_running_host() == dst_host
Example #5
def _validate_vnic_profile(api, vnic_profile_name):
    def _get_vnic_profile(profiles_service, vnic_profile_name):
        return next((profile for profile in profiles_service.list()
                     if profile.name == vnic_profile_name), None)

    profiles_service = api.system_service().vnic_profiles_service()
    assertions.assert_true_within_short(lambda: _get_vnic_profile(
        profiles_service, vnic_profile_name) is not None)
Example #6
def test_set_global_maintenance(ansible_host0):
    logging.info('Waiting For System Stability...')
    he_utils.wait_until_engine_vm_is_not_migrating(ansible_host0)

    he_utils.set_and_test_global_maintenance_mode(ansible_host0, True)

    assertions.assert_true_within_short(
        lambda: he_utils.all_hosts_state_global_maintenance(ansible_host0))
    logging.info('Global maintenance state set on all hosts')
Example #7
def test_remove_vm2_lease(engine_api):
    engine = engine_api.system_service()
    vm2_service = test_utils.get_vm_service(engine, VM2_NAME)

    vm2_service.update(
        vm=types.Vm(high_availability=types.HighAvailability(enabled=False, ),
                    lease=types.StorageDomainLease(storage_domain=None)))
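    # The lease removal is applied asynchronously; wait until the engine
    # reports the lease as gone.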
    assertions.assert_true_within_short(
        lambda: vm2_service.get().lease is None)
Example #8
def _remove_iface_from_vm(api, vm_name, iface_name):
    nics_service = test_utils.get_nics_service(api.system_service(), vm_name)
    nic = next(nic for nic in nics_service.list() if nic.name == iface_name)

    nic_service = nics_service.nic_service(nic.id)
    nic_service.deactivate()
    assertions.assert_true_within_short(
        lambda: nic_service.get().plugged is False)
    nic_service.remove()
Example #9
def remove_vm_root_checkpoint(checkpoints_service):
    vm_checkpoints = checkpoints_service.list()
    root_checkpoint = vm_checkpoints[0]
    checkpoint_service = checkpoints_service.checkpoint_service(
        id=root_checkpoint.id)
    checkpoint_service.remove()

    # Checkpoint removal is asynchronous: wait until the checkpoint count
    # drops by one.
    assertions.assert_true_within_short(
        lambda: len(checkpoints_service.list()) == len(vm_checkpoints) - 1)
Example #10
def test_add_disks(engine_api, cirros_image_glance_disk_name):
    engine = engine_api.system_service()
    vm_service = test_utils.get_vm_service(engine, VM0_NAME)
    glance_disk = test_utils.get_disk_service(
        engine,
        cirros_image_glance_disk_name,
    )
    assert vm_service and glance_disk

    vm0_disk_attachments_service = test_utils.get_disk_attachments_service(
        engine, VM0_NAME)

    vm0_disk_attachments_service.add(
        types.DiskAttachment(
            disk=types.Disk(
                id=glance_disk.get().id,
                storage_domains=[
                    types.StorageDomain(name=SD_ISCSI_NAME, ),
                ],
            ),
            interface=types.DiskInterface.VIRTIO,
            active=True,
            bootable=True,
        ), )

    disk_params = types.Disk(
        provisioned_size=1 * GB,
        format=types.DiskFormat.COW,
        status=None,
        sparse=True,
        active=True,
        bootable=True,
        backup=types.DiskBackup.INCREMENTAL,
    )

    for vm_name, disk_name, sd_name in (
            (VM1_NAME, DISK1_NAME, SD_NFS_NAME),
            (VM2_NAME, DISK2_NAME, SD_SECOND_NFS_NAME),
            (BACKUP_VM_NAME, BACKUP_DISK_NAME, SD_NFS_NAME)):
        disk_params.name = disk_name
        disk_params.storage_domains = [types.StorageDomain(name=sd_name, )]

        disk_attachments_service = test_utils.get_disk_attachments_service(
            engine, vm_name)
        assert disk_attachments_service.add(
            types.DiskAttachment(disk=disk_params,
                                 interface=types.DiskInterface.VIRTIO))

    for disk_name in (cirros_image_glance_disk_name, DISK1_NAME, DISK2_NAME,
                      BACKUP_DISK_NAME):
        disk_service = test_utils.get_disk_service(engine, disk_name)
        assertions.assert_true_within_short(
            lambda: disk_service.get().status == types.DiskStatus.OK)
Example #11
def test_verify_add_vm1_from_template(engine_api):
    engine = engine_api.system_service()
    _verify_vm_state(engine, VM1_NAME, types.VmStatus.DOWN)

    disks_service = engine.disks_service()
    vm1_disk_attachments_service = test_utils.get_disk_attachments_service(
        engine, VM1_NAME)
    for disk_attachment in vm1_disk_attachments_service.list():
        disk_service = disks_service.disk_service(disk_attachment.disk.id)
        assertions.assert_true_within_short(
            lambda: disk_service.get().status == types.DiskStatus.OK)
Example #12
def test_template_update(engine_api, cirros_image_glance_template_name):
    template_guest = test_utils.get_template_service(
        engine_api.system_service(), cirros_image_glance_template_name)

    if template_guest is None:
        pytest.skip('{0}: template {1} is missing'.format(
            test_template_update.__name__, cirros_image_glance_template_name))
    new_comment = "comment by ovirt-system-tests"
    template_guest.update(template=types.Template(comment=new_comment))
    assertions.assert_true_within_short(
        lambda: template_guest.get().status == types.TemplateStatus.OK)
    assert template_guest.get().comment == new_comment
Example #13
def test_add_floating_disk(engine_api, disks_service):
    disks_service.add(
        types.Disk(
            name=FLOATING_DISK_NAME,
            format=types.DiskFormat.COW,
            provisioned_size=2 * MB,
            active=True,
            storage_domains=[types.StorageDomain(name=SD_SECOND_NFS_NAME)]))

    engine = engine_api.system_service()
    disk_service = test_utils.get_disk_service(engine, FLOATING_DISK_NAME)
    assertions.assert_true_within_short(
        lambda: disk_service.get().status == types.DiskStatus.OK)
Example #14
    def restart():
        ansible_engine.systemd(name='ovirt-engine', state='stopped')
        ansible_engine.systemd(name='ovirt-engine', state='started')

        health_url = 'http://{}/ovirt-engine/services/health'.format(
            engine_fqdn)

        def engine_is_alive():
            with http_proxy_disabled():
                engine_download(health_url)
                return True

        # Poll the health endpoint until it answers; ShellError from a refused
        # connection is tolerated while the engine comes back up.
        assertions.assert_true_within_short(engine_is_alive,
                                            allowed_exceptions=[ShellError])
Example #15
def test_add_vm_pool(engine_api, cirros_image_glance_template_name):
    engine = engine_api.system_service()
    pools_service = engine.vm_pools_service()
    pool_cluster = engine.clusters_service().list(
        search='name={}'.format(TEST_CLUSTER))[0]
    pool_template = engine.templates_service().list(
        search='name={}'.format(cirros_image_glance_template_name))[0]
    with engine_utils.wait_for_event(engine, 302):
        pools_service.add(pool=types.VmPool(
            name=VMPOOL_NAME,
            cluster=pool_cluster,
            template=pool_template,
            use_latest_template_version=True,
        ))
    vm_service = test_utils.get_vm_service(engine, VMPOOL_NAME + '-1')
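    # The first pool VM may not exist yet right after pool creation, hence
    # IndexError is listed as an allowed exception while polling.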
    assertions.assert_true_within_short(
        lambda: vm_service.get().status == types.VmStatus.DOWN,
        allowed_exceptions=[IndexError])
Example #16
def test_extend_disk1(engine_api):
    engine = engine_api.system_service()
    disk_attachments_service = test_utils.get_disk_attachments_service(
        engine, VM1_NAME)
    for disk_attachment in disk_attachments_service.list():
        disk = engine_api.follow_link(disk_attachment.disk)
        if disk.name == DISK1_NAME:
            attachment_service = disk_attachments_service.attachment_service(
                disk_attachment.id)
    with engine_utils.wait_for_event(
            engine, 371):  # USER_EXTEND_DISK_SIZE_SUCCESS(371)
        attachment_service.update(
            types.DiskAttachment(disk=types.Disk(provisioned_size=2 * GB, )))

        disk_service = test_utils.get_disk_service(engine, DISK1_NAME)
        # The extension is processed in the background: wait for the disk to
        # return to OK and to report the new size.
        assertions.assert_true_within_short(
            lambda: disk_service.get().status == types.DiskStatus.OK)
        assertions.assert_true_within_short(
            lambda: disk_service.get().provisioned_size == 2 * GB)
Example #17
def test_add_blank_vms(engine_api, ost_cluster_name):
    engine = engine_api.system_service()
    vms_service = engine.vms_service()

    vm_params = sdk4.types.Vm(
        os=sdk4.types.OperatingSystem(type='other_linux', ),
        type=sdk4.types.VmType.SERVER,
        high_availability=sdk4.types.HighAvailability(enabled=False, ),
        cluster=sdk4.types.Cluster(name=ost_cluster_name, ),
        template=sdk4.types.Template(name=TEMPLATE_BLANK, ),
        display=sdk4.types.Display(
            smartcard_enabled=True,
            keyboard_layout='en-us',
            file_transfer_enabled=True,
            copy_paste_enabled=True,
        ),
        usb=sdk4.types.Usb(
            enabled=True,
            type=sdk4.types.UsbType.NATIVE,
        ),
        memory_policy=sdk4.types.MemoryPolicy(ballooning=True, ),
    )

    vm_params.name = BACKUP_VM_NAME
    vm_params.memory = 96 * MB
    vm_params.memory_policy.guaranteed = 64 * MB
    vms_service.add(vm_params)
    backup_vm_service = test_utils.get_vm_service(engine, BACKUP_VM_NAME)

    vm_params.name = VM0_NAME
    least_hotplug_increment = 256 * MB
    required_memory = 96 * MB
    vm_params.memory = required_memory
    vm_params.memory_policy.guaranteed = required_memory
    vm_params.memory_policy.max = required_memory + least_hotplug_increment

    vms_service.add(vm_params)
    vm0_vm_service = test_utils.get_vm_service(engine, VM0_NAME)

    for vm_service in [backup_vm_service, vm0_vm_service]:
        assertions.assert_true_within_short(
            lambda: vm_service.get().status == sdk4.types.VmStatus.DOWN)
Example #18
def test_assign_labeled_network(system_service, networks_service,
                                hosts_service, ost_dc_name, ost_cluster_name):
    """
    Adds the labeled network to the cluster and asserts the hosts are attached
    """
    labeled_net = networks_service.list(search=f'name={LABELED_NET_NAME}')[0]

    # The logical network is assigned asynchronously to all host network
    # interfaces carrying that label.

    cluster_service = test_utils.get_cluster_service(
        system_service, ost_cluster_name)
    assert cluster_service.networks_service().add(labeled_net)

    for host in test_utils.hosts_in_cluster_v4(system_service,
                                               ost_cluster_name):
        host_service = hosts_service.host_service(id=host.id)
        assertions.assert_true_within_short(
            functools.partial(_host_is_attached_to_network, system_service,
                              host_service, LABELED_NET_NAME, ost_dc_name))
Example #19
def test_hotunplug_disk(engine_api):
    engine = engine_api.system_service()
    disk_service = test_utils.get_disk_service(engine, DISK0_NAME)
    disk_attachments_service = test_utils.get_disk_attachments_service(
        engine, VM0_NAME)
    disk_attachment = disk_attachments_service.attachment_service(
        disk_service.get().id)

    with engine_utils.wait_for_event(engine, 2002):
        # USER_HOTUNPLUG_DISK(2002)
        correlation_id = 'test_hotunplug_disk'
        assert disk_attachment.update(types.DiskAttachment(active=False),
                                      query={'correlation_id': correlation_id})
        assertions.assert_true_within_long(
            lambda: test_utils.all_jobs_finished(engine, correlation_id))

        assertions.assert_true_within_short(
            lambda: disk_service.get().status == types.DiskStatus.OK)

        assertions.assert_true_within_short(
            lambda: disk_attachment.get().active is False)
Example #20
def set_and_test_global_maintenance_mode(ansible_host, mode):
    """
    Updates the global maintenance mode and tests if the value was stored.

    Sometimes there is a race condition where the command that modifies the
    global maintenance flag is ignored. That is why the maintenance mode is
    updated repeatedly in a loop until it succeeds.

    'mode' must be a bool value:
    True - set maintenance mode to global
    False - set maintenance mode to none
    """
    def _set_and_test_global_maintenance_mode():
        logging.debug('_set_and_test_global_maintenance_mode: Start')
        ansible_host.shell('hosted-engine '
                           '--set-maintenance '
                           '--mode={}'.format('global' if mode else 'none'))
        logging.debug('_set_and_test_global_maintenance_mode: After setting')
        return is_global_maintenance_mode(ansible_host) == mode

    logging.info(f'set_and_test_global_maintenance_mode: Start, mode={mode}')
    assertions.assert_true_within_short(_set_and_test_global_maintenance_mode)
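
For reference, Example #6 above drives this helper; a minimal usage sketch:

# Enable global maintenance, then release it again.
set_and_test_global_maintenance_mode(ansible_host0, True)
set_and_test_global_maintenance_mode(ansible_host0, False)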
Example #21
def he_status(ansible_host):
    ret = {}

    def get_value():
        nonlocal ret
        ansible_res = ansible_host.shell(
            'hosted-engine --vm-status --json')['stdout']
        try:
            status = json.loads(ansible_res)
        except ValueError:
            raise RuntimeError(f'could not parse JSON: {ansible_res}')
        # This outputs a dict whose keys are either numbers, and then the
        # values are data about the corresponding host, or
        # 'global_maintenance', and then the value is true or false.
        # Put all hosts' data in a new item 'hosts' so that callers do not
        # have to check isdigit themselves.
        # Also: one of the items per host is 'extra', which is a string
        # containing a newline-separated key=value list. Convert this
        # to a dict as well.
        result = {}
        result['global_maintenance'] = status['global_maintenance']
        result['hosts'] = {}
        for i, data in status.items():
            if i.isdigit():
                hostname = data['hostname']
                result['hosts'][hostname] = data
                result['hosts'][hostname]['extra'] = dict(
                    item.split('=') for item in data['extra'].split('\n')
                    if item)
        ret = result
        logging.debug(f'he_status: {ret}')
        return ret

    assertions.assert_true_within_short(
        lambda: bool(get_value()),
        allowed_exceptions=[RuntimeError, AnsibleExecutionError])
    return ret
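
A sketch of the structure he_status() returns; hostnames and values are made up, the shape follows the comments above:

# {'global_maintenance': False,
#  'hosts': {'host-0.example.com': {'hostname': 'host-0.example.com',
#                                   'extra': {'metadata_parse_version': '1'},
#                                   ...}}}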
Example #22
def test_vdsm_recovery(ansible_by_hostname, engine_api):
    engine = engine_api.system_service()
    vm_service = test_utils.get_vm_service(engine, VM0_NAME)
    host_id = vm_service.get().host.id
    host_service = engine.hosts_service().host_service(host_id)
    host_name = host_service.get().name
    ansible_host = ansible_by_hostname(host_name)

    # With VDSM stopped the engine loses contact with the host, so the VM's
    # status is expected to drop to UNKNOWN until VDSM is started again.
    ansible_host.systemd(name='vdsmd', state='stopped')
    assertions.assert_true_within_short(
        lambda: vm_service.get().status == types.VmStatus.UNKNOWN)

    ansible_host.systemd(name='vdsmd', state='started')
    assertions.assert_true_within_short(
        lambda: host_service.get().status == types.HostStatus.UP)
    assertions.assert_true_within_short(
        lambda: vm_service.get().status == types.VmStatus.UP)
Example #23
def test_deactivate_storage_domain(engine_api):
    # TODO: uncomment once VnicSetup checks are fixed.
    # TODO: this also seems to leave running tasks behind which break the deactivation.
    # TODO: it should be tested in multiple runs or properly waited for.
    # VnicSetup.vnic_setup().init(engine_api.system_service(),
    #                            VM2_NAME, DC_NAME, CLUSTER_NAME)
    engine = engine_api.system_service()
    dc = test_utils.data_center_service(engine, DC_NAME)
    correlation_id = 'deactivate_storage_domain'

    def _deactivate_with_running_ovf_update_task():
        try:
            test_utils.get_attached_storage_domain(
                dc, SD_SECOND_NFS_NAME, service=True).deactivate(
                query={'correlation_id': correlation_id})
            return True
        except ovirtsdk4.Error as err:
            # The storage domain's deactivation may fail if it has running
            # tasks. If the failure comes from an OVF store update task
            # (UploadStream), ignore it and retry; otherwise re-raise.
            if not (('UploadStream' in err.args[0]) or ('OVF' in err.args[0])):
                raise
            return False

    assertions.assert_true_within_short(
        _deactivate_with_running_ovf_update_task)

    # Wait for the storage domain deactivation to finish.
    # TODO: Fix the engine code so the status changes once the operation has
    # finished (BZ 1949101).
    assertions.assert_true_within_short(
        lambda: test_utils.all_jobs_finished(engine, correlation_id)
    )
    assertions.assert_true_within_short(
        lambda:
        test_utils.get_attached_storage_domain(
            dc, SD_SECOND_NFS_NAME).status == ovirtsdk4.types.StorageDomainStatus.MAINTENANCE
    )
Example #24
def _shutdown_he_vm(ansible_host):
    ansible_host.shell('hosted-engine --vm-shutdown')
    logging.info('Waiting for the engine VM to be down...')
    assertions.assert_true_within_short(
        lambda: he_utils.engine_vm_is_down(ansible_host))
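
Examples #2 and #24 pair naturally; a minimal restart sketch using the helpers above:

# Bounce the hosted-engine VM: shut it down, then bring it back up.
_shutdown_he_vm(ansible_host)
_start_he_vm(ansible_host)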