Example #1
    def test_vm_define(self):
        vm_dataset_obj = Query({'hostname': VM_HOSTNAME}, VM_ATTRIBUTES).get()

        hv = Hypervisor(vm_dataset_obj['hypervisor'])
        vm = VM(vm_dataset_obj, hv)

        vm_stop(VM_HOSTNAME)
        hv.undefine_vm(vm, keep_storage=True)

        self.check_vm_absent()
        vm_define(VM_HOSTNAME)
        self.check_vm_present()
Example #2
def vm_define(vm_hostname):
    """Define VM on hypervisor

    This command executes the code necessary to just define the VM, i.e.
    create the domain.xml for libvirt. It is a convenience command to restore
    a domain in case you lost your SSH session while the domain was not
    defined.

    :param vm_hostname: hostname of the VM
    """

    vm_dataset_obj = Query({'hostname': vm_hostname}, VM_ATTRIBUTES).get()
    hv = Hypervisor(vm_dataset_obj['hypervisor'])
    vm = VM(vm_dataset_obj, hv)

    hv.define_vm(vm)
    vm.start()

    log.info('VM {} defined and booted on {}'.format(
        vm_hostname, vm_dataset_obj['hypervisor']['hostname']))
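For context, here is a minimal sketch of what "defining" a domain means at the libvirt level. The connection URI and the XML file name are illustrative assumptions; igvm's actual define_vm implementation is not shown in this snippet.

import libvirt

# defineXML() registers a persistent domain from its XML without starting it;
# create() then boots the freshly defined domain.
conn = libvirt.open('qemu:///system')  # URI is an assumption
with open('domain.xml') as f:          # file name is an assumption
    domain = conn.defineXML(f.read())
domain.create()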
Example #3
def _get_vm(hostname, unlock=True, allow_retired=False):
    """Get a server from Serveradmin by hostname to return VM object

    The function is accepting hostnames in any length as long as it resolves
    to a single server on Serveradmin.
    """

    object_id = Query({
        'hostname': Any(hostname, StartsWith(hostname + '.')),
        'servertype': 'vm',
    }, ['object_id']).get()['object_id']

    def vm_query():
        return Query({
            'object_id': object_id,
        }, VM_ATTRIBUTES).get()

    dataset_obj = vm_query()

    hypervisor = None
    if dataset_obj['hypervisor']:
        hypervisor = Hypervisor(dataset_obj['hypervisor'])

        # XXX: Ugly hack until adminapi supports modifying joined objects
        dict.__setitem__(
            dataset_obj, 'hypervisor', dataset_obj['hypervisor']['hostname']
        )

    vm = VM(dataset_obj, hypervisor)
    vm.acquire_lock()

    try:
        if not allow_retired and dataset_obj['state'] == 'retired':
            raise InvalidStateError(
                'VM {} is in state retired, I refuse to work on it!'.format(
                    hostname,
                )
            )
        yield vm
    except (Exception, KeyboardInterrupt):
        VM(vm_query(), hypervisor).release_lock()
        raise
    else:
        # We re-fetch the VM because we can't risk committing any other
        # changes to the VM than unlocking. There can be leftover changes
        # from failed operations, such as setting memory.
        # Most operations require unlocking; the only exception is deleting
        # a VM, because a deleted object can no longer be unlocked.
        if unlock:
            VM(vm_query(), hypervisor).release_lock()
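Since _get_vm acquires the lock, yields exactly once, and releases the lock on both the success and failure paths, it is presumably consumed as a context manager. A minimal sketch of the same acquire/yield/release shape, with a hypothetical resource object:

from contextlib import contextmanager

@contextmanager
def locked(resource):
    # Same shape as _get_vm: acquire before the yield, release afterwards,
    # including when the body of the with-block raises.
    resource.acquire_lock()
    try:
        yield resource
    finally:
        resource.release_lock()

# Usage: with locked(vm) as locked_vm: ...  # lock held inside the block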
Example #4
def _get_best_hypervisor(vm, hypervisor_states, offline=False):
    hypervisors = (Hypervisor(o) for o in Query(
        {
            'servertype': 'hypervisor',
            'environment': environ.get('IGVM_MODE', 'production'),
            'vlan_networks': vm.dataset_obj['route_network'],
            'state': Any(*hypervisor_states),
        }, HYPERVISOR_ATTRIBUTES))

    for hypervisor in sorted_hypervisors(HYPERVISOR_PREFERENCES, vm,
                                         hypervisors):
        # The actual resources are not checked during sorting for performance.
        # We need to validate the hypervisor using the actual values before
        # the final decision.
        try:
            hypervisor.acquire_lock()
        except InvalidStateError as error:
            log.warning(error)
            continue

        try:
            hypervisor.check_vm(vm, offline)
        except (libvirtError, HypervisorError) as error:
            hypervisor.release_lock()
            log.warning('Preferred hypervisor "{}" is skipped: {}'.format(
                hypervisor, error))
            continue

        try:
            yield hypervisor
        finally:
            hypervisor.release_lock()
        break
    else:
        raise IGVMError('Cannot find a hypervisor')
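The for/else at the end is easy to misread: the else branch runs only when the loop finishes without hitting break, i.e. when no hypervisor passed all the checks. A self-contained illustration:

found = None
for candidate in ['db1', 'hv7', 'hv9']:
    if candidate.startswith('hv'):
        found = candidate
        break
else:
    # Runs only if the loop was never broken out of
    raise RuntimeError('no candidate matched')
print(found)  # hv7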
Example #5
def _get_hypervisor(hostname, allow_reserved=False):
    """Get a server from Serveradmin by hostname to return Hypervisor object"""
    dataset_obj = Query({
        'hostname': hostname,
        'servertype': 'hypervisor',
    }, HYPERVISOR_ATTRIBUTES).get()

    if not allow_reserved and dataset_obj['state'] == 'online_reserved':
        raise InvalidStateError('Server "{0}" is online_reserved.'.format(
            dataset_obj['hostname']))

    hypervisor = Hypervisor(dataset_obj)
    hypervisor.acquire_lock()

    try:
        yield hypervisor
    finally:
        hypervisor.release_lock()
Example #6
def clean_all(route_network, datacenter_type, vm_hostname=None):
    # Cancelled builds are forcefully killed by Jenkins. They did not have
    # the opportunity to clean up, so we forcibly destroy everything found
    # on any HV that would interrupt our work in the current JENKINS_EXECUTOR.
    hvs = [Hypervisor(o) for o in Query({
        'servertype': 'hypervisor',
        'environment': 'testing',
        'vlan_networks': route_network,
        'state': 'online',
    }, HYPERVISOR_ATTRIBUTES)]

    # If a VM hostname is given, only that will be cleaned from HVs.
    if vm_hostname is None:
        pattern = '^([0-9]+_)?(vm-rename-)?{}$'.format(
            VM_HOSTNAME_PATTERN.format(JENKINS_EXECUTOR, '[0-9]+'),
        )
    else:
        pattern = '^([0-9]+_)?(vm-rename-)?{}$'.format(vm_hostname)

    # Clean HVs one by one.
    if datacenter_type == 'kvm.dct':
        for hv in hvs:
            clean_hv(hv, pattern)

    if datacenter_type == 'aws.dct':
        clean_aws(vm_hostname)

    # Remove all connected Serveradmin objects.
    clean_serveradmin({'hostname': Regexp(pattern)})

    # Try to remove VMs with the same IP in any case, because we use custom
    # logic to assign them and want to avoid IP address conflicts.
    # Index 1 is usually used for the test's subject VM; index 2 might be
    # used for testing an IP change.
    ips = [get_next_address(VM_NET, i) for i in [1, 2]]
    clean_serveradmin({'intern_ip': Any(*ips)})
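An illustrative check of the cleanup pattern built above, with a made-up hostname. The optional prefixes cover uid-prefixed libvirt domain names (object_id plus hostname, as in the uid_name of Example #10) and the intermediate names used while renaming a VM:

import re

pattern = '^([0-9]+_)?(vm-rename-)?{}$'.format('example-vm')
assert re.match(pattern, 'example-vm')
assert re.match(pattern, '12345_example-vm')      # uid-prefixed domain name
assert re.match(pattern, 'vm-rename-example-vm')  # rename intermediate
assert not re.match(pattern, 'another-vm')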
Example #7
def _get_best_hypervisor(
        vm,
        hypervisor_states,
        offline=False,
        enforce_vm_env=False,
        soft_preferences=False,
):
    hv_filter = {
        'servertype': 'hypervisor',
        'vlan_networks': vm.route_network,
        'state': Any(*hypervisor_states),
    }

    # Enforce IGVM_MODE used for tests
    if 'IGVM_MODE' in environ:
        hv_filter['environment'] = environ.get('IGVM_MODE')
    elif enforce_vm_env:
        hv_filter['environment'] = vm.dataset_obj['environment']

    # Get all (theoretically) possible HVs sorted by HV preferences
    hypervisors = (
        Hypervisor(o) for o in
        Query(hv_filter, HYPERVISOR_ATTRIBUTES)
    )
    hypervisors = sort_by_preference(
        vm,
        HYPERVISOR_PREFERENCES,
        hypervisors,
        soft_preferences,
    )

    possible_hvs = OrderedDict()
    for possible_hv in hypervisors:
        possible_hvs[str(possible_hv)] = possible_hv

    # Check all HVs in parallel. This checks live data on those HVs without
    # locking them, which allows for a quick first filtering round. Another
    # round, with locking, follows on the filtered HVs only.
    chunk_size = 10
    iterations = math.ceil(len(possible_hvs) / chunk_size)
    found_hv = None

    # We check the HVs in chunks. This lets us select an HV early without
    # looping through all of them when it is unnecessary.
    for i in range(iterations):
        start_idx = i * chunk_size
        end_idx = start_idx + chunk_size
        hv_chunk = dict(list(possible_hvs.items())[start_idx:end_idx])

        results = parallel(
            _check_vm,
            identifiers=list(hv_chunk.keys()),
            args=[
                [possible_hv, vm, offline]
                for possible_hv in hv_chunk.values()
            ],
            workers=chunk_size,
        )

        # Remove unsupported HVs from the list
        for checked_hv, success in results.items():
            if not success:
                hv_chunk.pop(checked_hv)

        # Do another checking iteration, this time with HV locking
        for possible_hv in hv_chunk.values():
            try:
                possible_hv.acquire_lock()
            except InvalidStateError as e:
                log.warning(e)
                continue

            if not _check_vm(possible_hv, vm, offline):
                possible_hv.release_lock()
                continue

            # HV found
            found_hv = possible_hv

            break

        if found_hv:
            break

    if not found_hv:
        # No supported HV was found
        raise IGVMError(
            'Automatically finding the best hypervisor failed! '
            'Cannot find a suitable hypervisor with the preferences and '
            'the query: {}'.format(hv_filter))

    # Yield the hypervisor locked for working on it
    try:
        log.info('Picked {} as destination Hypervisor'.format(str(found_hv)))
        yield found_hv
    finally:
        found_hv.release_lock()
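A minimal sketch of the chunking arithmetic used above, on a plain dict: with 25 entries and a chunk size of 10, the loop sees chunks of 10, 10 and 5 entries.

import math

items = {'hv{}'.format(i): i for i in range(25)}
chunk_size = 10
for i in range(math.ceil(len(items) / chunk_size)):
    chunk = dict(list(items.items())[i * chunk_size:(i + 1) * chunk_size])
    print(len(chunk))  # 10, 10, 5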
Example #8
def setUpModule():
    # Automatically find suitable HVs for tests.
    # Terminate if this is impossible - we can't run tests without HVs.
    global HYPERVISORS
    vm_route_net = (Query({
        'hostname': VM_NET
    }, ['route_network']).get()['route_network'])

    # We can access HVs as objects, but that does not mean we can compare
    # them to objects returned from igvm - those are different objects,
    # created from scratch from Serveradmin data.
    HYPERVISORS = [
        Hypervisor(o) for o in Query(
            {
                'servertype': 'hypervisor',
                'environment': 'testing',
                'vlan_networks': vm_route_net,
                'state': 'online',
            }, HYPERVISOR_ATTRIBUTES)
    ]

    if len(HYPERVISORS) < 2:
        raise Exception('Not enough testing hypervisors found')

    query = Query()
    vm_obj = query.new_object('vm')
    vm_obj['hostname'] = VM_HOSTNAME
    vm_obj['intern_ip'] = Query({
        'hostname': VM_NET
    }, ['intern_ip']).get_free_ip_addrs()
    vm_obj['project'] = 'test'
    vm_obj['team'] = 'test'

    query.commit()

    # Cancelled builds are forcefully killed by Jenkins. They did not have
    # the opportunity to clean up, so we forcibly destroy everything found
    # on any HV that would interrupt our work in the current JENKINS_EXECUTOR.
    for hv in HYPERVISORS:
        hv.get_storage_pool().refresh()
        for domain in hv.conn().listAllDomains():
            if match(
                ('^([0-9]+_)?' + VM_HOSTNAME_PATTERN + '$').format(
                    '[0-9]+',
                    JENKINS_EXECUTOR,
                ),
                domain.name(),
            ):
                if domain.state()[0] == VIR_DOMAIN_RUNNING:
                    domain.destroy()
                domain.undefine()
        st_pool = hv.get_storage_pool()
        for vol_name in st_pool.listVolumes():
            if match(
                ('^([0-9]+_)?' + VM_HOSTNAME_PATTERN + '$').format(
                    '[0-9]+',
                    JENKINS_EXECUTOR,
                ),
                vol_name,
            ):
                vol_path = st_pool.storageVolLookupByName(vol_name).path()
                hv.run(
                    'mount | awk \'/{}/ {{print $3}}\' | '
                    'xargs -r -n1 umount'.format(vol_name.replace('-', '--'))
                )
                st_pool.storageVolLookupByName(vol_name).delete()
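The vol_name.replace('-', '--') in the umount pipeline is needed because device-mapper doubles hyphens from volume group and logical volume names when it composes /dev/mapper paths. The volume group name below is an assumption:

# An LV "test-vm" in VG "vg0" appears in mount output as
# /dev/mapper/vg0-test--vm, so the pattern must double the hyphens too.
vol_name = 'test-vm'
print(vol_name.replace('-', '--'))  # test--vm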
Example #9
def _get_best_hypervisor(vm, hypervisor_states, offline=False):
    hv_env = environ.get('IGVM_MODE', 'production')

    # Get all (theoretically) possible HVs sorted by HV preferences
    hypervisors = (Hypervisor(o) for o in Query(
        {
            'servertype': 'hypervisor',
            'environment': hv_env,
            'vlan_networks': vm.route_network,
            'state': Any(*hypervisor_states),
        }, HYPERVISOR_ATTRIBUTES))
    hypervisors = sorted_hypervisors(HYPERVISOR_PREFERENCES, vm, hypervisors)

    possible_hvs = OrderedDict()
    for possible_hv in hypervisors:
        possible_hvs[str(possible_hv)] = possible_hv

    # Check all HVs in parallel. This checks live data on those HVs without
    # locking them, which allows for a quick first filtering round. Another
    # round, with locking, follows on the filtered HVs only.
    results = parallel(
        _check_vm,
        identifiers=list(possible_hvs.keys()),
        args=[[possible_hv, vm, offline]
              for possible_hv in possible_hvs.values()],
    )

    # Remove unsupported HVs from the list
    for checked_hv, success in results.items():
        if not success:
            possible_hvs.pop(checked_hv)

    # No supported HV was found
    not_found_err = IGVMError(
        'Cannot find hypervisor matching environment: {}, '
        'states: {}, vlan_network: {}, offline: {}'.format(
            hv_env,
            ', '.join(hypervisor_states),
            vm.route_network,
            offline,
        ))

    if len(possible_hvs) == 0:
        raise not_found_err

    # Do another checking iteration, this time with HV locking
    for possible_hv in possible_hvs.values():
        try:
            possible_hv.acquire_lock()
        except InvalidStateError as e:
            log.warning(e)
            continue

        if not _check_vm(possible_hv, vm, offline):
            possible_hv.release_lock()
            continue

        try:
            yield possible_hv
            break
        finally:
            possible_hv.release_lock()
    else:
        raise not_found_err
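One subtlety of wrapping the yield in try/finally above: the lock is released even if the consumer abandons the generator early, because closing a suspended generator raises GeneratorExit at the yield, which still runs the finally block. A self-contained illustration:

def guarded():
    try:
        yield 'resource'
    finally:
        print('released')

gen = guarded()
next(gen)    # suspended at the yield; the "lock" is held here
gen.close()  # raises GeneratorExit at the yield and prints "released"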
Example #10
    def setUp(self):
        """Initialize VM object before every test

        Get the object from Serveradmin and initialize it to safe defaults.
        Don't assign the VM to any HV yet!
        """
        super().setUp()

        # Check that enough HVs are available.
        self.route_network = Query(
            {
                'hostname': VM_NET
            },
            ['route_network'],
        ).get()['route_network']

        self.datacenter_type = Query(
            {
                'hostname': self.route_network
            },
            ['datacenter_type'],
        ).get()['datacenter_type']

        self.hvs = [
            Hypervisor(o) for o in Query(
                {
                    'environment': 'testing',
                    'servertype': 'hypervisor',
                    'state': 'online',
                    'vlan_networks': self.route_network,
                }, HYPERVISOR_ATTRIBUTES)
        ]

        if self.datacenter_type == 'kvm.dct':
            assert len(self.hvs) >= 2, 'Not enough testing hypervisors found'

        # Cleanup all leftovers from previous tests or failures.
        clean_all(self.route_network, self.datacenter_type, VM_HOSTNAME)

        # Create subject VM object
        self.vm_obj = Query().new_object('vm')
        self.vm_obj['backup_disabled'] = True
        self.vm_obj['disk_size_gib'] = 3
        self.vm_obj['environment'] = 'testing'
        self.vm_obj['hostname'] = VM_HOSTNAME
        self.vm_obj['hypervisor'] = None
        self.vm_obj['intern_ip'] = get_next_address(VM_NET, 1)
        self.vm_obj['memory'] = 2048
        self.vm_obj['no_monitoring'] = True
        self.vm_obj['num_cpu'] = 2
        self.vm_obj['os'] = 'buster'
        self.vm_obj['project'] = 'test'
        self.vm_obj['puppet_environment'] = None
        self.vm_obj['puppet_ca'] = 'testing-puppetca.innogames.de'
        self.vm_obj['puppet_master'] = 'puppet-lb.test.innogames.net'
        self.vm_obj['repositories'] = [
            'int:basebuster:stable',
            'int:innogames:stable',
        ]
        self.vm_obj['state'] = 'online'

        if self.datacenter_type == 'aws.dct':
            self.vm_obj['aws_image_id'] = 'ami-0e2b90ca04cae8da5'  # buster
            self.vm_obj['aws_instance_type'] = 't2.micro'
            self.vm_obj['aws_key_name'] = 'eu-central-1-key'
            self.vm_obj['disk_size_gib'] = 8

        self.vm_obj.commit()

        # It would be enough to create the SGs in AWS once, but with parallel
        # runs we can't reliably tell whether the sync has already happened.
        if self.datacenter_type == 'aws.dct':
            fw_api = api.get('firewall')
            fw_api.update_config([self.route_network])

        self.uid_name = '{}_{}'.format(
            self.vm_obj['object_id'],
            self.vm_obj['hostname'],
        )

        # Make sure we can make a fresh build
        clean_cert(self.vm_obj)