Example #1
def bond_nics(prefix, api):
    engine = api.system_service()

    def _bond_nics(number, host):
        slaves = [HostNic(name=nic) for nic in _host_vm_nics(  # eth2, eth3
                    prefix, host.name, LIBVIRT_NETWORK_FOR_BONDING)]

        options = [
            Option(name='mode', value='active-backup'),
            Option(name='miimon', value='200'),
            ]

        bond = HostNic(
            name=BOND_NAME,
            bonding=Bonding(slaves=slaves, options=options))

        ip_configuration = network_utils_v4.create_static_ip_configuration(
            MIGRATION_NETWORK_IPv4_ADDR.format(number),
            MIGRATION_NETWORK_IPv4_MASK,
            MIGRATION_NETWORK_IPv6_ADDR.format(number),
            MIGRATION_NETWORK_IPv6_MASK)

        host_service = engine.hosts_service().host_service(id=host.id)
        network_utils_v4.attach_network_to_host(
            host_service, BOND_NAME, MIGRATION_NETWORK, ip_configuration,
            [bond])

    hosts = test_utils.hosts_in_cluster_v4(engine, CLUSTER_NAME)
    utils.invoke_in_parallel(_bond_nics, range(1, len(hosts) + 1), hosts)

    for host in test_utils.hosts_in_cluster_v4(engine, CLUSTER_NAME):
        host_service = engine.hosts_service().host_service(id=host.id)
        nt.assert_true(_host_is_attached_to_network(
            engine, host_service, MIGRATION_NETWORK, nic_name=BOND_NAME))
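All of these examples fan work out through utils.invoke_in_parallel, which applies a callable concurrently across one or more argument sequences (one call per host or VM). As a rough mental model only, not lago's actual implementation, the sketch below, under the hypothetical name invoke_in_parallel_sketch, illustrates the contract the callers rely on:

# A minimal thread-based sketch of the invoke_in_parallel contract;
# an illustration under assumptions, not lago's real implementation.
from concurrent.futures import ThreadPoolExecutor


def invoke_in_parallel_sketch(func, *args_sequences):
    # Each sequence supplies one positional argument per call, mirroring
    # invoke_in_parallel(_bond_nics, range(1, len(hosts) + 1), hosts) above.
    seqs = [list(seq) for seq in args_sequences]
    with ThreadPoolExecutor(max_workers=max(len(seqs[0]), 1)) as pool:
        # map() preserves input order and re-raises any worker exception
        return list(pool.map(func, *seqs))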
Example #3
def bond_nics(prefix, api):
    def _bond_nics(number, host):
        slaves = params.Slaves(host_nic=[
            params.HostNIC(name=nic)
            for nic in _nics_to_bond(prefix, host.name)
        ])

        options = params.Options(option=[
            params.Option(name='mode', value='active-backup'),
            params.Option(name='miimon', value='200'),
        ])

        bond = params.HostNIC(name=BOND_NAME,
                              bonding=params.Bonding(slaves=slaves,
                                                     options=options))

        ip_configuration = network_utils.create_static_ip_configuration(
            VLAN200_NET_IPv4_ADDR % number, VLAN200_NET_IPv4_MASK,
            VLAN200_NET_IPv6_ADDR % number, VLAN200_NET_IPv6_MASK)

        network_utils.attach_network_to_host(api, host, BOND_NAME, VLAN200_NET,
                                             ip_configuration, [bond])

    hosts = test_utils.hosts_in_cluster_v3(api, CLUSTER_NAME)
    utils.invoke_in_parallel(_bond_nics, range(1, len(hosts) + 1), hosts)

    for host in test_utils.hosts_in_cluster_v3(api, CLUSTER_NAME):
        nt.assert_true(
            _host_is_attached_to_network(api,
                                         host,
                                         VLAN200_NET,
                                         nic_name=BOND_NAME))
Example #4
def bond_nics(prefix, api):
    def _bond_nics(number, host):
        slaves = params.Slaves(host_nic=[
            params.HostNIC(name=nic) for nic in _host_vm_nics(
                prefix, host.name, LIBVIRT_NETWORK_FOR_BONDING)])  # eth2, eth3

        options = params.Options(option=[
            params.Option(name='mode', value='active-backup'),
            params.Option(name='miimon', value='200'),
            ])

        bond = params.HostNIC(
            name=BOND_NAME,
            bonding=params.Bonding(slaves=slaves, options=options))

        ip_configuration = network_utils_v3.create_static_ip_configuration(
            MIGRATION_NETWORK_IPv4_ADDR.format(number),
            MIGRATION_NETWORK_IPv4_MASK,
            MIGRATION_NETWORK_IPv6_ADDR.format(number),
            MIGRATION_NETWORK_IPv6_MASK)

        network_utils_v3.attach_network_to_host(
            api, host, BOND_NAME, MIGRATION_NETWORK, ip_configuration, [bond])

    hosts = test_utils.hosts_in_cluster_v3(api, CLUSTER_NAME)
    utils.invoke_in_parallel(_bond_nics, range(1, len(hosts) + 1), hosts)

    for host in test_utils.hosts_in_cluster_v3(api, CLUSTER_NAME):
        nt.assert_true(_host_is_attached_to_network(
            api, host, MIGRATION_NETWORK, nic_name=BOND_NAME))
Example #5
def _combine_coverage_data_on_hosts(hosts):
    print("Combining coverage data on hosts...")

    def _combine_coverage_data_on_host(host):
        # the $(...) substitution runs on the remote shell and picks whichever
        # coverage binary exists ('coverage' for Python 2, 'coverage-3' for Python 3)
        host.ssh(['$([ -x /usr/bin/coverage ] && echo coverage || echo coverage-3)', 'combine',
                  '--rcfile=/var/lib/vdsm/coverage/coveragerc'])

    utils.invoke_in_parallel(_combine_coverage_data_on_host, hosts)
Example #6
def _stop_vdsm_services(hosts):
    # both vdsmd and supervdsmd need to be stopped gracefully
    # so that coverage.py dumps its coverage data on exit
    print("Stopping VDSM services...")

    def _stop_vdsm_services_on_host(host):
        host.ssh(['systemctl', 'stop', 'vdsmd', 'supervdsmd'])

    utils.invoke_in_parallel(_stop_vdsm_services_on_host, hosts)
Example #7
def remove_bonding(api):
    def _remove_bonding(host):
        network_utils.detach_network_from_host(api, host, VLAN200_NET,
                                               BOND_NAME)

    network_utils.set_network_required_in_cluster(api, VLAN200_NET,
                                                  CLUSTER_NAME, False)
    utils.invoke_in_parallel(_remove_bonding,
                             test_utils.hosts_in_cluster_v3(api, CLUSTER_NAME))

    for host in test_utils.hosts_in_cluster_v3(api, CLUSTER_NAME):
        nt.assert_false(_host_is_attached_to_network(api, host, VLAN200_NET))
Example #8
def remove_bonding(api):
    def _remove_bonding(host):
        network_utils_v3.detach_network_from_host(api, host, MIGRATION_NETWORK,
                                                  BOND_NAME)

    network_utils_v3.set_network_required_in_cluster(api, MIGRATION_NETWORK,
                                                     CLUSTER_NAME, False)
    utils.invoke_in_parallel(_remove_bonding,
                             test_utils.hosts_in_cluster_v3(api, CLUSTER_NAME))

    for host in test_utils.hosts_in_cluster_v3(api, CLUSTER_NAME):
        nt.assert_false(_host_is_attached_to_network(api, host,
                                                     MIGRATION_NETWORK))
Example #9
def remove_bonding(api):
    engine = api.system_service()

    def _remove_bonding(host):
        host_service = engine.hosts_service().host_service(id=host.id)
        network_utils_v4.detach_network_from_host(
            engine, host_service, MIGRATION_NETWORK, BOND_NAME)

    network_utils_v4.set_network_required_in_cluster(engine, MIGRATION_NETWORK,
                                                     CLUSTER_NAME, False)
    utils.invoke_in_parallel(
        _remove_bonding, test_utils.hosts_in_cluster_v4(engine, CLUSTER_NAME))

    for host in test_utils.hosts_in_cluster_v4(engine, CLUSTER_NAME):
        host_service = engine.hosts_service().host_service(id=host.id)
        nt.assert_false(_host_is_attached_to_network(engine, host_service,
                                                     MIGRATION_NETWORK))
Example #10
def test_remove_bonding(api_v4):
    engine = api_v4.system_service()

    def _remove_bonding(host):
        host_service = engine.hosts_service().host_service(id=host.id)
        network_utils_v4.detach_network_from_host(engine, host_service,
                                                  MIGRATION_NETWORK, BOND_NAME)

    network_utils_v4.set_network_required_in_cluster(engine, MIGRATION_NETWORK,
                                                     CLUSTER_NAME, False)
    utils.invoke_in_parallel(
        _remove_bonding, test_utils.hosts_in_cluster_v4(engine, CLUSTER_NAME))

    for host in test_utils.hosts_in_cluster_v4(engine, CLUSTER_NAME):
        host_service = engine.hosts_service().host_service(id=host.id)
        assert not _host_is_attached_to_network(engine, host_service,
                                                MIGRATION_NETWORK)
Example #11
    def export_vms(self,
                   vms_names,
                   standalone,
                   dst_dir,
                   compress,
                   init_file_name,
                   out_format,
                   collect_only=False,
                   with_threads=True):
        # todo: move this logic to PrefixExportManager
        if not vms_names:
            vms_names = list(self._vms.keys())

        running_vms = []
        vms = []
        for name in vms_names:
            try:
                vm = self._vms[name]
                if not vm.spec.get('skip-export'):
                    vms.append(vm)
                    if vm.running():
                        running_vms.append(vm)
            except KeyError:
                raise utils.LagoUserException(
                    'Entity {} does not exist'.format(name))

        if running_vms:
            raise utils.LagoUserException(
                'The following vms must be off:\n{}'.format('\n'.join(
                    [_vm.name() for _vm in running_vms])))

        with LogTask('Exporting disks to: {}'.format(dst_dir)):
            if not os.path.isdir(dst_dir):
                os.mkdir(dst_dir)

            def _export_disks(vm):
                return vm.export_disks(standalone, dst_dir, compress,
                                       collect_only, with_threads)

            if collect_only:
                return (functools.reduce(lambda x, y: x.update(y) or x,
                                         [_export_disks(v) for v in vms]))
            else:
                if with_threads:
                    results = utils.invoke_in_parallel(_export_disks, vms)
                else:
                    results = [_export_disks(v) for v in vms]

                results = functools.reduce(lambda x, y: x.update(y) or x,
                                           results)

        self.generate_init(os.path.join(dst_dir, init_file_name), out_format,
                           vms)

        results['init-file'] = os.path.join(dst_dir, init_file_name)

        return results
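The functools.reduce(lambda x, y: x.update(y) or x, results) expression above is a compact in-place dict merge: dict.update() returns None, so the 'or x' keeps handing the same accumulator to the next step. A standalone illustration with made-up paths:

import functools

# dict.update() returns None, so 'x.update(y) or x' folds each mapping
# into the first dict and keeps returning that same accumulator.
parts = [{'vm0': '/exports/vm0.qcow2'}, {'vm1': '/exports/vm1.qcow2'}]
merged = functools.reduce(lambda x, y: x.update(y) or x, parts)
assert merged == {'vm0': '/exports/vm0.qcow2', 'vm1': '/exports/vm1.qcow2'}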
Example #12
def _copy_coverage_data_to_first_host(first_host, remaining_hosts):
    # coverage.py needs source files at the moment of report generation -
    # that's why we need to do it on one of the hosts
    print("Copying coverage data to one of the hosts...")
    # create the tmpdir before entering the try block, so the cleanup in
    # the finally clause never references an unbound name if mkdtemp fails
    tmpdir = tempfile.mkdtemp()
    try:

        def _copy_coverage_data_from_host(host_idx, host):
            target_coverage_file_name = 'vdsm.coverage.{}'.format(host_idx)
            host.copy_from('/var/lib/vdsm/coverage/vdsm.coverage',
                           os.path.join(tmpdir, target_coverage_file_name))

        utils.invoke_in_parallel(_copy_coverage_data_from_host,
                                 tuple(range(len(remaining_hosts))),
                                 remaining_hosts)

        for coverage_data_file in os.listdir(tmpdir):
            coverage_data_file = os.path.join(tmpdir, coverage_data_file)
            first_host.copy_to(coverage_data_file, '/var/lib/vdsm/coverage')
    finally:
        shutil.rmtree(tmpdir)
Example #13
    def revert_snapshots(self, name):
        utils.invoke_in_parallel(
            lambda vm: vm.revert_snapshot(name),
            self._vms.values(),
        )
Example #14
    def bootstrap(self):
        # On Python 3, filter() returns a lazy iterator that is always truthy,
        # so the 'if vms' guard only short-circuits on Python 2; Example #19
        # shows a Python 3-safe list-comprehension variant.
        vms = filter(lambda vm: vm.spec.get('bootstrap', True),
                     self._vms.values())
        if vms:
            utils.invoke_in_parallel(lambda vm: vm.bootstrap(), vms)
Example #15
    def bootstrap(self):
        utils.invoke_in_parallel(lambda vm: vm.bootstrap(), self._vms.values())
Example #17
    def export_vms(
        self,
        vms_names,
        standalone,
        dst_dir,
        compress,
        init_file_name,
        out_format,
        collect_only=False,
        with_threads=True
    ):
        # todo: move this logic to PrefixExportManager
        if not vms_names:
            vms_names = self._vms.keys()

        running_vms = []
        vms = []
        for name in vms_names:
            try:
                vm = self._vms[name]
                if not vm.spec.get('skip-export'):
                    vms.append(vm)
                    if vm.running():
                        running_vms.append(vm)
            except KeyError:
                raise utils.LagoUserException(
                    'Entity {} does not exist'.format(name)
                )

        if running_vms:
            raise utils.LagoUserException(
                'The following vms must be off:\n{}'.format(
                    '\n'.join([_vm.name() for _vm in running_vms])
                )
            )

        with LogTask('Exporting disks to: {}'.format(dst_dir)):
            if not os.path.isdir(dst_dir):
                os.mkdir(dst_dir)

            def _export_disks(vm):
                return vm.export_disks(
                    standalone, dst_dir, compress, collect_only, with_threads
                )

            if collect_only:
                return (
                    reduce(
                        lambda x, y: x.update(y) or x, map(_export_disks, vms)
                    )
                )
            else:
                if with_threads:
                    results = utils.invoke_in_parallel(_export_disks, vms)
                else:
                    results = map(_export_disks, vms)

                results = reduce(lambda x, y: x.update(y) or x, results)

        self.generate_init(
            os.path.join(dst_dir, init_file_name), out_format, vms
        )

        results['init-file'] = os.path.join(dst_dir, init_file_name)

        return results
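This older revision targets Python 2, where map() returns a list and reduce() is a builtin; Example #11 above is the same logic ported to Python 3 using functools.reduce and explicit lists.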
Example #18
    def bootstrap(self):
        # Same Python 2 filter() truthiness caveat as in Example #14.
        vms = filter(
            lambda vm: vm.spec.get('bootstrap', True), self._vms.values()
        )
        if vms:
            utils.invoke_in_parallel(lambda vm: vm.bootstrap(), vms)
Example #19
    def bootstrap(self):
        vms = [
            vm for vm in self._vms.values() if vm.spec.get('bootstrap', True)
        ]
        if vms:
            utils.invoke_in_parallel(lambda vm: vm.bootstrap(), vms)
Example #20
    def create_snapshots(self, name):
        utils.invoke_in_parallel(
            lambda vm: vm.create_snapshot(name),
            list(self._vms.values()),
        )
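Together, create_snapshots and revert_snapshots form a parallel checkpoint/rollback primitive over every VM in an environment. A hypothetical round-trip, with EnvStub standing in for a real lago prefix object that exposes the methods above:

# Hypothetical checkpoint/rollback flow; EnvStub only mimics the snapshot
# API shown above and does not touch real VMs.
class EnvStub:
    def __init__(self):
        self._snapshots = set()

    def create_snapshots(self, name):
        self._snapshots.add(name)       # real version snapshots all VMs in parallel

    def revert_snapshots(self, name):
        assert name in self._snapshots  # real version reverts all VMs in parallel


env = EnvStub()
env.create_snapshots('before_change')
# ... apply some risky change to the environment ...
env.revert_snapshots('before_change')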