Example no. 1
    def _startUnderlyingMigration(self, startTime):
        if self.hibernating:
            hooks.before_vm_hibernate(self._vm._dom.XMLDesc(0), self._vm.conf)
            try:
                self._vm._vmStats.pause()
                fname = self._vm.cif.prepareVolumePath(self._dst)
                try:
                    self._vm._dom.save(fname)
                finally:
                    self._vm.cif.teardownVolumePath(self._dst)
            except Exception:
                self._vm._vmStats.cont()
                raise
        else:
            for dev in self._vm._customDevices():
                hooks.before_device_migrate_source(
                    dev._deviceXML, self._vm.conf, dev.custom)
            hooks.before_vm_migrate_source(self._vm._dom.XMLDesc(0),
                                           self._vm.conf)

            # Do not measure the time spent creating the VM on the
            # destination. In some cases expensive operations there can
            # cause the migration to be cancelled right after the transfer
            # starts.
            destCreateStartTime = time.time()
            result = self._destServer.migrationCreate(self._machineParams)
            destCreationTime = time.time() - destCreateStartTime
            startTime += destCreationTime
            self.log.info('Creation of destination VM took: %d seconds',
                          destCreationTime)

            if result['status']['code']:
                self.status = result
                raise RuntimeError('migration destination error: ' +
                                   result['status']['message'])
            if config.getboolean('vars', 'ssl'):
                transport = 'tls'
            else:
                transport = 'tcp'
            duri = 'qemu+%s://%s/system' % (transport, self.remoteHost)
            if self._vm.conf['_migrationParams']['dstqemu']:
                muri = 'tcp://%s' % \
                       self._vm.conf['_migrationParams']['dstqemu']
            else:
                muri = 'tcp://%s' % self.remoteHost

            self._vm.log.info('starting migration to %s '
                              'with miguri %s', duri, muri)

            downtimeThread = DowntimeThread(self._vm, int(self._downtime))
            self._monitorThread = MonitorThread(self._vm, startTime)
            with utils.running(downtimeThread):
                with utils.running(self._monitorThread):
                    # We need to support Python 2.6, so we use two nested
                    # with statements.
                    self._perform_migration(duri, muri)

            self.log.info("migration took %d seconds to complete",
                          (time.time() - startTime) + destCreationTime)
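
Every example in this listing hands a start/stop object to utils.running (or the bare running helper). A minimal sketch of such a context manager, consistent with how these call sites use it; the exact vdsm implementation may differ:

from contextlib import contextmanager

@contextmanager
def running(runnable):
    # Start the runnable on entry and guarantee stop() on exit, even when
    # the body raises; this is what lets the tests below raise timeout
    # errors freely inside the with block.
    runnable.start()
    try:
        yield runnable
    finally:
        runnable.stop()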
Example no. 2
    def _perform_with_downtime_thread(self, duri, muri):
        self._vm.log.debug('performing migration with downtime thread')
        downtimeThread = DowntimeThread(
            self._vm,
            int(self._downtime),
            config.getint('vars', 'migration_downtime_steps'))

        with utils.running(downtimeThread):
            with utils.running(self._monitorThread):
                # We need to support Python 2.6, so we use two nested
                # with statements.
                self._perform_migration(duri, muri)
Example no. 3
 def test_wakeup_blocked_reader(self):
     lock = RWLock()
     writer = LockingThread(lock.exclusive)
     with utils.running(writer):
         if not writer.acquired.wait(2):
             raise RuntimeError("Timeout waiting for writer thread")
         reader = LockingThread(lock.shared)
         with utils.running(reader):
             if not reader.ready.wait(2):
                 raise RuntimeError("Timeout waiting for reader thread")
             self.assertFalse(reader.acquired.wait(1))
             writer.done.set()
             self.assertTrue(reader.acquired.wait(2))
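
The RWLock examples here and below use a LockingThread helper. A plausible sketch, assumed from how the tests drive its ready/acquired/done events; the real helper may differ:

import threading

class LockingThread(object):
    """Assumed test helper: acquires the given lock context in a
    background thread and reports progress via events."""

    def __init__(self, lock):
        self._lock = lock
        self.ready = threading.Event()     # set just before acquiring
        self.acquired = threading.Event()  # set once the lock is held
        self.done = threading.Event()      # tells the thread to release
        self._thread = threading.Thread(target=self._run)
        self._thread.daemon = True

    def start(self):
        self._thread.start()

    def stop(self):
        self.done.set()
        self._thread.join()

    def _run(self):
        self.ready.set()
        with self._lock:
            self.acquired.set()
            self.done.wait()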
Example no. 4
def test_loopback_event(tmpdir):
    listener = udev.MultipathListener()
    received = threading.Event()
    devices = []

    def callback(device):
        pprint.pprint({k: device[k] for k in device})
        devices.append(device)
        received.set()

    listener._callback = callback
    with running(listener):
        # Create a backing file
        filename = str(tmpdir.join("file"))
        with open(filename, "wb") as f:
            f.truncate(1024**2 * 10)

        # Create and remove a loop device
        with loopback.Device(filename) as loop:
            print("Created a loop device at %r" % loop.path)
            if not received.wait(1):
                raise RuntimeError("Timeout receiving event")

            # We expect an event about our loop device
            assert devices[0].get("DEVNAME") == loop.path
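
loopback.Device above wraps the setup and teardown of a Linux loop device. A rough sketch built on the losetup command line; the path attribute matches the usage above, everything else is an assumption:

import subprocess

class Device(object):
    """Attach a backing file to a free loop device on entry, detach it
    on exit. Requires root, like the original test."""

    def __init__(self, filename):
        self._filename = filename
        self.path = None

    def __enter__(self):
        # losetup --find --show prints the loop device it attached.
        out = subprocess.check_output(
            ["losetup", "--find", "--show", self._filename])
        self.path = out.strip().decode("utf-8")
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        subprocess.check_call(["losetup", "--detach", self.path])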
Example no. 5
 def test_release_other_thread_write_lock(self):
     lock = RWLock()
     writer = LockingThread(lock.exclusive)
     with utils.running(writer):
         if not writer.acquired.wait(2):
             raise RuntimeError("Timeout waiting for writer thread")
         self.assertRaises(RuntimeError, lock.release)
Example no. 6
 def test_release_other_thread_read_lock(self):
     lock = RWLock()
     reader = LockingThread(lock.shared)
     with utils.running(reader):
         if not reader.acquired.wait(2):
             raise RuntimeError("Timeout waiting for reader thread")
         self.assertRaises(RuntimeError, lock.release)
Example no. 7
    def test_multiple_executors(self):
        names = []
        workers = 2
        done = concurrent.Barrier(2 * workers + 1)

        def get_worker_name():
            names.append(pthread.getname())
            done.wait()

        foo = executor.Executor('foo', workers, workers, None)
        bar = executor.Executor('bar', workers, workers, None)
        with utils.running(foo), utils.running(bar):
            for i in range(workers):
                foo.dispatch(get_worker_name)
                bar.dispatch(get_worker_name)
            done.wait()

        self.assertEqual(sorted(names), ["bar/0", "bar/1", "foo/0", "foo/1"])
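
Unlike Examples 1 and 2, this test enters both executors in a single with statement; that syntax requires Python 2.7, which is why the migration code above falls back to two nested with blocks for Python 2.6 compatibility.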
Example no. 8
def test_hotunplug_monitor():
    # When unregistering a monitor while the listener is running, the
    # listener should stop the monitor, since it was the one that started it.
    listener = udev.MultipathListener()
    with running(listener):
        mon = Monitor()
        listener.register(mon)
        listener.unregister(mon)
        assert mon.state == Monitor.STOPPED
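
The Monitor objects in this and the following udev examples are test doubles. A sketch that matches the states and calls asserted on above and below; the actual classes, and the EVENT that the listener derives from DEVICE, are assumptions:

class Monitor(object):
    """Assumed test double: records lifecycle transitions and the
    events it was handed."""

    CREATED = "created"
    STARTED = "started"
    STOPPED = "stopped"

    def __init__(self):
        self.state = self.CREATED
        self.calls = []

    def start(self):
        self.state = self.STARTED

    def stop(self):
        self.state = self.STOPPED

    def handle(self, event):
        self.calls.append(event)

class MonitorError(Exception):
    pass

class UnstartableMonitor(Monitor):
    """Fails on start() to exercise the listener's error handling
    (see the hotplug error example below)."""

    def start(self):
        raise MonitorError("Fake error")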
Example no. 9
    def _startUnderlyingMigration(self, startTime):
        if self.hibernating:
            hooks.before_vm_hibernate(self._vm._dom.XMLDesc(0), self._vm.conf)
            try:
                self._vm._vmStats.pause()
                fname = self._vm.cif.prepareVolumePath(self._dst)
                try:
                    self._vm._dom.save(fname)
                finally:
                    self._vm.cif.teardownVolumePath(self._dst)
            except Exception:
                self._vm._vmStats.cont()
                raise
        else:
            for dev in self._vm._customDevices():
                hooks.before_device_migrate_source(dev._deviceXML,
                                                   self._vm.conf, dev.custom)
            hooks.before_vm_migrate_source(self._vm._dom.XMLDesc(0),
                                           self._vm.conf)
            response = self.destServer.migrationCreate(self._machineParams)
            if response['status']['code']:
                self.status = response
                raise RuntimeError('migration destination error: ' +
                                   response['status']['message'])
            if config.getboolean('vars', 'ssl'):
                transport = 'tls'
            else:
                transport = 'tcp'
            duri = 'qemu+%s://%s/system' % (transport, self.remoteHost)
            if self._vm.conf['_migrationParams']['dstqemu']:
                muri = 'tcp://%s' % \
                       self._vm.conf['_migrationParams']['dstqemu']
            else:
                muri = 'tcp://%s' % self.remoteHost

            self._vm.log.info('starting migration to %s '
                              'with miguri %s', duri, muri)

            downtimeThread = DowntimeThread(self._vm, int(self._downtime))
            self._monitorThread = MonitorThread(self._vm, startTime)
            with utils.running(downtimeThread):
                with utils.running(self._monitorThread):
                    # We need to support Python 2.6, so we use two nested
                    # with statements.
                    self._perform_migration(duri, muri)
Example no. 10
    def _perform_with_downtime_thread(self, duri, muri):
        self._vm.log.debug('performing migration with downtime thread')
        self._monitorThread.downtime_thread = DowntimeThread(
            self._vm, int(self._downtime),
            config.getint('vars', 'migration_downtime_steps'))

        with utils.running(self._monitorThread):
            self._perform_migration(duri, muri)

        self._monitorThread.join()
Example no. 11
    def test_echo(self, use_ssl):
        data = dummyTextGenerator(1024)

        with constructAcceptor(self.log, use_ssl, _SampleBridge()) as acceptor:
            sslctx = DEAFAULT_SSL_CONTEXT if use_ssl else None

            with utils.running(
                    StandAloneRpcClient(acceptor._host, acceptor._port,
                                        'jms.topic.vdsm_requests',
                                        str(uuid4()), sslctx)) as client:
                self.assertEquals(
                    client.callMethod('echo', (data, ), str(uuid4())), data)
Example no. 12
def test_hotplug_monitor():
    # Registering a monitor after the listener was started starts the monitor
    # right after it is registered. The monitor must be able to handle events
    # while it is starting.
    listener = udev.MultipathListener()
    with running(listener):
        mon = Monitor()
        listener.register(mon)
        assert mon.state == Monitor.STARTED

        listener._callback(DEVICE)
        assert mon.calls == [EVENT]
Example no. 13
def test_monitor_lifecycle():
    listener = udev.MultipathListener()
    monitors = [Monitor(), Monitor()]
    for m in monitors:
        listener.register(m)

    # Starting the listener starts the monitors.
    with running(listener):
        for m in monitors:
            assert m.state == Monitor.STARTED

    # Stopping the listener stops the monitors.
    for m in monitors:
        assert m.state == Monitor.STOPPED
Example no. 14
def test_hotplug_monitor_error():
    listener = udev.MultipathListener()
    with running(listener):
        mon = UnstartableMonitor()
        # Monitor start() error should be raised.
        with pytest.raises(MonitorError):
            listener.register(mon)

        assert mon.state == Monitor.CREATED

        # Monitor should not be registered.
        listener._callback(DEVICE)
        assert mon.calls == []
Example no. 15
    def test_worker_thread_system_name(self):
        names = []
        workers = 2
        done = concurrent.Barrier(workers + 1)

        def get_worker_name():
            names.append(pthread.getname())
            done.wait()

        foo = executor.Executor('foo', workers, workers, None)
        with utils.running(foo):
            for i in range(workers):
                foo.dispatch(get_worker_name)
            done.wait()

        self.assertEqual(sorted(names), ["foo/0", "foo/1"])
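
pthread.getname above reads the kernel-level thread name, which the executor presumably sets to "<name>/<index>" for each worker. A minimal sketch of such helpers via prctl(2) through ctypes; PR_SET_NAME=15 and PR_GET_NAME=16 come from <sys/prctl.h>, and the real vdsm pthread module may differ:

import ctypes

PR_SET_NAME = 15
PR_GET_NAME = 16
_libc = ctypes.CDLL("libc.so.6", use_errno=True)

def setname(name):
    # Linux limits thread names to 15 bytes plus the NUL terminator.
    buf = ctypes.create_string_buffer(name.encode("utf-8")[:15])
    _libc.prctl(PR_SET_NAME, buf, 0, 0, 0)

def getname():
    buf = ctypes.create_string_buffer(16)
    _libc.prctl(PR_GET_NAME, buf, 0, 0, 0)
    return buf.value.decode("utf-8")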
Example no. 16
 def test_iperf_upper_limit(self):
     # The upper limit is not an accurate measure: it converges over time
     # and depends on the current machine hardware (CPU), so it is hard to
     # make strict assertions about it. The test should run for at least
     # 60 seconds (the longer the better), and the user should inspect the
     # computed average rate, and optionally the additional traffic data
     # collected in client.out, in order to be convinced that QoS is
     # working properly.
     limit_kbps = 1000  # 1 Mbps (in kbps)
     server_ip = '192.0.2.1'
     client_ip = '192.0.2.10'
     qos_out = {'ul': {'m2': limit_kbps}, 'ls': {'m2': limit_kbps}}
     # using a network namespace is essential since otherwise the kernel
     # short-circuits the traffic and bypasses the veth devices and the
     # classful qdisc.
     with network_namespace('server_ns') as ns, bridge_device() as bridge, \
             veth_pair() as (server_peer, server_dev), \
             veth_pair() as (client_dev, client_peer):
         linkSet(server_peer, ['up'])
         linkSet(client_peer, ['up'])
         # iperf server and its veth peer lie in a separate network
         # namespace
         link_set_netns(server_dev, ns)
         bridge.addIf(server_peer)
         bridge.addIf(client_peer)
         linkSet(client_dev, ['up'])
         netns_exec(ns, ['ip', 'link', 'set', 'dev', server_dev, 'up'])
         addrAdd(client_dev, client_ip, 24)
         netns_exec(ns, [
             'ip', '-4', 'addr', 'add', 'dev', server_dev,
             '%s/24' % server_ip
         ])
         qos.configure_outbound(qos_out, client_peer, None)
         with running(IperfServer(server_ip, network_ns=ns)):
             client = IperfClient(server_ip, client_ip, test_time=60)
             client.start()
             max_rate = max([
                 float(interval['streams'][0]['bits_per_second']) / (2**10)
                 for interval in client.out['intervals']
             ])
             self.assertTrue(0 < max_rate < limit_kbps * 1.5)
Example no. 17
 def _perform_with_conv_schedule(self, duri, muri):
     self._vm.log.debug('performing migration with conv schedule')
     with utils.running(self._monitorThread):
         self._perform_migration(duri, muri)
     self._monitorThread.join()
Example no. 18
 def test_shared_context_allows_reader(self):
     lock = RWLock()
     with lock.shared:
         reader = LockingThread(lock.shared)
         with utils.running(reader):
             self.assertTrue(reader.acquired.wait(1))
Example no. 19
 def check(*monitors):
     listener = udev.MultipathListener()
     for m in monitors:
         listener.register(m)
     with running(listener):
         pass
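
All of these call sites share the same shape: construct the runnable, enter running(), and let the context manager guarantee stop() even when an assertion fails or a timeout error is raised inside the block.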