Example 1
    def testCpuCoreStats(self):
        node_id, cpu_id = 0, 0
        self._hs = sampling.HostStatsThread(self.log)
        cpu_sample = {'user': 1.0, 'sys': 2.0}

        # "5" is the size of the SampleWindow.
        # there is no easy way to get SampleWindow, so
        # we hardcode a magic number here.
        for fake_ts in six.moves.xrange(5):
            self._hs._samples.append(
                fake.HostSample(fake_ts, {cpu_id: cpu_sample}))

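        # Stub for caps.getNumaTopology: report a single NUMA node
        # owning the one CPU we sampled.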
        def fakeNumaTopology():
            return {node_id: {'cpus': [cpu_id]}}

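        # All five fake samples carry identical counters, so the
        # per-core user/sys deltas are zero and the core is 100% idle.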
        expected = {
            '0': {
                'cpuIdle': '100.00',
                'cpuSys': '0.00',
                'cpuUser': '0.00',
                'nodeIndex': 0
            }
        }

        with MonkeyPatchScope([(caps, 'getNumaTopology', fakeNumaTopology)]):
            self.assertEqual(self._hs._getCpuCoresStats(), expected)
Example 2
    def __init__(self, irs, log, scheduler):
        """
        Initialize the (single) clientIF instance

        :param irs: a Dispatcher object to be used as this object's irs.
        :type irs: :class:`storage.dispatcher.Dispatcher`
        :param log: a log object to be used for this object's logging.
        :type log: :class:`logging.Logger`
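        :param scheduler: a scheduler object used by this instance.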
        """
        self.vmContainerLock = threading.Lock()
        self._networkSemaphore = threading.Semaphore()
        self._shutdownSemaphore = threading.Semaphore()
        self.irs = irs
        if self.irs:
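            # contEIOVms resumes VMs that were paused on I/O errors
            # once their storage domain becomes valid again.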
            self._contEIOVmsCB = partial(clientIF.contEIOVms, proxy(self))
            self.irs.registerDomainStateChangeCallback(self._contEIOVmsCB)
        self.log = log
        self._recovery = True
        self.channelListener = Listener(self.log)
        self._generationID = str(uuid.uuid4())
        self.mom = None
        self.bindings = {}
        self._broker_client = None
        self._subscriptions = defaultdict(list)
        self._scheduler = scheduler
        if _glusterEnabled:
            self.gluster = gapi.GlusterApi(self, log)
        else:
            self.gluster = None
        try:
            self.vmContainer = {}
            self._hostStats = sampling.HostStatsThread(log=log)
            self._hostStats.start()
            self.lastRemoteAccess = 0
            self._enabled = True
            self._netConfigDirty = False
            self._prepareMOM()
            secret.clear()
            threading.Thread(target=self._recoverThread,
                             name='clientIFinit').start()
            self.channelListener.settimeout(
                config.getint('vars', 'guest_agent_timeout'))
            self.channelListener.start()
            self.threadLocal = threading.local()
            self.threadLocal.client = ''

            host = config.get('addresses', 'management_ip')
            port = config.getint('addresses', 'management_port')

            self._createAcceptor(host, port)
            self._prepareXMLRPCBinding()
            self._prepareJSONRPCBinding()
            self._connectToBroker()
        except:
            self.log.error('failed to init clientIF, '
                           'shutting down storage dispatcher')
            if self.irs:
                self.irs.prepareForShutdown()
            raise
Example 3
    def testOutputWithNoSamples(self):
        expected = {
            'cpuIdle': 100.0,
            'cpuSys': 0.0,
            'cpuSysVdsmd': 0.0,
            'cpuUser': 0.0,
            'cpuUserVdsmd': 0.0,
            'rxRate': 0.0,
            'txRate': 0.0,
            'elapsedTime': 0,
        }
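        # Freeze time.time() at zero so the thread's elapsedTime is 0.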
        with MonkeyPatchScope([(time, 'time', lambda: 0)]):
            self._hs = sampling.HostStatsThread(self.log)
            self.assertEqual(self._hs.get(), expected)
Example 4
    def __init__(self, irs, log):
        """
        Initialize the (single) clientIF instance

        :param irs: a Dispatcher object to be used as this object's irs.
        :type irs: :class:`storage.dispatcher.Dispatcher`
        :param log: a log object to be used for this object's logging.
        :type log: :class:`logging.Logger`
        """
        self.vmContainerLock = threading.Lock()
        self._networkSemaphore = threading.Semaphore()
        self._shutdownSemaphore = threading.Semaphore()
        self.irs = irs
        if self.irs:
            self.irs.registerDomainStateChangeCallback(self.contEIOVms)
        self.log = log
        self._recovery = True
        self.channelListener = Listener(self.log)
        self._generationID = str(uuid.uuid4())
        self.mom = None
        if _glusterEnabled:
            self.gluster = gapi.GlusterApi(self, log)
        else:
            self.gluster = None
        try:
            self.vmContainer = {}
            self._hostStats = sampling.HostStatsThread(log=log)
            self._hostStats.start()
            self.lastRemoteAccess = 0
            self._enabled = True
            self._netConfigDirty = False
            self._prepareMOM()
            threading.Thread(target=self._recoverThread,
                             name='clientIFinit').start()
            self.channelListener.settimeout(
                config.getint('vars', 'guest_agent_timeout'))
            self.channelListener.start()
            self.threadLocal = threading.local()
            self.threadLocal.client = ''
        except:
            self.log.error('failed to init clientIF, '
                           'shutting down storage dispatcher')
            if self.irs:
                self.irs.prepareForShutdown()
            if self.mom:
                self.mom.stop()
            raise
        self._prepareBindings()
Example 5
    def testSamplesWraparound(self):
        NUM = sampling.HOST_STATS_AVERAGING_WINDOW + 1

        samples = sampling.SampleWindow(sampling.HOST_STATS_AVERAGING_WINDOW)

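        # Minimal stand-in for threading.Event: wait() counts calls and
        # isSet() becomes true after NUM iterations, so the sampling
        # loop runs exactly NUM times.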
        class FakeEvent(object):
            def __init__(self, *args):
                self.counter = 0

            def isSet(self):
                return self.counter >= NUM

            def set(self):
                pass

            def wait(self, unused):
                self.counter += 1

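        # Each instance takes a monotonically increasing id, so the
        # window contents can be verified after it wraps around.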
        class FakeHostSample(object):

            counter = 0

            def __repr__(self):
                return "FakeHostSample(id=%i)" % self.id

            def __init__(self, *args):
                self.id = FakeHostSample.counter
                FakeHostSample.counter += 1

            def to_connlog(self):
                pass

            def connlog_diff(self, *args):
                pass

        with MonkeyPatchScope([(sampling, 'HostSample', FakeHostSample)]):
            self._hs = sampling.HostStatsThread(samples)
            self._hs._sampleInterval = 0
            # we cannot monkey patch _stopEvent: doing so would
            # interfere with threading internals
            self._hs._stopEvent = FakeEvent()
            self._hs.start()
            self._hs.wait()
            first, last, _ = samples.stats()
            self.assertEqual(
                first.id,
                FakeHostSample.counter - sampling.HOST_STATS_AVERAGING_WINDOW)
            self.assertEqual(last.id, FakeHostSample.counter - 1)
Example 6
    def testContinueWithErrors(self):
        """
        bz1113948: do not give up on errors != TimeoutError
        """
        def WrapHostSample(pid):
            self._sampleCount += 1
            if self._sampleCount == self.FAILED_SAMPLE:
                raise ValueError
            if self._sampleCount == self.STOP_SAMPLE:
                self._hs.stop()
                self._samplingDone.set()
            return sampling.HostSample(1)

        with MonkeyPatchScope([(sampling, 'HostSample', WrapHostSample),
                               (sampling.HostStatsThread,
                                'SAMPLE_INTERVAL_SEC', 0.1)]):
            self._hs = sampling.HostStatsThread(self.log)
            self._hs.start()
            self._samplingDone.wait(3.0)
            self.assertTrue(self._samplingDone.is_set())
            self.assertTrue(self._sampleCount >= self.STOP_SAMPLE)