def setUp(self):
    self.sched = schedule.Scheduler(name="test.Scheduler",
                                    clock=monotonic_time)
    self.sched.start()
    self.exc = executor.Executor(name="test.Executor",
                                 workers_count=1,
                                 max_tasks=100,
                                 scheduler=self.sched)
    self.exc.start()
def setUp(self):
    self.scheduler = schedule.Scheduler()
    self.scheduler.start()
    self.max_tasks = 20
    self.max_workers = 15
    self.executor = executor.Executor('test',
                                      workers_count=10,
                                      max_tasks=self.max_tasks,
                                      scheduler=self.scheduler,
                                      max_workers=self.max_workers)
    self.executor.start()
    time.sleep(0.1)  # Give time to start all threads
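# A minimal tearDown() sketch to pair with the fixtures above: both start
# real scheduler/executor threads, so tests should stop them again. Only the
# no-argument stop() calls seen elsewhere in this section are used; any
# extra keywords (e.g. wait=) are left out since they are not shown here.
def tearDown(self):
    # Stop the executor first so no new work reaches the scheduler,
    # then stop the scheduler thread itself.
    self.executor.stop()
    self.scheduler.stop()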
def constructAcceptor(log, ssl, jsonBridge,
                      dest=LEGACY_SUBSCRIPTION_ID_RESPONSE):
    sslctx = DEAFAULT_SSL_CONTEXT if ssl else None
    reactor = Reactor()
    acceptor = MultiProtocolAcceptor(
        reactor,
        "::1",
        0,
        sslctx,
    )

    scheduler = schedule.Scheduler(name="test.Scheduler",
                                   clock=utils.monotonic_time)
    scheduler.start()

    cif = FakeClientIf(dest)

    json_binding = BindingJsonRpc(jsonBridge, defaultdict(list), 60,
                                  scheduler, cif)
    json_binding.start()
    cif.json_binding = json_binding

    with namedTemporaryDir() as tmp_dir:
        client_log = os.path.join(tmp_dir, 'client.log')
        with MonkeyPatchScope([(API.clientIF, 'getInstance', lambda _: cif),
                               (constants, 'P_VDSM_CLIENT_LOG', client_log)]):
            xml_binding = BindingXMLRPC(cif, cif.log)
            xml_binding.start()
            xmlDetector = XmlDetector(xml_binding)
            acceptor.add_detector(xmlDetector)

            jsonBridge.cif = cif
            stompDetector = StompDetector(json_binding)
            acceptor.add_detector(stompDetector)

            thread = threading.Thread(target=reactor.process_requests,
                                      name='Detector thread')
            thread.setDaemon(True)
            thread.start()

            try:
                yield acceptor
            finally:
                acceptor.stop()
                json_binding.stop()
                xml_binding.stop()
                scheduler.stop(wait=False)
def __init__(self, irs=None):
    # the bare minimum initialization for our test needs.
    self.irs = irs or IRS()
    self.log = logging.getLogger('fake.ClientIF')
    self.channelListener = None
    self.vm_container_lock = threading.Lock()
    self.vmContainer = {}
    self.vmRequests = {}
    self.bindings = {}
    self._recovery = False
    self.unknown_vm_ids = []
    self._scheduler = schedule.Scheduler(name="test.Scheduler",
                                         clock=monotonic_time)
    self._scheduler.start()
    self.qga_poller = qemuguestagent.QemuGuestAgentPoller(
        self, self.log, self._scheduler)
def constructAcceptor(log, ssl, jsonBridge,
                      dest=LEGACY_SUBSCRIPTION_ID_RESPONSE):
    sslctx = DEAFAULT_SSL_CONTEXT if ssl else None
    reactor = Reactor()
    acceptor = MultiProtocolAcceptor(
        reactor,
        "127.0.0.1",
        0,
        sslctx,
    )

    scheduler = schedule.Scheduler(name="test.Scheduler",
                                   clock=utils.monotonic_time)
    scheduler.start()

    json_binding = BindingJsonRpc(jsonBridge, defaultdict(list), 60,
                                  scheduler)
    json_binding.start()

    cif = FakeClientIf(json_binding, dest)

    xml_binding = BindingXMLRPC(cif, cif.log)
    xml_binding.start()
    xmlDetector = XmlDetector(xml_binding)
    acceptor.add_detector(xmlDetector)

    jsonBridge.cif = cif
    stompDetector = StompDetector(json_binding)
    acceptor.add_detector(stompDetector)

    thread = threading.Thread(target=reactor.process_requests,
                              name='Detector thread')
    thread.setDaemon(True)
    thread.start()

    try:
        yield acceptor
    finally:
        acceptor.stop()
        json_binding.stop()
        xml_binding.stop()
        scheduler.stop(wait=False)
def constructAcceptor(log, ssl, jsonBridge,
                      dest=SUBSCRIPTION_ID_RESPONSE):
    sslctx = DEAFAULT_SSL_CONTEXT if ssl else None
    reactor = Reactor()
    acceptor = MultiProtocolAcceptor(
        reactor,
        "::1" if ipv6_enabled() else "127.0.0.1",
        0,
        sslctx,
    )

    scheduler = schedule.Scheduler(name="test.Scheduler",
                                   clock=time.monotonic_time)
    scheduler.start()

    cif = FakeClientIf(dest)

    json_binding = BindingJsonRpc(jsonBridge, defaultdict(list), 60,
                                  scheduler, cif)
    json_binding.start()
    cif.json_binding = json_binding

    with MonkeyPatchScope([
        (API.clientIF, 'getInstance', lambda _: cif),
        (API, 'confirm_connectivity', lambda: None)
    ]):
        jsonBridge.cif = cif
        stompDetector = StompDetector(json_binding)
        acceptor.add_detector(stompDetector)

        thread = threading.Thread(target=reactor.process_requests,
                                  name='Detector thread')
        thread.setDaemon(True)
        thread.start()

        try:
            yield acceptor
        finally:
            acceptor.stop()
            json_binding.stop()
            scheduler.stop(wait=False)
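# The constructAcceptor() helpers above yield inside try/finally, i.e. they
# are meant to serve as context managers (presumably wrapped with
# contextlib.contextmanager in the original modules). A hedged usage sketch,
# with `bridge` standing in for whatever jsonBridge object the test builds:
with constructAcceptor(self.log, False, bridge) as acceptor:
    # Exercise the started bindings and detectors here; the helper's
    # finally block stops the acceptor, the bindings and the scheduler.
    pass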
def setUp(self):
    self.cif = fake.ClientIF()
    self.scheduler = schedule.Scheduler(name="test.Scheduler",
                                        clock=monotonic_time)
    self.scheduler.start()
    self.log = logging.getLogger("test")
    self.qga_poller = qemuguestagent.QemuGuestAgentPoller(
        self.cif, self.log, self.scheduler)
    self.vm = FakeVM()
    self.qga_poller.update_caps(
        self.vm.id,
        {
            'version': '0.0-test',
            'commands': [
                qemuguestagent._QEMU_ACTIVE_USERS_COMMAND,
                qemuguestagent._QEMU_GUEST_INFO_COMMAND,
                qemuguestagent._QEMU_HOST_NAME_COMMAND,
                qemuguestagent._QEMU_NETWORK_INTERFACES_COMMAND,
                qemuguestagent._QEMU_OSINFO_COMMAND,
                qemuguestagent._QEMU_TIMEZONE_COMMAND,
            ]
        })
def create_scheduler(self, clock):
    self.clock = clock
    self.scheduler = schedule.Scheduler(clock=clock)
    self.scheduler.start()
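# Once started, a Scheduler is driven by handing it callables to run after a
# delay. The scheduling API itself is not shown in these snippets, so the
# schedule(delay, callable) call and the cancellable handle it returns are
# assumptions in this sketch.
def test_schedule_and_cancel(self):
    fired = threading.Event()
    self.create_scheduler(clock=monotonic_time)
    call = self.scheduler.schedule(0.1, fired.set)  # assumed API
    call.cancel()                                   # assumed API
    # Once cancelled, the callable must not run.
    assert not fired.wait(0.2)
    self.scheduler.stop()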
def serve_clients(log):
    cif = None
    irs = None
    scheduler = None
    running = [True]

    def sigtermHandler(signum, frame):
        log.info("Received signal %s, shutting down" % signum)
        running[0] = False

    def sigusr1Handler(signum, frame):
        if irs:
            log.info("Received signal %s, stopping SPM" % signum)
            # pylint: disable=no-member
            # TODO remove when side effect removed from HSM.__init__ and
            # initialize it in line #63
            irs.spmStop(
                irs.getConnectedStoragePoolsList()['poollist'][0])

    sigutils.register()
    signal.signal(signal.SIGTERM, sigtermHandler)
    signal.signal(signal.SIGUSR1, sigusr1Handler)
    zombiereaper.registerSignalHandler()

    profile.start()
    metrics.start()

    libvirtconnection.start_event_loop()

    try:
        if config.getboolean('irs', 'irs_enable'):
            try:
                irs = Dispatcher(HSM())
            except:
                panic("Error initializing IRS")

        scheduler = schedule.Scheduler(name="vdsm.Scheduler",
                                       clock=time.monotonic_time)
        scheduler.start()

        from vdsm.clientIF import clientIF  # must import after config is read
        cif = clientIF.getInstance(irs, log, scheduler)

        jobs.start(scheduler, cif)

        install_manhole({'irs': irs, 'cif': cif})

        cif.start()

        init_unprivileged_network_components(cif)

        periodic.start(cif, scheduler)
        health.start()
        try:
            while running[0]:
                sigutils.wait_for_signal()

            profile.stop()
        finally:
            metrics.stop()
            health.stop()
            periodic.stop()
            cif.prepareForShutdown()
            jobs.stop()
            scheduler.stop()
    finally:
        libvirtconnection.stop_event_loop(wait=False)
import functools
import logging
import threading

from vdsm import executor
from vdsm import schedule
from vdsm.config import config
from vdsm.utils import monotonic_time

# just a made up number. Maybe should be equal to number of cores?
# TODO: make them tunable through private, unsupported configuration items
_WORKERS = config.getint('sampling', 'periodic_workers')
_TASK_PER_WORKER = config.getint('sampling', 'periodic_task_per_worker')
_TASKS = _WORKERS * _TASK_PER_WORKER


_scheduler = schedule.Scheduler(name="periodic.Scheduler",
                                clock=monotonic_time)

_executor = executor.Executor(name="periodic.Executor",
                              workers_count=_WORKERS,
                              max_tasks=_TASKS,
                              scheduler=_scheduler)
_operations = []


def _timeout_from(interval):
    """
    Estimate a sensible timeout given a periodic interval.
    """
    return interval / 2.
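# A quick illustration of the timeout heuristic above: a periodic operation
# gets half of its interval as its execution budget (sample numbers only).
assert _timeout_from(15) == 7.5
assert _timeout_from(2) == 1.0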