Example #1
def start(address):
    global _running
    if _running:
        raise RuntimeError('trying to start reporter while running')
    logging.info("Starting hawkular reporter")
    concurrent.thread(_run, name='hawkular', args=(address,)).start()
    _running = True
Example #2
def start(address):
    global _running
    if _running:
        raise RuntimeError('trying to start reporter while running')
    logging.info("Starting hawkular reporter")
    concurrent.thread(_run, name='hawkular', args=(address, )).start()
    _running = True
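
The examples on this page all call a small concurrent.thread helper from the vdsm project instead of creating threading.Thread objects directly. The actual vdsm implementation is not reproduced here; the following is a minimal sketch, assuming only the keyword arguments that appear in these examples (name, args, kwargs, daemon, and an optional logger name) and assuming that exceptions escaping the target function are logged.

import logging
import threading


def thread(func, args=(), kwargs=None, name=None, daemon=True, logger=None):
    """Return an unstarted thread that runs func(*args, **kwargs).

    Minimal illustrative sketch, not the actual vdsm implementation:
    the thread is a daemon by default, and an exception raised by func
    is logged instead of being printed to stderr.
    """
    if kwargs is None:
        kwargs = {}
    log = logging.getLogger(logger if logger else __name__)

    def run():
        try:
            func(*args, **kwargs)
        except Exception:
            log.exception("Unhandled exception in thread %s", name)

    t = threading.Thread(target=run, name=name)
    t.daemon = daemon
    return t

With a wrapper like this, thread(...).start() as used above behaves like a plain Thread whose failures end up in the log rather than being lost on stderr.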
Example #3
    def __init__(self, irs, log, scheduler):
        """
        Initialize the (single) clientIF instance

        :param irs: a Dispatcher object to be used as this object's irs.
        :type irs: :class:`storage.dispatcher.Dispatcher`
        :param log: a log object to be used for this object's logging.
        :type log: :class:`logging.Logger`
        """
        self.vmContainerLock = threading.Lock()
        self._networkSemaphore = threading.Semaphore()
        self._shutdownSemaphore = threading.Semaphore()
        self.irs = irs
        if self.irs:
            self._contEIOVmsCB = partial(clientIF.contEIOVms, proxy(self))
            self.irs.registerDomainStateChangeCallback(self._contEIOVmsCB)
        self.log = log
        self._recovery = True
        self.channelListener = Listener(self.log)
        self._generationID = str(uuid.uuid4())
        self.mom = None
        self.bindings = {}
        self._broker_client = None
        self._subscriptions = defaultdict(list)
        self._scheduler = scheduler
        if _glusterEnabled:
            self.gluster = gapi.GlusterApi(self, log)
        else:
            self.gluster = None
        try:
            self.vmContainer = {}
            self._hostStats = sampling.HostStatsThread(
                sampling.host_samples)
            self._hostStats.start()
            self.lastRemoteAccess = 0
            self._enabled = True
            self._netConfigDirty = False
            self._prepareMOM()
            secret.clear()
            concurrent.thread(self._recoverThread, name='clientIFinit').start()
            self.channelListener.settimeout(
                config.getint('vars', 'guest_agent_timeout'))
            self.channelListener.start()
            self.threadLocal = threading.local()
            self.threadLocal.client = ''

            host = config.get('addresses', 'management_ip')
            port = config.getint('addresses', 'management_port')

            self._createAcceptor(host, port)
            self._prepareXMLRPCBinding()
            self._prepareJSONRPCBinding()
            self._connectToBroker()
        except:
            self.log.error('failed to init clientIF, '
                           'shutting down storage dispatcher')
            if self.irs:
                self.irs.prepareForShutdown()
            raise
Example #4
    def __init__(self, irs, log, scheduler):
        """
        Initialize the (single) clientIF instance

        :param irs: a Dispatcher object to be used as this object's irs.
        :type irs: :class:`storage.dispatcher.Dispatcher`
        :param log: a log object to be used for this object's logging.
        :type log: :class:`logging.Logger`
        """
        self.vmContainerLock = threading.Lock()
        self._networkSemaphore = threading.Semaphore()
        self._shutdownSemaphore = threading.Semaphore()
        self.irs = irs
        if self.irs:
            self._contEIOVmsCB = partial(clientIF.contEIOVms, proxy(self))
            self.irs.registerDomainStateChangeCallback(self._contEIOVmsCB)
        self.log = log
        self._recovery = True
        self.channelListener = Listener(self.log)
        self._generationID = str(uuid.uuid4())
        self.mom = None
        self.bindings = {}
        self._broker_client = None
        self._subscriptions = defaultdict(list)
        self._scheduler = scheduler
        if _glusterEnabled:
            self.gluster = gapi.GlusterApi(self, log)
        else:
            self.gluster = None
        try:
            self.vmContainer = {}
            self._hostStats = sampling.HostStatsThread(sampling.host_samples)
            self._hostStats.start()
            self.lastRemoteAccess = 0
            self._enabled = True
            self._netConfigDirty = False
            self._prepareMOM()
            secret.clear()
            concurrent.thread(self._recoverThread, name='clientIFinit').start()
            self.channelListener.settimeout(
                config.getint('vars', 'guest_agent_timeout'))
            self.channelListener.start()
            self.threadLocal = threading.local()
            self.threadLocal.client = ''

            host = config.get('addresses', 'management_ip')
            port = config.getint('addresses', 'management_port')

            self._createAcceptor(host, port)
            self._prepareXMLRPCBinding()
            self._prepareJSONRPCBinding()
            self._connectToBroker()
        except:
            self.log.error('failed to init clientIF, '
                           'shutting down storage dispatcher')
            if self.irs:
                self.irs.prepareForShutdown()
            raise
Example #5
File: task.py Project: kanalun/vdsm
    def __del__(self):
        def finalize(log, owner, taskDir):
            log.warn("Task was autocleaned")
            owner.releaseAll()
            if taskDir is not None:
                getProcPool().fileUtils.cleanupdir(taskDir)

        if not self.state.isDone():
            taskDir = None
            if (self.cleanPolicy == TaskCleanType.auto and
                    self.store is not None):
                taskDir = os.path.join(self.store, self.id)
            concurrent.thread(finalize,
                              args=(self.log, self.resOwner, taskDir)).start()
Example #6
    def __del__(self):
        def finalize(log, owner, taskDir):
            log.warn("Task was autocleaned")
            owner.releaseAll()
            if taskDir is not None:
                getProcPool().fileUtils.cleanupdir(taskDir)

        if not self.state.isDone():
            taskDir = None
            if (self.cleanPolicy == TaskCleanType.auto
                    and self.store is not None):
                taskDir = os.path.join(self.store, self.id)
            concurrent.thread(finalize,
                              args=(self.log, self.resOwner, taskDir)).start()
Example #7
 def __init__(self, sdUUID, hostId, interval, changeEvent, checker):
     self.thread = concurrent.thread(self._run, logger=log.name,
                                     name="monitor/" + sdUUID[:7])
     self.stopEvent = threading.Event()
     self.domain = None
     self.sdUUID = sdUUID
     self.hostId = hostId
     self.interval = interval
     self.changeEvent = changeEvent
     self.checker = checker
     self.lock = threading.Lock()
     self.monitoringPath = None
     # For backward compatibility, we must present a fake status before
     # collecting the first sample. The fake status is marked as
     # actual=False so engine can handle it correctly.
     self.status = Status(PathStatus(actual=False),
                          DomainStatus(actual=False))
     self.isIsoDomain = None
     self.isoPrefix = None
     self.lastRefresh = time.time()
     # Use float to allow a short refresh interval during tests.
     self.refreshTime = \
         config.getfloat("irs", "repo_stats_cache_refresh_timeout")
     self.wasShutdown = False
     # Used for synchronizing during the tests
     self.cycleCallback = None
Example #8
 def start(self, blocking):
     if blocking:
         return self._dhclient()
     else:
         t = concurrent.thread(self._dhclient,
                               name='dhclient/%s' % self.iface)
         t.start()
Example #9
 def test_non_daemon_thread(self):
     t = concurrent.thread(lambda: None, daemon=False)
     t.start()
     try:
         self.assertFalse(t.daemon)
     finally:
         t.join()
Example #10
 def test_default_daemon_thread(self):
     t = concurrent.thread(lambda: None)
     t.start()
     try:
         self.assertTrue(t.daemon)
     finally:
         t.join()
Example #11
 def start(self, blocking):
     if blocking:
         return self._dhclient()
     else:
         t = concurrent.thread(self._dhclient,
                               name='dhclient/%s' % self.iface)
         t.start()
Example #12
 def __init__(self, inbox, outbox, hostID, queue, monitorInterval):
     # Save arguments
     tpSize = config.getint('irs', 'thread_pool_size') / 2
     waitTimeout = 3
     maxTasks = config.getint('irs', 'max_tasks')
     self.tp = ThreadPool(tpSize, waitTimeout, maxTasks)
     self._stop = False
     self._flush = False
     self._queue = queue
     self._activeMessages = {}
     self._monitorInterval = monitorInterval
     self._hostID = int(hostID)
     self._used_slots_array = [0] * MESSAGES_PER_MAILBOX
     self._outgoingMail = EMPTYMAILBOX
     self._incomingMail = EMPTYMAILBOX
     # TODO: add support for multiple paths (multiple mailboxes)
     self._spmStorageDir = config.get('irs', 'repository')
     self._inCmd = [
         constants.EXT_DD, 'if=' + str(inbox), 'iflag=direct,fullblock',
         'bs=' + str(BLOCK_SIZE), 'count=' + str(BLOCKS_PER_MAILBOX),
         'skip=' + str(self._hostID * BLOCKS_PER_MAILBOX)
     ]
     self._outCmd = [
         constants.EXT_DD, 'of=' + str(outbox), 'iflag=fullblock',
         'oflag=direct', 'conv=notrunc', 'bs=' + str(BLOCK_SIZE),
         'seek=' + str(self._hostID * BLOCKS_PER_MAILBOX)
     ]
     self._init = False
     self._initMailbox()  # Read initial mailbox state
     self._msgCounter = 0
     self._sendMail()  # Clear outgoing mailbox
     self._thread = concurrent.thread(self.run,
                                      name="mailbox/hsm",
                                      logger=self.log.name)
     self._thread.start()
Example #13
 def __init__(self, sdUUID, hostId, interval, changeEvent, checker):
     self.thread = concurrent.thread(self._run, logger=log.name)
     self.stopEvent = threading.Event()
     self.domain = None
     self.sdUUID = sdUUID
     self.hostId = hostId
     self.interval = interval
     self.changeEvent = changeEvent
     self.checker = checker
     self.lock = threading.Lock()
     self.monitoringPath = None
     # For backward compatibility, we must present a fake status before
     # collecting the first sample. The fake status is marked as
     # actual=False so engine can handle it correctly.
     self.status = Status(PathStatus(actual=False),
                          DomainStatus(actual=False))
     self.isIsoDomain = None
     self.isoPrefix = None
     self.lastRefresh = time.time()
     # Use float to allow a short refresh interval during tests.
     self.refreshTime = \
         config.getfloat("irs", "repo_stats_cache_refresh_timeout")
     self.wasShutdown = False
     # Used for synchronizing during the tests
     self.cycleCallback = None
Example #14
 def test_echo(self, concurrency):
     msg = b"ping"
     sockets = []
     try:
         for i in range(concurrency):
             sock1, sock2 = socket.socketpair()
             self.loop.create_dispatcher(Echo, sock2)
             sockets.append(sock1)
         t = concurrent.thread(self.loop.run_forever)
         t.start()
         try:
             start = time.time()
             for sock in sockets:
                 osutils.uninterruptible(sock.send, msg)
             for sock in sockets:
                 data = osutils.uninterruptible(sock.recv, len(msg))
                 self.assertEqual(data, msg)
             elapsed = time.time() - start
             print("%7d echos: %f seconds" % (concurrency, elapsed))
         finally:
             self.loop.call_soon_threadsafe(self.loop.stop)
             t.join()
     finally:
         for sock in sockets:
             sock.close()
Example #15
 def test_non_daemon_thread(self):
     t = concurrent.thread(lambda: None, daemon=False)
     t.start()
     try:
         self.assertFalse(t.daemon)
     finally:
         t.join()
Example #16
    def __init__(self, pool, maxHostID, monitorInterval=2):
        self._messageTypes = {}
        # Save arguments
        self._stop = False
        self._stopped = False
        self._poolID = str(pool.spUUID)
        self._spmStorageDir = pool.storage_repository
        tpSize = config.getint('irs', 'thread_pool_size') / 2
        waitTimeout = 3
        maxTasks = config.getint('irs', 'max_tasks')
        self.tp = ThreadPool(tpSize, waitTimeout, maxTasks)
        #  *** IMPORTANT NOTE: The SPM's inbox is the HSMs' outbox and vice
        #                      versa *** #
        self._inbox = os.path.join(self._spmStorageDir, self._poolID,
                                   "mastersd", sd.DOMAIN_META_DATA, "inbox")
        if not os.path.exists(self._inbox):
            self.log.error("SPM_MailMonitor create failed - inbox %s does not "
                           "exist" % repr(self._inbox))
            raise RuntimeError("SPM_MailMonitor create failed - inbox %s does "
                               "not exist" % repr(self._inbox))
        self._outbox = os.path.join(self._spmStorageDir, self._poolID,
                                    "mastersd", sd.DOMAIN_META_DATA, "outbox")
        if not os.path.exists(self._outbox):
            self.log.error("SPM_MailMonitor create failed - outbox %s does "
                           "not exist" % repr(self._outbox))
            raise RuntimeError("SPM_MailMonitor create failed - outbox %s "
                               "does not exist" % repr(self._outbox))
        self._numHosts = int(maxHostID)
        self._outMailLen = MAILBOX_SIZE * self._numHosts
        self._monitorInterval = monitorInterval
        # TODO: add support for multiple paths (multiple mailboxes)
        self._outgoingMail = self._outMailLen * "\0"
        self._incomingMail = self._outgoingMail
        self._inCmd = ['dd',
                       'if=' + str(self._inbox),
                       'iflag=direct,fullblock',
                       'count=1'
                       ]
        self._outCmd = ['dd',
                        'of=' + str(self._outbox),
                        'oflag=direct',
                        'iflag=fullblock',
                        'conv=notrunc',
                        'count=1'
                        ]
        self._outLock = threading.Lock()
        self._inLock = threading.Lock()
        # Clear outgoing mail
        self.log.debug("SPM_MailMonitor - clearing outgoing mail, command is: "
                       "%s", self._outCmd)
        cmd = self._outCmd + ['bs=' + str(self._outMailLen)]
        (rc, out, err) = _mboxExecCmd(cmd, data=self._outgoingMail)
        if rc:
            self.log.warning("SPM_MailMonitor couldn't clear outgoing mail, "
                             "dd failed")

        t = concurrent.thread(self.run, name="mailbox.SPMMonitor",
                              logger=self.log.name)
        t.start()
        self.log.debug('SPM_MailMonitor created for pool %s' % self._poolID)
Example #17
 def test_default_daemon_thread(self):
     t = concurrent.thread(lambda: None)
     t.start()
     try:
         self.assertTrue(t.daemon)
     finally:
         t.join()
Example #18
    def __init__(self, pool, maxHostID, monitorInterval=2):
        self._messageTypes = {}
        # Save arguments
        self._stop = False
        self._stopped = False
        self._poolID = str(pool.spUUID)
        self._spmStorageDir = pool.storage_repository
        tpSize = config.getint('irs', 'thread_pool_size') / 2
        waitTimeout = 3
        maxTasks = config.getint('irs', 'max_tasks')
        self.tp = ThreadPool("mailbox-spm", tpSize, waitTimeout, maxTasks)
        #  *** IMPORTANT NOTE: The SPM's inbox is the HSMs' outbox and vice
        #                      versa *** #
        self._inbox = os.path.join(self._spmStorageDir, self._poolID,
                                   "mastersd", sd.DOMAIN_META_DATA, "inbox")
        if not os.path.exists(self._inbox):
            self.log.error("SPM_MailMonitor create failed - inbox %s does not "
                           "exist" % repr(self._inbox))
            raise RuntimeError("SPM_MailMonitor create failed - inbox %s does "
                               "not exist" % repr(self._inbox))
        self._outbox = os.path.join(self._spmStorageDir, self._poolID,
                                    "mastersd", sd.DOMAIN_META_DATA, "outbox")
        if not os.path.exists(self._outbox):
            self.log.error("SPM_MailMonitor create failed - outbox %s does "
                           "not exist" % repr(self._outbox))
            raise RuntimeError("SPM_MailMonitor create failed - outbox %s "
                               "does not exist" % repr(self._outbox))
        self._numHosts = int(maxHostID)
        self._outMailLen = MAILBOX_SIZE * self._numHosts
        self._monitorInterval = monitorInterval
        # TODO: add support for multiple paths (multiple mailboxes)
        self._outgoingMail = self._outMailLen * "\0"
        self._incomingMail = self._outgoingMail
        self._inCmd = ['dd',
                       'if=' + str(self._inbox),
                       'iflag=direct,fullblock',
                       'count=1'
                       ]
        self._outCmd = ['dd',
                        'of=' + str(self._outbox),
                        'oflag=direct',
                        'iflag=fullblock',
                        'conv=notrunc',
                        'count=1'
                        ]
        self._outLock = threading.Lock()
        self._inLock = threading.Lock()
        # Clear outgoing mail
        self.log.debug("SPM_MailMonitor - clearing outgoing mail, command is: "
                       "%s", self._outCmd)
        cmd = self._outCmd + ['bs=' + str(self._outMailLen)]
        (rc, out, err) = _mboxExecCmd(cmd, data=self._outgoingMail)
        if rc:
            self.log.warning("SPM_MailMonitor couldn't clear outgoing mail, "
                             "dd failed")

        t = concurrent.thread(self.run, name="mailbox-spm",
                              logger=self.log.name)
        t.start()
        self.log.debug('SPM_MailMonitor created for pool %s' % self._poolID)
Example #19
 def __del__(self):
     if self._isValid and self.autoRelease:
         def release(log, namespace, name):
             log.warn("Resource reference was not properly released. "
                      "Autoreleasing.")
             # In Python, objects are refcounted and are deleted immediately
             # when the last reference is freed. This means the __del__
             # method can be called inside of any context. The
             # releaseResource method we use tries to acquire locks. So we
             # might try to acquire the lock in a locked context and reach a
             # deadlock. This is why the release is deferred to a separate
             # thread, which runs the operation in a different context.
             ResourceManager.getInstance().releaseResource(namespace, name)
         concurrent.thread(release, args=(self._log, self.namespace,
                                          self.name)).start()
         self._isValid = False
Example #20
 def __init__(self,
              vm,
              dst='',
              dstparams='',
              mode=MODE_REMOTE,
              method=METHOD_ONLINE,
              tunneled=False,
              dstqemu='',
              abortOnError=False,
              consoleAddress=None,
              compressed=False,
              autoConverge=False,
              **kwargs):
     self.log = vm.log
     self._vm = vm
     self._dst = dst
     self._mode = mode
     if method != METHOD_ONLINE:
         self.log.warning(
             'migration method %s is deprecated, forced to "online"',
             method)
     self._dstparams = dstparams
     self._enableGuestEvents = kwargs.get('enableGuestEvents', False)
     self._machineParams = {}
     # TODO: utils.tobool shouldn't be used in this constructor, the
     # conversions should be handled properly in the API layer
     self._tunneled = utils.tobool(tunneled)
     self._abortOnError = utils.tobool(abortOnError)
     self._consoleAddress = consoleAddress
     self._dstqemu = dstqemu
     self._downtime = kwargs.get('downtime') or \
         config.get('vars', 'migration_downtime')
     self._maxBandwidth = int(
         kwargs.get('maxBandwidth')
         or config.getint('vars', 'migration_max_bandwidth'))
     self._autoConverge = utils.tobool(autoConverge)
     self._compressed = utils.tobool(compressed)
     self._incomingLimit = kwargs.get('incomingLimit')
     self._outgoingLimit = kwargs.get('outgoingLimit')
     self.status = {
         'status': {
             'code': 0,
             'message': 'Migration in progress'
         }
     }
     self._progress = 0
     self._thread = concurrent.thread(self.run,
                                      name='migsrc/' + self._vm.id[:8])
     self._preparingMigrationEvt = True
     self._migrationCanceledEvt = threading.Event()
     self._monitorThread = None
     self._destServer = None
     self._convergence_schedule = {'init': [], 'stalling': []}
     self._use_convergence_schedule = False
     if 'convergenceSchedule' in kwargs:
         self._convergence_schedule = kwargs.get('convergenceSchedule')
         self._use_convergence_schedule = True
         self.log.debug('convergence schedule set to: %s',
                        str(self._convergence_schedule))
Example #21
    def __init__(self, poolID, maxHostID, inbox, outbox, monitorInterval=2):
        """
        Note: the inbox parameter here should point to the HSM's outbox
        mailbox file, and vice versa.
        """
        self._messageTypes = {}
        # Save arguments
        self._stop = False
        self._stopped = False
        self._poolID = poolID
        tpSize = config.getint('irs', 'thread_pool_size') / 2
        waitTimeout = wait_timeout(monitorInterval)
        maxTasks = config.getint('irs', 'max_tasks')
        self.tp = ThreadPool("mailbox-spm", tpSize, waitTimeout, maxTasks)
        self._inbox = inbox
        if not os.path.exists(self._inbox):
            self.log.error("SPM_MailMonitor create failed - inbox %s does not "
                           "exist" % repr(self._inbox))
            raise RuntimeError("SPM_MailMonitor create failed - inbox %s does "
                               "not exist" % repr(self._inbox))
        self._outbox = outbox
        if not os.path.exists(self._outbox):
            self.log.error("SPM_MailMonitor create failed - outbox %s does "
                           "not exist" % repr(self._outbox))
            raise RuntimeError("SPM_MailMonitor create failed - outbox %s "
                               "does not exist" % repr(self._outbox))
        self._numHosts = int(maxHostID)
        self._outMailLen = MAILBOX_SIZE * self._numHosts
        self._monitorInterval = monitorInterval
        # TODO: add support for multiple paths (multiple mailboxes)
        self._outgoingMail = self._outMailLen * "\0"
        self._incomingMail = self._outgoingMail
        self._inCmd = ['dd',
                       'if=' + str(self._inbox),
                       'iflag=direct,fullblock',
                       'count=1'
                       ]
        self._outCmd = ['dd',
                        'of=' + str(self._outbox),
                        'oflag=direct',
                        'iflag=fullblock',
                        'conv=notrunc',
                        'count=1'
                        ]
        self._outLock = threading.Lock()
        self._inLock = threading.Lock()
        # Clear outgoing mail
        self.log.debug("SPM_MailMonitor - clearing outgoing mail, command is: "
                       "%s", self._outCmd)
        cmd = self._outCmd + ['bs=' + str(self._outMailLen)]
        (rc, out, err) = _mboxExecCmd(cmd, data=self._outgoingMail)
        if rc:
            self.log.warning("SPM_MailMonitor couldn't clear outgoing mail, "
                             "dd failed")

        self._thread = concurrent.thread(
            self.run, name="mailbox-spm", log=self.log)
        self._thread.start()
        self.log.debug('SPM_MailMonitor created for pool %s' % self._poolID)
Example #22
File: http.py Project: EdDev/vdsm
 def handle_request(self):
     sock, addr = self.queue.get()
     if sock is self._STOP:
         return
     self.log.info("Starting request handler for %s:%d", addr[0], addr[1])
     t = concurrent.thread(self._process_requests, args=(sock, addr),
                           log=self.log)
     t.start()
Example #23
 def __init__(self, vm, dst='', dstparams='',
              mode=MODE_REMOTE, method=METHOD_ONLINE,
              tunneled=False, dstqemu='', abortOnError=False,
              consoleAddress=None, compressed=False,
              autoConverge=False, recovery=False, **kwargs):
     self.log = vm.log
     self._vm = vm
     self._dst = dst
     self._mode = mode
     if method != METHOD_ONLINE:
         self.log.warning(
             'migration method %s is deprecated, forced to "online"',
             method)
     self._dstparams = dstparams
     self._enableGuestEvents = kwargs.get('enableGuestEvents', False)
     self._machineParams = {}
     # TODO: conv.tobool shouldn't be used in this constructor, the
     # conversions should be handled properly in the API layer
     self._tunneled = conv.tobool(tunneled)
     self._abortOnError = conv.tobool(abortOnError)
     self._consoleAddress = consoleAddress
     self._dstqemu = dstqemu
     self._downtime = kwargs.get('downtime') or \
         config.get('vars', 'migration_downtime')
     self._maxBandwidth = int(
         kwargs.get('maxBandwidth') or
         config.getint('vars', 'migration_max_bandwidth')
     )
     self._autoConverge = conv.tobool(autoConverge)
     self._compressed = conv.tobool(compressed)
     self._incomingLimit = kwargs.get('incomingLimit')
     self._outgoingLimit = kwargs.get('outgoingLimit')
     self.status = {
         'status': {
             'code': 0,
             'message': 'Migration in progress'}}
     # we need to guard against concurrent updates only
     self._lock = threading.Lock()
     self._progress = 0
     self._thread = concurrent.thread(
         self.run, name='migsrc/' + self._vm.id[:8])
     self._preparingMigrationEvt = True
     self._migrationCanceledEvt = threading.Event()
     self._monitorThread = None
     self._destServer = None
     self._convergence_schedule = {
         'init': [],
         'stalling': []
     }
     self._use_convergence_schedule = False
     if 'convergenceSchedule' in kwargs:
         self._convergence_schedule = kwargs.get('convergenceSchedule')
         self._use_convergence_schedule = True
         self.log.debug('convergence schedule set to: %s',
                        str(self._convergence_schedule))
     self._started = False
     self._recovery = recovery
Example #24
def progress(op, estimated_size):
    done = threading.Event()
    th = concurrent.thread(volume_progress, args=(op, done, estimated_size))
    th.start()
    try:
        yield th
    finally:
        done.set()
        th.join()
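
The yield/finally structure in Example #24 suggests a helper meant to be wrapped with contextlib.contextmanager: a reporter thread is started on entry, the caller runs the long operation inside the with block, and the finally clause guarantees the reporter is signalled and joined even if the operation fails. Below is a hypothetical, self-contained usage sketch; FakeOp, the reporting interval, the volume_progress body, and the vdsm.common.concurrent import path are assumptions, not taken from this page.

from contextlib import contextmanager
import threading

from vdsm.common import concurrent  # assumed module path for the helper


class FakeOp(object):
    """Hypothetical stand-in for the operation monitored above."""
    done = 0

    def run(self):
        self.done = 1024


def volume_progress(op, done, estimated_size):
    # Hypothetical reporter: poll until the caller signals completion.
    while not done.wait(0.1):
        print("copied %s of %s bytes" % (op.done, estimated_size))


@contextmanager
def progress(op, estimated_size):
    # Same structure as Example #24, with the assumed decorator added.
    done = threading.Event()
    th = concurrent.thread(volume_progress, args=(op, done, estimated_size))
    th.start()
    try:
        yield th
    finally:
        done.set()
        th.join()


op = FakeOp()
with progress(op, estimated_size=1024):
    op.run()  # the reporter thread runs only for the duration of the block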
Example #25
    def _emit(self, *args, **kwargs):
        self._log.debug("Emitting event")
        with self._syncRoot:
            for funcId, (funcRef, oneshot) in self._registrar.items():
                func = funcRef()
                if func is None or oneshot:
                    del self._registrar[funcId]
                    if func is None:
                        continue
                try:
                    self._log.debug("Calling registered method `%s`", logUtils.funcName(func))
                    if self._sync:
                        func(*args, **kwargs)
                    else:
                        concurrent.thread(func, args=args, kwargs=kwargs).start()
                except:
                    self._log.warn("Could not run registered method because " "of an exception", exc_info=True)

        self._log.debug("Event emitted")
Example #26
    def test_run_callable_in_thread(self):
        self.thread = threading.current_thread()

        def run():
            self.thread = threading.current_thread()

        t = concurrent.thread(run)
        t.start()
        t.join()
        self.assertEqual(t, self.thread)
Example #27
    def test_pass_args(self):
        self.args = ()

        def run(*args):
            self.args = args

        t = concurrent.thread(run, args=(1, 2, 3))
        t.start()
        t.join()
        self.assertEqual((1, 2, 3), self.args)
Example #28
    def test_run_callable_in_thread(self):
        self.thread = threading.current_thread()

        def run():
            self.thread = threading.current_thread()

        t = concurrent.thread(run)
        t.start()
        t.join()
        self.assertEqual(t, self.thread)
Example #29
    def test_pass_args(self):
        self.args = ()

        def run(*args):
            self.args = args

        t = concurrent.thread(run, args=(1, 2, 3))
        t.start()
        t.join()
        self.assertEqual((1, 2, 3), self.args)
Example #30
    def __del__(self):
        if self._isValid and self.autoRelease:

            def release(log, namespace, name):
                log.warn("Resource reference was not properly released. "
                         "Autoreleasing.")
                # In Python, objects are refcounted and are deleted immediately
                # when the last reference is freed. This means the __del__
                # method can be called inside of any context. The
                # releaseResource method we use tries to acquire locks. So we
                # might try to acquire the lock in a locked context and reach a
                # deadlock. This is why the release is deferred to a separate
                # thread, which runs the operation in a different context.
                ResourceManager.getInstance().releaseResource(namespace, name)

            concurrent.thread(release,
                              args=(self._log, self.namespace,
                                    self.name)).start()
            self._isValid = False
Example #31
    def test_pass_kwargs(self):
        self.kwargs = ()

        def run(**kwargs):
            self.kwargs = kwargs

        kwargs = {'a': 1, 'b': 2}
        t = concurrent.thread(run, kwargs=kwargs)
        t.start()
        t.join()
        self.assertEqual(kwargs, self.kwargs)
Example #32
    def test_pass_kwargs(self):
        self.kwargs = ()

        def run(**kwargs):
            self.kwargs = kwargs

        kwargs = {'a': 1, 'b': 2}
        t = concurrent.thread(run, kwargs=kwargs)
        t.start()
        t.join()
        self.assertEqual(kwargs, self.kwargs)
Example #33
 def __init__(self, vm, startTime, conv_schedule, use_conv_schedule):
     super(MonitorThread, self).__init__()
     self._stop = threading.Event()
     self._vm = vm
     self._startTime = startTime
     self.daemon = True
     self.progress = None
     self._conv_schedule = conv_schedule
     self._use_conv_schedule = use_conv_schedule
     self.downtime_thread = _FakeThreadInterface()
     self._thread = concurrent.thread(self.run)
Example #34
 def __init__(self, vm, startTime, conv_schedule, use_conv_schedule):
     super(MonitorThread, self).__init__()
     self._stop = threading.Event()
     self._vm = vm
     self._startTime = startTime
     self.daemon = True
     self.progress = None
     self._conv_schedule = conv_schedule
     self._use_conv_schedule = use_conv_schedule
     self.downtime_thread = _FakeThreadInterface()
     self._thread = concurrent.thread(
         self.run, name='migmon/' + self._vm.id[:8])
Example #35
    def test_close(self):
        reactor = Reactor()
        thread = concurrent.thread(reactor.process_requests, name="test reactor")
        thread.start()
        s1, s2 = socket.socketpair()
        with closing(s2):
            disp = reactor.create_dispatcher(s1, impl=TestingImpl())
            reactor.stop()

        thread.join(timeout=1)

        self.assertTrue(disp.closing)
        self.assertFalse(reactor._wakeupEvent.closing)
Example #36
 def __init__(self, log):
     self.log = log
     self._quit = False
     self._epoll = select.epoll()
     self._channels = {}
     self._unconnected = {}
     self._update_lock = threading.Lock()
     self._add_channels = {}
     self._del_channels = []
     self._timeout = None
     self._thread = concurrent.thread(
         self.run, name='vmchannels'
     )
Example #37
    def _emit(self, *args, **kwargs):
        self._log.debug("Emitting event")
        with self._syncRoot:
            for funcId, (funcRef, oneshot) in self._registrar.items():
                func = funcRef()
                if func is None or oneshot:
                    del self._registrar[funcId]
                    if func is None:
                        continue
                try:
                    self._log.debug("Calling registered method `%s`",
                                    logUtils.funcName(func))
                    if self._sync:
                        func(*args, **kwargs)
                    else:
                        concurrent.thread(func, args=args,
                                          kwargs=kwargs).start()
                except:
                    self._log.warn("Could not run registered method because "
                                   "of an exception", exc_info=True)

        self._log.debug("Event emitted")
Example #38
 def __init__(self, log):
     self.log = log
     self._quit = False
     self._epoll = select.epoll()
     self._channels = {}
     self._unconnected = {}
     self._update_lock = threading.Lock()
     self._add_channels = {}
     self._del_channels = []
     self._timeout = None
     self._thread = concurrent.thread(
         self.run, name='vmchannels'
     )
Example #39
    def test_close(self):
        reactor = Reactor()
        thread = concurrent.thread(reactor.process_requests,
                                   name='test reactor')
        thread.start()
        s1, s2 = socket.socketpair()
        with closing(s2):
            disp = reactor.create_dispatcher(s1, impl=TestingImpl())
            reactor.stop()

        thread.join(timeout=1)

        self.assertTrue(disp.closing)
        self.assertFalse(reactor._wakeupEvent.closing)
Example #40
def itmap(func, iterable, maxthreads=UNLIMITED_THREADS):
    """
    Make an iterator that computes the function using
    arguments from the iterable. It works similar to tmap
    by running each operation in a different thread, this
    causes the results not to return in any particular
    order so it's good if you don't care about the order
    of the results.
    maxthreads stands for maximum threads that we can initiate simultaneosly.
               If we reached to max threads the function waits for thread to
               finish before initiate the next one.
    """
    if maxthreads < 1 and maxthreads != UNLIMITED_THREADS:
        raise ValueError("Wrong input to function itmap: %s" % maxthreads)

    respQueue = queue.Queue()

    def wrapper(value):
        try:
            respQueue.put(func(value))
        except Exception as e:
            respQueue.put(e)

    threadsCreated = 0
    threadsCount = 0
    for arg in iterable:
        if maxthreads != UNLIMITED_THREADS:
            if maxthreads == 0:
                # This is not supposed to happen. If it does, it's a bug:
                # maxthreads should reach 0 only after threadsCount is at
                # least 1.
                if threadsCount < 1:
                    raise RuntimeError("No thread initiated")
                else:
                    yield respQueue.get()
                    # once the yield returns, a thread has finished, so
                    # another one can be started
                    maxthreads += 1
                    threadsCount -= 1

        name = "itmap/%d" % threadsCreated
        t = concurrent.thread(wrapper, args=(arg, ), name=name)
        t.start()
        threadsCreated += 1
        threadsCount += 1
        maxthreads -= 1

    # wait for the remaining threads to finish
    for i in range(threadsCount):
        yield respQueue.get()
Example #41
File: misc.py Project: EdDev/vdsm
def itmap(func, iterable, maxthreads=UNLIMITED_THREADS):
    """
    Make an iterator that computes the function using
    arguments from the iterable. It works similar to tmap
    by running each operation in a different thread, this
    causes the results not to return in any particular
    order so it's good if you don't care about the order
    of the results.
    maxthreads stands for maximum threads that we can initiate simultaneosly.
               If we reached to max threads the function waits for thread to
               finish before initiate the next one.
    """
    if maxthreads < 1 and maxthreads != UNLIMITED_THREADS:
        raise ValueError("Wrong input to function itmap: %s" % maxthreads)

    respQueue = queue.Queue()

    def wrapper(value):
        try:
            respQueue.put(func(value))
        except Exception as e:
            respQueue.put(e)

    threadsCreated = 0
    threadsCount = 0
    for arg in iterable:
        if maxthreads != UNLIMITED_THREADS:
            if maxthreads == 0:
                # This is not supposed to happen. If it does, it's a bug:
                # maxthreads should reach 0 only after threadsCount is at
                # least 1.
                if threadsCount < 1:
                    raise RuntimeError("No thread initiated")
                else:
                    yield respQueue.get()
                    # once the yield returns, a thread has finished, so
                    # another one can be started
                    maxthreads += 1
                    threadsCount -= 1

        name = "itmap/%d" % threadsCreated
        t = concurrent.thread(wrapper, args=(arg,), name=name)
        t.start()
        threadsCreated += 1
        threadsCount += 1
        maxthreads -= 1

    # wait for the remaining threads to finish
    for i in range(threadsCount):
        yield respQueue.get()
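
The docstring above describes itmap's behaviour, but none of the examples show a call site. The sketch below is a hypothetical usage; the import path and the fetch function are assumptions, and note that an exception raised by the worker is returned as a value rather than raised by the iterator.

from vdsm.storage.misc import itmap  # assumed module path for misc.py above


def fetch(host):
    # Hypothetical per-item work.
    return "pinged %s" % host


hosts = ["10.0.0.1", "10.0.0.2", "10.0.0.3"]

# Run at most two worker threads at a time. Results arrive in completion
# order, which may differ from the order of `hosts`.
for result in itmap(fetch, hosts, maxthreads=2):
    if isinstance(result, Exception):
        raise result
    print(result)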
Example #42
    def test_pass_args_and_kwargs(self):
        self.args = ()
        self.kwargs = {}

        def run(*args, **kwargs):
            self.args = args
            self.kwargs = kwargs

        args = (1, 2)
        kwargs = {'a': 3, 'b': 4}
        t = concurrent.thread(run, args=args, kwargs=kwargs)
        t.start()
        t.join()
        self.assertEqual(args, self.args)
        self.assertEqual(kwargs, self.kwargs)
Example #43
 def __init__(self, domainMonitor, sdUUID, hostId, interval):
     self.thread = concurrent.thread(self._run, logger=self.log.name)
     self.domainMonitor = domainMonitor
     self.stopEvent = threading.Event()
     self.domain = None
     self.sdUUID = sdUUID
     self.hostId = hostId
     self.interval = interval
     self.nextStatus = Status(actual=False)
     self.status = FrozenStatus(self.nextStatus)
     self.isIsoDomain = None
     self.isoPrefix = None
     self.lastRefresh = time.time()
     self.refreshTime = \
         config.getint("irs", "repo_stats_cache_refresh_timeout")
Example #44
 def __init__(self, domainMonitor, sdUUID, hostId, interval):
     self.thread = concurrent.thread(self._run, logger=self.log.name)
     self.domainMonitor = domainMonitor
     self.stopEvent = threading.Event()
     self.domain = None
     self.sdUUID = sdUUID
     self.hostId = hostId
     self.interval = interval
     self.nextStatus = Status(actual=False)
     self.status = FrozenStatus(self.nextStatus)
     self.isIsoDomain = None
     self.isoPrefix = None
     self.lastRefresh = time.time()
     self.refreshTime = \
         config.getint("irs", "repo_stats_cache_refresh_timeout")
Example #45
 def __init__(self, groups=frozenset(), timeout=None, silent_timeout=False):
     self._time_start = None
     self._timeout = timeout
     self._silent_timeout = silent_timeout
     if groups:
         unknown_groups = frozenset(groups).difference(frozenset(_GROUPS))
         if unknown_groups:
             raise AttributeError('Invalid groups: %s' % (unknown_groups,))
         self._groups = groups
     else:
         self._groups = _GROUPS.keys()
     self._queue = queue.Queue()
     self._scan_thread = concurrent.thread(self._scan)
     self._scanning_started = threading.Event()
     self._scanning_stopped = threading.Event()
Example #46
    def test_pass_args_and_kwargs(self):
        self.args = ()
        self.kwargs = {}

        def run(*args, **kwargs):
            self.args = args
            self.kwargs = kwargs

        args = (1, 2)
        kwargs = {'a': 3, 'b': 4}
        t = concurrent.thread(run, args=args, kwargs=kwargs)
        t.start()
        t.join()
        self.assertEqual(args, self.args)
        self.assertEqual(kwargs, self.kwargs)
Example #47
 def __init__(self, groups=frozenset(), timeout=None, silent_timeout=False):
     self._time_start = None
     self._timeout = timeout
     self._silent_timeout = silent_timeout
     if groups:
         unknown_groups = frozenset(groups).difference(frozenset(_GROUPS))
         if unknown_groups:
             raise AttributeError('Invalid groups: %s' % (unknown_groups, ))
         self._groups = groups
     else:
         self._groups = _GROUPS.keys()
     self._queue = queue.Queue()
     self._scan_thread = concurrent.thread(self._scan)
     self._scanning_started = threading.Event()
     self._scanning_stopped = threading.Event()
Example #48
    def __init__(self, vm, downtime, steps):
        self._vm = vm
        self._downtime = downtime
        self._steps = steps
        self._stop = threading.Event()

        delay_per_gib = config.getint('vars', 'migration_downtime_delay')
        memSize = int(vm.conf['memSize'])
        self._wait = min(delay_per_gib * memSize / (_MiB_IN_GiB * self._steps),
                         self._WAIT_STEP_LIMIT)
        # do not materialize, keep as generator expression
        self._downtimes = exponential_downtime(self._downtime, self._steps)
        # we need the first value to support set_initial_downtime
        self._initial_downtime = next(self._downtimes)

        self._thread = concurrent.thread(self.run)
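
The comments in Example #48 rely on exponential_downtime being a lazy generator: only its first value is consumed up front (for set_initial_downtime) and the remaining values are pulled one per step while the migration runs. The real vdsm implementation is not shown on this page; the following is a minimal sketch of such a generator, assuming a simple exponential ramp from 1 up to the requested downtime.

def exponential_downtime(downtime, steps):
    # Hypothetical sketch, not the vdsm implementation: yield `steps`
    # values growing exponentially from 1 up to `downtime`.
    if steps < 2:
        yield downtime
        return
    factor = float(downtime) ** (1.0 / (steps - 1))
    for i in range(steps):
        yield int(round(factor ** i))


# The generator is lazy: next() gives the initial downtime, and each later
# step advances it without materializing the whole sequence.
downtimes = exponential_downtime(500, 5)
initial = next(downtimes)      # what set_initial_downtime would use
remaining = list(downtimes)    # what the run loop would consume step by step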
Example #49
File: ifcfg.py Project: nirs/vdsm
def _ifup(iface, cgroup=dhclient.DHCLIENT_CGROUP):
    if not iface.blockingdhcp and (iface.ipv4.bootproto == "dhcp" or iface.ipv6.dhcpv6):
        # wait for dhcp in another thread, so vdsm won't get stuck (BZ#498940)
        t = concurrent.thread(_exec_ifup, name="ifup/%s" % iface, args=(iface, cgroup))
        t.start()
    else:
        if not iface.master and (iface.ipv4 or iface.ipv6):
            if iface.ipv4:
                wait_for_ip = waitfor.waitfor_ipv4_addr
            elif iface.ipv6:
                wait_for_ip = waitfor.waitfor_ipv6_addr

            with wait_for_ip(iface.name):
                _exec_ifup(iface, cgroup)
        else:
            with waitfor.waitfor_linkup(iface.name):
                _exec_ifup(iface, cgroup)
Example #50
    def test_call_soon_threadsafe(self, calls):
        self.count = 0

        def callback():
            self.count += 1

        start = time.time()
        t = concurrent.thread(self.loop.run_forever)
        t.start()
        try:
            for i in range(calls):
                self.loop.call_soon_threadsafe(callback)
        finally:
            self.loop.call_soon_threadsafe(self.loop.stop)
            t.join()
        elapsed = time.time() - start
        print("%7d calls: %f seconds" % (calls, elapsed))
        self.assertEqual(calls, self.count)
Example #51
def _start_profiling():
    global cherrypy
    global dowser
    global _thread

    logging.debug("Starting memory profiling")

    import cherrypy
    import dowser
    # this nonsense makes pyflakes happy
    cherrypy
    dowser

    with _lock:
        if is_running():
            raise UsageError('Memory profiler is already running')
        _thread = concurrent.thread(_memory_viewer, name='memprofile')
        _thread.start()
Example #52
    def __init__(self, vm, downtime, steps):
        self._vm = vm
        self._downtime = downtime
        self._steps = steps
        self._stop = threading.Event()

        delay_per_gib = config.getint('vars', 'migration_downtime_delay')
        memSize = vm.mem_size_mb()
        self._wait = min(
            delay_per_gib * memSize / (_MiB_IN_GiB * self._steps),
            self._WAIT_STEP_LIMIT)
        # do not materialize, keep as generator expression
        self._downtimes = exponential_downtime(self._downtime, self._steps)
        # we need the first value to support set_initial_downtime
        self._initial_downtime = next(self._downtimes)

        self._thread = concurrent.thread(
            self.run, name='migdwn/' + self._vm.id[:8])
Example #53
    def test_abort_running(self):
        op = operation.Command(["sleep", "5"])
        aborted = threading.Event()

        def run():
            try:
                op.run()
            except exception.ActionStopped:
                aborted.set()

        t = concurrent.thread(run)
        t.start()
        try:
            # TODO: add a way to wait until the operation is started?
            time.sleep(0.5)
            op.abort()
        finally:
            t.join()
        self.assertTrue(aborted.is_set())
Example #54
def _ifup(iface, cgroup=dhclient.DHCLIENT_CGROUP):
    if not iface.blockingdhcp and (iface.ipv4.bootproto == 'dhcp'
                                   or iface.ipv6.dhcpv6):
        # wait for dhcp in another thread, so vdsm won't get stuck (BZ#498940)
        t = concurrent.thread(_exec_ifup,
                              name='ifup/%s' % iface,
                              args=(iface, cgroup))
        t.start()
    else:
        if not iface.master and (iface.ipv4 or iface.ipv6):
            if iface.ipv4:
                wait_for_ip = waitfor.waitfor_ipv4_addr
            elif iface.ipv6:
                wait_for_ip = waitfor.waitfor_ipv6_addr

            with wait_for_ip(iface.name):
                _exec_ifup(iface, cgroup)
        else:
            with waitfor.waitfor_linkup(iface.name):
                _exec_ifup(iface, cgroup)
Example #55
 def __init__(self):
     self._lock = threading.Lock()
     self._loop = asyncevent.EventLoop()
     self._thread = concurrent.thread(self._loop.run_forever,
                                      name="check/loop")
     self._checkers = {}
Example #56
 def startMonitoring(self):
     t = concurrent.thread(self._monitorConnections, logger=self._log.name)
     self._stopEvent.clear()
     t.start()
Example #57
 def start():
     thread = concurrent.thread(reactor.process_requests,
                                name='Client %s:%s' % (host, port))
     thread.start()