Example #1
0
 def testIsInIOThread(self):
     """
     Threads other than the registered IO thread must see
     L{threadable.isInIOThread} as false, while the IO thread itself
     must see it as true.
     """
     # NOTE(review): unlike test_isInIOThread elsewhere in this file, this
     # variant does not call threadable.registerAsIOThread() itself and
     # assumes the current thread was registered earlier -- confirm against
     # the surrounding fixture.
     foreignResult = []
     t = threading.Thread(
         target=lambda: foreignResult.append(threadable.isInIOThread()))
     t.start()
     t.join()
     # failIf/failUnless are deprecated unittest aliases (removed in recent
     # Python); use the canonical assertFalse/assertTrue instead.
     self.assertFalse(foreignResult[0], "Non-IO thread reported as IO thread")
     self.assertTrue(threadable.isInIOThread(), "IO thread reported as not IO thread")
 def test_isInIOThread(self):
     """
     L{threadable.isInIOThread} returns C{True} if and only if it is called
     in the same thread as L{threadable.registerAsIOThread}.
     """
     threadable.registerAsIOThread()
     resultFromWorker = []
     worker = threading.Thread(
         target=lambda: resultFromWorker.append(threadable.isInIOThread()))
     worker.start()
     worker.join()
     self.assertFalse(resultFromWorker[0],
                      "Non-IO thread reported as IO thread")
     self.assertTrue(threadable.isInIOThread(),
                     "IO thread reported as not IO thread")
Example #3
0
    def commit_now(self, vacuum=False, exiting=False):
        """
        Commit the pending transaction when commits are enabled and we are on
        the reactor (IO) thread; optionally VACUUM and re-open a transaction.

        :param vacuum: when True, run VACUUM after the COMMIT (or, if no
            commit is performed, run VACUUM on its own).
        :param exiting: when True, do not BEGIN a new transaction afterwards.
        """
        if self._should_commit and isInIOThread():
            try:
                self._logger.info(u"Start committing...")
                self.execute(u"COMMIT;")
            except:
                # Log with traceback and propagate -- caller decides recovery.
                self._logger.exception(u"COMMIT FAILED")
                raise
            self._should_commit = False

            if vacuum:
                self._logger.info(u"Start vacuuming...")
                self.execute(u"VACUUM;")

            if not exiting:
                try:
                    self._logger.info(u"Beginning another transaction...")
                    self.execute(u"BEGIN;")
                except:
                    self._logger.exception(u"Failed to execute BEGIN")
                    raise
            else:
                self._logger.info(u"Exiting, not beginning another transaction")

        elif vacuum:
            # Not committing (disabled, or off the IO thread): still honour
            # an explicit vacuum request.
            self.execute(u"VACUUM;")
	def sendMessage(self, sendType, dataDict, callback=None):
		"""Send a message to the server, optionally waiting for the reply.

		:param sendType: packet type forwarded to the factory.
		:param dataDict: payload dictionary.
		:param callback: if given, an EmulatorResponse is queued so the
			reply can be routed back; when invoked off the IO thread this
			method then blocks in waitForUpdate() until the reply arrives.
		"""
		self.updateComplete = False
		# PEP 8: compare against None with `is not`, never `!=`.
		if callback is not None:
			self.factory.dataQueue.append(EmulatorResponse(dataDict, callback, self.factory))
		self.factory.sendDataToServer(sendType, dataDict)
		if not threadable.isInIOThread() and callback is not None:
			return self.waitForUpdate()
Example #5
0
    def in_thread_wrapper(*args, **kw):
        """
        Run ``func`` on the reactor thread, exposing the outcome via a Future.

        When already on the IO thread, ``func`` is called directly and its
        value returned; otherwise the call is packaged into
        ``twisted_wrapper`` so its (possibly Deferred) result can be relayed
        into the Future ``f``.
        """
        if isInIOThread():

            return func(*args, **kw)

        f = Future()

        def twisted_wrapper():
            # Runs on the reactor thread; funnels func's outcome into f.
            try:
                d = func(*args, **kw)
                if isinstance(d, Deferred):

                    def _done(result):
                        # NOTE(review): Future.done() is a query, not a
                        # completion call -- presumably set_result() alone
                        # suffices; confirm the intent of these calls.
                        f.set_result(result)
                        f.done()

                    def _error(e):
                        f.set_exception(e)
                        f.done()

                    d.addCallback(_done)
                    d.addErrback(_error)

                else:
                    f.set_result(d)
                    f.done()

            except Exception, e:
                f.set_exception(e)
                f.done()
        # NOTE(review): nothing here dispatches twisted_wrapper or returns f
        # -- the snippet looks truncated; confirm against the original source.
Example #6
0
 def start(self):
     """Start this worker exactly once, from the IO thread only.

     Raises RuntimeError when already started or when called from any
     thread other than the reactor thread; otherwise marks the worker as
     started and launches its run loop in a green thread.
     """
     error = None
     if self._started:
         error = "worker has already been started"
     elif not threadable.isInIOThread():
         error = "worker can only be started in the IO thread"
     if error is not None:
         raise RuntimeError(error)
     self._started = True
     callInGreenThread(self.__run__)
Example #7
0
    def shutdown(self):
        """
        Checkpoints the session and closes it, stopping the download engine.
        This method has to be called from the reactor thread.

        :returns: a Deferred that fires once the early shutdown and the
            dependent teardown steps below have completed.
        """
        assert isInIOThread()

        @inlineCallbacks
        def on_early_shutdown_complete(_):
            """
            Callback that gets called when the early shutdown has been completed.
            Continues the shutdown procedure that is dependant on the early shutdown.
            :param _: ignored parameter of the Deferred
            """
            # Persist configuration and download state before tearing the
            # network and metadata layers down.
            self.config.write()
            yield self.checkpoint_downloads()
            self.lm.shutdown_downloads()
            self.lm.network_shutdown()
            if self.lm.mds:
                self.lm.mds.shutdown()

            # Close and drop the database handle last.
            if self.sqlite_db:
                self.sqlite_db.close()
            self.sqlite_db = None

        return self.lm.early_shutdown().addCallback(on_early_shutdown_complete)
Example #8
0
    def wait(self, timeout=None):
        """
        Return the result, or throw the exception if result is a failure.

        It may take an unknown amount of time to return the result, so a
        timeout option is provided. If the given number of seconds pass with
        no result, a TimeoutError will be thrown.

        If a previous call timed out, additional calls to this function will
        still wait for a result and return it if available. If a result was
        returned or raised on one call, additional calls will return/raise the
        same result.
        """
        # Blocking in the reactor thread would deadlock the reactor itself.
        if threadable.isInIOThread():
            raise RuntimeError(
                "EventualResult.wait() must not be run in the reactor thread.")

        # Blocking while the import lock is held can deadlock if the Twisted
        # code being waited on performs an import of its own, and would stall
        # imports in every other thread until this call returns.
        if imp.lock_held():
            raise RuntimeError(
                "EventualResult.wait() must not be run at module import time.")

        outcome = self._result(timeout)
        if isinstance(outcome, Failure):
            outcome.raiseException()
        return outcome
Example #9
0
    def _do_save(self):
        """
        Drain the write queue and persist the queued items in batches.

        Must run on a worker thread (never the reactor thread); each pass
        empties the queue into a single session/commit, rolling back the
        whole batch on error.
        """
        assert not isInIOThread()

        while not self.write_queue.empty():
            items = []

            try:
                # Signal other threads that a flush is in flight.
                self.writelock = True
                try:
                    # Drain everything currently queued without blocking.
                    while True:
                        items.append(self.write_queue.get_nowait())
                except Empty:
                    pass

                session = Session()

                try:
                    session.add_all(items)
                    session.commit()
                except:
                    # Undo the partial batch, then propagate the error.
                    session.rollback()
                    raise
                finally:
                    session.close()
            finally:
                self.writelock = False
Example #10
0
    def get_id(self, model, unique, fields):
        ''' Get an ID from the cache or from the database.
        If doesn't exist - create an item.
        All database operations are done from
        the separate thread

        :param model: model/table key used for the cache and counters.
        :param unique: name of the field that uniquely identifies the row.
        :param fields: mapping of field values; ``fields[unique]`` is the
            lookup key.
        :returns: (via returnValue) the row id.  NOTE: this is an
            inlineCallbacks-style generator, not a plain function.
        '''
        assert isInIOThread()

        fval = fields[unique]

        try:
            # Fast path: id already cached for this unique value.
            result = self.cache[model][fval]
            self.counters['hit'][model] += 1
            returnValue(result)
        except KeyError:
            self.counters['miss'][model] += 1

        selectors = {unique: fval}

        # Blocking DB work is pushed to the read pool, off the reactor.
        result, created = yield deferToThreadPool(
            self.reactor, self.read_pool,
            get_or_create,
            model, fields, **selectors)

        result = result.id

        if created:
            self.counters['db_create'][model] += 1
        else:
            self.counters['db_hit'][model] += 1

        # Populate the cache so the next lookup hits the fast path.
        self.cache[model][fval] = result
        returnValue(result)
Example #11
0
def call_in_twisted_thread(func, *args, **kwargs):
    """Invoke ``func`` on the reactor thread.

    Runs it synchronously when already on the IO thread; otherwise hands it
    to the reactor via ``callFromThread``.  Any return value is discarded.
    """
    if not threadable.isInIOThread():
        from twisted.internet import reactor

        reactor.callFromThread(func, *args, **kwargs)
    else:
        func(*args, **kwargs)
Example #12
0
    def __call__(self, environ, start_response):
        """
        This function have to be called in a worker thread, not the IO thread.

        WSGI entry point: dispatches media-transport requests to the
        registered MT handlers and UPnP requests to the device matching the
        routed UDN; everything else answers 404 via ``not_found``.
        """
        rargs = environ['wsgiorg.routing_args'][1]
        controller = rargs['controller']

        # Media Transport
        if controller == 'mt':
            name = rargs['name']
            if name in self.mts:
                return self.mts[name](environ, start_response)
            else:
                return not_found(environ, start_response)

        if controller != 'upnp':
            return not_found(environ, start_response)

        try:
            udn = rargs['udn']
            if isInIOThread():
                # TODO: read request body
                return self.devices[udn](environ, start_response)
            else:
                # read request body
                # NOTE(review): `input` shadows the builtin of the same name.
                input = environ['wsgi.input']
                environ['upnp.body'] = input.read(self.SOAP_BODY_MAX)
                # call the app in IO thread
                args = [udn, environ, start_response]
                blockingCallFromThread(self.reactor, self._call_handler, args)
                # NOTE(review): args holds 3 items here, so args[3] implies
                # _call_handler appends its result to the list -- confirm.
                return args[3]
        except Exception, e:
            # Any failure (missing UDN, handler error) degrades to 404.
            #print e
            #print 'Unknown access: ' + environ['PATH_INFO']
            return not_found(environ, start_response)
Example #13
0
 def _queue_action(self, action, event=None):
     """Route ``action`` to the bot, hopping to the reactor thread if needed.

     Values that are not Action instances are rejected with an error log
     entry instead of being queued.
     """
     if not isinstance(action, Action):
         self.log.error('tried to queue invalid action: {0!r}'.format(action))
         return
     if isInIOThread():
         self.bot.route_response(action, event)
     else:
         reactor.callFromThread(self.bot.route_response, action, event)
Example #14
0
 def write(self, *args):
     """Ensure that all writes are serialized regardless if the command is executing in another thread.
     """
     buf = self.write_buffer
     if buf is not None and hasattr(buf, 'append'):
         buf.append(' '.join(map(str, args)))
     # XXX: HACK: force str for the first param to avoid UnicodeDecodeError happening in Conch
     first = str(args[0])
     if isInIOThread():
         return defer.maybeDeferred(self.terminal.write, first, *args[1:])
     return reactor.callFromThread(self.terminal.write, first, *args[1:])
Example #15
0
        def trans(txn):
            # Transactions are expected to run on a pool thread, never on
            # the reactor (main) thread.
            self.assertFalse(threadable.isInIOThread(), "Transactions must not run in main thread")

            yield Transaction(name="TEST1").save()
            yield Transaction(name="TEST2").save()

            barrier.wait()  # wait here to delay commit
            # NOTE(review): returnValue implies this runs under
            # inlineCallbacks-style machinery declared outside this view.
            returnValue("return value")
Example #16
0
def get_connection(accept_main_thread=False):
    """Return the thread-local ZODB connection, opening it on first use.

    Raises when called from the reactor (main) thread, unless that is
    explicitly allowed via ``accept_main_thread`` or we are running under
    the test harness.
    """
    if isInIOThread() and not accept_main_thread and not _testing:
        raise Exception('The ZODB should not be accessed from the main thread')

    global _connection
    if not hasattr(_connection, 'x'):
        # Lazily open one connection per thread-local slot.
        _connection.x = get_db().open()
    return _connection.x
	def sendDataToServer(self, sendType, dataDict):
		"""Pickle the payload and push it over the wire, thread-safely.

		Merges ``dataDict`` over a dict carrying the packet type, then
		sends directly when on the reactor thread or schedules the send
		via ``callFromThread`` otherwise.
		"""
		payload = {'pktType': sendType}
		payload.update(dataDict)
		serialized = cPickle.dumps(payload)
		if not threadable.isInIOThread():
			reactor.callFromThread(self.connProtocol.sendString, serialized)
		else:
			self.connProtocol.sendString(serialized)
Example #18
0
 def __init__(self, crawler, update_vars=None, code=None):
     """Capture the crawler context for an interactive shell session.

     ``inthread`` records whether construction happened off the reactor
     thread; a falsy ``update_vars`` is replaced with a no-op callable.
     """
     self.crawler = crawler
     self.update_vars = update_vars or (lambda x: None)
     self.code = code
     self.spider = None
     self.vars = {}
     self.inthread = not threadable.isInIOThread()
     self.item_class = load_object(crawler.settings['DEFAULT_ITEM_CLASS'])
Example #19
0
 def _sendMessage(self, msg):
     """ Internally used method to send messages via RCERobotProtocol.

         Dispatches straight to the synced sender on the IO thread, or
         schedules it on the reactor otherwise.

         @param msg:         Message which should be sent.
     """
     if not isInIOThread():
         self._reactor.callFromThread(self._sendMessageSynced, msg)
     else:
         self._sendMessageSynced(msg)
 def waitForMore(self):
     """Block (up to one second) until more container data has loaded.

     Must not be called from the reactor thread -- blocking there would
     stall the event loop, so an exception is raised instead.
     """
     # TODO, this is being called from the twisted event loop if the
     # container has not been loaded already.
     # maximum block 1 second
     self._more_loaded.clear()
     if not self._load_complete:
         _log.debug("waitForMore %s %s (%s)", self.id(), self._loaded_at_update_id, self._last_seen_update_id )
         if not threadable.isInIOThread():
             self._more_loaded.wait(1)   # lets avoid a possible race condition
         else:
             # naughty, naughty -- blocking inside the event loop is forbidden
             raise Exception("waitForMore is in event loop")
Example #21
0
 def callFromThread(self, f, *args, **kw):
     """See twisted.internet.interfaces.IReactorThreads.callFromThread.

     From the IO thread itself, simply schedules ``f`` for the next reactor
     iteration; from any other thread, queues the call and wakes the
     reactor so it gets processed promptly.
     """
     assert callable(f), "%s is not callable" % f
     if threadable.isInIOThread():
         self.callLater(0, f, *args, **kw)
     else:
         # lists are thread-safe in CPython, but not in Jython
         # this is probably a bug in Jython, but until fixed this code
         # won't work in Jython.
         self.threadCallQueue.append((f, args, kw))
         self.wakeUp()
Example #22
0
    def sendMessage(self, msg):
        """ Internally used method to send messages via WebSocket connection.
            Thread-safe implementation.

            @param msg:         Message which should be sent.
        """
        # Split out binary attachments, then serialize the remainder.
        binaries, stripped = recursiveBinarySearch(msg)
        serialized = json.dumps(stripped)

        if not isInIOThread():
            self._connection.reactor.callFromThread(
                self._send, serialized, binaries)
        else:
            self._send(serialized, binaries)
Example #23
0
def getLogEntries(store=None, _reactor=reactor):
    """
    Gets log entries.

    If the reactor is not running or it is running and we are in its
    thread, calls ``_getLogEntries``. If it is running, but we're in a
    different thread, calls ``_getLogEntries`` through
    ``blockingCallFromThread``.
    """
    store = store or app.store  # Don't bind at function creation, for testing
    reactor_not_started = threadable.ioThread is None
    if reactor_not_started or threadable.isInIOThread():
        return _getLogEntries(store)
    return threads.blockingCallFromThread(_reactor, _getLogEntries, store)
Example #24
0
 def tempObserver(event):
     # Probably won't be a problem, but let's not have any intermittent
     # test issues that stem from multi-threaded log messages randomly
     # going off...
     #
     # Re-dispatch onto the reactor thread so `logged` and `d` are only
     # ever touched from one thread.
     if not isInIOThread():
         callFromThread(tempObserver, event)
         return
     if event.get("isError"):
         d.errback()
     m = event.get("message")[0]
     # Only messages from the dummy logger are collected; "[Dummy] z" is
     # the sentinel that completes the Deferred.
     if m.startswith("[Dummy] "):
         logged.append(event)
         if m == "[Dummy] z":
             d.callback("done")
Example #25
0
 def wrapper(*args, **kw):
     """Run ``func`` in a green thread and return an event for its outcome.

     The caller can wait on the returned ``coros.event`` to obtain the
     function's return value, or to have its exception re-raised.
     """
     event = coros.event()
     def wrapped_func():
         try:
             result = func(*args, **kw)
         except:
             # Deliberately broad: forward *any* exception (with its
             # traceback) to the waiter instead of crashing this thread.
             event.send_exception(*sys.exc_info())
         else:
             event.send(result)
     if threadable.isInIOThread():
         callInGreenThread(wrapped_func)
     else:
         # Off the IO thread: schedule the green-thread launch on the
         # reactor rather than calling it directly.
         from twisted.internet import reactor
         reactor.callFromThread(callInGreenThread, wrapped_func)
     return event
Example #26
0
    def invoke_func(*args, **kwargs):
        """Call ``func``, logging an error (with a stack dump) when invoked
        on the reactor thread -- this code must stay off that thread.

        The call still proceeds either way; the check is diagnostic only.
        """
        from twisted.python.threadable import isInIOThread
        from traceback import print_stack

        if isInIOThread():
            import inspect
            caller = inspect.stack()[1]
            callerstr = "%s %s:%s" % (caller[3], caller[1], caller[2])

            from time import time
            # Python 2 idioms here (long, func.func_code).
            logger.error("%d CANNOT BE ON DISPERSYTHREAD %s %s:%s called by %s", long(time()),
                        func.__name__, func.func_code.co_filename, func.func_code.co_firstlineno, callerstr)
            print_stack()

        return func(*args, **kwargs)
Example #27
0
def waitForDeferred(d):
    """
    Wait until deferred finishes and return the result.

    @note: Can not be run on twisted thread.
    """
    # Blocking on the reactor thread would deadlock the reactor.
    if threadable.isInIOThread():
        log.critical("Can't wait on twisted thread")
        raise RuntimeError("Can't wait on twisted thread")

    mailbox = Queue.Queue(1)
    d.addBoth(mailbox.put)
    outcome = mailbox.get(True)
    if isinstance(outcome, failure.Failure):
        outcome.raiseException()
    return outcome
Example #28
0
File: utils.py Project: shvar/redfs
def in_main_thread():
    """Whether the code is running in the main ("reactor") thread.

    >>> in_main_thread()  # more tests in TestInMainThread
    True

    @note: assumes the threads were not ever renamed.

    @note: global variable/constant C{_FORCE_SPOOF_AS_SECONDARY_THREAD}
        can affect the behaviour of this function, but only for the unit tests.

    @rtype: bool
    """
    if _FORCE_SPOOF_AS_SECONDARY_THREAD:
        return False
    return (threadable.isInIOThread()
            or threading.current_thread().name == 'MainThread')
Example #29
0
    def wrapper(*args, **kwargs):
        """Run ``_runTransaction`` on the DB thread pool.

        From the IO thread: dispatch to the pool and return a Deferred that
        fires (back on the reactor) with the transaction's outcome.  From a
        DB thread: run the transaction inline and return its result.
        """
        d = None  # declare here so that on_result can access it

        def on_result(success, txn_deferred):
            # Runs on the pool thread; hop back to the reactor before firing
            # the outer Deferred with the result or failure.
            from twisted.internet import reactor
            txn_deferred.addCallbacks(lambda res: reactor.callFromThread(d.callback, res),
                                      lambda fail: reactor.callFromThread(d.errback, fail))

        if threadable.isInIOThread():
            d = Deferred()
            thpool = Registry.DBPOOL.threadpool
            thpool.callInThreadWithCallback(on_result, _runTransaction, *args, **kwargs)
            return d
        else:
            # we are already in a db thread, so just execute the transaction
            return _runTransaction(*args, **kwargs)
    def start(self):
        """Start the crawler on the IO thread.

        Returns True when the superclass start succeeded and the crawler
        community was registered for auto-loading, False otherwise.
        """
        assert isInIOThread()
        if super(BartercastCrawler, self).start():
            self._create_my_member()
            # Python 2 print statement; announces which member identity
            # this crawler runs under.
            print "loading bartercc as member %s: " % self._my_member
           # self.register_task("unload inactive communities",
            #                   LoopingCall(self.unload_inactive_communities)).start(COMMUNITY_CLEANUP_INTERVAL)

            self.define_auto_load(BarterCommunityCrawler, self._my_member, (), load=True)
            # self.define_auto_load(TrackerHardKilledCommunity, self._my_member)

            # if not self._silent:
            #    self._statistics_looping_call = LoopingCall(self._report_statistics)
            #    self._statistics_looping_call.start(300)

            return True
        return False
Example #31
0
 def _safestop(self, r=None):
     """Stop safely from any thread.

     Stops inline when on the IO thread; otherwise schedules ``_stop``
     through the external task queue.  ``r`` is an ignored callback value.
     """
     if threadable.isInIOThread():
         self._stop()
     else:
         self.external_add_task(0, self._stop)
Example #32
0
    def Add(self,
            sender,
            workerFn,
            args=(),
            kwargs={},
            name=None,
            delay=0.0,
            uId=None,
            retryOnBusy=False,
            priority=0,
            workerType="dbThread"):
        """The sender will send the return value of
        workerFn(*args, **kwargs) to the main thread.

        :param sender: object with sendResult/sendException used to deliver
            the outcome back to the GUI thread.
        :param workerFn: callable executed on the chosen worker.
        :param name: unicode task name used for logging and bookkeeping.
        :param delay: seconds to wait before the task may run.
        :param uId: unicode key used to de-duplicate queued tasks.
        :param workerType: "dbThread" (reactor thread) or "ThreadPool"
            (out of the GUI thread).  NOTE: the previous default,
            "dbthread", matched neither comparison below, so any delayed
            call relying on the default raised RuntimeError.
        """
        if self.utility.abcquitting:
            self._logger.debug("GUIDBHandler: abcquitting ignoring Task(%s)",
                               name)
            return

        assert uId is None or isinstance(uId, unicode), type(uId)
        assert name is None or isinstance(name, unicode), type(name)

        if uId:
            # De-duplicate: an already-queued task with the same uId wins
            # and this call becomes a no-op.
            try:
                self.uIdsLock.acquire()
                if uId in self.uIds:
                    self._logger.debug(
                        "GUIDBHandler: Task(%s) already scheduled in queue, ignoring uId = %s",
                        name, uId)
                    return
                else:
                    self.uIds.add(uId)
            finally:
                self.uIdsLock.release()

            callbackId = uId
        else:
            callbackId = name

        self._logger.debug("GUIDBHandler: adding Task(%s)", callbackId)

        if __debug__:
            # Debug-only accounting of how often each callback is queued.
            self.uIdsLock.acquire()
            self.nrCallbacks[callbackId] = self.nrCallbacks.get(callbackId,
                                                                0) + 1
            if self.nrCallbacks[callbackId] > 10:
                self._logger.debug("GUIDBHandler: Scheduled Task(%s) %d times",
                                   callbackId, self.nrCallbacks[callbackId])

            self.uIdsLock.release()

        t1 = time()

        def wrapper():
            # Executes workerFn and routes its result/exception to sender.
            if __debug__:
                self.uIdsLock.acquire()
                self.nrCallbacks[callbackId] = self.nrCallbacks.get(
                    callbackId, 0) - 1
                self.uIdsLock.release()

            # Call the actual function
            try:
                t2 = time()
                result = workerFn(*args, **kwargs)

            except (AbortedException, wx.PyDeadObjectError):
                # Task was aborted or its GUI target is gone: drop silently.
                return

            except Exception as exc:
                originalTb = format_exc()
                sender.sendException(exc, originalTb)
                return

            t3 = time()
            self._logger.debug(
                "GUIDBHandler: Task(%s) took to be called %.1f (expected %.1f), actual task took %.1f %s",
                name, t2 - t1, delay, t3 - t2, workerType)

            if uId:
                try:
                    self.uIdsLock.acquire()
                    if uId in self.uIds:
                        self.uIds.discard(uId)

                    # this callback has been removed during wrapper, cancel now
                    else:
                        return
                finally:
                    self.uIdsLock.release()

            # if we get to this step, send result to callback
            try:
                sender.sendResult(result)
            except:
                print_exc()
                self._logger.error(
                    "GUIDBHandler: Could not send result of Task(%s)", name)

        wrapper.__name__ = str(name)

        # Have in mind that setting workerType to "ThreadPool" means that the
        # task wants to be executed OUT of the GUI thread, nothing more.
        if delay or not (isInIOThread() or isInThreadPool()):
            if workerType == "dbThread":
                # Schedule the task to be called later in the reactor thread.
                self.utility.session.lm.threadpool.add_task(wrapper, delay)
            elif workerType == "ThreadPool":
                self.utility.session.lm.threadpool.add_task_in_thread(
                    wrapper, delay)
            else:
                # Interpolate eagerly: RuntimeError does not apply %-style
                # formatting to extra constructor arguments.
                raise RuntimeError(
                    "Asked to schedule a task with unknown workerType: %s"
                    % workerType)
        elif workerType == "dbThread" and not isInIOThread():
            reactor.callFromThread(wrapper)
        else:
            self._logger.debug(
                "GUIDBHandler: Task(%s) scheduled to be called on non GUI thread from non GUI thread, "
                "executing synchronously.", name)
            wrapper()
    def register(self, session, session_lock):
        """Perform one-time registration and wiring of all session services.

        Must run on the reactor thread.  Idempotent with respect to the
        `registered` flag: the service construction below happens only on
        the first call; observer registration and notification happen on
        every call.

        :returns: self.startup_deferred, which fires once startup completes.
        """
        assert isInIOThread()
        if not self.registered:
            self.registered = True

            self.session = session
            self.session_lock = session_lock

            # On Mac, we bundle the root certificate for the SSL validation since Twisted is not using the root
            # certificates provided by the system trust store.
            if sys.platform == 'darwin':
                os.environ['SSL_CERT_FILE'] = os.path.join(
                    get_lib_path(), 'root_certs_mac.pem')

            if self.session.config.get_torrent_store_enabled():
                from Tribler.Core.leveldbstore import LevelDbStore
                self.torrent_store = LevelDbStore(
                    self.session.config.get_torrent_store_dir())
                if not self.torrent_store.get_db():
                    raise RuntimeError(
                        "Torrent store (leveldb) is None which should not normally happen"
                    )

            if self.session.config.get_metadata_enabled():
                from Tribler.Core.leveldbstore import LevelDbStore
                self.metadata_store = LevelDbStore(
                    self.session.config.get_metadata_store_dir())
                if not self.metadata_store.get_db():
                    raise RuntimeError(
                        "Metadata store (leveldb) is None which should not normally happen"
                    )

            # torrent collecting: RemoteTorrentHandler
            if self.session.config.get_torrent_collecting_enabled():
                from Tribler.Core.RemoteTorrentHandler import RemoteTorrentHandler
                self.rtorrent_handler = RemoteTorrentHandler(self.session)

            # TODO(emilon): move this to a megacache component or smth
            if self.session.config.get_megacache_enabled():
                from Tribler.Core.CacheDB.SqliteCacheDBHandler import (
                    PeerDBHandler, TorrentDBHandler, MyPreferenceDBHandler,
                    VoteCastDBHandler, ChannelCastDBHandler)
                from Tribler.Core.Category.Category import Category

                self._logger.debug('tlm: Reading Session state from %s',
                                   self.session.config.get_state_dir())

                self.category = Category()

                # create DBHandlers
                self.peer_db = PeerDBHandler(self.session)
                self.torrent_db = TorrentDBHandler(self.session)
                self.mypref_db = MyPreferenceDBHandler(self.session)
                self.votecast_db = VoteCastDBHandler(self.session)
                self.channelcast_db = ChannelCastDBHandler(self.session)

                # initializes DBHandlers
                self.peer_db.initialize()
                self.torrent_db.initialize()
                self.mypref_db.initialize()
                self.votecast_db.initialize()
                self.channelcast_db.initialize()

                from Tribler.Core.Modules.tracker_manager import TrackerManager
                self.tracker_manager = TrackerManager(self.session)

            if self.session.config.get_video_server_enabled():
                self.video_server = VideoServer(
                    self.session.config.get_video_server_port(), self.session)
                self.video_server.start()

            # Dispersy
            self.tftp_handler = None
            if self.session.config.get_dispersy_enabled():
                from Tribler.dispersy.dispersy import Dispersy
                from Tribler.dispersy.endpoint import StandaloneEndpoint

                # set communication endpoint
                endpoint = StandaloneEndpoint(
                    self.session.config.get_dispersy_port())

                working_directory = unicode(
                    self.session.config.get_state_dir())
                self.dispersy = Dispersy(endpoint, working_directory)

                # register TFTP service
                # NOTE: "fffffffd".decode('hex') is a Python 2 idiom for
                # the 4-byte TFTP session prefix.
                from Tribler.Core.TFTP.handler import TftpHandler
                self.tftp_handler = TftpHandler(self.session,
                                                endpoint,
                                                "fffffffd".decode('hex'),
                                                block_size=1024)
                self.tftp_handler.initialize()

            if self.session.config.get_torrent_search_enabled(
            ) or self.session.config.get_channel_search_enabled():
                self.search_manager = SearchManager(self.session)
                self.search_manager.initialize()

        # Runs on every call (not just the first registration).
        if not self.initComplete:
            self.init()

        self.session.add_observer(self.on_tribler_started, NTFY_TRIBLER,
                                  [NTFY_STARTED])
        self.session.notifier.notify(NTFY_TRIBLER, NTFY_STARTED, None)
        return self.startup_deferred
Example #34
0
 def call(self, func, *args, **kargs):
     """Invoke ``func`` with the given arguments on the reactor thread.

     Runs directly when already on the IO thread; otherwise blocks the
     caller via ``blockingCallFromThread`` until the result is available.
     """
     # TODO(emilon): timeout is not supported anymore, clean the tests so
     # they don't pass the named argument.
     if not isInIOThread():
         return blockingCallFromThread(reactor, func, *args, **kargs)
     return func(*args, **kargs)
Example #35
0
    def discover_and_sync_pod(self):
        """Discover and sync the pod information.

        Two execution modes: under the reactor (IO thread) it returns a
        Deferred chaining discovery -> validation -> database sync; outside
        the reactor it performs the same steps synchronously and returns the
        synced instance directly.  Failures surface as PodProblem either way.
        """
        def update_db(result):
            # Runs inside a database transaction; persists the discovered
            # pod and records rack-controller routability.
            discovered_pod, discovered = result

            # When called with an instance that has no name, be sure to set
            # it before going any further. If this is a new instance this will
            # also create it in the database.
            if not self.instance.name:
                self.instance.set_random_name()
            self.instance.sync(discovered_pod, self.request.user)

            # Save which rack controllers can route and which cannot.
            discovered_rack_ids = [
                rack_id for rack_id, _ in discovered[0].items()
            ]
            for rack_controller in RackController.objects.all():
                routable = rack_controller.system_id in discovered_rack_ids
                bmc_route_model = BMCRoutableRackControllerRelationship
                relation, created = (bmc_route_model.objects.get_or_create(
                    bmc=self.instance.as_bmc(),
                    rack_controller=rack_controller,
                    defaults={'routable': routable}))
                if not created and relation.routable != routable:
                    relation.routable = routable
                    relation.save()
            return self.instance

        if isInIOThread():
            # Running in twisted reactor, do the work inside the reactor.
            d = discover_pod(self.instance.power_type,
                             self.instance.power_parameters,
                             pod_id=self.instance.id,
                             name=self.instance.name)
            d.addCallback(lambda discovered:
                          (get_best_discovered_result(discovered), discovered))

            def catch_no_racks(result):
                # A None discovery result means no rack controller answered.
                discovered_pod, discovered = result
                if discovered_pod is None:
                    raise PodProblem(
                        "Unable to start the pod discovery process. "
                        "No rack controllers connected.")
                return discovered_pod, discovered

            def wrap_errors(failure):
                # Normalize every non-PodProblem failure into PodProblem.
                if failure.check(PodProblem):
                    return failure
                else:
                    raise PodProblem(str(failure.value))

            d.addCallback(catch_no_racks)
            d.addCallback(partial(deferToDatabase, transactional(update_db)))
            d.addErrback(wrap_errors)
            return d
        else:
            # Perform the actions inside the executing thread.
            try:
                discovered = discover_pod(self.instance.power_type,
                                          self.instance.power_parameters,
                                          pod_id=self.instance.id,
                                          name=self.instance.name)
            except Exception as exc:
                raise PodProblem(str(exc)) from exc

            # Use the first discovered pod object. All other objects are
            # ignored. The other rack controllers that also provided a result
            # can route to the pod.
            try:
                discovered_pod = get_best_discovered_result(discovered)
            except Exception as error:
                raise PodProblem(str(error))
            if discovered_pod is None:
                raise PodProblem("Unable to start the pod discovery process. "
                                 "No rack controllers connected.")
            return update_db((discovered_pod, discovered))
Example #36
0
 def threadedFunc():
     # Record whether this worker thread is seen as the IO thread, then
     # signal the waiting test that the sample has been taken.
     result.append(threadable.isInIOThread())
     waiter.set()
Example #37
0
 def wrapper(*args, **kwargs):
     """Guard ``fun`` against being invoked from the reactor (main) thread.

     The check is skipped entirely while the test flag ``_testing`` is set.
     """
     on_main_thread = isInIOThread()
     if on_main_thread and not _testing:
         raise Exception('The ZODB should not be accessed from the main thread')
     return fun(*args, **kwargs)
Example #38
0
    def compose(self,
                timeout=120,
                creation_type=NODE_CREATION_TYPE.MANUAL,
                skip_commissioning=None):
        """Compose the machine.

        Internal operation of this form is asynchronous. It will block the
        calling thread until the asynchronous operation is complete. Adjust
        `timeout` to minimize the maximum wait for the asynchronous operation.

        :param timeout: Seconds to wait for the compose when called from
            outside the reactor before raising `PodProblem`.
        :param creation_type: Creation type passed to `create_machine`.
        :param skip_commissioning: Whether to skip commissioning; when None
            the form's 'skip_commissioning' value is used.
        :return: A Deferred when called on the reactor thread, otherwise the
            created machine from `create_and_sync`.
        """

        if skip_commissioning is None:
            skip_commissioning = self.get_value_for('skip_commissioning')

        def check_over_commit_ratios(result):
            # Check over commit ratios. `result` is passed through untouched
            # so this can sit in the middle of a callback chain.
            over_commit_message = self.pod.check_over_commit_ratios(
                requested_cores=self.get_value_for('cores'),
                requested_memory=self.get_value_for('memory'))
            if over_commit_message:
                raise PodProblem(over_commit_message)
            return result

        def create_and_sync(result):
            # Persist the discovered machine in the database and sync the
            # pod hints reported by the driver.
            discovered_machine, pod_hints = result
            created_machine = self.pod.create_machine(
                discovered_machine,
                self.request.user,
                skip_commissioning=skip_commissioning,
                creation_type=creation_type,
                domain=self.get_value_for('domain'),
                pool=self.get_value_for('pool'),
                zone=self.get_value_for('zone'))
            self.pod.sync_hints(pod_hints)
            return created_machine

        # Copy so the default storage pool id can be injected without
        # mutating the pod's stored power parameters.
        power_parameters = self.pod.power_parameters.copy()

        def _set_default_pool_id():
            if self.pod.default_storage_pool is not None:
                power_parameters['default_storage_pool_id'] = (
                    self.pod.default_storage_pool.pool_id)

        if isInIOThread():
            # Running under the twisted reactor, before the work from inside.
            d = deferToDatabase(transactional(self.pod.get_client_identifiers))
            d.addCallback(getClientFromIdentifiers)
            d.addCallback(
                partial(deferToDatabase,
                        transactional(check_over_commit_ratios)))
            d.addCallback(callOutToDatabase, _set_default_pool_id)
            d.addCallback(compose_machine,
                          self.pod.power_type,
                          power_parameters,
                          self.get_requested_machine(),
                          pod_id=self.pod.id,
                          name=self.pod.name)
            d.addCallback(
                partial(deferToDatabase, transactional(create_and_sync)))
            return d
        else:
            # Running outside of reactor. Do the work inside and then finish
            # the work outside.
            @asynchronous
            def wrap_compose_machine(client_idents, pod_type, parameters,
                                     request, pod_id, name):
                """Wrapper to get the client."""
                d = getClientFromIdentifiers(client_idents)
                d.addCallback(
                    partial(deferToDatabase,
                            transactional(check_over_commit_ratios)))
                d.addCallback(compose_machine,
                              pod_type,
                              parameters,
                              request,
                              pod_id=pod_id,
                              name=name)
                return d

            _set_default_pool_id()
            try:
                result = wrap_compose_machine(
                    self.pod.get_client_identifiers(),
                    self.pod.power_type,
                    power_parameters,
                    self.get_requested_machine(),
                    pod_id=self.pod.id,
                    name=self.pod.name).wait(timeout)
            except crochet.TimeoutError:
                raise PodProblem(
                    "Unable to compose a machine because '%s' driver "
                    "timed out after %d seconds." %
                    (self.pod.power_type, timeout))
            return create_and_sync(result)
Example #39
0
    def __init__(self,
                 params,
                 installdir,
                 autoload_discovery=True,
                 use_torrent_search=True,
                 use_channel_search=True):
        """Boot the Tribler application.

        Stage 1 creates the session; stage 2 starts it and builds the wx
        GUI. Any startup failure is routed to ``self.onError``.

        NOTE(review): asserted to run off the twisted reactor thread.
        """
        super(ABCApp, self).__init__()
        assert not isInIOThread(
        ), "isInIOThread() seems to not be working correctly"
        self._logger = logging.getLogger(self.__class__.__name__)

        self.params = params
        self.installdir = installdir

        self.state_dir = None
        self.error = None
        self.last_update = 0
        self.ready = False
        self.done = False
        self.frame = None
        self.upgrader = None
        self.i2i_server = None

        # DISPERSY will be set when available
        self.dispersy = None
        self.tunnel_community = None

        self.webUI = None
        self.utility = None

        # Stage 1 start
        session = self.InitStage1(installdir,
                                  autoload_discovery=autoload_discovery,
                                  use_torrent_search=use_torrent_search,
                                  use_channel_search=use_channel_search)

        try:
            self._logger.info('Client Starting Up.')
            self._logger.info("Tribler is using %s as working directory",
                              self.installdir)

            # Stage 2: show the splash window and start the session

            self.utility = Utility(self.installdir, session.get_state_dir())

            if self.utility.read_config(u'saveas', u'downloadconfig'):
                DefaultDownloadStartupConfig.getInstance().set_dest_dir(
                    self.utility.read_config(u'saveas', u'downloadconfig'))

            self.utility.set_app(self)
            self.utility.set_session(session)
            self.guiUtility = GUIUtility.getInstance(self.utility, self.params,
                                                     self)
            GUIDBProducer.getInstance()

            # Broadcast that the initialisation is starting for the splash gauge and those who are interested
            self.utility.session.notifier.notify(NTFY_STARTUP_TICK,
                                                 NTFY_CREATE, None, None)

            session.notifier.notify(NTFY_STARTUP_TICK, NTFY_INSERT, None,
                                    'Starting API')
            wx.Yield()

            self._logger.info('Tribler Version: %s Build: %s', version_id,
                              commit_id)

            version_info = self.utility.read_config('version_info')
            if version_info.get('version_id', None) != version_id:
                # First run of a different version
                version_info['first_run'] = int(time())
                version_info['version_id'] = version_id
                self.utility.write_config('version_info', version_info)

            session.notifier.notify(
                NTFY_STARTUP_TICK, NTFY_INSERT, None,
                'Starting session and upgrading database (it may take a while)'
            )
            wx.Yield()

            session.start()
            self.dispersy = session.lm.dispersy
            self.dispersy.attach_progress_handler(self.progressHandler)

            session.notifier.notify(NTFY_STARTUP_TICK, NTFY_INSERT, None,
                                    'Initializing Family Filter')
            wx.Yield()
            cat = session.lm.category

            state = self.utility.read_config('family_filter')
            if state in (1, 0):
                cat.set_family_filter(state == 1)
            else:
                # No stored preference yet: default the family filter to on.
                self.utility.write_config('family_filter', 1)
                self.utility.flush_config()

                cat.set_family_filter(True)

            # Create global speed limits
            session.notifier.notify(NTFY_STARTUP_TICK, NTFY_INSERT, None,
                                    'Setting up speed limits')
            wx.Yield()

            # Counter to suppress some event from occurring
            self.ratestatecallbackcount = 0

            maxup = self.utility.read_config('maxuploadrate')
            maxdown = self.utility.read_config('maxdownloadrate')
            # set speed limits using LibtorrentMgr
            session.set_max_upload_speed(maxup)
            session.set_max_download_speed(maxdown)

            # Only allow updates to come in after we defined ratelimiter
            self.prevActiveDownloads = []
            session.set_download_states_callback(self.sesscb_states_callback)

            # Schedule task for checkpointing Session, to avoid hash checks after
            # crashes.
            self.register_task("checkpoint_loop", LoopingCall(self.guiservthread_checkpoint_timer))\
                .start(SESSION_CHECKPOINT_INTERVAL, now=False)

            session.notifier.notify(NTFY_STARTUP_TICK, NTFY_INSERT, None,
                                    'GUIUtility register')
            wx.Yield()
            self.guiUtility.register()

            self.frame = MainFrame(self, None, False)
            self.frame.SetIcon(
                wx.Icon(
                    os.path.join(self.installdir, 'Tribler', 'Main', 'vwxGUI',
                                 'images', 'tribler.ico'), wx.BITMAP_TYPE_ICO))

            # Arno, 2011-06-15: VLC 1.1.10 pops up separate win, don't have two.
            self.frame.videoframe = None

            if sys.platform == 'win32':
                wx.CallAfter(self.frame.top_bg.Refresh)
                wx.CallAfter(self.frame.top_bg.Layout)
            else:
                self.frame.top_bg.Layout()

            # Arno, 2007-05-03: wxWidgets 2.8.3.0 and earlier have the MIME-type for .bmp
            # files set to 'image/x-bmp' whereas 'image/bmp' is the official one.
            try:
                bmphand = None
                hands = wx.Image.GetHandlers()
                for hand in hands:
                    if hand.GetMimeType() == 'image/x-bmp':
                        bmphand = hand
                        break
                # wx.Image.AddHandler()
                if bmphand is not None:
                    bmphand.SetMimeType('image/bmp')
            except:
                # wx < 2.7 don't like wx.Image.GetHandlers()
                print_exc()

            session.notifier.notify(NTFY_STARTUP_TICK, NTFY_DELETE, None, None)
            wx.Yield()
            self.frame.Show(True)
            self.register_task('free_space_check', LoopingCall(self.guiservthread_free_space_check))\
                .start(FREE_SPACE_CHECK_INTERVAL)

            self.webUI = None
            if self.utility.read_config('use_webui'):
                try:
                    from Tribler.Main.webUI.webUI import WebUI
                    self.webUI = WebUI.getInstance(
                        self.guiUtility.library_manager,
                        self.guiUtility.torrentsearch_manager,
                        self.utility.read_config('webui_port'))
                    self.webUI.start()
                except Exception:
                    print_exc()

            self.emercoin_mgr = None
            try:
                from Tribler.Main.Emercoin.EmercoinMgr import EmercoinMgr
                self.emercoin_mgr = EmercoinMgr(self.utility)
            except Exception:
                print_exc()

            wx.CallAfter(self.PostInit2)

            # 08/02/10 Boudewijn: Working from home though console
            # doesn't allow me to press close.  The statement below
            # gracefully closes Tribler after 120 seconds.
            # wx.CallLater(120*1000, wx.GetApp().Exit)

            self.ready = True

        except Exception as e:
            session.notifier.notify(NTFY_STARTUP_TICK, NTFY_DELETE, None, None)
            self.onError(e)
Example #40
0
 def test_connections_are_all_usable_outside_the_event_loop(self):
     """Every configured database connection is usable off the reactor."""
     self.assertFalse(isInIOThread())
     for db_alias in connections:
         self.assertTrue(connections[db_alias].is_usable())
Example #41
0
File: pods.py Project: uraniid/maas
    def save(self, *args, **kwargs):
        """Persist the pod into the database.

        When called on the reactor thread this returns a Deferred that
        fires with the result of `discover_and_sync_pod`; otherwise the
        same steps run synchronously in the calling thread.
        """
        def check_for_duplicate(power_type, power_parameters):
            # When the Pod is new try to get a BMC of the same type and
            # parameters to convert the BMC to a new Pod. When the Pod is not
            # new the form will use the already existing pod instance to update
            # those fields. If updating the fields causes a duplicate BMC then
            # a validation error will be raised from the model level.
            if self.is_new:
                bmc = BMC.objects.filter(
                    power_type=power_type,
                    power_parameters=power_parameters).first()
                if bmc is not None:
                    if bmc.bmc_type == BMC_TYPE.BMC:
                        # Convert the BMC to a Pod and set as the instance for
                        # the PodForm.
                        bmc.bmc_type = BMC_TYPE.POD
                        bmc.default_pool = (
                            ResourcePool.objects.get_default_resource_pool())
                        return bmc.as_pod()
                    else:
                        # Pod already exists with the same power_type and
                        # parameters.
                        raise ValidationError("Pod %s with type and "
                                              "parameters already exist." %
                                              bmc.name)

        def update_obj(existing_obj):
            # Apply the cleaned form fields to the (possibly converted)
            # instance without committing it to the database yet.
            if existing_obj is not None:
                self.instance = existing_obj
            self.instance = super(PodForm, self).save(commit=False)
            if tags:
                self.instance.tags = tags
            if zone:
                self.instance.zone = zone
            if cpu_over_commit_ratio:
                self.instance.cpu_over_commit_ratio = cpu_over_commit_ratio
            if memory_over_commit_ratio:
                self.instance.memory_over_commit_ratio = (
                    memory_over_commit_ratio)
            self.instance.power_type = power_type
            self.instance.power_parameters = power_parameters
            return self.instance

        tags = self.cleaned_data.get('tags')
        zone = self.cleaned_data.get('zone')
        cpu_over_commit_ratio = self.cleaned_data.get('cpu_over_commit_ratio')
        memory_over_commit_ratio = self.cleaned_data.get(
            'memory_over_commit_ratio')
        power_type = self.cleaned_data['type']
        # Set power_parameters to the generated param_fields.
        power_parameters = {
            param_name: self.cleaned_data[param_name]
            for param_name in self.param_fields.keys()
            if param_name in self.cleaned_data
        }

        if isInIOThread():
            # Running in twisted reactor, do the work inside the reactor.
            d = deferToDatabase(transactional(check_for_duplicate), power_type,
                                power_parameters)
            d.addCallback(update_obj)
            d.addCallback(lambda _: self.discover_and_sync_pod())
            return d
        else:
            # Perform the actions inside the executing thread.
            existing_obj = check_for_duplicate(power_type, power_parameters)
            if existing_obj is not None:
                self.instance = existing_obj
            self.instance = update_obj(self.instance)
            return self.discover_and_sync_pod()
Example #42
0
    def __init__(self,
                 params,
                 installdir,
                 autoload_discovery=True,
                 use_torrent_search=True,
                 use_channel_search=True):
        """Boot the Tribler application behind a gauge splash screen.

        Stage 1 creates the session; stage 2 starts it and builds the wx
        GUI. On failure the splash is destroyed and the error is routed to
        ``self.onError``.

        NOTE(review): asserted to run off the twisted reactor thread.
        """
        assert not isInIOThread(
        ), "isInIOThread() seems to not be working correctly"
        self._logger = logging.getLogger(self.__class__.__name__)

        self.params = params
        self.installdir = installdir

        self.state_dir = None
        self.error = None
        self.last_update = 0
        self.ready = False
        self.done = False
        self.frame = None
        self.upgrader = None

        self.said_start_playback = False
        self.decodeprogress = 0

        self.old_reputation = 0

        # DISPERSY will be set when available
        self.dispersy = None
        # BARTER_COMMUNITY will be set when both Dispersy and the EffortCommunity are available
        self.barter_community = None
        self.tunnel_community = None

        self.torrentfeed = None
        self.webUI = None
        self.utility = None

        # Stage 1 start
        session = self.InitStage1(installdir,
                                  autoload_discovery=autoload_discovery,
                                  use_torrent_search=use_torrent_search,
                                  use_channel_search=use_channel_search)

        self.splash = None
        try:
            bm = self.gui_image_manager.getImage(u'splash.png')
            self.splash = GaugeSplash(bm, "Loading...", 13)
            self.splash.Show()

            self._logger.info('Client Starting Up.')
            self._logger.info("Tribler is using %s as working directory",
                              self.installdir)

            # Stage 2: show the splash window and start the session

            self.splash.tick('Starting API')
            s = self.startAPI(session, self.splash.tick)

            self.utility = Utility(self.installdir, s.get_state_dir())

            if self.utility.read_config(u'saveas', u'downloadconfig'):
                DefaultDownloadStartupConfig.getInstance().set_dest_dir(
                    self.utility.read_config(u'saveas', u'downloadconfig'))

            self.utility.set_app(self)
            self.utility.set_session(s)
            self.guiUtility = GUIUtility.getInstance(self.utility, self.params,
                                                     self)
            GUIDBProducer.getInstance()

            self._logger.info('Tribler Version: %s Build: %s', version_id,
                              commit_id)

            version_info = self.utility.read_config('version_info')
            if version_info.get('version_id', None) != version_id:
                # First run of a different version
                version_info['first_run'] = int(time())
                version_info['version_id'] = version_id
                self.utility.write_config('version_info', version_info)

            self.splash.tick(
                'Starting session and upgrading database (it may take a while)'
            )
            s.start()
            self.dispersy = s.lm.dispersy

            self.splash.tick('Loading userdownloadchoice')
            from Tribler.Main.vwxGUI.UserDownloadChoice import UserDownloadChoice
            UserDownloadChoice.get_singleton().set_utility(self.utility)

            self.splash.tick('Initializing Family Filter')
            cat = Category.getInstance(session)

            state = self.utility.read_config('family_filter')
            if state in (1, 0):
                cat.set_family_filter(state == 1)
            else:
                # No stored preference yet: default the family filter to on.
                self.utility.write_config('family_filter', 1)
                self.utility.flush_config()

                cat.set_family_filter(True)

            # Create global speed limits
            self.splash.tick('Setting up speed limits')

            # Counter to suppress some event from occurring
            self.ratestatecallbackcount = 0

            maxup = self.utility.read_config('maxuploadrate')
            maxdown = self.utility.read_config('maxdownloadrate')
            # set speed limits using LibtorrentMgr
            s.set_max_upload_speed(maxup)
            s.set_max_download_speed(maxdown)

            # Only allow updates to come in after we defined ratelimiter
            self.prevActiveDownloads = []
            s.set_download_states_callback(self.sesscb_states_callback)

            # Schedule task for checkpointing Session, to avoid hash checks after
            # crashes.
            startWorker(consumer=None,
                        workerFn=self.guiservthread_checkpoint_timer,
                        delay=SESSION_CHECKPOINT_INTERVAL)

            if not ALLOW_MULTIPLE:
                # Put it here so an error is shown in the startup-error popup
                # Start server for instance2instance communication
                Instance2InstanceServer(
                    self.utility.read_config('i2ilistenport'),
                    self.i2ithread_readlinecallback)

            self.splash.tick('GUIUtility register')
            self.guiUtility.register()

            self.frame = MainFrame(
                self, None, PLAYBACKMODE_INTERNAL
                in return_feasible_playback_modes(), self.splash.tick)
            self.frame.SetIcon(
                wx.Icon(
                    os.path.join(self.installdir, 'Tribler', 'Main', 'vwxGUI',
                                 'images', 'tribler.ico'), wx.BITMAP_TYPE_ICO))

            # Arno, 2011-06-15: VLC 1.1.10 pops up separate win, don't have two.
            self.frame.videoframe = None
            if PLAYBACKMODE_INTERNAL in return_feasible_playback_modes():
                vlcwrap = s.lm.videoplayer.get_vlcwrap()
                wx.CallLater(3000, vlcwrap._init_vlc)
                self.frame.videoframe = VideoDummyFrame(
                    self.frame.videoparentpanel, self.utility, vlcwrap)

            if sys.platform == 'win32':
                wx.CallAfter(self.frame.top_bg.Refresh)
                wx.CallAfter(self.frame.top_bg.Layout)
            else:
                self.frame.top_bg.Layout()

            # Arno, 2007-05-03: wxWidgets 2.8.3.0 and earlier have the MIME-type for .bmp
            # files set to 'image/x-bmp' whereas 'image/bmp' is the official one.
            try:
                bmphand = None
                hands = wx.Image.GetHandlers()
                for hand in hands:
                    # print "Handler",hand.GetExtension(),hand.GetType(),hand.GetMimeType()
                    if hand.GetMimeType() == 'image/x-bmp':
                        bmphand = hand
                        break
                # wx.Image.AddHandler()
                if bmphand is not None:
                    bmphand.SetMimeType('image/bmp')
            except:
                # wx < 2.7 don't like wx.Image.GetHandlers()
                print_exc()

            self.splash.Destroy()
            self.frame.Show(True)
            session.lm.threadpool.call_in_thread(
                0, self.guiservthread_free_space_check)

            self.torrentfeed = RssParser.getInstance()

            self.webUI = None
            if self.utility.read_config('use_webui'):
                try:
                    from Tribler.Main.webUI.webUI import WebUI
                    self.webUI = WebUI.getInstance(
                        self.guiUtility.library_manager,
                        self.guiUtility.torrentsearch_manager,
                        self.utility.read_config('webui_port'))
                    self.webUI.start()
                except Exception:
                    print_exc()

            self.emercoin_mgr = None
            try:
                from Tribler.Main.Emercoin.EmercoinMgr import EmercoinMgr
                self.emercoin_mgr = EmercoinMgr(self.utility)
            except Exception:
                print_exc()

            wx.CallAfter(self.PostInit2)

            # 08/02/10 Boudewijn: Working from home though console
            # doesn't allow me to press close.  The statement below
            # gracefully closes Tribler after 120 seconds.
            # wx.CallLater(120*1000, wx.GetApp().Exit)

            self.ready = True

        except Exception as e:
            if self.splash:
                self.splash.Destroy()

            self.onError(e)
Example #43
0
def call_in_twisted_thread(func, *args, **kwargs):
    """Run ``func`` on the reactor thread.

    Calls directly when already on the reactor thread, otherwise schedules
    the call via ``reactor.callFromThread``.
    """
    if not threadable.isInIOThread():
        # Foreign thread: hand the call over to the reactor thread.
        from twisted.internet import reactor
        reactor.callFromThread(func, *args, **kwargs)
        return
    func(*args, **kwargs)
Example #44
0
    def compose(self,
                timeout=120,
                skip_commissioning=False,
                creation_type=NODE_CREATION_TYPE.MANUAL):
        """Compose the machine.

        Internal operation of this form is asynchronous. It will block the
        calling thread until the asynchronous operation is complete. Adjust
        `timeout` to minimize the maximum wait for the asynchronous operation.

        :param timeout: Seconds to wait for the compose when called from
            outside the reactor before raising `PodProblem`.
        :param skip_commissioning: Whether commissioning is skipped for the
            newly created machine.
        :param creation_type: Creation type passed to `create_machine`.
        :return: A Deferred when called on the reactor thread, otherwise the
            created machine from `create_and_sync`.
        """
        def create_and_sync(result):
            # Persist the discovered machine in the database and sync the
            # pod hints reported by the driver.
            discovered_machine, pod_hints = result
            created_machine = self.pod.create_machine(
                discovered_machine,
                self.request.user,
                skip_commissioning=skip_commissioning,
                creation_type=creation_type,
                domain=self.get_value_for('domain'),
                zone=self.get_value_for('zone'))
            self.pod.sync_hints(pod_hints)
            return created_machine

        if isInIOThread():
            # Running under the twisted reactor, before the work from inside.
            d = deferToDatabase(transactional(self.pod.get_client_identifiers))
            d.addCallback(getClientFromIdentifiers)
            d.addCallback(compose_machine,
                          self.pod.power_type,
                          self.pod.power_parameters,
                          self.get_requested_machine(),
                          pod_id=self.pod.id,
                          name=self.pod.name)
            d.addCallback(
                partial(deferToDatabase, transactional(create_and_sync)))
            return d
        else:
            # Running outside of reactor. Do the work inside and then finish
            # the work outside.
            @asynchronous
            def wrap_compose_machine(client_idents, pod_type, parameters,
                                     request, pod_id, name):
                """Wrapper to get the client."""
                d = getClientFromIdentifiers(client_idents)
                d.addCallback(compose_machine,
                              pod_type,
                              parameters,
                              request,
                              pod_id=pod_id,
                              name=name)
                return d

            try:
                result = wrap_compose_machine(
                    self.pod.get_client_identifiers(),
                    self.pod.power_type,
                    self.pod.power_parameters,
                    self.get_requested_machine(),
                    pod_id=self.pod.id,
                    name=self.pod.name).wait(timeout)
            except crochet.TimeoutError:
                raise PodProblem(
                    "Unable to compose a machine because '%s' driver "
                    "timed out after %d seconds." %
                    (self.pod.power_type, timeout))
            return create_and_sync(result)
Example #45
0
 def check():
     # Record whether this code runs on the reactor thread, then ask the
     # reactor (from this foreign thread) to stop.
     on_reactor = isInIOThread()
     results.append(on_reactor)
     reactor.callFromThread(reactor.stop)
Example #46
0
    def save(self, *args, **kwargs):
        """Persist the pod into the database.

        When called on the reactor thread this returns a Deferred that
        fires with the result of `discover_and_sync_pod`; otherwise the
        same steps run synchronously in the calling thread.
        """
        def check_for_duplicate(power_type, power_parameters):
            # When the Pod is new try to get a BMC of the same type and
            # parameters to convert the BMC to a new Pod. When the Pod is not
            # new the form will use the already existing pod instance to update
            # those fields. If updating the fields causes a duplicate BMC then
            # a validation error will be raised from the model level.
            if self.is_new:
                bmc = BMC.objects.filter(
                    power_type=power_type,
                    power_parameters=power_parameters).first()
                if bmc is not None:
                    if bmc.bmc_type == BMC_TYPE.BMC:
                        # Convert the BMC to a Pod and set as the instance for
                        # the PodForm.
                        bmc.bmc_type = BMC_TYPE.POD
                        bmc.pool = (
                            ResourcePool.objects.get_default_resource_pool())
                        return bmc.as_pod()
                    else:
                        # Pod already exists with the same power_type and
                        # parameters.
                        raise ValidationError("Pod %s with type and "
                                              "parameters already exist." %
                                              bmc.name)

        def update_obj(existing_obj):
            # Apply the cleaned form fields to the (possibly converted)
            # instance without committing it to the database yet.
            if existing_obj is not None:
                self.instance = existing_obj
            self.instance = super(PodForm, self).save(commit=False)
            self.instance.power_type = power_type
            self.instance.power_parameters = power_parameters
            # Add tag for pod console logging with
            # appropriate kernel parameters.
            tag, _ = Tag.objects.get_or_create(
                name="pod-console-logging",
                kernel_opts="console=tty1 console=ttyS0",
            )
            # Add this tag to the pod.  This only adds the tag
            # if it is not present on the pod.
            self.instance.add_tag(tag.name)
            return self.instance

        power_type = self.cleaned_data["type"]
        # Set power_parameters to the generated param_fields.
        power_parameters = {
            param_name: self.cleaned_data[param_name]
            for param_name in self.param_fields.keys()
            if param_name in self.cleaned_data
        }

        if isInIOThread():
            # Running in twisted reactor, do the work inside the reactor.
            d = deferToDatabase(
                transactional(check_for_duplicate),
                power_type,
                power_parameters,
            )
            d.addCallback(partial(deferToDatabase, transactional(update_obj)))
            d.addCallback(lambda _: self.discover_and_sync_pod())
            return d
        else:
            # Perform the actions inside the executing thread.
            existing_obj = check_for_duplicate(power_type, power_parameters)
            if existing_obj is not None:
                self.instance = existing_obj
            self.instance = update_obj(self.instance)
            return self.discover_and_sync_pod()
Example #47
0
 def validate_in_reactor(_):
     # Deferred callback: the previous result is ignored; just assert
     # that execution is happening on the reactor (IO) thread.
     self.assertTrue(isInIOThread())
Example #48
0
    def _do_get_id(self, model, unique, fval, fields):
        """Return the id of the ``model`` row whose ``unique`` column
        equals ``fval``; raises if no single match exists."""
        # Database access must never happen on the reactor thread.
        assert not isInIOThread()

        unique_column = getattr(model, unique)
        row = Session().query(model).filter(unique_column == fval).one()
        return row.id
Example #49
0
 def check():
     # Record the reactor-thread status, then shut the reactor down
     # directly (we are running on the reactor thread here).
     io_status = isInIOThread()
     results.append(io_status)
     reactor.stop()
Example #50
0
    def create_channelcast(self):
        """Send one channelcast message to the first non-blocked walk/stumble
        candidate, then add that candidate to the blocklist for
        CHANNELCAST_BLOCK_PERIOD seconds. Must run on the reactor thread."""
        assert isInIOThread()
        now = time()

        # Torrent selections are computed lazily, at most once per call.
        favoriteTorrents = None
        normalTorrents = None

        # cleanup blocklist
        for candidate in self._blocklist.keys():
            if self._blocklist[
                    candidate] + CHANNELCAST_BLOCK_PERIOD < now:  # unblock address
                self._blocklist.pop(candidate)

        mychannel_id = self._channelcast_db.getMyChannelId()

        # loop through all candidates to see if we can find a non-blocked address
        for candidate in [
                candidate
                for candidate in self._iter_categories([u'walk', u'stumble'],
                                                       once=True)
                if candidate not in self._blocklist
        ]:
            if not candidate:
                continue

            didFavorite = False
            # only check if we actually have a channel
            if mychannel_id:
                peer_ids = set()
                key = candidate.get_member().public_key
                peer_ids.add(self._peer_db.addOrGetPeerID(key))

                # see if all members on this address are subscribed to my channel
                didFavorite = len(peer_ids) > 0
                for peer_id in peer_ids:
                    vote = self._votecast_db.getVoteForMyChannel(peer_id)
                    if vote != 2:
                        didFavorite = False
                        break

            # Modify type of message depending on if all peers have marked my channels as their favorite
            if didFavorite:
                if not favoriteTorrents:
                    favoriteTorrents = self._channelcast_db.getRecentAndRandomTorrents(
                        0, 0, 25, 25, 5)
                torrents = favoriteTorrents
            else:
                if not normalTorrents:
                    normalTorrents = self._channelcast_db.getRecentAndRandomTorrents(
                    )
                torrents = normalTorrents

            # torrents is a dictionary of channel_id (key) and infohashes (value)
            if len(torrents) > 0:
                meta = self.get_meta_message(u"channelcast")
                message = meta.impl(authentication=(self._my_member, ),
                                    distribution=(self.global_time, ),
                                    destination=(candidate, ),
                                    payload=(torrents, ))

                self._dispersy._forward([message])

                # we've send something to this address, add to blocklist
                self._blocklist[candidate] = now

                nr_torrents = sum(
                    len(infohashes) for infohashes in torrents.itervalues())
                self._logger.debug(
                    "sending channelcast message containing %s torrents to %s didFavorite %s",
                    nr_torrents, candidate.sock_addr, didFavorite)
                # we're done
                break

        else:
            self._logger.debug(
                "Did not send channelcast messages, no candidates or torrents")
Example #51
0
def get_db():
    """Return the module-level ZODB handle.

    Raises:
        Exception: if the database has not been initialized, or if the
            caller is on the reactor (IO) thread outside of testing —
            ZODB access may block and must stay off the main thread.
    """
    if not _db:
        raise Exception('DB not initialized')
    # Guard against blocking the reactor; relaxed while _testing is set.
    if isInIOThread() and not _testing:
        raise Exception('The ZODB should not be accessed from the main thread')
    return _db
Example #52
0
 def helper(*args, **kargs):
     """Invoke the wrapped *func* on the reactor thread.

     Runs *func* directly when already on the IO thread; otherwise
     schedules it there via ``reactor.callFromThread``.
     """
     if not isInIOThread():
         return reactor.callFromThread(func, *args, **kargs)
     # TODO(emilon): Do we really want it to block if its on the reactor thread?
     return func(*args, **kargs)
Example #53
0
    def save(self, *args, **kwargs):
        """Persist the pod into the database.

        When called on the reactor (IO) thread the database work is
        deferred to the database thread pool and a Deferred firing with
        the result of `discover_and_sync_pod` is returned. Otherwise the
        same steps run synchronously in the calling thread.
        """
        def check_for_duplicate(power_type, power_parameters):
            # When the Pod is new try to get a BMC of the same type and
            # parameters to convert the BMC to a new Pod. When the Pod is not
            # new the form will use the already existing pod instance to update
            # those fields. If updating the fields causes a duplicate BMC then
            # a validation error will be raised from the model level.
            if self.is_new:
                bmc = BMC.objects.filter(
                    power_type=power_type,
                    power_parameters=power_parameters).first()
                if bmc is not None:
                    if bmc.bmc_type == BMC_TYPE.BMC:
                        # Convert the BMC to a Pod and set as the instance for
                        # the PodForm.
                        bmc.bmc_type = BMC_TYPE.POD
                        bmc.pool = (
                            ResourcePool.objects.get_default_resource_pool())
                        return bmc.as_pod()
                    else:
                        # Pod already exists with the same power_type and
                        # parameters.
                        raise ValidationError("Pod %s with type and "
                                              "parameters already exist." %
                                              bmc.name)

        def update_obj(existing_obj):
            # Apply the cleaned form data onto the (possibly converted)
            # instance without committing; the caller decides when to sync.
            if existing_obj is not None:
                self.instance = existing_obj
            self.instance = super(PodForm, self).save(commit=False)
            self.instance.power_type = power_type
            self.instance.power_parameters = power_parameters
            # If console_log is set, create a tag for the kernel parameters
            # if it does not already exist.  Delete otherwise.
            if self.update_console_log:
                if self.console_log:
                    tag, _ = Tag.objects.get_or_create(
                        name="pod-console-logging",
                        kernel_opts="console=tty1 console=ttyS0")
                    # Add this tag to the pod.
                    self.instance.add_tag(tag.name)
                else:
                    try:
                        tag = Tag.objects.get(name="pod-console-logging")
                        # Remove this tag from the pod.
                        self.instance.remove_tag(tag.name)
                        # Delete the tag if there are no longer any other
                        # pods using it.
                        pods = Pod.objects.filter(
                            tags__contains=['pod-console-logging']).exclude(
                                id=self.instance.id)
                        if not pods:
                            tag.delete()
                    except Tag.DoesNotExist:
                        # There was no tag so just continue.
                        pass

            return self.instance

        power_type = self.cleaned_data['type']
        # Set power_parameters to the generated param_fields.
        power_parameters = {
            param_name: self.cleaned_data[param_name]
            for param_name in self.param_fields.keys()
            if param_name in self.cleaned_data
        }

        if isInIOThread():
            # Running in twisted reactor, do the work inside the reactor.
            d = deferToDatabase(transactional(check_for_duplicate), power_type,
                                power_parameters)
            d.addCallback(partial(deferToDatabase, transactional(update_obj)))
            d.addCallback(lambda _: self.discover_and_sync_pod())
            return d
        else:
            # Perform the actions inside the executing thread.
            existing_obj = check_for_duplicate(power_type, power_parameters)
            if existing_obj is not None:
                self.instance = existing_obj
            self.instance = update_obj(self.instance)
            return self.discover_and_sync_pod()
Example #54
0
    def discover_and_sync_pod(self):
        """Discover and sync the pod information.

        Asks the rack controllers to discover the pod, syncs the discovered
        data into the database, and records which rack controllers can route
        to the pod. Returns a Deferred when called on the reactor thread,
        otherwise the synced instance.
        """
        def update_db(result):
            discovered_pod, discovered = result

            if self.request is not None:
                user = self.request.user
            else:
                user = self.user
            # If this is a new instance it will be stored in the database
            # at the end of sync.
            self.instance.sync(discovered_pod, user)

            # Save which rack controllers can route and which cannot.
            discovered_rack_ids = [
                rack_id for rack_id, _ in discovered[0].items()
            ]
            for rack_controller in RackController.objects.all():
                routable = rack_controller.system_id in discovered_rack_ids
                bmc_route_model = BMCRoutableRackControllerRelationship
                relation, created = bmc_route_model.objects.get_or_create(
                    bmc=self.instance.as_bmc(),
                    rack_controller=rack_controller,
                    defaults={"routable": routable},
                )
                if not created and relation.routable != routable:
                    relation.routable = routable
                    relation.save()
            return self.instance

        if isInIOThread():
            # Running in twisted reactor, do the work inside the reactor.
            d = discover_pod(
                self.instance.power_type,
                self.instance.power_parameters,
                pod_id=self.instance.id,
                name=self.instance.name,
            )
            # Pair the "best" result with the full per-rack discovery data.
            d.addCallback(lambda discovered: (
                get_best_discovered_result(discovered),
                discovered,
            ))

            def catch_no_racks(result):
                # A None best result means no rack controller answered.
                discovered_pod, discovered = result
                if discovered_pod is None:
                    raise PodProblem(
                        "Unable to start the pod discovery process. "
                        "No rack controllers connected.")
                return discovered_pod, discovered

            def wrap_errors(failure):
                # Surface everything as PodProblem; log unexpected failures
                # so their tracebacks are not lost.
                if failure.check(PodProblem):
                    return failure
                else:
                    log.err(failure, "Failed to discover pod.")
                    raise PodProblem(str(failure.value))

            d.addCallback(catch_no_racks)
            d.addCallback(partial(deferToDatabase, transactional(update_db)))
            d.addCallback(request_commissioning_results)
            d.addErrback(wrap_errors)
            return d
        else:
            # Perform the actions inside the executing thread.
            try:
                discovered = discover_pod(
                    self.instance.power_type,
                    self.instance.power_parameters,
                    pod_id=self.instance.id,
                    name=self.instance.name,
                )
            except Exception as exc:
                raise PodProblem(str(exc)) from exc

            # Use the first discovered pod object. All other objects are
            # ignored. The other rack controllers that also provided a result
            # can route to the pod.
            try:
                discovered_pod = get_best_discovered_result(discovered)
            except Exception as error:
                raise PodProblem(str(error))
            if discovered_pod is None:
                raise PodProblem("Unable to start the pod discovery process. "
                                 "No rack controllers connected.")
            update_db((discovered_pod, discovered))
            # The data isn't committed to the database until the transaction is
            # complete. The commissioning results must be sent after the
            # transaction completes so the metadata server can process the
            # data.
            post_commit_do(
                reactor.callLater,
                0,
                request_commissioning_results,
                self.instance,
            )
            # Run commissioning request here
            return self.instance
Example #55
0
def call_in_green_thread(func, *args, **kw):
    """Spawn *func* in a green thread, regardless of the calling thread.

    When not on the reactor (IO) thread, the spawn is first marshalled
    onto the reactor with ``callFromThread``.
    """
    if not threadable.isInIOThread():
        # Hop onto the reactor thread first, then spawn from there.
        from twisted.internet import reactor
        reactor.callFromThread(callInGreenThread, func, *args, **kw)
    else:
        callInGreenThread(func, *args, **kw)
Example #56
0
 def wakeUp(self):
     """Wake up the event loop."""
     # The IO thread is already awake by definition; only a foreign
     # thread needs to poke the waker.
     if threadable.isInIOThread():
         return
     waker = self.waker
     if waker:
         waker.wakeUp()
Example #57
0
    def wakeUp(self):
        """Wake the loop by posting a no-op completion status.

        Only needed when invoked from a thread other than the IO thread.
        """
        if threadable.isInIOThread():
            return

        # Completion callback that discards its arguments; the post itself
        # is what unblocks the loop.
        def _noop(ret, bytes, arg):
            pass

        self.issuePostQueuedCompletionStatus(_noop, None)
Example #58
0
    def register(self, session, session_lock):
        """One-time wiring of the core session components.

        Must run on the reactor (IO) thread. On the first call this
        instantiates every enabled subsystem (torrent/metadata stores,
        database handlers, video server, IPv8, Dispersy, TFTP, search);
        later calls skip straight to the init/notification tail.
        Returns the startup Deferred.
        """
        assert isInIOThread()
        if not self.registered:
            self.registered = True

            self.session = session
            self.session_lock = session_lock

            # On Mac, we bundle the root certificate for the SSL validation since Twisted is not using the root
            # certificates provided by the system trust store.
            if sys.platform == 'darwin':
                os.environ['SSL_CERT_FILE'] = os.path.join(
                    get_lib_path(), 'root_certs_mac.pem')

            if self.session.config.get_torrent_store_enabled():
                from Tribler.Core.leveldbstore import LevelDbStore
                self.torrent_store = LevelDbStore(
                    self.session.config.get_torrent_store_dir())
                if not self.torrent_store.get_db():
                    raise RuntimeError(
                        "Torrent store (leveldb) is None which should not normally happen"
                    )

            if self.session.config.get_metadata_enabled():
                from Tribler.Core.leveldbstore import LevelDbStore
                self.metadata_store = LevelDbStore(
                    self.session.config.get_metadata_store_dir())
                if not self.metadata_store.get_db():
                    raise RuntimeError(
                        "Metadata store (leveldb) is None which should not normally happen"
                    )

            # torrent collecting: RemoteTorrentHandler
            if self.session.config.get_torrent_collecting_enabled():
                from Tribler.Core.RemoteTorrentHandler import RemoteTorrentHandler
                self.rtorrent_handler = RemoteTorrentHandler(self.session)

            # TODO(emilon): move this to a megacache component or smth
            if self.session.config.get_megacache_enabled():
                from Tribler.Core.CacheDB.SqliteCacheDBHandler import (
                    PeerDBHandler, TorrentDBHandler, MyPreferenceDBHandler,
                    VoteCastDBHandler, ChannelCastDBHandler)
                from Tribler.Core.Category.Category import Category

                self._logger.debug('tlm: Reading Session state from %s',
                                   self.session.config.get_state_dir())

                self.category = Category()

                # create DBHandlers
                self.peer_db = PeerDBHandler(self.session)
                self.torrent_db = TorrentDBHandler(self.session)
                self.mypref_db = MyPreferenceDBHandler(self.session)
                self.votecast_db = VoteCastDBHandler(self.session)
                self.channelcast_db = ChannelCastDBHandler(self.session)

                # initializes DBHandlers
                self.peer_db.initialize()
                self.torrent_db.initialize()
                self.mypref_db.initialize()
                self.votecast_db.initialize()
                self.channelcast_db.initialize()

                from Tribler.Core.Modules.tracker_manager import TrackerManager
                self.tracker_manager = TrackerManager(self.session)

            if self.session.config.get_video_server_enabled():
                self.video_server = VideoServer(
                    self.session.config.get_video_server_port(), self.session)
                self.video_server.start()

            # IPv8
            if self.session.config.get_ipv8_enabled():
                from Tribler.pyipv8.ipv8.configuration import get_default_configuration
                ipv8_config = get_default_configuration()
                ipv8_config['port'] = self.session.config.get_dispersy_port()
                ipv8_config['address'] = self.session.config.get_ipv8_address()
                ipv8_config['overlays'] = []
                ipv8_config['keys'] = []  # We load the keys ourselves

                if self.session.config.get_ipv8_bootstrap_override():
                    # Replace the default bootstrap servers with the single
                    # configured override and disable DNS bootstrapping.
                    import Tribler.pyipv8.ipv8.deprecated.community as community_file
                    community_file._DEFAULT_ADDRESSES = [
                        self.session.config.get_ipv8_bootstrap_override()
                    ]
                    community_file._DNS_ADDRESSES = []

                self.ipv8 = IPv8(ipv8_config)

                self.session.config.set_anon_proxy_settings(
                    2, ("127.0.0.1",
                        self.session.config.
                        get_tunnel_community_socks5_listen_ports()))
            # Dispersy
            self.tftp_handler = None
            if self.session.config.get_dispersy_enabled():
                from Tribler.dispersy.dispersy import Dispersy
                from Tribler.dispersy.endpoint import MIMEndpoint
                from Tribler.dispersy.endpoint import IPv8toDispersyAdapter

                # set communication endpoint; reuse the IPv8 endpoint when
                # IPv8 is enabled, otherwise open a dedicated one.
                if self.session.config.get_ipv8_enabled():
                    dispersy_endpoint = IPv8toDispersyAdapter(
                        self.ipv8.endpoint)
                else:
                    dispersy_endpoint = MIMEndpoint(
                        self.session.config.get_dispersy_port())

                working_directory = unicode(
                    self.session.config.get_state_dir())
                self.dispersy = Dispersy(dispersy_endpoint, working_directory)
                self.dispersy.statistics.enable_debug_statistics(False)

                # register TFTP service
                from Tribler.Core.TFTP.handler import TftpHandler
                self.tftp_handler = TftpHandler(self.session,
                                                dispersy_endpoint,
                                                "fffffffd".decode('hex'),
                                                block_size=1024)
                self.tftp_handler.initialize()

            # Torrent search
            if self.session.config.get_torrent_search_enabled(
            ) or self.session.config.get_channel_search_enabled():
                self.search_manager = SearchManager(self.session)
                self.search_manager.initialize()

        if not self.initComplete:
            self.init()

        self.session.add_observer(self.on_tribler_started, NTFY_TRIBLER,
                                  [NTFY_STARTED])
        self.session.notifier.notify(NTFY_TRIBLER, NTFY_STARTED, None)
        return self.startup_deferred
Example #59
0
    def compose(
        self,
        timeout=120,
        dynamic=False,
        skip_commissioning=None,
    ):
        """Compose the machine.

        Internal operation of this form is asynchronous. It will block the
        calling thread until the asynchronous operation is complete. Adjust
        `timeout` to minimize the maximum wait for the asynchronous operation.
        """

        if skip_commissioning is None:
            skip_commissioning = self.get_value_for("skip_commissioning")

        def db_work(client):
            # Database-side preparation: validate overcommit and collect the
            # host interface data needed to build the compose request.
            # Check overcommit ratios.
            over_commit_message = self.pod.check_over_commit_ratios(
                requested_cores=self.get_value_for("cores"),
                requested_memory=self.get_value_for("memory"),
            )
            if over_commit_message:
                raise PodProblem("Unable to compose KVM instance in '%s'. %s" %
                                 (self.pod.name, over_commit_message))

            # Update the default storage pool.
            if self.pod.default_storage_pool is not None:
                power_parameters[
                    "default_storage_pool_id"] = self.pod.default_storage_pool.pool_id

            interfaces = get_known_host_interfaces(self.pod)

            return client, interfaces

        def create_and_sync(result):
            # Persist the freshly composed machine and update the pod's
            # resource hints from the driver's response.
            requested_machine, result = result
            discovered_machine, pod_hints = result
            created_machine = self.pod.create_machine(
                discovered_machine,
                self.request.user,
                skip_commissioning=skip_commissioning,
                dynamic=dynamic,
                interfaces=self.get_value_for("interfaces"),
                requested_machine=requested_machine,
                domain=self.get_value_for("domain"),
                pool=self.get_value_for("pool"),
                zone=self.get_value_for("zone"),
            )
            self.pod.sync_hints(pod_hints)
            return created_machine

        @inlineCallbacks
        def async_compose_machine(result, power_type, power_paramaters,
                                  **kwargs):
            # NOTE(review): "power_paramaters" is a typo'd internal parameter
            # name; the caller passes it positionally so it is harmless.
            client, result = result
            requested_machine = yield deferToDatabase(
                self.get_requested_machine, result)
            result = yield compose_machine(
                client,
                power_type,
                power_paramaters,
                requested_machine,
                **kwargs,
            )
            return requested_machine, result

        power_parameters = self.pod.power_parameters.copy()

        if isInIOThread():
            # Running under the twisted reactor, before the work from inside.
            d = deferToDatabase(transactional(self.pod.get_client_identifiers))
            d.addCallback(getClientFromIdentifiers)
            d.addCallback(partial(deferToDatabase, transactional(db_work)))
            d.addCallback(
                async_compose_machine,
                self.pod.power_type,
                power_parameters,
                pod_id=self.pod.id,
                name=self.pod.name,
            )
            d.addCallback(
                partial(deferToDatabase, transactional(create_and_sync)))
            # NOTE(review): request_commissioning_results(self.pod) is
            # evaluated *now*, at callback-registration time — before
            # create_and_sync has fired — and its return value is merely
            # passed as the ignored extra argument to the lambda. If
            # commissioning results should only be requested once the machine
            # exists, this needs to move into a callback; confirm the
            # intended ordering.
            d.addCallback(
                lambda created_machine, _: created_machine,
                request_commissioning_results(self.pod),
            )
            return d
        else:
            # Running outside of reactor. Do the work inside and then finish
            # the work outside.
            @asynchronous
            def wrap_compose_machine(client_idents, pod_type, parameters,
                                     request, pod_id, name):
                """Wrapper to get the client."""
                d = getClientFromIdentifiers(client_idents)
                d.addCallback(
                    compose_machine,
                    pod_type,
                    parameters,
                    request,
                    pod_id=pod_id,
                    name=name,
                )
                return d

            _, result = db_work(None)
            try:
                requested_machine = self.get_requested_machine(result)
                result = wrap_compose_machine(
                    self.pod.get_client_identifiers(),
                    self.pod.power_type,
                    power_parameters,
                    requested_machine,
                    pod_id=self.pod.id,
                    name=self.pod.name,
                ).wait(timeout)
            except crochet.TimeoutError:
                raise PodProblem(
                    "Unable to compose a machine because '%s' driver "
                    "timed out after %d seconds." %
                    (self.pod.power_type, timeout))
            created_machine = create_and_sync((requested_machine, result))
            # Request commissioning results only after the enclosing
            # transaction commits, so the metadata server sees the new rows.
            post_commit_do(reactor.callLater, 0, request_commissioning_results,
                           self.pod)
            return created_machine
Example #60
0
    def register(self, session, sesslock):
        """One-time wiring of the core session components.

        Must run on the reactor (IO) thread. On the first call this
        instantiates every enabled subsystem (stores, database handlers,
        video server, Dispersy, TFTP, search); later calls skip straight
        to the init/notification tail. Returns the startup Deferred.
        """
        assert isInIOThread()
        if not self.registered:
            self.registered = True

            self.session = session
            self.sesslock = sesslock

            if self.session.get_torrent_store():
                from Tribler.Core.leveldbstore import LevelDbStore
                self.torrent_store = LevelDbStore(
                    self.session.get_torrent_store_dir())

            if self.session.get_enable_metadata():
                from Tribler.Core.leveldbstore import LevelDbStore
                self.metadata_store = LevelDbStore(
                    self.session.get_metadata_store_dir())

            # torrent collecting: RemoteTorrentHandler
            if self.session.get_torrent_collecting():
                from Tribler.Core.RemoteTorrentHandler import RemoteTorrentHandler
                self.rtorrent_handler = RemoteTorrentHandler(self.session)

            # TODO(emilon): move this to a megacache component or smth
            if self.session.get_megacache():
                from Tribler.Core.CacheDB.SqliteCacheDBHandler import (
                    PeerDBHandler, TorrentDBHandler, MyPreferenceDBHandler,
                    VoteCastDBHandler, ChannelCastDBHandler)
                from Tribler.Core.Category.Category import Category

                self._logger.debug('tlm: Reading Session state from %s',
                                   self.session.get_state_dir())

                self.category = Category()

                # create DBHandlers
                self.peer_db = PeerDBHandler(self.session)
                self.torrent_db = TorrentDBHandler(self.session)
                self.mypref_db = MyPreferenceDBHandler(self.session)
                self.votecast_db = VoteCastDBHandler(self.session)
                self.channelcast_db = ChannelCastDBHandler(self.session)

                # initializes DBHandlers
                self.peer_db.initialize()
                self.torrent_db.initialize()
                self.mypref_db.initialize()
                self.votecast_db.initialize()
                self.channelcast_db.initialize()

                from Tribler.Core.Modules.tracker_manager import TrackerManager
                self.tracker_manager = TrackerManager(self.session)
                self.tracker_manager.initialize()

            if self.session.get_videoserver_enabled():
                self.video_server = VideoServer(
                    self.session.get_videoserver_port(), self.session)
                self.video_server.start()

            # Dispersy
            self.tftp_handler = None
            if self.session.get_dispersy():
                from Tribler.dispersy.dispersy import Dispersy
                from Tribler.dispersy.endpoint import StandaloneEndpoint

                # set communication endpoint
                endpoint = StandaloneEndpoint(self.session.get_dispersy_port(),
                                              ip=self.session.get_ip())

                working_directory = unicode(self.session.get_state_dir())
                self.dispersy = Dispersy(endpoint, working_directory)

                # register TFTP service
                from Tribler.Core.TFTP.handler import TftpHandler
                self.tftp_handler = TftpHandler(self.session,
                                                endpoint,
                                                "fffffffd".decode('hex'),
                                                block_size=1024)
                self.tftp_handler.initialize()

            if self.session.get_enable_torrent_search(
            ) or self.session.get_enable_channel_search():
                self.search_manager = SearchManager(self.session)
                self.search_manager.initialize()

        if not self.initComplete:
            self.init()

        self.session.add_observer(self.on_tribler_started, NTFY_TRIBLER,
                                  [NTFY_STARTED])
        self.session.notifier.notify(NTFY_TRIBLER, NTFY_STARTED, None)
        return self.startup_deferred