Example #1
File: upnp.py Project: provegard/pyupnp
    def __call__(self, environ, start_response):
        """
        This function has to be called in a worker thread, not the IO thread.
        """
        rargs = environ['wsgiorg.routing_args'][1]
        controller = rargs['controller']

        # Media Transport
        if controller == 'mt':
            name = rargs['name']
            if name in self.mts:
                return self.mts[name](environ, start_response)
            else:
                return not_found(environ, start_response)

        if controller != 'upnp':
            return not_found(environ, start_response)

        try:
            udn = rargs['udn']
            if isInIOThread():
                # TODO: read request body
                return self.devices[udn](environ, start_response)
            else:
                # read request body
                input = environ['wsgi.input']
                environ['upnp.body'] = input.read(self.SOAP_BODY_MAX)
                # call the app in IO thread
                args = [udn, environ, start_response]
                blockingCallFromThread(self.reactor, self._call_handler, args)
                return args[3]
        except Exception, e:
            #print e
            #print 'Unknown access: ' + environ['PATH_INFO']
            return not_found(environ, start_response)
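The docstring's worker-thread requirement above is the contract shared by every example on this page: twisted.internet.threads.blockingCallFromThread(reactor, f, *args, **kwargs) must be called from a non-reactor thread; it runs f in the reactor thread and blocks the caller until the result is available, re-raising any failure. A minimal, self-contained sketch of that contract (the names here are illustrative, not taken from the project above):

from twisted.internet import reactor
from twisted.internet.threads import blockingCallFromThread, deferToThread

def reactor_side(name):
    # Runs in the reactor thread; returns a plain value.
    return "hello, %s" % name

def worker():
    # Runs in a thread-pool thread; blocks until the reactor-side call returns.
    print(blockingCallFromThread(reactor, reactor_side, "world"))

deferToThread(worker).addBoth(lambda _: reactor.stop())
reactor.run()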
Example #2
def syncRunServer(srv, host=C.MANAGER_HOST, port=C.MANAGER_PORT, password=None):
    """Run a labrad server of the specified class in a synchronous context.

    Returns a context manager to be used with python's with statement that
    will yield when the server has started and then shut the server down after
    the context is exited.
    """

    if password is None:
        password = C.PASSWORD

    srv.password = password

    @inlineCallbacks
    def start_server():
        connector = reactor.connectTCP(host, port, srv)
        yield srv.onStartup()
        returnValue(connector)

    @inlineCallbacks
    def stop_server():
        yield srv.onShutdown()

    thread.startReactor()
    connector = blockingCallFromThread(reactor, start_server)
    try:
        yield
    finally:
        try:
            connector.disconnect()
            blockingCallFromThread(reactor, stop_server)
        except Exception:
            pass # don't care about exceptions here
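The docstring describes syncRunServer as a context manager, but the function above is a plain generator, so it is presumably wrapped with contextlib.contextmanager elsewhere in the module (that wrapping is not shown in this excerpt). The start/yield/teardown shape it relies on is the standard generator-based context-manager pattern; a self-contained analogue with purely illustrative names:

from contextlib import contextmanager

@contextmanager
def sync_run(resource):
    resource.start()          # corresponds to start_server() above
    try:
        yield                 # the caller's "with" body runs here
    finally:
        resource.stop()       # corresponds to disconnect()/stop_server() above

class FakeServer(object):
    def start(self): print("server started")
    def stop(self): print("server stopped")

with sync_run(FakeServer()):
    print("talking to the server")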
Example #3
    def __launch_blocker_thread(self, user_id, user_name, x11_display, linuxsb):
        try:
            proclist = gtop.proclist(gtop.PROCLIST_KERN_PROC_UID, int(user_id))
            env_lang_var = "C"

            if len(proclist) > 0:
                for proc in proclist:
                    lang_var = (
                        Popen('cat /proc/%s/environ | tr "\\000" "\\n" | grep ^LANG= ' % proc, shell=True, stdout=PIPE)
                        .stdout.readline()
                        .strip("\n")
                    )
                    if len(lang_var) > 0:
                        env_lang_var = lang_var.replace("LANG=", "")
                        break

            cmd = ["su", user_name, "-c", "LANG=%s DISPLAY=%s nanny-desktop-blocker" % (env_lang_var, x11_display)]
            print cmd

            p = Popen(cmd)
            print "[LinuxSessionFiltering] launching blocker (pid : %s)" % p.pid

            while p.poll() == None:
                time.sleep(1)
                b = threads.blockingCallFromThread(reactor, linuxsb.is_user_blocked, user_id)
                if b == False:
                    p.terminate()
                    print "[LinuxSessionFiltering] Unblocking session %s" % user_id
                    return

            print "[LinuxSessionFiltering] blocker terminated by user interaction"
            threads.blockingCallFromThread(reactor, linuxsb.blocker_terminate_from_thread, user_id, p.poll())
        except:
            print "[LinuxSessionFiltering] blocker terminated by exception"
            threads.blockingCallFromThread(reactor, linuxsb.blocker_terminate_from_thread, user_id, 1)
Example #4
	def setOption(self, opt, value, channel=None, inreactor=False, **kwargs):
		if not self.event.isPM() and channel is None:
			if inreactor: self._botcont._settings.setOption(opt, value, channel=self.event.target, **kwargs)
			else: blockingCallFromThread(reactor, self._botcont._settings.setOption, opt, value, channel=self.event.target, **kwargs)
		else:
			if inreactor: self._botcont._settings.setOption(opt, value, channel=channel, **kwargs)
			else: blockingCallFromThread(reactor, self._botcont._settings.setOption, opt, value, channel=channel, **kwargs)
Example #5
File: threaded.py Project: jafd/txfileio
 def execute(self, op):
     """
     @param op: operation to execute
     @return: None
     
     
     """
     self.fd = op.fd
     op.state = 'running'
     self.busy = True
     try:
         if (op.fd is None) and (op.name not in ('open', '_stop')):
             raise RuntimeError("Calling a file operation {0} on None".format(op.name))
         if op.name == '_stop':
             self.running = False
             result = True
         elif op.name == 'open':
             result = self.manager.take(open(*op.args, **op.kwargs))
             op.fd = result
         elif op.name == 'interaction':
             result = op.callable(op.fd.fd, *op.args, **op.kwargs)
         else:
             result = getattr(op.fd.fd, op.name)(*op.args, **op.kwargs)
         op.state = 'success'
         threads.blockingCallFromThread(self.manager.reactor, op.deferred.callback, result)
     except Exception as e:
         op.state = 'failure'
         threads.blockingCallFromThread(self.manager.reactor, op.deferred.errback, e)
     finally:
         self.busy = False
Example #6
def main(iface):
    ret = 0
    try:
        a = AutoDHTServer()
        a.start(iface)

        b = AutoDHTServer()
        b.start(iface)

        time.sleep(4)

        print a.set(key="APA", value="banan")

        print a.get(key="APA")
        print b.get(key="APA")

        a.stop()
        b.stop()

    except:
        traceback.print_exc()
        ret = 1

    finally:
        if reactor.running:
            threads.blockingCallFromThread(reactor, reactor.stop)

    return ret
Example #7
def update(event, bot):
	""" update will check for git update and restart bot if core files need updating. """

	gitpath = bot.getOption("git_path", module="pbm_updaterelaunch")
	if not gitpath:
		gitpath = "git"

	check_output([gitpath, "fetch"])
	changes = check_output([gitpath, "diff", "--name-status", "master", "origin/master"])
	print "CHANGES:", changes
	corechange = False
	modchange = False
	for line in changes.splitlines():
		if line.lstrip("M\t").startswith("modules/") or line.lstrip("A\t").startswith("modules/"):
			modchange = True
		elif line.endswith(".py"):
			corechange = True
	check_output([gitpath, "merge", "origin/master"])

	if corechange:
		print "RESTARTING BOT"
		#restart bot
		blockingCallFromThread(reactor, Settings.shutdown, True)

	elif modchange:
		#reload
		if bot.isModuleAvailable("pbm_reload"):
			bot.getModule("pbm_reload").admin_reload_bot(event, bot)
		else:
			bot.say("Module(s) updated but can't reload. reload module not available.")
	else:
		bot.say("Already up-to date.")
Example #8
def syncRunServer(srv, host=C.MANAGER_HOST, port=None, username=None,
                  password=None, tls_mode=C.MANAGER_TLS):
    """Run a labrad server of the specified class in a synchronous context.

    Returns a context manager to be used with python's with statement that
    will yield when the server has started and then shut the server down after
    the context is exited.
    """
    from labrad import protocol

    tls_mode = C.check_tls_mode(tls_mode)

    if port is None:
        port = C.MANAGER_PORT_TLS if tls_mode == 'on' else C.MANAGER_PORT

    @inlineCallbacks
    def start_server():
        p = yield protocol.connect(host, port, tls_mode, username, password)
        yield srv.startup(p)

    @inlineCallbacks
    def stop_server():
        srv.disconnect()
        yield srv.onShutdown()

    thread.startReactor()
    blockingCallFromThread(reactor, start_server)
    try:
        yield
    finally:
        try:
            blockingCallFromThread(reactor, stop_server)
        except Exception:
            pass # don't care about exceptions here
Example #9
 def open(self, path, flags):
   if threads.blockingCallFromThread(reactor, self.file_db.file_exists, self.key, path):
     file_path = os.path.join(self.file_dir, path[1:])
     if not self.file_is_up_to_date(file_path, path):
       # we need to find this file on the dht
       threads.blockingCallFromThread(reactor, self.file_service.download, path, file_path, self.key, True)
     
   return os.open(os.path.join(self.file_dir, path[1:]), flags)
Example #10
def wrapBlocking(f, *a, **kw):
    """This wraps a function to make sure all is halted until the function
    is done. This works also for functions that return deferreds."""
    try:
        threads.blockingCallFromThread(reactor, _wrapBlocking, f, *a, **kw)
    #        threads.blockingCallFromThread(reactor,f,*a,**kw)
    except:
        print "An exception was raised when wrapBlocking..."
Example #11
 def create(self, path, mode):
   threads.blockingCallFromThread(reactor, self.file_db.add_file, self.key, path, mode, 0)
   real_path = os.path.join(self.file_dir, path[1:])
   dir_path = os.path.dirname(real_path)
   if not os.path.exists(dir_path):
     self.log('create dir: {}'.format(dir_path))
     os.makedirs(dir_path)
   self.log('create file: {}'.format(real_path))
   return os.open(real_path, os.O_WRONLY | os.O_CREAT, mode)
Example #12
 def flush(self, path, fh):
   os.fsync(fh)
   if fh in self.updateables:
     full_file_path = os.path.join(self.file_dir, path[1:])
     mtime = threads.blockingCallFromThread(reactor, self.file_db.update_file_mtime, self.key, path)
     threads.blockingCallFromThread(reactor, self.file_db.update_size, self.key, path, os.path.getsize(full_file_path))
     reactor.callFromThread(self.file_service.publish_file, self.key, path, full_file_path, mtime)
     self.updateables.remove(fh)
   return 0
Example #13
def teardown(config, store):
    if reactor.running:
        tangelo.log_info("VTKWEB", "Shutting down Twisted reactor")
        threads.blockingCallFromThread(reactor, reactor.stop)

    if "processes" in store:
        tangelo.log_info("VTKWEB", "Terminating VTKWeb processes")
        for p in store["processes"].values():
            p["process"].terminate()
            p["process"].wait()
Example #14
 def testDataReceived(self):
     self.assertNotEqual(self.protocol,None)
     threads.blockingCallFromThread(reactor,self.protocol.dataReceived,"This is a test line")
     threads.blockingCallFromThread(reactor,self.protocol.transport.loseConnection)
     unity.wait(0.1)
     f = open("testing.txt","r")
     self.assertNotEqual(f,None)
     l = f.readline()
     self.assertEqual(l,"This is a test line")
     f.close()
Example #15
 def run(self):
     sys.stderr.write("SshClientFactory running!\n")
     self.factory = SSHFactory(self)
     self.sem = Semaphore(0)
     def _connectLater():
         sys.stderr.write("SshClientFactory connecting asynchronously\n")
         reactor.connectTCP(self.host, self.port, self.factory)      #@UndefinedVariable
         sys.stderr.write("SshClientFactory connected\n")
     threads.blockingCallFromThread(reactor, _connectLater)          #@UndefinedVariable
     self.sem.acquire()
Example #16
 def testDataReceived(self):
     self.assertNotEqual(self.protocol,None)
     threads.blockingCallFromThread(reactor,self.protocol.transport.write,"Start something")
     threads.blockingCallFromThread(reactor,self.protocol.transport.loseConnection)
     unity.wait(0.1)
     f = open("testingout.txt","r")
     self.assertNotEqual(f,None)
     for i in range(0,100):
         l = f.readline()
         self.assertEqual(l,"This is a test line" + str(i) +"\n")
     f.close()
Example #17
 def stop(self):
     """
     Stop the connector, closing the connection.
     The Reactor loop remains active as the reactor cannot be restarted.
     """
     if self._host:
         #threads.blockingCallFromThread(reactor, self._factory.stopTrying)
         threads.blockingCallFromThread(reactor, self._disconnect)
     else:
         self._database = None
         self._stock_exchange.stop()
         self._stock_exchange = None
Example #18
 def start(self, url=None, request=None, response=None):
     if url:
         self.fetch(url)
     elif request:
         self.fetch(request)
     elif response:
         request = response.request
         self.populate_vars(request, response)
     else:
         self.populate_vars()
     start_python_console(self.vars)
     threads.blockingCallFromThread(reactor, self.engine.stop)
Example #19
    def _resolveQuery(self, session, objects, query):
        """Resolve a L{Query}.

        @param session: The L{FluidinfoSession} for the request.
        @param objects: The L{SecureObjectAPI} to use to fetch object IDs.
        @param query: The L{Query} to resolve.
        @return: A C{list} of object ID C{str}s that match the query.
        """
        try:
            result = objects.search([query])
        except UnknownPathError as error:
            session.log.exception(error)
            unknownPath = error.paths[0]
            raise TNonexistentTag(unknownPath.encode('utf-8'))
        except PermissionDeniedError as error:
            session.log.exception(error)
            deniedPath, operation = error.pathsAndOperations[0]
            raise TNonexistentTag(deniedPath)

        try:
            with session.timer.track('index-search'):
                result = blockingCallFromThread(reactor, result.get)
        except SearchError as error:
            session.log.exception(error)
            raise TParseError(query, error.message)

        return result[query]
Example #20
def help(event, bot):
	""" help [argument].  If argument is specified, get the help string for that command.
	Otherwise list all commands (same as commands function).
	"""
	cmd, arg = argumentSplit(event.argument, 2)
	# other modules should probably not do this:
	if cmd:
		cmd_mappings = blockingCallFromThread(reactor, _filter_mappings, bot, event.isPM, cmd)
		if cmd_mappings:
			for mapping in cmd_mappings:
				if arg:
					h = functionHelp(mapping.function, arg)
					if h: bot.say(h)
					else: bot.say("No help for (%s) available." % cmd)
				else:
					h = functionHelp(mapping.function)
					if h:
						command = mapping.command
						if isIterable(command) and len(command) > 1:
							bot.say("%s Aliases: %s" % (h, ", ".join(command)))
						else:
							bot.say(h)
					else:
						bot.say("No help for (%s) available." % cmd)
		else:
			bot.say("Command %s not found." % cmd)
	else:
		list_commands(bot, event.isPM())
Example #21
def list_commands(bot, pm=False):
	cmds = set()
	for mapping in blockingCallFromThread(reactor, _filter_mappings, bot, pm):
		cmds.add(mapping.command[0])
	cmds = list(cmds)
	cmds.sort()
	bot.say(" ".join(cmds))
Example #22
File: timer.py Project: Clam-/pyBurlyBot
	def restarttimer(cls, name):
		try:
			if name.startswith("_"):
				raise TimerInvalidName("Invalid name (%s)." % name)
		except AttributeError:
			raise TimerInvalidName("Invalid name (%s)." % name)
		return blockingCallFromThread(reactor, cls._restarttimer, name)
Example #23
 def connect(self, host, port):
     """
     Start the reactor.
     """
     logging.info("Connecting host '%s' on port '%d'." % (host, port))
     self._server_connector = ServerConnector(self._post_network_event)
     self._connector = blockingCallFromThread(reactor, reactor.connectTCP, host, port, self._server_connector)
Example #24
    def send_message_blocking(self, message):
        """
        Call from another thread to send a message to the server and wait for the result.
        Accepts a serializable message.
        Based on the response either the result is returned or an exception is raised.

        Parameters
        ----------
        message : :class:`quartjes.connector.messages.Message`
            Message object to send to the server.
        
        Returns
        -------
        result
            The response returned by the server.
            
        Raises
        ------
        MessageHandleError
            Something went wrong while handling the message on the server.
        ConnectionError
            There is an issue with the connection to the server.
        TimeoutError
            No response was received within the set timeout.
        """
        serial_message = create_message_string(message)
        try:
            result_msg = threads.blockingCallFromThread(reactor, self._r_send_message_and_wait, message.id, serial_message)
            if result_msg.result_code > 0:
                raise MessageHandleError(error_code=result_msg.result_code, error_details = result_msg.result)
            return result_msg.result
        except TimeoutError:
            self._waiting_messages.pop(message.id, None)
            raise
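The Raises section above depends on a property of blockingCallFromThread that is easy to miss: an exception raised by the reactor-side call (or by an errback of the Deferred it returns) is re-raised in the calling thread, which is what lets this method re-raise TimeoutError and turn a bad result_code into MessageHandleError. A generic, self-contained illustration (not quartjes code):

from twisted.internet import reactor
from twisted.internet.threads import blockingCallFromThread, deferToThread

def reactor_side():
    raise ValueError("server said no")      # raised in the reactor thread

def worker():
    try:
        blockingCallFromThread(reactor, reactor_side)
    except ValueError as e:
        print("re-raised in the calling thread: %s" % e)

deferToThread(worker).addBoth(lambda _: reactor.stop())
reactor.run()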
Example #25
 def file_is_up_to_date(self, file_path_on_disk, path):
   self.log('Is file up to date? {}'.format(file_path_on_disk))
   if not os.path.isfile(file_path_on_disk):
     return False
   if os.stat(file_path_on_disk).st_mtime < threads.blockingCallFromThread(reactor, self.file_db.get_file_mtime, self.key, path):
     return False
   return True
Example #26
    def add_to_download(self, item, eps_no):
        '''
        Add the current episode to the downloader. Once the download has been added, update the
        episode status and add a torrent_file record.
        :param item: the item of the corresponding episode; it contains an enclosure list holding the magnet URI
        :param eps_no: the episode number
        :return: the episode number (the return value is not used)
        '''
        magnet_uri = item.enclosures[0].href
        torrent_file = yield threads.blockingCallFromThread(reactor, download_manager.download, magnet_uri, self.bangumi_path)

        if torrent_file is None:
            logger.warn('episode %s of %s added failed', eps_no, self.bangumi.name)
            returnValue(eps_no)
        else:

            episode = None
            for eps in self.episode_list:
                if eps_no == eps.episode_no:
                    episode = eps
                    break

            if not isinstance(episode.torrent_files, list):
                episode.torrent_files = []

            episode.torrent_files.append(torrent_file)

            episode.status = Episode.STATUS_DOWNLOADING

            logger.info('episode %s of %s added', eps_no, self.bangumi.name)

            returnValue(eps_no)
Example #27
 def blocking_call(self):
     try:
         result = threads.blockingCallFromThread(reactor, os.system, self.run_me) 
     except:
         log.debug("Netalyzr had an error, please see the log file: %s" % self.output_file)
     finally:
         self.clean_up()
Example #28
def configure():
    logger.debug("Configuring pipeline run server...")
    try:
        data = json.loads(request.data)
        rsc = RunServerConfiguration(data)
        status = None
        if server_model.config == None:
            server_model.config = rsc
            msg = "New configuration set for the pipeline run server."
            status = CONFIG_OK
        elif server_model.config != rsc:
            msg = "Configuration not accepted for the pipeline run server since another configuration has already been set."
            status = CONFIG_NOT_ACCEPTED
        else:
            msg = "The same configuration has already been set for the pipeline run server. Nothing to do."
            status = CONFIG_OK
        
        if status==CONFIG_OK: 
            drm_status, drm_msg = blockingCallFromThread(reactor, _configure_drm, rsc)
            msg += "\n%s"%drm_msg
            if drm_status != CONFIG_OK:
                status=CONFIG_ERROR

        logger.info(msg)
        response = ConfigurationResponse(status, msg)
        return Response(json.dumps(response.__dict__), mimetype="application/json")

    except:
        msg = "An error occured while configuring the run server. Reason: \n%s" % traceback.format_exc()
        logger.error(msg)
        response = ConfigurationResponse(CONFIG_ERROR, msg)
        return Response(json.dumps(response.__dict__), mimetype="application/json")
Example #29
 def getattr(self, path, fh=None):
   result = threads.blockingCallFromThread(
       reactor, self.file_db.getattr, self.key, path)
   if result:
     return result
   else:
     raise FuseOSError(ENOENT)
Example #30
    def _perform_updates(self):
        """This is the main working function of the PDConfigurer class.
            It should be executed as a separate thread, it does the following:
                checks for any updates to perform
                does them
                responds to the server
                removes the update
                checks for more updates
                    if more exist it calls itself again more quickly
                    else it puts itself to sleep for a little while
        """
        if settings.CHECK_DOCKER:
            ready = dockerMonitor.ensureReady()
            if not ready:
                out.warn("Docker does not appear to be running.  "
                            "Most functionality with containers will be broken.")

            ready = containerdMonitor.ensureReady()
            if not ready:
                out.warn("Docker containerd does not appear to be running.  "
                            "Most functionality with containers will be broken.")

        # add any chutes that should already be running to the front of the
        # update queue before processing any updates
        startQueue = reloadChutes()
        self.updateLock.acquire()
        self.updateQueue.append(self._make_router_update("prehostconfig"))
        self.updateQueue.append(self._make_router_update("inithostconfig"))
        self.updateQueue.extend(startQueue)
        self.updateLock.release()

        # Always perform this work
        while self.reactor.running:
            # Check for new updates
            change = self._get_next_update()
            if change is None:
                time.sleep(1)
                continue

            self._perform_update(change)

            # Apply a batch of updates and when the queue is empty, send a
            # state report.  We're not reacquiring the mutex here because the
            # worst case is we send out an extra state update.
            if len(self.active_changes) == 0 and nexus.core.provisioned():
                threads.blockingCallFromThread(self.reactor,
                        reporting.sendStateReport)
Example #31
 def catcher_func(*args, **kwargs):
     if _WampSession is not None:
         try:
             return threads.blockingCallFromThread(
                 reactor, _WampSession.call, wampfuncname, *args,
                 **kwargs)
         except TransportLost:
             confnodesroot.logger.write_error(_("Connection lost!\n"))
             confnodesroot._SetConnector(None)
         except Exception:
             errmess = traceback.format_exc()
             confnodesroot.logger.write_error(errmess + "\n")
             print(errmess)
             # confnodesroot._SetConnector(None)
     return PLCObjDefaults.get(funcname)
Example #32
 def fetch(self, request_or_url, spider=None):
     if isinstance(request_or_url, Request):
         request = request_or_url
         url = request.url
     else:
         url = any_to_uri(request_or_url)
         request = Request(url, dont_filter=True)
         request.meta['handle_httpstatus_all'] = True
     response = None
     try:
         response, spider = threads.blockingCallFromThread(
             reactor, self._schedule, request, spider)
     except IgnoreRequest:
         pass
     self.populate_vars(response, request, spider)
Example #33
 def update_description(self):
     """
     This method updates the current description for the proxy object. It is
     blocking, so don't call it from the main thread. If it returns, you are
     assured that the description is up to date. Throws an exception if no
     answer is received.
     """
     with self.supervisor:
         if logger.isEnabledFor(logging.DEBUG):
             logger.debug("Thread blocking function called")
         element = threads.blockingCallFromThread(reactor,
                                                  self.control.query,
                                                  SERVICE_FULL_DESCRIPTION,
                                                  self.peerid)
         self.update(element)
Example #34
File: main.py Project: yorig/tribler
    def start(self, introduce_port):
        def start_community():
            if self.crawl_keypair_filename:
                keypair = read_keypair(self.crawl_keypair_filename)
                member = self.dispersy.get_member(
                    private_key=self.dispersy.crypto.key_to_bin(keypair))
                cls = TunnelCommunityCrawler
            else:
                member = self.dispersy.get_new_member(u"curve25519")
                cls = HiddenTunnelCommunity
            self.community = self.dispersy.define_auto_load(
                cls, member, (self.session, self.settings), load=True)[0]

            self.session.set_anon_proxy_settings(
                2, ("127.0.0.1",
                    self.session.get_tunnel_community_socks5_listen_ports()))
            if introduce_port:
                self.community.add_discovered_candidate(
                    Candidate(('127.0.0.1', introduce_port), tunnel=False))

        blockingCallFromThread(reactor, start_community)

        self.session.set_download_states_callback(
            self.download_states_callback, False)
Example #35
    def fetch(self, request_or_url):
        if isinstance(request_or_url, Request):
            request = request_or_url
            url = request.url
        else:
            url = any_to_uri(request_or_url)
            request = Request(url)

        response = None
        try:
            response = threads.blockingCallFromThread(reactor, self._schedule,
                                                      request)
        except:
            traceback.print_exc()
        self.populate_vars(request, response)
Example #36
File: shipper.py Project: philk/shipper
    def build(self, path=None, fobj=None, tag=None, quiet=False):
        """Run build of a container from buildfile
        that can be passed as local/remote path or file object(fobj)
        """
        dockerfile = DockerFile(path, fobj)

        def call():
            deferreds = []
            for host in self.hosts:
                deferreds.append(
                    self.c.build(host, dockerfile, tag=tag, quiet=quiet))
            return defer.gatherResults(deferreds, consumeErrors=True)

        responses = threads.blockingCallFromThread(reactor, call)
        return [Response(h, 200, r) for h, r in zip(self.hosts, responses)]
Example #37
 def getConnectedPeers(self, cname):
     """
     This method returns a list of the connected peer IDs for the given
     connector name.
     """
     conn = self.connectors[cname]
     with self.supervisor:
         if logger.isEnabledFor(logging.DEBUG):
             logger.debug("Thread blocking function called")
         element = threads.blockingCallFromThread(
             reactor, self.control.query,
             REQUEST_CONTROL_QUERY % (conn.xml_type, cname), self.peerid)[0]
         el = element.find('peers')
         res = [Peerid(e.text) for e in el.findall('peer')]
     return res
Example #38
def update(event, bot):
    """ update will check for git update and restart bot if core files need updating. """

    gitpath = bot.getOption("git_path", module="pbm_updaterelaunch")
    if not gitpath:
        gitpath = "git"

    check_output([gitpath, "fetch"])
    changes = check_output(
        [gitpath, "diff", "--name-status", "master", "origin/master"])
    print "CHANGES:", changes
    corechange = False
    modchange = False
    for line in changes.splitlines():
        if line.lstrip("M\t").startswith("modules/") or line.lstrip(
                "A\t").startswith("modules/"):
            modchange = True
        elif line.endswith(".py"):
            corechange = True
    check_output([gitpath, "merge", "origin/master"])

    if corechange:
        print "RESTARTING BOT"
        #restart bot
        blockingCallFromThread(reactor, Settings.shutdown, True)

    elif modchange:
        #reload
        if bot.isModuleAvailable("pbm_reload"):
            bot.getModule("pbm_reload").admin_reload_bot(event, bot)
        else:
            bot.say(
                "Module(s) updated but can't reload. reload module not available."
            )
    else:
        bot.say("Already up-to date.")
Example #39
    def blocking_call_from_thread(self, callback, timeout):
        """Call the given function from a thread, and wait for the result synchronously
        for as long as the timeout will allow.

        Args:
            callback: Callable function to be invoked from the thread.
            timeout (:obj: int): Number of seconds to wait for the response before
                raising an exception.

        Returns:
            The results from the callback, or a timeout exception.
        """
        result_placeholder = defer.Deferred()
        if timeout:
            result_placeholder.addTimeout(timeout, reactor, onTimeoutCancel=self.raise_timeout_exception)
        return threads.blockingCallFromThread(reactor, callback, result_placeholder)
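What this helper sets up is a bounded wait: the Deferred handed to the callback is given a timeout on the reactor's clock, and when the timeout cancels it the failure is re-raised in the blocked thread. The class above installs a custom onTimeoutCancel handler (raise_timeout_exception, not shown here); with Twisted's default handler the cancellation surfaces as twisted.internet.defer.TimeoutError. A minimal, self-contained sketch of that default path (illustrative names, not this class's API):

from twisted.internet import reactor, defer
from twisted.internet.threads import blockingCallFromThread, deferToThread

def reactor_side():
    d = defer.Deferred()       # never fired on purpose
    d.addTimeout(1, reactor)   # cancelled by the reactor after one second
    return d

def worker():
    try:
        blockingCallFromThread(reactor, reactor_side)
    except defer.TimeoutError:
        print("timed out waiting for the reactor-side result")

deferToThread(worker).addBoth(lambda _: reactor.stop())
reactor.run()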
Example #40
 def do_get(self, cmd):
     """get KEY: show the value of KEY"""
     key = cmd
     try:
         value = threads.blockingCallFromThread(
             self.reactor,
             self.proto.get,
             key,
         )
     except KeyError:
         self.stdout.write("Error: Key '{k}' not found!\n".format(k=key))
         return
     except Exception as e:
         self.stdout.write("Error: {e}\n".format(e=e))
         return
     else:
         self.stdout.write("{v}\n".format(v=value))
Example #41
    def parallel(self, method, params):
        def call():
            if isinstance(params, dict):
                # we assume that it's all the same call to all default hosts
                # with the same arguments
                deferreds = [method(h, **copy(params)) for h in self.hosts]
            elif isinstance(params, list):
                # we assume that it's a list of tuples (host, kwargs)
                # (useful in case if you have parallel calls to
                # different endpoints)
                deferreds = []
                for host, kwargs in params:
                    deferreds.append(method(host, **copy(kwargs)))

            return defer.gatherResults(deferreds, consumeErrors=True)

        return threads.blockingCallFromThread(reactor, call)
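The pattern here, also used in the build() example above, is worth spelling out: the Deferreds are created inside the call() closure, which runs in the reactor thread, and the worker thread blocks exactly once on the gathered result. A minimal, self-contained version of that shape (illustrative work in place of the HTTP calls):

from twisted.internet import reactor, defer, task
from twisted.internet.threads import blockingCallFromThread, deferToThread

def call():
    # Built in the reactor thread; gatherResults preserves the input order.
    deferreds = [task.deferLater(reactor, 0.1 * i, lambda i=i: i * i) for i in range(3)]
    return defer.gatherResults(deferreds, consumeErrors=True)

def worker():
    print(blockingCallFromThread(reactor, call))   # [0, 1, 4]

deferToThread(worker).addBoth(lambda _: reactor.stop())
reactor.run()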
Example #42
File: shell.py Project: linsicai/scrapy
 def fetch(self, request_or_url, spider=None, redirect=True, **kwargs):
     if isinstance(request_or_url, Request):
         request = request_or_url
     else:
         url = any_to_uri(request_or_url)
         request = Request(url, dont_filter=True, **kwargs)
         if redirect:
             request.meta['handle_httpstatus_list'] = SequenceExclude(range(300, 400))
         else:
             request.meta['handle_httpstatus_all'] = True
     response = None
     try:
         response, spider = threads.blockingCallFromThread(
             reactor, self._schedule, request, spider)
     except IgnoreRequest:
         pass
     self.populate_vars(response, request, spider)
Example #43
    def handleBanCommand(self, player, arguments):
        if player.user.Moderator:
            self.logger.info("%s is attempting to ban %s." %
                             (player.user.Username, arguments.Username))

            playerId = blockingCallFromThread(reactor, self.getPlayer,
                                              player.session,
                                              arguments.Username, Penguin.ID)

            if playerId is not None:
                reactor.callFromThread(moderatorBan, player, playerId,
                                       arguments.Duration, arguments.Reason)

                self.logger.info(
                    "%s has banned %s for %s hours using the !BAN command." %
                    (player.user.Username, arguments.Username,
                     arguments.Duration))
Example #44
    def connect(self):
        """Connect to wpa_supplicant over D-Bus

        :returns: Remote D-Bus proxy object of the root wpa_supplicant interface
        :rtype: :class:`~WpaSupplicant`
        """

        if not self._reactor.running:
            raise ReactorNotRunning('Twisted Reactor must be started (call .run())')

        @defer.inlineCallbacks
        def get_conn():
            self._reactor.thread_name = threading.currentThread().getName()
            conn = yield client.connect(self._reactor, busAddress='system')
            defer.returnValue(conn)

        conn = threads.blockingCallFromThread(self._reactor, get_conn)
        return WpaSupplicant('/fi/w1/wpa_supplicant1', conn, self._reactor, )
Example #45
def _eval(deferred, reactor):
    """Evaluate a deferred on a given reactor and return the result

        This function is safe to call with a deferred that has already been evaluated.
        """
    @defer.inlineCallbacks
    def closure():
        if deferred.called:
            result = deferred.result
        else:
            result = yield deferred

        defer.returnValue(result)

    if threading.currentThread().getName() == reactor.thread_name:
        return closure()
    else:
        return threads.blockingCallFromThread(reactor, closure)
Example #46
    def wait_for_connection(self, timeout=None):
        """
        Wait for the connection to be established.
        Provide a timeout value to control the time waited for a connection. If no
        timeout value is provided, the default value is used.

        Parameters
        ----------
        timeout : int
            Time in seconds to wait for a connection to be established. If not
            given, the default will be used.

        Raises
        ------
        TimeoutError
            No connection was established within the timeout.
        """
        return threads.blockingCallFromThread(reactor, self._r_wait_for_connection, timeout)
Example #47
    def write(self, data):
        """
        The WSGI I{write} callable returned by the I{start_response} callable.
        The given bytes will be written to the response body, possibly flushing
        the status and headers first.

        This will be called in a non-I/O thread.
        """

        # PEP-3333 states:
        #
        #   The server or gateway must transmit the yielded bytestrings to the
        #   client in an unbuffered fashion, completing the transmission of
        #   each bytestring before requesting another one.
        #
        # This write() method is used for the imperative and (indirectly) for
        # the more familiar iterable-of-bytestrings WSGI mechanism. It uses
        # C{blockingCallFromThread} to schedule writes. This allows exceptions
        # to propagate up from the underlying HTTP implementation. However,
        # that underlying implementation does not, as yet, provide any way to
        # know if the written data has been transmitted, so this method
        # violates the above part of PEP-3333.
        #
        # PEP-3333 also says that a server may:
        #
        #   Use a different thread to ensure that the block continues to be
        #   transmitted while the application produces the next block.
        #
        # Which suggests that this is actually compliant with PEP-3333,
        # because writes are done in the reactor thread.
        #
        # However, providing some back-pressure may nevertheless be a Good
        # Thing at some point in the future.

        def wsgiWrite(started):
            if not started:
                self._sendResponseHeaders()
            self.request.write(data)

        try:
            return blockingCallFromThread(self.reactor, wsgiWrite,
                                          self.started)
        finally:
            self.started = True
Example #48
    def render_PUT(self, request):
        """ Converts arguments into command-line counterparts and executes the omsh command.

        Parameters passed as 'arg' are converted into positional arguments, others are converted into
        named parameters:

            PUT /bin/ls?arg=/some/path&arg=/another/path&-l&--recursive

        thus translates to:

            /bin/ls /some/path /another/path -l --recursive

        Allows blocking (synchronous) and non-blocking operation using the 'asynchronous' parameter (any
        value will trigger it). Synchronous operation requires two threads to function.
        """
        def named_args_filter_and_flatten(nargs):
            for name, vallist in nargs:
                if name not in ('arg', 'asynchronous'):
                    for val in vallist:
                        yield name
                        yield val

        def convert_args(args):
            tokenized_args = args.get('arg', [])
            return tokenized_args + list(
                named_args_filter_and_flatten(args.items()))

        protocol = DetachedProtocol()
        protocol.interaction = get_interaction(
            self.context) or request.interaction

        args = convert_args(request.args)
        args = filter(None, args)
        cmd = self.context.cmd(protocol)
        # Setting write_buffer to a list makes command save the output to the buffer too
        cmd.write_buffer = []
        d0 = defer.Deferred()

        try:
            pid = threads.blockingCallFromThread(
                reactor, cmd.register, d0, args,
                '%s %s' % (request.path, args))
        except ArgumentParsingError, e:
            raise BadRequest(str(e))
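A small worked example of the argument conversion described in the docstring may help. The helpers below are standalone copies of the nested functions above, applied to the request.args that twisted.web would typically produce for the docstring's URL (bare flags such as -l usually parse to [''], which is why the handler later drops empty strings with filter(None, ...)):

def named_args_filter_and_flatten(nargs):
    for name, vallist in nargs:
        if name not in ('arg', 'asynchronous'):
            for val in vallist:
                yield name
                yield val

def convert_args(args):
    tokenized_args = args.get('arg', [])
    return tokenized_args + list(named_args_filter_and_flatten(args.items()))

# PUT /bin/ls?arg=/some/path&arg=/another/path&-l&--recursive
request_args = {'arg': ['/some/path', '/another/path'], '-l': [''], '--recursive': ['']}
tokens = [a for a in convert_args(request_args) if a]   # the handler's filter(None, args)
print(tokens)   # ['/some/path', '/another/path', '-l', '--recursive'] (flag order follows dict order)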
Example #49
 def get_remote_metadata(self, dircap, basedir=''):
     metadata = {}
     jobs = []
     logging.debug("Getting remote metadata from %s...", dircap)
     url = '{}uri/{}/?t=json'.format(self.tahoe.node_url, dircap)
     received_data = requests.get(url).json()
     for filename, data in received_data[1]['children'].items():
         path = '/'.join([basedir, filename]).strip('/')
         metadata[path] = {
             'uri': data[1]['ro_uri'],
             'mtime': int(data[1]['metadata']['mtime'])}
         if data[0] == 'dirnode':
             jobs.append(
                 deferToThread(self.get_remote_metadata,
                               '/'.join([dircap, filename]), path))
     results = blockingCallFromThread(reactor, gatherResults, jobs)
     for result in results:
         metadata.update(result)
     return metadata
Example #50
File: timer.py Project: ckx/pyBurlyBot
 def addtimer(cls,
              name,
              interval,
              f,
              reps=1,
              startnow=False,
              *args,
              **kwargs):
     #kinda want to use _ prefix for internal things like DBcommit
     try:
         if name.startswith("_"):
             raise TimerInvalidName("Invalid name (%s)." % name)
     except AttributeError:
         raise TimerInvalidName("Invalid name (%s)." % name)
     else:
         # force interval and reps to float and int respectively in case the module forgot (I forgot)
         return blockingCallFromThread(reactor, cls._addTimer, name,
                                       float(interval), f, int(reps),
                                       startnow, *args, **kwargs)
Example #51
    def do_login(self, line):
        """Login as a specific user: "******"
        You MAY be prompted for a password, or instructed to visit a URL.
        """
        try:
            args = self._parse(line, ["user_id"], force_keys=True)
            can_login = threads.blockingCallFromThread(reactor, self._check_can_login)
            if can_login:
                p = getpass.getpass("Enter your password: "******"user_id"]
                if self._is_on("complete_usernames") and not user.startswith("@"):
                    domain = self._domain()
                    if domain:
                        user = "******" + user + ":" + domain

                reactor.callFromThread(self._do_login, user, p)
                # print " got %s " % p
        except Exception as e:
            print(e)
Example #52
    def test_live_edge_bootstrapping(self):
        """
        A node without trust for anyone should still find a candidate.
        """
        # Arrange
        node, other = self.create_nodes(2)
        candidate = node.community.create_or_update_walkcandidate(
            other.my_candidate.sock_addr, other.my_candidate.sock_addr,
            ('0.0.0.0', 0), other.my_candidate.tunnel, u"unknown")
        candidate.associate(other.community.my_member)
        candidate.walk_response(time.time())

        # Assert
        intro = blockingCallFromThread(
            reactor, node.community.dispersy_get_introduce_candidate,
            node.my_candidate)
        self.assertIsNotNone(intro)
        self.assertIsInstance(intro, Candidate)
        self.assertEqual(intro, candidate)
Example #53
 def getVariableValue(self, vname):
     """
     This method queries the value of a variable as needed. It should not be
     considered as accurate as the callback, since it only asks for the value
     if we don't have a subscription to the variable.
     """
     var = self.variables[vname]
     if (var.access_type is variable.CONSTANT) and (var.value is not None):
         res = var.__value__
     else:
         with self.supervisor:
             if logger.isEnabledFor(logging.DEBUG):
                 logger.debug("Thread blocking function called")
             element = threads.blockingCallFromThread(
                 reactor, self.control.query,
                 REQUEST_CONTROL_QUERY % (var.xml_type, vname),
                 self.peerid)[0]
             res = element.find('value').text
     return res
Example #54
    def process():
        if 'name' not in flask.request.form or \
           'account' not in flask.request.form or \
           'sequence' not in flask.request.form:
            flask.flash(u'Co to proboha provádíte?!')
            return flask.redirect('/')

        name = flask.request.form['name']
        account = flask.request.form['account']
        sequence = 0

        inp = blockingCallFromThread(reactor, manager.get_input, name)
        if inp is None:
            flask.flash(u'Jejda, vstupní soubor tu už není. '
                        u'Příště zkuste doplnit chybějící informace rychleji.')
            return flask.redirect('/')

        try:
            sequence = int(flask.request.form['sequence'])
        except ValueError:
            flask.flash(u'Neplatné pořadové číslo. Zkuste to znovu.')
            return flask.render_template('preview.html', **locals())

        if not re.match('^[0-9]+(-[0-9]+)?/[0-9]+$', account):
            flask.flash(u'Neplatné číslo účtu. Zkuste to znovu.')
            return flask.render_template('preview.html', **locals())

        try:
            fp = StringIO()
            render_output(fp, inp, account, sequence)
            data = fp.getvalue()
        except:
            print_exc()
            flask.flash(u'Nastala chyba při zpracování souboru. Omlouváme se.')
            return flask.redirect('/')

        resp = flask.make_response(data)
        resp.headers['Content-Type'] = 'application/octet-stream'
        resp.headers[
            'Content-Disposition'] = 'attachment; filename=account.gpc'
        return resp
Example #55
File: wsgi.py Project: lzimm/360io
    def readline(self, size=None):
        """
        Read a line, delimited by a newline. If the stream reaches EOF
        or size bytes have been read before reaching a newline (if
        size is given), the partial line is returned.

        COMPATIBILITY NOTE: the size argument is excluded from the
        WSGI specification, but is provided here anyhow, because
        useful libraries such as python stdlib's cgi.py assume their
        input file-like-object supports readline with a size
        argument. If you use it, be aware your application may not be
        portable to other conformant WSGI servers.
        """
        # Called in application thread
        if size < 0:
            # E.g. -1, which is the default readline size for *some*
            # other file-like-objects...
            size = None

        return threads.blockingCallFromThread(
            reactor, self.stream.readline, '\n', size=size)
Example #56
def configure():
    logger.debug("Configuring pipeline run server...")
    try:
        data = json.loads(request.data)
        rsc = RunServerConfiguration(data)
        status = None
        if server_model.config == None:
            server_model.config = rsc
            msg = "New configuration set for the pipeline run server."
            status = CONFIG_OK
        elif server_model.config != rsc:
            msg = "Configuration not accepted for the pipeline run server since another configuration has already been set."
            status = CONFIG_NOT_ACCEPTED
        else:
            msg = "The same configuration has already been set for the pipeline run server. Nothing to do."
            status = CONFIG_OK

        if status == CONFIG_OK:
            drm_status, drm_msg = blockingCallFromThread(
                reactor, _configure_drm, rsc)
            msg += "\n%s" % drm_msg
            if drm_status != CONFIG_OK:
                status = CONFIG_ERROR

        logger.info(msg)
        response = ConfigurationResponse(status, msg)
        return Response(json.dumps(response.__dict__),
                        mimetype="application/json")

    except:
        msg = "An error occured while configuring the run server. Reason: \n%s" % traceback.format_exc(
        )
        logger.error(msg)
        response = ConfigurationResponse(CONFIG_ERROR, msg)
        return Response(json.dumps(response.__dict__),
                        mimetype="application/json")
Example #57
 def do_getkeys(self, cmd):
     """getkeys [COLUMNS]: show a list of all keys."""
     if cmd:
         try:
             cols = int(cmd)
         except ValueError:
             self.stdout.write("Error: Invalid argument!\n")
             return
     else:
         cols = 2
     try:
         keys = threads.blockingCallFromThread(
             self.reactor,
             self.proto.getkeys,
         )
     except Exception as e:
         self.stdout.write("Error: {e}\n".format(e=e))
         return
     else:
         sk = sorted(keys)
         formated = utils.fmtcols(sk, cols)
         if not formated.endswith("\n"):
             formated += "\n"
         self.stdout.write(formated)
Example #58
 def sync(self, snapshot=None, force_backup=False): # flake8: noqa
     # FIXME ...
     if self.sync_state:
         logging.debug("Sync already in progress; queueing to end...")
         self.do_sync = True
         return
     if not snapshot:
         try:
             ls = self.tahoe.ls(self.remote_dircap)
             if not ls:
                 logging.debug("No /Archives found; "
                               "performing (first?) backup...")
                 self.sync_state += 1
                 self.backup(self.local_dir, self.remote_dircap_alias)
                 self.sync_complete(ls)
                 return
         except Exception as error:
             logging.error(error)
             return
         # XXX: It might be preferable to just check the dircap of /Latest/
         pre_sync_archives = self.tahoe.ls(self.remote_dircap + "/Archives")
         available_snapshot = pre_sync_archives[-1]
         if self.local_snapshot == available_snapshot:
             if force_backup:
                 self.sync_state += 1
                 self.backup(self.local_dir, self.remote_dircap_alias)
                 self.sync_complete(pre_sync_archives)
             return
         else:
             snapshot = available_snapshot
     remote_path = self.remote_dircap + '/Archives/' + snapshot
     logging.info("Syncing %s with %s...", self.local_dir, snapshot)
     self.sync_state += 1
     local_metadata = self.get_local_metadata(self.local_dir)
     remote_metadata = self.get_remote_metadata(remote_path)
     # TODO: If tahoe.get_metadata() fails or doesn't contain a
     # valid snapshot, jump to backup?
     jobs = []
     for file, metadata in remote_metadata.items():
         if metadata['uri'].startswith('URI:DIR'):
             dirpath = os.path.join(self.local_dir, file)
             if not os.path.isdir(dirpath):
                 logging.info("Creating directory: %s...", dirpath)
                 os.makedirs(dirpath)
     for file, metadata in remote_metadata.items():
         if not metadata['uri'].startswith('URI:DIR'):
             filepath = os.path.join(self.local_dir, file)
             remote_mtime = metadata['mtime']
             if filepath in local_metadata:
                 #local_filesize = local_metadata[filepath]['size']
                 local_mtime = local_metadata[filepath]['mtime']
                 if local_mtime < remote_mtime:
                     logging.debug(
                         "[<] %s is older than remote version; "
                         "downloading %s...", file, file)
                     if self.keep_versions:
                         self._create_versioned_copy(filepath, local_mtime)
                     jobs.append(
                         deferToThread(
                             self.download, remote_path + '/' + file,
                             filepath, remote_mtime))
                 elif local_mtime > remote_mtime:
                     logging.debug(
                         "[>] %s is newer than remote version; "
                         "backup scheduled", file)
                     self.do_backup = True
                 else:
                     logging.debug("[.] %s is up to date.", file)
             else:
                 logging.debug(
                     "[?] %s is missing; downloading %s...", file, file)
                 jobs.append(
                     deferToThread(
                         self.download, remote_path + '/' + file,
                         filepath, remote_mtime))
     for file, metadata in local_metadata.items():
         fn = file.split(self.local_dir + os.path.sep)[1]
         if fn not in remote_metadata:
             if metadata:
                 recovery_uri = self.tahoe.stored(
                     file, metadata['size'], metadata['mtime'])
                 if recovery_uri:
                     logging.debug(
                         "[x] %s removed from latest snapshot; "
                         "deleting local file...", file)
                     if self.keep_versions:
                         self._create_versioned_copy(file, local_mtime)
                     try:
                         os.remove(file)
                     except Exception as error:
                         logging.error(error)
                 else:
                     logging.debug(
                         "[!] %s isn't stored; backup scheduled", fn)
                     self.do_backup = True
     blockingCallFromThread(reactor, gatherResults, jobs)
     if self.do_backup:
         self.backup(self.local_dir, self.remote_dircap_alias)
         self.do_backup = False
     if self.do_sync:
         self.sync()
     self.sync_complete(pre_sync_archives)
Example #59
 def captureMessage(self, message, **extra):
     kwargs = self.ravenCaptureArguments(**extra)
     return blockingCallFromThread(self.reactor, self.client.captureMessage,
                                   message, **kwargs)
Example #60
 def captureException(self, **extra):
     kwargs = self.ravenCaptureArguments(**extra)
     exc_info = sys.exc_info()
     return blockingCallFromThread(self.reactor,
                                   self.client.captureException, exc_info,
                                   **kwargs)