Example #1
    def __init__(self):
        self.started = False
        self.daemon = None
        self.io_loop = IOLoop()
        self.pid = os.getpid()
        self.showlist = []

        self.tz = tz.tzwinlocal() if tz.tzwinlocal else tz.tzlocal()

        self.config_file = None
        self.data_dir = None
        self.cache_dir = None
        self.quite = None
        self.no_launch = None
        self.web_port = None
        self.developer = None
        self.debug = None
        self.newest_version = None
        self.newest_version_string = None

        self.naming_ep_type = ("%(seasonnumber)dx%(episodenumber)02d",
                               "s%(seasonnumber)02de%(episodenumber)02d",
                               "S%(seasonnumber)02dE%(episodenumber)02d",
                               "%(seasonnumber)02dx%(episodenumber)02d",
                               "S%(seasonnumber)02d E%(episodenumber)02d")
        self.sports_ep_type = ("%(seasonnumber)dx%(episodenumber)02d",
                               "s%(seasonnumber)02de%(episodenumber)02d",
                               "S%(seasonnumber)02dE%(episodenumber)02d",
                               "%(seasonnumber)02dx%(episodenumber)02d",
                               "S%(seasonnumber)02 dE%(episodenumber)02d")
        self.naming_ep_type_text = ("1x02", "s01e02", "S01E02", "01x02", "S01 E02",)
        self.naming_multi_ep_type = {0: ["-%(episodenumber)02d"] * len(self.naming_ep_type),
                                     1: [" - " + x for x in self.naming_ep_type],
                                     2: [x + "%(episodenumber)02d" for x in ("x", "e", "E", "x")]}
        self.naming_multi_ep_type_text = ("extend", "duplicate", "repeat")
        self.naming_sep_type = (" - ", " ")
        self.naming_sep_type_text = (" - ", "space")

        self.user_agent = 'SiCKRAGE.CE.1/({};{};{})'.format(platform.system(), platform.release(), str(uuid.uuid1()))
        self.languages = [language for language in os.listdir(sickrage.LOCALE_DIR) if '_' in language]
        self.sys_encoding = get_sys_encoding()
        self.client_web_urls = {'torrent': '', 'newznab': ''}

        self.adba_connection = None
        self.notifier_providers = None
        self.metadata_providers = {}
        self.search_providers = None
        self.log = None
        self.config = None
        self.alerts = None
        self.main_db = None
        self.cache_db = None
        self.scheduler = None
        self.wserver = None
        self.google_auth = None
        self.name_cache = None
        self.show_queue = None
        self.search_queue = None
        self.postprocessor_queue = None
        self.version_updater = None
        self.show_updater = None
        self.daily_searcher = None
        self.backlog_searcher = None
        self.proper_searcher = None
        self.trakt_searcher = None
        self.subtitle_searcher = None
        self.auto_postprocessor = None
        self.upnp_client = None
        self.oidc_client = None
        self.quicksearch_cache = None
Example #2
File: __init__.py Project: mrvn/OctoPrint
class Server():
	def __init__(self, configfile=None, basedir=None, host="0.0.0.0", port=5000, debug=False, allowRoot=False, logConf=None):
		self._configfile = configfile
		self._basedir = basedir
		self._host = host
		self._port = port
		self._debug = debug
		self._allowRoot = allowRoot
		self._logConf = logConf
		self._server = None

		self._logger = None

		self._lifecycle_callbacks = defaultdict(list)

		self._template_searchpaths = []

	def run(self):
		if not self._allowRoot:
			self._check_for_root()

		global app
		global babel

		global printer
		global printerProfileManager
		global fileManager
		global slicingManager
		global analysisQueue
		global userManager
		global eventManager
		global loginManager
		global pluginManager
		global appSessionManager
		global pluginLifecycleManager
		global debug

		from tornado.ioloop import IOLoop
		from tornado.web import Application, RequestHandler

		import sys

		debug = self._debug

		# first initialize the settings singleton and make sure it uses given configfile and basedir if available
		s = settings(init=True, basedir=self._basedir, configfile=self._configfile)

		# then monkey patch a bunch of stuff
		util.tornado.fix_ioloop_scheduling()
		util.flask.enable_additional_translations(additional_folders=[s.getBaseFolder("translations")])

		# setup app
		self._setup_app()

		# setup i18n
		self._setup_i18n(app)

		# then initialize logging
		self._setup_logging(self._debug, self._logConf)
		self._logger = logging.getLogger(__name__)
		def exception_logger(exc_type, exc_value, exc_tb):
			self._logger.error("Uncaught exception", exc_info=(exc_type, exc_value, exc_tb))
		sys.excepthook = exception_logger
		self._logger.info("Starting OctoPrint %s" % DISPLAY_VERSION)

		# then initialize the plugin manager
		pluginManager = octoprint.plugin.plugin_manager(init=True)

		printerProfileManager = PrinterProfileManager()
		eventManager = events.eventManager()
		analysisQueue = octoprint.filemanager.analysis.AnalysisQueue()
		slicingManager = octoprint.slicing.SlicingManager(s.getBaseFolder("slicingProfiles"), printerProfileManager)
		storage_managers = dict()
		storage_managers[octoprint.filemanager.FileDestinations.LOCAL] = octoprint.filemanager.storage.LocalFileStorage(s.getBaseFolder("uploads"))
		fileManager = octoprint.filemanager.FileManager(analysisQueue, slicingManager, printerProfileManager, initial_storage_managers=storage_managers)
		printer = Printer(fileManager, analysisQueue, printerProfileManager)
		appSessionManager = util.flask.AppSessionManager()
		pluginLifecycleManager = LifecycleManager(pluginManager)

		def octoprint_plugin_inject_factory(name, implementation):
			if not isinstance(implementation, octoprint.plugin.OctoPrintPlugin):
				return None
			return dict(
				plugin_manager=pluginManager,
				printer_profile_manager=printerProfileManager,
				event_bus=eventManager,
				analysis_queue=analysisQueue,
				slicing_manager=slicingManager,
				file_manager=fileManager,
				printer=printer,
				app_session_manager=appSessionManager,
				plugin_lifecycle_manager=pluginLifecycleManager,
				data_folder=os.path.join(settings().getBaseFolder("data"), name)
			)

		def settings_plugin_inject_factory(name, implementation):
			if not isinstance(implementation, octoprint.plugin.SettingsPlugin):
				return None
			default_settings = implementation.get_settings_defaults()
			get_preprocessors, set_preprocessors = implementation.get_settings_preprocessors()
			plugin_settings = octoprint.plugin.plugin_settings(name,
			                                                   defaults=default_settings,
			                                                   get_preprocessors=get_preprocessors,
			                                                   set_preprocessors=set_preprocessors)
			return dict(settings=plugin_settings)

		def settings_plugin_config_migration(name, implementation):
			if not isinstance(implementation, octoprint.plugin.SettingsPlugin):
				return

			settings_version = implementation.get_settings_version()
			settings_migrator = implementation.on_settings_migrate

			if settings_version is not None and settings_migrator is not None:
				stored_version = implementation._settings.get_int(["_config_version"])
				if stored_version is None or stored_version < settings_version:
					settings_migrator(settings_version, stored_version)
					implementation._settings.set_int(["_config_version"], settings_version)
					implementation._settings.save()

			implementation.on_settings_initialized()

		pluginManager.implementation_inject_factories=[octoprint_plugin_inject_factory, settings_plugin_inject_factory]
		pluginManager.initialize_implementations()

		settingsPlugins = pluginManager.get_implementations(octoprint.plugin.SettingsPlugin)
		for implementation in settingsPlugins:
			try:
				settings_plugin_config_migration(implementation._identifier, implementation)
			except:
				self._logger.exception("Error while trying to migrate settings for plugin {}, ignoring it".format(implementation._identifier))

		pluginManager.implementation_post_inits=[settings_plugin_config_migration]

		pluginManager.log_all_plugins()

		# initialize file manager and register it for changes in the registered plugins
		fileManager.initialize()
		pluginLifecycleManager.add_callback(["enabled", "disabled"], lambda name, plugin: fileManager.reload_plugins())

		# initialize slicing manager and register it for changes in the registered plugins
		slicingManager.initialize()
		pluginLifecycleManager.add_callback(["enabled", "disabled"], lambda name, plugin: slicingManager.reload_slicers())

		# setup jinja2
		self._setup_jinja2()
		def template_enabled(name, plugin):
			if plugin.implementation is None or not isinstance(plugin.implementation, octoprint.plugin.TemplatePlugin):
				return
			self._register_additional_template_plugin(plugin.implementation)
		def template_disabled(name, plugin):
			if plugin.implementation is None or not isinstance(plugin.implementation, octoprint.plugin.TemplatePlugin):
				return
			self._unregister_additional_template_plugin(plugin.implementation)
		pluginLifecycleManager.add_callback("enabled", template_enabled)
		pluginLifecycleManager.add_callback("disabled", template_disabled)

		# setup assets
		self._setup_assets()

		# configure timelapse
		octoprint.timelapse.configureTimelapse()

		# setup command triggers
		events.CommandTrigger(printer)
		if self._debug:
			events.DebugEventListener()

		# setup access control
		if s.getBoolean(["accessControl", "enabled"]):
			userManagerName = s.get(["accessControl", "userManager"])
			try:
				clazz = octoprint.util.get_class(userManagerName)
				userManager = clazz()
			except AttributeError, e:
				self._logger.exception("Could not instantiate user manager %s, will run with accessControl disabled!" % userManagerName)

		app.wsgi_app = util.ReverseProxied(
			app.wsgi_app,
			s.get(["server", "reverseProxy", "prefixHeader"]),
			s.get(["server", "reverseProxy", "schemeHeader"]),
			s.get(["server", "reverseProxy", "hostHeader"]),
			s.get(["server", "reverseProxy", "prefixFallback"]),
			s.get(["server", "reverseProxy", "schemeFallback"]),
			s.get(["server", "reverseProxy", "hostFallback"])
		)

		secret_key = s.get(["server", "secretKey"])
		if not secret_key:
			import string
			from random import choice
			chars = string.ascii_lowercase + string.ascii_uppercase + string.digits
			secret_key = "".join(choice(chars) for _ in xrange(32))
			s.set(["server", "secretKey"], secret_key)
			s.save()
		app.secret_key = secret_key
		loginManager = LoginManager()
		loginManager.session_protection = "strong"
		loginManager.user_callback = load_user
		if userManager is None:
			loginManager.anonymous_user = users.DummyUser
			principals.identity_loaders.appendleft(users.dummy_identity_loader)
		loginManager.init_app(app)

		if self._host is None:
			self._host = s.get(["server", "host"])
		if self._port is None:
			self._port = s.getInt(["server", "port"])

		app.debug = self._debug

		# register API blueprint
		self._setup_blueprints()

		## Tornado initialization starts here

		ioloop = IOLoop()
		ioloop.install()

		self._router = SockJSRouter(self._create_socket_connection, "/sockjs")

		upload_suffixes = dict(name=s.get(["server", "uploads", "nameSuffix"]), path=s.get(["server", "uploads", "pathSuffix"]))

		server_routes = self._router.urls + [
			# various downloads
			(r"/downloads/timelapse/([^/]*\.mpg)", util.tornado.LargeResponseHandler, dict(path=s.getBaseFolder("timelapse"), as_attachment=True)),
			(r"/downloads/files/local/(.*)", util.tornado.LargeResponseHandler, dict(path=s.getBaseFolder("uploads"), as_attachment=True, path_validation=util.tornado.path_validation_factory(lambda path: not os.path.basename(path).startswith("."), status_code=404))),
			(r"/downloads/logs/([^/]*)", util.tornado.LargeResponseHandler, dict(path=s.getBaseFolder("logs"), as_attachment=True, access_validation=util.tornado.access_validation_factory(app, loginManager, util.flask.admin_validator))),
			# camera snapshot
			(r"/downloads/camera/current", util.tornado.UrlForwardHandler, dict(url=s.get(["webcam", "snapshot"]), as_attachment=True, access_validation=util.tornado.access_validation_factory(app, loginManager, util.flask.user_validator))),
			# generated webassets
			(r"/static/webassets/(.*)", util.tornado.LargeResponseHandler, dict(path=os.path.join(s.getBaseFolder("generated"), "webassets")))
		]
		for name, hook in pluginManager.get_hooks("octoprint.server.http.routes").items():
			try:
				result = hook(list(server_routes))
			except:
				self._logger.exception("There was an error while retrieving additional server routes from plugin hook {name}".format(**locals()))
			else:
				if isinstance(result, (list, tuple)):
					for entry in result:
						if not isinstance(entry, tuple) or not len(entry) == 3:
							continue
						if not isinstance(entry[0], basestring):
							continue
						if not isinstance(entry[2], dict):
							continue

						route, handler, kwargs = entry
						route = r"/plugin/{name}/{route}".format(name=name, route=route if not route.startswith("/") else route[1:])

						self._logger.debug("Adding additional route {route} handled by handler {handler} and with additional arguments {kwargs!r}".format(**locals()))
						server_routes.append((route, handler, kwargs))

		server_routes.append((r".*", util.tornado.UploadStorageFallbackHandler, dict(fallback=util.tornado.WsgiInputContainer(app.wsgi_app), file_prefix="octoprint-file-upload-", file_suffix=".tmp", suffixes=upload_suffixes)))

		self._tornado_app = Application(server_routes)
		max_body_sizes = [
			("POST", r"/api/files/([^/]*)", s.getInt(["server", "uploads", "maxSize"])),
			("POST", r"/api/languages", 5 * 1024 * 1024)
		]

		# allow plugins to extend allowed maximum body sizes
		for name, hook in pluginManager.get_hooks("octoprint.server.http.bodysize").items():
			try:
				result = hook(list(max_body_sizes))
			except:
				self._logger.exception("There was an error while retrieving additional upload sizes from plugin hook {name}".format(**locals()))
			else:
				if isinstance(result, (list, tuple)):
					for entry in result:
						if not isinstance(entry, tuple) or not len(entry) == 3:
							continue
						if not entry[0] in util.tornado.UploadStorageFallbackHandler.BODY_METHODS:
							continue
						if not isinstance(entry[2], int):
							continue

						method, route, size = entry
						route = r"/plugin/{name}/{route}".format(name=name, route=route if not route.startswith("/") else route[1:])

						self._logger.debug("Adding maximum body size of {size}B for {method} requests to {route})".format(**locals()))
						max_body_sizes.append((method, route, size))

		self._server = util.tornado.CustomHTTPServer(self._tornado_app, max_body_sizes=max_body_sizes, default_max_body_size=s.getInt(["server", "maxSize"]))
		self._server.listen(self._port, address=self._host)

		eventManager.fire(events.Events.STARTUP)
		if s.getBoolean(["serial", "autoconnect"]):
			(port, baudrate) = s.get(["serial", "port"]), s.getInt(["serial", "baudrate"])
			printer_profile = printerProfileManager.get_default()
			connectionOptions = get_connection_options()
			if port in connectionOptions["ports"]:
				printer.connect(port=port, baudrate=baudrate, profile=printer_profile["id"] if "id" in printer_profile else "_default")

		# start up watchdogs
		if s.getBoolean(["feature", "pollWatched"]):
			# use less performant polling observer if explicitely configured
			observer = PollingObserver()
		else:
			# use os default
			observer = Observer()
		observer.schedule(util.watchdog.GcodeWatchdogHandler(fileManager, printer), s.getBaseFolder("watched"))
		observer.start()

		# run our startup plugins
		octoprint.plugin.call_plugin(octoprint.plugin.StartupPlugin,
		                             "on_startup",
		                             args=(self._host, self._port))

		def call_on_startup(name, plugin):
			implementation = plugin.get_implementation(octoprint.plugin.StartupPlugin)
			if implementation is None:
				return
			implementation.on_startup(self._host, self._port)
		pluginLifecycleManager.add_callback("enabled", call_on_startup)

		# prepare our after startup function
		def on_after_startup():
			self._logger.info("Listening on http://%s:%d" % (self._host, self._port))

			# now this is somewhat ugly, but the issue is the following: startup plugins might want to do things for
			# which they need the server to be already alive (e.g. for being able to resolve urls, such as favicons
			# or service xmls or the like). While they are working though the ioloop would block. Therefore we'll
			# create a single use thread in which to perform our after-startup-tasks, start that and hand back
			# control to the ioloop
			def work():
				octoprint.plugin.call_plugin(octoprint.plugin.StartupPlugin,
				                             "on_after_startup")

				def call_on_after_startup(name, plugin):
					implementation = plugin.get_implementation(octoprint.plugin.StartupPlugin)
					if implementation is None:
						return
					implementation.on_after_startup()
				pluginLifecycleManager.add_callback("enabled", call_on_after_startup)

			import threading
			threading.Thread(target=work).start()
		ioloop.add_callback(on_after_startup)

		# prepare our shutdown function
		def on_shutdown():
			# will be called on clean system exit and shutdown the watchdog observer and call the on_shutdown methods
			# on all registered ShutdownPlugins
			self._logger.info("Shutting down...")
			observer.stop()
			observer.join()
			octoprint.plugin.call_plugin(octoprint.plugin.ShutdownPlugin,
			                             "on_shutdown")
			self._logger.info("Goodbye!")
		atexit.register(on_shutdown)

		def sigterm_handler(*args, **kwargs):
			# will stop tornado on SIGTERM, making the program exit cleanly
			def shutdown_tornado():
				ioloop.stop()
			ioloop.add_callback_from_signal(shutdown_tornado)
		signal.signal(signal.SIGTERM, sigterm_handler)

		try:
			# this is the main loop - as long as tornado is running, OctoPrint is running
			ioloop.start()
		except (KeyboardInterrupt, SystemExit):
			pass
		except:
			self._logger.fatal("Now that is embarrassing... Something really really went wrong here. Please report this including the stacktrace below in OctoPrint's bugtracker. Thanks!")
			self._logger.exception("Stacktrace follows:")
Example #3
 def get_new_ioloop(self):
     """Creates a new `.IOLoop` for this test.  May be overridden in
     subclasses for tests that require a specific `.IOLoop` (usually
     the singleton `.IOLoop.instance()`).
     """
     return IOLoop()
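
A minimal sketch of the override this docstring describes, assuming tornado's AsyncTestCase as the base class (the test class name is hypothetical):

from tornado.ioloop import IOLoop
from tornado.testing import AsyncTestCase

class SingletonLoopTest(AsyncTestCase):
    def get_new_ioloop(self):
        # Reuse the process-wide singleton instead of a fresh loop per test.
        return IOLoop.instance()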
Example #4
    item_cache = ItemCache()
    application = tornado.web.Application([
        (r"/", MainHandler, dict(item_cache=item_cache)),
        (r"/about", AboutHandler),
        (r"/search", SearchHandler),
    ], **settings)

    HOST = os.getenv('VCAP_APP_HOST', '0.0.0.0')

    PORT = int(os.getenv('VCAP_APP_PORT', '80'))

    http_server = tornado.httpserver.HTTPServer(application)
    http_server.listen(PORT, HOST)

    ioloop = IOLoop().instance()

    sched = InitialPeriodicCallback(item_cache.update_local_file_database,
                                    20 * MINUTE,
                                    1 * SECOND,
                                    io_loop=ioloop)
    sched.start()

    if 'production' not in sys.argv:
        for root, dirs, files in os.walk('.', topdown=False):
            for name in files:
                if '#' not in name and 'DS_S' not in name and 'flymake' not in name and 'pyc' not in name:
                    tornado.autoreload.watch(root + '/' + name)
        tornado.autoreload.start(ioloop)

    ioloop.start()
Example #5
 def __init__(self, **kwargs):
     Thread.__init__(self, name="Control", **kwargs)
     self.io_loop = IOLoop(make_current=False)
     self.pydev_do_not_trace = True
     self.is_pydev_daemon_thread = True
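
A thread initialized like this typically binds and starts the loop in its run() method; a minimal sketch under that assumption (the real class may do more):

 def run(self):
     # Make the loop current on this thread, then block until stop() is called.
     self.io_loop.make_current()
     self.io_loop.start()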
Example #6
 def __init__(self, async_client_class=None, **kwargs):
     self._io_loop = IOLoop()
     if async_client_class is None:
         async_client_class = AsyncHTTPClient
     self._async_client = async_client_class(self._io_loop, **kwargs)
     self._closed = False
Example #7
 def __init__(self):
     # always use a new ioloop
     IOLoop.clear_current()
     IOLoop(make_current=True)
     super(_TestReactor, self).__init__()
     IOLoop.clear_current()
Example #8
def bk_worker():
    server = Server({'/bkapp': modify_doc},
                    io_loop=IOLoop(),
                    allow_websocket_origin=["0.0.0.0:5000"])
    server.start()
    server.io_loop.start()
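
Since server.io_loop.start() blocks, bk_worker is normally launched on a background thread so the embedding web app (presumably the one listening on 0.0.0.0:5000) stays responsive; a hedged usage sketch:

from threading import Thread

Thread(target=bk_worker, daemon=True).start()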
Example #9
 def f():
     for i in range(10):
         loop = IOLoop()
         loop.close()
Example #10
 def get_new_ioloop(self):
     '''Creates a new IOLoop for this test.  May be overridden in
     subclasses for tests that require a specific IOLoop (usually
     the singleton).
     '''
     return IOLoop()
Example #11
    def _run(
        cls,
        worker_kwargs,
        worker_start_args,
        silence_logs,
        init_result_q,
        child_stop_q,
        uid,
        env,
        config,
        Worker,
    ):  # pragma: no cover
        os.environ.update(env)
        dask.config.set(config)
        try:
            from dask.multiprocessing import initialize_worker_process
        except ImportError:  # old Dask version
            pass
        else:
            initialize_worker_process()

        if silence_logs:
            logger.setLevel(silence_logs)

        IOLoop.clear_instance()
        loop = IOLoop()
        loop.make_current()
        worker = Worker(**worker_kwargs)

        async def do_stop(timeout=5, executor_wait=True):
            try:
                await worker.close(
                    report=False,
                    nanny=False,
                    executor_wait=executor_wait,
                    timeout=timeout,
                )
            finally:
                loop.stop()

        def watch_stop_q():
            """
            Wait for an incoming stop message and then stop the
            worker cleanly.
            """
            while True:
                try:
                    msg = child_stop_q.get(timeout=1000)
                except Empty:
                    pass
                else:
                    child_stop_q.close()
                    assert msg.pop("op") == "stop"
                    loop.add_callback(do_stop, **msg)
                    break

        t = threading.Thread(target=watch_stop_q,
                             name="Nanny stop queue watch")
        t.daemon = True
        t.start()

        async def run():
            """
            Try to start worker and inform parent of outcome.
            """
            try:
                await worker
            except Exception as e:
                logger.exception("Failed to start worker")
                init_result_q.put({"uid": uid, "exception": e})
                init_result_q.close()
            else:
                try:
                    assert worker.address
                except ValueError:
                    pass
                else:
                    init_result_q.put({
                        "address": worker.address,
                        "dir": worker.local_directory,
                        "uid": uid,
                    })
                    init_result_q.close()
                    await worker.finished()
                    logger.info("Worker closed")

        try:
            loop.run_sync(run)
        except (TimeoutError, gen.TimeoutError):
            # Loop was stopped before wait_until_closed() returned, ignore
            pass
        except KeyboardInterrupt:
            pass
Example #12
def test_github_missing_ref():
    provider = GitHubRepoProvider(
        spec='jupyterhub/zero-to-jupyterhub-k8s/v0.1.2.3.4.5.6')
    ref = IOLoop().run_sync(provider.get_resolved_ref)
    assert ref is None
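
run_sync(), as used here, drives a single coroutine to completion from synchronous code; a small generic sketch with the optional timeout (fetch_value is a hypothetical coroutine):

from tornado.ioloop import IOLoop

async def fetch_value():
    return 42

# Raises a TimeoutError if the coroutine takes longer than 10 seconds.
result = IOLoop.current().run_sync(fetch_value, timeout=10)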
Example #13
 def get_new_ioloop(self):
     return IOLoop().instance()
Example #14
def serve_http():
    data['ioloop'] = IOLoop()
    http_server.listen(PORT)
    IOLoop.current().start()
Example #15
def test_loop_runner(loop_in_thread):
    # Implicit loop
    loop = IOLoop()
    loop.make_current()
    runner = LoopRunner()
    assert runner.loop not in (loop, loop_in_thread)
    assert not runner.is_started()
    assert_not_running(runner.loop)
    runner.start()
    assert runner.is_started()
    assert_running(runner.loop)
    runner.stop()
    assert not runner.is_started()
    assert_not_running(runner.loop)

    # Explicit loop
    loop = IOLoop()
    runner = LoopRunner(loop=loop)
    assert runner.loop is loop
    assert not runner.is_started()
    assert_not_running(loop)
    runner.start()
    assert runner.is_started()
    assert_running(loop)
    runner.stop()
    assert not runner.is_started()
    assert_not_running(loop)

    # Explicit loop, already started
    runner = LoopRunner(loop=loop_in_thread)
    assert not runner.is_started()
    assert_running(loop_in_thread)
    runner.start()
    assert runner.is_started()
    assert_running(loop_in_thread)
    runner.stop()
    assert not runner.is_started()
    assert_running(loop_in_thread)

    # Implicit loop, asynchronous=True
    loop = IOLoop()
    loop.make_current()
    runner = LoopRunner(asynchronous=True)
    assert runner.loop is loop
    assert not runner.is_started()
    assert_not_running(runner.loop)
    runner.start()
    assert runner.is_started()
    assert_not_running(runner.loop)
    runner.stop()
    assert not runner.is_started()
    assert_not_running(runner.loop)

    # Explicit loop, asynchronous=True
    loop = IOLoop()
    runner = LoopRunner(loop=loop, asynchronous=True)
    assert runner.loop is loop
    assert not runner.is_started()
    assert_not_running(runner.loop)
    runner.start()
    assert runner.is_started()
    assert_not_running(runner.loop)
    runner.stop()
    assert not runner.is_started()
    assert_not_running(runner.loop)
Example #16
    def _run(cls, worker_args, worker_kwargs, worker_start_args, silence_logs,
             init_result_q, child_stop_q, uid, Worker):  # pragma: no cover
        try:
            from dask.multiprocessing import initialize_worker_process
        except ImportError:  # old Dask version
            pass
        else:
            initialize_worker_process()

        if silence_logs:
            logger.setLevel(silence_logs)

        IOLoop.clear_instance()
        loop = IOLoop()
        loop.make_current()
        worker = Worker(*worker_args, **worker_kwargs)

        @gen.coroutine
        def do_stop(timeout=5, executor_wait=True):
            try:
                yield worker._close(report=False,
                                    nanny=False,
                                    executor_wait=executor_wait,
                                    timeout=timeout)
            finally:
                loop.stop()

        def watch_stop_q():
            """
            Wait for an incoming stop message and then stop the
            worker cleanly.
            """
            while True:
                try:
                    msg = child_stop_q.get(timeout=1000)
                except Empty:
                    pass
                else:
                    child_stop_q.close()
                    assert msg.pop('op') == 'stop'
                    loop.add_callback(do_stop, **msg)
                    break

        t = threading.Thread(target=watch_stop_q,
                             name="Nanny stop queue watch")
        t.daemon = True
        t.start()

        @gen.coroutine
        def run():
            """
            Try to start worker and inform parent of outcome.
            """
            try:
                yield worker._start(*worker_start_args)
            except Exception as e:
                logger.exception("Failed to start worker")
                init_result_q.put({'uid': uid, 'exception': e})
                init_result_q.close()
            else:
                assert worker.address
                init_result_q.put({
                    'address': worker.address,
                    'dir': worker.local_dir,
                    'uid': uid
                })
                init_result_q.close()
                yield worker.wait_until_closed()
                logger.info("Worker closed")

        try:
            loop.run_sync(run)
        except TimeoutError:
            # Loop was stopped before wait_until_closed() returned, ignore
            pass
        except KeyboardInterrupt:
            pass
Example #17
File: Test.py Project: ArvinDevel/druid
def threadoperation(queryPerSec):
    @gen.coroutine
    def printresults():
        logger.info('{} {} {} {}'.format(start.strftime("%Y-%m-%d %H:%M:%S"),
                                         end.strftime("%Y-%m-%d %H:%M:%S"),
                                         runtime, queryPerSec))
        line = list()
        querypermin = queryPerSec * 60
        endtime = datetime.now(timezone('UTC')) + timedelta(minutes=runtime)
        popularitylist = list()
        newquerylist = list()
        if filename != "":
            newquerylist = QueryGenerator.generateQueriesFromFile(
                start, end, querypermin * runtime, timeAccessGenerator,
                periodAccessGenerator, filename)
        elif isbatch == True:
            newquerylist = QueryGenerator.generateQueries(
                start, end, querypermin * runtime, timeAccessGenerator,
                periodAccessGenerator, popularitylist)
#        if filename != "" or isbatch == True:
#            count = 0
#            time = datetime.now(timezone('UTC'))
#            logger.info("Time: {}".format(time.strftime("%Y-%m-%d %H:%M:%S")))
#            nextminute = time + timedelta(minutes=1)
#            for query in newquerylist:
#                try:
#                    line.append(applyOperation(query, config, logger))
#                except Exception as inst:
#                    logger.error(type(inst))     # the exception instance
#                    logger.error(inst.args)      # arguments stored in .args
#                    logger.error(inst)           # __str__ allows args to be printed directly
#                    x, y = inst.args
#                    logger.error('x =', x)
#                    logger.error('y =', y)
#
#                count = count + 1
#                if count >= querypermin:
#                    timediff = (nextminute - datetime.now(timezone('UTC'))).total_seconds()
#                    if timediff > 0:
#                        yield gen.sleep(timediff)
#                    count = 0
#                    time = datetime.now(timezone('UTC'))
#                    logger.info("Time: {}".format(time.strftime("%Y-%m-%d %H:%M:%S")))
#                    nextminute = time + timedelta(minutes=1)
#
#        else:
#            while True:
#                time = datetime.now(timezone('UTC'))
#                logger.info("Time: {}".format(time.strftime("%Y-%m-%d %H:%M:%S")))
#                if time >= endtime:
#                    break
#
#                #Query generated every minute. This is to optimize the overhead of query generation and also because segment granularity is minute
#                newquerylist = QueryGenerator.generateQueries(start, time, querypermin, timeAccessGenerator, periodAccessGenerator, popularitylist)
#
#                for query in newquerylist:
#                    try:
#                        line.append(applyOperation(query, config, logger))
#                    except Exception as inst:
#                        logger.error(type(inst))     # the exception instance
#                        logger.error(inst.args)      # arguments stored in .args
#                        logger.error(inst)           # __str__ allows args to be printed directly
#                        x, y = inst.args
#                        logger.error('x =', x)
#                        logger.error('y =', y)
#
#
#                nextminute = time + timedelta(minutes=1)
#                timediff = (nextminute - datetime.now(timezone('UTC'))).total_seconds()
#                if timediff > 0:
#                    yield gen.sleep(timediff)
#
#        wait_iterator = gen.WaitIterator(*line)
#        while not wait_iterator.done():
#            try:
#                result = yield wait_iterator.next()
#            except Exception as e:
#                logger.error("Error {} from {}".format(e, wait_iterator.current_future))
#            #else:
#            #    logger.info("Result {} received from {} at {}".format(
#            #        result, wait_iterator.current_future,
#            #        wait_iterator.current_index))

    IOLoop().run_sync(printresults)
Example #18
        logger.info('exiting...')
        self.is_closing = True

    def try_exit(self):
        if self.is_closing:
            IOLoop.instance().stop()
            logger.info('exit success')


if __name__ == '__main__':
    tornado_options.parse_command_line()

    routes = [
        URLSpec(r'/healthz', HealthzHandler),
        URLSpec(r"/metrics", MetricsHandler),
        URLSpec(r"/", FunctionHandler)
    ]

    logger.info("Server is starting")
    loop = uvloop.new_event_loop()
    asyncio.set_event_loop(loop)
    io_loop = IOLoop().current()
    app = KubelessApplication(routes)
    server = HTTPServer(app)
    server.bind(func_port, reuse_port=True)
    server.start()
    signal.signal(signal.SIGINT, app.signal_handler)
    signal.signal(signal.SIGTERM, app.signal_handler)
    PeriodicCallback(app.try_exit, 100).start()
    io_loop.start()
Example #19
def serve_http():
    global IOLOOP
    IOLOOP = IOLoop().current()
    http_server = HTTPServer(WSGIContainer(app))
    http_server.listen(PORT)
    IOLOOP.start()
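
Because IOLOOP.start() blocks the calling thread, a shutdown hook usually schedules stop() back onto the loop; a hedged companion sketch (stop_http is a name introduced here):

def stop_http():
    # add_callback is thread-safe; stop() itself must run on the loop's own thread.
    IOLOOP.add_callback(IOLOOP.stop)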
Example #20
 def __init__(self):
     self._io_loop = IOLoop()
     self._async_client = AsyncHTTPClient(self._io_loop)
     self._response = None
     self._closed = False
Example #21
 def _start():
     self.io_loop = IOLoop()
     self.io_loop.make_current()
     self.io_loop.add_callback(_start_co)
     self.io_loop.start()
Example #22
File: utils.py Project: numpand/bokeh
 def __init__(self, application, **server_kwargs):
     loop = IOLoop()
     loop.make_current()
     server_kwargs['io_loop'] = loop
     self._server = Server(application, **server_kwargs)
Example #23
def serve(panels,
          port=0,
          address=None,
          websocket_origin=None,
          loop=None,
          show=True,
          start=True,
          title=None,
          verbose=True,
          location=True,
          threaded=False,
          **kwargs):
    """
    Allows serving one or more panel objects on a single server.
    The panels argument should be either a Panel object or a function
    returning a Panel object or a dictionary of these two. If a
    dictionary is supplied the keys represent the slugs at which
    each app is served, e.g. `serve({'app': panel1, 'app2': panel2})`
    will serve apps at /app and /app2 on the server.

    Arguments
    ---------
    panel: Viewable, function or {str: Viewable or function}
      A Panel object, a function returning a Panel object or a
      dictionary mapping from the URL slug to either.
    port: int (optional, default=0)
      Allows specifying a specific port
    address : str
      The address the server should listen on for HTTP requests.
    websocket_origin: str or list(str) (optional)
      A list of hosts that can connect to the websocket.

      This is typically required when embedding a server app in
      an external web site.

      If None, "localhost" is used.
    loop : tornado.ioloop.IOLoop (optional, default=IOLoop.current())
      The tornado IOLoop to run the Server on
    show : boolean (optional, default=False)
      Whether to open the server in a new browser tab on start
    start : boolean(optional, default=False)
      Whether to start the Server
    title: str or {str: str} (optional, default=None)
      An HTML title for the application or a dictionary mapping
      from the URL slug to a customized title
    verbose: boolean (optional, default=True)
      Whether to print the address and port
    location : boolean or panel.io.location.Location
      Whether to create a Location component to observe and
      set the URL location.
    threaded: boolean (default=False)
      Whether to start the server on a new Thread
    kwargs: dict
      Additional keyword arguments to pass to Server instance
    """
    kwargs = dict(
        kwargs,
        **dict(port=port,
               address=address,
               websocket_origin=websocket_origin,
               loop=loop,
               show=show,
               start=start,
               title=title,
               verbose=verbose,
               location=location))
    if threaded:
        from tornado.ioloop import IOLoop
        kwargs['loop'] = loop = IOLoop() if loop is None else loop
        server = StoppableThread(target=get_server,
                                 io_loop=loop,
                                 args=(panels, ),
                                 kwargs=kwargs)
        server.start()
    else:
        server = get_server(panels, **kwargs)
    return server
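
A hedged usage sketch for the serve() helper above, assuming Panel is installed and using two throwaway Markdown panes (the names app1/app2 are made up):

import panel as pn

app1 = pn.pane.Markdown('# First app')
app2 = pn.pane.Markdown('# Second app')

# Serves at /app1 and /app2; threaded=True builds a fresh IOLoop and runs
# the server on a StoppableThread, as in the branch above.
server = serve({'app1': app1, 'app2': app2}, port=5006, threaded=True, show=False)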
Example #24
def test_prepare_asking_for_password_with_browser(monkeypatch):
    keyring.reset_keyring_module()

    # In this scenario, we store a password in the keyring.
    from tornado.ioloop import IOLoop
    io_loop = IOLoop()

    http_results = {}

    def click_submit(url):
        from anaconda_project.internal.test.http_utils import http_get_async, http_post_async
        from tornado import gen

        @gen.coroutine
        def do_http():
            http_results['get_click_submit'] = get_response = yield http_get_async(url)

            if get_response.code != 200:
                raise Exception("got a bad http response " + repr(get_response))

            http_results['post_click_submit'] = post_response = yield http_post_async(url, body="")

            assert 200 == post_response.code
            assert '</form>' in str(post_response.body)
            assert 'FOO_PASSWORD' in str(post_response.body)

            fill_in_password(url, post_response)

        io_loop.add_callback(do_http)

    def fill_in_password(url, first_response):
        from anaconda_project.internal.test.http_utils import http_post_async
        from anaconda_project.internal.plugin_html import _BEAUTIFUL_SOUP_BACKEND
        from tornado import gen
        from bs4 import BeautifulSoup

        if first_response.code != 200:
            raise Exception("got a bad http response " + repr(first_response))

        # set the FOO_PASSWORD field
        soup = BeautifulSoup(first_response.body, _BEAUTIFUL_SOUP_BACKEND)
        password_fields = soup.find_all("input", attrs={'type': 'password'})
        if len(password_fields) == 0:
            print("No password fields in " + repr(soup))
            raise Exception("password field not found")
        else:
            field = password_fields[0]

        assert 'name' in field.attrs

        @gen.coroutine
        def do_http():
            http_results['post_fill_in_password'] = yield http_post_async(url, form={field['name']: 'bloop'})

        io_loop.add_callback(do_http)

    def mock_open_new_tab(url):
        return click_submit(url)

    monkeypatch.setattr('webbrowser.open_new_tab', mock_open_new_tab)

    def prepare_with_browser(dirname):
        project = project_no_dedicated_env(dirname)
        environ = minimal_environ()
        result = prepare_with_browser_ui(project, environ=environ, keep_going_until_success=False, io_loop=io_loop)
        assert result.errors == []
        assert result
        assert dict(FOO_PASSWORD='******', PROJECT_DIR=project.directory_path) == strip_environ(result.environ)
        assert dict() == strip_environ(environ)

        # wait for the results of the POST to come back,
        # awesome hack-tacular
        while 'post_fill_in_password' not in http_results:
            io_loop.call_later(0.01, lambda: io_loop.stop())
            io_loop.start()

        assert 'get_click_submit' in http_results
        assert 'post_click_submit' in http_results
        assert 'post_fill_in_password' in http_results

        assert 200 == http_results['get_click_submit'].code
        assert 200 == http_results['post_click_submit'].code
        assert 200 == http_results['post_fill_in_password'].code

        final_done_html = str(http_results['post_fill_in_password'].body)
        assert "Done!" in final_done_html
        assert "Environment variable FOO_PASSWORD is set." in final_done_html

        local_state_file = LocalStateFile.load_for_directory(project.directory_path)
        assert local_state_file.get_value(['variables', 'FOO_PASSWORD']) is None

        # now a no-browser prepare() should read password from the
        # keyring

    keyring.enable_fallback_keyring()
    try:
        with_directory_contents_completing_project_file(
            {DEFAULT_PROJECT_FILENAME: """
variables:
  FOO_PASSWORD: {}
"""}, prepare_with_browser)
    finally:
        keyring.disable_fallback_keyring()
Example #25
File: ws.py Project: rrandriamana/pypot
 def run(self):
     loop = IOLoop()
     app = Application([(r'/', WsSocketHandler)])
     app.listen(self.port)
     loop.start()
Example #26
 def f():
     for i in range(10):
         loop = IOLoop(make_current=False)
         loop.close()
Example #27
@gen.coroutine
def consumer():
    while True:
        item = yield q.get()
        try:
            print 'do %s' % item
            yield gen.sleep(5)

        finally:
            q.task_done()


@gen.coroutine
def producer():
    for item in range(15):
        yield q.put(item)
        print 'put %s' % item


@gen.coroutine
def main():
    IOLoop.current().spawn_callback(consumer)
    yield producer()
    yield q.join()

    print 'done'

if __name__ == '__main__':
    IOLoop().run_sync(main)
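
The snippet above is Python 2 (print statements) and assumes a queue q defined elsewhere; a sketch of the same producer/consumer pattern with native coroutines, where q = Queue() is an assumption:

from tornado import gen
from tornado.ioloop import IOLoop
from tornado.queues import Queue

q = Queue()

async def consumer():
    while True:
        item = await q.get()
        try:
            print('do %s' % item)
            await gen.sleep(5)
        finally:
            q.task_done()

async def producer():
    for item in range(15):
        await q.put(item)
        print('put %s' % item)

async def main():
    IOLoop.current().spawn_callback(consumer)
    await producer()
    await q.join()
    print('done')

if __name__ == '__main__':
    IOLoop.current().run_sync(main)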
Example #28
 def setUp(self):
     self.io_loop = IOLoop(make_current=False)
Example #29
 def setUp(self):
     self.io_loop = IOLoop()
Example #30
    def __init__(self,
                 n_workers=None,
                 threads_per_worker=None,
                 processes=True,
                 loop=None,
                 start=True,
                 ip=None,
                 scheduler_port=0,
                 silence_logs=logging.CRITICAL,
                 diagnostics_port=8787,
                 services={},
                 worker_services={},
                 nanny=None,
                 **worker_kwargs):
        if nanny is not None:
            warnings.warning("nanny has been deprecated, used processes=")
            processes = nanny
        self.status = None
        self.processes = processes
        self.silence_logs = silence_logs
        if silence_logs:
            silence_logging(level=silence_logs)
        if n_workers is None and threads_per_worker is None:
            if processes:
                n_workers = _ncores
                threads_per_worker = 1
            else:
                n_workers = 1
                threads_per_worker = _ncores
        if n_workers is None and threads_per_worker is not None:
            n_workers = max(1, _ncores // threads_per_worker)
        if n_workers and threads_per_worker is None:
            # Overcommit threads per worker, rather than undercommit
            threads_per_worker = max(1, int(math.ceil(_ncores / n_workers)))

        self.loop = loop or IOLoop()
        if start and not self.loop._running:
            self._thread = Thread(target=self.loop.start,
                                  name="LocalCluster loop")
            self._thread.daemon = True
            self._thread.start()
            while not self.loop._running:
                sleep(0.001)

        if diagnostics_port is not None:
            try:
                from distributed.bokeh.scheduler import BokehScheduler
                from distributed.bokeh.worker import BokehWorker
            except ImportError:
                logger.debug(
                    "To start diagnostics web server please install Bokeh")
            else:
                services[('bokeh', diagnostics_port)] = BokehScheduler
                worker_services[('bokeh', 0)] = BokehWorker

        self.scheduler = Scheduler(loop=self.loop, services=services)
        self.scheduler_port = scheduler_port

        self.workers = []
        self.n_workers = n_workers
        self.threads_per_worker = threads_per_worker
        self.worker_services = worker_services
        self.worker_kwargs = worker_kwargs

        if start:
            sync(self.loop, self._start, ip)

        clusters_to_close.add(self)
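
A hedged usage sketch for this constructor, assuming dask.distributed is installed (keyword behaviour differs between versions, so treat this as illustrative only):

from distributed import Client, LocalCluster

cluster = LocalCluster(n_workers=2, threads_per_worker=1, processes=False)
client = Client(cluster)                             # connect to the in-process scheduler
print(client.submit(lambda x: x + 1, 10).result())   # -> 11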