    def ready(self, trans, **kwd):
        """
        GET /interactive_environments/ready/

        Queries the GIE proxy IPC to determine whether the GIE launched for the current user's session is ready

        :returns:   ``true`` if ready else ``false``
        :rtype:     boolean
        """
        proxy_map = self.app.proxy_manager.query_proxy(trans)
        if not proxy_map.container_interface:
            # not using the new containers interface
            return True

        container_interfaces = build_container_interfaces(
            self.app.config.containers_config_file,
            containers_conf=self.app.config.containers_conf,
        )
        try:
            interface = container_interfaces[proxy_map.container_interface]
        except KeyError:
            log.error('Invalid container interface key: %s',
                      proxy_map.container_interface)
            return None
        container = interface.get_container(proxy_map.container_ids[0])
        return container.is_ready()
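
Every snippet below follows the same build-and-look-up pattern around build_container_interfaces: parse the containers config, build a dict of interfaces keyed by name, and treat a missing key as a configuration error rather than letting the KeyError propagate. A minimal standalone sketch of that pattern (the galaxy.containers import path and the '_default_' key are assumptions inferred from these snippets, not a verified API reference):

import logging

from galaxy.containers import build_container_interfaces, parse_containers_config

log = logging.getLogger(__name__)


def get_container_interface(containers_config_file, key='_default_'):
    # Parse the containers config once, then build the {name: interface} map from it.
    containers_conf = parse_containers_config(containers_config_file)
    interfaces = build_container_interfaces(containers_config_file,
                                            containers_conf=containers_conf)
    try:
        return interfaces[key]
    except KeyError:
        log.error("Invalid container interface key: %s", key)
        return None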
Example 2
def _run_swarm_manager(args):
    containers_config_file = _containers_config_file(args)
    containers_conf = parse_containers_config(containers_config_file)
    container_conf = _container_conf(containers_conf, args.swarm)
    swarm_manager_conf = _swarm_manager_conf(container_conf)
    _configure_logging(args, swarm_manager_conf)
    docker_interface = build_container_interfaces(
        containers_config_file, containers_conf=containers_conf)[args.swarm]
    pidfile = _swarm_manager_pidfile(swarm_manager_conf)

    if not args.foreground:
        _swarm_manager_daemon(pidfile, swarm_manager_conf['log_file'],
                              swarm_manager_conf, docker_interface)
    else:
        if swarm_manager_conf['terminate_when_idle']:
            log.info(
                'running in the foreground, disabling automatic swarm manager termination'
            )
            swarm_manager_conf['terminate_when_idle'] = False
        else:
            log.info("running in the foreground")
        pidfile.acquire()
        try:
            _swarm_manager(swarm_manager_conf, docker_interface)
        finally:
            pidfile.release()
Example 3
def _run_swarm_manager(args):
    containers_config_file = _containers_config_file(args)
    containers_conf = parse_containers_config(containers_config_file)
    container_conf = _container_conf(containers_conf, args.swarm)
    swarm_manager_conf = _swarm_manager_conf(container_conf)
    _configure_logging(args, swarm_manager_conf)
    docker_interface = build_container_interfaces(containers_config_file, containers_conf=containers_conf)[args.swarm]
    pidfile = _swarm_manager_pidfile(swarm_manager_conf)

    if not args.foreground:
        _swarm_manager_daemon(pidfile, swarm_manager_conf['log_file'], swarm_manager_conf, docker_interface)
    else:
        if swarm_manager_conf['terminate_when_idle']:
            log.info('running in the foreground, disabling automatic swarm manager termination')
            swarm_manager_conf['terminate_when_idle'] = False
        else:
            log.info("running in the foreground")
        try:
            pidfile.acquire()
        except lockfile.AlreadyLocked:
            pid = pidfile.read_pid()
            try:
                os.kill(pid, 0)
                log.warning("swarm manager is already running in pid %s", pid)
                return
            except OSError:
                log.warning("removing stale lockfile: %s", pidfile.path)
                pidfile.break_lock()
                pidfile.acquire()
        try:
            _swarm_manager(swarm_manager_conf, docker_interface)
        finally:
            pidfile.release()
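
The foreground branch of Example 3 adds stale-lock handling on top of Example 2: when the pidfile is already locked, os.kill(pid, 0) serves purely as a liveness probe, and the lock is broken only if that probe fails. A condensed sketch of just that pattern, assuming a lockfile PIDLockFile like the one _swarm_manager_pidfile appears to return (the pidfile path and work callable are placeholders):

import logging
import os

import lockfile
from lockfile.pidlockfile import PIDLockFile

log = logging.getLogger(__name__)


def run_exclusively(pidfile_path, work):
    pidfile = PIDLockFile(pidfile_path)
    try:
        pidfile.acquire()
    except lockfile.AlreadyLocked:
        pid = pidfile.read_pid()
        try:
            os.kill(pid, 0)  # signal 0: raises OSError if no such process exists
            log.warning("already running in pid %s", pid)
            return
        except OSError:
            log.warning("removing stale lockfile: %s", pidfile.path)
            pidfile.break_lock()
            pidfile.acquire()
    try:
        work()
    finally:
        pidfile.release()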
Example 4
    def load_container_interface(self):
        self.attr.container_interface = None
        key = None
        if string_as_bool_or_none(self.attr.viz_config.get("main", "container_interface")) is not None:
            key = self.attr.viz_config.get("main", "container_interface")
        elif self.attr.galaxy_config.enable_beta_containers_interface:
            # TODO: don't hardcode this, and allow for mapping
            key = '_default_'
        if key:
            containers = build_container_interfaces(
                self.attr.galaxy_config.containers_config_file,
                containers_conf=self.attr.galaxy_config.containers_conf,
            )
            try:
                self.attr.container_interface = containers[key]
            except KeyError:
                log.error("Unable to load '%s' container interface: invalid key", key)
Example 7
    def __init__(self, **kwargs):
        if not log.handlers:
            # Paste didn't handle it, so we need a temporary basic log
            # configured.  The handler added here gets dumped and replaced with
            # an appropriately configured logger in configure_logging below.
            logging.basicConfig(level=logging.DEBUG)
        log.debug("python path is: %s", ", ".join(sys.path))
        self.name = 'galaxy'
        self.startup_timer = ExecutionTimer()
        self.new_installation = False
        # Read config file and check for errors
        self.config = config.Configuration(**kwargs)
        self.config.check()
        config.configure_logging(self.config)
        self.configure_fluent_log()
        # A lot of postfork initialization depends on the server name, ensure it is set immediately after forking before other postfork functions
        self.application_stack = application_stack_instance(app=self)
        self.application_stack.register_postfork_function(
            self.application_stack.set_postfork_server_name, self)
        self.config.reload_sanitize_whitelist(
            explicit='sanitize_whitelist_file' in kwargs)
        self.amqp_internal_connection_obj = galaxy.queues.connection_from_config(
            self.config)
        # control_worker *can* be initialized with a queue, but here we don't
        # want to and we'll allow postfork to bind and start it.
        self.control_worker = GalaxyQueueWorker(self)

        self._configure_tool_shed_registry()
        self._configure_object_store(fsmon=True)
        # Setup the database engine and ORM
        config_file = kwargs.get('global_conf', {}).get('__file__', None)
        if config_file:
            log.debug('Using "galaxy.ini" config file: %s', config_file)
        check_migrate_tools = self.config.check_migrate_tools
        self._configure_models(
            check_migrate_databases=self.config.check_migrate_databases,
            check_migrate_tools=check_migrate_tools,
            config_file=config_file)

        # Manage installed tool shed repositories.
        self.installed_repository_manager = installed_repository_manager.InstalledRepositoryManager(
            self)

        self._configure_datatypes_registry(self.installed_repository_manager)
        galaxy.model.set_datatypes_registry(self.datatypes_registry)

        # Security helper
        self._configure_security()
        # Tag handler
        self.tag_handler = GalaxyTagHandler(self.model.context)
        self.dataset_collections_service = DatasetCollectionManager(self)
        self.history_manager = HistoryManager(self)
        self.dependency_resolvers_view = DependencyResolversView(self)
        self.test_data_resolver = test_data.TestDataResolver(
            file_dirs=self.config.tool_test_data_directories)
        self.library_folder_manager = FolderManager()
        self.library_manager = LibraryManager()
        self.dynamic_tool_manager = DynamicToolManager(self)

        # Tool Data Tables
        self._configure_tool_data_tables(from_shed_config=False)
        # Load dbkey / genome build manager
        self._configure_genome_builds(data_table_name="__dbkeys__",
                                      load_old_style=True)

        # Genomes
        self.genomes = Genomes(self)
        # Data providers registry.
        self.data_provider_registry = DataProviderRegistry()

        # Initialize job metrics manager, needs to be in place before
        # config so per-destination modifications can be made.
        self.job_metrics = job_metrics.JobMetrics(
            self.config.job_metrics_config_file, app=self)

        # Initialize error report plugins.
        self.error_reports = ErrorReports(self.config.error_report_file,
                                          app=self)

        # Initialize the job management configuration
        self.job_config = jobs.JobConfiguration(self)

        # Setup a Tool Cache
        self.tool_cache = ToolCache()
        self.tool_shed_repository_cache = ToolShedRepositoryCache(self)
        # Watch various config files for immediate reload
        self.watchers = ConfigWatchers(self)
        self._configure_toolbox()

        # Load Data Manager
        self.data_managers = DataManagers(self)
        # Load the update repository manager.
        self.update_repository_manager = update_repository_manager.UpdateRepositoryManager(
            self)
        # Load proprietary datatype converters and display applications.
        self.installed_repository_manager.load_proprietary_converters_and_display_applications()
        # Load datatype display applications defined in local datatypes_conf.xml
        self.datatypes_registry.load_display_applications(self)
        # Load datatype converters defined in local datatypes_conf.xml
        self.datatypes_registry.load_datatype_converters(self.toolbox)
        # Load external metadata tool
        self.datatypes_registry.load_external_metadata_tool(self.toolbox)
        # Load history import/export tools.
        load_lib_tools(self.toolbox)
        # visualizations registry: associates resources with visualizations, controls how to render
        self.visualizations_registry = VisualizationsRegistry(
            self,
            directories_setting=self.config.visualization_plugins_directory,
            template_cache_dir=self.config.template_cache)
        # Tours registry
        self.tour_registry = ToursRegistry(self.config.tour_config_dir)
        # Webhooks registry
        self.webhooks_registry = WebhooksRegistry(self.config.webhooks_dirs)
        # Load security policy.
        self.security_agent = self.model.security_agent
        self.host_security_agent = galaxy.model.security.HostAgent(
            model=self.security_agent.model,
            permitted_actions=self.security_agent.permitted_actions)
        # Load quota management.
        if self.config.enable_quotas:
            self.quota_agent = galaxy.quota.QuotaAgent(self.model)
        else:
            self.quota_agent = galaxy.quota.NoQuotaAgent(self.model)
        # Heartbeat for thread profiling
        self.heartbeat = None
        from galaxy import auth
        self.auth_manager = auth.AuthManager(self)
        # Start the heartbeat process if configured and available (wait until
        # postfork if using uWSGI)
        if self.config.use_heartbeat:
            if heartbeat.Heartbeat:
                self.heartbeat = heartbeat.Heartbeat(
                    self.config,
                    period=self.config.heartbeat_interval,
                    fname=self.config.heartbeat_log)
                self.heartbeat.daemon = True
                self.application_stack.register_postfork_function(
                    self.heartbeat.start)

        if self.config.enable_oidc:
            from galaxy.authnz import managers
            self.authnz_manager = managers.AuthnzManager(
                self, self.config.oidc_config,
                self.config.oidc_backends_config)

        self.sentry_client = None
        if self.config.sentry_dsn:

            def postfork_sentry_client():
                import raven
                self.sentry_client = raven.Client(
                    self.config.sentry_dsn,
                    transport=raven.transport.HTTPTransport)

            self.application_stack.register_postfork_function(
                postfork_sentry_client)

        # Transfer manager client
        if self.config.get_bool('enable_beta_job_managers', False):
            from galaxy.jobs import transfer_manager
            self.transfer_manager = transfer_manager.TransferManager(self)
        # Start the job manager
        from galaxy.jobs import manager
        self.job_manager = manager.JobManager(self)
        self.application_stack.register_postfork_function(
            self.job_manager.start)
        self.proxy_manager = ProxyManager(self.config)

        from galaxy.workflow import scheduling_manager
        # Must be initialized after job_config.
        self.workflow_scheduling_manager = scheduling_manager.WorkflowSchedulingManager(
            self)

        # Must be initialized after any component that might make use of stack messaging is configured. Alternatively if
        # it becomes more commonly needed we could create a prefork function registration method like we do with
        # postfork functions.
        self.application_stack.init_late_prefork()

        self.containers = {}
        if self.config.enable_beta_containers_interface:
            self.containers = build_container_interfaces(
                self.config.containers_config_file,
                containers_conf=self.config.containers_conf)

        # Configure handling of signals
        handlers = {}
        if self.heartbeat:
            handlers[signal.SIGUSR1] = self.heartbeat.dump_signal_handler
        self._configure_signal_handlers(handlers)

        self.database_heartbeat = DatabaseHeartbeat(
            application_stack=self.application_stack)
        self.application_stack.register_postfork_function(
            self.database_heartbeat.start)

        # Start web stack message handling
        self.application_stack.register_postfork_function(
            self.application_stack.start)

        self.model.engine.dispose()

        # Inject url_for for components to more easily optionally depend
        # on url_for.
        self.url_for = url_for

        self.server_starttime = int(time.time())  # used for cachebusting
        log.info("Galaxy app startup finished %s" % self.startup_timer)
Example 8
    def __init__(self, **kwargs) -> None:
        startup_timer = ExecutionTimer()
        super().__init__(fsmon=True, **kwargs)
        self.haltables = [
            ("queue worker", self._shutdown_queue_worker),
            ("file watcher", self._shutdown_watcher),
            ("database heartbeat", self._shutdown_database_heartbeat),
            ("workflow scheduler", self._shutdown_scheduling_manager),
            ("object store", self._shutdown_object_store),
            ("job manager", self._shutdown_job_manager),
            ("application heartbeat", self._shutdown_heartbeat),
            ("repository manager", self._shutdown_repo_manager),
            ("database connection", self._shutdown_model),
            ("application stack", self._shutdown_application_stack),
        ]
        self._register_singleton(StructuredApp, self)
        # A lot of postfork initialization depends on the server name, ensure it is set immediately after forking before other postfork functions
        self.application_stack.register_postfork_function(
            self.application_stack.set_postfork_server_name, self)
        self.config.reload_sanitize_allowlist(
            explicit='sanitize_allowlist_file' in kwargs)
        self.amqp_internal_connection_obj = galaxy.queues.connection_from_config(
            self.config)
        # queue_worker *can* be initialized with a queue, but here we don't
        # want to and we'll allow postfork to bind and start it.
        self.queue_worker = self._register_singleton(GalaxyQueueWorker,
                                                     GalaxyQueueWorker(self))

        self._configure_tool_shed_registry()

        self.dependency_resolvers_view = self._register_singleton(
            DependencyResolversView, DependencyResolversView(self))
        self.test_data_resolver = self._register_singleton(
            TestDataResolver,
            TestDataResolver(file_dirs=self.config.tool_test_data_directories))
        self.dynamic_tool_manager = self._register_singleton(
            DynamicToolManager)
        self.api_keys_manager = self._register_singleton(ApiKeyManager)

        # Tool Data Tables
        self._configure_tool_data_tables(from_shed_config=False)
        # Load dbkey / genome build manager
        self._configure_genome_builds(data_table_name="__dbkeys__",
                                      load_old_style=True)

        # Genomes
        self.genomes = self._register_singleton(Genomes)
        # Data providers registry.
        self.data_provider_registry = self._register_singleton(
            DataProviderRegistry)

        # Initialize error report plugins.
        self.error_reports = self._register_singleton(
            ErrorReports, ErrorReports(self.config.error_report_file,
                                       app=self))

        # Setup a Tool Cache
        self.tool_cache = self._register_singleton(ToolCache)
        self.tool_shed_repository_cache = self._register_singleton(
            ToolShedRepositoryCache)
        # Watch various config files for immediate reload
        self.watchers = self._register_singleton(ConfigWatchers)
        self._configure_toolbox()
        # Load Data Manager
        self.data_managers = self._register_singleton(DataManagers)
        # Load the update repository manager.
        self.update_repository_manager = self._register_singleton(
            UpdateRepositoryManager, UpdateRepositoryManager(self))
        # Load proprietary datatype converters and display applications.
        self.installed_repository_manager.load_proprietary_converters_and_display_applications()
        # Load datatype display applications defined in local datatypes_conf.xml
        self.datatypes_registry.load_display_applications(self)
        # Load datatype converters defined in local datatypes_conf.xml
        self.datatypes_registry.load_datatype_converters(self.toolbox)
        # Load external metadata tool
        self.datatypes_registry.load_external_metadata_tool(self.toolbox)
        # Load history import/export tools.
        load_lib_tools(self.toolbox)
        self.toolbox.persist_cache(register_postfork=True)
        # visualizations registry: associates resources with visualizations, controls how to render
        self.visualizations_registry = self._register_singleton(
            VisualizationsRegistry,
            VisualizationsRegistry(
                self,
                directories_setting=self.config.visualization_plugins_directory,
                template_cache_dir=self.config.template_cache_path))
        # Tours registry
        tour_registry = build_tours_registry(self.config.tour_config_dir)
        self.tour_registry = tour_registry
        self[ToursRegistry] = tour_registry  # type: ignore[misc]
        # Webhooks registry
        self.webhooks_registry = self._register_singleton(
            WebhooksRegistry, WebhooksRegistry(self.config.webhooks_dir))
        # Load security policy.
        self.security_agent = self.model.security_agent
        self.host_security_agent = galaxy.model.security.HostAgent(
            model=self.security_agent.model,
            permitted_actions=self.security_agent.permitted_actions)
        # Load quota management.
        self.quota_agent = self._register_singleton(
            QuotaAgent, get_quota_agent(self.config, self.model))
        # Heartbeat for thread profiling
        self.heartbeat = None
        self.auth_manager = self._register_singleton(
            auth.AuthManager, auth.AuthManager(self.config))
        # Start the heartbeat process if configured and available (wait until
        # postfork if using uWSGI)
        if self.config.use_heartbeat:
            if heartbeat.Heartbeat:
                self.heartbeat = heartbeat.Heartbeat(
                    self.config,
                    period=self.config.heartbeat_interval,
                    fname=self.config.heartbeat_log)
                self.heartbeat.daemon = True
                self.application_stack.register_postfork_function(
                    self.heartbeat.start)

        self.authnz_manager = None
        if self.config.enable_oidc:
            from galaxy.authnz import managers
            self.authnz_manager = managers.AuthnzManager(
                self, self.config.oidc_config_file,
                self.config.oidc_backends_config_file)

        self.containers = {}
        if self.config.enable_beta_containers_interface:
            self.containers = build_container_interfaces(
                self.config.containers_config_file,
                containers_conf=self.config.containers_conf)

        if not self.config.enable_celery_tasks and self.config.history_audit_table_prune_interval > 0:
            self.prune_history_audit_task = IntervalTask(
                func=lambda: galaxy.model.HistoryAudit.prune(self.model.session),
                name="HistoryAuditTablePruneTask",
                interval=self.config.history_audit_table_prune_interval,
                immediate_start=False,
                time_execution=True)
            self.application_stack.register_postfork_function(
                self.prune_history_audit_task.start)
            self.haltables.append(("HistoryAuditTablePruneTask",
                                   self.prune_history_audit_task.shutdown))
        # Start the job manager
        self.application_stack.register_postfork_function(
            self.job_manager.start)
        # If app is not job handler but uses mule messaging.
        # Can be removed when removing mule support.
        self.job_manager._check_jobs_at_startup()
        self.proxy_manager = ProxyManager(self.config)

        # Must be initialized after job_config.
        self.workflow_scheduling_manager = scheduling_manager.WorkflowSchedulingManager(
            self)

        self.trs_proxy = self._register_singleton(TrsProxy,
                                                  TrsProxy(self.config))
        # Must be initialized after any component that might make use of stack messaging is configured. Alternatively if
        # it becomes more commonly needed we could create a prefork function registration method like we do with
        # postfork functions.
        self.application_stack.init_late_prefork()

        self.interactivetool_manager = InteractiveToolManager(self)

        # Configure handling of signals
        handlers = {}
        if self.heartbeat:
            handlers[signal.SIGUSR1] = self.heartbeat.dump_signal_handler
        self._configure_signal_handlers(handlers)

        self.database_heartbeat = DatabaseHeartbeat(
            application_stack=self.application_stack)
        self.database_heartbeat.add_change_callback(self.watchers.change_state)
        self.application_stack.register_postfork_function(
            self.database_heartbeat.start)

        # Start web stack message handling
        self.application_stack.register_postfork_function(
            self.application_stack.start)
        self.application_stack.register_postfork_function(
            self.queue_worker.bind_and_start)
        # Delay toolbox index until after startup
        self.application_stack.register_postfork_function(
            lambda: send_local_control_task(self,
                                            'rebuild_toolbox_search_index'))

        # Inject url_for for components to more easily optionally depend
        # on url_for.
        self.url_for = url_for

        self.server_starttime = int(time.time())  # used for cachebusting
        # Limit lifetime of tool shed repository cache to app startup
        self.tool_shed_repository_cache = None
        log.info(f"Galaxy app startup finished {startup_timer}")
Example 9
    def __init__(self, **kwargs):
        if not log.handlers:
            # Paste didn't handle it, so we need a temporary basic log
            # configured.  The handler added here gets dumped and replaced with
            # an appropriately configured logger in configure_logging below.
            logging.basicConfig(level=logging.DEBUG)
        log.debug("python path is: %s", ", ".join(sys.path))
        self.name = 'galaxy'
        self.startup_timer = ExecutionTimer()
        self.new_installation = False
        # Read config file and check for errors
        self.config = config.Configuration(**kwargs)
        self.config.check()
        config.configure_logging(self.config)
        self.configure_fluent_log()
        # A lot of postfork initialization depends on the server name, ensure it is set immediately after forking before other postfork functions
        self.application_stack = application_stack_instance(app=self)
        self.application_stack.register_postfork_function(self.application_stack.set_postfork_server_name, self)
        self.config.reload_sanitize_whitelist(explicit='sanitize_whitelist_file' in kwargs)
        self.amqp_internal_connection_obj = galaxy.queues.connection_from_config(self.config)
        # control_worker *can* be initialized with a queue, but here we don't
        # want to and we'll allow postfork to bind and start it.
        self.control_worker = GalaxyQueueWorker(self)

        self._configure_tool_shed_registry()
        self._configure_object_store(fsmon=True)
        # Setup the database engine and ORM
        config_file = kwargs.get('global_conf', {}).get('__file__', None)
        if config_file:
            log.debug('Using "galaxy.ini" config file: %s', config_file)
        check_migrate_tools = self.config.check_migrate_tools
        self._configure_models(check_migrate_databases=True, check_migrate_tools=check_migrate_tools, config_file=config_file)

        # Manage installed tool shed repositories.
        self.installed_repository_manager = installed_repository_manager.InstalledRepositoryManager(self)

        self._configure_datatypes_registry(self.installed_repository_manager)
        galaxy.model.set_datatypes_registry(self.datatypes_registry)

        # Security helper
        self._configure_security()
        # Tag handler
        self.tag_handler = GalaxyTagManager(self.model.context)
        self.dataset_collections_service = DatasetCollectionManager(self)
        self.history_manager = HistoryManager(self)
        self.dependency_resolvers_view = DependencyResolversView(self)
        self.test_data_resolver = test_data.TestDataResolver(file_dirs=self.config.tool_test_data_directories)
        self.library_folder_manager = FolderManager()
        self.library_manager = LibraryManager()

        # Tool Data Tables
        self._configure_tool_data_tables(from_shed_config=False)
        # Load dbkey / genome build manager
        self._configure_genome_builds(data_table_name="__dbkeys__", load_old_style=True)

        # Genomes
        self.genomes = Genomes(self)
        # Data providers registry.
        self.data_provider_registry = DataProviderRegistry()

        # Initialize job metrics manager, needs to be in place before
        # config so per-destination modifications can be made.
        self.job_metrics = job_metrics.JobMetrics(self.config.job_metrics_config_file, app=self)

        # Initialize error report plugins.
        self.error_reports = ErrorReports(self.config.error_report_file, app=self)

        # Initialize the job management configuration
        self.job_config = jobs.JobConfiguration(self)

        # Setup a Tool Cache
        self.tool_cache = ToolCache()
        self.tool_shed_repository_cache = ToolShedRepositoryCache(self)
        # Watch various config files for immediate reload
        self.watchers = ConfigWatchers(self)
        self._configure_toolbox()

        # Load Data Manager
        self.data_managers = DataManagers(self)
        # Load the update repository manager.
        self.update_repository_manager = update_repository_manager.UpdateRepositoryManager(self)
        # Load proprietary datatype converters and display applications.
        self.installed_repository_manager.load_proprietary_converters_and_display_applications()
        # Load datatype display applications defined in local datatypes_conf.xml
        self.datatypes_registry.load_display_applications(self)
        # Load datatype converters defined in local datatypes_conf.xml
        self.datatypes_registry.load_datatype_converters(self.toolbox)
        # Load external metadata tool
        self.datatypes_registry.load_external_metadata_tool(self.toolbox)
        # Load history import/export tools.
        load_lib_tools(self.toolbox)
        # visualizations registry: associates resources with visualizations, controls how to render
        self.visualizations_registry = VisualizationsRegistry(
            self,
            directories_setting=self.config.visualization_plugins_directory,
            template_cache_dir=self.config.template_cache)
        # Tours registry
        self.tour_registry = ToursRegistry(self.config.tour_config_dir)
        # Webhooks registry
        self.webhooks_registry = WebhooksRegistry(self.config.webhooks_dirs)
        # Load security policy.
        self.security_agent = self.model.security_agent
        self.host_security_agent = galaxy.security.HostAgent(
            model=self.security_agent.model,
            permitted_actions=self.security_agent.permitted_actions)
        # Load quota management.
        if self.config.enable_quotas:
            self.quota_agent = galaxy.quota.QuotaAgent(self.model)
        else:
            self.quota_agent = galaxy.quota.NoQuotaAgent(self.model)
        # Heartbeat for thread profiling
        self.heartbeat = None
        # Container for OpenID authentication routines
        if self.config.enable_openid:
            from galaxy.web.framework import openid_manager
            self.openid_manager = openid_manager.OpenIDManager(self.config.openid_consumer_cache_path)
            self.openid_providers = OpenIDProviders.from_file(self.config.openid_config_file)
        else:
            self.openid_providers = OpenIDProviders()
        from galaxy import auth
        self.auth_manager = auth.AuthManager(self)
        # Start the heartbeat process if configured and available (wait until
        # postfork if using uWSGI)
        if self.config.use_heartbeat:
            if heartbeat.Heartbeat:
                self.heartbeat = heartbeat.Heartbeat(
                    self.config,
                    period=self.config.heartbeat_interval,
                    fname=self.config.heartbeat_log
                )
                self.heartbeat.daemon = True
                self.application_stack.register_postfork_function(self.heartbeat.start)

        if self.config.enable_oidc:
            from galaxy.authnz import managers
            self.authnz_manager = managers.AuthnzManager(self, self.config.oidc_config, self.config.oidc_backends_config)

        self.sentry_client = None
        if self.config.sentry_dsn:

            def postfork_sentry_client():
                import raven
                self.sentry_client = raven.Client(self.config.sentry_dsn, transport=raven.transport.HTTPTransport)

            self.application_stack.register_postfork_function(postfork_sentry_client)

        # Transfer manager client
        if self.config.get_bool('enable_beta_job_managers', False):
            from galaxy.jobs import transfer_manager
            self.transfer_manager = transfer_manager.TransferManager(self)
        # Start the job manager
        from galaxy.jobs import manager
        self.job_manager = manager.JobManager(self)
        self.application_stack.register_postfork_function(self.job_manager.start)
        self.proxy_manager = ProxyManager(self.config)

        from galaxy.workflow import scheduling_manager
        # Must be initialized after job_config.
        self.workflow_scheduling_manager = scheduling_manager.WorkflowSchedulingManager(self)

        self.containers = {}
        if self.config.enable_beta_containers_interface:
            self.containers = build_container_interfaces(
                self.config.containers_config_file,
                containers_conf=self.config.containers_conf
            )

        # Configure handling of signals
        handlers = {}
        if self.heartbeat:
            handlers[signal.SIGUSR1] = self.heartbeat.dump_signal_handler
        self._configure_signal_handlers(handlers)

        # Start web stack message handling
        self.application_stack.register_postfork_function(self.application_stack.start)

        self.model.engine.dispose()
        self.server_starttime = int(time.time())  # used for cachebusting
        log.info("Galaxy app startup finished %s" % self.startup_timer)