Пример #1
0
 def instantiate(client, datadir):
     """Build a CommandAppLogic and register a task type for every app.

     Each app's task type is created without a widget or controller
     (both None), which is what the command-line logic expects.
     """
     logic = CommandAppLogic(client, datadir)
     manager = AppsManager()
     manager.load_apps()
     dummy_ui_args = (None, None)
     for application in manager.apps.values():
         task_type = application.task_type_info(*dummy_ui_args)
         logic.register_new_task_type(task_type)
     return logic
Пример #2
0
    def __init__(self,
                 node_name,
                 node,
                 keys_auth,
                 listen_address="",
                 listen_port=0,
                 root_path="res",
                 use_distributed_resources=True,
                 tasks_dir="tasks",
                 task_persistence=False):
        """Set up task bookkeeping, app registry and resource managers.

        :param node_name: human-readable name of this node
        :param node: network node descriptor
        :param keys_auth: key authenticator; its key id identifies this node
        :param listen_address: address tasks are served from (may be empty)
        :param listen_port: port tasks are served from (0 = unset)
        :param root_path: base directory handed to DirManager
        :param use_distributed_resources: toggle for distributed resource
            management
        :param tasks_dir: directory used for CompTaskKeeper persistence
        :param task_persistence: when True, previously persisted tasks are
            restored at the end of initialization
        """
        super(TaskManager, self).__init__()

        # Discover all bundled apps and index their task types by
        # lower-cased type name.
        self.apps_manager = AppsManager()
        self.apps_manager.load_apps()

        apps = self.apps_manager.apps.values()
        task_types = [app.task_type_info(None, app.controller) for app in apps]
        self.task_types = {t.name.lower(): t for t in task_types}

        self.node_name = node_name
        self.node = node
        self.keys_auth = keys_auth
        self.key_id = keys_auth.get_key_id()

        # task_id -> task, task_id -> state, subtask_id -> owning task_id
        self.tasks = {}
        self.tasks_states = {}
        self.subtask2task_mapping = {}

        self.listen_address = listen_address
        self.listen_port = listen_port

        # FIXME Remove this variable and make task persistance obligatory after it is more tested
        # Remember to also remove it from init params
        self.task_persistence = task_persistence

        # Make sure the tasks directory exists before anything writes to it.
        self.tasks_dir = Path(tasks_dir)
        if not self.tasks_dir.is_dir():
            self.tasks_dir.mkdir(parents=True)
        self.root_path = root_path
        self.dir_manager = DirManager(self.get_task_manager_root())

        # resource_manager = OpenStackSwiftResourceManager(self.dir_manager,
        #                                                  resource_dir_method=self.dir_manager.get_task_temporary_dir)
        resource_manager = HyperdriveResourceManager(
            self.dir_manager,
            resource_dir_method=self.dir_manager.get_task_temporary_dir)
        self.task_result_manager = EncryptedResultPackageManager(
            resource_manager)

        # Statuses that mark a task as still active.
        self.activeStatus = [
            TaskStatus.computing, TaskStatus.starting, TaskStatus.waiting
        ]
        self.use_distributed_resources = use_distributed_resources

        self.comp_task_keeper = CompTaskKeeper(self.tasks_dir,
                                               persist=self.task_persistence)
        # Restoring tasks must run last: it relies on the managers above.
        if self.task_persistence:
            self.restore_tasks()
Пример #3
0
 def __init__(self,
              datadir=None,
              transaction_system=False,
              **config_overrides):
     """Initialize the base node, then load every bundled app and expose
     their environments as the node's defaults."""
     super(OptNode, self).__init__(datadir, transaction_system,
                                   **config_overrides)
     manager = AppsManager()
     manager.load_apps()
     self.apps_manager = manager
     self.default_environments = manager.get_env_list()
Пример #4
0
    def __init__(self,
                 node_name,
                 node,
                 keys_auth,
                 root_path,
                 use_distributed_resources=True,
                 tasks_dir="tasks",
                 task_persistence=True,
                 apps_manager=None,
                 finished_cb=None):
        """Set up task bookkeeping, app registry and resource managers.

        :param node_name: human-readable name of this node
        :param node: network node descriptor
        :param keys_auth: key authenticator; its key_id identifies this node
        :param root_path: base directory handed to DirManager
        :param use_distributed_resources: toggle for distributed resource
            management
        :param tasks_dir: base directory; task state lives in its
            "tmanager" subdirectory, CompTaskKeeper uses the base itself
        :param task_persistence: when True, previously persisted tasks are
            restored at the end of initialization
        :param apps_manager: registry of available apps; a fresh
            AppsManager is created when not supplied
        :param finished_cb: callable invoked when a task finishes
        """
        super().__init__()

        # BUGFIX: the default used to be ``apps_manager=AppsManager()`` — a
        # mutable default evaluated once at definition time and silently
        # shared by every TaskManager instance. Create a fresh instance per
        # call instead (observable behavior for default callers is the same:
        # an AppsManager with no apps loaded).
        if apps_manager is None:
            apps_manager = AppsManager()

        # Index the registered apps' task types by lower-cased type name.
        self.apps_manager = apps_manager
        apps = list(apps_manager.apps.values())
        task_types = [app.task_type_info() for app in apps]
        self.task_types = {t.name.lower(): t for t in task_types}

        self.node_name = node_name
        self.node = node
        self.keys_auth = keys_auth
        self.key_id = keys_auth.key_id

        # task_id -> task, task_id -> state, subtask_id -> owning task_id
        self.tasks: Dict[str, Task] = {}
        self.tasks_states: Dict[str, TaskState] = {}
        self.subtask2task_mapping: Dict[str, str] = {}

        self.task_persistence = task_persistence

        # Make sure the task-manager directory exists before any writes.
        tasks_dir = Path(tasks_dir)
        self.tasks_dir = tasks_dir / "tmanager"
        if not self.tasks_dir.is_dir():
            self.tasks_dir.mkdir(parents=True)
        self.root_path = root_path
        self.dir_manager = DirManager(self.get_task_manager_root())

        resource_manager = HyperdriveResourceManager(
            self.dir_manager,
            resource_dir_method=self.dir_manager.get_task_temporary_dir,
        )
        self.task_result_manager = EncryptedResultPackageManager(
            resource_manager)

        # Statuses that mark a task as still active.
        self.activeStatus = [
            TaskStatus.computing, TaskStatus.starting, TaskStatus.waiting
        ]
        self.use_distributed_resources = use_distributed_resources

        # Note: CompTaskKeeper persists into the *base* tasks_dir, not the
        # "tmanager" subdirectory used for task state above.
        self.comp_task_keeper = CompTaskKeeper(
            tasks_dir,
            persist=self.task_persistence,
        )

        self.requestor_stats_manager = RequestorTaskStatsManager()

        self.finished_cb = finished_cb

        # Restoring tasks must run last: it relies on the managers above.
        if self.task_persistence:
            self.restore_tasks()
Пример #5
0
    def test_get_environments(self):
        """All app environments registered with the client should come back
        from logic.get_environments() as non-empty dicts."""
        from apps.appsmanager import AppsManager

        logic = GuiApplicationLogic()
        logic.customizer = Mock()
        logic.client = self.client

        apps_manager = AppsManager()
        apps_manager.load_apps()

        for env in apps_manager.get_env_list():
            self.client.environments_manager.add_environment(env)

        environments = sync_wait(logic.get_environments())

        # Idiom fixes: truthiness instead of len() > 0, and generator
        # expressions instead of materialized lists inside all() (C419).
        assert environments
        assert all(bool(env) for env in environments)
        assert all(isinstance(env, dict) for env in environments)
Пример #6
0
 def setUp(self):
     """Build a BenchmarkManager over every app's benchmarks, with the
     benchmark-enabled check stubbed to always report True."""
     super().setUp()
     apps_manager = AppsManager()
     apps_manager.load_all_apps()
     apps_manager._benchmark_enabled = Mock(return_value=True)
     benchmarks = apps_manager.get_benchmarks()
     self.b = BenchmarkManager("NODE1", Mock(), self.path, benchmarks)
Пример #7
0
class OptNode(Node):
    """Golem node that loads all bundled apps on startup and provides
    click option-parsing callbacks for the command-line entry point."""

    def __init__(self,
                 datadir=None,
                 transaction_system=False,
                 **config_overrides):
        """Initialize the base node, load every bundled app and expose
        their environments as the node's defaults."""
        super(OptNode, self).__init__(datadir, transaction_system,
                                      **config_overrides)
        self.apps_manager = AppsManager()
        self.apps_manager.load_apps()
        self.default_environments = self.apps_manager.get_env_list()

    def _get_task_builder(self, task_def):
        """Return the builder registered for the task definition's type."""
        return self.apps_manager.apps[task_def.task_type].builder

    @staticmethod
    def parse_node_addr(ctx, param, value):
        """click callback: validate *value* as a network address.

        Returns the original string (or '' when empty); raises
        click.BadParameter when the address is invalid.
        """
        del ctx, param
        if value:
            try:
                SocketAddress(value, 1)
                return value
            except AddressValueError as e:
                # BUGFIX: exception objects have no `.message` attribute on
                # Python 3 (deprecated since 2.6); format the exception
                # itself, which works on both 2 and 3.
                raise click.BadParameter(
                    "Invalid network address specified: {}".format(e))
        return ''

    @staticmethod
    def parse_rpc_address(ctx, param, value):
        """click callback: parse *value* into a SocketAddress (None when
        *value* is empty); raises click.BadParameter on failure."""
        del ctx, param
        if value:
            try:
                return SocketAddress.parse(value)
            except AddressValueError as e:
                # BUGFIX: `.message` does not exist on Python 3 exceptions.
                raise click.BadParameter(
                    "Invalid RPC address specified: {}".format(e))

    @staticmethod
    def parse_peer(ctx, param, value):
        """click callback: parse each entry of *value* into a
        SocketAddress; raises click.BadParameter on the first bad one."""
        del ctx, param
        addresses = []
        for arg in value:
            try:
                addresses.append(SocketAddress.parse(arg))
            except AddressValueError as e:
                # BUGFIX: `.message` does not exist on Python 3 exceptions.
                raise click.BadParameter(
                    "Invalid peer address specified: {}".format(e))
        return addresses

    @staticmethod
    def parse_task_file(ctx, param, value):
        """click callback: load task definitions from JSON files, giving
        each a fresh uuid4 task id; raises click.BadParameter on bad JSON."""
        del ctx, param
        tasks = []
        for task_file in value:
            with open(task_file, 'r') as f:
                try:
                    task_def = json.loads(f.read())
                except ValueError as e:
                    # BUGFIX: `.message` does not exist on Python 3.
                    raise click.BadParameter(
                        "Invalid task json file: {}".format(e))
            # BUGFIX: json.loads returns a dict, so the original attribute
            # assignment (task_def.task_id = ...) raised AttributeError;
            # store the id as a key instead. NOTE(review): if a custom
            # object-returning decoder was intended here, revert this.
            task_def['task_id'] = str(uuid.uuid4())
            tasks.append(task_def)
        return tasks
Пример #8
0
    def __init__(self,
                 node,
                 config_desc: ClientConfigDescriptor,
                 client,
                 use_ipv6=False,
                 use_docker_manager=True,
                 task_archiver=None,
                 apps_manager=None,
                 task_finished_cb=None) -> None:
        """Wire up the task subsystem: header keeper, task manager, task
        computer, benchmark manager, connection helpers and the underlying
        TCP network server.

        :param apps_manager: registry of available apps; a fresh
            AppsManager is created when not supplied
        :param task_finished_cb: forwarded to both TaskManager and
            TaskComputer as their finished-callback
        """
        # BUGFIX: the default used to be ``apps_manager=AppsManager()`` — a
        # mutable default evaluated once at definition time and silently
        # shared by every TaskServer instance. Create a fresh instance per
        # call instead (observable behavior for default callers is the same:
        # an AppsManager with no apps loaded).
        if apps_manager is None:
            apps_manager = AppsManager()

        self.client = client
        self.keys_auth = client.keys_auth
        self.config_desc = config_desc

        self.node = node
        self.task_archiver = task_archiver
        self.task_keeper = TaskHeaderKeeper(
            environments_manager=client.environments_manager,
            node=self.node,
            min_price=config_desc.min_price,
            task_archiver=task_archiver)
        self.task_manager = TaskManager(
            config_desc.node_name,
            self.node,
            self.keys_auth,
            root_path=TaskServer.__get_task_manager_root(client.datadir),
            use_distributed_resources=config_desc.
            use_distributed_resource_management,
            tasks_dir=os.path.join(client.datadir, 'tasks'),
            apps_manager=apps_manager,
            finished_cb=task_finished_cb,
        )
        benchmarks = self.task_manager.apps_manager.get_benchmarks()
        self.benchmark_manager = BenchmarkManager(
            node_name=config_desc.node_name,
            task_server=self,
            root_path=self.get_task_computer_root(),
            benchmarks=benchmarks)
        self.task_computer = TaskComputer(
            task_server=self,
            use_docker_manager=use_docker_manager,
            finished_cb=task_finished_cb)
        self.task_connections_helper = TaskConnectionsHelper()
        self.task_connections_helper.task_server = self
        self.task_sessions = {}
        self.task_sessions_incoming = weakref.WeakSet()

        # Trust score bounds used when rating other nodes.
        self.max_trust = 1.0
        self.min_trust = 0.0

        self.last_messages = []
        self.last_message_time_threshold = config_desc.task_session_timeout

        self.results_to_send = {}
        self.failures_to_send = {}

        self.use_ipv6 = use_ipv6

        self.forwarded_session_request_timeout = \
            config_desc.waiting_for_task_session_timeout
        self.forwarded_session_requests = {}
        self.response_list = {}
        self.acl = get_acl(Path(client.datadir))
        self.resource_handshakes = {}

        network = TCPNetwork(
            ProtocolFactory(SafeProtocol, self, SessionFactory(TaskSession)),
            use_ipv6)
        PendingConnectionsServer.__init__(self, config_desc, network)
        # instantiate ReceivedMessageHandler connected to self
        # to register in golem.network.concent.handlers_library
        from golem.network.concent import \
            received_handler as concent_received_handler
        self.concent_handler = \
            concent_received_handler.TaskServerMessageHandler(self)

        dispatcher.connect(self.income_listener, signal='golem.income')

        dispatcher.connect(self.finished_task_listener,
                           signal='golem.taskmanager')
Пример #9
0
    def __init__(
            self,  # noqa pylint: disable=too-many-arguments
            datadir: str,
            app_config: AppConfig,
            config_desc: ClientConfigDescriptor,
            # SEE golem.core.variables.CONCENT_CHOICES
            concent_variant: dict,
            peers: Optional[List[SocketAddress]] = None,
            use_monitor: bool = None,
            use_talkback: bool = None,
            use_docker_manager: bool = True,
            geth_address: Optional[str] = None,
            password: Optional[str] = None) -> None:
        """Prepare the node's collaborators: database, transaction system,
        RPC placeholders and a deferred Client factory.

        When ``use_monitor`` / ``use_talkback`` are None the corresponding
        value from ``config_desc`` is used. When ``password`` is supplied,
        the account is unlocked immediately and a plain Exception is raised
        if it is wrong.
        """

        # DO NOT MAKE THIS IMPORT GLOBAL
        # otherwise, reactor will install global signal handlers on import
        # and will prevent the IOCP / kqueue reactors from being installed.
        from twisted.internet import reactor

        self._reactor = reactor
        self._app_config = app_config
        self._config_desc = config_desc
        self._datadir = datadir
        self._use_docker_manager = use_docker_manager
        self._docker_manager: Optional[DockerManager] = None

        # Explicit arguments override the persisted config values.
        self._use_monitor = config_desc.enable_monitor \
            if use_monitor is None else use_monitor
        self._use_talkback = config_desc.enable_talkback \
            if use_talkback is None else use_talkback

        self._keys_auth: Optional[KeysAuth] = None
        self._ets = TransactionSystem(
            Path(datadir) / 'transaction_system',
            EthereumConfig,
        )
        self._ets.backwards_compatibility_tx_storage(Path(datadir))
        self.concent_variant = concent_variant

        # RPC pieces are filled in later by _start_rpc / _start_session.
        self.rpc_router: Optional[CrossbarRouter] = None
        self.rpc_session: Optional[Session] = None
        self._rpc_publisher: Optional[Publisher] = None

        self._peers: List[SocketAddress] = peers or []

        # Initialize database
        self._db = Database(db,
                            fields=DB_FIELDS,
                            models=DB_MODELS,
                            db_dir=datadir)

        self.client: Optional[Client] = None

        self.apps_manager = AppsManager()

        # The Client cannot be built until keys are unlocked, so keep a
        # factory and invoke it once KeysAuth becomes available.
        self._client_factory = lambda keys_auth: Client(
            datadir=datadir,
            app_config=app_config,
            config_desc=config_desc,
            keys_auth=keys_auth,
            database=self._db,
            transaction_system=self._ets,
            use_docker_manager=use_docker_manager,
            use_monitor=self._use_monitor,
            concent_variant=concent_variant,
            geth_address=geth_address,
            apps_manager=self.apps_manager,
            task_finished_cb=self._try_shutdown)

        if password is not None:
            if not self.set_password(password):
                raise Exception("Password incorrect")
Пример #10
0
class Node(object):
    """ Simple Golem Node connecting console user interface with Client
    :type client golem.client.Client:
    """
    def __init__(
            self,  # noqa pylint: disable=too-many-arguments
            datadir: str,
            app_config: AppConfig,
            config_desc: ClientConfigDescriptor,
            # SEE golem.core.variables.CONCENT_CHOICES
            concent_variant: dict,
            peers: Optional[List[SocketAddress]] = None,
            use_monitor: bool = None,
            use_talkback: bool = None,
            use_docker_manager: bool = True,
            geth_address: Optional[str] = None,
            password: Optional[str] = None) -> None:
        """Prepare the node's collaborators: database, transaction system,
        RPC placeholders and a deferred Client factory.

        When ``use_monitor`` / ``use_talkback`` are None the corresponding
        value from ``config_desc`` is used. When ``password`` is supplied,
        the account is unlocked immediately and a plain Exception is raised
        if it is wrong.
        """

        # DO NOT MAKE THIS IMPORT GLOBAL
        # otherwise, reactor will install global signal handlers on import
        # and will prevent the IOCP / kqueue reactors from being installed.
        from twisted.internet import reactor

        self._reactor = reactor
        self._app_config = app_config
        self._config_desc = config_desc
        self._datadir = datadir
        self._use_docker_manager = use_docker_manager
        self._docker_manager: Optional[DockerManager] = None

        # Explicit arguments override the persisted config values.
        self._use_monitor = config_desc.enable_monitor \
            if use_monitor is None else use_monitor
        self._use_talkback = config_desc.enable_talkback \
            if use_talkback is None else use_talkback

        self._keys_auth: Optional[KeysAuth] = None
        self._ets = TransactionSystem(
            Path(datadir) / 'transaction_system',
            EthereumConfig,
        )
        self._ets.backwards_compatibility_tx_storage(Path(datadir))
        self.concent_variant = concent_variant

        # RPC pieces are filled in later by _start_rpc / _start_session.
        self.rpc_router: Optional[CrossbarRouter] = None
        self.rpc_session: Optional[Session] = None
        self._rpc_publisher: Optional[Publisher] = None

        self._peers: List[SocketAddress] = peers or []

        # Initialize database
        self._db = Database(db,
                            fields=DB_FIELDS,
                            models=DB_MODELS,
                            db_dir=datadir)

        self.client: Optional[Client] = None

        self.apps_manager = AppsManager()

        # The Client cannot be built until keys are unlocked, so keep a
        # factory and invoke it once KeysAuth becomes available.
        self._client_factory = lambda keys_auth: Client(
            datadir=datadir,
            app_config=app_config,
            config_desc=config_desc,
            keys_auth=keys_auth,
            database=self._db,
            transaction_system=self._ets,
            use_docker_manager=use_docker_manager,
            use_monitor=self._use_monitor,
            concent_variant=concent_variant,
            geth_address=geth_address,
            apps_manager=self.apps_manager,
            task_finished_cb=self._try_shutdown)

        if password is not None:
            if not self.set_password(password):
                raise Exception("Password incorrect")

    def start(self) -> None:
        """Start RPC, then terms/keys/docker checks in parallel, set up the
        client and run the reactor until shutdown."""

        try:
            rpc = self._start_rpc()

            def on_rpc_ready() -> Deferred:
                terms_ = self._check_terms()
                keys = self._start_keys_auth()
                docker = self._start_docker()
                return gatherResults([terms_, keys, docker],
                                     consumeErrors=True)

            chain_function(rpc, on_rpc_ready).addCallbacks(
                self._setup_client,
                self._error('keys or docker'),
            ).addErrback(self._error('setup client'))
            self._reactor.run()
        except Exception:  # pylint: disable=broad-except
            logger.exception("Application error")

    @rpc_utils.expose('ui.quit')
    def quit(self) -> None:
        """Stop the docker manager and the reactor from a separate thread
        so the caller (e.g. an RPC handler) returns immediately."""
        def _quit():
            docker_manager = self._docker_manager
            if docker_manager:
                docker_manager.quit()

            reactor = self._reactor
            if reactor.running:
                reactor.callFromThread(reactor.stop)

        # Call in a separate thread and return early
        from threading import Thread
        Thread(target=_quit).start()

    @rpc_utils.expose('golem.password.set')
    def set_password(self, password: str) -> bool:
        """Unlock (or create) the account keys with *password*.

        Returns False when the password is wrong, True otherwise.
        """
        logger.info("Got password")

        try:
            self._keys_auth = KeysAuth(
                datadir=self._datadir,
                private_key_name=PRIVATE_KEY,
                password=password,
                difficulty=self._config_desc.key_difficulty,
            )
            # When Golem is ready to use different Ethereum account for
            # payments and identity this should be called only when
            # idendity was not just created above for the first time.
            self._ets.backwards_compatibility_privkey(
                self._keys_auth._private_key,  # noqa pylint: disable=protected-access
                password,
            )
            self._ets.set_password(password)
        except WrongPassword:
            logger.info("Password incorrect")
            return False
        return True

    @rpc_utils.expose('golem.password.key_exists')
    def key_exists(self) -> bool:
        """Return True when a private key file already exists on disk."""
        return KeysAuth.key_exists(self._datadir, PRIVATE_KEY)

    @rpc_utils.expose('golem.password.unlocked')
    def is_account_unlocked(self) -> bool:
        """Return True once KeysAuth has been created (account unlocked)."""
        return self._keys_auth is not None

    @rpc_utils.expose('golem.mainnet')
    @classmethod
    def is_mainnet(cls) -> bool:
        """Return True when this build targets the Ethereum mainnet."""
        return IS_MAINNET

    def _start_rpc(self) -> Deferred:
        """Start the Crossbar router and chain into session creation."""
        self.rpc_router = rpc = CrossbarRouter(
            host=self._config_desc.rpc_address,
            port=self._config_desc.rpc_port,
            datadir=self._datadir,
        )
        self._reactor.addSystemEventTrigger("before", "shutdown", rpc.stop)

        deferred = rpc.start(self._reactor)
        return chain_function(deferred, self._start_session)

    def _start_session(self) -> Optional[Deferred]:
        """Connect an RPC session to the router and expose this node's
        procedures once connected."""
        if not self.rpc_router:
            self._stop_on_error("rpc", "RPC router is not available")
            return None

        crsb_user = self.rpc_router.cert_manager.CrossbarUsers.golemapp
        self.rpc_session = Session(
            self.rpc_router.address,
            cert_manager=self.rpc_router.cert_manager,
            use_ipv6=self._config_desc.use_ipv6,
            crsb_user=crsb_user,
            crsb_user_secret=self.rpc_router.cert_manager.get_secret(
                crsb_user))
        deferred = self.rpc_session.connect()

        def on_connect(*_):
            methods = rpc_utils.object_method_map(self)
            methods['sys.exposed_procedures'] = \
                self.rpc_session.exposed_procedures
            self.rpc_session.add_procedures(methods)
            self._rpc_publisher = Publisher(self.rpc_session)
            StatusPublisher.set_publisher(self._rpc_publisher)

        return deferred.addCallbacks(on_connect, self._error('rpc session'))

    @rpc_utils.expose('golem.terms')
    @staticmethod
    def are_terms_accepted():
        """Return whether the general terms of use have been accepted."""
        return terms.TermsOfUse.are_accepted()

    @rpc_utils.expose('golem.concent.terms')
    @classmethod
    def are_concent_terms_accepted(cls):
        """Return whether the Concent terms of use have been accepted."""
        return terms.ConcentTermsOfUse.are_accepted()

    @rpc_utils.expose('golem.terms.accept')
    def accept_terms(self,
                     enable_monitor: Optional[bool] = None,
                     enable_talkback: Optional[bool] = None) -> None:
        """Accept the terms of use, optionally persisting the monitor and
        talkback choices into the application config."""

        if enable_talkback is not None:
            self._config_desc.enable_talkback = enable_talkback
            self._use_talkback = enable_talkback

        if enable_monitor is not None:
            self._config_desc.enable_monitor = enable_monitor
            self._use_monitor = enable_monitor

        self._app_config.change_config(self._config_desc)
        return terms.TermsOfUse.accept()

    @rpc_utils.expose('golem.concent.terms.accept')
    @classmethod
    def accept_concent_terms(cls):
        """Accept the Concent terms of use."""
        return terms.ConcentTermsOfUse.accept()

    @rpc_utils.expose('golem.terms.show')
    @staticmethod
    def show_terms():
        """Return the text of the general terms of use."""
        return terms.TermsOfUse.show()

    @rpc_utils.expose('golem.concent.terms.show')
    @classmethod
    def show_concent_terms(cls):
        """Return the text of the Concent terms of use."""
        return terms.ConcentTermsOfUse.show()

    @rpc_utils.expose('golem.version')
    @staticmethod
    def get_golem_version():
        """Return the running Golem package version string."""
        return golem.__version__

    @rpc_utils.expose('golem.graceful_shutdown')
    def graceful_shutdown(self) -> ShutdownResponse:
        """Toggle shutdown mode: quit now when idle, otherwise stop
        accepting tasks and wait for the running ones to finish."""
        if self.client is None:
            logger.warning('Shutdown called when client=None, try again later')
            return ShutdownResponse.off

        # is in shutdown? turn off as toggle
        if self._config_desc.in_shutdown:
            self.client.update_setting('in_shutdown', False)
            logger.info('Turning off shutdown mode')
            return ShutdownResponse.off

        # is not providing nor requesting, normal shutdown
        if not self._is_task_in_progress():
            logger.info('Node not working, executing normal shutdown')
            self.quit()
            return ShutdownResponse.quit

        # configure in_shutdown
        logger.info('Enabling shutdown mode, no more tasks can be started')
        self.client.update_setting('in_shutdown', True)

        # subscribe to events

        return ShutdownResponse.on

    def _try_shutdown(self) -> None:
        """Quit if shutdown mode is on and no task is still in progress;
        called as the task-finished callback."""
        # is not in shutdown?
        if not self._config_desc.in_shutdown:
            logger.debug('Checking shutdown, no shutdown configure')
            return

        if self._is_task_in_progress():
            logger.info('Shutdown checked, a task is still in progress')
            return

        logger.info('Node done with all tasks, shutting down')
        self.quit()

    def _is_task_in_progress(self) -> bool:
        """Return True when this node is requesting or computing a task."""
        if self.client is None:
            logger.debug('_is_task_in_progress? False: client=None')
            return False

        task_server = self.client.task_server
        if task_server is None or task_server.task_manager is None:
            logger.debug('_is_task_in_progress? False: task_manager=None')
            return False

        task_requestor_progress = task_server.task_manager.get_progresses()
        if task_requestor_progress:
            logger.debug('_is_task_in_progress? requestor=%r', True)
            return True

        if task_server.task_computer is None:
            logger.debug('_is_task_in_progress? False: task_computer=None')
            return False

        task_provider_progress = task_server.task_computer.assigned_subtask
        logger.debug('_is_task_in_progress? provider=%r, requestor=False',
                     task_provider_progress)
        return bool(task_provider_progress)

    @require_rpc_session()
    def _check_terms(self) -> Optional[Deferred]:
        """Block (in a worker thread) until all required terms of use have
        been accepted, polling while the reactor runs."""
        def wait_for_terms():
            sleep_time = 5
            while not self.are_terms_accepted() and self._reactor.running:
                logger.info(
                    'Terms of use must be accepted before using Golem. '
                    'Run `golemcli terms show` to display the terms '
                    'and `golemcli terms accept` to accept them.')
                time.sleep(sleep_time)
            if None in self.concent_variant.values():
                return  # Concent disabled
            while not terms.ConcentTermsOfUse.are_accepted() \
                    and self._reactor.running:
                logger.info(
                    'Concent terms of use must be accepted before using'
                    ' Concent service.'
                    ' Run `golemcli concent terms show`'
                    ' to display the terms'
                    ' and `golemcli concent terms accept` to accept them.', )
                time.sleep(sleep_time)

        return threads.deferToThread(wait_for_terms)

    @require_rpc_session()
    def _start_keys_auth(self) -> Optional[Deferred]:
        """Block (in a worker thread) until the account is unlocked via
        set_password, publishing status events while waiting."""
        def create_keysauth():
            # If keys_auth already exists it means we used command line flag
            # and don't need to inform client about required password
            if self.is_account_unlocked():
                return

            tip_msg = 'Run `golemcli account unlock` and enter your password.'

            if self.key_exists():
                event = 'get_password'
                tip_msg = 'Waiting for password to unlock the account. ' \
                          f'{tip_msg}'
            else:
                event = 'new_password'
                tip_msg = 'New account, waiting for password to be set. ' \
                          f'{tip_msg}'

            while not self.is_account_unlocked() and self._reactor.running:
                logger.info(tip_msg)
                StatusPublisher.publish(Component.client, event, Stage.pre)
                time.sleep(5)

            StatusPublisher.publish(Component.client, event, Stage.post)

        return threads.deferToThread(create_keysauth)

    def _start_docker(self) -> Optional[Deferred]:
        """Install and verify the docker manager in a worker thread, or
        return None when docker is disabled."""
        if not self._use_docker_manager:
            return None

        def start_docker():
            self._docker_manager = DockerManager.install(self._config_desc)
            self._docker_manager.check_environment()  # noqa pylint: disable=no-member

        return threads.deferToThread(start_docker)

    @require_rpc_session()
    def _setup_client(self, *_) -> None:
        """Build the Client via the stored factory and schedule _run."""

        if not self._keys_auth:
            # NOTE(review): _error() returns a partial that is never invoked
            # here, so this branch neither logs nor stops the reactor —
            # self._stop_on_error(...) was probably intended. Confirm.
            self._error("KeysAuth is not available")
            return

        from golem.tools.talkback import enable_sentry_logger
        enable_sentry_logger(self._use_talkback)

        self.client = self._client_factory(self._keys_auth)
        self._reactor.addSystemEventTrigger("before", "shutdown",
                                            self.client.quit)

        self.client.set_rpc_publisher(self._rpc_publisher)

        golem_async.async_run(
            golem_async.AsyncRequest(self._run),
            error=self._error('Cannot start the client'),
        )

    @require_rpc_session()
    def _run(self, *_) -> None:
        """Start the client, connect to known peers and register the
        client's RPC procedures with the session."""
        if not self.client:
            self._stop_on_error("client", "Client is not available")
            return

        self._setup_apps()
        self.client.sync()

        try:
            self.client.start()
            for peer in self._peers:
                self.client.connect(peer)
        except SystemExit:
            self._reactor.callFromThread(self._reactor.stop)
            return

        methods = self.client.get_wamp_rpc_mapping()

        def rpc_ready(_):
            logger.info('All procedures registered in WAMP router')
            self._rpc_publisher.publish(
                rpceventnames.Golem.procedures_registered, )

        # pylint: disable=no-member
        self.rpc_session.add_procedures(methods).addCallback(  # type: ignore
            rpc_ready, )
        # pylint: enable=no-member

    def _setup_apps(self) -> None:
        """Load all apps and register their environments (tasks enabled)
        with the client's environments manager."""
        if not self.client:
            self._stop_on_error("client", "Client is not available")
            return

        self.apps_manager.load_all_apps()

        for env in self.apps_manager.get_env_list():
            env.accept_tasks = True
            self.client.environments_manager.add_environment(env)

    def _error(self, msg: str) -> Callable:
        """Return an errback that stops the node, tagging failures *msg*."""
        return functools.partial(self._stop_on_error, msg)

    def _stop_on_error(self, msg: str, err: Any) -> None:
        """Log *err* (with traceback when it is a Failure) and stop the
        reactor if it is still running."""
        if self._reactor.running:
            exc_info = (err.type, err.value, err.getTracebackObject()) \
                if isinstance(err, Failure) else None
            logger.error("Stopping because of %r error: %r",
                         msg,
                         err,
                         exc_info=exc_info)
            self._reactor.callFromThread(self._reactor.stop)
Пример #11
0
import click
from apps.appsmanager import AppsManager
from golem.core.common import config_logging
from golem.core.deferred import install_unhandled_error_logger
from golem.network.transport.tcpnetwork import SocketAddress
from golem.rpc.mapping.core import CORE_METHOD_MAP
from golem.rpc.session import object_method_map, Session, WebSocketAddress
from ipaddress import AddressValueError
from twisted.internet.defer import inlineCallbacks

# Configure GUI logging and error reporting, then build the module-level
# AppsManager shared by register_rendering_task_types() below.
# NOTE(review): `logging` is used here but its import is not visible in
# this excerpt — confirm it is imported above.
config_logging("_gui")
logger = logging.getLogger("app")
install_unhandled_error_logger()

apps_manager = AppsManager()
apps_manager.load_apps()


def install_qt5_reactor():
    """Install the Qt5-based Twisted reactor and return it.

    The `twisted.internet.reactor` import MUST happen after
    ``qt5reactor.install()`` so that the Qt reactor — not the platform
    default — is the one that gets installed and returned.
    """
    import qt5reactor
    qt5reactor.install()
    from twisted.internet import reactor
    return reactor


def register_rendering_task_types(logic):
    """Register one task type per loaded app on *logic*, each wired to a
    TaskWidget built from the app's widget and its controller."""
    from gui.view.widget import TaskWidget
    for application in apps_manager.apps.values():
        widget = TaskWidget(application.widget)
        task_type = application.task_type_info(widget, application.controller)
        logic.register_new_task_type(task_type)
Пример #12
0
class TaskManager(TaskEventListener):
    """ Keeps and manages information about requested tasks
    """
    # Decorators that log-and-absorb lookups of unknown task / subtask ids.
    handle_task_key_error = HandleKeyError(log_task_key_error)
    handle_subtask_key_error = HandleKeyError(log_subtask_key_error)

    def __init__(self,
                 node_name,
                 node,
                 keys_auth,
                 listen_address="",
                 listen_port=0,
                 root_path="res",
                 use_distributed_resources=True,
                 tasks_dir="tasks",
                 task_persistence=False):
        """Create a task manager for this node.

        :param node_name: human-readable name of this node
        :param node: network node descriptor (public address, port, NAT type)
        :param keys_auth: key authentication object used to sign task headers
        :param listen_address: address task owners should be contacted on
        :param listen_port: port task owners should be contacted on
        :param root_path: root directory for task resources
        :param use_distributed_resources: enable distributed resource handling
        :param tasks_dir: directory used to persist (pickle) tasks
        :param task_persistence: when True, tasks are dumped to disk and
            restored on start-up
        """
        super(TaskManager, self).__init__()

        self.apps_manager = AppsManager()
        self.apps_manager.load_apps()

        # Map of lower-cased task type name -> task type info object.
        apps = self.apps_manager.apps.values()
        task_types = [app.task_type_info(None, app.controller) for app in apps]
        self.task_types = {t.name.lower(): t for t in task_types}

        self.node_name = node_name
        self.node = node
        self.keys_auth = keys_auth
        self.key_id = keys_auth.get_key_id()

        # task_id -> Task, task_id -> TaskState, subtask_id -> owning task_id.
        self.tasks = {}
        self.tasks_states = {}
        self.subtask2task_mapping = {}

        self.listen_address = listen_address
        self.listen_port = listen_port

        # FIXME Remove this variable and make task persistence obligatory after it is more tested
        # Remember to also remove it from init params
        self.task_persistence = task_persistence

        self.tasks_dir = Path(tasks_dir)
        if not self.tasks_dir.is_dir():
            self.tasks_dir.mkdir(parents=True)
        self.root_path = root_path
        self.dir_manager = DirManager(self.get_task_manager_root())

        # resource_manager = OpenStackSwiftResourceManager(self.dir_manager,
        #                                                  resource_dir_method=self.dir_manager.get_task_temporary_dir)
        resource_manager = HyperdriveResourceManager(
            self.dir_manager,
            resource_dir_method=self.dir_manager.get_task_temporary_dir)
        self.task_result_manager = EncryptedResultPackageManager(
            resource_manager)

        # Statuses in which a task may still hand out subtasks.
        self.activeStatus = [
            TaskStatus.computing, TaskStatus.starting, TaskStatus.waiting
        ]
        self.use_distributed_resources = use_distributed_resources

        self.comp_task_keeper = CompTaskKeeper(self.tasks_dir,
                                               persist=self.task_persistence)
        if self.task_persistence:
            self.restore_tasks()

    def get_task_manager_root(self):
        """Return the root directory used by this task manager."""
        return self.root_path

    def get_external_address(self):
        """Asynchronously resolve this node's external (public) address.

        :return: a deferred firing with (pub_addr, pub_port, nat_type), as
            consumed by ``add_new_task``
        """
        request = AsyncRequest(get_external_address, self.listen_port)
        return async_run(request)

    def create_task(self, dictionary):
        """Build a Task from its dictionary representation.

        A non-dict argument is assumed to already be a task and is returned
        unchanged.
        """
        # FIXME: remove after the new interface has been integrated with
        if not isinstance(dictionary, dict):
            return dictionary

        task_type = self.task_types[dictionary['type'].lower()]
        builder_class = task_type.task_builder_type

        definition = builder_class.build_definition(task_type, dictionary)
        task_builder = builder_class(self.node_name, definition,
                                     self.root_path, self.dir_manager)
        return Task.build_task(task_builder)

    def get_task_definition_dict(self, task):
        """Serialize a task's definition to a dictionary (idempotent)."""
        if isinstance(task, dict):
            return task
        definition = task.task_definition
        type_name = definition.task_type.lower()
        builder_class = self.task_types[type_name].task_builder_type
        return builder_class.build_dictionary(definition)

    @inlineCallbacks
    def add_new_task(self, task):
        """Register a new task: sign its header, prepare dirs and start it.

        :param task: task object to register
        :raise RuntimeError: when the task is already registered
        :raise ValueError: when this node's key id is not set
        :raise IOError: when the listen address/port is invalid
        """
        if task.header.task_id in self.tasks:
            raise RuntimeError("Task has been already added")
        if not self.key_id:
            raise ValueError("'key_id' is not set")
        if not SocketAddress.is_proper_address(self.listen_address,
                                               self.listen_port):
            raise IOError("Incorrect socket address")

        # Refresh our public address; if anything changed, all existing task
        # headers must be re-signed before the new task is added.
        prev_pub_addr, prev_pub_port, prev_nat_type = self.node.pub_addr, self.node.pub_port, self.node.nat_type
        self.node.pub_addr, self.node.pub_port, self.node.nat_type = yield self.get_external_address(
        )

        if prev_pub_addr != self.node.pub_addr or \
           prev_pub_port != self.node.pub_port or \
           prev_nat_type != self.node.nat_type:
            self.update_task_signatures()

        task.header.task_owner_address = self.listen_address
        task.header.task_owner_port = self.listen_port
        task.header.task_owner_key_id = self.key_id
        task.header.task_owner = self.node
        task.header.signature = self.sign_task_header(task.header)

        # Fresh temporary dir for the task's lifetime.
        self.dir_manager.clear_temporary(task.header.task_id,
                                         undeletable=task.undeletable)
        self.dir_manager.get_task_temporary_dir(task.header.task_id,
                                                create=True)

        task.register_listener(self)
        task.task_status = TaskStatus.waiting

        self.tasks[task.header.task_id] = task

        ts = TaskState()
        ts.status = TaskStatus.waiting
        ts.outputs = task.get_output_names()
        ts.total_subtasks = task.get_total_tasks()
        ts.time_started = time.time()

        self.tasks_states[task.header.task_id] = ts

        if self.task_persistence:
            self.dump_task(task.header.task_id)
            logger.info("Task {} added".format(task.header.task_id))
            self.notice_task_updated(task.header.task_id)

    def dump_task(self, task_id):
        """Pickle a task together with its state to ``tasks_dir``.

        On any failure the partially written file (if any) is removed and
        the exception is re-raised.

        :param task_id: id of the task to persist
        """
        logger.debug('DUMP TASK')
        # Compute the path before the try block: in the original code a
        # KeyError from self.tasks[task_id] left `filepath` unbound, turning
        # the cleanup below into a NameError.
        filepath = self.tasks_dir / ('%s.pickle' % (task_id, ))
        try:
            data = self.tasks[task_id], self.tasks_states[task_id]
            logger.debug('DUMP TASK %r', filepath)
            with filepath.open('wb') as f:
                # Protocol 2 keeps the dump readable by Python 2.
                pickle.dump(data, f, protocol=2)
        except:  # noqa  # deliberate: clean up, log, and re-raise anything
            logger.exception('DUMP ERROR task_id: %r task: %r state: %r',
                             task_id, self.tasks.get(task_id, '<not found>'),
                             self.tasks_states.get(task_id, '<not found>'))
            if filepath.exists():
                filepath.unlink()
            raise

    def restore_tasks(self):
        """Load every ``*.pickle`` task dump from ``tasks_dir``.

        Unreadable dumps are logged and deleted; each successfully restored
        task is announced with a 'task_restored' dispatcher signal.
        """
        logger.debug('RESTORE TASKS')
        for path in self.tasks_dir.iterdir():
            logger.debug('RESTORE TASKS %r', path)
            if not path.suffix == '.pickle':
                continue
            logger.debug('RESTORE TASKS really %r', path)
            with path.open('rb') as f:
                try:
                    task, state = pickle.load(f)
                    self.tasks[task.header.task_id] = task
                    self.tasks_states[task.header.task_id] = state
                except (pickle.UnpicklingError, EOFError, ImportError):
                    # Corrupt or stale dump -- remove so it is not retried.
                    logger.exception('Problem restoring task from: %s', path)
                    path.unlink()
                    continue
            dispatcher.send(signal='golem.taskmanager',
                            event='task_restored',
                            task=task,
                            state=state)

    @handle_task_key_error
    def resources_send(self, task_id):
        """Mark a task as waiting once its resources have been sent."""
        waiting = TaskStatus.waiting
        self.tasks_states[task_id].status = waiting
        self.tasks[task_id].task_status = waiting
        self.notice_task_updated(task_id)
        logger.info("Resources for task {} sent".format(task_id))

    def get_next_subtask(self,
                         node_id,
                         node_name,
                         task_id,
                         estimated_performance,
                         price,
                         max_resource_size,
                         max_memory_size,
                         num_cores=0,
                         address=""):
        """ Assign the next subtask from task <task_id> to node <node_id>.

        :param node_id: id of the node asking for a subtask
        :param node_name: name of the node asking for a subtask
        :param task_id: id of the task to take a subtask from
        :param estimated_performance: node's estimated performance
        :param price: price the node demands per computation unit
        :param max_resource_size: node's resource-size limit (KiB)
        :param max_memory_size: node's memory limit (KiB)
        :param num_cores: number of cores the node offers
        :param address: node's IP address
        :return (ComputeTaskDef|None, bool, bool): Function returns a triplet. First element is either ComputeTaskDef
        that describe assigned subtask or None. The second element describes whether the task_id is a wrong task that
        isn't in task manager register. If task with <task_id> it's a known task then second element of a pair is always
        False (regardless new subtask was assigned or not). The third element describes whether we're waiting for
        client's other task results.
        """
        logger.debug('get_next_subtask(%r, %r, %r, %r, %r, %r, %r, %r, %r)',
                     node_id, node_name, task_id, estimated_performance, price,
                     max_resource_size, max_memory_size, num_cores, address)
        if task_id not in self.tasks:
            logger.info("Cannot find task {} in my tasks".format(task_id))
            return None, True, False

        task = self.tasks[task_id]

        # Node demands more than the task owner is willing to pay.
        if task.header.max_price < price:
            return None, False, False

        def has_subtasks():
            # True when the task is active, still needs computation and the
            # node's resource/memory limits (given in KiB) are sufficient.
            if self.tasks_states[task_id].status not in self.activeStatus:
                logger.debug('state no in activestatus')
                return False
            if not task.needs_computation():
                logger.debug('not task.needs_computation')
                return False
            if task.header.resource_size > (long(max_resource_size) * 1024):
                logger.debug('resources size >')
                return False
            if task.header.estimated_memory > (long(max_memory_size) * 1024):
                logger.debug('estimated memory >')
                return False
            return True

        if not has_subtasks():
            logger.info(
                "Cannot get next task for estimated performance {}".format(
                    estimated_performance))
            return None, False, False

        extra_data = task.query_extra_data(estimated_performance, num_cores,
                                           node_id, node_name)
        if extra_data.should_wait:
            return None, False, True

        ctd = extra_data.ctd

        def check_compute_task_def():
            # Sanity checks: the definition must be well-formed, belong to
            # the requested task and not duplicate a known subtask.
            if not isinstance(ctd, ComputeTaskDef) or not ctd.subtask_id:
                logger.debug('check ctd: ctd not instance or not subtask_id')
                return False
            if task_id != ctd.task_id or ctd.subtask_id in self.subtask2task_mapping:
                logger.debug(
                    'check ctd: %r != %r or %r in self.subtask2task_maping',
                    task_id, ctd.task_id, ctd.subtask_id)
                return False
            if ctd.subtask_id in self.tasks_states[ctd.task_id].subtask_states:
                logger.debug('check ctd: subtask_states')
                return False
            return True

        if not check_compute_task_def():
            return None, False, False

        # Tell the computing node where to return results.
        ctd.key_id = task.header.task_owner_key_id
        ctd.return_address = task.header.task_owner_address
        ctd.return_port = task.header.task_owner_port
        ctd.task_owner = task.header.task_owner

        self.subtask2task_mapping[ctd.subtask_id] = task_id
        self.__add_subtask_to_tasks_states(node_name, node_id, price, ctd,
                                           address)
        self.notice_task_updated(task_id)
        return ctd, False, extra_data.should_wait

    def get_tasks_headers(self):
        """Return headers of all tasks that can still accept computation."""
        return [
            task.header
            for task in self.tasks.values()
            if task.needs_computation()
            and task.task_status in self.activeStatus
        ]

    def get_trust_mod(self, subtask_id):
        """Return the trust modifier for a subtask (0 for unknown ids)."""
        task_id = self.subtask2task_mapping.get(subtask_id)
        if task_id is None:
            logger.error("This is not my subtask {}".format(subtask_id))
            return 0
        return self.tasks[task_id].get_trust_mod(subtask_id)

    def update_task_signatures(self):
        """Re-sign the headers of all known tasks."""
        for current in self.tasks.values():
            current.header.signature = self.sign_task_header(current.header)

    def sign_task_header(self, task_header):
        """Sign the header's binary representation with this node's key."""
        return self.keys_auth.sign(task_header.to_binary())

    def verify_subtask(self, subtask_id):
        """Verify a subtask's results; False when the subtask is unknown."""
        if subtask_id not in self.subtask2task_mapping:
            return False
        task_id = self.subtask2task_mapping[subtask_id]
        return self.tasks[task_id].verify_subtask(subtask_id)

    def get_node_id_for_subtask(self, subtask_id):
        """Return the id of the node computing a subtask, or None."""
        task_id = self.subtask2task_mapping.get(subtask_id)
        if task_id is None:
            return None
        state = self.tasks_states[task_id].subtask_states[subtask_id]
        return state.computer.node_id

    def set_value(self, task_id, subtask_id, value):
        """Set the payment value of a subtask.

        Unknown task/subtask ids are logged and ignored.

        :param task_id: id of the task owning the subtask
        :param subtask_id: id of the subtask to update
        :param value: payment value; must be int or long (Python 2 codebase)
        :raise TypeError: when value is not an integer type
        """
        # Intentional exact type() check (not isinstance): bool and other
        # int subclasses are rejected as well.
        if type(value) not in (int, long):
            raise TypeError(
                "Incorrect 'value' type: {}. Should be int or long".format(
                    type(value)))
        task_state = self.tasks_states.get(task_id)
        if task_state is None:
            logger.warning("This is not my task {}".format(task_id))
            return
        subtask_state = task_state.subtask_states.get(subtask_id)
        if subtask_state is None:
            logger.warning("This is not my subtask {}".format(subtask_id))
            return
        subtask_state.value = value

    @handle_subtask_key_error
    def get_value(self, subtask_id):
        """ Return value of a given subtask
        :param subtask_id:  id of a computed subtask
        :return long: price that should be paid for given subtask
        """
        task_id = self.subtask2task_mapping[subtask_id]
        return self.tasks_states[task_id].subtask_states[subtask_id].value

    @handle_subtask_key_error
    def computed_task_received(self, subtask_id, result, result_type):
        """Accept and verify the result of a computed subtask.

        :param subtask_id: id of the finished subtask
        :param result: computed result payload
        :param result_type: type of the result payload
        :return bool: True when the result was accepted, False when the
            subtask was not in a computed state or failed verification
        """
        task_id = self.subtask2task_mapping[subtask_id]

        subtask_state = self.tasks_states[task_id].subtask_states[subtask_id]
        subtask_status = subtask_state.subtask_status

        # Results are only valid for subtasks that were actually computing.
        if not SubtaskStatus.is_computed(subtask_status):
            logger.warning(
                "Result for subtask {} when subtask state is {}".format(
                    subtask_id, subtask_status))
            self.notice_task_updated(task_id)
            return False

        self.tasks[task_id].computation_finished(subtask_id, result,
                                                 result_type)
        ss = self.tasks_states[task_id].subtask_states[subtask_id]
        ss.subtask_progress = 1.0
        ss.subtask_rem_time = 0.0
        ss.subtask_status = SubtaskStatus.finished
        ss.stdout = self.tasks[task_id].get_stdout(subtask_id)
        ss.stderr = self.tasks[task_id].get_stderr(subtask_id)
        ss.results = self.tasks[task_id].get_results(subtask_id)

        if not self.tasks[task_id].verify_subtask(subtask_id):
            logger.debug("Subtask {} not accepted\n".format(subtask_id))
            ss.subtask_status = SubtaskStatus.failure
            self.notice_task_updated(task_id)
            return False

        # Update the whole task's status: keep computing, or finish it when
        # this was the last subtask and the task verifies.
        if self.tasks_states[task_id].status in self.activeStatus:
            if not self.tasks[task_id].finished_computation():
                self.tasks_states[task_id].status = TaskStatus.computing
            else:
                if self.tasks[task_id].verify_task():
                    logger.debug("Task {} accepted".format(task_id))
                    self.tasks_states[task_id].status = TaskStatus.finished
                else:
                    logger.debug("Task {} not accepted".format(task_id))
        self.notice_task_updated(task_id)
        return True

    @handle_subtask_key_error
    def task_computation_failure(self, subtask_id, err):
        """Record a computation failure reported for a subtask.

        :param subtask_id: id of the failed subtask
        :param err: error description / exception reported by the computer
        :return bool: True when the failure was recorded, False when the
            subtask was not in a computed state
        """
        task_id = self.subtask2task_mapping[subtask_id]

        subtask_state = self.tasks_states[task_id].subtask_states[subtask_id]
        subtask_status = subtask_state.subtask_status

        if not SubtaskStatus.is_computed(subtask_status):
            # Message fixed: this path reports a failure, not a result
            # (the old text was copy-pasted from computed_task_received).
            logger.warning(
                "Failure for subtask {} when subtask state is {}".format(
                    subtask_id, subtask_status))
            self.notice_task_updated(task_id)
            return False

        self.tasks[task_id].computation_failed(subtask_id)
        ss = self.tasks_states[task_id].subtask_states[subtask_id]
        ss.subtask_progress = 1.0
        ss.subtask_rem_time = 0.0
        ss.subtask_status = SubtaskStatus.failure
        ss.stderr = str(err)

        self.notice_task_updated(task_id)
        return True

    def task_result_incoming(self, subtask_id):
        """Mark a subtask's result as being downloaded.

        Logs an error when the subtask or its computing node is unknown.
        """
        node_id = self.get_node_id_for_subtask(subtask_id)

        if node_id and subtask_id in self.subtask2task_mapping:
            task_id = self.subtask2task_mapping[subtask_id]
            if task_id in self.tasks:
                task = self.tasks[task_id]
                states = self.tasks_states[task_id].subtask_states[subtask_id]

                task.result_incoming(subtask_id)
                states.subtask_status = SubtaskStatus.downloading

                self.notify_update_task(task_id)
            else:
                logger.error("Unknown task id: {}".format(task_id))
        else:
            logger.error("Node_id {} or subtask_id {} does not exist".format(
                node_id, subtask_id))

    # CHANGE TO RETURN KEY_ID (check IF SUBTASK COMPUTER HAS KEY_ID
    def check_timeouts(self):
        """Time out overdue tasks and subtasks.

        :return list: node ids whose subtasks timed out (may contain
            duplicates)
        """
        nodes_with_timeouts = []
        for t in self.tasks.values():
            th = t.header
            if self.tasks_states[th.task_id].status not in self.activeStatus:
                continue
            cur_time = get_timestamp_utc()
            # Whole-task deadline passed.
            if cur_time > th.deadline:
                logger.info("Task {} dies".format(th.task_id))
                # Fixed typo: was `t.task_stats`, which silently created a
                # new attribute and left the task's status unchanged.
                t.task_status = TaskStatus.timeout
                self.tasks_states[th.task_id].status = TaskStatus.timeout
                self.notice_task_updated(th.task_id)
            # Check per-subtask deadlines of still-computing subtasks.
            ts = self.tasks_states[th.task_id]
            for s in ts.subtask_states.values():
                if SubtaskStatus.is_computed(s.subtask_status):
                    if cur_time > s.deadline:
                        logger.info("Subtask {} dies".format(s.subtask_id))
                        s.subtask_status = SubtaskStatus.failure
                        nodes_with_timeouts.append(s.computer.node_id)
                        t.computation_failed(s.subtask_id)
                        s.stderr = "[GOLEM] Timeout"
                        self.notice_task_updated(th.task_id)
        return nodes_with_timeouts

    def get_progresses(self):
        """Return LocalTaskStateSnapshots for every unfinished task."""
        snapshots = {}

        for task in self.tasks.values():
            if task.get_progress() >= 1.0:
                continue
            snapshot = LocalTaskStateSnapshot(
                task.header.task_id,
                task.get_total_tasks(),
                task.get_active_tasks(),
                task.get_progress(),
                task.short_extra_data_repr(2200.0))
            snapshots[task.header.task_id] = snapshot

        return snapshots

    @handle_task_key_error
    def get_resources(self, task_id, resource_header, resource_type=0):
        """Return a task's resources matching the given resource header."""
        task = self.tasks[task_id]
        return task.get_resources(resource_header, resource_type)

    @handle_task_key_error
    def restart_task(self, task_id):
        """Restart a whole task: clear temp files, reset statuses.

        Subtasks that did not fail are marked as restarted.
        """
        logger.info("restarting task")
        self.dir_manager.clear_temporary(
            task_id, undeletable=self.tasks[task_id].undeletable)

        self.tasks[task_id].restart()
        self.tasks[task_id].task_status = TaskStatus.waiting
        self.tasks_states[task_id].status = TaskStatus.waiting
        self.tasks_states[task_id].time_started = time.time()

        for ss in self.tasks_states[task_id].subtask_states.values():
            if ss.subtask_status != SubtaskStatus.failure:
                ss.subtask_status = SubtaskStatus.restarted

        self.notice_task_updated(task_id)

    @handle_subtask_key_error
    def restart_subtask(self, subtask_id):
        """Restart a single subtask and put its task back to computing."""
        task_id = self.subtask2task_mapping[subtask_id]
        self.tasks[task_id].restart_subtask(subtask_id)
        self.tasks_states[task_id].status = TaskStatus.computing
        self.tasks_states[task_id].subtask_states[
            subtask_id].subtask_status = SubtaskStatus.restarted
        self.tasks_states[task_id].subtask_states[
            subtask_id].stderr = "[GOLEM] Restarted"

        self.notice_task_updated(task_id)

    @handle_task_key_error
    def abort_task(self, task_id):
        """Abort a task and discard all of its subtask state."""
        self.tasks[task_id].abort()
        self.tasks[task_id].task_status = TaskStatus.aborted
        self.tasks_states[task_id].status = TaskStatus.aborted
        # Drop the subtask -> task mapping entries before clearing states.
        for sub in self.tasks_states[task_id].subtask_states.values():
            del self.subtask2task_mapping[sub.subtask_id]
        self.tasks_states[task_id].subtask_states.clear()

        self.notice_task_updated(task_id)

    @handle_task_key_error
    def pause_task(self, task_id):
        """Put a task (and its state entry) into the paused status."""
        paused = TaskStatus.paused
        self.tasks[task_id].task_status = paused
        self.tasks_states[task_id].status = paused

        self.notice_task_updated(task_id)

    @handle_task_key_error
    def resume_task(self, task_id):
        """Put a paused task (and its state entry) back into starting."""
        starting = TaskStatus.starting
        self.tasks[task_id].task_status = starting
        self.tasks_states[task_id].status = starting

        self.notice_task_updated(task_id)

    @handle_task_key_error
    def delete_task(self, task_id):
        """Remove a task, its state and temporary files entirely."""
        # Drop the subtask -> task mapping entries before clearing states.
        for sub in self.tasks_states[task_id].subtask_states.values():
            del self.subtask2task_mapping[sub.subtask_id]
        self.tasks_states[task_id].subtask_states.clear()

        self.tasks[task_id].unregister_listener(self)
        del self.tasks[task_id]
        del self.tasks_states[task_id]

        self.dir_manager.clear_temporary(task_id)

    @handle_task_key_error
    def query_task_state(self, task_id):
        """Refresh and return the TaskState of the given task.

        Updates progress, elapsed and estimated remaining time in place.
        """
        ts = self.tasks_states[task_id]
        t = self.tasks[task_id]

        ts.progress = t.get_progress()
        ts.elapsed_time = time.time() - ts.time_started

        if ts.progress > 0.0:
            # Linear extrapolation from the progress made so far.
            ts.remaining_time = (ts.elapsed_time /
                                 ts.progress) - ts.elapsed_time
        else:
            ts.remaining_time = -0.0

        t.update_task_state(ts)

        return ts

    def get_subtasks(self, task_id):
        """
        Get all subtasks related to given task id
        :param task_id: Task ID
        :return: list of all subtasks related with @task_id or None if @task_id is not known
        """
        if task_id not in self.tasks_states:
            return None
        subtask_states = self.tasks_states[task_id].subtask_states
        ids = []
        for state in subtask_states.values():
            ids.append(state.subtask_id)
        return ids

    def change_config(self, root_path, use_distributed_resource_management):
        """Apply new configuration: resource root and distribution flag."""
        self.dir_manager = DirManager(root_path)
        self.use_distributed_resources = use_distributed_resource_management

    @handle_task_key_error
    def change_timeouts(self, task_id, full_task_timeout, subtask_timeout):
        """Update a task's full-task and subtask timeouts.

        :param full_task_timeout: new task timeout (converted to a deadline)
        :param subtask_timeout: new per-subtask timeout
        """
        task = self.tasks[task_id]
        task.header.deadline = timeout_to_deadline(full_task_timeout)
        task.header.subtask_timeout = subtask_timeout
        task.full_task_timeout = full_task_timeout
        task.header.last_checking = time.time()

    def get_task_id(self, subtask_id):
        """Return the id of the task owning the given subtask.

        :raise KeyError: when the subtask id is unknown
        """
        return self.subtask2task_mapping[subtask_id]

    def get_task_dict(self, task_id):
        """Return a full dictionary view of a task (preview + state + def)."""
        task = self.tasks[task_id]

        # single=True retrieves one preview file. If rendering frames,
        # it's the preview of the most recently computed frame.
        dictionary = {u'preview': task.get_preview(single=True)}
        dictionary.update(self.get_simple_task_dict(task))
        dictionary.update(self.get_task_definition_dict(task))
        return dictionary

    def get_tasks_dict(self):
        """Return simple dictionary views of all known tasks."""
        tasks = self.tasks.itervalues()
        return [self.get_simple_task_dict(task) for task in tasks]

    def get_subtask_dict(self, subtask_id):
        """Return a dictionary view of a single subtask's state."""
        task_id = self.subtask2task_mapping[subtask_id]
        state = self.tasks_states[task_id].subtask_states[subtask_id]
        return state.to_dictionary()

    def get_subtasks_dict(self, task_id):
        """Return dictionary views of every subtask of the given task."""
        states = self.tasks_states[task_id].subtask_states
        return [state.to_dictionary() for state in states.itervalues()]

    def get_subtasks_borders(self, task_id):
        """Return a mapping of subtask id to its border within the task.

        Borders are computed by the task type's ``get_task_border`` using
        the task definition and total subtask count.
        """
        task = self.tasks[task_id]
        task_type_name = task.task_definition.task_type.lower()
        task_type = self.task_types[task_type_name]
        task_state = self.tasks_states[task_id]
        total_subtasks = task.get_total_tasks()

        return {
            to_unicode(subtask.subtask_id):
            task_type.get_task_border(subtask,
                                      task.task_definition,
                                      total_subtasks,
                                      as_path=True)
            for subtask in task_state.subtask_states.values()
        }

    def get_simple_task_dict(self, task):
        """Return a merged dictionary view of a task and its state.

        The 'duration' entry is the time spent so far (timeout minus the
        estimated remaining time, floored at 0).
        """
        state = self.tasks_states.get(task.header.task_id)
        timeout = task.task_definition.full_task_timeout

        dictionary = {u'duration': max(timeout - state.remaining_time, 0)}
        dictionary.update(task.to_dictionary())
        dictionary.update(state.to_dictionary())
        return dictionary

    def get_task_preview(self, task_id, single=False):
        """Return a task's preview (a single file when ``single`` is True)."""
        return self.tasks[task_id].get_preview(single=single)

    @handle_subtask_key_error
    def set_computation_time(self, subtask_id, computation_time):
        """
        Set computation time for subtask and also compute and set new value based on saved price for this subtask
        :param str subtask_id: subtask which was computed in given computation_time
        :param float computation_time: how long does it take to compute this task
        :return:
        """
        task_id = self.subtask2task_mapping[subtask_id]
        ss = self.tasks_states[task_id].subtask_states[subtask_id]
        ss.computation_time = computation_time
        # Value is derived from the price stored when the subtask was handed out.
        ss.value = compute_subtask_value(ss.computer.price, computation_time)

    def add_comp_task_request(self, theader, price):
        """ Add a header of a task which this node may try to compute """
        self.comp_task_keeper.add_request(theader, price)

    @handle_task_key_error
    def get_payment_for_task_id(self, task_id):
        """Return the summed value of all subtasks of a task (float)."""
        task_state = self.tasks_states[task_id]
        subtask_states = task_state.subtask_states.values()
        # Start from 0.0 so the result stays a float, as before.
        return sum((ss.value for ss in subtask_states), 0.0)

    def __add_subtask_to_tasks_states(self, node_name, node_id, price, ctd,
                                      address):
        """Record the state of a freshly assigned subtask.

        :param ctd: the ComputeTaskDef the subtask was assigned from
        :raise RuntimeError: when the owning task has no state entry
        """
        if ctd.task_id not in self.tasks_states:
            raise RuntimeError("Should never be here!")

        logger.debug('add_subtask_to_tasks_states(%r, %r, %r, %r, %r)',
                     node_name, node_id, price, ctd, address)

        ss = SubtaskState()
        ss.computer.node_id = node_id
        ss.computer.node_name = node_name
        ss.computer.performance = ctd.performance
        ss.computer.ip_address = address
        ss.computer.price = price
        ss.time_started = time.time()
        ss.deadline = ctd.deadline
        # TODO: read node ip address
        ss.subtask_definition = ctd.short_description
        ss.subtask_id = ctd.subtask_id
        ss.extra_data = ctd.extra_data
        # NOTE(review): TaskStatus (not SubtaskStatus) is assigned here --
        # looks intentional elsewhere in this codebase, but worth confirming.
        ss.subtask_status = TaskStatus.starting
        ss.value = 0

        self.tasks_states[ctd.task_id].subtask_states[ctd.subtask_id] = ss

    def notify_update_task(self, task_id):
        """Alias for ``notice_task_updated`` (TaskEventListener callback)."""
        self.notice_task_updated(task_id)

    @handle_task_key_error
    def notice_task_updated(self, task_id):
        """Persist the task (if enabled) and broadcast a status update."""
        # self.save_state()
        if self.task_persistence:
            self.dump_task(task_id)
        dispatcher.send(signal='golem.taskmanager',
                        event='task_status_updated',
                        task_id=task_id)
Пример #13
0
 def _get_loaded_app_manger():
     """Return an AppsManager with all apps loaded and benchmarks forced on."""
     app_manager = AppsManager()
     app_manager.load_all_apps()
     # Pretend every app supports benchmarking, regardless of environment.
     app_manager._benchmark_enabled = mock.Mock(return_value=True)
     return app_manager
Пример #14
0
 def test_get_env_list(self):
     """Loaded apps must expose both Blender and LuxRender environments."""
     manager = AppsManager()
     manager.load_apps()
     environments = manager.get_env_list()
     assert any(
         isinstance(env, BlenderEnvironment) for env in environments)
     assert any(
         isinstance(env, LuxRenderEnvironment) for env in environments)