Example #1
 def __init__(self):
     Task.__init__(self, Mock(), Mock())
     self.src_code = ""
     self.extra_data = {}
     self.test_finished = False
     self.results = None
     self.tmp_dir = None
Example #2
    def __init__(self,
                 src_code,
                 task_definition,
                 node_name,
                 environment,
                 resource_size=0,
                 owner_address="",
                 owner_port=0,
                 owner_key_id="",
                 max_pending_client_results=MAX_PENDING_CLIENT_RESULTS):
        """Create more specific task implementation

        """

        self.task_definition = task_definition
        task_timeout = task_definition.full_task_timeout
        deadline = timeout_to_deadline(task_timeout)
        th = TaskHeader(
            node_name=node_name,
            task_id=task_definition.task_id,
            task_owner_address=owner_address,
            task_owner_port=owner_port,
            task_owner_key_id=owner_key_id,
            environment=environment,
            task_owner=Node(),
            deadline=deadline,
            subtask_timeout=task_definition.subtask_timeout,
            resource_size=resource_size,
            estimated_memory=task_definition.estimated_memory,
            max_price=task_definition.max_price,
            docker_images=task_definition.docker_images,
        )

        Task.__init__(self, th, src_code)

        self.task_resources = list()

        self.total_tasks = 0
        self.last_task = 0

        self.num_tasks_received = 0
        self.subtasks_given = {}
        self.num_failed_subtasks = 0

        self.full_task_timeout = task_timeout
        self.counting_nodes = {}

        self.root_path = None

        # for each subtask, keep info about the stdout received from the computing node
        self.stdout = {}
        # for each subtask, keep info about the stderr received from the computing node
        self.stderr = {}
        # for each subtask, keep info about the files containing results
        self.results = {}

        self.res_files = {}
        self.tmp_dir = None
        self.verificator = self.VERIFICATOR_CLASS()
        self.max_pending_client_results = max_pending_client_results
Example #3
    def load(self, file_name, skip_test):

        try:
            definition = self.__read_from_file(file_name)
        except Exception as exc:
            return CommandResult(
                error="Error reading task from file '{}': {}".format(
                    file_name, exc))

        if hasattr(definition, 'resources'):
            definition.resources = {
                os.path.normpath(res)
                for res in definition.resources
            }
        datadir = sync_wait(Tasks.client.get_datadir())

        # TODO: unify GUI and CLI logic

        rendering_task_state = TaskDesc()
        rendering_task_state.definition = definition
        rendering_task_state.task_state.status = TaskStatus.starting

        if not Tasks.application_logic:
            Tasks.application_logic = CommandAppLogic.instantiate(
                Tasks.client, datadir)

        task_builder = Tasks.application_logic.get_builder(
            rendering_task_state)
        task = Task.build_task(task_builder)
        rendering_task_state.task_state.outputs = task.get_output_names()
        rendering_task_state.task_state.total_subtasks = task.get_total_tasks()
        task.header.task_id = str(uuid.uuid4())

        if not skip_test:

            test_task = Task.build_task(task_builder)
            test_task.header.task_id = str(uuid.uuid4())
            queue = Queue()

            TaskTester(test_task,
                       datadir,
                       success_callback=lambda *a, **kw: queue.put(True),
                       error_callback=lambda *a, **kw: queue.put(a)).run()

            test_result = queue.get()
            if test_result is not True:
                return CommandResult(
                    error="Test failed: {}".format(test_result))

        task_dict = DictSerializer.dump(task)
        task_def = task_dict['task_definition']
        task_def['resources'] = list(task_def.get('resources', []))
        deferred = Tasks.client.create_task(task_dict)
        return sync_wait(deferred, timeout=1800)
Example #4
    def __init__(self, client_id, params, num_subtasks, public_key):
        """Creates a new dummy task
        :param string client_id: client id
        :param DummyTaskParameters params: task parameters
        1024 hashes on average
        """
        task_id = idgenerator.generate_id(public_key)
        owner_address = ''
        owner_port = 0
        owner_key_id = encode_hex(public_key)[2:]
        environment = self.ENVIRONMENT_NAME
        header = TaskHeader(task_id=task_id,
                            environment=environment,
                            task_owner=Node(node_name=client_id,
                                            pub_addr=owner_address,
                                            pub_port=owner_port,
                                            key=owner_key_id),
                            deadline=timeout_to_deadline(14400),
                            subtask_timeout=1200,
                            subtasks_count=num_subtasks,
                            resource_size=params.shared_data_size +
                            params.subtask_data_size,
                            estimated_memory=0,
                            max_price=MIN_PRICE)

        # load the script to be run remotely from the file in the current dir
        script_path = path.join(path.dirname(__file__), 'computation.py')
        with open(script_path, 'r') as f:
            src_code = f.read()
            src_code += '\noutput = run_dummy_task(' \
                'data_file, subtask_data, difficulty, result_size, tmp_path)'

        from apps.dummy.task.dummytaskstate import DummyTaskDefinition
        from apps.dummy.task.dummytaskstate import DummyTaskDefaults
        task_definition = DummyTaskDefinition(DummyTaskDefaults())
        Task.__init__(self, header, src_code, task_definition)

        self.task_id = task_id
        self.task_params = params
        self.task_resources = []
        self.resource_parts = {}

        self.shared_data_file = None
        self.subtasks_count = num_subtasks
        self.total_tasks = self.subtasks_count
        self.subtask_ids = []
        self.subtask_data = {}
        self.subtask_results = {}
        self.assigned_nodes = {}
        self.assigned_subtasks = {}
        self.total_tasks = 1
        self._lock = Lock()
Example #5
    def test_run(self):
        benchmark = BlenderBenchmark()
        task_definition = benchmark.task_definition

        task_state = TaskDesc()
        task_state.status = TaskStatus.notStarted
        task_state.definition = task_definition

        dir_manager = DirManager(self.path)
        task = Task.build_task(
            BlenderRenderTaskBuilder("node name", task_definition, self.path,
                                     dir_manager))

        result = [None]

        def success(*_):
            result[0] = True

        def error(*_):
            result[0] = False

        self.br = BenchmarkRunner(task, self.path, success, error, benchmark)
        self.br.run()
        if self.br.tt:
            self.br.tt.join()

        assert result[0]
Example #6
    def test_is_success(self):
        task = Task(Mock(), Mock(), Mock())

        task.query_extra_data_for_test_task = Mock()
        tt = TaskTester(task, self.path, Mock(), Mock())
        task_thread = Mock()

        # Proper task
        task_thread.error = None
        task_thread.result = ({"data": True}, 123)
        assert tt.is_success(task_thread)

        # Task thread result is not a tuple
        task_thread.result = {"data": True}
        assert not tt.is_success(task_thread)
        assert task_thread.error == "Wrong result format"
Example #7
 def add_tasks(self, tasks):
     for task_def in tasks:
         task_builder = self._get_task_builder(task_def)
         golem_task = Task.build_task(
             task_builder(self.client.get_node_name(), task_def,
                          self.client.datadir))
         self.client.enqueue_new_task(golem_task)
Example #8
    def run_benchmark(self, benchmark, label, cfg_param_name):
        task_state = TaskDesc()
        task_state.status = TaskStatus.notStarted
        task_state.definition = benchmark.task_definition
        self._validate_task_state(task_state)

        tb = self.get_builder(task_state)
        t = Task.build_task(tb)

        reactor = self.__get_reactor()

        self.br = BenchmarkRunner(
            t, self.datadir, lambda p: reactor.callFromThread(
                self._benchmark_computation_success,
                performance=p,
                label=label,
                cfg_param=cfg_param_name), self._benchmark_computation_error,
            benchmark)

        self.progress_dialog = TestingTaskProgressDialog(
            self.customizer.gui.window)
        self.progress_dialog_customizer = TestingTaskProgressDialogCustomizer(
            self.progress_dialog, self)
        self.progress_dialog_customizer.enable_ok_button(
            False)  # disable 'ok' button
        self.customizer.gui.setEnabled('recount',
                                       False)  # disable all 'recount' buttons
        self.progress_dialog.show()

        self.br.run()
Example #9
    def __init__(self, client_id, params, num_subtasks):
        """Creates a new dummy task
        :param string client_id: client id
        :param DummyTaskParameters params: task parameters
        1024 hashes on average
        """
        task_id = SimpleAuth.generate_uuid().get_hex()
        owner_address = ''
        owner_port = 0
        owner_key_id = ''
        environment = self.ENVIRONMENT_NAME
        header = TaskHeader(
            client_id, task_id,
            owner_address, owner_port, owner_key_id, environment,
            task_owner=Node(),
            deadline=timeout_to_deadline(14400),
            subtask_timeout=1200,
            resource_size=params.shared_data_size + params.subtask_data_size,
            estimated_memory=0,
            max_price=MIN_PRICE)

        # load the script to be run remotely from the file in the current dir
        script_path = path.join(path.dirname(__file__), 'computation.py')
        with open(script_path, 'r') as f:
            src_code = f.read()
            src_code += '\noutput = run_dummy_task(' \
                        'data_file, subtask_data, difficulty, result_size)'

        Task.__init__(self, header, src_code)

        self.task_id = task_id
        self.task_params = params
        self.task_resources = []
        self.resource_parts = {}

        self.shared_data_file = None
        self.total_subtasks = num_subtasks
        self.subtask_ids = []
        self.subtask_data = {}
        self.subtask_results = {}
        self.assigned_nodes = {}
        self.assigned_subtasks = {}
        self._lock = Lock()
Example #10
    def test_create_task(self, *_):
        c = self.client
        c.enqueue_new_task = Mock()

        # create a task
        t = Task(TaskHeader("node_name", "task_id", "10.10.10.10", 123,
                            "owner_id", "DEFAULT"),
                 src_code="print('hello')")

        c.create_task(DictSerializer.dump(t))
        self.assertTrue(c.enqueue_new_task.called)
Example #11
    def test_computer(self):
        with self.assertRaises(TypeError):
            LocalComputer(None, self.path, self._success_callback,
                          self._failure_callback, self._get_bad_task_def)
        files = self.additional_dir_content([1])
        task = Task(Mock(), Mock())
        lc = LocalComputer(task, self.path, self._success_callback,
                           self._failure_callback, self._get_bad_task_def)
        self.assertIsInstance(lc, LocalComputer)
        lc.run()
        assert self.last_error is not None
        assert self.last_result is None
        assert self.error_counter == 1

        lc = LocalComputer(task,
                           self.path,
                           self._success_callback,
                           self._failure_callback,
                           self._get_better_task_def,
                           use_task_resources=False,
                           additional_resources=files)
        lc.run()
        lc.tt.join(60.0)
        path_ = path.join(lc.test_task_res_path, path.basename(files[0]))
        assert path.isfile(path_)
        assert self.error_counter == 1
        assert self.success_counter == 1

        tt = self.TestTaskThread({'data': "some data"}, "some error")
        lc.task_computed(tt)
        assert self.last_result == {"data": "some data"}
        assert self.last_result != "some error"
        assert self.error_counter == 1
        assert self.success_counter == 2

        tt = self.TestTaskThread({}, "some error")
        lc.task_computed(tt)
        assert self.last_error == "some error"
        assert self.error_counter == 2
        assert self.success_counter == 2

        tt = self.TestTaskThread({}, None)
        lc.task_computed(tt)
        assert self.last_error is None
        assert self.error_counter == 3
        assert self.success_counter == 2

        tt = self.TestTaskThread({'data': "some data"}, None)
        tt.error = True
        lc.task_computed(tt)
        assert self.last_error is None
        assert self.error_counter == 4
        assert self.success_counter == 2
Example #12
 def run_benchmark(self, benchmark, task_builder, datadir, node_name,
                   success_callback, error_callback):
     task_state = TaskDesc()
     task_state.status = TaskStatus.notStarted
     task_state.definition = benchmark.task_definition
     self._validate_task_state(task_state)
     builder = task_builder(node_name, task_state.definition, datadir,
                            self.dir_manager)
     t = Task.build_task(builder)
     br = BenchmarkRunner(t, datadir, success_callback, error_callback,
                          benchmark)
     br.run()
Example #13
 def __init__(self, task: Task, root_path, success_callback,
              error_callback):
     super(TaskTester, self).__init__(
         root_path=root_path,
         success_callback=success_callback,
         error_callback=error_callback,
         get_compute_task_def=task.query_extra_data_for_test_task,
         check_mem=True,
         comp_failed_warning=TaskTester.TESTER_WARNING,
         comp_success_message=TaskTester.TESTER_SUCCESS,
         resources=task.get_resources())
     self.task = task
Example #14
    def add_new_task(self, task: Task, estimated_fee: int = 0) -> None:
        task_id = task.header.task_id
        if task_id in self.tasks:
            raise RuntimeError("Task {} has been already added".format(
                task.header.task_id))
        if not self.key_id:
            raise ValueError("'key_id' is not set")

        task.header.fixed_header.task_owner = self.node
        task.header.signature = self.sign_task_header(task.header)

        task.create_reference_data_for_task_validation()
        task.register_listener(self)

        ts = TaskState()
        ts.status = TaskStatus.notStarted
        ts.outputs = task.get_output_names()
        ts.subtasks_count = task.get_total_tasks()
        ts.time_started = time.time()
        ts.estimated_cost = task.price
        ts.estimated_fee = estimated_fee

        self.tasks[task_id] = task
        self.tasks_states[task_id] = ts
        logger.info("Task %s added", task_id)

        self.notice_task_updated(task_id, op=TaskOp.CREATED, persist=False)
Example #15
    def create_task(self, dictionary):
        # FIXME: remove after the new interface has been integrated with
        if not isinstance(dictionary, dict):
            return dictionary

        type_name = dictionary['type'].lower()
        task_type = self.task_types[type_name]
        builder_type = task_type.task_builder_type

        definition = builder_type.build_definition(task_type, dictionary)
        builder = builder_type(self.node_name, definition, self.root_path,
                               self.dir_manager)

        return Task.build_task(builder)
Example #16
    def test_task_computed(self):
        task = Task(Mock(), Mock(), Mock())

        result = [{"data": True}, 123]

        task.header.node_name = self.node
        task.header.task_id = self.name
        task.root_path = self.path
        task.after_test = lambda res, tmp_dir: {}
        task.query_extra_data_for_test_task = Mock()

        tt = TaskTester(task, self.path, Mock(), Mock())
        tt.tmp_dir = self.path
        task_thread = TaskThread(result)
        tt.task_computed(task_thread)

        task_thread = MemTaskThread(None, 30210, "Some error")
        with self.assertLogs(logger, level='WARNING'):
            tt.task_computed(task_thread)
        tt.error_callback.assert_called_with("Some error")

        task_thread = MemTaskThread("result", 2010, "Another error")
        self.assertIsNone(tt.get_progress())
        tt.tt = task_thread
        self.assertEqual(tt.get_progress(), "30%")
        task_thread.error = True
        self.assertEqual(tt.get_progress(), 0)
        tt.task_computed(task_thread)
        tt.error_callback.assert_called_with("Another error")

        self.message = ""

        def success_callback(res, est_mem, time_spent, after_test_data):
            self.message = "Success " + after_test_data["warnings"]

        task.header.node_name = self.node
        task.header.task_id = self.name
        task.root_path = self.path
        task.after_test = lambda res, tmp_dir: {"warnings": "bla ble"}
        task.query_extra_data_for_test_task = Mock()

        tt = TaskTester(task, self.path, success_callback, None)
        tt.tmp_dir = self.path
        task_thread = TaskThread(result)
        tt.task_computed(task_thread)
        self.assertTrue("bla" in self.message)
        self.assertTrue("ble" in self.message)
Example #17
    def __init__(self, task: Task, root_path, success_callback, error_callback,
                 benchmark: CoreBenchmark) -> None:
        def get_compute_task_def():
            return task.query_extra_data(10000).ctd

        super().__init__(root_path=root_path,
                         success_callback=success_callback,
                         error_callback=error_callback,
                         get_compute_task_def=get_compute_task_def,
                         check_mem=True,
                         comp_failed_warning=BenchmarkRunner.RUNNER_WARNING,
                         comp_success_message=BenchmarkRunner.RUNNER_SUCCESS,
                         resources=task.get_resources())
        # probably this could be done differently
        self.benchmark = benchmark
Example #18
 def build_and_serialize_task(self, task_state, cbk=None):
     tb = self.get_builder(task_state)
     t = Task.build_task(tb)
     t.header.max_price = str(t.header.max_price)
     t_serialized = DictSerializer.dump(t)
     if 'task_definition' in t_serialized:
         t_serialized_def = t_serialized['task_definition']
         t_serialized_def['resources'] = list(t_serialized_def['resources'])
         if 'max_price' in t_serialized_def:
             t_serialized_def['max_price'] = str(
                 t_serialized_def['max_price'])
     from pprint import pformat
     logger.debug('task serialized: %s', pformat(t_serialized))
     if cbk:
         cbk(t)
     return t_serialized
Example #19
    def test_get_resources(self, mock_addr):
        mock_addr.return_value = self.addr_return
        task_id = "xyz"

        resources = ['first', 'second']

        task_mock = self._get_task_mock()
        with patch('golem.task.taskmanager.TaskManager.get_resources',
                   return_value=resources):
            sync_wait(self.tm.add_new_task(task_mock))
            assert self.tm.get_resources(task_id,
                                         task_mock.header) is resources

        task = Task(self._get_task_header("xyz", 120, 120),
                    "print 'hello world'")
        self.tm.tasks["xyz"] = task
        self.tm.get_resources("xyz", TaskResourceHeader(self.path), 0)
Example #20
    def _get_task_mock(self,
                       task_id="xyz",
                       subtask_id="xxyyzz",
                       timeout=120.0,
                       subtask_timeout=120.0):
        header = self._get_task_header(task_id, timeout, subtask_timeout)
        task_mock = TaskMock(header, src_code='')

        ctd = ComputeTaskDef()
        ctd.task_id = task_id
        ctd.subtask_id = subtask_id
        ctd.environment = "DEFAULT"
        ctd.deadline = timeout_to_deadline(subtask_timeout)

        task_mock.query_extra_data_return_value = Task.ExtraData(
            should_wait=False, ctd=ctd)
        Task.get_progress = Mock()
        task_mock.get_progress.return_value = 0.3

        return task_mock
Example #21
    def test_resource_send(self, mock_addr):
        from pydispatch import dispatcher
        mock_addr.return_value = self.addr_return
        self.tm.task_persistence = True
        t = Task(
            TaskHeader("ABC", "xyz", "10.10.10.10", 1023, "abcde", "DEFAULT"),
            "print 'hello world'")
        listener_mock = Mock()

        def listener(sender, signal, event, task_id):
            self.assertEqual(event, 'task_status_updated')
            self.assertEqual(task_id, t.header.task_id)
            listener_mock()

        dispatcher.connect(listener, signal='golem.taskmanager')
        try:
            sync_wait(self.tm.add_new_task(t))
            self.tm.resources_send("xyz")
            self.assertEqual(listener_mock.call_count, 2)
        finally:
            dispatcher.disconnect(listener, signal='golem.taskmanager')
Example #22
    def test_update_signatures(self, _):

        node = Node("node", "key_id", "10.0.0.10", 40103, "1.2.3.4", 40103,
                    None, 40102, 40102)
        task = Task(
            TaskHeader("node",
                       "task_id",
                       "1.2.3.4",
                       1234,
                       "key_id",
                       "environment",
                       task_owner=node), '')

        self.tm.keys_auth = EllipticalKeysAuth(self.path)
        sync_wait(self.tm.add_new_task(task))
        sig = task.header.signature

        self.tm.update_task_signatures()
        assert task.header.signature == sig

        task.header.task_owner.pub_port = 40104
        self.tm.update_task_signatures()
        assert task.header.signature != sig
Example #23
    def test_task_simple_serializer(self):
        with self.assertRaises(TypeError):
            Task.build_task("Not Task Builder")
        with self.assertRaises(TypeError):
            Task.register_listener("Not Listener")
        t = Task(Mock(), "")
        self.assertIsInstance(t, Task)
        self.assertEqual(t.get_stdout("abc"), "")
        self.assertEqual(t.get_stderr("abc"), "")
        self.assertEqual(t.get_results("abc"), [])

        t = Task(
            TaskHeader("ABC", "xyz", "10.10.10.10", 1023, "key", "DEFAULT",
                       Node()), "print 'Hello world'")

        tl1 = TaskEventListener()
        tl2 = TaskEventListener()
        t.register_listener(tl1)
        t.register_listener(tl2)
        assert len(t.listeners) == 2
        p = SimpleSerializer.dumps(t)
        u = SimpleSerializer.loads(p)
        assert t.src_code == u.src_code
        assert t.header.task_id == u.header.task_id
        assert t.header.task_owner.node_name == u.header.task_owner.node_name
        assert u.get_results("abc") == []
        assert len(t.listeners) == 2
        assert len(u.listeners) == 0
        t.unregister_listener(tl2)
        assert len(t.listeners) == 1
        assert t.listeners[0] == tl1
        t.listeners[0].notify_update_task("abc")
        t.unregister_listener(tl1)
        assert len(t.listeners) == 0
        with self.assertLogs(logger, level="WARNING"):
            t.unregister_listener(tl1)
Example #24
class TestTaskTester(TestDirFixture, LogTestCase):
    task = Task(Mock(), Mock())
    node = 'node1'
    task_name = 'task1'

    def test_init(self):
        self.task.query_extra_data_for_test_task = Mock()
        self.assertIsNotNone(TaskTester(self.task, self.path, None, None))

    def test_task_computed(self):
        result = [{"data": True}, 123]

        self.task.header.node_name = self.node
        self.task.header.task_id = self.task_name
        self.task.root_path = self.path
        self.task.after_test = lambda res, tmp_dir: {}
        self.task.query_extra_data_for_test_task = Mock()

        tt = TaskTester(self.task, self.path, Mock(), Mock())
        tt.tmp_dir = self.path
        task_thread = TaskThread(result)
        tt.task_computed(task_thread)

        task_thread = MemTaskThread(None, 30210, "Some error")
        with self.assertLogs(logger, level=1):
            tt.task_computed(task_thread)
        tt.error_callback.assert_called_with("Some error")

        task_thread = MemTaskThread("result", 2010, "Another error")
        self.assertIsNone(tt.get_progress())
        tt.tt = task_thread
        self.assertEqual(tt.get_progress(), "30%")
        task_thread.error = True
        self.assertEqual(tt.get_progress(), 0)
        tt.task_computed(task_thread)
        tt.error_callback.assert_called_with("Another error")

        self.message = ""

        def success_callback(res, est_mem, time_spent, after_test_data):
            self.message = "Success " + after_test_data["warnings"]

        self.task.header.node_name = self.node
        self.task.header.task_id = self.task_name
        self.task.root_path = self.path
        self.task.after_test = lambda res, tmp_dir: {"warnings": "bla ble"}
        self.task.query_extra_data_for_test_task = Mock()

        tt = TaskTester(self.task, self.path, success_callback, None)
        tt.tmp_dir = self.path
        task_thread = TaskThread(result)
        tt.task_computed(task_thread)
        self.assertTrue("bla" in self.message)
        self.assertTrue("ble" in self.message)

    def test_is_success(self):
        self.task.query_extra_data_for_test_task = Mock()
        tt = TaskTester(self.task, self.path, Mock(), Mock())
        task_thread = Mock()

        # Proper task
        task_thread.error = None
        task_thread.result = ({"data": True}, 123)
        assert tt.is_success(task_thread)

        # Task thread result is not a tuple
        task_thread.result = {"data": True}
        assert not tt.is_success(task_thread)
        assert task_thread.error == "Wrong result format"
Example #25
 def test_init(self):
     task = Task(Mock(), Mock(), Mock())
     task.query_extra_data_for_test_task = Mock()
     self.assertIsNotNone(TaskTester(task, self.path, None, None))