Example #1
    def test_task_header_unrelated_tags(self):
        task = Task({})
        task.tags = ['foo', 'bar']

        self.command.task_header(task)

        self.assertEqual(self.prompt.get_write_tags(), [])
Example #2
    def test_display_with_keyboard_interrupt(self, mock_display):
        # Setup
        task_list = []
        for i in range(0, 3):
            task = Task(TASK_TEMPLATE)
            task.task_id = 'task_%s' % i
            task_list.append(task)

        # Side effect to simulate keyboard interrupt
        def interrupt(context, renderer, task_id, quiet_waiting=True):
            if task_id == 'task_1':
                raise KeyboardInterrupt()
            else:
                return task_id

        mock_display.side_effect = interrupt

        # Test
        status._display_status(self.context, self.renderer, task_list)

        # Verify
        self.assertEqual(
            2, mock_display.call_count)  # not called for the third task
        for i, call_args in enumerate(mock_display.call_args_list):
            self.assertEqual(call_args[0][0], self.context)
            self.assertEqual(call_args[0][1], self.renderer)
            self.assertEqual(call_args[0][2], 'task_%s' % i)

            expected_quiet = i > 0
            self.assertEqual(call_args[1]['quiet_waiting'], expected_quiet)
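
The interrupt handling above is plain unittest.mock behaviour and can be reproduced outside of Pulp. A minimal sketch, where show_tasks is a hypothetical stand-in for status._display_status rather than the real function:

import mock  # the standalone mock library already used by these tests


def fake_display(task_id):
    # Simulate the user hitting ctrl-c while the second task is on screen.
    if task_id == 'task_1':
        raise KeyboardInterrupt()
    return task_id


def show_tasks(display, task_ids):
    # Hypothetical stand-in for the function under test: stop on interrupt.
    shown = []
    for task_id in task_ids:
        try:
            shown.append(display(task_id))
        except KeyboardInterrupt:
            break
    return shown


mock_display = mock.Mock(side_effect=fake_display)
assert show_tasks(mock_display, ['task_0', 'task_1', 'task_2']) == ['task_0']
assert mock_display.call_count == 2  # the interrupt stops it before task_2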
Example #3
    def test_task_header_no_tags(self):
        task = Task({})
        task.tags = []

        self.command.task_header(task)

        self.assertEqual(self.prompt.get_write_tags(), [])
Example #4
    def test_task_header_no_tags(self):
        task = Task({})
        task.tags = []

        self.command.task_header(task)

        self.assertEqual(self.prompt.get_write_tags(), [])
    def test_run_already_in_progress(self, mock_sync, mock_get, mock_status):
        # Setup
        data = {
            options.OPTION_REPO_ID.keyword: 'test-repo',
            sp.NAME_BACKGROUND: False,
        }

        # Simulate a task already running
        task_data = copy.copy(CALL_REPORT_TEMPLATE)
        task_data['response'] = 'accepted'
        task_data['state'] = 'running'
        task = Task(task_data)
        mock_get.return_value = Response(200, [task])

        # Response from the sync call
        task_data = copy.copy(CALL_REPORT_TEMPLATE)
        task = Task(task_data)
        mock_sync.return_value = Response(202, [task])

        # Test
        self.command.run(**data)

        # Verify
        self.assertEqual(1, mock_status.call_count)

        tags = self.prompt.get_write_tags()
        self.assertEqual(2, len(tags))
        self.assertEqual(tags[1], 'in-progress')
Example #6
    def test_task_header_action_tag_only(self):
        task = Task({})
        task.tags = [tags.action_tag(tags.ACTION_UPDATE_DISTRIBUTOR)]

        self.command.task_header(task)

        self.assertEqual(self.prompt.get_write_tags(), [tags.ACTION_UPDATE_DISTRIBUTOR])
Example #7
    def test_task_header_action_tag_only(self):
        task = Task({})
        task.tags = [tags.action_tag(tags.ACTION_UPDATE_DISTRIBUTOR)]

        self.command.task_header(task)

        self.assertEqual(self.prompt.get_write_tags(), [tags.ACTION_UPDATE_DISTRIBUTOR])
Example #8
    def test_task_header_unrelated_tags(self):
        task = Task({})
        task.tags = ['foo', 'bar']

        self.command.task_header(task)

        self.assertEqual(self.prompt.get_write_tags(), [])
Example #9
    def test_display_with_keyboard_interrupt(self, mock_display):
        # Setup
        task_list = []
        for i in range(0, 3):
            task = Task(TASK_TEMPLATE)
            task.task_id = 'task_%s' % i
            task_list.append(task)

        # Side effect to simulate keyboard interrupt
        def interrupt(context, renderer, task_id, quiet_waiting=True):
            if task_id == 'task_1':
                raise KeyboardInterrupt()
            else:
                return task_id
        mock_display.side_effect = interrupt

        # Test
        status._display_status(self.context, self.renderer, task_list)

        # Verify
        self.assertEqual(2, mock_display.call_count)  # not called for the third task
        for i, call_args in enumerate(mock_display.call_args_list):
            self.assertEqual(call_args[0][0], self.context)
            self.assertEqual(call_args[0][1], self.renderer)
            self.assertEqual(call_args[0][2], 'task_%s' % i)

            expected_quiet = i > 0
            self.assertEqual(call_args[1]['quiet_waiting'], expected_quiet)
Example #10
    def test_succeeded_one_change(self):
        task = Task(self.TASK_TEMPLATE)
        task.result = {'details': self._generate_details(), 'num_changes': 1}
        self.command.succeeded(task)

        tags = self.prompt.get_write_tags()
        self.assertTrue(content.TAG_CHANGE_MADE in tags)
    def test_succeeded_hands_off_errors(self, mock_render):
        task = Task(self.TASK_TEMPLATE)
        task.result = {
            'details': self._generate_details({'errors': {'foo/bar': {}}}),
            'num_changes': 1
        }
        self.command.succeeded('', task)

        mock_render.assert_called_once_with(task.result)
    def test_succeeded_hands_off_errors(self, mock_render):
        task = Task(self.TASK_TEMPLATE)
        task.result = {
            'details': self._generate_details({'errors': {'foo/bar': {}}}),
            'num_changes': 1
        }
        self.command.succeeded(task)

        mock_render.assert_called_once_with(task.result)
Example #13
    def test_succeeded_multiple_changes(self):
        task = Task(self.TASK_TEMPLATE)
        task.result = {'details': self._generate_details(), 'num_changes': 2}
        self.command.succeeded(task)

        tags = self.prompt.get_write_tags()
        self.assertTrue(content.TAG_CHANGE_MADE in tags)
        # make sure it's just 1 message even though there were 2 changes
        self.assertEqual(
            len(filter(lambda x: x == content.TAG_CHANGE_MADE, tags)), 1)
    def test_succeeded_no_change(self):
        task = Task(self.TASK_TEMPLATE)
        task.result = {
            'details': {constants.TYPE_PUPPET_MODULE: {'details': {}}},
            'num_changes': 0
        }
        self.command.succeeded(task)

        tags = self.prompt.get_write_tags()
        self.assertTrue(content.TAG_NO_CHANGES in tags)
    def test_succeeded_no_change(self):
        task = Task(self.TASK_TEMPLATE)
        task.result = {
            'details': {constants.TYPE_PUPPET_MODULE: {'details': {}}},
            'num_changes': 0
        }
        self.command.succeeded('', task)

        tags = self.prompt.get_write_tags()
        self.assertTrue(content.TAG_NO_CHANGES in tags)
    def test_succeeded_one_change(self):
        task = Task(self.TASK_TEMPLATE)
        task.result = {
            'details': self._generate_details(),
            'num_changes': 1
        }
        self.command.succeeded('', task)

        tags = self.prompt.get_write_tags()
        self.assertTrue(content.TAG_CHANGE_MADE in tags)
Example #17
    def _request(self, method, path, queries=(), body=None, ensure_encoding=True):
        """
        Make an HTTP request to the Pulp server and return the response.

        :param method:  name of an HTTP method such as GET, POST, PUT, HEAD
                        or DELETE
        :type  method:  basestring

        :param path:    URL for this request
        :type  path:    basestring

        :param queries: mapping object or a sequence of 2-element tuples,
                        in either case representing key-value pairs to be used
                        as query parameters on the URL.
        :type  queries: mapping object or sequence of 2-element tuples

        :param body:    Data structure that will be JSON serialized and sent as
                        the request's body.
        :type  body:    Anything that is JSON-serializable.

        :param ensure_encoding: toggle proper string encoding for the body
        :type ensure_encoding: bool

        :return:    Response object
        :rtype:     pulp.bindings.responses.Response

        :raises:    ConnectionException or one of the RequestExceptions
                    (depending on response codes) in case of unsuccessful
                    request
        """
        url = self._build_url(path, queries)
        if ensure_encoding:
            body = self._process_body(body)
        if not isinstance(body, (NoneType, basestring)):
            body = json.dumps(body)
        self.log.debug('sending %s request to %s' % (method, url))

        response_code, response_body = self.server_wrapper.request(method, url, body)

        if self.api_responses_logger:
            self.api_responses_logger.info(
                '%s request to %s with parameters %s' % (method, url, body))
            self.api_responses_logger.info("Response status : %s \n" % response_code)
            self.api_responses_logger.info(
                "Response body :\n %s\n" % json.dumps(response_body, indent=2))

        if response_code >= 300:
            self._handle_exceptions(response_code, response_body)
        elif response_code == 200 or response_code == 201:
            body = response_body
        elif response_code == 202:
            if isinstance(response_body, list):
                body = [Task(t) for t in response_body]
            else:
                body = Task(response_body)

        return Response(response_code, body)
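
The tail of this method decides what ends up in Response.response_body. The sketch below restates just that branching in isolation; body_for is a made-up name, not part of the binding:

from pulp.bindings.responses import Task


def body_for(response_code, response_body):
    # 200/201 bodies pass through untouched, a 202 call report (or list of
    # call reports) is wrapped in Task objects, and anything >= 300 is handed
    # to the exception handlers instead of being returned.
    if response_code >= 300:
        raise RuntimeError('handed to self._handle_exceptions in the binding')
    if response_code == 202:
        if isinstance(response_body, list):
            return [Task(t) for t in response_body]
        return Task(response_body)
    return response_body


assert isinstance(body_for(202, {'state': 'waiting'}), Task)
assert isinstance(body_for(202, [{'state': 'waiting'}])[0], Task)
assert body_for(200, {'id': 'zoo'}) == {'id': 'zoo'}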
Example #18
    def test_display_status_rejected(self):
        # Setup
        rejected_task = Task(TASK_TEMPLATE)
        rejected_task.response = RESPONSE_REJECTED

        # Test
        status._display_status(self.context, self.renderer, [rejected_task])

        # Verify
        expected_tags = ['ctrl-c', 'rejected-msg', 'rejected-desc']
        self.assertEqual(expected_tags, self.prompt.get_write_tags())
Example #19
    def test_display_status_rejected(self):
        # Setup
        rejected_task = Task(TASK_TEMPLATE)
        rejected_task.response = RESPONSE_REJECTED

        # Test
        status._display_status(self.context, self.renderer, [rejected_task])

        # Verify
        expected_tags = ['ctrl-c', 'rejected-msg', 'rejected-desc']
        self.assertEqual(expected_tags, self.prompt.get_write_tags())
    def test_succeeded_multiple_changes(self):
        task = Task(self.TASK_TEMPLATE)
        task.result = {
            'details': self._generate_details(),
            'num_changes': 2
        }
        self.command.succeeded('', task)

        tags = self.prompt.get_write_tags()
        self.assertTrue(content.TAG_CHANGE_MADE in tags)
        # make sure it's just 1 message even though there were 2 changes
        self.assertEqual(
            len(filter(lambda x: x == content.TAG_CHANGE_MADE, tags)), 1)
Example #21
    def test_task_header_with_dist_tags(self):
        task = Task({})
        task.tags = [
            tags.action_tag(tags.ACTION_UPDATE_DISTRIBUTOR),
            tags.resource_tag(tags.RESOURCE_REPOSITORY_DISTRIBUTOR_TYPE, 'some_distributor'),
        ]

        self.command.task_header(task)

        self.assertEqual(self.prompt.get_write_tags(), [tags.ACTION_UPDATE_DISTRIBUTOR])
        # the message in this case should end with the distributor type
        self.assertTrue(self.recorder.lines[0].strip().endswith('some_distributor'))
Example #22
    def test_display_status_postponed(self):
        # Setup
        postponed_task = Task(TASK_TEMPLATE)
        postponed_task.response = RESPONSE_POSTPONED
        postponed_task.state = STATE_WAITING

        # Test
        status._display_status(self.context, self.renderer, [postponed_task])

        # Verify
        expected_tags = ['ctrl-c', 'postponed']
        self.assertEqual(expected_tags, self.prompt.get_write_tags())
Example #23
    def test_task_header_with_dist_tags(self):
        task = Task({})
        task.tags = [
            tags.action_tag(tags.ACTION_UPDATE_DISTRIBUTOR),
            tags.resource_tag(tags.RESOURCE_REPOSITORY_DISTRIBUTOR_TYPE, 'some_distributor'),
        ]

        self.command.task_header(task)

        self.assertEqual(self.prompt.get_write_tags(), [tags.ACTION_UPDATE_DISTRIBUTOR])
        # the message in this case should end with the distributor type
        self.assertTrue(self.recorder.lines[0].strip().endswith('some_distributor'))
Example #24
    def test_display_status_postponed(self):
        # Setup
        postponed_task = Task(TASK_TEMPLATE)
        postponed_task.response = RESPONSE_POSTPONED
        postponed_task.state = STATE_WAITING

        # Test
        status._display_status(self.context, self.renderer, [postponed_task])

        # Verify
        expected_tags = ['ctrl-c', 'postponed']
        self.assertEqual(expected_tags, self.prompt.get_write_tags())
Example #25
        def poll(task_id):
            task = Task(TASK_TEMPLATE)

            # Wait for the first 2 polls
            if mock_get.call_count < 3:
                task.state = STATE_WAITING

            # Running for the next 10
            elif mock_get.call_count < 13:
                task.state = STATE_RUNNING

            # Finally finish
            else:
                task.state = STATE_FINISHED

            return Response(200, task)
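
This closure only does something once it is attached as a side effect: mock increments call_count before the side effect runs, so each simulated status call advances the task. A self-contained sketch of that wiring, with plain strings standing in for the STATE_* constants and the Response wrapper:

import mock

mock_get = mock.Mock()


def poll(task_id):
    if mock_get.call_count < 3:      # first two polls: waiting
        state = 'waiting'
    elif mock_get.call_count < 13:   # next ten polls: running
        state = 'running'
    else:                            # finally finished
        state = 'finished'
    return {'task_id': task_id, 'state': state}


mock_get.side_effect = poll

states = [mock_get('some-task')['state'] for _ in range(13)]
assert states[:2] == ['waiting', 'waiting']
assert states[2:12] == ['running'] * 10
assert states[12] == 'finished'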
Example #26
    def get_all_tasks(self, tags=()):
        """
        Retrieves all tasks in the system. If tags are specified, only tasks
        that contain all of the given tags are returned. All tasks will be
        represented by Task objects in a list in the response's response_body
        attribute.

        @param tags: if specified, only tasks that contain all tags in the given
                     list are returned; None to return all tasks
        @type  tags: list

        @return: response with a list of Task objects; empty list for no matching tasks
        @rtype:  Response
        """
        path = '/v2/tasks/'
        tags = [('tag', t) for t in tags]

        response = self.server.GET(path, queries=tags)

        tasks = []
        for doc in response.response_body:
            tasks.append(Task(doc))

        response.response_body = tasks
        return response
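
A hedged usage sketch with a mocked server connection; the import locations (pulp.bindings.tasks.TasksAPI, pulp.bindings.responses.Response) and the tag strings are assumptions made for illustration:

import mock

from pulp.bindings.responses import Response
from pulp.bindings.tasks import TasksAPI

server = mock.Mock()
server.GET.return_value = Response(
    200, [{'id': '1', 'task_id': 'abc', 'state': 'running'}])

api = TasksAPI(server)
response = api.get_all_tasks(tags=['pulp:repository:zoo', 'pulp:action:sync'])

# The binding turns each tag into a ('tag', value) query parameter and wraps
# every returned document in a Task object.
server.GET.assert_called_once_with(
    '/v2/tasks/',
    queries=[('tag', 'pulp:repository:zoo'), ('tag', 'pulp:action:sync')])
assert response.response_body[0].state == 'running'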
Example #27
    def test_progress(self):
        mock_renderer = mock.MagicMock()
        command = export.GroupExportStatusCommand(
            self.context, mock_renderer, ids.TYPE_ID_DISTRIBUTOR_GROUP_EXPORT)
        test_task = Task({"progress_report": 'foo'})
        command.progress(test_task, None)
        mock_renderer.display_report.assert_called_once_with('foo')
Example #28
        def poll(task_id):
            task = Task(TASK_TEMPLATE)

            # Wait for the first 2 polls
            if mock_get.call_count < 3:
                task.state = STATE_WAITING

            # Running for the next 10
            elif mock_get.call_count < 13:
                task.state = STATE_RUNNING

            # Finally finish
            else:
                task.state = STATE_ERROR

            return Response(200, task)
Example #29
    def get_all_tasks(self, tags=()):
        """
        Retrieves all tasks in the system. If tags are specified, only tasks
        that contain all of the given tags are returned. All tasks will be
        represented by Task objects in a list in the response's response_body
        attribute.

        :param tags: if specified, only tasks that contain all tags in the given
                     list are returned; None to return all tasks
        :type  tags: list
        :return:     response with a list of Task objects; empty list for no
                     matching tasks
        :rtype:      Response
        """
        path = '/v2/tasks/'
        tags = [('tag', t) for t in tags]

        response = self.server.GET(path, queries=tags)

        tasks = []
        # sort based on id, which is chronological in mongo
        for doc in sorted(response.response_body, key=lambda x: x['id']):
            tasks.append(Task(doc))

        response.response_body = tasks
        return response
Example #30
    def test_run_async(self):
        # Setup
        repo_id = 'test-repo'
        data = {
            OPTION_REPO_ID.keyword: repo_id,
            OPTION_NAME.keyword: 'Test Repository',
            OPTION_DESCRIPTION.keyword: 'Repository Description',
            OPTION_NOTES.keyword: {
                'a': 'a',
                'b': 'b'
            },
            'distributor_configs': {
                'alpha': {
                    'beta': 'gamma'
                }
            },
            'importer_config': {
                'delta': 'epsilon'
            }
        }

        result_task = Task({})
        self.server_mock.request.return_value = 200, result_task
        self.command.poll = mock.Mock()

        # Test
        self.command.run(**data)

        # Verify
        self.assertEqual(1, self.server_mock.request.call_count)
        self.assertEqual('PUT', self.server_mock.request.call_args[0][0])

        url = self.server_mock.request.call_args[0][1]
        self.assertTrue(url.endswith('/repositories/%s/' % repo_id))

        body = self.server_mock.request.call_args[0][2]
        body = json.loads(body)

        body_target = {
            'delta': {
                'display_name': 'Test Repository',
                'description': 'Repository Description',
                'notes': {
                    'a': 'a',
                    'b': 'b'
                }
            },
            'distributor_configs': {
                'alpha': {
                    'beta': 'gamma'
                }
            },
            'importer_config': {
                'delta': 'epsilon'
            }
        }
        compare_dict(body, body_target)

        self.command.poll.assert_called_once_with([result_task], mock.ANY)
Example #31
    def test_poll_additional_spawned_tasks_list(self):
        """
        Test polling over a list where a task has spawned additional tasks that need to be
        added to the polling list

        Task Count: 3
        Statuses: None; normal progression
        Result: All Success
        """

        # Setup
        sim = TaskSimulator()
        sim.install(self.bindings)

        states_1 = [STATE_WAITING, STATE_RUNNING, STATE_FINISHED]
        states_2 = [STATE_WAITING, STATE_WAITING, STATE_RUNNING, STATE_FINISHED]
        states_3 = [STATE_WAITING, STATE_RUNNING, STATE_RUNNING, STATE_RUNNING, STATE_FINISHED]

        task_1_states = sim.add_task_states('1', states_1)
        sim.add_task_states('2', states_2)
        sim.add_task_states('3', states_3)

        container_task = Task({})
        task_list = sim.get_all_tasks().response_body
        task_1_states[2].spawned_tasks = task_list[1:]
        # Test
        container_task.spawned_tasks = sim.get_all_tasks().response_body
        completed_tasks = self.command.poll(task_list[:1], {})

        expected_tags = ['abort', # default, always displayed
                         # states_1
                         'delayed-spinner', 'running-spinner', 'succeeded',
                         # states_2
                         'header', 'delayed-spinner', 'running-spinner', 'running-spinner',
                         'succeeded',
                         # states_3
                         'header', 'delayed-spinner', 'running-spinner', 'running-spinner',
                         'running-spinner',  'succeeded',
                         ]
        found_tags = self.prompt.get_write_tags()
        self.assertEqual(expected_tags, found_tags)

        self.assertTrue(isinstance(completed_tasks, list))
        self.assertEqual(3, len(completed_tasks))
        for i in range(0, 3):
            self.assertEqual(STATE_FINISHED, completed_tasks[i].state)
Example #32
    def test_poll_spawned_tasks_list(self):
        """
        Test the structure where a command has both synchronous and asynchronous sections
        and returns a task structure with a result and a spawned_tasks list

        Task Count: 3
        Statuses: None; normal progression
        Result: All Success
        """

        # Setup
        sim = TaskSimulator()
        sim.install(self.bindings)

        states_1 = [STATE_WAITING, STATE_RUNNING, STATE_FINISHED]
        states_2 = [STATE_WAITING, STATE_WAITING, STATE_RUNNING, STATE_FINISHED]
        states_3 = [STATE_WAITING, STATE_RUNNING, STATE_RUNNING, STATE_RUNNING, STATE_FINISHED]

        sim.add_task_states('1', states_1)
        sim.add_task_states('2', states_2)
        sim.add_task_states('3', states_3)

        container_task = Task({})

        # Test
        container_task.spawned_tasks = sim.get_all_tasks().response_body
        completed_tasks = self.command.poll(container_task, {})

        expected_tags = ['abort', # default, always displayed
                         # states_1
                         'header', 'delayed-spinner', 'running-spinner', 'running-spinner', 'succeeded',
                         # states_2
                         'header', 'delayed-spinner', 'delayed-spinner', 'running-spinner', 'running-spinner',
                         'succeeded',
                         # states_3
                         'header', 'delayed-spinner', 'running-spinner', 'running-spinner',
                         'running-spinner', 'running-spinner', 'succeeded',
                         ]
        found_tags = self.prompt.get_write_tags()
        self.assertEqual(expected_tags, found_tags)

        self.assertTrue(isinstance(completed_tasks, list))
        self.assertEqual(3, len(completed_tasks))
        for i in range(0, 3):
            self.assertEqual(STATE_FINISHED, completed_tasks[i].state)
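
Both versions of this test pin down the same contract: poll follows spawned_tasks, so work reported by an already-tracked task joins the polling list and is tracked to completion as well. A toy, self-contained restatement of that control flow (this is not the real poll implementation):

class ToyTask(object):
    def __init__(self, states, spawned=None):
        self._states = list(states)
        self.spawned_tasks = spawned or []
        self.state = None

    def refresh(self):
        # Each "poll" consumes the next simulated state, like TaskSimulator.
        if self._states:
            self.state = self._states.pop(0)
        return self.state


def poll_sketch(tasks):
    queue = list(tasks)
    completed = []
    while queue:
        task = queue.pop(0)
        while task.refresh() not in ('finished', 'error'):
            pass  # the real poller sleeps and redraws spinners here
        queue.extend(task.spawned_tasks)  # spawned work joins the list
        completed.append(task)
    return completed


child = ToyTask(['waiting', 'running', 'finished'])
parent = ToyTask(['running', 'finished'], spawned=[child])
assert [t.state for t in poll_sketch([parent])] == ['finished', 'finished']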
Example #33
    def search(self, **kwargs):
        """
        Call the superclass search, and intercept the results so that we can
        turn the items back into Tasks.

        :param kwargs: Arguments to pass to SearchAPI.search()
        :type  kwargs: dict
        """
        tasks = super(TaskSearchAPI, self).search(**kwargs)

        return [Task(task) for task in tasks]
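
A hedged usage sketch; the module paths and the filters keyword are assumptions about the bindings layout, and the interesting behaviour is simply that raw documents come back wrapped as Task objects:

import mock

from pulp.bindings.search import SearchAPI
from pulp.bindings.tasks import TaskSearchAPI

with mock.patch.object(SearchAPI, 'search', return_value=[{'state': 'running'}]):
    api = TaskSearchAPI(mock.Mock())
    results = api.search(filters={'state': 'running'})

assert len(results) == 1
assert results[0].state == 'running'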
Example #34
    def test_display_status(self, mock_display):
        # Setup
        task_list = []
        for i in range(0, 2):
            task = Task(TASK_TEMPLATE)
            task.task_id = 'task_%s' % i
            task_list.append(task)

        # Test
        status._display_status(self.context, self.renderer, task_list)

        # Verify
        self.assertEqual(2, mock_display.call_count)
        for i, call_args in enumerate(mock_display.call_args_list):
            self.assertEqual(call_args[0][0], self.context)
            self.assertEqual(call_args[0][1], self.renderer)
            self.assertEqual(call_args[0][2], 'task_%s' % i)

            expected_quiet = i > 0
            self.assertEqual(call_args[1]['quiet_waiting'], expected_quiet)
Example #35
    def test_display_status(self, mock_display):
        # Setup
        task_list = []
        for i in range(0, 2):
            task = Task(TASK_TEMPLATE)
            task.task_id = 'task_%s' % i
            task_list.append(task)

        # Test
        status._display_status(self.context, self.renderer, task_list)

        # Verify
        self.assertEqual(2, mock_display.call_count)
        for i, call_args in enumerate(mock_display.call_args_list):
            self.assertEqual(call_args[0][0], self.context)
            self.assertEqual(call_args[0][1], self.renderer)
            self.assertEqual(call_args[0][2], 'task_%s' % i)

            expected_quiet = i > 0
            self.assertEqual(call_args[1]['quiet_waiting'], expected_quiet)
Example #36
    def get_task_group(self, task_group_id):
        """
        Retrieves the status of all tasks in the task group.

        @param task_group_id: ID of the task group to retrieve
        @type task_group_id: str
        @return: response with the status of all tasks in the task group in its body
        @rtype: Response
        """
        path = '/v2/task_groups/%s/' % task_group_id
        response = self.server.GET(path)
        response.response_body = [Task(t) for t in response.response_body]
        return response
Example #37
    def get_task(self, task_id):
        """
        Retrieves the status of the given task if it exists.

        @return: response with a Task object in the response_body
        @rtype:  Response

        @raise NotFoundException: if there is no task with the given ID
        """
        path = '/v2/tasks/%s/' % task_id
        response = self.server.GET(path)

        # Since it was a 200, the connection parsed the response body into a
        # Document. We know this will be task data, so convert the object here.
        response.response_body = Task(response.response_body)
        return response
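
A hedged sketch of a call site: a caller can loop on get_task until the task reaches a terminal state. The connection is mocked so the snippet stands alone; TasksAPI's import location and the terminal-state names are assumptions for illustration:

import mock

from pulp.bindings.responses import Response
from pulp.bindings.tasks import TasksAPI

server = mock.Mock()
server.GET.side_effect = [
    Response(200, {'state': 'waiting'}),
    Response(200, {'state': 'running'}),
    Response(200, {'state': 'finished'}),
]

api = TasksAPI(server)
state = None
while state not in ('finished', 'error', 'canceled'):
    state = api.get_task('some-task-id').response_body.state

assert server.GET.call_count == 3
server.GET.assert_called_with('/v2/tasks/some-task-id/')
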
    def test_run(self, mock_get, mock_status):
        # Setup
        data = {
            options.OPTION_REPO_ID.keyword: 'test-repo',
        }

        # Simulate a task already running
        task_data = copy.copy(CALL_REPORT_TEMPLATE)
        task_data['response'] = 'accepted'
        task_data['state'] = 'running'
        task = Task(task_data)
        mock_get.return_value = Response(200, [task])

        # Test
        self.command.run(**data)

        # Verify
        self.assertEqual(1, mock_get.call_count)
        self.assertEqual(1, mock_status.call_count)
    def test_run(self, mock_sync, mock_get, mock_status):
        # Setup
        data = {
            options.OPTION_REPO_ID.keyword: 'test-repo',
            sp.NAME_BACKGROUND: False,
        }

        # No tasks are running
        mock_get.return_value = Response(200, [])

        # Response from the sync call
        task_data = copy.copy(CALL_REPORT_TEMPLATE)
        task = Task(task_data)
        mock_sync.return_value = Response(202, [task])

        # Test
        self.command.run(**data)

        # Verify
        self.assertEqual(1, mock_status.call_count)
    def test_run_background(self, mock_publish, mock_get, mock_status):
        # Setup
        data = {
            options.OPTION_REPO_ID.keyword: 'test-repo',
            sp.NAME_BACKGROUND: True,
        }

        # No tasks are running
        mock_get.return_value = Response(200, [])

        # Response from the sync call
        task_data = copy.copy(CALL_REPORT_TEMPLATE)
        task = Task(task_data)
        mock_publish.return_value = Response(202, task)

        # Test
        self.command.run(**data)

        # Verify
        self.assertEqual(0, mock_status.call_count)  # since it's background

        tags = self.prompt.get_write_tags()
        self.assertEqual(2, len(tags))
        self.assertEqual(tags[1], 'background')
Example #41
    def test_succeeded(self):
        self.command.prompt = mock.Mock()
        task = Task({})
        self.command.succeeded(task)
        self.assertTrue(self.command.prompt.render_success_message.called)
Example #42
    def test_succeeded_error_in_result(self):
        self.command.prompt = mock.Mock()
        task = Task({'result': {'details': {'errors': ['foo']}}})
        self.command.succeeded(task)
        self.assertTrue(self.command.prompt.render_failure_message.called)
    def test_schedules_strategy(self):
        try:
            consumer_content.ConsumerContentScheduleStrategy(
                self.mock_context, self.action)
        except Exception, e:
            self.fail(str(e))


POSTPONED_TASK = Task({
    'call_request_id': '1',
    'call_request_group_id': None,
    'call_request_tags': [],
    'start_time': None,
    'finish_time': None,
    'response': 'postponed',
    'reasons': [],
    'state': 'waiting',
    'progress': {},
    'result': None,
    'exception': None,
    'traceback': None
})


class InstallCommandTests(base.PulpClientTests):
    def setUp(self):
        super(InstallCommandTests, self).setUp()
        self.command = consumer_content.ConsumerContentInstallCommand(
            self.context)

    def test_structure(self):
Example #44
    def test_poll_spawned_tasks_list(self):
        """
        Test the structure where a command has both synchronous and asynchronous sections
        and returns a task structure with a result and a spawned_tasks list

        Task Count: 3
        Statuses: None; normal progression
        Result: All Success
        """

        # Setup
        sim = TaskSimulator()
        sim.install(self.bindings)

        states_1 = [STATE_WAITING, STATE_RUNNING, STATE_FINISHED]
        states_2 = [
            STATE_WAITING, STATE_WAITING, STATE_RUNNING, STATE_FINISHED
        ]
        states_3 = [
            STATE_WAITING, STATE_RUNNING, STATE_RUNNING, STATE_RUNNING,
            STATE_FINISHED
        ]

        sim.add_task_states('1', states_1)
        sim.add_task_states('2', states_2)
        sim.add_task_states('3', states_3)

        container_task = Task({})

        # Test
        container_task.spawned_tasks = sim.get_all_tasks().response_body
        completed_tasks = self.command.poll(container_task, {})

        expected_tags = [
            'abort',  # default, always displayed
            # states_1
            'header',
            'delayed-spinner',
            'running-spinner',
            'running-spinner',
            'succeeded',
            # states_2
            'header',
            'delayed-spinner',
            'delayed-spinner',
            'running-spinner',
            'running-spinner',
            'succeeded',
            # states_3
            'header',
            'delayed-spinner',
            'running-spinner',
            'running-spinner',
            'running-spinner',
            'running-spinner',
            'succeeded'
        ]
        found_tags = self.prompt.get_write_tags()
        self.assertEqual(set(expected_tags), set(found_tags))

        self.assertTrue(isinstance(completed_tasks, list))
        self.assertEqual(3, len(completed_tasks))
        for i in range(0, 3):
            self.assertEqual(STATE_FINISHED, completed_tasks[i].state)
Example #45
    def test_poll_additional_spawned_tasks_list(self):
        """
        Test polling over a list where a task has spawned additional tasks that need to be
        added to the polling list

        Task Count: 3
        Statuses: None; normal progression
        Result: All Success
        """

        # Setup
        sim = TaskSimulator()
        sim.install(self.bindings)

        states_1 = [STATE_WAITING, STATE_RUNNING, STATE_FINISHED]
        states_2 = [
            STATE_WAITING, STATE_WAITING, STATE_RUNNING, STATE_FINISHED
        ]
        states_3 = [
            STATE_WAITING, STATE_RUNNING, STATE_RUNNING, STATE_RUNNING,
            STATE_FINISHED
        ]

        task_1_states = sim.add_task_states('1', states_1)
        sim.add_task_states('2', states_2)
        sim.add_task_states('3', states_3)

        container_task = Task({})
        task_list = sim.get_all_tasks().response_body
        task_1_states[2].spawned_tasks = task_list[1:]
        # Test
        container_task.spawned_tasks = sim.get_all_tasks().response_body
        completed_tasks = self.command.poll(task_list[:1], {})

        expected_tags = [
            'abort',  # default, always displayed
            # states_1
            'delayed-spinner',
            'running-spinner',
            'succeeded',
            # states_2
            'header',
            'delayed-spinner',
            'running-spinner',
            'running-spinner',
            'succeeded',
            # states_3
            'header',
            'delayed-spinner',
            'running-spinner',
            'running-spinner',
            'running-spinner',
            'succeeded'
        ]
        found_tags = self.prompt.get_write_tags()
        self.assertEqual(set(expected_tags), set(found_tags))

        self.assertTrue(isinstance(completed_tasks, list))
        self.assertEqual(3, len(completed_tasks))
        for i in range(0, 3):
            self.assertEqual(STATE_FINISHED, completed_tasks[i].state)