def test_iterates(self, logger):
        """
        If the predicate returns something falsey followed by something truthy,
        then ``loop_until`` fires its ``Deferred`` with the truthy value.
        """
        result = object()
        results = [None, result]

        def predicate():
            return results.pop(0)
        clock = Clock()

        d = loop_until(clock, predicate)

        self.assertNoResult(d)

        clock.advance(0.1)
        self.assertEqual(
            self.successResultOf(d),
            result)

        action = LoggedAction.of_type(logger.messages, LOOP_UNTIL_ACTION)[0]
        assertContainsFields(self, action.start_message, {
            'predicate': predicate,
        })
        assertContainsFields(self, action.end_message, {
            'result': result,
        })
        self.assertTrue(action.succeeded)
        message = LoggedMessage.of_type(
            logger.messages, LOOP_UNTIL_ITERATION_MESSAGE)[0]
        self.assertEqual(action.children, [message])
        assertContainsFields(self, message.message, {
            'result': None,
        })
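
The behaviour these ``loop_until`` tests exercise can be sketched roughly as
follows. This is an illustrative sketch built only on Twisted's public API,
not the project's implementation, and it omits the Eliot action and iteration
messages the assertions above check for:

from twisted.internet.defer import maybeDeferred
from twisted.internet.task import deferLater


def loop_until_sketch(reactor, predicate, interval=0.1):
    """
    Poll ``predicate`` every ``interval`` seconds until it returns a truthy
    value, and fire the returned ``Deferred`` with that value.
    """
    d = maybeDeferred(predicate)

    def check(result):
        if result:
            return result
        # Falsey result: schedule another predicate call on the reactor's
        # clock and check again when it fires.
        return deferLater(reactor, interval, predicate).addCallback(check)

    return d.addCallback(check)
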
Example #2
    def test_boto_ec2response_error(self, logger):
        """
        1. Test that invalid parameters to Boto's EBS API calls
        raise the right exception after logging to Eliot.
        2. Verify Eliot log output for expected message fields
        from logging decorator for boto.exception.EC2Exception
        originating from boto.ec2.connection.EC2Connection.
        """
        # Test 1: Create volume with size 0.
        # Raises: EC2ResponseError
        self.assertRaises(EC2ResponseError, self.api.create_volume,
                          dataset_id=uuid4(), size=0,)

        # Test 2: Set EC2 connection zone to an invalid string.
        # Raises: EC2ResponseError
        self.api.zone = u'invalid_zone'
        self.assertRaises(
            EC2ResponseError,
            self.api.create_volume,
            dataset_id=uuid4(),
            size=self.minimum_allocatable_size,
        )

        # Validate decorated method for exception logging
        # actually logged to ``Eliot`` logger.
        expected_message_keys = {AWS_CODE.key, AWS_MESSAGE.key,
                                 AWS_REQUEST_ID.key}
        for logged in LoggedMessage.of_type(logger.messages,
                                            BOTO_EC2RESPONSE_ERROR,):
            key_subset = set(key for key in expected_message_keys
                             if key in logged.message.keys())
            self.assertEqual(expected_message_keys, key_subset)
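
The decorator whose logging this verifies is not included in the listing. A
hedged sketch of the kind of wrapper the assertions imply is shown below; the
message type string, the Eliot field names, and the attribute access on the
boto exception are assumptions mirroring the keys the test checks (AWS_CODE,
AWS_MESSAGE, AWS_REQUEST_ID), not the project's actual code:

from functools import wraps

from boto.exception import EC2ResponseError
from eliot import Message


def log_ec2_response_errors(method):
    """
    Hypothetical wrapper: log EC2ResponseError details to Eliot, then
    re-raise so callers still see the exception.
    """
    @wraps(method)
    def wrapper(*args, **kwargs):
        try:
            return method(*args, **kwargs)
        except EC2ResponseError as e:
            # Attribute names on boto exceptions vary between versions, so
            # fall back to None rather than guessing.
            Message.new(
                message_type=u"boto:ec2response_error",
                aws_code=getattr(e, "error_code", None),
                aws_message=getattr(e, "error_message", None),
                aws_request_id=getattr(e, "request_id", None),
            ).write()
            raise
    return wrapper
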
Example #3
    def test_multiple_iterations(self, logger):
        """
        If the predicate returns something falsey more than once before
        returning something truthy, ``loop_until`` keeps retrying and
        eventually fires with the truthy value.
        """
        result = object()
        results = [None, False, result]
        expected_results = results[:-1]

        def predicate():
            return results.pop(0)

        clock = Clock()

        d = loop_until(clock, predicate)

        clock.advance(0.1)
        self.assertNoResult(d)
        clock.advance(0.1)

        self.assertEqual(self.successResultOf(d), result)

        action = LoggedAction.of_type(logger.messages, LOOP_UNTIL_ACTION)[0]
        assertContainsFields(self, action.start_message, {"predicate": predicate})
        assertContainsFields(self, action.end_message, {"result": result})
        self.assertTrue(action.succeeded)
        messages = LoggedMessage.of_type(logger.messages, LOOP_UNTIL_ITERATION_MESSAGE)
        self.assertEqual(action.children, messages)
        self.assertEqual(
            [messages[0].message["result"], messages[1].message["result"]],
            expected_results)
Example #4
    def test_novaclient_exception(self, logger):
        """
        The details of ``novaclient.exceptions.ClientException`` are logged
        when it is raised by the decorated method and the exception is still
        raised.
        """
        result = NovaClientException(
            code=INTERNAL_SERVER_ERROR,
            message="Some things went wrong with some other things.",
            details={"key": "value"},
            request_id="abcdefghijklmnopqrstuvwxyz",
            url="/foo/bar",
            method="POST",
        )
        logging_dummy = LoggingDummy(Dummy(result))
        self.assertRaises(NovaClientException, logging_dummy.raise_method)

        logged = LoggedMessage.of_type(
            logger.messages,
            NOVA_CLIENT_EXCEPTION,
        )[0]
        assertContainsFields(
            self,
            logged.message,
            {
                u"code": result.code,
                u"message": result.message,
                u"details": result.details,
                u"request_id": result.request_id,
                u"url": result.url,
                u"method": result.method,
            },
        )
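
``Dummy`` and ``LoggingDummy`` are test helpers defined elsewhere in the
project. Judging only by how the test uses them, their shape is roughly the
following; the attribute names and the decorator hook are assumptions:

class Dummy(object):
    """
    Hypothetical helper: hold a preset exception and raise it on demand.
    """
    def __init__(self, exception):
        self._exception = exception

    def raise_method(self):
        raise self._exception


class LoggingDummy(object):
    """
    Hypothetical helper: wrap ``Dummy`` so ``raise_method`` runs under the
    Eliot exception-logging decorator being tested, which would emit a
    NOVA_CLIENT_EXCEPTION message before letting the exception propagate.
    """
    def __init__(self, wrapped):
        self._wrapped = wrapped

    def raise_method(self):
        return self._wrapped.raise_method()
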
Example #5
    def test_novaclient_exception(self, logger):
        """
        The details of ``novaclient.exceptions.ClientException`` are logged
        when it is raised by the decorated method and the exception is still
        raised.
        """
        result = NovaClientException(
            code=INTERNAL_SERVER_ERROR,
            message="Some things went wrong with some other things.",
            details={"key": "value"},
            request_id="abcdefghijklmnopqrstuvwxyz",
            url="/foo/bar",
            method="POST",
        )
        logging_dummy = LoggingDummy(Dummy(result))
        self.assertRaises(NovaClientException, logging_dummy.raise_method)

        logged = LoggedMessage.of_type(
            logger.messages, NOVA_CLIENT_EXCEPTION,
        )[0]
        assertContainsFields(
            self, logged.message, {
                u"code": result.code,
                u"message": result.message,
                u"details": result.details,
                u"request_id": result.request_id,
                u"url": result.url,
                u"method": result.method,
            },
        )
Example #6
    def test_keystone_client_exception(self, logger):
        """
        ``keystoneclient.openstack.common.apiclient.exceptions.BadRequest`` is
        treated similarly to ``novaclient.exceptions.ClientException``.

        See ``test_novaclient_exception``.
        """
        response = Response()
        response._content = "hello world"
        result = KeystoneHttpError(
            message="Some things went wrong with some other things.",
            details={"key": "value"},
            response=response,
            request_id="abcdefghijklmnopqrstuvwxyz",
            url="/foo/bar",
            method="POST",
            http_status=INTERNAL_SERVER_ERROR,
        )
        logging_dummy = LoggingDummy(Dummy(result))
        self.assertRaises(KeystoneHttpError, logging_dummy.raise_method)

        logged = LoggedMessage.of_type(
            logger.messages, KEYSTONE_HTTP_ERROR,
        )[0]
        assertContainsFields(
            self, logged.message, {
                u"code": result.http_status,
                u"message": result.message,
                u"details": result.details,
                u"request_id": result.request_id,
                u"url": result.url,
                u"method": result.method,
                u"response": "hello world",
            },
        )
Example #7
    def test_iterates(self, logger):
        """
        If the predicate returns something falsey followed by something truthy,
        then ``loop_until`` fires its ``Deferred`` with the truthy value.
        """
        result = object()
        results = [None, result]

        def predicate():
            return results.pop(0)

        clock = Clock()

        d = loop_until(clock, predicate)

        self.assertNoResult(d)

        clock.advance(0.1)
        self.assertEqual(self.successResultOf(d), result)

        action = LoggedAction.of_type(logger.messages, LOOP_UNTIL_ACTION)[0]
        assertContainsFields(self, action.start_message, {
            'predicate': predicate,
        })
        assertContainsFields(self, action.end_message, {
            'result': result,
        })
        self.assertTrue(action.succeeded)
        message = LoggedMessage.of_type(logger.messages,
                                        LOOP_UNTIL_ITERATION_MESSAGE)[0]
        self.assertEqual(action.children, [message])
        assertContainsFields(self, message.message, {
            'result': None,
        })
Example #8
    def _assertTracebackLogged(self, logger):
        """
        Assert that a traceback for an L{ArbitraryException} was logged as a
        child of a L{REQUEST} action.
        """
        # Get rid of it so it doesn't fail the test later.
        tracebacks = logger.flushTracebacks(exceptionType)
        if len(tracebacks) > 1:
            self.fail("Multiple tracebacks: %s" % tracebacks)
        traceback = tracebacks[0]

        # Verify it contains what it's supposed to contain.
        assertContainsFields(
            self,
            traceback,
            {
                u"exception": exceptionType,
                u"message_type": u"eliot:traceback",
                # Just assume the traceback it contains is correct.  The code
                # that generates that string isn't part of this unit, anyway.
            })

        # Verify that it is a child of one of the request actions.
        for request in LoggedAction.ofType(logger.messages, REQUEST):
            if LoggedMessage(traceback) in request.descendants():
                break
        else:
            self.fail(
                "Traceback was logged outside of the context of a request "
                "action.")
Example #9
 def assert_logged_multiple_iterations(self, logger):
     """
     Function passed to ``retry_failure`` is run in the context of a
     ``LOOP_UNTIL_ACTION``.
     """
     iterations = LoggedMessage.of_type(logger.messages, ITERATION_MESSAGE)
     loop = assertHasAction(self, logger, LOOP_UNTIL_ACTION, True)
     self.assertEqual(
         sorted(iterations, key=lambda m: m.message["iteration"]),
         loop.children)
Example #10
 def assertOutputLogging(self, logger):
     """
     The L{IOutputExecutor} is invoked in the FSM's transition action
     context.
     """
     loggedTransition = LoggedAction.ofType(
         logger.messages, LOG_FSM_TRANSITION)[0]
     loggedAnimal = LoggedMessage.ofType(logger.messages, LOG_ANIMAL)[0]
     self.assertIn(loggedAnimal, loggedTransition.children)
Example #11
 def assertOutputLogging(self, logger):
     """
     The L{IOutputExecutor} is invoked in the FSM's transition action
     context.
     """
     loggedTransition = LoggedAction.ofType(logger.messages,
                                            LOG_FSM_TRANSITION)[0]
     loggedAnimal = LoggedMessage.ofType(logger.messages, LOG_ANIMAL)[0]
     self.assertIn(loggedAnimal, loggedTransition.children)
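
All of these helpers follow the same Eliot testing pattern: run code under a
memory logger, then pull structured actions and messages back out and assert
on their fields and parent/child relationships. A minimal self-contained
sketch of that pattern, with the action and message types invented purely for
illustration:

from twisted.trial.unittest import SynchronousTestCase

from eliot import ActionType, Field, MessageType
from eliot.testing import (
    LoggedAction, LoggedMessage, assertContainsFields, capture_logging,
)

EXAMPLE_ACTION = ActionType(u"example:action", [], [], u"An example action.")
EXAMPLE_MESSAGE = MessageType(
    u"example:message",
    [Field.for_types(u"value", [int], u"An example field.")],
    u"An example message.",
)


class ExampleLoggingTests(SynchronousTestCase):
    @capture_logging(None)
    def test_message_is_child_of_action(self, logger):
        # Log a message inside an action context.
        with EXAMPLE_ACTION():
            EXAMPLE_MESSAGE(value=1).write()

        # Extract the structured log entries and assert on them.
        action = LoggedAction.of_type(logger.messages, EXAMPLE_ACTION)[0]
        message = LoggedMessage.of_type(logger.messages, EXAMPLE_MESSAGE)[0]
        self.assertEqual(action.children, [message])
        assertContainsFields(self, message.message, {u"value": 1})
        self.assertTrue(action.succeeded)
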
def no_such_executable_logged(case, logger):
    """
    Validate the error logging behavior of ``_sync_command_error_squashed``.
    """
    assertHasMessage(case, logger, ZFS_ERROR, {
        'status': 1,
        'zfs_command': 'nonsense garbage made up no such command',
        'output': '[Errno 2] No such file or directory'})
    case.assertEqual(len(LoggedMessage.ofType(logger.messages, ZFS_ERROR)), 1)
def error_status_logged(case, logger):
    """
    Validate the error logging behavior of ``_sync_command_error_squashed``.
    """
    assertHasMessage(case, logger, ZFS_ERROR, {
        'status': 1,
        'zfs_command': 'python -c raise SystemExit(1)',
        'output': ''})
    case.assertEqual(len(LoggedMessage.ofType(logger.messages, ZFS_ERROR)), 1)
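
``_sync_command_error_squashed`` itself is not shown in this listing. Going
only by the fields these helpers assert on, it runs a command synchronously
and converts any failure into a ZFS_ERROR log message instead of raising. A
rough sketch under those assumptions (the message type string mirrors the
assertions; everything else is illustrative):

import subprocess

from eliot import Message


def sync_command_error_squashed_sketch(arguments, logger):
    """
    Hypothetical wrapper: run ``arguments`` and, on any failure, write a
    ``filesystem:zfs:error`` message with the status, command and output to
    ``logger`` rather than raising.
    """
    try:
        process = subprocess.Popen(
            arguments, stdout=subprocess.PIPE, stderr=subprocess.STDOUT,
        )
        output = process.communicate()[0].decode("utf-8", "replace")
        status = process.returncode
    except OSError as e:
        # e.g. "[Errno 2] No such file or directory" for a missing executable.
        output, status = str(e), 1
    if status != 0:
        Message.new(
            message_type=u"filesystem:zfs:error",
            status=status,
            zfs_command=u" ".join(arguments),
            output=output,
        ).write(logger)
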
Example #14
 def assert_logged_multiple_iterations(self, logger):
     """
     Function passed to ``retry_failure`` is run in the context of a
     ``LOOP_UNTIL_ACTION``.
     """
     iterations = LoggedMessage.of_type(logger.messages, ITERATION_MESSAGE)
     loop = assertHasAction(self, logger, LOOP_UNTIL_ACTION, True)
     self.assertEqual(
         sorted(iterations, key=lambda m: m.message["iteration"]),
         loop.children)
Example #15
    def test_volume_busy_error_on_busy_state(self, logger):
        """
        A ``VolumeBusy`` is raised if a volume's attachment state is "busy"
        when attempting to detach the volume. This can occur if an attempt
        is made to detach a volume that has not been unmounted.
        """
        try:
            config = get_blockdevice_config()
        except InvalidConfig as e:
            self.skipTest(str(e))

        # Create a volume
        dataset_id = uuid4()
        flocker_volume = self.api.create_volume(
            dataset_id=dataset_id,
            size=self.minimum_allocatable_size,
        )
        ec2_client = get_ec2_client_for_test(config)

        # Attach the volume.
        instance_id = self.api.compute_instance_id()
        self.api.attach_volume(flocker_volume.blockdevice_id, instance_id)

        volume = ec2_client.connection.Volume(flocker_volume.blockdevice_id)

        def clean_volume(volume):
            volume.detach_from_instance()
            _wait_for_volume_state_change(VolumeOperations.DETACH, volume)
            volume.delete()

        self.addCleanup(clean_volume, volume)

        # Artificially set the volume's attachment state to "busy", by
        # monkey-patching the EBS driver's ``_get_ebs_volume`` method.
        def busy_ebs_volume(volume_id):
            busy_volume = ec2_client.connection.Volume(volume_id)
            busy_volume.load()
            if busy_volume.attachments:
                busy_volume.attachments[0]['State'] = "busy"
            return busy_volume

        self.patch(self.api, "_get_ebs_volume", busy_ebs_volume)

        # Try to detach and get the VolumeBusy exception.
        self.assertRaises(
            VolumeBusy, self.api.detach_volume, flocker_volume.blockdevice_id)

        expected_keys = set(["volume_id", "attachments"])
        messages = [
            message.message for message
            in LoggedMessage.of_type(logger.messages, VOLUME_BUSY_MESSAGE)
        ]
        self.assertThat(messages, AllMatch(ContainsAll(expected_keys)))
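
In outline, the driver-side guard this exercises lives in ``detach_volume``:
it inspects the attachment state reported by EBS and refuses to detach a
"busy" volume. The sketch below is an assumption reconstructed from the test
alone; the real method also performs the actual detach call and related
bookkeeping:

def detach_volume_sketch(api, blockdevice_id):
    """
    Hypothetical outline of the guard under test: log a VOLUME_BUSY_MESSAGE
    and raise ``VolumeBusy`` when EBS reports the attachment as "busy".
    """
    volume = api._get_ebs_volume(blockdevice_id)
    attachments = volume.attachments
    if attachments and attachments[0].get('State') == "busy":
        # Log the fields the test expects ("volume_id", "attachments") ...
        VOLUME_BUSY_MESSAGE(
            volume_id=blockdevice_id,
            attachments=attachments,
        ).write()
        # ... and raise the exception the test expects.
        raise VolumeBusy(blockdevice_id)
    volume.detach_from_instance()
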
Example #16
    def test_volume_busy_error_on_busy_state(self, logger):
        """
        A ``VolumeBusy`` is raised if a volume's attachment state is "busy"
        when attempting to detach the volume. This can occur if an attempt
        is made to detach a volume that has not been unmounted.
        """
        try:
            config = get_blockdevice_config()
        except InvalidConfig as e:
            self.skipTest(str(e))

        # Create a volume
        dataset_id = uuid4()
        flocker_volume = self.api.create_volume(
            dataset_id=dataset_id,
            size=self.minimum_allocatable_size,
        )
        ec2_client = get_ec2_client_for_test(config)

        # Attach the volume.
        instance_id = self.api.compute_instance_id()
        self.api.attach_volume(flocker_volume.blockdevice_id, instance_id)

        volume = ec2_client.connection.Volume(flocker_volume.blockdevice_id)

        def clean_volume(volume):
            volume.detach_from_instance()
            _wait_for_volume_state_change(VolumeOperations.DETACH, volume)
            volume.delete()

        self.addCleanup(clean_volume, volume)

        # Artificially set the volume's attachment state to "busy", by
        # monkey-patching the EBS driver's ``_get_ebs_volume`` method.
        def busy_ebs_volume(volume_id):
            busy_volume = ec2_client.connection.Volume(volume_id)
            busy_volume.load()
            if busy_volume.attachments:
                busy_volume.attachments[0]['State'] = "busy"
            return busy_volume

        self.patch(self.api, "_get_ebs_volume", busy_ebs_volume)

        # Try to detach and get the VolumeBusy exception.
        self.assertRaises(VolumeBusy, self.api.detach_volume,
                          flocker_volume.blockdevice_id)

        expected_keys = set(["volume_id", "attachments"])
        messages = [
            message.message for message in LoggedMessage.of_type(
                logger.messages, VOLUME_BUSY_MESSAGE)
        ]
        self.assertThat(messages, AllMatch(ContainsAll(expected_keys)))
def error_status_logged(case, logger):
    """
    Validate the error logging behavior of ``_sync_command_error_squashed``.
    """
    errors = LoggedMessage.ofType(logger.messages, ZFS_ERROR)
    assertContainsFields(
        case, errors[0].message,
        {'status': 1,
         'zfs_command': 'python -c raise SystemExit(1)',
         'output': '',
         u'message_type': 'filesystem:zfs:error',
         u'task_level': u'/'})
    case.assertEqual(1, len(errors))
def no_such_executable_logged(case, logger):
    """
    Validate the error logging behavior of ``_sync_command_error_squashed``.
    """
    errors = LoggedMessage.ofType(logger.messages, ZFS_ERROR)
    assertContainsFields(
        case, errors[0].message,
        {'status': 1,
         'zfs_command': 'nonsense garbage made up no such command',
         'output': '[Errno 2] No such file or directory',
         u'message_type': 'filesystem:zfs:error',
         u'task_level': u'/'})
    case.assertEqual(1, len(errors))
Example #19
    def test_unblinded_tokens_spent(
        self,
        logger,
        get_config,
        now,
        announcement,
        voucher,
        num_passes,
        public_key,
    ):
        """
        The ``ZKAPAuthorizerStorageServer`` returned by ``get_storage_client``
        spends unblinded tokens from the plugin database.
        """
        tempdir = self.useFixture(TempDir())
        node_config = get_config(
            tempdir.join(b"node"),
            b"tub.port",
        )

        store = VoucherStore.from_node_config(node_config, lambda: now)

        controller = PaymentController(
            store,
            DummyRedeemer(public_key),
            default_token_count=num_passes,
            num_redemption_groups=1,
            clock=Clock(),
        )
        # Get a token inserted into the store.
        redeeming = controller.redeem(voucher)
        self.assertThat(
            redeeming,
            succeeded(Always()),
        )

        storage_client = storage_server.get_storage_client(
            node_config,
            announcement,
            get_rref,
        )

        # None of the remote methods are implemented by our fake server, and I
        # would like to keep avoiding a real server in these tests, at least
        # until creating one doesn't involve so much complex setup.  So avoid
        # using any of the client APIs that make a remote call ... which is
        # all of them.
        pass_group = storage_client._get_passes(u"request binding message",
                                                num_passes)
        pass_group.mark_spent()

        # There should be no unblinded tokens left to extract.
        self.assertThat(
            lambda: storage_client._get_passes(u"request binding message", 1),
            raises(NotEnoughTokens),
        )

        messages = LoggedMessage.of_type(logger.messages, GET_PASSES)
        self.assertThat(
            messages,
            MatchesAll(
                HasLength(1),
                AllMatch(
                    AfterPreprocessing(
                        lambda logged_message: logged_message.message,
                        ContainsDict({
                            u"message": Equals(u"request binding message"),
                            u"count": Equals(num_passes),
                        }),
                    ),
                ),
            ),
        )
    def test_unblinded_tokens_extracted(
        self,
        logger,
        get_config,
        now,
        announcement,
        voucher,
        storage_index,
        renew_secret,
        cancel_secret,
        sharenums,
        size,
    ):
        """
        The ``ZKAPAuthorizerStorageServer`` returned by ``get_storage_client``
        extracts unblinded tokens from the plugin database.
        """
        tempdir = self.useFixture(TempDir())
        node_config = get_config(
            tempdir.join(b"node"),
            b"tub.port",
        )

        store = VoucherStore.from_node_config(node_config, lambda: now)
        # Give it enough for the allocate_buckets call below.
        expected_pass_cost = required_passes(store.pass_value,
                                             [size] * len(sharenums))
        # And few enough redemption groups given the number of tokens.
        num_redemption_groups = expected_pass_cost

        controller = PaymentController(
            store,
            DummyRedeemer(),
            default_token_count=expected_pass_cost,
            num_redemption_groups=num_redemption_groups,
        )
        # Get a token inserted into the store.
        redeeming = controller.redeem(voucher)
        self.assertThat(
            redeeming,
            succeeded(Always()),
        )

        storage_client = storage_server.get_storage_client(
            node_config,
            announcement,
            get_rref,
        )

        # For now, merely making the call spends the passes - regardless of
        # the ultimate success or failure of the operation.
        storage_client.allocate_buckets(
            storage_index,
            renew_secret,
            cancel_secret,
            sharenums,
            size,
            LocalReferenceable(None),
        )

        # There should be no unblinded tokens left to extract.
        self.assertThat(
            lambda: store.extract_unblinded_tokens(1),
            raises(NotEnoughTokens),
        )

        messages = LoggedMessage.of_type(logger.messages, GET_PASSES)
        self.assertThat(
            messages,
            MatchesAll(
                HasLength(1),
                AllMatch(
                    AfterPreprocessing(
                        lambda logged_message: logged_message.message,
                        ContainsDict({
                            u"message": Equals(
                                allocate_buckets_message(storage_index),
                            ),
                            u"count": Equals(expected_pass_cost),
                        }),
                    ),
                ),
            ),
        )
Example #21
        exception = ProcessTerminated(99)
        process_protocol.processEnded(Failure(exception))
        self.assertEqual(self.failureResultOf(result).value, exception)


def no_such_executable_logged(case, logger):
    """
    Validate the error logging behavior of ``_sync_command_error_squashed``.
    """
    assertHasMessage(
        case, logger, ZFS_ERROR, {
            'status': 1,
            'zfs_command': 'nonsense garbage made up no such command',
            'output': '[Errno 2] No such file or directory'
        })
    case.assertEqual(len(LoggedMessage.ofType(logger.messages, ZFS_ERROR)), 1)


def error_status_logged(case, logger):
    """
    Validate the error logging behavior of ``_sync_command_error_squashed``.
    """
    assertHasMessage(case, logger, ZFS_ERROR, {
        'status': 1,
        'zfs_command': 'python -c raise SystemExit(1)',
        'output': ''
    })
    case.assertEqual(len(LoggedMessage.ofType(logger.messages, ZFS_ERROR)), 1)


class SyncCommandTests(SynchronousTestCase):
Example #22
async def test_blueprint_stop_failure(bootsteps_graph,
                                      mock_execution_order_strategy_class,
                                      logger):
    mock_step1 = create_mock_step("step1")
    mock_step2 = create_start_stop_mock_step("step2")
    mock_step3 = create_mock_step("step3")
    mock_step4 = create_start_stop_mock_step("step4",
                                             mock_class=TrioCoroutineMock)
    mock_step4.stop.side_effect = expected_exception = RuntimeError(
        "Expected Failure")
    mock_step5 = create_mock_step("step5")
    mock_step6 = create_mock_step("step6",
                                  spec=AsyncStep,
                                  mock_class=TrioCoroutineMock)

    # We're using a parent mock simply to record the order of calls to different
    # steps
    m = Mock()
    m.attach_mock(mock_step1, "mock_step1")
    m.attach_mock(mock_step2, "mock_step2")
    m.attach_mock(mock_step3, "mock_step3")
    m.attach_mock(mock_step4, "mock_step4")
    m.attach_mock(mock_step5, "mock_step5")
    m.attach_mock(mock_step6, "mock_step6")

    expected_execution_order = [
        [m.mock_step1, m.mock_step2],
        [m.mock_step3, m.mock_step4, m.mock_step5],
        [m.mock_step6],
    ]
    mock_iterator = MagicMock()
    mock_iterator.__iter__.return_value = expected_execution_order
    reversed_func = Mock(return_value=reversed(expected_execution_order))
    mock_iterator.__reversed__ = reversed_func
    mock_execution_order_strategy_class.return_value = mock_iterator

    blueprint = Blueprint(
        bootsteps_graph,
        name="Test",
        execution_order_strategy_class=mock_execution_order_strategy_class,
    )

    with pytest.raises(RuntimeError):
        async with trio.open_nursery() as nursery:
            nursery.start_soon(blueprint.stop)

    with trio.fail_after(1):
        assert (await blueprint.state_changes_receive_channel.receive() ==
                BlueprintState.TERMINATING)

    with trio.fail_after(1):
        assert await blueprint.state_changes_receive_channel.receive() == (
            BlueprintState.FAILED,
            expected_exception,
        )

    mock_execution_order_strategy_class.assert_called_once_with(
        blueprint._steps)

    assert_parallelized_steps_are_in_order(m.method_calls,
                                           [[call.mock_step4.stop()]])

    mock_step1.assert_not_called()
    mock_step2.stop.assert_not_called()
    mock_step3.assert_not_called()
    mock_step5.assert_not_called()
    mock_step6.assert_not_called()

    logged_actions = LoggedAction.of_type(logger.messages,
                                          "bootsteps:blueprint:stop")
    assert len(logged_actions) == 1
    logged_action = logged_actions[0]
    assert_log_message_field_equals(logged_action.start_message, "name",
                                    blueprint.name)
    assert_logged_action_failed(logged_action)

    messages = LoggedMessage.of_type(logger.messages,
                                     "bootsteps:blueprint:next_bootsteps")
    assert len(messages) == 1, messages

    assert_log_message_field_equals(messages[0].message, "name",
                                    blueprint.name)
    assert_log_message_field_equals(messages[0].message, "next_bootsteps",
                                    [m.mock_step4])

    logged_actions = LoggedAction.of_type(
        logger.messages, "bootsteps:blueprint:executing_bootstep")
    assert len(logged_actions) == 1

    start_messages = [
        logged_action.start_message for logged_action in logged_actions
    ]

    assert_field_equals_in_any_message(start_messages, "bootstep",
                                       m.mock_step4.stop)

    assert_logged_action_failed(logged_actions[0])
Example #23
async def test_blueprint_start(bootsteps_graph,
                               mock_execution_order_strategy_class, logger):
    mock_step1 = create_mock_step("step1")
    mock_step2 = create_start_stop_mock_step("step2")
    mock_step3 = create_mock_step("step3")
    mock_step4 = create_start_stop_mock_step("step4",
                                             mock_class=TrioCoroutineMock)
    mock_step5 = create_mock_step("step5")
    mock_step6 = create_mock_step("step6",
                                  spec=AsyncStep,
                                  mock_class=TrioCoroutineMock)

    # We're using a parent mock simply to record the order of calls to different
    # steps
    m = Mock()
    m.attach_mock(mock_step1, "mock_step1")
    m.attach_mock(mock_step2, "mock_step2")
    m.attach_mock(mock_step3, "mock_step3")
    m.attach_mock(mock_step4, "mock_step4")
    m.attach_mock(mock_step5, "mock_step5")
    m.attach_mock(mock_step6, "mock_step6")

    expected_execution_order = [
        [m.mock_step1, m.mock_step2],
        [m.mock_step3, m.mock_step4, m.mock_step5],
        [m.mock_step6],
    ]
    mock_iterator = MagicMock()
    mock_iterator.__iter__.return_value = expected_execution_order
    mock_execution_order_strategy_class.return_value = mock_iterator

    blueprint = Blueprint(
        bootsteps_graph,
        name="Test",
        execution_order_strategy_class=mock_execution_order_strategy_class,
    )

    async with trio.open_nursery() as nursery:
        nursery.start_soon(blueprint.start)

        with trio.fail_after(1):
            assert (await blueprint.state_changes_receive_channel.receive() ==
                    BlueprintState.RUNNING)
        with trio.fail_after(1):
            assert (await blueprint.state_changes_receive_channel.receive() ==
                    BlueprintState.COMPLETED)

    mock_execution_order_strategy_class.assert_called_once_with(
        blueprint._steps)

    assert_parallelized_steps_are_in_order(
        m.method_calls,
        [
            [call.mock_step1(), call.mock_step2.start()],
            [call.mock_step3(),
             call.mock_step4.start(),
             call.mock_step5()],
            [call.mock_step6()],
        ],
    )

    mock_step6.assert_awaited_once_with()
    mock_step4.start.assert_awaited_once_with()

    logged_actions = LoggedAction.of_type(logger.messages,
                                          "bootsteps:blueprint:start")
    assert len(logged_actions) == 1
    logged_action = logged_actions[0]
    assert_log_message_field_equals(logged_action.start_message, "name",
                                    blueprint.name)
    assert_logged_action_succeeded(logged_action)

    messages = LoggedMessage.of_type(logger.messages,
                                     "bootsteps:blueprint:next_bootsteps")
    assert len(messages) == 3

    assert_log_message_field_equals(messages[0].message, "name",
                                    blueprint.name)
    assert_log_message_field_equals(messages[0].message, "next_bootsteps",
                                    [m.mock_step1, m.mock_step2])

    assert_log_message_field_equals(messages[1].message, "name",
                                    blueprint.name)
    assert_log_message_field_equals(
        messages[1].message,
        "next_bootsteps",
        [m.mock_step3, m.mock_step4, m.mock_step5],
    )

    assert_log_message_field_equals(messages[2].message, "name",
                                    blueprint.name)
    assert_log_message_field_equals(messages[2].message, "next_bootsteps",
                                    [m.mock_step6])

    logged_actions = LoggedAction.of_type(
        logger.messages, "bootsteps:blueprint:executing_bootstep")
    assert len(logged_actions) == 6

    start_messages = [
        logged_action.start_message for logged_action in logged_actions
    ]

    assert_field_equals_in_any_message(start_messages, "bootstep",
                                       m.mock_step1)
    assert_field_equals_in_any_message(start_messages, "bootstep",
                                       m.mock_step2.start)
    assert_field_equals_in_any_message(start_messages, "bootstep",
                                       m.mock_step3)
    assert_field_equals_in_any_message(start_messages, "bootstep",
                                       m.mock_step4.start)
    assert_field_equals_in_any_message(start_messages, "bootstep",
                                       m.mock_step5)
    assert_field_equals_in_any_message(start_messages, "bootstep",
                                       m.mock_step6)

    assert_logged_action_succeeded(logged_actions[0])
    assert_logged_action_succeeded(logged_actions[1])
    assert_logged_action_succeeded(logged_actions[2])
    assert_logged_action_succeeded(logged_actions[3])
    assert_logged_action_succeeded(logged_actions[4])
    assert_logged_action_succeeded(logged_actions[5])
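
The ``assert_log_message_field_equals``, ``assert_logged_action_succeeded``
and ``assert_logged_action_failed`` helpers used throughout these bootsteps
tests are project-local, not part of Eliot. Plausible (assumed)
implementations on top of Eliot's testing types are one-liners such as:

def assert_log_message_field_equals(message, field_name, expected):
    # ``message`` is the dict exposed by LoggedMessage.message or
    # LoggedAction.start_message.
    assert message.get(field_name) == expected, (
        field_name, message.get(field_name), expected)


def assert_logged_action_succeeded(logged_action):
    # LoggedAction.succeeded reflects the action's end-message status.
    assert logged_action.succeeded


def assert_logged_action_failed(logged_action):
    assert not logged_action.succeeded
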
Example #24
        ``Deferred`` returned from ``zfs_command`` errbacks with
        whatever error the process exited with.
        """
        reactor = FakeProcessReactor()
        result = zfs_command(reactor, [b"-H", b"lalala"])
        process_protocol = reactor.processes[0].processProtocol
        exception = ProcessTerminated(99)
        process_protocol.processEnded(Failure(exception))
        self.assertEqual(self.failureResultOf(result).value, exception)


def no_such_executable_logged(case, logger):
    """
    Validate the error logging behavior of ``_sync_command_error_squashed``.
    """
    errors = LoggedMessage.ofType(logger.messages, ZFS_ERROR)
    assertContainsFields(
        case, errors[0].message, {
            'status': 1,
            'zfs_command': 'nonsense garbage made up no such command',
            'output': '[Errno 2] No such file or directory',
            u'message_type': 'filesystem:zfs:error',
            u'task_level': u'/'
        })
    case.assertEqual(1, len(errors))


def error_status_logged(case, logger):
    """
    Validate the error logging behavior of ``_sync_command_error_squashed``.
    """