    def test_scenario_throws_exception_when_rate_drops(self, _logger):
        """
        ``write_request_load_scenario`` raises ``RequestRateTooLow`` if the
        rate drops below the requested rate.

        Establish the requested rate by having the ``FakeFlockerClient``
        respond to all requests, then lower the rate by dropping
        alternate requests. This should result in ``RequestRateTooLow``
        being raised.
        """
        c = Clock()
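        # A fake client whose ``drop_requests`` flag, once set, makes it
        # drop alternate requests.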
        control_service = self.get_dropping_flocker_client_instance()
        cluster = self.make_cluster(control_service)
        sample_size = 5
        s = write_request_load_scenario(c, cluster, sample_size=sample_size,
                                        tolerance_percentage=0)
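        # With ``tolerance_percentage=0``, any drop below the requested
        # rate must make the scenario fail.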

        s.start()

        # Advance the clock by `sample_size` seconds to establish the
        # requested rate.
        c.pump(repeat(1, sample_size))

        control_service.drop_requests = True

        # Advance the clock by 2 seconds so that a request is dropped
        # and a new rate which is below the target can be established.
        c.advance(2)

        failure = self.failureResultOf(s.maintained())
        self.assertIsInstance(failure.value, RequestRateTooLow)
    def test_write_request_load_succeeds(self, _logger):
        """
        ``write_request_load_scenario`` starts and stops without collapsing.
        """
        c = Clock()
        cluster = self.make_cluster(self.get_fake_flocker_client_instance())
        sample_size = 5
        s = write_request_load_scenario(c, cluster, sample_size=sample_size)

        d = s.start()

        # Request rate samples are recorded every second, and `sample_size`
        # samples are needed to establish the rate. Therefore, advance the
        # clock by `sample_size` seconds to obtain enough samples.
        c.pump(repeat(1, sample_size))
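        # ``maintained`` should never fire while the scenario is healthy,
        # so any result here fails the test.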
        s.maintained().addBoth(lambda x: self.fail())
        d.addCallback(lambda ignored: s.stop())

        def verify_scenario_returns_metrics(result):
            self.assertIsInstance(result, dict)

        d.addCallback(verify_scenario_returns_metrics)

        self.successResultOf(d)
    def test_scenario_succeeds_when_rate_has_tolerated_drop(self, _logger):
        """
        ``write_request_load_scenario`` succeeds even if the rate drops,
        provided the drop is within the tolerance percentage.

        Establish the requested rate by having the ``FakeFlockerClient``
        respond to all requests, then lower the rate by dropping
        alternate requests.
        """
        c = Clock()

        control_service = self.get_dropping_flocker_client_instance()
        cluster = self.make_cluster(control_service)
        sample_size = 5
        s = write_request_load_scenario(c, cluster, sample_size=sample_size,
                                        tolerance_percentage=0.6)
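        # Drop alternate requests from the start; the 60% tolerance should
        # absorb the resulting drop in the measured rate.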
        cluster.get_control_service(c).drop_requests = True
        d = s.start()

        s.maintained().addBoth(lambda x: self.fail())
        d.addCallback(lambda ignored: s.stop())
        # Generate enough samples to finish the scenario
        c.pump(repeat(1, sample_size*s.request_rate))

        self.successResultOf(d)
    def test_cleanup_deletes_dataset(self, _logger):
        """
        ``write_request_load_scenario`` deletes the dataset created by
        the setup when the scenario is stopped.
        """
        reactor = Clock()
        cluster = self.make_cluster(self.get_fake_flocker_client_instance())
        write_scenario = write_request_load_scenario(reactor,
                                                     cluster,
                                                     5,
                                                     sample_size=3)

        d = write_scenario.start()

        d.addCallback(write_scenario.stop)

        def list_datasets(ignored):
            return write_scenario.request.control_service.list_datasets_state()

        d.addCallback(list_datasets)

        def verify_dataset_has_been_deleted(datasets):
            self.assertEqual(datasets, [])

        d.addCallback(verify_dataset_has_been_deleted)
    def test_scenario_throws_exception_when_rate_drops(self, _logger):
        """
        ``write_request_load_scenario`` raises ``RequestRateTooLow`` if the
        rate drops below the requested rate.

        Establish the requested rate by having the fake client respond to
        all requests, then lower the rate by making requests fail. This
        should result in ``RequestRateTooLow`` being raised.
        """
        c = Clock()

        cluster = self.make_cluster(self.get_error_response_client_instance(c))
        sample_size = 5
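        # Respond without delay so the requested rate can be established
        # quickly.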
        cluster.get_control_service(c).delay = 0
        s = write_request_load_scenario(c, cluster, sample_size=sample_size,
                                        tolerance_percentage=0.0)
        s.start()

        # Advance the clock by `sample_size` seconds to establish the
        # requested rate.
        c.pump(repeat(1, sample_size))

        cluster.get_control_service(c).fail_requests = True

        # Advance the clock so that enough requests fail for the measured
        # rate to drop below the target.
        time_to_advance = s.tolerated_errors / sample_size
        c.pump(repeat(1, time_to_advance))

        failure = self.failureResultOf(s.maintained())

        _logger.flushTracebacks(FakeNetworkError)

        self.assertIsInstance(failure.value, RequestRateTooLow)
    def test_scenario_throws_exception_when_already_started(self, _logger):
        """
        The start method of ``RequestLoadScenario`` raises
        ``RequestScenarioAlreadyStarted`` if the scenario is already started.
        """
        c = Clock()
        cluster = self.make_cluster(self.get_fake_flocker_client_instance())
        sample_size = 5
        s = write_request_load_scenario(c, cluster, sample_size=sample_size)
        # Start the scenario and let it establish the requested rate.
        s.start()
        c.pump(repeat(1, sample_size))
        self.assertRaises(RequestScenarioAlreadyStarted, s.start)
    def test_setup_timeout_when_dataset_not_created(self):
        """
        ``write_request_load_scenario`` should time out if the dataset
        creation performed during setup does not complete within the given
        time.
        """
        c = Clock()
        cluster = self.make_cluster(
            self.get_unresponsive_flocker_client_instance())
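        # The unresponsive client never answers, so dataset creation can
        # never complete.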
        s = write_request_load_scenario(c, cluster, 5, sample_size=3)

        d = s.start()
        c.pump(repeat(1, s.request.timeout + 1))

        failure = self.failureResultOf(d)
        self.assertIsInstance(failure.value, DatasetCreationTimeout)
    def test_scenario_stops_only_when_no_outstanding_requests(self, logger):
        """
        ``write_request_load_scenario`` should only be considered stopped
        when all outstanding requests made by it have completed.
        """
        c = Clock()

        control_service = self.get_error_response_client_instance(c)
        cluster = self.make_cluster(control_service)
        delay = 1

        control_service.delay = delay
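        # Every response now takes ``delay`` seconds, so requests issued
        # just before the scenario stops remain outstanding afterwards.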
        sample_size = 5
        s = write_request_load_scenario(c,
                                        cluster,
                                        request_rate=10,
                                        sample_size=sample_size)

        d = s.start()
        s.maintained().addBoth(lambda x: self.fail())

        # Advance the clock by `sample_size` seconds to establish the
        # requested rate.
        c.pump(repeat(1, sample_size))

        # Force the control service to fail requests for one second.
        # These requests will fail after the delay period set in the
        # control service.
        control_service.fail_requests = True
        c.advance(1)
        control_service.fail_requests = False

        d.addCallback(lambda ignored: s.stop())

        # The scenario should not successfully stop until after the
        # delay period for the failed requests.
        self.assertNoResult(d)
        c.advance(delay)

        # The scenario requests that failed will have been logged.
        logger.flushTracebacks(FakeNetworkError)

        self.successResultOf(d)
    def test_setup_generates_dataset(self, _logger):
        """
        The setup of ``write_request_load_scenario`` creates a dataset.
        """
        c = Clock()
        cluster = self.make_cluster(self.get_fake_flocker_client_instance())
        s = write_request_load_scenario(c, cluster, 5, sample_size=3)

        def assert_created(returned_datasets):
            self.assertNotEqual(returned_datasets, [])

        # Create a dataset and verify the call succeeds.
        d = s.scenario_setup._create_dataset(self.node1)
        self.successResultOf(d)

        # Verify that a dataset is actually being created
        d2 = s.scenario_setup.control_service.list_datasets_configuration()
        d2.addCallback(assert_created)
        s.stop()
    def test_scenario_throws_exception_if_overloaded(self, __logger):
        """
        ``write_request_load_scenario`` raises ``RequestOverload`` if the
        difference between sent requests and received requests exceeds
        the tolerated difference once we start monitoring the scenario.

        Note that, right now, the only way to make it fail is to generate
        this difference before we start monitoring the scenario.
        Once we implement some kind of tolerance to allow fluctuations
        in the rate, we can update this test to trigger the exception
        in a more realistic manner.
        """
        # XXX Update this test when we add tolerance for rate fluctuations.
        # See FLOC-3757.
        c = Clock()
        control_service = self.get_dropping_flocker_client_instance()
        cluster = self.make_cluster(control_service)
        target_rate = 10
        sample_size = 20
        s = write_request_load_scenario(c,
                                        cluster,
                                        request_rate=target_rate,
                                        sample_size=sample_size)
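        # Dropping alternate requests means only half of the target rate
        # completes, so outstanding requests grow at ``dropped_rate`` per
        # second and exceed ``max_outstanding`` after
        # ``seconds_to_overload`` seconds.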
        dropped_rate = target_rate / 2
        seconds_to_overload = s.max_outstanding / dropped_rate

        s.start()
        control_service.drop_requests = True
        # Initially, we generate enough dropped requests so that the scenario
        # is overloaded when we start monitoring.
        c.pump(repeat(1, seconds_to_overload + 1))
        # We stop dropping requests
        control_service.drop_requests = False
        # Now we generate the initial rate to start monitoring the scenario
        c.pump(repeat(1, sample_size))
        # We only need to advance one more second (the first iteration of
        # the monitoring loop) to trigger RequestOverload.
        c.advance(1)

        failure = self.failureResultOf(s.maintained())
        self.assertIsInstance(failure.value, RequestOverload)
    def test_write_scenario_start_stop_start_succeeds(self, _logger):
        """
        ``write_request_load_scenario`` starts, stops and starts
        without collapsing.
        """
        c = Clock()
        cluster = self.make_cluster(self.get_fake_flocker_client_instance())
        sample_size = 5
        s = write_request_load_scenario(c, cluster, sample_size=sample_size)
        # Start and stop
        s.start()
        c.pump(repeat(1, sample_size))
        s.stop()

        # Start again and check it succeeds.
        d = s.start()
        c.pump(repeat(1, sample_size))
        s.maintained().addBoth(lambda x: self.fail())
        d.addCallback(lambda ignored: s.stop())
        self.successResultOf(d)
    def test_scenario_throws_exception_if_requested_rate_not_reached(
        self, _logger
    ):
        """
        ``write_request_load_scenario`` raises ``RequestRateNotReached`` if
        the target rate cannot be established within a given timeframe.
        """
        c = Clock()
        control_service = self.get_dropping_flocker_client_instance()
        cluster = self.make_cluster(control_service)
        s = write_request_load_scenario(c, cluster)
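        # Drop requests from the very beginning so the requested rate is
        # never reached.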
        control_service.drop_requests = True
        d = s.start()

        # Advance the clock one second past the timeout value so the
        # timeout is triggered.
        c.advance(s.timeout + 1)

        failure = self.failureResultOf(d)
        self.assertIsInstance(failure.value, RequestRateNotReached)
    def test_scenario_timeouts_if_requests_not_completed(self, _logger):
        """
        ``write_request_load_scenario`` should time out if its outstanding
        requests do not complete within the specified time.
        """
        c = Clock()

        control_service = self.get_error_response_client_instance(c)
        cluster = self.make_cluster(control_service)
        sample_size = 5
        s = write_request_load_scenario(c,
                                        cluster,
                                        request_rate=10,
                                        sample_size=sample_size)

        # Set the delay for the requests to be longer than the scenario
        # timeout
        control_service.delay = s.timeout + 10

        d = s.start()
        s.maintained().addBoth(lambda x: self.fail())

        # Advance the clock by `sample_size` seconds to establish the
        # requested rate.
        c.pump(repeat(1, sample_size))

        control_service.fail_requests = True
        c.advance(1)
        control_service.fail_requests = False

        d.addCallback(lambda ignored: s.stop())

        # Advance the clock by the timeout value so it is triggered
        # before the requests complete.
        self.assertNoResult(d)
        c.advance(s.timeout + 1)
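        # Requests are still outstanding, but the timeout lets the scenario
        # stop anyway.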
        self.assertTrue(s.rate_measurer.outstanding() > 0)
        self.successResultOf(d)