Example #1
def get_cluster(options, env):
    """
    Obtain a cluster from the command line options and environment.

    :param BenchmarkOption options: Parsed command line options.
    :param dict env: Dictionary of environment variables.
    :return BenchmarkCluster: Cluster to benchmark.
    """
    cluster_option = options['cluster']
    if cluster_option:
        try:
            cluster = BenchmarkCluster.from_cluster_yaml(
                FilePath(cluster_option)
            )
        except IOError as e:
            usage(
                options, 'Cluster file {!r} not found.'.format(e.filename)
            )
    else:
        try:
            cluster = BenchmarkCluster.from_acceptance_test_env(env)
        except KeyError as e:
            usage(
                options, 'Environment variable {!r} not set.'.format(e.args[0])
            )
        except ValueError as e:
            usage(options, e.args[0])
        except ValidationError as e:
            usage(options, e.message)
    return cluster
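Usage note (not part of the original example): ``get_cluster`` is meant to be driven by parsed command-line options and the process environment. The sketch below only illustrates that calling convention; ``BenchmarkOption`` is the type named in the docstring and is assumed to behave like a ``twisted.python.usage.Options`` subclass, and ``usage`` is the same error helper the example itself calls.

# Hypothetical driver, for illustration only.
import os

from twisted.python.usage import UsageError


def main(argv=None, environ=os.environ):
    options = BenchmarkOption()        # assumed Options-style class named in the docstring
    try:
        options.parseOptions(argv)     # falls back to sys.argv[1:] when argv is None
    except UsageError as e:
        usage(options, str(e))         # the same error helper get_cluster uses
    return get_cluster(options, environ)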
Example #3
    def setUp(self):
        super(BenchmarkClusterTests, self).setUp()
        node = Node(
            # Node public_address is actually the internal cluster address
            uuid=uuid4(), public_address=CONTROL_SERVICE_PRIVATE_IP
        )
        self.control_service = FakeFlockerClient([node])
        self.cluster = BenchmarkCluster(
            CONTROL_SERVICE_PUBLIC_IP,
            lambda reactor: self.control_service,
            {
                CONTROL_SERVICE_PRIVATE_IP: CONTROL_SERVICE_PUBLIC_IP,
            },
            DEFAULT_VOLUME_SIZE,
        )

    def test_read_request_load_start_stop_start_succeeds(self, _logger):
        """
        ``read_request_load_scenario`` starts, stops and starts
        without collapsing.
        """
        c = Clock()

        node1 = Node(uuid=uuid4(), public_address=IPAddress('10.0.0.1'))
        node2 = Node(uuid=uuid4(), public_address=IPAddress('10.0.0.2'))
        cluster = BenchmarkCluster(
            node1.public_address,
            lambda reactor: FakeFlockerClient([node1, node2]),
            {node1.public_address, node2.public_address},
            default_volume_size=DEFAULT_VOLUME_SIZE)

        sample_size = 5
        s = read_request_load_scenario(c, cluster, sample_size=sample_size)
        # Start and stop
        s.start()
        c.pump(repeat(1, sample_size))
        s.stop()

        # Start again and verify the scenario succeeds
        d = s.start()
        c.pump(repeat(1, sample_size))
        s.maintained().addBoth(lambda x: self.fail())
        d.addCallback(lambda ignored: s.stop())
        c.pump(repeat(1, sample_size))
        self.successResultOf(d)

    def test_read_request_load_succeeds(self, _logger):
        """
        ``read_request_load_scenario`` starts and stops without collapsing.
        """
        c = Clock()

        node1 = Node(uuid=uuid4(), public_address=IPAddress('10.0.0.1'))
        node2 = Node(uuid=uuid4(), public_address=IPAddress('10.0.0.2'))
        cluster = BenchmarkCluster(
            node1.public_address,
            lambda reactor: FakeFlockerClient([node1, node2]),
            {node1.public_address, node2.public_address},
            default_volume_size=DEFAULT_VOLUME_SIZE)

        sample_size = 5
        s = read_request_load_scenario(c, cluster, sample_size=sample_size)

        d = s.start()

        # Request rate samples are recorded every second, and enough samples
        # must be collected to establish the rate; the number required is
        # defined by `sample_size`. Therefore, advance the clock by
        # `sample_size` seconds to obtain enough samples.
        c.pump(repeat(1, sample_size))
        s.maintained().addBoth(lambda x: self.fail())
        d.addCallback(lambda ignored: s.stop())
        c.pump(repeat(1, sample_size))
        self.successResultOf(d)
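A note on the clock-driven pattern these scenario tests share (illustrative, not part of the examples): ``twisted.internet.task.Clock`` is a fake reactor clock, and ``pump(repeat(1, sample_size))`` advances it one second at a time so that periodic sampling fires deterministically. A minimal, self-contained sketch of that mechanism:

from itertools import repeat

from twisted.internet.task import Clock, LoopingCall

clock = Clock()
ticks = []

# A looping call stands in for the scenario's once-per-second sampling.
loop = LoopingCall(lambda: ticks.append(clock.seconds()))
loop.clock = clock          # schedule against the fake clock, not the real reactor
loop.start(1, now=False)

# pump() advances the fake clock through each increment, firing scheduled
# calls along the way; this is how the tests simulate `sample_size` seconds.
clock.pump(repeat(1, 5))
assert ticks == [1, 2, 3, 4, 5]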
Example #6
    def test_get_probe_timeout(self):
        """
        CreateContainer probe times out if get_probe runs too long.
        """
        clock = Clock()

        node_id = uuid4()
        node = Node(uuid=node_id, public_address=IPAddress('10.0.0.1'))
        control_service = FakeFlockerClient([node], node_id)

        cluster = BenchmarkCluster(
            IPAddress('10.0.0.1'),
            lambda reactor: control_service,
            {},
            None,
        )
        operation = CreateContainer(clock, cluster)
        d = operation.get_probe()

        clock.advance(DEFAULT_TIMEOUT.total_seconds())

        # No control_service.synchronize_state() call, so cluster state
        # never shows container is created.

        # The Deferred fails if container not created within 10 minutes.
        self.failureResultOf(d)
Example #7
class BenchmarkClusterTests(TestCase):

    def setUp(self):
        super(BenchmarkClusterTests, self).setUp()
        node = Node(
            # Node public_address is actually the internal cluster address
            uuid=uuid4(), public_address=CONTROL_SERVICE_PRIVATE_IP
        )
        self.control_service = FakeFlockerClient([node])
        self.cluster = BenchmarkCluster(
            CONTROL_SERVICE_PUBLIC_IP,
            lambda reactor: self.control_service,
            {
                CONTROL_SERVICE_PRIVATE_IP: CONTROL_SERVICE_PUBLIC_IP,
            },
            DEFAULT_VOLUME_SIZE,
        )

    def test_control_node_address(self):
        """
        The ``control_node_address`` method gives expected results.
        """
        self.assertEqual(
            self.cluster.control_node_address(), CONTROL_SERVICE_PUBLIC_IP)

    def test_control_service(self):
        """
        The ``control_service`` method gives expected results.
        """
        self.assertIs(
            self.cluster.get_control_service(Clock()), self.control_service)

    def test_public_address(self):
        """
        The ``public_address`` method gives expected results.
        """
        self.assertEqual(
            self.cluster.public_address(CONTROL_SERVICE_PRIVATE_IP),
            CONTROL_SERVICE_PUBLIC_IP
        )

    def test_default_volume_size(self):
        """
        The ``default_volume_size`` method gives expected results.
        """
        self.assertEqual(
            self.cluster.default_volume_size(), DEFAULT_VOLUME_SIZE)
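Read together, these accessor tests pin down the ``BenchmarkCluster`` constructor used throughout the examples: a control node address, a one-argument factory that takes a reactor and returns a control-service client, a mapping from private to public node addresses, and a default volume size. The sketch below restates that contract outside a test case; the constant values are placeholders, the ``IPAddress`` import is an assumption, and ``Node``, ``FakeFlockerClient`` and ``BenchmarkCluster`` are the classes used above (their module paths are not shown in these fragments).

from uuid import uuid4

from ipaddr import IPAddress        # assumed; the fragments do not show this import
from twisted.internet.task import Clock

# Placeholder values standing in for the test module's constants.
CONTROL_SERVICE_PRIVATE_IP = IPAddress('10.1.0.1')
CONTROL_SERVICE_PUBLIC_IP = IPAddress('203.0.113.1')
DEFAULT_VOLUME_SIZE = 1073741824    # assumed 1 GiB default

node = Node(uuid=uuid4(), public_address=CONTROL_SERVICE_PRIVATE_IP)
control_service = FakeFlockerClient([node])

cluster = BenchmarkCluster(
    CONTROL_SERVICE_PUBLIC_IP,                   # address used to reach the control node
    lambda reactor: control_service,             # control-service client factory
    {CONTROL_SERVICE_PRIVATE_IP: CONTROL_SERVICE_PUBLIC_IP},  # private -> public map
    DEFAULT_VOLUME_SIZE,                         # default dataset size in bytes
)

assert cluster.control_node_address() == CONTROL_SERVICE_PUBLIC_IP
assert cluster.get_control_service(Clock()) is control_service
assert cluster.public_address(CONTROL_SERVICE_PRIVATE_IP) == CONTROL_SERVICE_PUBLIC_IP
assert cluster.default_volume_size() == DEFAULT_VOLUME_SIZE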
Example #8
    def start_read_request(result):
        cluster = BenchmarkCluster(
            IPAddress('10.0.0.1'),
            lambda reactor: control_service,
            {},
            None,
        )
        request = ReadRequest(Clock(), cluster, 'list_datasets_state')
        return request.get_probe()
Example #9
    def make_cluster(self, FlockerClientInstance):
        """
        Create a cluster that can be used by the scenario tests.
        """
        return BenchmarkCluster(
            self.node1.public_address,
            lambda reactor: FlockerClientInstance,
            {self.node1.public_address, self.node2.public_address},
            default_volume_size=DEFAULT_VOLUME_SIZE,
        )
Example #10
    def setUp(self):
        node = Node(
            # Node public_address is actually the internal cluster address
            uuid=uuid4(), public_address=CONTROL_SERVICE_PRIVATE_IP
        )
        self.control_service = FakeFlockerClient([node])
        self.cluster = BenchmarkCluster(
            CONTROL_SERVICE_PUBLIC_IP, lambda reactor: self.control_service, {
                CONTROL_SERVICE_PRIVATE_IP: CONTROL_SERVICE_PUBLIC_IP,
            }
        )

    def make_cluster(self, make_flocker_client):
        """
        Create a cluster that can be used by the scenario tests.
        """
        node1 = Node(uuid=uuid4(), public_address=IPAddress('10.0.0.1'))
        node2 = Node(uuid=uuid4(), public_address=IPAddress('10.0.0.2'))
        return BenchmarkCluster(
            node1.public_address,
            lambda reactor: make_flocker_client(
                FakeFlockerClient([node1, node2]), reactor),
            {node1.public_address, node2.public_address},
            default_volume_size=DEFAULT_VOLUME_SIZE,
        )
Example #12
class BenchmarkClusterTests(SynchronousTestCase):

    def setUp(self):
        node = Node(
            # Node public_address is actually the internal cluster address
            uuid=uuid4(), public_address=CONTROL_SERVICE_PRIVATE_IP
        )
        self.control_service = FakeFlockerClient([node])
        self.cluster = BenchmarkCluster(
            CONTROL_SERVICE_PUBLIC_IP, lambda reactor: self.control_service, {
                CONTROL_SERVICE_PRIVATE_IP: CONTROL_SERVICE_PUBLIC_IP,
            }
        )

    def test_control_node_address(self):
        """
        The ``control_node_address`` method gives expected results.
        """
        self.assertEqual(
            self.cluster.control_node_address(), CONTROL_SERVICE_PUBLIC_IP)

    def test_control_service(self):
        """
        The ``control_service`` method gives expected results.
        """
        self.assertIs(
            self.cluster.get_control_service(Clock()), self.control_service)

    def test_public_address(self):
        """
        The ``public_address`` method gives expected results.
        """
        self.assertEqual(
            self.cluster.public_address(CONTROL_SERVICE_PRIVATE_IP),
            CONTROL_SERVICE_PUBLIC_IP
        )
Example #13
    def test_empty_cluster(self):
        """
        CreateDataset fails if there are no nodes in the cluster.
        """
        control_service = FakeFlockerClient()

        cluster = BenchmarkCluster(
            IPAddress('10.0.0.1'),
            lambda reactor: control_service,
            {},
            None,
        )

        d = CreateDataset(Clock(), cluster).get_probe()

        self.failureResultOf(d, EmptyClusterError)
Example #14
    def test_create_container(self, _logger):
        """
        CreateContainer probe waits for cluster to converge.
        """
        clock = Clock()

        node_id = uuid4()
        node = Node(uuid=node_id, public_address=IPAddress('10.0.0.1'))
        control_service = FakeFlockerClient([node], node_id)

        cluster = BenchmarkCluster(
            IPAddress('10.0.0.1'),
            lambda reactor: control_service,
            {},
            None,
        )
        operation = CreateContainer(clock, cluster)
        d = operation.get_probe()

        def run_probe(probe):
            def cleanup(result):
                cleaned_up = probe.cleanup()
                cleaned_up.addCallback(lambda _ignored: result)
                return cleaned_up

            d = probe.run()
            d.addCallback(cleanup)
            return d

        d.addCallback(run_probe)

        # Advance the clock because the probe polls the state periodically.
        # The operation involves multiple steps, so the state must be
        # synchronized several times.
        control_service.synchronize_state()  # creation of pull container
        clock.advance(1)
        control_service.synchronize_state()  # deletion of pull container
        clock.advance(1)

        # The Deferred does not fire before the container has been created.
        self.assertNoResult(d)

        control_service.synchronize_state()  # creation of test container
        clock.advance(1)

        # The Deferred fires once the container has been created.
        self.successResultOf(d)
Example #15
    def test_cpu_time(self):
        """
        Fake Flocker cluster gives expected results.
        """
        clock = Clock()
        node1 = Node(uuid=uuid4(), public_address=IPAddress('10.0.0.1'))
        node2 = Node(uuid=uuid4(), public_address=IPAddress('10.0.0.2'))
        metric = CPUTime(clock,
                         BenchmarkCluster(
                             IPAddress('10.0.0.1'),
                             lambda reactor: FakeFlockerClient([node1, node2]),
                             {},
                             None,
                         ),
                         _LocalRunner(),
                         processes=[_pid_1_name])
        d = metric.measure(lambda: clock.advance(5))

        # Although it is unlikely, the processes could accumulate a non-zero
        # CPU time during the test, so zero out the per-process values.
        def remove_process_times(node_cpu_times):
            for process_times in node_cpu_times.values():
                if process_times:
                    for process in process_times:
                        if process != WALLCLOCK_LABEL:
                            process_times[process] = 0
            return node_cpu_times

        d.addCallback(remove_process_times)

        def check(result):
            self.assertEqual(
                result, {
                    '10.0.0.1': {
                        _pid_1_name: 0,
                        WALLCLOCK_LABEL: 5
                    },
                    '10.0.0.2': {
                        _pid_1_name: 0,
                        WALLCLOCK_LABEL: 5
                    }
                })

        d.addCallback(check)
        return d
Example #16
    def test_create_dataset(self, logger):
        """
        CreateDataset probe waits for cluster to converge.
        """
        clock = Clock()

        node_id = uuid4()
        node = Node(uuid=node_id, public_address=IPAddress('10.0.0.1'))
        control_service = FakeFlockerClient([node], node_id)

        cluster = BenchmarkCluster(
            IPAddress('10.0.0.1'),
            lambda reactor: control_service,
            {},
            None,
        )
        operation = CreateDataset(clock, cluster)
        d = operation.get_probe()

        def run_probe(probe):
            def cleanup(result):
                cleaned_up = probe.cleanup()
                cleaned_up.addCallback(lambda _ignored: result)
                return cleaned_up

            d = probe.run()
            d.addCallback(cleanup)
            return d

        d.addCallback(run_probe)

        # Advance the clock because the probe polls the state periodically.

        # The Deferred does not fire before the dataset has been created.
        clock.advance(1)
        self.assertNoResult(d)

        # Trigger convergence of the fake Flocker cluster.
        control_service.synchronize_state()

        # The Deferred fires once the dataset has been created.
        clock.advance(1)
        self.successResultOf(d)

    def test_scenario_throws_exception_when_already_started(self, _logger):
        """
        The ``start`` method of ``RequestLoadScenario`` raises
        ``RequestScenarioAlreadyStarted`` if the scenario has already been
        started.
        """
        c = Clock()

        node1 = Node(uuid=uuid4(), public_address=IPAddress('10.0.0.1'))
        node2 = Node(uuid=uuid4(), public_address=IPAddress('10.0.0.2'))
        cluster = BenchmarkCluster(
            node1.public_address,
            lambda reactor: FakeFlockerClient([node1, node2]),
            {node1.public_address, node2.public_address},
            default_volume_size=DEFAULT_VOLUME_SIZE)

        sample_size = 5
        s = read_request_load_scenario(c, cluster, sample_size=sample_size)

        s.start()

        self.assertRaises(RequestScenarioAlreadyStarted, s.start)
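A closing note for anyone reconstructing these fragments into runnable tests: the standard-library and Twisted imports they rely on are unambiguous, while the Flocker-specific paths below are educated guesses, and the benchmark operation, metric and scenario classes come from the benchmarking package whose module paths the fragments do not show.

from itertools import repeat
from uuid import uuid4

from twisted.internet.task import Clock

from ipaddr import IPAddress                             # assumed source of IPAddress
from flocker.apiclient import FakeFlockerClient, Node    # assumed module path

# BenchmarkCluster, CreateContainer, CreateDataset, ReadRequest, CPUTime,
# read_request_load_scenario, RequestScenarioAlreadyStarted, EmptyClusterError,
# DEFAULT_TIMEOUT, DEFAULT_VOLUME_SIZE, WALLCLOCK_LABEL and the test constants
# are defined by the benchmarking code under test.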