def test_get_probe_timeout(self):
    """
    CreateContainer probe times-out if get_probe runs too long.
    """
    reactor = Clock()
    node_uuid = uuid4()
    fake_client = FakeFlockerClient(
        [Node(uuid=node_uuid, public_address=IPAddress('10.0.0.1'))],
        node_uuid,
    )
    cluster = BenchmarkCluster(
        IPAddress('10.0.0.1'),
        lambda _reactor: fake_client,
        {},
        None,
    )
    setting_up = CreateContainer(reactor, cluster).get_probe()

    # Run the fake clock past the timeout without ever calling
    # fake_client.synchronize_state(), so the cluster state never
    # reports the container as created.
    reactor.advance(DEFAULT_TIMEOUT.total_seconds())

    # The Deferred fails if container not created within 10 minutes.
    self.failureResultOf(setting_up)
def test_create_dataset(self, logger):
    """
    CreateDataset probe waits for cluster to converge.
    """
    reactor = Clock()
    node_uuid = uuid4()
    fake_client = FakeFlockerClient(
        [Node(uuid=node_uuid, public_address=IPAddress('10.0.0.1'))],
        node_uuid,
    )
    cluster = BenchmarkCluster(
        IPAddress('10.0.0.1'),
        lambda _reactor: fake_client,
        {},
        None,
    )
    d = CreateDataset(reactor, cluster).get_probe()

    def run_probe(probe):
        # Always clean the probe up, passing the run result through.
        def cleanup(result):
            cleaned_up = probe.cleanup()
            cleaned_up.addCallback(lambda _ignored: result)
            return cleaned_up

        running = probe.run()
        running.addCallback(cleanup)
        return running

    d.addCallback(run_probe)

    # Advance the fake clock so the probe polls the cluster state;
    # the Deferred does not fire before the dataset has been created.
    reactor.advance(1)
    self.assertNoResult(d)

    # Trigger convergence of the fake Flocker cluster.
    fake_client.synchronize_state()

    # The Deferred fires once the dataset has been created.
    reactor.advance(1)
    self.successResultOf(d)
def test_create_dataset(self, logger):
    """
    CreateDataset probe waits for cluster to converge.
    """
    clock = Clock()
    node_id = uuid4()
    client = FakeFlockerClient(
        [Node(uuid=node_id, public_address=IPAddress("10.0.0.1"))], node_id)
    cluster = BenchmarkCluster(
        IPAddress("10.0.0.1"), lambda reactor: client, {}, None)
    probing = CreateDataset(clock, cluster).get_probe()

    def run_probe(probe):
        # Clean up after the run, forwarding the run's result.
        def cleanup(result):
            done = probe.cleanup()
            done.addCallback(lambda _ignored: result)
            return done

        return probe.run().addCallback(cleanup)

    probing.addCallback(run_probe)

    # The probe polls the cluster state periodically; advancing the
    # fake clock triggers a poll, but nothing fires until the dataset
    # exists.
    clock.advance(1)
    self.assertNoResult(probing)

    # Trigger convergence of the fake Flocker cluster.
    client.synchronize_state()

    # One more poll sees the dataset and the Deferred fires.
    clock.advance(1)
    self.successResultOf(probing)
def setUp(self):
    super(BenchmarkClusterTests, self).setUp()
    # Node public_address is actually the internal cluster address
    cluster_node = Node(
        uuid=uuid4(),
        public_address=CONTROL_SERVICE_PRIVATE_IP,
    )
    self.control_service = FakeFlockerClient([cluster_node])
    # Map the private (cluster-internal) address to the public one.
    address_map = {CONTROL_SERVICE_PRIVATE_IP: CONTROL_SERVICE_PUBLIC_IP}
    self.cluster = BenchmarkCluster(
        CONTROL_SERVICE_PUBLIC_IP,
        lambda reactor: self.control_service,
        address_map,
        DEFAULT_VOLUME_SIZE,
    )
def test_run_probe_timeout(self):
    """
    CreateContainer probe times-out if probe.run runs too long.
    """
    reactor = Clock()
    node_uuid = uuid4()
    fake_client = FakeFlockerClient(
        [Node(uuid=node_uuid, public_address=IPAddress('10.0.0.1'))],
        node_uuid,
    )
    cluster = BenchmarkCluster(
        IPAddress('10.0.0.1'),
        lambda _reactor: fake_client,
        {},
        None,
    )
    setting_up = CreateContainer(reactor, cluster).get_probe()

    # get_probe itself needs two convergence steps to finish.
    fake_client.synchronize_state()  # creation of pull container
    reactor.advance(1)
    fake_client.synchronize_state()  # deletion of pull container
    reactor.advance(1)

    # get_probe has completed successfully
    probe = self.successResultOf(setting_up)

    running = probe.run()
    # Run the fake clock past the timeout without any further
    # synchronize_state() call, so the cluster state never shows
    # the container as created.
    reactor.advance(DEFAULT_TIMEOUT.total_seconds())

    # The Deferred fails if container not created within 10 minutes.
    self.failureResultOf(running)
def test_create_container(self, _logger):
    """
    CreateContainer probe waits for cluster to converge.
    """
    reactor = Clock()
    node_uuid = uuid4()
    fake_client = FakeFlockerClient(
        [Node(uuid=node_uuid, public_address=IPAddress('10.0.0.1'))],
        node_uuid,
    )
    cluster = BenchmarkCluster(
        IPAddress('10.0.0.1'),
        lambda _reactor: fake_client,
        {},
        None,
    )
    d = CreateContainer(reactor, cluster).get_probe()

    def run_probe(probe):
        # Clean the probe up afterwards, passing the result through.
        def cleanup(result):
            cleaned_up = probe.cleanup()
            cleaned_up.addCallback(lambda _ignored: result)
            return cleaned_up

        running = probe.run()
        running.addCallback(cleanup)
        return running

    d.addCallback(run_probe)

    # The probe polls the cluster state on each clock tick; several
    # convergence steps are needed before the test container itself
    # is created.
    fake_client.synchronize_state()  # creation of pull container
    reactor.advance(1)
    fake_client.synchronize_state()  # deletion of pull container
    reactor.advance(1)

    # The Deferred does not fire before the container has been created.
    self.assertNoResult(d)

    fake_client.synchronize_state()  # creation of test container
    reactor.advance(1)

    # The Deferred fires once the container has been created.
    self.successResultOf(d)
def test_empty_cluster(self):
    """
    CreateDataset fails if no nodes in cluster.
    """
    # A fake client with no nodes at all.
    client = FakeFlockerClient()
    cluster = BenchmarkCluster(
        IPAddress('10.0.0.1'),
        lambda reactor: client,
        {},
        None,
    )
    probing = CreateDataset(Clock(), cluster).get_probe()
    self.failureResultOf(probing, EmptyClusterError)
def test_cpu_time(self):
    """
    Fake Flocker cluster gives expected results.
    """
    reactor = Clock()
    nodes = [
        Node(uuid=uuid4(), public_address=IPAddress('10.0.0.1')),
        Node(uuid=uuid4(), public_address=IPAddress('10.0.0.2')),
    ]
    cluster = BenchmarkCluster(
        IPAddress('10.0.0.1'),
        lambda _reactor: FakeFlockerClient(nodes),
        {},
        None,
    )
    metric = CPUTime(
        reactor, cluster, _LocalRunner(), processes=[_pid_1_name])
    d = metric.measure(lambda: reactor.advance(5))

    # Although it is unlikely, a process could accumulate CPU time
    # != 0 during the measurement, so zero out every entry except
    # the wallclock ones before comparing.
    def zero_cpu_entries(node_cpu_times):
        for per_process in node_cpu_times.values():
            for name in per_process:
                if name != WALLCLOCK_LABEL:
                    per_process[name] = 0
        return node_cpu_times

    d.addCallback(zero_cpu_entries)

    expected = {
        '10.0.0.1': {_pid_1_name: 0, WALLCLOCK_LABEL: 5},
        '10.0.0.2': {_pid_1_name: 0, WALLCLOCK_LABEL: 5},
    }
    d.addCallback(lambda result: self.assertEqual(result, expected))
    return d
def test_read_request(self):
    """
    ReadRequest probe returns the cluster state.
    """
    control_service = FastConvergingFakeFlockerClient(FakeFlockerClient())
    primary = uuid4()

    # Put a single dataset on the cluster.
    creating = control_service.create_dataset(primary=primary)

    # Once the dataset exists, build the probe that reads the
    # state of the cluster.
    def start_read_request(result):
        cluster = BenchmarkCluster(
            IPAddress('10.0.0.1'),
            lambda reactor: control_service,
            {},
            None,
        )
        request = ReadRequest(Clock(), cluster, 'list_datasets_state')
        return request.get_probe()

    creating.addCallback(start_read_request)

    # Run the probe, cleaning it up afterwards but passing the run
    # result through.
    def run_probe(probe):
        def cleanup(result):
            cleaned_up = probe.cleanup()
            cleaned_up.addCallback(lambda _ignored: result)
            return cleaned_up

        running = probe.run()
        running.addCallback(cleanup)
        return running

    creating.addCallback(run_probe)

    # Only the primaries of the cluster state are of interest here.
    creating.addCallback(
        lambda states: [state.primary for state in states])

    self.assertEqual(self.successResultOf(creating), [primary])