def get_clb(self, method, url, treq_args_kwargs, response, str_body):
    """
    Stub out treq, and return a cloud load balancer
    """
    clb = CloudLoadBalancer(
        pool=self.pool,
        treq=get_fake_treq(self, method, url, treq_args_kwargs,
                           (response, str_body)))
    clb.clb_id = 12345
    return clb
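# A hypothetical sketch of the ``get_fake_treq`` stub the helper above relies
# on; the real utility lives in the shared test helpers and may differ.  In
# particular, the (args, kwargs) shape of ``treq_args_kwargs`` is an
# assumption made only for this illustration.
def _sketch_get_fake_treq(test_case, method, url, treq_args_kwargs,
                          response_and_body):
    """
    Return a treq-like stub whose ``method`` (e.g. ``"get"``) asserts the
    expected call and returns the canned response; ``content`` returns the
    canned body.
    """
    # Local import so the sketch stands on its own.
    from twisted.internet.defer import succeed

    expected_args, expected_kwargs = treq_args_kwargs
    response, str_body = response_and_body

    class _FakeTreq(object):
        def content(self, resp):
            test_case.assertIs(response, resp)
            return succeed(str_body)

    def _request(_url, *args, **kwargs):
        test_case.assertEqual(url, _url)
        test_case.assertEqual(expected_args, args)
        test_case.assertEqual(expected_kwargs, kwargs)
        return succeed(response)

    fake = _FakeTreq()
    setattr(fake, method, _request)
    return fake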
def create_another_clb(self):
    """
    Create another CLB and wait for it to become active.  It will not be
    added to the helper.  This is used, for example, to create a CLB that
    is not associated with an autoscaling group.
    """
    # Create another load balancer that autoscale will not use.  It is not
    # added to the helper, because when the helper creates a group it
    # automatically adds the CLB.
    clb_other = CloudLoadBalancer(pool=self.helper.pool,
                                  treq=self.helper.treq)
    yield clb_other.start(self.rcs, self)
    yield clb_other.wait_for_state(self.rcs, "ACTIVE", timeout_default)
    returnValue(clb_other)
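# A hypothetical usage sketch only: an inlineCallbacks-style test that needs
# a CLB outside the scaling group could drive the helper above like this
# (the test name and the explicit cleanup are assumptions;
# ``inlineCallbacks`` comes from ``twisted.internet.defer``).
@inlineCallbacks
def test_group_ignores_unmanaged_clb(self):
    clb_other = yield self.create_another_clb()
    # ... exercise behaviour against the CLB that autoscale does not manage ...
    yield clb_other.delete(self.rcs)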
def test_delete_clb_does_not_retry_on_get_failure(self):
    """
    If fetching the CLB's state fails outright, deleting does not retry:
    the failure propagates as an :class:`UpstreamError`.
    """
    clock = Clock()
    self.clb_id = 12345
    _treq = self.get_fake_treq_for_delete(
        Response(400, strbody="Something is wrong"))
    clb = CloudLoadBalancer(pool=self.pool, treq=_treq)
    clb.clb_id = self.clb_id

    d = clb.delete(self.rcs, clock=clock)
    self.failureResultOf(d, UpstreamError)
def test_delete_clb_retries_until_success(self):
    """
    Deleting a CLB will retry until the CLB is deleted (or in error or
    suspended mode, in which case it will give up).
    """
    self.clb_id = 12345
    success_treqs = [
        # All of these particular immutable states count as success.
        self.get_fake_treq_for_delete(
            Response(200, strbody=json.dumps(
                {"loadBalancer": {"status": state}})),
            del_response=Response(400))
        for state in ("PENDING_DELETE", "DELETED", "ERROR", "SUSPENDED")
    ] + [
        # A 404 from GET-ing the load balancer means it's already gone.
        self.get_fake_treq_for_delete(
            Response(404, strbody=(
                '{"message": "No such load balancer", "code": 404}')),
            del_response=Response(400))
    ]

    for success_treq in success_treqs:
        clock = Clock()
        _treq = self.get_fake_treq_for_delete(
            Response(
                200,
                strbody='{"loadBalancer": {"status": "PENDING_UPDATE"}}'),
            del_response=Response(400))
        clb = CloudLoadBalancer(pool=self.pool, treq=_treq)
        clb.clb_id = self.clb_id

        d = clb.delete(self.rcs, clock=clock)
        self.assertNoResult(d)
        clock.pump([3])
        self.assertNoResult(d)

        # Swap in the "success" treq and advance the clock: the next poll
        # should complete the delete.
        clb.treq = success_treq
        clock.pump([3])
        self.assertEqual(self.successResultOf(d), None)
def __init__(self, test_case, num_clbs=0):
    """
    Set up the test case, HTTP pool, identity, and cleanup.
    """
    setup_test_log_observer(test_case)
    self.test_case = test_case
    self.pool = HTTPConnectionPool(reactor, False)
    self.treq = LoggingTreq(log=log, log_response=True)
    self.test_case.addCleanup(self.pool.closeCachedConnections)
    self.clbs = [CloudLoadBalancer(pool=self.pool, treq=self.treq)
                 for _ in range(num_clbs)]
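# A hypothetical wiring sketch: a trial test case might build this helper in
# its setUp.  The ``TestHelper`` name is assumed purely for illustration,
# since the class this ``__init__`` belongs to is not shown here.
def setUp(self):
    self.helper = TestHelper(self, num_clbs=1)  # hypothetical class name
    [self.clb] = self.helper.clbs               # the single pre-built CLB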
def test_delete_clb_retries_until_timeout(self):
    """
    Deleting a CLB will keep retrying while its state is wonky, until it
    times out.
    """
    clock = Clock()
    self.clb_id = 12345
    _treq = self.get_fake_treq_for_delete(
        Response(
            200, strbody='{"loadBalancer": {"status": "PENDING_UPDATE"}}'),
        del_response=Response(400))
    clb = CloudLoadBalancer(pool=self.pool, treq=_treq)
    clb.clb_id = self.clb_id

    d = clb.delete(self.rcs, clock=clock)
    self.assertNoResult(d)

    timeout = 60
    # The delete polls every 3 seconds; stay just short of the timeout.
    for _ in range((timeout - 1) // 3):
        clock.pump([3])
        self.assertNoResult(d)

    clock.pump([3])
    self.failureResultOf(d, TimedOutError)
class WaitForNodesTestCase(SynchronousTestCase):
    """
    Tests for :func:`CloudLoadBalancer.wait_for_nodes`.
    """
    def setUp(self):
        """
        Set up fake pool, clock, treq, responses, and RCS.
        """
        self.pool = object()
        self.nodes = {'nodes': []}
        self.clock = Clock()
        self.get_calls = 0

        class FakeTreq(object):
            @classmethod
            def get(cls, url, headers, pool):
                self.get_calls += 1
                self.assertIs(self.pool, pool)
                self.assertEqual(["token"], headers.get('x-auth-token'))
                self.assertEqual(
                    ['clburl', 'loadbalancers', 'clb_id', 'nodes'],
                    url.split('/'))
                return succeed(Response(200))

            @classmethod
            def json_content(cls, resp):
                return succeed(self.nodes)

        self.rcs = _FakeRCS()
        self.clb = CloudLoadBalancer(pool=self.pool, treq=FakeTreq)
        self.clb.clb_id = 'clb_id'

    def test_retries_until_matcher_matches(self):
        """
        If the matcher does not match the load balancer state, retries
        until it does.
        """
        d = self.clb.wait_for_nodes(
            self.rcs, Equals(['done']), timeout=5, period=1,
            clock=self.clock)
        self.clock.pump((1, 1, 1))
        self.assertNoResult(d)
        self.assertEqual(4, self.get_calls)

        self.nodes = {'nodes': ['done']}
        self.clock.pump([1])
        self.assertEqual(['done'], self.successResultOf(d))
        self.assertEqual(5, self.get_calls)

    def test_retries_until_timeout(self):
        """
        If the matcher does not match the load balancer state, retries
        until it times out.
        """
        d = self.clb.wait_for_nodes(
            self.rcs, Equals(['done']), timeout=5, period=1,
            clock=self.clock)

        self.clock.pump((1, 1, 1, 1, 1))
        self.assertEqual(5, self.get_calls)
        self.failureResultOf(d, TimedOutError)
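# The tests above assume small stand-ins roughly like the sketch below; the
# real ``Response`` and ``_FakeRCS`` fixtures live elsewhere in this test
# module and may carry more state.
class _SketchResponse(object):
    """Stub HTTP response holding only what these tests inspect."""
    def __init__(self, code, strbody=""):
        self.code = code
        self.strbody = strbody


class _SketchFakeRCS(object):
    """Fake request context: an auth token and a CLB service endpoint."""
    def __init__(self):
        self.token = "token"
        self.endpoints = {"loadbalancers": "clburl"}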