def check(nodes):
    """
    Return the CLB node list once it satisfies ``matcher``.

    While the node state does not match, log the mismatch and raise
    :class:`TransientRetryError` so the surrounding retry loop polls again.
    """
    node_list = nodes["nodes"]
    mismatch = matcher.match(node_list)
    if not mismatch:
        return node_list
    description = mismatch.describe()
    msg("Waiting for CLB node state for CLB {}.\nMismatch: {}".format(
        self.clb_id, description))
    raise TransientRetryError(description)
def check(result):
    """
    Succeed with ``rcs`` once the scaling group's state satisfies ``matcher``.

    ``result`` is a ``(response, group_state)`` pair; only the group state is
    inspected.  On mismatch, log and raise :class:`TransientRetryError` so the
    caller retries.
    """
    _response, group_state = result
    group = group_state['group']
    mismatch = matcher.match(group)
    if not mismatch:
        msg("Success: desired group state reached:\n{}\nmatches:\n{}".format(
            group, matcher))
        return rcs
    msg("Waiting for group {} to reach desired group state.\n"
        "Mismatch: {}".format(self.group_id, mismatch.describe()))
    raise TransientRetryError(mismatch.describe())
def _pending_update_to_transient(f):
    """
    A cloud load balancer locks on every update, so to ensure that the test
    doesn't fail because of that, we want to retry POST/PUT/DELETE commands
    issued by the test.  This is a utility function that checks if a treq
    API failure is a 422 PENDING_UPDATE failure, and if so, re-raises a
    TransientRetryError instead.
    """
    f.trap(UpstreamError, APIError)
    if f.check(UpstreamError):
        # Unwrap the UpstreamError and re-examine the underlying failure.
        return _pending_update_to_transient(f.value.reason)
    error = f.value
    if error.code == 422 and 'PENDING_UPDATE' in error.body:
        raise TransientRetryError()
    return f
def do_work():
    """
    List servers (optionally restricted to those tagged with ``group``'s id)
    and check them against ``matcher``, retrying transiently on mismatch.
    """
    body = yield list_servers(rcs, pool, _treq=_treq)
    server_list = body['servers']
    if group is not None:
        gid = group.group_id
        # Keep only servers whose autoscale metadata ties them to this group.
        server_list = [
            s for s in server_list
            if s['metadata'].get("rax:autoscale:group:id", None) == gid
        ]
    mismatch = matcher.match(server_list)
    if mismatch:
        described = mismatch.describe()
        msg("{0}.\nMismatch: {1}".format(message, described))
        raise TransientRetryError(described)
    returnValue(server_list)
def check(content):
    """
    Succeed with ``self.rcs`` once the bag of stack statuses in ``content``
    equals ``expected_states``.

    Otherwise log the actual vs. expected states and raise
    :class:`TransientRetryError` so the surrounding retry loop polls again.
    """
    states = pbag([s['stack_status'] for s in content['stacks']])
    # Compare with ``not (==)`` rather than ``!=`` deliberately: it only
    # relies on pbag's __eq__.
    if not (states == expected_states):
        msg("Waiting for group {} to reach desired group state.\n"
            "{} (actual) {} (expected)".format(self.group.group_id,
                                               states, expected_states))
        # Fixed: dropped the stray ")" that used to terminate this message.
        raise TransientRetryError(
            "Group states of {} did not match expected {}".format(
                states, expected_states))
    msg("Success: desired group state reached:\n{}".format(
        expected_states))
    return self.rcs
def check_status(server):
    """
    Poll callback for server build: return the server once ACTIVE, retry
    transiently while still in BUILD, and fail loudly on any other status.
    """
    status = server['server']['status']
    if status == 'BUILD':
        raise TransientRetryError()  # just poll again
    if status != 'ACTIVE':
        raise UnexpectedServerStatus(server_id, status, 'ACTIVE')
    elapsed = clock.seconds() - start_time
    log.msg(("Server changed from 'BUILD' to 'ACTIVE' within "
             "{time_building} seconds"), time_building=elapsed)
    return server
def really_delete():
    """
    Issue a DELETE for this CLB, then verify it is actually going away.

    After the DELETE, the CLB's state is fetched: a 404 from the API means
    the CLB is fully gone (success).  If the state is still readable but not
    one of the "deleting/terminal" statuses, a TransientRetryError is raised
    so the caller retries.  ERROR and SUSPENDED are accepted as good enough,
    with a log message, since the CLB cannot be deleted from those states.
    """
    # Fire the DELETE and drain the response body.
    yield self.treq.delete(
        self.endpoint(rcs),
        headers=headers(str(rcs.token)),
        pool=self.pool,
    ).addCallback(self.treq.content)
    try:
        state = yield self.get_state(rcs)
    except UpstreamError as e:
        # Only a 404 APIError is acceptable here: it means the CLB no
        # longer exists.  Anything else is re-raised.
        if not e.reason.check(APIError) or e.reason.value.code != 404:
            raise e
    else:
        # CLB still exists: retry unless it is already on its way out
        # (or stuck in a terminal-ish state).
        if state['loadBalancer']['status'] not in ("PENDING_DELETE",
                                                   "SUSPENDED", "ERROR",
                                                   "DELETED"):
            raise TransientRetryError()
        if state['loadBalancer']['status'] in ("ERROR", "SUSPENDED"):
            msg("Could not delete CLB {0} because it is in {1} state, "
                "but considering this good enough.".format(
                    self.clb_id, state['loadBalancer']['status']))
def check(state):
    """
    Return ``rcs`` when the load balancer has reached ``state_desired``;
    otherwise raise :class:`TransientRetryError` to trigger another poll.
    """
    if state["loadBalancer"]["status"] == state_desired:
        return rcs
    raise TransientRetryError()