def test_filters_clb_types(self):
    """
    Only one CLB step is returned per CLB
    """
    add_step = AddNodesToCLB(lb_id='5', address_configs=s(
        ('1.1.1.1', CLBDescription(lb_id='5', port=80))))
    remove_step = RemoveNodesFromCLB(lb_id='5', node_ids=s('1'))
    # Unoptimizable step
    server_step = CreateServer(server_config=pmap({}))
    steps = pbag([add_step, remove_step, server_step])
    # The result could keep either of the two CLB-5 steps depending on
    # how `one_clb_step` iterates the bag; since pbag has no ordering,
    # accept either of the two possible outcomes.
    self.assertEqual(
        matches(MatchesAny(
            Equals(pbag([add_step, server_step])),
            Equals(pbag([remove_step, server_step])))),
        optimize_steps(steps))
def test_log_steps(self):
    """The steps to be executed are logged to cloud feeds."""
    step = CreateServer(server_config=pmap({"foo": "bar"}))
    step.as_effect = lambda: Effect("create-server")

    def plan(*_args, **_kwargs):
        return pbag([step])

    # The create-server step is logged to cloud feeds before execution.
    log_intent = Log('convergence-create-servers',
                     {'num_servers': 1,
                      'server_config': {'foo': 'bar'},
                      'cloud_feed': True})
    sequence = [
        parallel_sequence([
            [parallel_sequence([[(log_intent, noop)]])]
        ]),
        (Log(msg='execute-convergence', fields=mock.ANY), noop),
        parallel_sequence([
            [("create-server", lambda i: (StepResult.RETRY, []))]
        ]),
        (Log(msg='execute-convergence-results', fields=mock.ANY), noop)
    ]
    self.assertEqual(
        perform_sequence(self.get_seq() + sequence, self._invoke(plan)),
        (StepResult.RETRY, ScalingGroupStatus.ACTIVE))
def test_mixed_optimization(self):
    """
    Mixes of optimizable and unoptimizable steps still get optimized
    correctly.
    """
    def add(lb, ip):
        # Shorthand for a single-address AddNodesToCLB step.
        return AddNodesToCLB(lb_id=lb, address_configs=s(
            (ip, CLBDescription(lb_id=lb, port=80))))

    steps = pbag(
        [add('5', '1.1.1.1'), add('5', '1.1.1.2'),
         add('6', '1.1.1.1'), add('6', '1.1.1.2'),
         RemoveNodesFromCLB(lb_id='5', node_ids=s('1')),
         RemoveNodesFromCLB(lb_id='5', node_ids=s('2')),
         RemoveNodesFromCLB(lb_id='6', node_ids=s('3')),
         RemoveNodesFromCLB(lb_id='6', node_ids=s('4')),
         # Unoptimizable steps
         CreateServer(server_config=pmap({}))])

    expected = pbag([
        # CLB adds merged into one step per load balancer
        AddNodesToCLB(
            lb_id='5',
            address_configs=s(
                ('1.1.1.1', CLBDescription(lb_id='5', port=80)),
                ('1.1.1.2', CLBDescription(lb_id='5', port=80)))),
        AddNodesToCLB(
            lb_id='6',
            address_configs=s(
                ('1.1.1.1', CLBDescription(lb_id='6', port=80)),
                ('1.1.1.2', CLBDescription(lb_id='6', port=80)))),
        # CLB removes merged likewise
        RemoveNodesFromCLB(lb_id='5', node_ids=s('1', '2')),
        RemoveNodesFromCLB(lb_id='6', node_ids=s('3', '4')),
        # Unoptimizable steps pass through untouched
        CreateServer(server_config=pmap({}))])
    self.assertEqual(optimize_steps(steps), expected)
def test_no_clb_steps(self):
    """
    Returns same steps when there are no CLB steps passed
    """
    non_clb_steps = [
        CreateServer(server_config=pmap({"name": "server"})),
        DeleteServer(server_id="abc"),
    ]
    # With nothing to filter, the iterable round-trips unchanged.
    self.assertEqual(list(one_clb_step(non_clb_steps)), non_clb_steps)
def test_create_server_noname(self):
    """
    :obj:`CreateServer.as_effect`, when no name is provided in the launch
    config, will generate the name from scratch.

    This only verifies intent; result reporting is tested in
    :meth:`test_create_server`.
    """
    create = CreateServer(
        server_config=freeze({'server': {'flavorRef': '1'}}))
    name_eff = create.as_effect()
    # First the step must ask for a randomly generated server name.
    self.assertEqual(name_eff.intent, Func(generate_server_name))
    request_eff = resolve_effect(name_eff, 'random-name')
    # The generated name is used as-is, since no base name was given.
    expected_intent = service_request(
        ServiceType.CLOUD_SERVERS, 'POST', 'servers',
        data={'server': {'name': 'random-name', 'flavorRef': '1'}},
        success_pred=has_code(202),
        reauth_codes=(401,)).intent
    self.assertEqual(request_eff.intent, expected_intent)
def test_create_server_request_with_name(self):
    """
    :obj:`CreateServer.as_effect` produces a request for creating a server.
    If the name is given, a randomly generated suffix is appended to the
    server name.
    """
    create = CreateServer(
        server_config=freeze({'server': {'name': 'myserver',
                                         'flavorRef': '1'}}))
    name_eff = create.as_effect()
    # The step first requests a random suffix for the server name.
    self.assertEqual(name_eff.intent, Func(generate_server_name))
    request_eff = resolve_effect(name_eff, 'random-name')
    # The suffix is appended to the configured base name.
    expected_intent = service_request(
        ServiceType.CLOUD_SERVERS, 'POST', 'servers',
        data={'server': {'name': 'myserver-random-name',
                         'flavorRef': '1'}},
        success_pred=has_code(202),
        reauth_codes=(401,)).intent
    self.assertEqual(request_eff.intent, expected_intent)
def test_create_servers(self):
    """Logs :obj:`CreateServer`."""
    cfg = {'configgy': 'configged', 'nested': {'a': 'b'}}
    cfg2 = {'configgy': 'configged', 'nested': {'a': 'c'}}
    creates = pbag([
        CreateServer(server_config=freeze(cfg)),
        CreateServer(server_config=freeze(cfg)),
        CreateServer(server_config=freeze(cfg2))
    ])

    def expected_log(config, num):
        # One log entry per distinct config, counting duplicates.
        return Log('convergence-create-servers',
                   fields={'num_servers': num,
                           'server_config': config,
                           'cloud_feed': True})

    self.assert_logs(creates, [expected_log(cfg, 2),
                               expected_log(cfg2, 1)])
def _assert_create_server_with_errs_has_status(self, exceptions, status):
    """
    Helper function to make a :class:`CreateServer` effect, and resolve
    it with the provided exceptions, asserting that the result is the
    provided status, with the reason being the exception.
    """
    create_eff = CreateServer(
        server_config=freeze({'server': {'flavorRef': '1'}})).as_effect()
    # Resolve the name-generation intent so the request intent is next.
    request_eff = resolve_effect(create_eff, 'random-name')
    for exc in exceptions:
        expected = (status,
                    [ErrorReason.Exception(
                        matches(ContainsAll([type(exc), exc])))])
        actual = resolve_effect(
            request_eff, service_request_error_response(exc),
            is_error=True)
        self.assertEqual(actual, expected)
def test_optimize_leaves_other_steps(self):
    """
    Unoptimizable steps pass the optimizer unchanged.
    """
    # Note that the add & remove pair should not be the same;
    # the optimizer might reasonably optimize opposite
    # operations away in the future.
    unoptimizable = [
        AddNodesToCLB(lb_id='5', address_configs=s(
            ('1.1.1.1', CLBDescription(lb_id='5', port=80)))),
        RemoveNodesFromCLB(lb_id='6', node_ids=s('1')),
        CreateServer(server_config=pmap({})),
        BulkRemoveFromRCv3(lb_node_pairs=pset([("lb-1", "node-a")])),
        BulkAddToRCv3(lb_node_pairs=pset([("lb-2", "node-b")])),
    ]
    steps = pbag(unoptimizable)
    self.assertEqual(optimize_steps(steps), steps)
def test_mixed(self):
    """
    When there are multiple steps of same CLB then first step of each
    CLB is returned
    """
    non_clb = [
        CreateServer(server_config=pmap({"name": "server"})),
        DeleteServer(server_id="abc")]
    clb5_first = AddNodesToCLB(lb_id='5', address_configs=s(
        ('1.1.1.1', CLBDescription(lb_id='5', port=80))))
    clb5_second = RemoveNodesFromCLB(lb_id='5', node_ids=s('1'))
    clb6_first = RemoveNodesFromCLB(lb_id='6', node_ids=s('3'))
    clb6_second = AddNodesToCLB(lb_id='6', address_configs=s(
        ('2.1.1.1', CLBDescription(lb_id='6', port=80))))
    clb7_first = ChangeCLBNode(lb_id='7', node_id='9',
                               condition=CLBNodeCondition.ENABLED,
                               weight=10, type=CLBNodeType.PRIMARY)
    clb7_second = RemoveNodesFromCLB(lb_id='7', node_ids=s('4'))
    clb7_third = AddNodesToCLB(lb_id='7', address_configs=s(
        ('3.1.1.1', CLBDescription(lb_id='9', port=80))))
    clb5_third = ChangeCLBNode(lb_id='5', node_id='11',
                               condition=CLBNodeCondition.ENABLED,
                               weight=10, type=CLBNodeType.PRIMARY)
    steps = non_clb + [clb5_first, clb5_second, clb6_first, clb6_second,
                       clb7_first, clb7_second, clb7_third, clb5_third]
    # Non-CLB steps pass through, and only the first step seen for each
    # CLB (5, 6, and 7) survives.
    self.assertEqual(
        list(one_clb_step(steps)),
        non_clb + [clb5_first, clb6_first, clb7_first])
def _create_some_steps(self, counts=None):
    """
    Creates some steps for testing.

    :param counts: A mapping of supported step classes to the number of
        those steps to create.  If unspecified, assumed to be zero.

    :return: A pbag of steps.
    """
    # Use a None sentinel instead of a mutable default argument ({}),
    # which would be shared across all calls of this method.
    counts = {} if counts is None else counts
    create_servers = [
        CreateServer(server_config=pmap({"sentinel": i}))
        for i in xrange(counts.get(CreateServer, 0))
    ]
    delete_servers = [
        DeleteServer(server_id='abc-' + str(i))
        for i in xrange(counts.get(DeleteServer, 0))
    ]
    remove_from_clbs = [
        RemoveNodesFromCLB(lb_id='1', node_ids=(str(i), ))
        for i in xrange(counts.get(RemoveNodesFromCLB, 0))
    ]
    return pbag(create_servers + delete_servers + remove_from_clbs)
def test_create_server_success_case(self):
    """
    :obj:`CreateServer.as_effect`, when it results in a successful create,
    returns with :obj:`StepResult.RETRY`.
    """
    eff = CreateServer(
        server_config=freeze({'server': {'flavorRef': '1'}})).as_effect()
    create_intent = service_request(
        ServiceType.CLOUD_SERVERS, 'POST', 'servers',
        data={'server': {'name': 'random-name', 'flavorRef': '1'}},
        success_pred=has_code(202),
        reauth_codes=(401,)).intent
    seq = [
        (Func(generate_server_name), lambda _: 'random-name'),
        (create_intent,
         lambda _: (StubResponse(202, {}), {"server": {}})),
        (Log('request-create-server', ANY), lambda _: None)
    ]
    # Even a successful create returns RETRY, so convergence keeps
    # running until the server becomes active.
    self.assertEqual(
        perform_sequence(seq, eff),
        (StepResult.RETRY,
         [ErrorReason.String('waiting for server to become active')]))
def converge_launch_server(desired_state, servers_with_cheese,
                           load_balancer_nodes, load_balancers,
                           now, timeout=3600):
    """
    Create steps that indicate how to transition from the state provided
    by the given parameters to the :obj:`DesiredServerGroupState` described
    by ``desired_state``.

    :param DesiredServerGroupState desired_state: The desired group state.
    :param set servers_with_cheese: a list of :obj:`NovaServer` instances.
        This must only contain servers that are being managed for the specified
        group.
    :param load_balancer_nodes: a set of :obj:`ILBNode` providers.  This
        must contain all the load balancer mappings for all the load balancers
        (of all types) on the tenant.
    :param dict load_balancers: Collection of load balancer objects accessed
        based on its ID. The object is opaque and is not used by planner
        directly. It is intended to contain extra info for specific LB provider
    :param float now: number of seconds since the POSIX epoch indicating the
        time at which the convergence was requested.
    :param float timeout: Number of seconds after which we will delete a server
        in BUILD.

    :rtype: :obj:`pbag` of `IStep`
    """
    # Newest servers first, so older servers are preferred for deletion
    # when scaling down (the preferred-order lists are sliced from the end).
    newest_to_oldest = sorted(servers_with_cheese, key=lambda s: -s.created)

    # Bucket servers by their destiny; missing destinies yield empty lists.
    servers = defaultdict(lambda: [], groupby(get_destiny, newest_to_oldest))
    servers_in_active = servers[Destiny.CONSIDER_AVAILABLE]

    # Split timeout-eligible builders into timed-out vs. still waiting.
    building_too_long, waiting_for_build = partition_bool(
        lambda server: now - server.created >= timeout,
        servers[Destiny.WAIT_WITH_TIMEOUT])

    create_server = CreateServer(server_config=desired_state.server_config)

    # delete any servers that have been building for too long
    delete_timeout_steps = [DeleteServer(server_id=server.id)
                            for server in building_too_long]

    # create servers: enough to bring the group up to the desired capacity,
    # counting active, building, WAIT, and AVOID_REPLACING servers as present
    create_steps = [create_server] * (
        desired_state.capacity - (
            len(servers_in_active) +
            len(waiting_for_build) +
            len(servers[Destiny.WAIT]) +
            len(servers[Destiny.AVOID_REPLACING])))

    # Scale down over capacity, starting with building, then WAIT, then
    # AVOID_REPLACING, then active, preferring older.  Also, finish
    # draining/deleting servers already in draining state
    servers_in_preferred_order = (
        servers_in_active +
        servers[Destiny.AVOID_REPLACING] +
        servers[Destiny.WAIT] +
        waiting_for_build)
    servers_to_delete = servers_in_preferred_order[desired_state.capacity:]

    def drain_and_delete_a_server(server):
        # Drain a server from the load balancer nodes that match it, then
        # delete it, honoring the group's draining timeout.
        return _drain_and_delete(
            server,
            desired_state.draining_timeout,
            [node for node in load_balancer_nodes if node.matches(server)],
            now)

    try:
        scale_down_steps = list(
            mapcat(drain_and_delete_a_server,
                   servers_to_delete + servers[Destiny.DRAIN]))
    except DrainingUnavailable as de:
        # Draining info could not be obtained; fail this convergence cycle.
        return pbag([fail_convergence(de)])

    # delete all servers in error - draining does not need to be
    # handled because servers in error presumably are not serving
    # traffic anyway
    delete_error_steps = [DeleteServer(server_id=server.id)
                          for server in servers[Destiny.DELETE]]

    # clean up all the load balancers from deleted and errored servers
    cleanup_errored_and_deleted_steps = [
        remove_node_from_lb(lb_node)
        for server in servers[Destiny.DELETE] + servers[Destiny.CLEANUP]
        for lb_node in load_balancer_nodes if lb_node.matches(server)]

    # converge all the servers that remain to their desired load balancer
    # state
    still_active_servers = filter(lambda s: s not in servers_to_delete,
                                  servers_in_active)
    try:
        lb_converge_steps = [
            step
            for server in still_active_servers
            for step in _converge_lb_state(
                server,
                [node for node in load_balancer_nodes
                 if node.matches(server)],
                load_balancers,
                now,
                # Temporarily using build timeout as node offline timeout.
                # See https://github.com/rackerlabs/otter/issues/1905
                timeout)
            ]
    except DrainingUnavailable as de:
        return pbag([fail_convergence(de)])

    # Converge again if we expect state transitions on any servers
    converge_later = []
    if any((s not in servers_to_delete for s in waiting_for_build)):
        converge_later = [
            ConvergeLater(reasons=[ErrorReason.String('waiting for servers')])]

    unavail_fmt = ('Waiting for server {server_id} to transition to ACTIVE '
                   'from {status}')
    reasons = [ErrorReason.UserMessage(unavail_fmt.format(server_id=s.id,
                                                          status=s.state.name))
               for s in servers[Destiny.WAIT] if s not in servers_to_delete]
    if reasons:
        # `limited=True` marks this as a bounded retry — NOTE(review):
        # exact retry semantics are defined by ConvergeLater elsewhere.
        converge_later.append(ConvergeLater(limited=True, reasons=reasons))

    return pbag(create_steps + scale_down_steps + delete_error_steps +
                cleanup_errored_and_deleted_steps + delete_timeout_steps +
                lb_converge_steps + converge_later)