def _log_remove_from_clb(steps):
    """
    Emit one cloud-feed message per load balancer describing the CLB
    nodes being removed, and run the messages in parallel.
    """
    grouped = groupby(lambda step: step.lb_id, steps)
    msgs = []
    for lb_id, group in sorted(grouped.iteritems()):
        removed = sorted(concat(step.node_ids for step in group))
        msgs.append(cf_msg('convergence-remove-clb-nodes',
                           lb_id=lb_id, nodes=removed))
    return parallel(msgs)
def _log_bulk_rcv3(event, steps):
    """
    Emit one cloud-feed message (with the given event name) per RCv3 load
    balancer, listing the affected servers, and run them in parallel.
    """
    all_pairs = concat(step.lb_node_pairs for step in steps)
    per_lb = groupby(lambda pair: pair[0], all_pairs)
    msgs = []
    for lb_id, lb_pairs in sorted(per_lb.iteritems()):
        servers = sorted(pair[1] for pair in lb_pairs)
        msgs.append(cf_msg(event, lb_id=lb_id, servers=servers))
    return parallel(msgs)
def test_cf_msg(self):
    """
    `cf_msg` returns Effect with `Log` intent with cloud_feed=True
    """
    expected_intent = Log('message', dict(cloud_feed=True, a=2, b=3))
    seq = [(expected_intent, lambda _: 'logged')]
    result = perform_sequence(seq, cf_msg('message', a=2, b=3))
    self.assertEqual(result, 'logged')
def _log_remove_from_clb(steps):
    """
    Log the CLB nodes removed by the given steps: one parallel
    cloud-feed message per load balancer.
    """
    by_lb = groupby(lambda s: s.lb_id, steps)
    return parallel([
        cf_msg('convergence-remove-clb-nodes',
               lb_id=lb_id,
               nodes=sorted(concat(s.node_ids for s in grp)))
        for lb_id, grp in sorted(by_lb.iteritems())])
def _log_bulk_rcv3(event, steps):
    """
    Log bulk RCv3 changes: one parallel cloud-feed message per load
    balancer, carrying the sorted server ids from the (lb, node) pairs.
    """
    flattened = concat(s.lb_node_pairs for s in steps)
    grouped = groupby(lambda pair: pair[0], flattened)
    return parallel([
        cf_msg(event,
               lb_id=lb_id,
               servers=sorted(pair[1] for pair in grp))
        for lb_id, grp in sorted(grouped.iteritems())])
def test_cf_msg(self):
    """
    `cf_msg` returns Effect with `Log` intent with cloud_feed=True
    """
    log_intent = Log('message', dict(cloud_feed=True, a=2, b=3))
    self.assertEqual(
        perform_sequence([(log_intent, lambda _: 'logged')],
                         cf_msg('message', a=2, b=3)),
        'logged')
def _log_set_metadata(steps):
    """
    Emit one cloud-feed message per (key, value) metadata pair listing
    the servers it was set on, and run the messages in parallel.
    """
    grouped = groupby(lambda step: (step.key, step.value), steps)
    msgs = []
    for (key, value), group in sorted(grouped.iteritems()):
        server_ids = sorted(step.server_id for step in group)
        msgs.append(cf_msg('convergence-set-server-metadata',
                           servers=server_ids, key=key, value=value))
    return parallel(msgs)
def _(steps):
    """
    Emit one cloud-feed message per distinct server config, counting how
    many servers were created with it, and run the messages in parallel.
    """
    grouped = groupby(lambda step: step.server_config, steps)
    msgs = []
    # We sort the items with `thaw` because PMap does not support
    # comparison
    for config, config_steps in sorted(grouped.iteritems(), key=thaw):
        msgs.append(cf_msg('convergence-create-servers',
                           num_servers=len(config_steps),
                           server_config=dict(config)))
    return parallel(msgs)
def _(steps):
    """
    Log server creations grouped by server config: one parallel
    cloud-feed message per distinct config with its server count.
    """
    per_config = groupby(lambda s: s.server_config, steps)
    # We sort the items with `thaw` because PMap does not support
    # comparison
    ordered = sorted(per_config.iteritems(), key=thaw)
    return parallel([
        cf_msg('convergence-create-servers',
               num_servers=len(grp),
               server_config=dict(cfg))
        for cfg, grp in ordered])
def _log_set_metadata(steps):
    """
    Log server-metadata updates: one parallel cloud-feed message per
    (key, value) pair with the sorted list of affected servers.
    """
    by_key_value = groupby(lambda s: (s.key, s.value), steps)
    return parallel([
        cf_msg('convergence-set-server-metadata',
               servers=sorted(step.server_id for step in grp),
               key=k,
               value=v)
        for (k, v), grp in sorted(by_key_value.iteritems())])
def _log_add_nodes_clb(steps):
    """
    Emit one cloud-feed message per load balancer listing the
    'address:port' strings added to it, and run them in parallel.
    """
    addresses_by_lb = defaultdict(list)
    for step in steps:
        for address, config in step.address_configs:
            addresses_by_lb[step.lb_id].append(
                '%s:%s' % (address, config.port))
    msgs = []
    for lb_id, addrs in sorted(addresses_by_lb.iteritems()):
        msgs.append(cf_msg('convergence-add-clb-nodes',
                           lb_id=lb_id, addresses=sorted(addrs)))
    return parallel(msgs)
def _log_add_nodes_clb(steps):
    """
    Log CLB node additions: one parallel cloud-feed message per load
    balancer with its sorted 'address:port' entries.
    """
    grouped = defaultdict(list)
    for step in steps:
        lb = step.lb_id
        for addr, cfg in step.address_configs:
            grouped[lb].append('%s:%s' % (addr, cfg.port))
    return parallel([
        cf_msg('convergence-add-clb-nodes',
               lb_id=lb_id,
               addresses=sorted(addr_list))
        for lb_id, addr_list in sorted(grouped.iteritems())])
def _log_change_clb_node(steps):
    """
    Emit one cloud-feed message per (lb, condition, weight, type)
    combination listing the changed node ids, and run them in parallel.
    """
    grouped = groupby(
        lambda step: (step.lb_id, step.condition, step.weight, step.type),
        steps)
    msgs = []
    for (lb_id, condition, weight, node_type), grp in \
            sorted(grouped.iteritems()):
        msgs.append(cf_msg('convergence-change-clb-nodes',
                           lb_id=lb_id,
                           nodes=sorted(step.node_id for step in grp),
                           condition=condition.name,
                           weight=weight,
                           type=node_type.name))
    return parallel(msgs)
def _log_change_clb_node(steps):
    """
    Log CLB node modifications: one parallel cloud-feed message per
    distinct (lb, condition, weight, type) grouping.
    """
    by_settings = groupby(
        lambda s: (s.lb_id, s.condition, s.weight, s.type), steps)
    return parallel([
        cf_msg('convergence-change-clb-nodes',
               lb_id=lb,
               nodes=sorted([s.node_id for s in grouped]),
               condition=cond.name,
               weight=wt,
               type=ntype.name)
        for (lb, cond, wt, ntype), grouped
        in sorted(by_settings.iteritems())])
def convergence_succeeded(executor, scaling_group, group_state, resources,
                          now):
    """
    Handle convergence success.

    Effect generator: decides the iteration outcome based on the group's
    status, refreshes the servers cache, and yields the final
    :obj:`ConvergenceIterationStatus` via ``do_return``.

    :param executor: object providing ``update_cache`` (an Effect factory)
    :param scaling_group: the scaling group being converged; ``tenant_id``
        and ``uuid`` are read from it
    :param group_state: current group state; only ``status`` is inspected
    :param resources: dict of resources passed through to ``update_cache``
        as keyword arguments
    :param now: timestamp recorded in the servers cache update
    """
    if group_state.status == ScalingGroupStatus.DELETING:
        # servers have been deleted. Delete the group for real
        yield Effect(DeleteGroup(tenant_id=scaling_group.tenant_id,
                                 group_id=scaling_group.uuid))
        # group is gone: stop iterating and report deletion
        yield do_return(ConvergenceIterationStatus.GroupDeleted())
    elif group_state.status == ScalingGroupStatus.ERROR:
        # convergence succeeded, so the group has recovered: flip it back
        # to ACTIVE and announce the transition on the cloud feed
        yield Effect(UpdateGroupStatus(scaling_group=scaling_group,
                                       status=ScalingGroupStatus.ACTIVE))
        yield cf_msg('group-status-active',
                     status=ScalingGroupStatus.ACTIVE.name)
    # update servers cache with latest servers
    yield executor.update_cache(scaling_group, now, include_deleted=False,
                                **resources)
    yield do_return(ConvergenceIterationStatus.Stop())
def convergence_succeeded(executor, scaling_group, group_state, resources):
    """
    Handle convergence success.

    Effect generator: decides the iteration outcome based on the group's
    status, refreshes the servers cache with the current UTC time, and
    yields the final :obj:`ConvergenceIterationStatus` via ``do_return``.

    :param executor: object providing ``update_cache`` (an Effect factory)
    :param scaling_group: the scaling group being converged; ``tenant_id``
        and ``uuid`` are read from it
    :param group_state: current group state; only ``status`` is inspected
    :param resources: dict of resources passed through to ``update_cache``
        as keyword arguments
    """
    if group_state.status == ScalingGroupStatus.DELETING:
        # servers have been deleted. Delete the group for real
        yield Effect(DeleteGroup(tenant_id=scaling_group.tenant_id,
                                 group_id=scaling_group.uuid))
        # group is gone: stop iterating and report deletion
        yield do_return(ConvergenceIterationStatus.GroupDeleted())
    elif group_state.status == ScalingGroupStatus.ERROR:
        # convergence succeeded, so the group has recovered: flip it back
        # to ACTIVE and announce the transition on the cloud feed
        yield Effect(UpdateGroupStatus(scaling_group=scaling_group,
                                       status=ScalingGroupStatus.ACTIVE))
        yield cf_msg('group-status-active',
                     status=ScalingGroupStatus.ACTIVE.name)
    # update servers cache with latest servers.
    # See [Convergence servers cache] comment on top of the file.
    now = yield Effect(Func(datetime.utcnow))
    yield executor.update_cache(scaling_group, now, include_deleted=False,
                                **resources)
    yield do_return(ConvergenceIterationStatus.Stop())
def _log_delete_servers(steps):
    """
    Emit a single cloud-feed message listing the ids of all servers
    being deleted by the given steps.
    """
    server_ids = [step.server_id for step in steps]
    return cf_msg('convergence-delete-servers', servers=sorted(server_ids))
def _log_delete_servers(steps):
    """
    Log server deletions as one cloud-feed message with the sorted
    server ids.
    """
    return cf_msg('convergence-delete-servers',
                  servers=sorted(s.server_id for s in steps))