def test_unordered_flow_retry_and_task_same_requires_provides(self):
    flow = uf.Flow('uf', retry.AlwaysRevert('rt', requires=['x']))
    flow.add(utils.TaskOneReturn(provides=['x']))
    self.assertEqual(set(['x']), flow.requires)
    self.assertEqual(set(['x']), flow.provides)
Example #2
def test_run_empty_flow(self):
    flow = uf.Flow('p-1')
    engine = self._make_engine(flow)
    self.assertRaises(exc.EmptyFlow, engine.run)
Example #3
def test_retry_in_unordered_flow(self):
    flo = uf.Flow("test", retry.AlwaysRevert("c"))
    compilation = compiler.PatternCompiler(flo).compile()
    self.assertEqual(3, len(compilation.execution_graph))
    self.assertEqual(2, compilation.execution_graph.number_of_edges())
Example #4
    def get_failover_amphora_flow(self, failed_amphora, lb_amp_count):
        """Get a Taskflow flow to failover an amphora.

        1. Build a replacement amphora.
        2. Delete the old amphora.
        3. Update the amphorae listener configurations.
        4. Update the VRRP configurations if needed.

        :param failed_amphora: The amphora object to failover.
        :param lb_amp_count: The number of amphora on this load balancer.
        :returns: The flow that will provide the failover.
        """
        failover_amp_flow = linear_flow.Flow(constants.FAILOVER_AMPHORA_FLOW)

        # Revert amphora to status ERROR if this flow goes wrong
        failover_amp_flow.add(
            lifecycle_tasks.AmphoraToErrorOnRevertTask(
                requires=constants.AMPHORA,
                inject={constants.AMPHORA: failed_amphora}))

        if failed_amphora.role in (constants.ROLE_MASTER,
                                   constants.ROLE_BACKUP):
            amp_role = 'master_or_backup'
        elif failed_amphora.role == constants.ROLE_STANDALONE:
            amp_role = 'standalone'
        elif failed_amphora.role is None:
            amp_role = 'spare'
        else:
            amp_role = 'undefined'
        LOG.info(
            "Performing failover for amphora: %s", {
                "id": failed_amphora.id,
                "load_balancer_id": failed_amphora.load_balancer_id,
                "lb_network_ip": failed_amphora.lb_network_ip,
                "compute_id": failed_amphora.compute_id,
                "role": amp_role
            })

        failover_amp_flow.add(
            database_tasks.MarkAmphoraPendingDeleteInDB(
                requires=constants.AMPHORA,
                inject={constants.AMPHORA: failed_amphora}))

        failover_amp_flow.add(
            database_tasks.MarkAmphoraHealthBusy(
                requires=constants.AMPHORA,
                inject={constants.AMPHORA: failed_amphora}))

        failover_amp_flow.add(
            network_tasks.GetVIPSecurityGroupID(
                requires=constants.LOADBALANCER_ID,
                provides=constants.VIP_SG_ID))

        is_spare = True
        is_vrrp_ipv6 = False
        if failed_amphora.load_balancer_id:
            is_spare = False
            if failed_amphora.vrrp_ip:
                is_vrrp_ipv6 = utils.is_ipv6(failed_amphora.vrrp_ip)

            # Get a replacement amphora and plug all of the networking.
            #
            # Do this early as the compute services have been observed to be
            # unreliable. The community decided the chance that deleting first
            # would open resources for an instance is less likely than the
            # compute service failing to boot an instance for other reasons.

            # TODO(johnsom) Move this back out to run for spares after
            #               delete amphora API is available.
            failover_amp_flow.add(
                self.get_amphora_for_lb_failover_subflow(
                    prefix=constants.FAILOVER_LOADBALANCER_FLOW,
                    role=failed_amphora.role,
                    failed_amp_vrrp_port_id=failed_amphora.vrrp_port_id,
                    is_vrrp_ipv6=is_vrrp_ipv6,
                    is_spare=is_spare))

        failover_amp_flow.add(
            self.get_delete_amphora_flow(
                failed_amphora,
                retry_attempts=CONF.controller_worker.amphora_delete_retries,
                retry_interval=(
                    CONF.controller_worker.amphora_delete_retry_interval)))
        failover_amp_flow.add(
            database_tasks.DisableAmphoraHealthMonitoring(
                requires=constants.AMPHORA,
                inject={constants.AMPHORA: failed_amphora}))

        if not failed_amphora.load_balancer_id:
            # This is an unallocated amphora (spares pool), we are done.
            return failover_amp_flow

        failover_amp_flow.add(
            database_tasks.GetLoadBalancer(requires=constants.LOADBALANCER_ID,
                                           inject={
                                               constants.LOADBALANCER_ID:
                                               failed_amphora.load_balancer_id
                                           },
                                           provides=constants.LOADBALANCER))

        failover_amp_flow.add(
            database_tasks.GetAmphoraeFromLoadbalancer(
                name=constants.GET_AMPHORAE_FROM_LB,
                requires=constants.LOADBALANCER_ID,
                inject={
                    constants.LOADBALANCER_ID: failed_amphora.load_balancer_id
                },
                provides=constants.AMPHORAE))

        # Setup timeouts for our requests to the amphorae
        timeout_dict = {
            constants.CONN_MAX_RETRIES:
            CONF.haproxy_amphora.active_connection_max_retries,
            constants.CONN_RETRY_INTERVAL:
            CONF.haproxy_amphora.active_connection_rety_interval
        }

        # Listeners update needs to be run on all amphora to update
        # their peer configurations. So parallelize this with an
        # unordered subflow.
        update_amps_subflow = unordered_flow.Flow(
            constants.UPDATE_AMPS_SUBFLOW)

        for amp_index in range(0, lb_amp_count):
            update_amps_subflow.add(
                amphora_driver_tasks.AmphoraIndexListenerUpdate(
                    name=str(amp_index) + '-' + constants.AMP_LISTENER_UPDATE,
                    requires=(constants.LOADBALANCER, constants.AMPHORAE),
                    inject={
                        constants.AMPHORA_INDEX: amp_index,
                        constants.TIMEOUT_DICT: timeout_dict
                    }))

        failover_amp_flow.add(update_amps_subflow)

        # Configure and enable keepalived in the amphora
        if lb_amp_count == 2:
            failover_amp_flow.add(
                self.get_vrrp_subflow(constants.GET_VRRP_SUBFLOW,
                                      timeout_dict,
                                      create_vrrp_group=False))

        # Reload the listener. This needs to be done here because
        # it will create the required haproxy check scripts for
        # the VRRP deployed above.
        # A "U" or newer amphora-agent will remove the need for this
        # task here.
        # TODO(johnsom) Remove this in the "W" cycle
        reload_listener_subflow = unordered_flow.Flow(
            constants.AMPHORA_LISTENER_RELOAD_SUBFLOW)

        for amp_index in range(0, lb_amp_count):
            reload_listener_subflow.add(
                amphora_driver_tasks.AmphoraIndexListenersReload(
                    name=(str(amp_index) + '-' +
                          constants.AMPHORA_RELOAD_LISTENER),
                    requires=(constants.LOADBALANCER, constants.AMPHORAE),
                    inject={
                        constants.AMPHORA_INDEX: amp_index,
                        constants.TIMEOUT_DICT: timeout_dict
                    }))

        failover_amp_flow.add(reload_listener_subflow)

        # Remove any extraneous ports
        # Note: Nova sometimes fails to delete ports attached to an instance.
        #       For example, if you create an LB with a listener, then
        #       'openstack server delete' the amphora, you will see the vrrp
        #       port attached to that instance will remain after the instance
        #       is deleted.
        # TODO(johnsom) Fix this as part of
        #               https://storyboard.openstack.org/#!/story/2007077

        # Mark LB ACTIVE
        failover_amp_flow.add(
            database_tasks.MarkLBActiveInDB(mark_subobjects=True,
                                            requires=constants.LOADBALANCER))

        return failover_amp_flow
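
The method above only composes the flow; executing it is the controller
worker's job. A minimal sketch of how such a flow could be handed to a
TaskFlow engine (the names failover_flow and lb_id are placeholders here,
not Octavia's actual wiring):

from taskflow import engines

# Hypothetical hand-off: load the composed flow with the input it requires
# and run it synchronously. Octavia's controller worker additionally wires in
# persistence, job-board posting and notification listeners around this step.
engine = engines.load(failover_flow,
                      store={constants.LOADBALANCER_ID: lb_id})
engine.run()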
Example #5
# be used with more than one engine (sharing the execution thread pool between
# them); this allows for saving resources and reusing threads in situations
# where this is beneficial.


class DelayedTask(task.Task):
    def __init__(self, name):
        super(DelayedTask, self).__init__(name=name)
        self._wait_for = random.random()

    def execute(self):
        print("Running '%s' in thread '%s'" % (self.name, tu.get_ident()))
        time.sleep(self._wait_for)


f1 = uf.Flow("f1")
f1.add(DelayedTask("f1-1"))
f1.add(DelayedTask("f1-2"))

f2 = uf.Flow("f2")
f2.add(DelayedTask("f2-1"))
f2.add(DelayedTask("f2-2"))

# Run them all using the same futures (thread-pool based) executor...
with futures.ThreadPoolExecutor() as ex:
    e1 = engines.load(f1, engine='parallel', executor=ex)
    e2 = engines.load(f2, engine='parallel', executor=ex)
    iters = [e1.run_iter(), e2.run_iter()]
    # Iterate over a copy (so we can remove from the source list).
    cloned_iters = list(iters)
    while iters:
        # Step each engine forward one transition at a time until both
        # engines have finished running their flows.
        for it in cloned_iters:
            try:
                next(it)
            except StopIteration:
                if it in iters:
                    iters.remove(it)
def test_unordered_flow_add_nothing(self):
    f = uf.Flow('test')
    result = f.add()
    self.assertIs(f, result)
    self.assertEqual(len(f), 0)
Example #7
# Upper bound of numbers to sum for example purposes...
UPPER_BOUND = 10000

# How many mappers we want to have.
SPLIT = 10

# How big of a chunk we want to give each mapper.
CHUNK_SIZE = UPPER_BOUND // SPLIT
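
# The code below references chunk_iter, SumMapper and TotalReducer, whose
# definitions were truncated from this snippet. A minimal sketch of what they
# could look like (assumptions, not necessarily the original code); it also
# assumes 'from taskflow import task' was among the truncated imports.
def chunk_iter(chunk_size, upper_bound):
    """Yield successive lists of 'chunk_size' integers below 'upper_bound'."""
    chunk = []
    for i in range(0, upper_bound):
        chunk.append(i)
        if len(chunk) == chunk_size:
            yield chunk
            chunk = []


class SumMapper(task.Task):
    def execute(self, inputs):
        # Sum the chunk of numbers handed to this mapper.
        return sum(inputs)


class TotalReducer(task.Task):
    def execute(self, *args, **kwargs):
        # Add up every 'reduction_*' result produced by the mappers.
        total = 0
        for (name, value) in kwargs.items():
            if name.startswith('reduction_'):
                total += value
        return total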

# This will be the workflow we will compose and run.
w = linear_flow.Flow("root")

# The mappers will run in parallel.
store = {}
provided = []
mappers = unordered_flow.Flow('map')
for i, chunk in enumerate(chunk_iter(CHUNK_SIZE, UPPER_BOUND)):
    mapper_name = 'mapper_%s' % i
    # Give that mapper some information to compute.
    store[mapper_name] = chunk
    # The reducer uses all of the outputs of the mappers, so it needs
    # to be recorded that it needs access to them (under a specific name).
    provided.append("reduction_%s" % i)
    mappers.add(SumMapper(name=mapper_name,
                          rebind={'inputs': mapper_name},
                          provides=provided[-1]))
w.add(mappers)

# The reducer will run last (after all the mappers).
w.add(TotalReducer('reducer', requires=provided))
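
To actually run the composed workflow, the store built above is handed to an
engine; a minimal sketch, assuming the parallel engine and the usual
'from taskflow import engines' import:

# Each mapper pulls its chunk out of 'store'; with the parallel engine the
# mappers may execute concurrently, and the reducer runs after all of them.
result = engines.run(w, engine='parallel', store=store)
print(result)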
Example #8
def test_nested_flows_provides_same_values(self):
    flow = lf.Flow('lf').add(
        uf.Flow('uf').add(utils.TaskOneReturn(provides='x')))
    self.assertRaises(exceptions.DependencyFailure, flow.add,
                      gf.Flow('gf').add(utils.TaskOneReturn(provides='x')))
        # task that is created). A name based off the volume id that is to be
        # created is more easily tied back to the original task so that the
        # volume create can be resumed/revert, and is much easier to use for
        # audit and tracking purposes.
        base_name = reflection.get_callable_name(self)
        super(VolumeCreator, self).__init__(name="%s-%s" % (base_name,
                                                            volume_id))
        self._volume_id = volume_id

    def execute(self):
        print("Making volume %s" % (self._volume_id))
        time.sleep(random.random() * MAX_CREATE_TIME)
        print("Finished making volume %s" % (self._volume_id))


# Assume there is no ordering dependency between volumes.
flow = uf.Flow("volume-maker")
for i in range(0, VOLUME_COUNT):
    flow.add(VolumeCreator(volume_id="vol-%s" % (i)))


# Show how much time the overall engine loading and running takes.
with show_time(name=flow.name.title()):
    eng = engines.load(flow, engine=engine)
    # This context manager automatically adds (and automatically removes) a
    # helpful set of state transition notification printing helper utilities
    # that show you exactly what transitions the engine is going through
    # while running the various volume create tasks.
    with printing.PrintingListener(eng):
        eng.run()
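
The header of this example was truncated; it would have provided the imports,
the constants and the show_time helper used above. A rough sketch of those
assumed pieces (the values are illustrative only):

import contextlib
import time

VOLUME_COUNT = 5
MAX_CREATE_TIME = 3
engine = 'parallel'


@contextlib.contextmanager
def show_time(name):
    # Print how long the wrapped block took once it finishes.
    start = time.time()
    yield
    print(" -- %s took %0.3f seconds" % (name, time.time() - start))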
Example #10
def test_unordered_flow_provides_required_values(self):
    flow = uf.Flow('uf')
    self.assertRaises(exceptions.InvariantViolation, flow.add,
                      utils.TaskOneReturn('task1', provides='x'),
                      utils.TaskOneArg('task2'))
Example #11
def test_unordered_flow_provides_same_values_one_add(self):
    flow = uf.Flow('uf')
    self.assertRaises(exceptions.DependencyFailure, flow.add,
                      utils.TaskOneReturn(provides='x'),
                      utils.TaskOneReturn(provides='x'))
Example #12
        self.logger.debug('Result of Task ' + self.name + ':')
        self.logger.debug(json.dumps(result, indent=4, sort_keys=True))
        return result


if __name__ == '__main__':
    logging.basicConfig(level=logging.INFO)
    logger = logging.getLogger("Main")

    hosts = ['10.175.150.16']
    module_name = 'shell'
    module_args = 'echo "Hello World"'
    pattern = '*'

    linearflow = linear_flow.Flow('Linear_Flow')
    unorderedflow = unordered_flow.Flow('Unordered_Flow')

    for i in range(0, 5):
        linear_task_name = 'Linear_task_' + str(i)
        linear_task = AnsibleTask(linear_task_name, hosts, module_name,
                                  module_args, pattern)
        unordered_task_name = 'Unordered_task_' + str(i)
        unordered_task = AnsibleTask(unordered_task_name, hosts, module_name,
                                     module_args, pattern)
        linearflow.add(linear_task)
        unorderedflow.add(unordered_task)

    flow = linear_flow.Flow('Final_Flow')
    flow.add(linearflow, unorderedflow)

    eng = engines.load(flow)
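
The snippet stops right after loading the engine; to execute the linear flow
(tasks in order) followed by the unordered flow (tasks in any order), a run
call would typically follow — a minimal sketch:

    # Run the composed 'Final_Flow' and dump whatever results the tasks stored.
    eng.run()
    logger.info('Flow %s finished with results: %s',
                flow.name, eng.storage.fetch_all())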
Example #13
def create_flow(self, name):
    return unordered_flow.Flow(name)
Example #14
def test_unordered_flow_retry_two_tasks_provide_same_value(self):
    flow = uf.Flow('uf', retry.AlwaysRevert('rt', provides=['y']))
    flow.add(utils.TaskOneReturn('t1', provides=['x']),
             utils.TaskOneReturn('t2', provides=['x']))
    self.assertEqual(set(['x', 'y']), flow.provides)
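
The next example was scraped without its task definitions; Provider and Adder
are assumed to be small task classes along these lines (a sketch, not
necessarily the original code):

from taskflow import task


class Provider(task.Task):
    """Returns a fixed tuple of values, bound to the names in 'provides'."""

    def __init__(self, name, *args, **kwargs):
        super(Provider, self).__init__(name=name, **kwargs)
        self._provide = args

    def execute(self):
        return self._provide


class Adder(task.Task):
    """Adds the two values it is rebound to and returns the sum."""

    def execute(self, x, y):
        return x + y
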
flow = lf.Flow('root').add(
    # Provide the initial values for other tasks to depend on.
    #
    # x1 = 2, y1 = 3, x2 = 5, y2 = 8
    Provider("provide-adder", 2, 3, 5, 8, provides=('x1', 'y1', 'x2', 'y2')),
    # Note here that we define the flow that contains the 2 adders to be an
    # unordered flow since the order in which these execute does not matter,
    # another way to solve this would be to use a graph_flow pattern, which
    # also can run in parallel (since they have no ordering dependencies).
    uf.Flow('adders').add(
        # Calculate 'z1 = x1+y1 = 5'
        #
        # Rebind here means that the execute() function x argument will be
        # satisfied from a previous output named 'x1', and the y argument
        # of execute() will be populated from the previous output named 'y1'
        #
        # The output (result of adding) will be mapped into a variable named
        # 'z1' which can then be referred to and depended on by other tasks.
        Adder(name="add", provides='z1', rebind=['x1', 'y1']),
        # z2 = x2+y2 = 13
        Adder(name="add-2", provides='z2', rebind=['x2', 'y2']),
    ),
    # r = z1+z2 = 18
    Adder(name="sum-1", provides='r', rebind=['z1', 'z2']))

# The result here will be all results (from all tasks) which is stored in an
# in-memory storage location that backs this engine since it is not configured
# with persistence storage.
result = taskflow.engines.run(flow, engine='parallel')
print(result)
Example #16
def test_unordered_flow_without_dependencies(self):
    flow = uf.Flow('uf').add(utils.TaskNoRequiresNoReturns('task1'),
                             utils.TaskNoRequiresNoReturns('task2'))
    self.assertEqual(flow.requires, set())
    self.assertEqual(flow.provides, set())
Example #17
    def get_failover_flow(self,
                          role=constants.ROLE_STANDALONE,
                          load_balancer=None):
        """Creates a flow to failover a stale amphora

        :returns: The flow for amphora failover
        """

        failover_amphora_flow = linear_flow.Flow(
            constants.FAILOVER_AMPHORA_FLOW)

        failover_amphora_flow.add(
            lifecycle_tasks.AmphoraToErrorOnRevertTask(
                rebind={constants.AMPHORA: constants.FAILED_AMPHORA},
                requires=constants.AMPHORA))

        failover_amphora_flow.add(
            network_tasks.FailoverPreparationForAmphora(
                rebind={constants.AMPHORA: constants.FAILED_AMPHORA},
                requires=constants.AMPHORA))

        # Note: It seems intuitive to boot an amphora prior to deleting
        #       the old amphora, however this is a complicated issue.
        #       If the target host (due to anti-affinity) is resource
        #       constrained, this will fail where a post-delete will
        #       succeed. Since this is async with the API it would result
        #       in the LB ending in ERROR though the amps are still alive.
        #       Consider in the future making this a complicated
        #       try-on-failure-retry flow, or move upgrade failovers to be
        #       synchronous with the API. For now spares pool and act/stdby
        #       will mitigate most of this delay.

        # Delete the old amphora
        failover_amphora_flow.add(
            database_tasks.MarkAmphoraPendingDeleteInDB(
                rebind={constants.AMPHORA: constants.FAILED_AMPHORA},
                requires=constants.AMPHORA))
        failover_amphora_flow.add(
            database_tasks.MarkAmphoraHealthBusy(
                rebind={constants.AMPHORA: constants.FAILED_AMPHORA},
                requires=constants.AMPHORA))
        failover_amphora_flow.add(
            compute_tasks.ComputeDelete(
                rebind={constants.AMPHORA: constants.FAILED_AMPHORA},
                requires=constants.AMPHORA))
        failover_amphora_flow.add(
            network_tasks.WaitForPortDetach(
                rebind={constants.AMPHORA: constants.FAILED_AMPHORA},
                requires=constants.AMPHORA))
        failover_amphora_flow.add(
            database_tasks.MarkAmphoraDeletedInDB(
                rebind={constants.AMPHORA: constants.FAILED_AMPHORA},
                requires=constants.AMPHORA))

        # If this is an unallocated amp (spares pool), we're done
        if not load_balancer:
            failover_amphora_flow.add(
                database_tasks.DisableAmphoraHealthMonitoring(
                    rebind={constants.AMPHORA: constants.FAILED_AMPHORA},
                    requires=constants.AMPHORA))
            return failover_amphora_flow

        # Save failed amphora details for later
        failover_amphora_flow.add(
            database_tasks.GetAmphoraDetails(
                rebind={constants.AMPHORA: constants.FAILED_AMPHORA},
                requires=constants.AMPHORA,
                provides=constants.AMP_DATA))

        # Get a new amphora
        # Note: Role doesn't matter here.  We will update it later.
        get_amp_subflow = self.get_amphora_for_lb_subflow(
            prefix=constants.FAILOVER_AMPHORA_FLOW)
        failover_amphora_flow.add(get_amp_subflow)

        # Update the new amphora with the failed amphora details
        failover_amphora_flow.add(
            database_tasks.UpdateAmpFailoverDetails(
                requires=(constants.AMPHORA, constants.AMP_DATA)))

        # Update the data stored in the flow from the database
        failover_amphora_flow.add(
            database_tasks.ReloadLoadBalancer(
                requires=constants.LOADBALANCER_ID,
                provides=constants.LOADBALANCER))

        failover_amphora_flow.add(
            database_tasks.ReloadAmphora(requires=constants.AMPHORA_ID,
                                         provides=constants.AMPHORA))

        # Prepare to reconnect the network interface(s)
        failover_amphora_flow.add(
            network_tasks.GetAmphoraeNetworkConfigs(
                requires=constants.LOADBALANCER,
                provides=constants.AMPHORAE_NETWORK_CONFIG))
        failover_amphora_flow.add(
            database_tasks.GetListenersFromLoadbalancer(
                requires=constants.LOADBALANCER, provides=constants.LISTENERS))
        failover_amphora_flow.add(
            database_tasks.GetAmphoraeFromLoadbalancer(
                requires=constants.LOADBALANCER, provides=constants.AMPHORAE))

        # Plug the VIP ports into the new amphora
        # These steps were moved here because the UDP listeners need to do
        # some kernel configuration before the listener update; this avoids
        # failures while the amphora is being rebuilt.
        failover_amphora_flow.add(
            network_tasks.PlugVIPPort(
                requires=(constants.AMPHORA,
                          constants.AMPHORAE_NETWORK_CONFIG)))
        failover_amphora_flow.add(
            amphora_driver_tasks.AmphoraPostVIPPlug(
                requires=(constants.AMPHORA, constants.LOADBALANCER,
                          constants.AMPHORAE_NETWORK_CONFIG)))

        # Listeners update needs to be run on all amphora to update
        # their peer configurations. So parallelize this with an
        # unordered subflow.
        update_amps_subflow = unordered_flow.Flow(
            constants.UPDATE_AMPS_SUBFLOW)

        timeout_dict = {
            constants.CONN_MAX_RETRIES:
            CONF.haproxy_amphora.active_connection_max_retries,
            constants.CONN_RETRY_INTERVAL:
            CONF.haproxy_amphora.active_connection_rety_interval
        }

        # Set up parallel flows for each amp. We don't know the new amp
        # details at flow creation time, so set up a subflow for each amp
        # on the LB; each task indexes into a list of amps to find the
        # amphora it should work on.
        amp_index = 0
        for amp in load_balancer.amphorae:
            if amp.status == constants.DELETED:
                continue
            update_amps_subflow.add(
                amphora_driver_tasks.AmpListenersUpdate(
                    name=constants.AMP_LISTENER_UPDATE + '-' + str(amp_index),
                    requires=(constants.LOADBALANCER, constants.AMPHORAE),
                    inject={
                        constants.AMPHORA_INDEX: amp_index,
                        constants.TIMEOUT_DICT: timeout_dict
                    }))
            amp_index += 1

        failover_amphora_flow.add(update_amps_subflow)

        # Plug the member networks into the new amphora
        failover_amphora_flow.add(
            network_tasks.CalculateAmphoraDelta(
                requires=(constants.LOADBALANCER, constants.AMPHORA),
                provides=constants.DELTA))

        failover_amphora_flow.add(
            network_tasks.HandleNetworkDelta(requires=(constants.AMPHORA,
                                                       constants.DELTA),
                                             provides=constants.ADDED_PORTS))

        failover_amphora_flow.add(
            amphora_driver_tasks.AmphoraePostNetworkPlug(
                requires=(constants.LOADBALANCER, constants.ADDED_PORTS)))

        failover_amphora_flow.add(
            database_tasks.ReloadLoadBalancer(
                name='octavia-failover-LB-reload-2',
                requires=constants.LOADBALANCER_ID,
                provides=constants.LOADBALANCER))

        # Handle the amphora role and VRRP if necessary
        if role == constants.ROLE_MASTER:
            failover_amphora_flow.add(
                database_tasks.MarkAmphoraMasterInDB(
                    name=constants.MARK_AMP_MASTER_INDB,
                    requires=constants.AMPHORA))
            vrrp_subflow = self.get_vrrp_subflow(role)
            failover_amphora_flow.add(vrrp_subflow)
        elif role == constants.ROLE_BACKUP:
            failover_amphora_flow.add(
                database_tasks.MarkAmphoraBackupInDB(
                    name=constants.MARK_AMP_BACKUP_INDB,
                    requires=constants.AMPHORA))
            vrrp_subflow = self.get_vrrp_subflow(role)
            failover_amphora_flow.add(vrrp_subflow)
        elif role == constants.ROLE_STANDALONE:
            failover_amphora_flow.add(
                database_tasks.MarkAmphoraStandAloneInDB(
                    name=constants.MARK_AMP_STANDALONE_INDB,
                    requires=constants.AMPHORA))

        failover_amphora_flow.add(
            amphora_driver_tasks.ListenersStart(
                requires=(constants.LOADBALANCER, constants.AMPHORA)))
        failover_amphora_flow.add(
            database_tasks.DisableAmphoraHealthMonitoring(
                rebind={constants.AMPHORA: constants.FAILED_AMPHORA},
                requires=constants.AMPHORA))

        return failover_amphora_flow
Example #18
def test_unordered_flow_self_requires(self):
    flow = uf.Flow('uf')
    with self.assertRaises(exceptions.InvariantViolationException):
        flow.add(utils.TaskNoRequiresNoReturns(rebind=['x'], provides='x'))

def test_unordered_flow_two_task_same_provide(self):
    task1 = _task(name='task1', provides=['a', 'b'])
    task2 = _task(name='task2', provides=['a', 'c'])
    f = uf.Flow('test')
    f.add(task2, task1)
    self.assertEqual(len(f), 2)
Example #20
def test_unordered_flow_requires_rebind_values(self):
    flow = uf.Flow('uf').add(utils.TaskOneArg('task1', rebind=['q']),
                             utils.TaskMultiArg('task2'))
    self.assertEqual(flow.requires, set(['x', 'y', 'z', 'q']))
    self.assertEqual(flow.provides, set())
Example #21
    def get_vrrp_subflow(self,
                         prefix,
                         timeout_dict=None,
                         create_vrrp_group=True):
        sf_name = prefix + '-' + constants.GET_VRRP_SUBFLOW
        vrrp_subflow = linear_flow.Flow(sf_name)

        # Optimization for failover flow. No reason to call this
        # when configuring the secondary amphora.
        if create_vrrp_group:
            vrrp_subflow.add(
                database_tasks.CreateVRRPGroupForLB(
                    name=sf_name + '-' + constants.CREATE_VRRP_GROUP_FOR_LB,
                    requires=constants.LOADBALANCER_ID))

        vrrp_subflow.add(
            network_tasks.GetAmphoraeNetworkConfigs(
                name=sf_name + '-' + constants.GET_AMP_NETWORK_CONFIG,
                requires=constants.LOADBALANCER_ID,
                provides=constants.AMPHORAE_NETWORK_CONFIG))

        # VRRP update needs to be run on all amphora to update
        # their peer configurations. So parallelize this with an
        # unordered subflow.
        update_amps_subflow = unordered_flow.Flow('VRRP-update-subflow')

        # We have three tasks to run in order, per amphora
        amp_0_subflow = linear_flow.Flow('VRRP-amp-0-update-subflow')

        amp_0_subflow.add(
            amphora_driver_tasks.AmphoraIndexUpdateVRRPInterface(
                name=sf_name + '-0-' + constants.AMP_UPDATE_VRRP_INTF,
                requires=constants.AMPHORAE,
                inject={
                    constants.AMPHORA_INDEX: 0,
                    constants.TIMEOUT_DICT: timeout_dict
                },
                provides=constants.AMP_VRRP_INT))

        amp_0_subflow.add(
            amphora_driver_tasks.AmphoraIndexVRRPUpdate(
                name=sf_name + '-0-' + constants.AMP_VRRP_UPDATE,
                requires=(constants.LOADBALANCER_ID,
                          constants.AMPHORAE_NETWORK_CONFIG,
                          constants.AMPHORAE, constants.AMP_VRRP_INT),
                inject={
                    constants.AMPHORA_INDEX: 0,
                    constants.TIMEOUT_DICT: timeout_dict
                }))

        amp_0_subflow.add(
            amphora_driver_tasks.AmphoraIndexVRRPStart(
                name=sf_name + '-0-' + constants.AMP_VRRP_START,
                requires=constants.AMPHORAE,
                inject={
                    constants.AMPHORA_INDEX: 0,
                    constants.TIMEOUT_DICT: timeout_dict
                }))

        amp_1_subflow = linear_flow.Flow('VRRP-amp-1-update-subflow')

        amp_1_subflow.add(
            amphora_driver_tasks.AmphoraIndexUpdateVRRPInterface(
                name=sf_name + '-1-' + constants.AMP_UPDATE_VRRP_INTF,
                requires=constants.AMPHORAE,
                inject={
                    constants.AMPHORA_INDEX: 1,
                    constants.TIMEOUT_DICT: timeout_dict
                },
                provides=constants.AMP_VRRP_INT))

        amp_1_subflow.add(
            amphora_driver_tasks.AmphoraIndexVRRPUpdate(
                name=sf_name + '-1-' + constants.AMP_VRRP_UPDATE,
                requires=(constants.LOADBALANCER_ID,
                          constants.AMPHORAE_NETWORK_CONFIG,
                          constants.AMPHORAE, constants.AMP_VRRP_INT),
                inject={
                    constants.AMPHORA_INDEX: 1,
                    constants.TIMEOUT_DICT: timeout_dict
                }))
        amp_1_subflow.add(
            amphora_driver_tasks.AmphoraIndexVRRPStart(
                name=sf_name + '-1-' + constants.AMP_VRRP_START,
                requires=constants.AMPHORAE,
                inject={
                    constants.AMPHORA_INDEX: 1,
                    constants.TIMEOUT_DICT: timeout_dict
                }))

        update_amps_subflow.add(amp_0_subflow)
        update_amps_subflow.add(amp_1_subflow)

        vrrp_subflow.add(update_amps_subflow)

        return vrrp_subflow
Example #22
def test_unordered_flow_provides_values(self):
    flow = uf.Flow('uf').add(
        utils.TaskOneReturn('task1', provides='x'),
        utils.TaskMultiReturn('task2', provides=['a', 'b', 'c']))
    self.assertEqual(flow.requires, set())
    self.assertEqual(flow.provides, set(['x', 'a', 'b', 'c']))
Example #23
def test_run_empty_unordered_flow(self):
    flow = uf.Flow('flow-1', utils.OneReturnRetry(provides='x'))
    engine = self._make_engine(flow)
    engine.run()
    self.assertEqual(engine.storage.fetch_all(), {'x': 1})
Example #24
def test_unordered_flow_provides_required_value_other_call(self):
    flow = uf.Flow('uf')
    flow.add(utils.TaskOneArg('task2'))
    with self.assertRaises(exceptions.InvariantViolationException):
        flow.add(utils.TaskOneReturn('task1', provides='x'))
Example #25
    def execute(self, output):
        if self._show_name:
            print("%s: %s" % (self.name, output))
        else:
            print(output)


# This will be the work that we want done, which for this example is just to
# print 'hello world' (like a song) using different tasks and different
# execution models.
song = lf.Flow("beats")

# Unordered flows, when run, can be run in parallel; and a chorus is everyone
# singing at once, of course!
hi_chorus = uf.Flow('hello')
world_chorus = uf.Flow('world')
for (name, hello, world) in [('bob', 'hello', 'world'),
                             ('joe', 'hellooo', 'worllllld'),
                             ('sue', "helloooooo!", 'wooorllld!')]:
    hi_chorus.add(PrinterTask("%s@hello" % name,
                              # This will show up to the execute() method of
                              # the task as the argument named 'output' (which
                              # will allow us to print the character we want).
                              inject={'output': hello}))
    world_chorus.add(PrinterTask("%s@world" % name,
                                 inject={'output': world}))

# The composition starts with the conductor and then runs in sequence with
# the chorus running in parallel, but no matter what the 'hello' chorus must
# always run before the 'world' chorus (otherwise the world will fall apart).
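
The add call that this comment describes was cut off in the scrape; a minimal
sketch of the composition and of running it, assuming the usual
'import taskflow.engines' import:

song.add(hi_chorus, world_chorus)

# Run the composed song; with a parallel engine the members of each chorus
# can sing (print) concurrently, but 'hello' still always precedes 'world'.
taskflow.engines.run(song, engine='parallel')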
        raise IOError("Joe not home right now.")
        print("Calling joe %s." % joe_number)

    def revert(self, joe_number, *args, **kwargs):
        print("Calling Joe %s and apologizing." % joe_number)


class CallSuzzie(task.Task):
    def execute(self, suzzie_number, *args, **kwargs):
        raise IOError("Suzzie not home right now.")
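

# CallJim and CallJohn are referenced in the flows below, but their
# definitions were truncated from this snippet; a plausible sketch
# (an assumption, mirroring CallJoe and CallSuzzie above):
class CallJim(task.Task):
    def execute(self, jim_number, *args, **kwargs):
        print("Calling jim %s." % jim_number)

    def revert(self, jim_number, *args, **kwargs):
        print("Calling jim %s and apologizing." % jim_number)


class CallJohn(task.Task):
    def execute(self, john_number, *args, **kwargs):
        print("Calling john %s." % john_number)

    def revert(self, john_number, *args, **kwargs):
        print("Calling john %s and apologizing." % john_number)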


# Create your flow and associated tasks (the work to be done).

u_flow = uf.Flow('simple-unordered').add(
    CallJim(inject={'jim_number': '8105760129'}),
    CallJoe(inject={'joe_number': '8660519397'}),
    CallSuzzie(inject={'suzzie_number': '9481075653'})
)

flow = lf.Flow('simple-linear').add(
    CallJohn(inject={'john_number': '8105760129'}),
    u_flow
)


try:
    # Now run that flow using the provided initial data (store below).
    taskflow.engines.run(flow, engine='parallel', executor='threaded',
                         max_workers=2)
except Exception as e:
    print("Flow failed: %s" % e)
Example #27
def test_parallel_flow_one_task(self):
    flow = uf.Flow('p-1').add(utils.SaveOrderTask(name='task1'))
    self._make_engine(flow).run()
    self.assertEqual(self.values, ['task1'])
Example #28
    def get_failover_LB_flow(self, amps, lb):
        """Failover a load balancer.

        1. Validate the VIP port is correct and present.
        2. Build a replacement amphora.
        3. Delete the failed amphora.
        4. Configure the replacement amphora listeners.
        5. Configure VRRP for the listeners.
        6. Build the second replacement amphora.
        7. Delete the second failed amphora.
        8. Delete any extraneous amphora.
        9. Configure the listeners on the new amphorae.
        10. Configure the VRRP on the new amphorae.
        11. Reload the listener configurations to pick up VRRP changes.
        12. Mark the load balancer back to ACTIVE.

        :returns: The flow that will provide the failover.
        """
        lb_topology = lb[constants.FLAVOR][constants.LOADBALANCER_TOPOLOGY]
        # Pick one amphora to be failed over if any exist.
        failed_amp = None
        if amps:
            failed_amp = amps.pop()

        failover_LB_flow = linear_flow.Flow(
            constants.FAILOVER_LOADBALANCER_FLOW)

        # Revert LB to provisioning_status ERROR if this flow goes wrong
        failover_LB_flow.add(
            lifecycle_tasks.LoadBalancerToErrorOnRevertTask(
                requires=constants.LOADBALANCER))

        # Setup timeouts for our requests to the amphorae
        timeout_dict = {
            constants.CONN_MAX_RETRIES:
            CONF.haproxy_amphora.active_connection_max_retries,
            constants.CONN_RETRY_INTERVAL:
            CONF.haproxy_amphora.active_connection_rety_interval
        }

        if failed_amp:
            failed_amp_role = failed_amp.get(constants.ROLE)
            if failed_amp_role in (constants.ROLE_MASTER,
                                   constants.ROLE_BACKUP):
                amp_role = 'master_or_backup'
            elif failed_amp_role == constants.ROLE_STANDALONE:
                amp_role = 'standalone'
            elif failed_amp_role is None:
                amp_role = 'spare'
            else:
                amp_role = 'undefined'
            LOG.info(
                "Performing failover for amphora: %s", {
                    "id": failed_amp.get(constants.ID),
                    "load_balancer_id": lb.get(constants.ID),
                    "lb_network_ip": failed_amp.get(constants.LB_NETWORK_IP),
                    "compute_id": failed_amp.get(constants.COMPUTE_ID),
                    "role": amp_role
                })

            failover_LB_flow.add(
                database_tasks.MarkAmphoraPendingDeleteInDB(
                    requires=constants.AMPHORA,
                    inject={constants.AMPHORA: failed_amp}))

            failover_LB_flow.add(
                database_tasks.MarkAmphoraHealthBusy(
                    requires=constants.AMPHORA,
                    inject={constants.AMPHORA: failed_amp}))

        # Check that the VIP port exists and is ok
        failover_LB_flow.add(
            network_tasks.AllocateVIPforFailover(
                requires=constants.LOADBALANCER, provides=constants.VIP))

        # Update the database with the VIP information
        failover_LB_flow.add(
            database_tasks.UpdateVIPAfterAllocation(
                requires=(constants.LOADBALANCER_ID, constants.VIP),
                provides=constants.LOADBALANCER))

        # Make sure the SG has the correct rules and re-apply to the
        # VIP port. It is not used on the VIP port, but will help lock
        # the SG as in use.
        failover_LB_flow.add(
            network_tasks.UpdateVIPSecurityGroup(
                requires=constants.LOADBALANCER_ID,
                provides=constants.VIP_SG_ID))

        new_amp_role = constants.ROLE_STANDALONE
        if lb_topology == constants.TOPOLOGY_ACTIVE_STANDBY:
            new_amp_role = constants.ROLE_BACKUP

        # Get a replacement amphora and plug all of the networking.
        #
        # Do this early as the compute services have been observed to be
        # unreliable. The community decided the chance that deleting first
        # would open resources for an instance is less likely than the compute
        # service failing to boot an instance for other reasons.
        if failed_amp:
            failed_vrrp_is_ipv6 = False
            if failed_amp.get(constants.VRRP_IP):
                failed_vrrp_is_ipv6 = utils.is_ipv6(
                    failed_amp[constants.VRRP_IP])
            failover_LB_flow.add(
                self.amp_flows.get_amphora_for_lb_failover_subflow(
                    prefix=constants.FAILOVER_LOADBALANCER_FLOW,
                    role=new_amp_role,
                    failed_amp_vrrp_port_id=failed_amp.get(
                        constants.VRRP_PORT_ID),
                    is_vrrp_ipv6=failed_vrrp_is_ipv6))
        else:
            failover_LB_flow.add(
                self.amp_flows.get_amphora_for_lb_failover_subflow(
                    prefix=constants.FAILOVER_LOADBALANCER_FLOW,
                    role=new_amp_role))

        if lb_topology == constants.TOPOLOGY_ACTIVE_STANDBY:
            failover_LB_flow.add(
                database_tasks.MarkAmphoraBackupInDB(
                    name=constants.MARK_AMP_BACKUP_INDB,
                    requires=constants.AMPHORA))

        # Delete the failed amp
        if failed_amp:
            failover_LB_flow.add(
                self.amp_flows.get_delete_amphora_flow(failed_amp))

        # Update the data stored in the flow from the database
        failover_LB_flow.add(
            database_tasks.ReloadLoadBalancer(
                requires=constants.LOADBALANCER_ID,
                provides=constants.LOADBALANCER))

        # Configure the listener(s)
        # We will run update on this amphora again later if this is
        # an active/standby load balancer because we want this amp
        # functional as soon as possible. It must run again to update
        # the configurations for the new peers.
        failover_LB_flow.add(
            amphora_driver_tasks.AmpListenersUpdate(
                name=constants.AMP_LISTENER_UPDATE,
                requires=(constants.LOADBALANCER, constants.AMPHORA),
                inject={constants.TIMEOUT_DICT: timeout_dict}))

        # Bring up the new "backup" amphora VIP now to reduce the outage
        # on the final failover. This dropped the outage from 8-9 seconds
        # to less than one in my lab.
        # This does mean some steps have to be repeated later to reconfigure
        # for the second amphora as a peer.
        if lb_topology == constants.TOPOLOGY_ACTIVE_STANDBY:

            failover_LB_flow.add(
                database_tasks.CreateVRRPGroupForLB(
                    name=new_amp_role + '-' +
                    constants.CREATE_VRRP_GROUP_FOR_LB,
                    requires=constants.LOADBALANCER_ID))

            failover_LB_flow.add(
                network_tasks.GetAmphoraNetworkConfigsByID(
                    name=(new_amp_role + '-' +
                          constants.GET_AMPHORA_NETWORK_CONFIGS_BY_ID),
                    requires=(constants.LOADBALANCER_ID, constants.AMPHORA_ID),
                    provides=constants.FIRST_AMP_NETWORK_CONFIGS))

            failover_LB_flow.add(
                amphora_driver_tasks.AmphoraUpdateVRRPInterface(
                    name=new_amp_role + '-' + constants.AMP_UPDATE_VRRP_INTF,
                    requires=constants.AMPHORA,
                    inject={constants.TIMEOUT_DICT: timeout_dict},
                    provides=constants.FIRST_AMP_VRRP_INTERFACE))

            failover_LB_flow.add(
                amphora_driver_tasks.AmphoraVRRPUpdate(
                    name=new_amp_role + '-' + constants.AMP_VRRP_UPDATE,
                    requires=(constants.LOADBALANCER_ID, constants.AMPHORA),
                    rebind={
                        constants.AMPHORAE_NETWORK_CONFIG:
                        constants.FIRST_AMP_NETWORK_CONFIGS,
                        constants.AMP_VRRP_INT:
                        constants.FIRST_AMP_VRRP_INTERFACE
                    },
                    inject={constants.TIMEOUT_DICT: timeout_dict}))

            failover_LB_flow.add(
                amphora_driver_tasks.AmphoraVRRPStart(
                    name=new_amp_role + '-' + constants.AMP_VRRP_START,
                    requires=constants.AMPHORA,
                    inject={constants.TIMEOUT_DICT: timeout_dict}))

            # Start the listener. This needs to be done here because
            # it will create the required haproxy check scripts for
            # the VRRP deployed above.
            # A "V" or newer amphora-agent will remove the need for this
            # task here.
            # TODO(johnsom) Remove this in the "X" cycle
            failover_LB_flow.add(
                amphora_driver_tasks.ListenersStart(
                    name=new_amp_role + '-' + constants.AMP_LISTENER_START,
                    requires=(constants.LOADBALANCER, constants.AMPHORA)))

            #  #### Work on standby amphora if needed #####

            new_amp_role = constants.ROLE_MASTER
            failed_amp = None
            if amps:
                failed_amp = amps.pop()

            if failed_amp:
                failed_amp_role = failed_amp.get(constants.ROLE)
                if failed_amp_role in (constants.ROLE_MASTER,
                                       constants.ROLE_BACKUP):
                    amp_role = 'master_or_backup'
                elif failed_amp_role == constants.ROLE_STANDALONE:
                    amp_role = 'standalone'
                elif failed_amp_role is None:
                    amp_role = 'spare'
                else:
                    amp_role = 'undefined'
                LOG.info(
                    "Performing failover for amphora: %s", {
                        "id": failed_amp.get(constants.ID),
                        "load_balancer_id": lb.get(constants.ID),
                        "lb_network_ip": failed_amp.get(
                            constants.LB_NETWORK_IP),
                        "compute_id": failed_amp.get(constants.COMPUTE_ID),
                        "role": amp_role
                    })

                failover_LB_flow.add(
                    database_tasks.MarkAmphoraPendingDeleteInDB(
                        name=(new_amp_role + '-' +
                              constants.MARK_AMPHORA_PENDING_DELETE),
                        requires=constants.AMPHORA,
                        inject={constants.AMPHORA: failed_amp}))

                failover_LB_flow.add(
                    database_tasks.MarkAmphoraHealthBusy(
                        name=(new_amp_role + '-' +
                              constants.MARK_AMPHORA_HEALTH_BUSY),
                        requires=constants.AMPHORA,
                        inject={constants.AMPHORA: failed_amp}))

            # Get a replacement amphora and plug all of the networking.
            #
            # Do this early as the compute services have been observed to be
            # unreliable. The community decided the chance that deleting first
            # would open resources for an instance is less likely than the
            # compute service failing to boot an instance for other reasons.
            failover_LB_flow.add(
                self.amp_flows.get_amphora_for_lb_failover_subflow(
                    prefix=(new_amp_role + '-' +
                            constants.FAILOVER_LOADBALANCER_FLOW),
                    role=new_amp_role))

            failover_LB_flow.add(
                database_tasks.MarkAmphoraMasterInDB(
                    name=constants.MARK_AMP_MASTER_INDB,
                    requires=constants.AMPHORA))

            # Delete the failed amp
            if failed_amp:
                failover_LB_flow.add(
                    self.amp_flows.get_delete_amphora_flow(failed_amp))
                failover_LB_flow.add(
                    database_tasks.DisableAmphoraHealthMonitoring(
                        name=(new_amp_role + '-' +
                              constants.DISABLE_AMP_HEALTH_MONITORING),
                        requires=constants.AMPHORA,
                        inject={constants.AMPHORA: failed_amp}))

        # Remove any extraneous amphora
        # Note: This runs in all topology situations.
        #       It should run before the act/stdby final listener update so
        #       that we don't bother attempting to update dead amphorae.
        delete_extra_amps_flow = unordered_flow.Flow(
            constants.DELETE_EXTRA_AMPHORAE_FLOW)
        for amp in amps:
            LOG.debug(
                'Found extraneous amphora %s on load balancer %s. '
                'Deleting.', amp.get(constants.ID), lb.get(constants.ID))
            delete_extra_amps_flow.add(
                self.amp_flows.get_delete_amphora_flow(amp))

        failover_LB_flow.add(delete_extra_amps_flow)

        if lb_topology == constants.TOPOLOGY_ACTIVE_STANDBY:
            # Update the data stored in the flow from the database
            failover_LB_flow.add(
                database_tasks.ReloadLoadBalancer(
                    name=new_amp_role + '-' +
                    constants.RELOAD_LB_AFTER_AMP_ASSOC,
                    requires=constants.LOADBALANCER_ID,
                    provides=constants.LOADBALANCER))

            failover_LB_flow.add(
                database_tasks.GetAmphoraeFromLoadbalancer(
                    name=new_amp_role + '-' + constants.GET_AMPHORAE_FROM_LB,
                    requires=constants.LOADBALANCER_ID,
                    provides=constants.AMPHORAE))

            # Listeners update needs to be run on all amphora to update
            # their peer configurations. So parallelize this with an
            # unordered subflow.
            update_amps_subflow = unordered_flow.Flow(
                constants.UPDATE_AMPS_SUBFLOW)

            # Set up parallel flows for each amp. We don't know the new amp
            # details at flow creation time, so set up a subflow for each amp
            # on the LB; each task indexes into a list of amps to find the
            # amphora it should work on.
            update_amps_subflow.add(
                amphora_driver_tasks.AmphoraIndexListenerUpdate(
                    name=(constants.AMPHORA + '-0-' +
                          constants.AMP_LISTENER_UPDATE),
                    requires=(constants.LOADBALANCER, constants.AMPHORAE),
                    inject={
                        constants.AMPHORA_INDEX: 0,
                        constants.TIMEOUT_DICT: timeout_dict
                    }))
            update_amps_subflow.add(
                amphora_driver_tasks.AmphoraIndexListenerUpdate(
                    name=(constants.AMPHORA + '-1-' +
                          constants.AMP_LISTENER_UPDATE),
                    requires=(constants.LOADBALANCER, constants.AMPHORAE),
                    inject={
                        constants.AMPHORA_INDEX: 1,
                        constants.TIMEOUT_DICT: timeout_dict
                    }))

            failover_LB_flow.add(update_amps_subflow)

            # Configure and enable keepalived in the amphora
            failover_LB_flow.add(
                self.amp_flows.get_vrrp_subflow(new_amp_role + '-' +
                                                constants.GET_VRRP_SUBFLOW,
                                                timeout_dict,
                                                create_vrrp_group=False))

            # #### End of standby ####

            # Reload the listener. This needs to be done here because
            # it will create the required haproxy check scripts for
            # the VRRP deployed above.
            # A "V" or newer amphora-agent will remove the need for this
            # task here.
            # TODO(johnsom) Remove this in the "X" cycle
            failover_LB_flow.add(
                amphora_driver_tasks.AmphoraIndexListenersReload(
                    name=(new_amp_role + '-' +
                          constants.AMPHORA_RELOAD_LISTENER),
                    requires=(constants.LOADBALANCER, constants.AMPHORAE),
                    inject={
                        constants.AMPHORA_INDEX: 1,
                        constants.TIMEOUT_DICT: timeout_dict
                    }))

        # Remove any extraneous ports
        # Note: Nova sometimes fails to delete ports attached to an instance.
        #       For example, if you create an LB with a listener, then
        #       'openstack server delete' the amphora, you will see the vrrp
        #       port attached to that instance will remain after the instance
        #       is deleted.
        # TODO(johnsom) Fix this as part of
        #               https://storyboard.openstack.org/#!/story/2007077

        # Mark LB ACTIVE
        failover_LB_flow.add(
            database_tasks.MarkLBActiveInDB(mark_subobjects=True,
                                            requires=constants.LOADBALANCER))

        return failover_LB_flow
Example #29
    def get_batch_update_members_flow(self, old_members, new_members,
                                      updated_members):
        """Create a flow to batch update members

        :returns: The flow for batch updating members
        """
        batch_update_members_flow = linear_flow.Flow(
            constants.BATCH_UPDATE_MEMBERS_FLOW)
        unordered_members_flow = unordered_flow.Flow(
            constants.UNORDERED_MEMBER_UPDATES_FLOW)
        unordered_members_active_flow = unordered_flow.Flow(
            constants.UNORDERED_MEMBER_ACTIVE_FLOW)

        # Delete old members
        unordered_members_flow.add(
            lifecycle_tasks.MembersToErrorOnRevertTask(
                inject={constants.MEMBERS: old_members},
                name='{flow}-deleted'.format(
                    flow=constants.MEMBER_TO_ERROR_ON_REVERT_FLOW)))
        for m in old_members:
            unordered_members_flow.add(
                model_tasks.DeleteModelObject(
                    inject={constants.OBJECT: m},
                    name='{flow}-{id}'.format(
                        id=m.id, flow=constants.DELETE_MODEL_OBJECT_FLOW)))
            unordered_members_flow.add(
                database_tasks.DeleteMemberInDB(
                    inject={constants.MEMBER: m},
                    name='{flow}-{id}'.format(
                        id=m.id, flow=constants.DELETE_MEMBER_INDB)))
            unordered_members_flow.add(
                database_tasks.DecrementMemberQuota(
                    inject={constants.MEMBER: m},
                    name='{flow}-{id}'.format(
                        id=m.id, flow=constants.DECREMENT_MEMBER_QUOTA_FLOW)))

        # Create new members
        unordered_members_flow.add(
            lifecycle_tasks.MembersToErrorOnRevertTask(
                inject={constants.MEMBERS: new_members},
                name='{flow}-created'.format(
                    flow=constants.MEMBER_TO_ERROR_ON_REVERT_FLOW)))
        for m in new_members:
            unordered_members_active_flow.add(
                database_tasks.MarkMemberActiveInDB(
                    inject={constants.MEMBER: m},
                    name='{flow}-{id}'.format(
                        id=m.id, flow=constants.MARK_MEMBER_ACTIVE_INDB)))

        # Update existing members
        unordered_members_flow.add(
            lifecycle_tasks.MembersToErrorOnRevertTask(
                # updated_members is a list of (obj, dict), only pass `obj`
                inject={constants.MEMBERS: [m[0] for m in updated_members]},
                name='{flow}-updated'.format(
                    flow=constants.MEMBER_TO_ERROR_ON_REVERT_FLOW)))
        for m, um in updated_members:
            um.pop('id', None)
            unordered_members_active_flow.add(
                database_tasks.MarkMemberActiveInDB(
                    inject={constants.MEMBER: m},
                    name='{flow}-{id}'.format(
                        id=m.id, flow=constants.MARK_MEMBER_ACTIVE_INDB)))

        batch_update_members_flow.add(unordered_members_flow)

        # Done, do real updates
        batch_update_members_flow.add(
            network_tasks.CalculateDelta(requires=constants.LOADBALANCER,
                                         provides=constants.DELTAS))
        batch_update_members_flow.add(
            network_tasks.HandleNetworkDeltas(requires=constants.DELTAS,
                                              provides=constants.ADDED_PORTS))
        batch_update_members_flow.add(
            amphora_driver_tasks.AmphoraePostNetworkPlug(
                requires=(constants.LOADBALANCER, constants.ADDED_PORTS)))

        # Update the Listener (this makes the changes active on the Amp)
        batch_update_members_flow.add(
            amphora_driver_tasks.ListenersUpdate(
                requires=(constants.LOADBALANCER, constants.LISTENERS)))

        # Mark all the members ACTIVE here, then pool then LB/Listeners
        batch_update_members_flow.add(unordered_members_active_flow)
        batch_update_members_flow.add(
            database_tasks.MarkPoolActiveInDB(requires=constants.POOL))
        batch_update_members_flow.add(
            database_tasks.MarkLBAndListenersActiveInDB(
                requires=(constants.LOADBALANCER, constants.LISTENERS)))

        return batch_update_members_flow
Example #30
def test_retry_in_unordered_flow_requires_and_provides(self):
    flow = uf.Flow(
        'uf',
        retry.AlwaysRevert('rt', requires=['x', 'y'], provides=['a', 'b']))
    self.assertEqual(flow.requires, set(['x', 'y']))
    self.assertEqual(flow.provides, set(['a', 'b']))