Example #1
    def get_create_all_listeners_flow(self):
        """Create a flow to create all listeners

        :returns: The flow for creating all listeners
        """
        create_all_listeners_flow = linear_flow.Flow(
            constants.CREATE_LISTENERS_FLOW)
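        # Fetch the listeners attached to the load balancer from the database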
        create_all_listeners_flow.add(
            database_tasks.GetListenersFromLoadbalancer(
                requires=constants.LOADBALANCER,
                provides=constants.LISTENERS))
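        # Reload the load balancer so the flow works with fresh data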
        create_all_listeners_flow.add(database_tasks.ReloadLoadBalancer(
            requires=constants.LOADBALANCER_ID,
            provides=constants.LOADBALANCER))
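        # Push the listener configurations out to the amphorae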
        create_all_listeners_flow.add(amphora_driver_tasks.ListenersUpdate(
            requires=[constants.LOADBALANCER, constants.LISTENERS]))
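        # Update the VIP (e.g. security group rules) for the listener set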
        create_all_listeners_flow.add(network_tasks.UpdateVIP(
            requires=constants.LOADBALANCER))
        return create_all_listeners_flow
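
A minimal usage sketch, not part of the original example: `flows`, `lb`, and the store contents below are assumptions based on the tasks' `requires` declarations above, showing how such a flow is typically run with a TaskFlow engine.

# Hypothetical usage sketch: `flows` is an instance of the class that
# defines get_create_all_listeners_flow(); `lb` is a load balancer
# object obtained from the Octavia database layer.
from taskflow import engines

flow = flows.get_create_all_listeners_flow()
# Seed the flow storage with the inputs the tasks require:
# LOADBALANCER_ID feeds ReloadLoadBalancer, LOADBALANCER feeds the rest.
engines.load(flow, store={constants.LOADBALANCER: lb,
                          constants.LOADBALANCER_ID: lb.id}).run()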
Example #2
    def get_failover_flow(self, role=constants.ROLE_STANDALONE,
                          load_balancer=None):
        """Creates a flow to failover a stale amphora

        :returns: The flow for amphora failover
        """

        failover_amphora_flow = linear_flow.Flow(
            constants.FAILOVER_AMPHORA_FLOW)

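        # If this flow reverts, mark the failed amphora as in ERROR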
        failover_amphora_flow.add(lifecycle_tasks.AmphoraToErrorOnRevertTask(
            rebind={constants.AMPHORA: constants.FAILED_AMPHORA},
            requires=constants.AMPHORA))

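        # Let the network driver prepare the failed amphora for failover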
        failover_amphora_flow.add(network_tasks.FailoverPreparationForAmphora(
            rebind={constants.AMPHORA: constants.FAILED_AMPHORA},
            requires=constants.AMPHORA))

        # Note: It seems intuitive to boot an amphora prior to deleting
        #       the old amphora, however this is a complicated issue.
        #       If the target host (due to anti-affinity) is resource
        #       constrained, this will fail where a post-delete will
        #       succeed. Since this is async with the API it would result
        #       in the LB ending in ERROR though the amps are still alive.
        #       Consider in the future making this a complicated
        #       try-on-failure-retry flow, or move upgrade failovers to be
        #       synchronous with the API. For now spares pool and act/stdby
        #       will mitigate most of this delay.

        # Delete the old amphora
        failover_amphora_flow.add(
            database_tasks.MarkAmphoraPendingDeleteInDB(
                rebind={constants.AMPHORA: constants.FAILED_AMPHORA},
                requires=constants.AMPHORA))
        failover_amphora_flow.add(
            database_tasks.MarkAmphoraHealthBusy(
                rebind={constants.AMPHORA: constants.FAILED_AMPHORA},
                requires=constants.AMPHORA))
        failover_amphora_flow.add(compute_tasks.ComputeDelete(
            rebind={constants.AMPHORA: constants.FAILED_AMPHORA},
            requires=constants.AMPHORA))
        failover_amphora_flow.add(network_tasks.WaitForPortDetach(
            rebind={constants.AMPHORA: constants.FAILED_AMPHORA},
            requires=constants.AMPHORA))
        failover_amphora_flow.add(database_tasks.MarkAmphoraDeletedInDB(
            rebind={constants.AMPHORA: constants.FAILED_AMPHORA},
            requires=constants.AMPHORA))

        # If this is an unallocated amp (spares pool), we're done
        if not load_balancer:
            failover_amphora_flow.add(
                database_tasks.DisableAmphoraHealthMonitoring(
                    rebind={constants.AMPHORA: constants.FAILED_AMPHORA},
                    requires=constants.AMPHORA))
            return failover_amphora_flow

        # Save failed amphora details for later
        failover_amphora_flow.add(
            database_tasks.GetAmphoraDetails(
                rebind={constants.AMPHORA: constants.FAILED_AMPHORA},
                requires=constants.AMPHORA,
                provides=constants.AMP_DATA))

        # Get a new amphora
        # Note: Role doesn't matter here.  We will update it later.
        get_amp_subflow = self.get_amphora_for_lb_subflow(
            prefix=constants.FAILOVER_AMPHORA_FLOW)
        failover_amphora_flow.add(get_amp_subflow)

        # Update the new amphora with the failed amphora details
        failover_amphora_flow.add(database_tasks.UpdateAmpFailoverDetails(
            requires=(constants.AMPHORA, constants.AMP_DATA)))

        # Update the data stored in the flow from the database
        failover_amphora_flow.add(database_tasks.ReloadLoadBalancer(
            requires=constants.LOADBALANCER_ID,
            provides=constants.LOADBALANCER))

        failover_amphora_flow.add(database_tasks.ReloadAmphora(
            requires=constants.AMPHORA,
            provides=constants.AMPHORA))

        # Prepare to reconnect the network interface(s)
        failover_amphora_flow.add(network_tasks.GetAmphoraeNetworkConfigs(
            requires=constants.LOADBALANCER,
            provides=constants.AMPHORAE_NETWORK_CONFIG))
        failover_amphora_flow.add(database_tasks.GetListenersFromLoadbalancer(
            requires=constants.LOADBALANCER, provides=constants.LISTENERS))
        failover_amphora_flow.add(database_tasks.GetAmphoraeFromLoadbalancer(
            requires=constants.LOADBALANCER, provides=constants.AMPHORAE))

        # Plug the VIP ports into the new amphora
        # These steps run here because UDP listeners need some kernel
        # configuration applied before the listener update, to avoid
        # failures while the amphora is being rebuilt.
        failover_amphora_flow.add(network_tasks.PlugVIPPort(
            requires=(constants.AMPHORA, constants.AMPHORAE_NETWORK_CONFIG)))
        failover_amphora_flow.add(amphora_driver_tasks.AmphoraPostVIPPlug(
            requires=(constants.AMPHORA, constants.LOADBALANCER,
                      constants.AMPHORAE_NETWORK_CONFIG)))

        # The listeners update needs to run on all amphorae to refresh
        # their peer configurations, so parallelize it with an
        # unordered subflow.
        update_amps_subflow = unordered_flow.Flow(
            constants.UPDATE_AMPS_SUBFLOW)

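        # NOTE: 'rety' below is kept as-is; it mirrors the spelling of
        #       the option as defined in Octavia's haproxy_amphora
        #       config group.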
        timeout_dict = {
            constants.CONN_MAX_RETRIES:
                CONF.haproxy_amphora.active_connection_max_retries,
            constants.CONN_RETRY_INTERVAL:
                CONF.haproxy_amphora.active_connection_rety_interval}

        # Set up parallel flows for each amp. We don't know the new amp
        # details at flow creation time, so create a subflow for each
        # amp on the LB; each task indexes into the list of amps to
        # find the amphora it should work on.
        amp_index = 0
        db_lb = self.lb_repo.get(db_apis.get_session(),
                                 id=load_balancer[constants.LOADBALANCER_ID])
        for amp in db_lb.amphorae:
            if amp.status == constants.DELETED:
                continue
            update_amps_subflow.add(
                amphora_driver_tasks.AmpListenersUpdate(
                    name=constants.AMP_LISTENER_UPDATE + '-' + str(amp_index),
                    requires=(constants.LOADBALANCER, constants.AMPHORAE),
                    inject={constants.AMPHORA_INDEX: amp_index,
                            constants.TIMEOUT_DICT: timeout_dict}))
            amp_index += 1

        failover_amphora_flow.add(update_amps_subflow)

        # Plug the member networks into the new amphora
        failover_amphora_flow.add(network_tasks.CalculateAmphoraDelta(
            requires=(constants.LOADBALANCER, constants.AMPHORA),
            provides=constants.DELTA))

        failover_amphora_flow.add(network_tasks.HandleNetworkDelta(
            requires=(constants.AMPHORA, constants.DELTA),
            provides=constants.ADDED_PORTS))

        failover_amphora_flow.add(amphora_driver_tasks.AmphoraePostNetworkPlug(
            requires=(constants.LOADBALANCER, constants.ADDED_PORTS)))

        failover_amphora_flow.add(database_tasks.ReloadLoadBalancer(
            name='octavia-failover-LB-reload-2',
            requires=constants.LOADBALANCER_ID,
            provides=constants.LOADBALANCER))

        # Handle the amphora role and VRRP if necessary
        if role == constants.ROLE_MASTER:
            failover_amphora_flow.add(database_tasks.MarkAmphoraMasterInDB(
                name=constants.MARK_AMP_MASTER_INDB,
                requires=constants.AMPHORA))
            vrrp_subflow = self.get_vrrp_subflow(role)
            failover_amphora_flow.add(vrrp_subflow)
        elif role == constants.ROLE_BACKUP:
            failover_amphora_flow.add(database_tasks.MarkAmphoraBackupInDB(
                name=constants.MARK_AMP_BACKUP_INDB,
                requires=constants.AMPHORA))
            vrrp_subflow = self.get_vrrp_subflow(role)
            failover_amphora_flow.add(vrrp_subflow)
        elif role == constants.ROLE_STANDALONE:
            failover_amphora_flow.add(
                database_tasks.MarkAmphoraStandAloneInDB(
                    name=constants.MARK_AMP_STANDALONE_INDB,
                    requires=constants.AMPHORA))

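        # Bring the listeners up on the new amphora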
        failover_amphora_flow.add(amphora_driver_tasks.ListenersStart(
            requires=(constants.LOADBALANCER, constants.AMPHORA)))
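        # Remove the failed amphora from health monitoring now that the
        # failover is complete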
        failover_amphora_flow.add(
            database_tasks.DisableAmphoraHealthMonitoring(
                rebind={constants.AMPHORA: constants.FAILED_AMPHORA},
                requires=constants.AMPHORA))

        return failover_amphora_flow
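
A similar hypothetical sketch for launching the failover flow; `amp_flows`, `failed_amp`, and `lb_dict` stand in for objects the Octavia controller worker would normally supply.

from taskflow import engines

flow = amp_flows.get_failover_flow(role=constants.ROLE_MASTER,
                                   load_balancer=lb_dict)
# FAILED_AMPHORA drives the early teardown tasks; LOADBALANCER_ID feeds
# the ReloadLoadBalancer tasks. Everything else is produced by the flow.
engines.load(flow, store={
    constants.FAILED_AMPHORA: failed_amp,
    constants.LOADBALANCER_ID: lb_dict[constants.LOADBALANCER_ID],
}).run()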