Example #1
    def get_failover_flow(self,
                          role=constants.ROLE_STANDALONE,
                          load_balancer=None):
        """Creates a flow to failover a stale amphora

        :returns: The flow for amphora failover
        """

        failover_amphora_flow = linear_flow.Flow(
            constants.FAILOVER_AMPHORA_FLOW)

        failover_amphora_flow.add(
            lifecycle_tasks.AmphoraToErrorOnRevertTask(
                rebind={constants.AMPHORA: constants.FAILED_AMPHORA},
                requires=constants.AMPHORA))

        failover_amphora_flow.add(
            network_tasks.FailoverPreparationForAmphora(
                rebind={constants.AMPHORA: constants.FAILED_AMPHORA},
                requires=constants.AMPHORA))

        # Note: It seems intuitive to boot an amphora prior to deleting
        #       the old amphora, however this is a complicated issue.
        #       If the target host (due to anti-affinity) is resource
        #       constrained, this will fail where a post-delete will
        #       succeed. Since this is async with the API it would result
        #       in the LB ending in ERROR even though the amps are still
        #       alive. Consider in the future making this a complicated
        #       try-on-failure-retry flow, or move upgrade failovers to be
        #       synchronous with the API. For now the spares pool and
        #       act/stdby will mitigate most of this delay.

        # Delete the old amphora
        failover_amphora_flow.add(
            database_tasks.MarkAmphoraPendingDeleteInDB(
                rebind={constants.AMPHORA: constants.FAILED_AMPHORA},
                requires=constants.AMPHORA))
        failover_amphora_flow.add(
            database_tasks.MarkAmphoraHealthBusy(
                rebind={constants.AMPHORA: constants.FAILED_AMPHORA},
                requires=constants.AMPHORA))
        failover_amphora_flow.add(
            compute_tasks.ComputeDelete(
                rebind={constants.AMPHORA: constants.FAILED_AMPHORA},
                requires=constants.AMPHORA))
        failover_amphora_flow.add(
            network_tasks.WaitForPortDetach(
                rebind={constants.AMPHORA: constants.FAILED_AMPHORA},
                requires=constants.AMPHORA))
        failover_amphora_flow.add(
            database_tasks.MarkAmphoraDeletedInDB(
                rebind={constants.AMPHORA: constants.FAILED_AMPHORA},
                requires=constants.AMPHORA))

        # If this is an unallocated amp (spares pool), we're done
        if not load_balancer:
            failover_amphora_flow.add(
                database_tasks.DisableAmphoraHealthMonitoring(
                    rebind={constants.AMPHORA: constants.FAILED_AMPHORA},
                    requires=constants.AMPHORA))
            return failover_amphora_flow

        # Save failed amphora details for later
        failover_amphora_flow.add(
            database_tasks.GetAmphoraDetails(
                rebind={constants.AMPHORA: constants.FAILED_AMPHORA},
                requires=constants.AMPHORA,
                provides=constants.AMP_DATA))

        # Get a new amphora
        # Note: Role doesn't matter here.  We will update it later.
        get_amp_subflow = self.get_amphora_for_lb_subflow(
            prefix=constants.FAILOVER_AMPHORA_FLOW)
        failover_amphora_flow.add(get_amp_subflow)

        # Update the new amphora with the failed amphora details
        failover_amphora_flow.add(
            database_tasks.UpdateAmpFailoverDetails(
                requires=(constants.AMPHORA, constants.AMP_DATA)))

        # Update the data stored in the flow from the database
        failover_amphora_flow.add(
            database_tasks.ReloadLoadBalancer(
                requires=constants.LOADBALANCER_ID,
                provides=constants.LOADBALANCER))

        failover_amphora_flow.add(
            database_tasks.ReloadAmphora(requires=constants.AMPHORA_ID,
                                         provides=constants.AMPHORA))

        # Prepare to reconnect the network interface(s)
        failover_amphora_flow.add(
            network_tasks.GetAmphoraeNetworkConfigs(
                requires=constants.LOADBALANCER,
                provides=constants.AMPHORAE_NETWORK_CONFIG))
        failover_amphora_flow.add(
            database_tasks.GetListenersFromLoadbalancer(
                requires=constants.LOADBALANCER, provides=constants.LISTENERS))
        failover_amphora_flow.add(
            database_tasks.GetAmphoraeFromLoadbalancer(
                requires=constants.LOADBALANCER, provides=constants.AMPHORAE))

        # Plug the VIP ports into the new amphora
        # These steps are done here because UDP listeners need some kernel
        # configuration applied before the listener update, so that the
        # amphora rebuild does not fail.
        failover_amphora_flow.add(
            network_tasks.PlugVIPPort(
                requires=(constants.AMPHORA,
                          constants.AMPHORAE_NETWORK_CONFIG)))
        failover_amphora_flow.add(
            amphora_driver_tasks.AmphoraPostVIPPlug(
                requires=(constants.AMPHORA, constants.LOADBALANCER,
                          constants.AMPHORAE_NETWORK_CONFIG)))

        # The listener update needs to be run on all amphorae to update
        # their peer configurations, so parallelize this with an
        # unordered subflow.
        update_amps_subflow = unordered_flow.Flow(
            constants.UPDATE_AMPS_SUBFLOW)

        timeout_dict = {
            constants.CONN_MAX_RETRIES:
            CONF.haproxy_amphora.active_connection_max_retries,
            constants.CONN_RETRY_INTERVAL:
            CONF.haproxy_amphora.active_connection_rety_interval
        }

        # Set up parallel flows for each amp. We don't know the new amp
        # details at flow creation time, so set up a subflow for each
        # amp on the LB; the tasks index into a list of amps to find
        # the amphora they should work on.
        amp_index = 0
        for amp in load_balancer.amphorae:
            if amp.status == constants.DELETED:
                continue
            update_amps_subflow.add(
                amphora_driver_tasks.AmpListenersUpdate(
                    name=constants.AMP_LISTENER_UPDATE + '-' + str(amp_index),
                    requires=(constants.LOADBALANCER, constants.AMPHORAE),
                    inject={
                        constants.AMPHORA_INDEX: amp_index,
                        constants.TIMEOUT_DICT: timeout_dict
                    }))
            amp_index += 1

        failover_amphora_flow.add(update_amps_subflow)

        # Plug the member networks into the new amphora
        failover_amphora_flow.add(
            network_tasks.CalculateAmphoraDelta(
                requires=(constants.LOADBALANCER, constants.AMPHORA),
                provides=constants.DELTA))

        failover_amphora_flow.add(
            network_tasks.HandleNetworkDelta(requires=(constants.AMPHORA,
                                                       constants.DELTA),
                                             provides=constants.ADDED_PORTS))

        failover_amphora_flow.add(
            amphora_driver_tasks.AmphoraePostNetworkPlug(
                requires=(constants.LOADBALANCER, constants.ADDED_PORTS)))

        failover_amphora_flow.add(
            database_tasks.ReloadLoadBalancer(
                name='octavia-failover-LB-reload-2',
                requires=constants.LOADBALANCER_ID,
                provides=constants.LOADBALANCER))

        # Handle the amphora role and VRRP if necessary
        if role == constants.ROLE_MASTER:
            failover_amphora_flow.add(
                database_tasks.MarkAmphoraMasterInDB(
                    name=constants.MARK_AMP_MASTER_INDB,
                    requires=constants.AMPHORA))
            vrrp_subflow = self.get_vrrp_subflow(role)
            failover_amphora_flow.add(vrrp_subflow)
        elif role == constants.ROLE_BACKUP:
            failover_amphora_flow.add(
                database_tasks.MarkAmphoraBackupInDB(
                    name=constants.MARK_AMP_BACKUP_INDB,
                    requires=constants.AMPHORA))
            vrrp_subflow = self.get_vrrp_subflow(role)
            failover_amphora_flow.add(vrrp_subflow)
        elif role == constants.ROLE_STANDALONE:
            failover_amphora_flow.add(
                database_tasks.MarkAmphoraStandAloneInDB(
                    name=constants.MARK_AMP_STANDALONE_INDB,
                    requires=constants.AMPHORA))

        failover_amphora_flow.add(
            amphora_driver_tasks.ListenersStart(
                requires=(constants.LOADBALANCER, constants.AMPHORA)))
        failover_amphora_flow.add(
            database_tasks.DisableAmphoraHealthMonitoring(
                rebind={constants.AMPHORA: constants.FAILED_AMPHORA},
                requires=constants.AMPHORA))

        return failover_amphora_flow
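
The flow above relies on taskflow's requires/rebind/provides contract: each task declares the flow-storage keys it reads, rebind remaps an argument name to a different storage key (here, pointing the generic AMPHORA argument at the FAILED_AMPHORA record), and provides names the key the task's result is stored under. The following is a minimal, self-contained sketch of that mechanism; the task, key names, and data are hypothetical and not part of Octavia.

from taskflow import engines
from taskflow import task
from taskflow.patterns import linear_flow


class MarkDeleted(task.Task):
    def execute(self, amphora):
        # The task only sees the argument name "amphora"; rebind decides
        # which flow-storage key actually feeds it.
        return dict(amphora, status='DELETED')


flow = linear_flow.Flow('rebind-demo')
flow.add(MarkDeleted(
    # Read the 'failed_amphora' storage key but expose it to execute()
    # as the 'amphora' argument, mirroring the rebind calls above.
    rebind={'amphora': 'failed_amphora'},
    provides='deleted_amphora'))

# The store seeds flow storage with the keys the tasks require.
results = engines.run(flow, store={'failed_amphora': {'id': 'amp-1'}})
print(results['deleted_amphora'])  # {'id': 'amp-1', 'status': 'DELETED'}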
Example #2
    def get_amphora_for_lb_failover_subflow(self,
                                            prefix,
                                            role=constants.ROLE_STANDALONE,
                                            failed_amp_vrrp_port_id=None,
                                            is_vrrp_ipv6=False,
                                            is_spare=False):
        """Creates a new amphora that will be used in a failover flow.

        :requires: loadbalancer_id, flavor, vip, vip_sg_id, loadbalancer
        :provides: amphora_id, amphora
        :param prefix: The flow name prefix to use on the flow and tasks.
        :param role: The role this amphora will have in the topology.
        :param failed_amp_vrrp_port_id: The base port ID of the failed amp.
        :param is_vrrp_ipv6: True if the base port IP is IPv6.
        :param is_spare: True if we are getting a spare amphora.
        :return: A Taskflow sub-flow that will create the amphora.
        """

        sf_name = prefix + '-' + constants.CREATE_AMP_FOR_FAILOVER_SUBFLOW

        amp_for_failover_flow = linear_flow.Flow(sf_name)

        # Try to allocate or boot an amphora instance (unconfigured)
        amp_for_failover_flow.add(
            self.get_amphora_for_lb_subflow(
                prefix=prefix + '-' + constants.FAILOVER_LOADBALANCER_FLOW,
                role=role,
                is_spare=is_spare))

        # If we are getting a spare amphora, this is all we need to do.
        if is_spare:
            return amp_for_failover_flow

        # Create the VIP base (aka VRRP) port for the amphora.
        amp_for_failover_flow.add(
            network_tasks.CreateVIPBasePort(
                name=prefix + '-' + constants.CREATE_VIP_BASE_PORT,
                requires=(constants.VIP, constants.VIP_SG_ID,
                          constants.AMPHORA_ID),
                provides=constants.BASE_PORT))

        # Attach the VIP base (aka VRRP) port to the amphora.
        amp_for_failover_flow.add(
            compute_tasks.AttachPort(
                name=prefix + '-' + constants.ATTACH_PORT,
                requires=(constants.AMPHORA, constants.PORT),
                rebind={constants.PORT: constants.BASE_PORT}))

        # Update the amphora database record with the VIP base port info.
        amp_for_failover_flow.add(
            database_tasks.UpdateAmpFailoverDetails(
                name=prefix + '-' + constants.UPDATE_AMP_FAILOVER_DETAILS,
                requires=(constants.AMPHORA, constants.VIP,
                          constants.BASE_PORT)))

        # Make sure the amphora in the flow storage is up to date
        # or the vrrp_ip will be empty
        amp_for_failover_flow.add(
            database_tasks.ReloadAmphora(name=prefix + '-' +
                                         constants.RELOAD_AMPHORA,
                                         requires=constants.AMPHORA_ID,
                                         provides=constants.AMPHORA))

        # Update the amphora networking for the plugged VIP port
        amp_for_failover_flow.add(
            network_tasks.GetAmphoraNetworkConfigsByID(
                name=prefix + '-' +
                constants.GET_AMPHORA_NETWORK_CONFIGS_BY_ID,
                requires=(constants.LOADBALANCER_ID, constants.AMPHORA_ID),
                provides=constants.AMPHORAE_NETWORK_CONFIG))

        # Disable the base (vrrp) port on the failed amphora
        # This prevents a DAD failure when bringing up the new amphora.
        # Keepalived will handle this for act/stdby.
        if (role == constants.ROLE_STANDALONE and failed_amp_vrrp_port_id
                and is_vrrp_ipv6):
            amp_for_failover_flow.add(
                network_tasks.AdminDownPort(
                    name=prefix + '-' + constants.ADMIN_DOWN_PORT,
                    inject={constants.PORT_ID: failed_amp_vrrp_port_id}))

        amp_for_failover_flow.add(
            amphora_driver_tasks.AmphoraPostVIPPlug(
                name=prefix + '-' + constants.AMPHORA_POST_VIP_PLUG,
                requires=(constants.AMPHORA, constants.LOADBALANCER,
                          constants.AMPHORAE_NETWORK_CONFIG)))

        # Plug member ports
        amp_for_failover_flow.add(
            network_tasks.CalculateAmphoraDelta(
                name=prefix + '-' + constants.CALCULATE_AMPHORA_DELTA,
                requires=(constants.LOADBALANCER, constants.AMPHORA,
                          constants.AVAILABILITY_ZONE, constants.VRRP_PORT),
                rebind={constants.VRRP_PORT: constants.BASE_PORT},
                provides=constants.DELTA))

        amp_for_failover_flow.add(
            network_tasks.HandleNetworkDelta(
                name=prefix + '-' + constants.HANDLE_NETWORK_DELTA,
                requires=(constants.AMPHORA, constants.DELTA),
                provides=constants.ADDED_PORTS))

        amp_for_failover_flow.add(
            amphora_driver_tasks.AmphoraePostNetworkPlug(
                name=prefix + '-' + constants.AMPHORAE_POST_NETWORK_PLUG,
                requires=(constants.LOADBALANCER, constants.ADDED_PORTS)))

        return amp_for_failover_flow
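
Sub-flows like the one above are built with a name prefix so the same tasks can be reused by several parent flows without name collisions, and inject is used to feed task-local values (such as the timeout dict in Example #1) without touching shared flow storage. Below is a small, hypothetical sketch of that composition pattern, with illustrative names rather than Octavia constants.

from taskflow import engines
from taskflow import task
from taskflow.patterns import linear_flow


class PlugPort(task.Task):
    def execute(self, amphora_id, port_id):
        print('plugging %s into %s' % (port_id, amphora_id))


def get_plug_subflow(prefix):
    # Prefixed names keep tasks unique when the sub-flow is reused by
    # different parent flows (as with the failover prefixes above).
    subflow = linear_flow.Flow(prefix + '-plug-subflow')
    subflow.add(PlugPort(
        name=prefix + '-plug-port',
        # inject supplies a task-local value instead of reading it from
        # the shared flow storage.
        inject={'port_id': 'port-1'}))
    return subflow


parent = linear_flow.Flow('failover-demo')
parent.add(get_plug_subflow('amp-failover'))
engines.run(parent, store={'amphora_id': 'amp-1'})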
Example #3
    def test_calculate_amphora_delta(self, mock_get_net_driver):
        DELETE_NETWORK_ID = uuidutils.generate_uuid()
        MEMBER_NETWORK_ID = uuidutils.generate_uuid()
        MEMBER_SUBNET_ID = uuidutils.generate_uuid()
        VRRP_PORT_ID = uuidutils.generate_uuid()
        mock_driver = mock.MagicMock()
        mock_get_net_driver.return_value = mock_driver
        member_mock = mock.MagicMock()
        member_mock.subnet_id = MEMBER_SUBNET_ID
        pool_mock = mock.MagicMock()
        pool_mock.members = [member_mock]
        lb_mock = mock.MagicMock()
        lb_mock.pools = [pool_mock]
        amphora_mock = mock.MagicMock()
        amphora_mock.id = AMPHORA_ID
        amphora_mock.compute_id = COMPUTE_ID
        amphora_mock.vrrp_port_id = VRRP_PORT_ID
        vrrp_port_mock = mock.MagicMock()
        vrrp_port_mock.network_id = self.boot_net_id
        mock_subnet = mock.MagicMock()
        mock_subnet.network_id = MEMBER_NETWORK_ID
        nic1_delete_mock = mock.MagicMock()
        nic1_delete_mock.network_id = DELETE_NETWORK_ID
        nic2_keep_mock = mock.MagicMock()
        nic2_keep_mock.network_id = self.boot_net_id

        mock_driver.get_port.return_value = vrrp_port_mock
        mock_driver.get_subnet.return_value = mock_subnet
        mock_driver.get_plugged_networks.return_value = [
            nic1_delete_mock, nic2_keep_mock
        ]

        calc_amp_delta = network_tasks.CalculateAmphoraDelta()

        # Test without vrrp_port (the port is looked up via vrrp_port_id)
        result = calc_amp_delta.execute(lb_mock, amphora_mock, {})

        self.assertEqual(AMPHORA_ID, result.amphora_id)
        self.assertEqual(COMPUTE_ID, result.compute_id)
        self.assertEqual(1, len(result.add_nics))
        self.assertEqual(MEMBER_NETWORK_ID, result.add_nics[0].network_id)
        self.assertEqual(1, len(result.delete_nics))
        self.assertEqual(DELETE_NETWORK_ID, result.delete_nics[0].network_id)
        mock_driver.get_port.assert_called_once_with(VRRP_PORT_ID)
        mock_driver.get_subnet.assert_called_once_with(MEMBER_SUBNET_ID)
        mock_driver.get_plugged_networks.assert_called_once_with(COMPUTE_ID)

        # Test with vrrp_port provided
        mock_driver.reset_mock()

        result = calc_amp_delta.execute(lb_mock,
                                        amphora_mock, {},
                                        vrrp_port=vrrp_port_mock)

        self.assertEqual(AMPHORA_ID, result.amphora_id)
        self.assertEqual(COMPUTE_ID, result.compute_id)
        self.assertEqual(1, len(result.add_nics))
        self.assertEqual(MEMBER_NETWORK_ID, result.add_nics[0].network_id)
        self.assertEqual(1, len(result.delete_nics))
        self.assertEqual(DELETE_NETWORK_ID, result.delete_nics[0].network_id)
        mock_driver.get_port.assert_not_called()
        mock_driver.get_subnet.assert_called_once_with(MEMBER_SUBNET_ID)
        mock_driver.get_plugged_networks.assert_called_once_with(COMPUTE_ID)
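
The test above receives mock_get_net_driver from a mock.patch decorator that is not shown in the snippet (it is applied at the class or method level in the real test module). Below is a self-contained sketch of that pattern with hypothetical module and function names, showing how the patched factory is swapped for a MagicMock whose calls can then be asserted.

import unittest
from unittest import mock


def get_net_driver():
    # Stand-in for the real driver factory the production code calls.
    raise RuntimeError('should be patched in tests')


def list_plugged_networks(compute_id):
    # Production-style helper that uses the factory.
    return get_net_driver().get_plugged_networks(compute_id)


class TestPluggedNetworks(unittest.TestCase):

    @mock.patch(__name__ + '.get_net_driver')
    def test_list_plugged_networks(self, mock_get_net_driver):
        # The decorator passes the patched factory as the last argument.
        mock_driver = mock.MagicMock()
        mock_driver.get_plugged_networks.return_value = ['net-1']
        mock_get_net_driver.return_value = mock_driver

        self.assertEqual(['net-1'], list_plugged_networks('compute-1'))
        mock_driver.get_plugged_networks.assert_called_once_with('compute-1')


if __name__ == '__main__':
    unittest.main()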