# Example #1
 def __init__(
     self,
     service: str,
     instance: str,
     cluster: str,
     config_dict: NativeServiceConfigDict,
     branch_dict: BranchDict,
     soa_dir: str,
     service_namespace_config: Optional[ServiceNamespaceConfig] = None,
 ) -> None:
     """Construct the native service config, defaulting the namespace config.

     ``service_namespace_config`` may be omitted/set to None at first, then
     set after initializing. e.g. we do this in load_paasta_native_job_config
     so we can call get_nerve_namespace() to figure out what SNC to read.
     It may also be set to None if this service is not in nerve.
     """
     super(NativeServiceConfig, self).__init__(
         service=service,
         instance=instance,
         cluster=cluster,
         config_dict=config_dict,
         branch_dict=branch_dict,
         soa_dir=soa_dir,
     )
     # Fall back to an empty ServiceNamespaceConfig so later attribute
     # access never has to None-check.
     if service_namespace_config is None:
         self.service_namespace_config = ServiceNamespaceConfig()
     else:
         self.service_namespace_config = service_namespace_config
# Example #2
def marathon_smartstack_status(
    service: str,
    instance: str,
    job_config: marathon_tools.MarathonServiceConfig,
    service_namespace_config: ServiceNamespaceConfig,
    tasks: Sequence[MarathonTask],
    should_return_individual_backends: bool = False,
) -> Mapping[str, Any]:
    """Summarize the smartstack state of a marathon instance per location.

    Queries haproxy/synapse on one host per discovery location and matches
    the backends it reports against the given marathon tasks.
    """
    registration = job_config.get_registrations()[0]
    discover_location_type = service_namespace_config.get_discover()

    monitoring_blacklist = job_config.get_monitoring_blacklist(
        system_deploy_blacklist=settings.system_paasta_config.get_deploy_blacklist()
    )
    filtered_slaves = get_all_slaves_for_blacklist_whitelist(
        blacklist=monitoring_blacklist, whitelist=None
    )
    grouped_slaves = get_mesos_slaves_grouped_by_attribute(
        slaves=filtered_slaves, attribute=discover_location_type
    )

    # Keep only the hostnames; the rest of the slave objects is not needed.
    slave_hostname_by_location = {
        location: [slave["hostname"] for slave in slaves]
        for location, slaves in grouped_slaves.items()
    }

    expected_smartstack_count = marathon_tools.get_expected_instance_count_for_namespace(
        service, instance, settings.cluster
    )
    # NOTE(review): this raises ZeroDivisionError when no locations were
    # discovered — presumably callers guarantee at least one slave; confirm.
    expected_count_per_location = int(
        expected_smartstack_count / len(slave_hostname_by_location)
    )

    smartstack_status: MutableMapping[str, Any] = {
        "registration": registration,
        "expected_backends_per_location": expected_count_per_location,
        "locations": [],
    }

    for location, hosts in slave_hostname_by_location.items():
        # Any host in a location can be asked about that location's backends.
        synapse_host = hosts[0]
        backends = get_backends(
            registration,
            synapse_host=synapse_host,
            synapse_port=settings.system_paasta_config.get_synapse_port(),
            synapse_haproxy_url_format=settings.system_paasta_config.get_synapse_haproxy_url_format(),
        )
        # Reverse sort by status string puts 'UP' backends above 'MAINT' ones.
        sorted_backends = sorted(
            backends, key=lambda backend: backend["status"], reverse=True
        )
        matched_backends_and_tasks = match_backends_and_tasks(sorted_backends, tasks)
        smartstack_status["locations"].append(
            build_smartstack_location_dict(
                location, matched_backends_and_tasks, should_return_individual_backends
            )
        )

    return smartstack_status
# Example #3
def marathon_service_mesh_status(
    service: str,
    service_mesh: pik.ServiceMesh,
    instance: str,
    job_config: marathon_tools.MarathonServiceConfig,
    service_namespace_config: ServiceNamespaceConfig,
    tasks: Sequence[MarathonTask],
    should_return_individual_backends: bool = False,
) -> Mapping[str, Any]:
    """Summarize service-mesh (smartstack or envoy) state per location."""
    registration = job_config.get_registrations()[0]
    discover_location_type = service_namespace_config.get_discover()

    grouped_slaves = get_mesos_slaves_grouped_by_attribute(
        slaves=get_slaves(), attribute=discover_location_type
    )

    # Keep only the hostnames; the rest of the slave objects is not needed.
    slave_hostname_by_location = {
        location: [slave["hostname"] for slave in slaves]
        for location, slaves in grouped_slaves.items()
    }

    expected_instance_count = marathon_tools.get_expected_instance_count_for_namespace(
        service, instance, settings.cluster
    )
    # NOTE(review): raises ZeroDivisionError when no locations were
    # discovered — presumably callers guarantee at least one slave; confirm.
    expected_count_per_location = int(
        expected_instance_count / len(slave_hostname_by_location)
    )

    service_mesh_status: MutableMapping[str, Any] = {
        "registration": registration,
        "expected_backends_per_location": expected_count_per_location,
        "locations": [],
    }

    for location, hosts in slave_hostname_by_location.items():
        # Any host in a location can be queried for that location's backends.
        if service_mesh == pik.ServiceMesh.SMARTSTACK:
            location_dict = _build_smartstack_location_dict_for_backends(
                synapse_host=hosts[0],
                registration=registration,
                tasks=tasks,
                location=location,
                should_return_individual_backends=should_return_individual_backends,
            )
        elif service_mesh == pik.ServiceMesh.ENVOY:
            location_dict = _build_envoy_location_dict_for_backends(
                envoy_host=hosts[0],
                registration=registration,
                tasks=tasks,
                location=location,
                should_return_individual_backends=should_return_individual_backends,
            )
        else:
            # Unknown mesh kinds contribute nothing, matching the original
            # if/elif behavior.
            continue
        service_mesh_status["locations"].append(location_dict)

    return service_mesh_status
# Example #4
def test_get_deployments_strings_non_listening_service():
    """A service without a nerve namespace shows N/A for every cluster."""
    with mock.patch(
        "paasta_tools.cli.cmds.info.get_actual_deployments", autospec=True
    ) as mock_get_actual_deployments, mock.patch(
        "paasta_tools.cli.cmds.info.load_service_namespace_config", autospec=True
    ) as mock_load_service_namespace_config:
        mock_get_actual_deployments.return_value = ["clusterA.main", "clusterB.main"]
        # Empty SNC -> no proxy_port -> no URL to render.
        mock_load_service_namespace_config.return_value = ServiceNamespaceConfig()

        actual = info.get_deployments_strings("unused", "/fake/soa/dir")

        assert " - clusterA (N/A)" in actual
        assert " - clusterB (N/A)" in actual
# Example #5
def test_get_deployments_strings_non_listening_service():
    """A service without a nerve namespace shows N/A for every cluster."""
    with mock.patch(
        'paasta_tools.cli.cmds.info.get_actual_deployments', autospec=True,
    ) as mock_get_actual_deployments, mock.patch(
        'paasta_tools.cli.cmds.info.load_service_namespace_config', autospec=True,
    ) as mock_load_service_namespace_config:
        mock_get_actual_deployments.return_value = ['clusterA.main', 'clusterB.main']
        # Empty SNC -> no proxy_port -> no URL to render.
        mock_load_service_namespace_config.return_value = ServiceNamespaceConfig()

        actual = info.get_deployments_strings('unused', '/fake/soa/dir')

        assert ' - clusterA (N/A)' in actual
        assert ' - clusterB (N/A)' in actual
# Example #6
 def __init__(
     self,
     service,
     instance,
     cluster,
     config_dict,
     branch_dict,
     service_namespace_config=None,
 ):
     """Construct the native service config, defaulting the namespace config.

     ``service_namespace_config`` may be omitted/set to None at first, then
     set after initializing. e.g. we do this in load_paasta_native_job_config
     so we can call get_nerve_namespace() to figure out what SNC to read.
     It may also be set to None if this service is not in nerve.
     """
     super(PaastaNativeServiceConfig, self).__init__(
         service=service,
         instance=instance,
         cluster=cluster,
         config_dict=config_dict,
         branch_dict=branch_dict,
     )
     # Fall back to an empty ServiceNamespaceConfig so later attribute
     # access never has to None-check.
     self.service_namespace_config = (
         service_namespace_config
         if service_namespace_config is not None
         else ServiceNamespaceConfig()
     )
# Example #7
def test_get_deployments_strings_protocol_tcp_case():
    """A tcp-mode namespace renders a cyan tcp:// URL per cluster."""
    with mock.patch(
        "paasta_tools.cli.cmds.info.get_actual_deployments", autospec=True
    ) as mock_get_actual_deployments, mock.patch(
        "paasta_tools.cli.cmds.info.load_service_namespace_config", autospec=True
    ) as mock_load_service_namespace_config:
        mock_get_actual_deployments.return_value = ["clusterA.main", "clusterB.main"]
        mock_load_service_namespace_config.return_value = ServiceNamespaceConfig(
            {"mode": "tcp", "proxy_port": 8080}
        )

        actual = info.get_deployments_strings("unused", "/fake/soa/dir")

        expected_a = " - clusterA (%s)" % PaastaColors.cyan(
            "tcp://paasta-clusterA.yelp:8080/"
        )
        expected_b = " - clusterB (%s)" % PaastaColors.cyan(
            "tcp://paasta-clusterB.yelp:8080/"
        )
        assert expected_a in actual
        assert expected_b in actual
# Example #8
def test_get_deployments_strings_protocol_tcp_case():
    """A tcp-mode namespace renders a cyan tcp:// URL per cluster."""
    with mock.patch(
        'paasta_tools.cli.cmds.info.get_actual_deployments', autospec=True,
    ) as mock_get_actual_deployments, mock.patch(
        'paasta_tools.cli.cmds.info.load_service_namespace_config', autospec=True,
    ) as mock_load_service_namespace_config:
        mock_get_actual_deployments.return_value = ['clusterA.main', 'clusterB.main']
        mock_load_service_namespace_config.return_value = ServiceNamespaceConfig(
            {'mode': 'tcp', 'proxy_port': 8080}
        )

        actual = info.get_deployments_strings('unused', '/fake/soa/dir')

        expected_a = ' - clusterA (%s)' % PaastaColors.cyan(
            'tcp://paasta-clusterA.yelp:8080/'
        )
        expected_b = ' - clusterB (%s)' % PaastaColors.cyan(
            'tcp://paasta-clusterB.yelp:8080/'
        )
        assert expected_a in actual
        assert expected_b in actual
# Example #9
    def get_sidecar_containers(
        self,
        system_paasta_config: SystemPaastaConfig,
        service_namespace_config: ServiceNamespaceConfig,
    ) -> Sequence[V1Container]:
        """Return sidecar containers for this instance (hacheck, if in smartstack)."""
        registrations = " ".join(self.get_registrations())

        # s_m_j currently asserts that services are healthy in smartstack
        # before continuing a bounce; this readiness check lets us achieve
        # the same thing.
        readiness_probe: Optional[V1Probe] = None
        if system_paasta_config.get_enable_nerve_readiness_check():
            check_command = [
                system_paasta_config.get_nerve_readiness_check_script(),
                str(self.get_container_port()),
            ] + self.get_registrations()
            readiness_probe = V1Probe(
                _exec=V1ExecAction(command=check_command),
                initial_delay_seconds=10,
                period_seconds=10,
            )

        if not service_namespace_config.is_in_smartstack():
            return []

        # On shutdown, pull the service out of rotation and linger long
        # enough for haproxy to notice before the pod dies.
        hadown_prestop = V1Lifecycle(
            pre_stop=V1Handler(
                _exec=V1ExecAction(
                    command=[
                        "/bin/sh",
                        "-c",
                        f"/usr/bin/hadown {registrations}; sleep 31",
                    ]
                )
            )
        )
        return [
            V1Container(
                image=system_paasta_config.get_hacheck_sidecar_image_url(),
                lifecycle=hadown_prestop,
                name=HACHECK_POD_NAME,
                env=self.get_kubernetes_environment(),
                ports=[V1ContainerPort(container_port=6666)],
                readiness_probe=readiness_probe,
            )
        ]
def test_kubernetes_smartstack_status(
    mock_get_expected_instance_count_for_namespace,
    mock_get_all_nodes,
    mock_kube_smartstack_replication_checker,
    mock_get_backends,
    mock_match_backends_and_pods,
):
    """One UP backend in one region is reported with its pod association."""
    mock_get_all_nodes.return_value = [
        {"hostname": "host1.paasta.party", "attributes": {"region": "us-north-3"}}
    ]
    mock_kube_smartstack_replication_checker.return_value.get_allowed_locations_and_hosts.return_value = {
        "us-north-3": [SmartstackHost(hostname="host1.paasta.party", pool="default")]
    }
    mock_get_expected_instance_count_for_namespace.return_value = 2

    mock_backend = HaproxyBackend(
        status="UP",
        svname="host1_1.2.3.4:123",
        check_status="L7OK",
        check_code="0",
        check_duration="1",
        lastchg="9876",
    )
    mock_pod = mock.create_autospec(V1Pod)
    mock_match_backends_and_pods.return_value = [(mock_backend, mock_pod)]

    mock_job_config = kubernetes_tools.KubernetesDeploymentConfig(
        service="fake_service",
        cluster="fake_cluster",
        instance="fake_instance",
        config_dict={"bounce_method": "fake_bounce"},
        branch_dict=None,
    )

    smartstack_status = instance.kubernetes_smartstack_status(
        "fake_service",
        "fake_instance",
        mock_job_config,
        ServiceNamespaceConfig(),
        pods=[mock_pod],
        should_return_individual_backends=True,
    )

    expected_backend = {
        "hostname": "host1:1.2.3.4",
        "port": 123,
        "status": "UP",
        "check_status": "L7OK",
        "check_code": "0",
        "last_change": 9876,
        "has_associated_task": True,
        "check_duration": 1,
    }
    assert smartstack_status == {
        "registration": "fake_service.fake_instance",
        "expected_backends_per_location": 2,
        "locations": [
            {
                "name": "us-north-3",
                "running_backends_count": 1,
                "backends": [expected_backend],
            }
        ],
    }
def test_marathon_smartstack_status(
    mock_get_slaves,
    mock_get_expected_instance_count_for_namespace,
    mock_get_backends,
    mock_match_backends_and_tasks,
):
    """One UP backend in one region is reported with its task association."""
    mock_get_slaves.return_value = [
        {"hostname": "host1.paasta.party", "attributes": {"region": "us-north-3"}}
    ]
    mock_get_expected_instance_count_for_namespace.return_value = 2

    mock_backend = HaproxyBackend(
        status="UP",
        svname="host1_1.2.3.4:123",
        check_status="L7OK",
        check_code="0",
        check_duration="1",
        lastchg="9876",
    )
    mock_task = mock.create_autospec(MarathonTask)
    mock_match_backends_and_tasks.return_value = [(mock_backend, mock_task)]

    # The real system config would hit disk; stub it with no blacklist.
    settings.system_paasta_config = mock.create_autospec(SystemPaastaConfig)
    settings.system_paasta_config.get_deploy_blacklist.return_value = []

    mock_service_config = marathon_tools.MarathonServiceConfig(
        service="fake_service",
        cluster="fake_cluster",
        instance="fake_instance",
        config_dict={"bounce_method": "fake_bounce"},
        branch_dict=None,
    )

    smartstack_status = instance.marathon_smartstack_status(
        "fake_service",
        "fake_instance",
        mock_service_config,
        ServiceNamespaceConfig(),
        tasks=[mock_task],
        should_return_individual_backends=True,
    )

    expected_backend = {
        "hostname": "host1",
        "port": 123,
        "status": "UP",
        "check_status": "L7OK",
        "check_code": "0",
        "last_change": 9876,
        "has_associated_task": True,
        "check_duration": 1,
    }
    assert smartstack_status == {
        "registration": "fake_service.fake_instance",
        "expected_backends_per_location": 2,
        "locations": [
            {
                "name": "us-north-3",
                "running_backends_count": 1,
                "backends": [expected_backend],
            }
        ],
    }
# Example #12
async def test_kubernetes_smartstack_status():
    """mesh_status(SMARTSTACK) groups the matched backends by location."""
    with asynctest.patch(
        "paasta_tools.api.views.instance.pik.match_backends_and_pods", autospec=True
    ) as mock_match_backends_and_pods, asynctest.patch(
        "paasta_tools.api.views.instance.pik.smartstack_tools.get_backends",
        autospec=True,
    ), asynctest.patch(
        "paasta_tools.api.views.instance.pik.KubeSmartstackEnvoyReplicationChecker",
        autospec=True,
    ) as mock_replication_checker, asynctest.patch(
        "paasta_tools.api.views.instance.pik.kubernetes_tools.get_all_nodes",
        autospec=True,
    ) as mock_get_all_nodes, asynctest.patch(
        "paasta_tools.api.views.instance.marathon_tools.get_expected_instance_count_for_namespace",
        autospec=True,
    ) as mock_expected_instance_count:
        # A single node in a single region.
        mock_get_all_nodes.return_value = [
            {"hostname": "host1.paasta.party", "attributes": {"region": "us-north-3"}}
        ]
        mock_replication_checker.return_value.get_allowed_locations_and_hosts.return_value = {
            "us-north-3": [
                DiscoveredHost(hostname="host1.paasta.party", pool="default")
            ]
        }
        mock_expected_instance_count.return_value = 2

        backend = HaproxyBackend(
            status="UP",
            svname="host1_1.2.3.4:123",
            check_status="L7OK",
            check_code="0",
            check_duration="1",
            lastchg="9876",
        )
        pod = mock.create_autospec(V1Pod)
        mock_match_backends_and_pods.return_value = [(backend, pod)]

        job_config = kubernetes_tools.KubernetesDeploymentConfig(
            service="fake_service",
            cluster="fake_cluster",
            instance="fake_instance",
            config_dict={"bounce_method": "fake_bounce"},
            branch_dict=None,
        )

        smartstack_status = await instance.pik.mesh_status(
            service="fake_service",
            service_mesh=ServiceMesh.SMARTSTACK,
            instance="fake_instance",
            job_config=job_config,
            service_namespace_config=ServiceNamespaceConfig(),
            pods_task=wrap_value_in_task([pod]),
            should_return_individual_backends=True,
            settings=mock.Mock(),
        )

        expected_backend = {
            "hostname": "host1:1.2.3.4",
            "port": 123,
            "status": "UP",
            "check_status": "L7OK",
            "check_code": "0",
            "last_change": 9876,
            "has_associated_task": True,
            "check_duration": 1,
        }
        assert smartstack_status == {
            "registration": "fake_service.fake_instance",
            "expected_backends_per_location": 2,
            "locations": [
                {
                    "name": "us-north-3",
                    "running_backends_count": 1,
                    "backends": [expected_backend],
                }
            ],
        }