Code example #1 (score: 0)
File: utils.py — Project: bakdata/streams-explorer
def get_streaming_app_stateful_set(
    name: str = "test-app",
    input_topics: str = "input-topic",
    output_topic: str = "output-topic",
    error_topic: str = "error-topic",
    multiple_inputs: Optional[str] = None,
    multiple_outputs: Optional[str] = None,
    extra: Optional[Dict[str, str]] = None,
    env_prefix: str = "APP_",
    pipeline: Optional[str] = None,
    consumer_group: Optional[str] = None,
    service_name: str = "test-service",
    config_type: ConfigType = ConfigType.ENV,
) -> V1StatefulSet:
    """Build a ``V1StatefulSet`` test fixture for a streaming app.

    :param name: app name used for the object metadata
    :param input_topics: input topic(s) written into the pod template env
    :param output_topic: output topic written into the pod template env
    :param error_topic: error topic written into the pod template env
    :param multiple_inputs: optional multi-input topic spec
    :param multiple_outputs: optional multi-output topic spec
    :param extra: extra env entries for the template; ``None`` means none
    :param env_prefix: prefix for the generated env var names
    :param pipeline: optional pipeline label for the metadata
    :param consumer_group: optional consumer group for the template
    :param service_name: StatefulSet service name
    :param config_type: how the config is rendered (env vars by default)
    :return: assembled ``V1StatefulSet``
    """
    template = get_template(
        input_topics,
        output_topic,
        error_topic,
        multiple_inputs=multiple_inputs,
        multiple_outputs=multiple_outputs,
        # Fix: the default used to be a shared mutable ``{}``; pass a fresh
        # dict per call instead so mutations in get_template cannot leak
        # between calls.
        extra=extra if extra is not None else {},
        env_prefix=env_prefix,
        consumer_group=consumer_group,
        config_type=config_type,
    )
    metadata = get_metadata(name, pipeline=pipeline)
    spec = V1StatefulSetSpec(
        service_name=service_name,
        template=template,
        # Empty selector: fine for a fixture, not a deployable object.
        selector=V1LabelSelector(),
    )

    return V1StatefulSet(metadata=metadata, spec=spec)
Code example #2 (score: 0)
File: utils.py — Project: survivant/streams-explorer
def get_streaming_app_stateful_set(
    name,
    input_topics,
    output_topic,
    error_topic,
    multiple_inputs=None,
    multiple_outputs=None,
    env_prefix="APP_",
    pipeline=None,
    consumer_group=None,
    service_name="test-service",
) -> V1StatefulSet:
    """Assemble a ``V1StatefulSet`` test object for a streaming application."""
    # Pod template carrying the topic configuration as environment variables.
    pod_template = get_template(
        input_topics,
        output_topic,
        error_topic,
        multiple_inputs=multiple_inputs,
        multiple_outputs=multiple_outputs,
        env_prefix=env_prefix,
        consumer_group=consumer_group,
    )
    stateful_set_spec = V1StatefulSetSpec(
        service_name=service_name,
        template=pod_template,
        selector="app=test-app,release=test-release",
    )

    return V1StatefulSet(
        metadata=get_metadata(name, pipeline=pipeline),
        spec=stateful_set_spec,
    )
Code example #3 (score: 0)
def test_update_kubernetes_application():
    """update_kubernetes_application must dispatch on the application type."""
    patch_deployment = mock.patch(
        'paasta_tools.setup_kubernetes_job.update_deployment',
        autospec=True,
    )
    patch_stateful_set = mock.patch(
        'paasta_tools.setup_kubernetes_job.update_stateful_set',
        autospec=True,
    )
    with patch_deployment as update_deployment_mock, \
            patch_stateful_set as update_stateful_set_mock:
        # A V1StatefulSet must be routed to update_stateful_set only.
        stateful_set = V1StatefulSet()
        client = mock.Mock()
        update_kubernetes_application(client, stateful_set)
        assert not update_deployment_mock.called
        update_stateful_set_mock.assert_called_with(
            kube_client=client,
            formatted_stateful_set=stateful_set,
        )

        # A V1Deployment must be routed to update_deployment only.
        update_stateful_set_mock.reset_mock()
        deployment = V1Deployment()
        client = mock.Mock()
        update_kubernetes_application(client, deployment)
        assert not update_stateful_set_mock.called
        update_deployment_mock.assert_called_with(
            kube_client=client,
            formatted_deployment=deployment,
        )

        # Any other application type is rejected.
        with raises(Exception):
            update_kubernetes_application(client, mock.Mock())
Code example #4 (score: 0)
    def __init__(self) -> None:
        """Define a single-replica postgres StatefulSet with a 1Gi data volume."""
        meta = V1ObjectMeta(name="postgres", labels={"app": "postgres"})
        selector = V1LabelSelector(match_labels={"app": "postgres"})

        # Trust auth keeps the test database password-free.
        container = V1Container(
            name="postgres",
            image="postgres:14.3",
            env=[V1EnvVar(name="POSTGRES_HOST_AUTH_METHOD", value="trust")],
            ports=[V1ContainerPort(container_port=5432, name="sql")],
            volume_mounts=[
                V1VolumeMount(name="data", mount_path="/data"),
                V1VolumeMount(
                    name="postgres-init", mount_path="/docker-entrypoint-initdb.d"
                ),
            ],
        )

        # Init scripts are sourced from the "postgres-init" ConfigMap.
        init_volume = V1Volume(
            name="postgres-init",
            config_map=V1ConfigMapVolumeSource(name="postgres-init"),
        )

        pod_template = V1PodTemplateSpec(
            metadata=meta,
            spec=V1PodSpec(containers=[container], volumes=[init_volume]),
        )

        # Each replica gets its own 1Gi "data" claim, mounted at /data above.
        data_claim = V1PersistentVolumeClaim(
            metadata=V1ObjectMeta(name="data"),
            spec=V1PersistentVolumeClaimSpec(
                access_modes=["ReadWriteOnce"],
                resources=V1ResourceRequirements(requests={"storage": "1Gi"}),
            ),
        )

        self.stateful_set = V1StatefulSet(
            api_version="apps/v1",
            kind="StatefulSet",
            metadata=meta,
            spec=V1StatefulSetSpec(
                service_name="postgres",
                replicas=1,
                selector=selector,
                template=pod_template,
                volume_claim_templates=[data_claim],
            ),
        )
Code example #5 (score: 0)
def fake_stateful_set():
    """Return a mocked V1StatefulSet fixture for service "service", instance
    "instance-2", with zero replicas.
    """
    stateful_set = V1StatefulSet(
        metadata=mock.Mock(
            namespace="paasta",
            labels={
                "yelp.com/paasta_service": "service",
                "yelp.com/paasta_instance": "instance-2",
                "yelp.com/paasta_git_sha": "1234",
                "yelp.com/paasta_config_sha": "1234",
            },
        ),
        spec=mock.Mock(replicas=0),
    )
    # ``name`` is a reserved Mock constructor argument, so it has to be set as
    # a PropertyMock on the mock's type.  Fix: the original assignment had a
    # stray trailing comma, which assigned a 1-tuple ``(PropertyMock(...),)``
    # instead of the PropertyMock itself, making ``metadata.name`` a tuple
    # rather than the string "service-instance-2".
    type(stateful_set.metadata).name = mock.PropertyMock(
        return_value="service-instance-2"
    )
    return stateful_set
Code example #6 (score: 0)
File: kubernetes_tools.py — Project: phonix2012/paasta
    def format_kubernetes_app(self) -> Union[V1Deployment, V1StatefulSet]:
        """Create the configuration that will be passed to the Kubernetes REST API.

        Builds a ``V1StatefulSet`` when the instance declares persistent
        volumes, otherwise a ``V1Deployment``.  A hash of the sanitized
        configuration is stamped into both the object's metadata labels and
        its pod template labels under ``config_sha``.

        Raises:
            InvalidKubernetesConfig: if any step of building the object fails
                (the original exception is attached with service/instance
                context).
        """

        try:
            system_paasta_config = load_system_paasta_config()
            docker_url = self.get_docker_url()
            code_sha = get_code_sha_from_dockerurl(docker_url)
            # Instances with persistent volumes are rendered as StatefulSets
            # (with volume claim templates); everything else as Deployments.
            if self.get_persistent_volumes():
                complete_config = V1StatefulSet(
                    api_version='apps/v1',
                    kind='StatefulSet',
                    metadata=self.get_kubernetes_metadata(code_sha),
                    spec=V1StatefulSetSpec(
                        service_name="{service}-{instance}".format(
                            service=self.get_sanitised_service_name(),
                            instance=self.get_sanitised_instance_name(),
                        ),
                        volume_claim_templates=self.get_volume_claim_templates(
                        ),
                        replicas=self.get_desired_instances(),
                        selector=V1LabelSelector(match_labels={
                            "service":
                            self.get_service(),
                            "instance":
                            self.get_instance(),
                        }, ),
                        template=self.get_pod_template_spec(
                            code_sha=code_sha,
                            system_paasta_config=system_paasta_config,
                        ),
                    ),
                )
            else:
                complete_config = V1Deployment(
                    api_version='apps/v1',
                    kind='Deployment',
                    metadata=self.get_kubernetes_metadata(code_sha),
                    spec=V1DeploymentSpec(
                        replicas=self.get_desired_instances(),
                        selector=V1LabelSelector(match_labels={
                            "service":
                            self.get_service(),
                            "instance":
                            self.get_instance(),
                        }, ),
                        template=self.get_pod_template_spec(
                            code_sha=code_sha,
                            system_paasta_config=system_paasta_config,
                        ),
                        # Only Deployments carry a rollout strategy.
                        strategy=self.get_deployment_strategy_config(),
                    ),
                )

            # Hash the sanitized config (with force_bounce folded in) and
            # record it on both the object and its pod template so changes
            # to the configuration are detectable from the labels alone.
            config_hash = get_config_hash(
                self.sanitize_for_config_hash(complete_config),
                force_bounce=self.get_force_bounce(),
            )
            complete_config.metadata.labels['config_sha'] = config_hash
            complete_config.spec.template.metadata.labels[
                'config_sha'] = config_hash
        except Exception as e:
            # Wrap any failure so callers get service/instance context.
            raise InvalidKubernetesConfig(e, self.get_service(),
                                          self.get_instance())
        log.debug("Complete configuration for instance is: %s",
                  complete_config)
        return complete_config
Code example #7 (score: 0)
def test_flush_manager(options: Dict[str, Any], emulator: str, web: str):
    """Integration test for flush-manager.

    Creates a race condition where a web pod's PVC is deleted while still
    attached, deploys flush-manager, and asserts that PVs, PVCs and jobs are
    cleaned up and that exactly the messages accepted by the web server
    (ids 0 and 2) reach the pubsub subscription.

    NOTE(review): assumes the fixtures provide ``options`` with ``cluster``,
    ``project`` and ``topic`` keys — confirm against conftest.
    """
    print("starting test")
    api = CoreV1Api()
    # max number of loops to run when waiting for kube actions to complete
    max_wait_loops = 20 if options["cluster"] is None else 60

    # server has invalid PUBSUB_EMULATOR, so that only flush can deliver messages
    static_pvs = options["cluster"] is None
    if static_pvs:
        create_static_pvs(api)

    print("waiting for pods to be healthy")
    for _ in range(max_wait_loops):
        if all(pod.status.phase == "Running"
               for pod in api.list_namespaced_pod("default").items):
            break
        time.sleep(1)
    else:
        assert False, "pods did not become healthy"

    # create a subscription to the defined topic
    print("creating pubsub subscription")
    os.environ["PUBSUB_EMULATOR_HOST"] = emulator
    sub_client = SubscriberClient()
    topic_path = "projects/{project}/topics/{topic}".format(**options)
    subscription_path = "projects/{project}/subscriptions/{topic}".format(
        **options)
    try:
        sub_client.create_subscription(subscription_path,
                                       topic_path,
                                       retry=None)
    except AlreadyExists:
        # a subscription left over from a previous run is fine
        pass

    print("posting message 0")
    requests.post(web, headers={
        "host": "web"
    }, json={
        "id": 0
    }).raise_for_status()
    print("setting up race condition: attached pvc is also deleted")
    delete_pvcs(api)
    print("setting up race condition: pod unschedulable due to missing pvc")
    with pytest.raises(ApiException) as excinfo:
        restart_web_pods(api)
    assert excinfo.value.reason == "Conflict"
    print("posting message 1")
    # this POST must fail to connect: id 1 is never accepted, and the final
    # assertion checks that only ids 0 and 2 were delivered
    with pytest.raises(requests.exceptions.ConnectionError):
        requests.post(web, headers={
            "host": "web"
        }, json={
            "id": 1
        }).raise_for_status()

    print("starting flush-manager")
    # TODO optionally run flush-manager via subprocess.Popen, to ensure testing
    # current code and enable code coverage
    _sa = kube_resource("kube/flush-manager.sa.yml", **options)
    _cluster_role = kube_resource("kube/flush-manager.clusterrole.yml",
                                  **options)
    _cluster_role_binding = kube_resource(
        "kube/flush-manager.clusterrolebinding.yml", **options)
    _role = kube_resource("kube/flush-manager.role.yml", **options)
    _role_binding = kube_resource("kube/flush-manager.rolebinding.yml",
                                  **options)
    _deploy = kube_resource("kube/flush-manager.deploy.yml", **options)
    with _sa, _cluster_role, _cluster_role_binding, _role, _role_binding, _deploy:
        print("posting message 2 until successful")
        for i in range(max_wait_loops):
            try:
                requests.post(web, headers={
                    "host": "web"
                }, json={
                    "id": 2
                }).raise_for_status()
            except requests.exceptions.ConnectionError:
                # pod not reachable yet; re-create static PVs if we manage them
                if i > 0 and static_pvs:
                    create_static_pvs(api)
            else:
                break
            time.sleep(1)
        else:
            assert False, "pod did not recover"
        # scale to 0 pods
        print("scaling web to 0 pods")
        AppsV1Api().patch_namespaced_stateful_set_scale(
            name="web",
            namespace="default",
            body=V1StatefulSet(api_version="apps/v1",
                               kind="StatefulSet",
                               spec=dict(replicas=0)),
        )
        # wait for no pvcs
        print("waiting for cleanup to complete")
        for _ in range(max_wait_loops):
            if not api.list_persistent_volume().items:
                break
            time.sleep(1)
        else:
            print("pvs were not cleaned up")
            assert [] == api.list_persistent_volume().items
    # assert jobs and pvcs also deleted
    assert [] == list_pvcs(api)
    assert [] == BatchV1Api().list_namespaced_job("default").items
    # assert received message id 0 and 2
    assert [b'{"id": 0}', b'{"id": 2}'] == [
        element.message.data
        for element in sub_client.pull(subscription_path, 2).received_messages
    ]
Code example #8 (score: 0)
    def __init__(self) -> None:
        """Define the environmentd StatefulSet (single replica, minio-backed
        persist, availability zones pinned to the three kind workers)."""
        meta = V1ObjectMeta(name="environmentd",
                            labels={"app": "environmentd"})
        selector = V1LabelSelector(match_labels={"app": "environmentd"})

        # Expose the pod's own name to the process via the downward API.
        pod_name_ref = V1EnvVarSource(field_ref=V1ObjectFieldSelector(
            field_path="metadata.name"))

        environment = [
            V1EnvVar(name="MZ_POD_NAME", value_from=pod_name_ref),
            V1EnvVar(name="AWS_REGION", value="minio"),
            V1EnvVar(name="AWS_ACCESS_KEY_ID", value="minio"),
            V1EnvVar(name="AWS_SECRET_ACCESS_KEY", value="minio123"),
        ]

        # The minio endpoint is embedded inside a URL, so it must be quoted.
        s3_endpoint = urllib.parse.quote("http://minio-service.default:9000")

        container = V1Container(
            name="environmentd",
            image=self.image("environmentd"),
            args=[
                "--storaged-image=" + self.image("storaged"),
                "--computed-image=" + self.image("computed"),
                "--availability-zone=kind-worker",
                "--availability-zone=kind-worker2",
                "--availability-zone=kind-worker3",
                f"--persist-blob-url=s3://minio:minio123@persist/persist?endpoint={s3_endpoint}&region=minio",
                "--orchestrator=kubernetes",
                "--orchestrator-kubernetes-image-pull-policy=never",
                "--persist-consensus-url=postgres://[email protected]?options=--search_path=consensus",
                "--adapter-stash-url=postgres://[email protected]?options=--search_path=catalog",
                "--storage-stash-url=postgres://[email protected]?options=--search_path=storage",
                "--unsafe-mode",
            ],
            env=environment,
            ports=[V1ContainerPort(container_port=5432, name="sql")],
            volume_mounts=[
                V1VolumeMount(name="data", mount_path="/data"),
            ],
        )

        pod_template = V1PodTemplateSpec(
            metadata=meta,
            spec=V1PodSpec(containers=[container]),
        )

        # Each replica claims its own 1Gi "data" volume, mounted at /data.
        data_claim = V1PersistentVolumeClaim(
            metadata=V1ObjectMeta(name="data"),
            spec=V1PersistentVolumeClaimSpec(
                access_modes=["ReadWriteOnce"],
                resources=V1ResourceRequirements(
                    requests={"storage": "1Gi"}),
            ),
        )

        self.stateful_set = V1StatefulSet(
            api_version="apps/v1",
            kind="StatefulSet",
            metadata=meta,
            spec=V1StatefulSetSpec(
                service_name="environmentd",
                replicas=1,
                pod_management_policy="Parallel",
                selector=selector,
                template=pod_template,
                volume_claim_templates=[data_claim],
            ),
        )