Example No. 1
    def test_job_logs_multiple_pods(self, mock_core_client):
        namespace = "treesbecomelogs"
        manager = JobManager(
            namespace=namespace, signer=Mock(), register=StaticJobDefinitionsRegister()
        )
        job_name = "ahoymatey"
        pod_name_1, pod_name_2 = "p1", "p2"
        container_name = "c1"
        mock_core_client.list_namespaced_pod.return_value.items = [
            V1Pod(
                metadata=V1ObjectMeta(name=pod_name_1),
                spec=V1PodSpec(containers=[V1Container(name=container_name)]),
            ),
            V1Pod(
                metadata=V1ObjectMeta(name=pod_name_2),
                spec=V1PodSpec(containers=[V1Container(name=container_name)]),
            ),
        ]
        log_msg = "this is a log"
        mock_core_client.read_namespaced_pod_log.return_value = log_msg

        logs = manager.job_logs(job_name)

        assert logs == {
            pod_name_1: {container_name: [log_msg]},
            pod_name_2: {container_name: [log_msg]},
        }
Example No. 2
    def from_file(cls, pod_dict):
        """
        expects input in format:
          {
            "name": "bonita-webapp-0",
            "namespace": "default",
            "containers": {
              "limits": {
                "memory": "24Gi"
              },
              "requests": {
                "cpu": "3",
                "memory": "12Gi"
              }
            },
            "initContainers": null
          },
        """
        containers = pod_dict['containers']
        if containers is not None:
            c_limits = containers.get('limits')
            c_requests = containers.get('requests')
        else:
            c_limits, c_requests = {}, {}

        init_containers = pod_dict['initContainers']
        if init_containers is not None:
            ic_limits = init_containers.get('limits')
            ic_requests = init_containers.get('requests')
        else:
            ic_limits, ic_requests = {}, {}
        pod = V1Pod(
            metadata=V1ObjectMeta(
                name=pod_dict['name'],
                namespace=pod_dict['namespace']
            ),
            spec=V1PodSpec(
                containers=[
                    V1Container(
                        name='1',
                        resources=V1ResourceRequirements(
                            limits=c_limits,
                            requests=c_requests
                        )
                    )
                ],
                init_containers=[
                    V1Container(
                        name='1',
                        resources=V1ResourceRequirements(
                            limits=ic_limits,
                            requests=ic_requests
                        ))
                ]
            )
        )
        return cls.from_k8s(pod)
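A minimal usage sketch based on the documented input format; the owning class is not shown in this excerpt, so PodResources below is a placeholder name:

pod_dict = {
    "name": "bonita-webapp-0",
    "namespace": "default",
    "containers": {
        "limits": {"memory": "24Gi"},
        "requests": {"cpu": "3", "memory": "12Gi"},
    },
    "initContainers": None,  # JSON null becomes Python None
}
resources = PodResources.from_file(pod_dict)  # PodResources is a placeholder class name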
Example No. 3
def generate_delaying_proxy_deployment(concourse_cfg: ConcourseConfig):
    ensure_not_none(concourse_cfg)

    external_url = concourse_cfg.external_url()
    label = {'app': 'delaying-proxy'}

    return V1Deployment(
        kind='Deployment',
        metadata=V1ObjectMeta(name='delaying-proxy'),
        spec=V1DeploymentSpec(
            replicas=1,
            selector=V1LabelSelector(match_labels=label),
            template=V1PodTemplateSpec(
                metadata=V1ObjectMeta(labels=label),
                spec=V1PodSpec(containers=[
                    V1Container(
                        image=
                        'eu.gcr.io/gardener-project/cc/github-enterprise-proxy:0.1.0',
                        image_pull_policy='IfNotPresent',
                        name='delaying-proxy',
                        ports=[
                            V1ContainerPort(container_port=8080),
                        ],
                        liveness_probe=V1Probe(
                            tcp_socket=V1TCPSocketAction(port=8080),
                            initial_delay_seconds=10,
                            period_seconds=10,
                        ),
                        env=[
                            V1EnvVar(name='CONCOURSE_URL', value=external_url),
                        ],
                    ),
                ], ))))
Example No. 4
def test_transformer():
    service_name = 'isvc-transformer'
    default_endpoint_spec = V1alpha2EndpointSpec(
        predictor=V1alpha2PredictorSpec(
            min_replicas=1,
            pytorch=V1alpha2PyTorchSpec(
                storage_uri='gs://kfserving-samples/models/pytorch/cifar10',
                model_class_name="Net",
                resources=V1ResourceRequirements(
                    requests={'cpu': '100m', 'memory': '256Mi'},
                    limits={'cpu': '100m', 'memory': '256Mi'}))),
        transformer=V1alpha2TransformerSpec(
            min_replicas=1,
            custom=V1alpha2CustomSpec(
                container=V1Container(
                  image='gcr.io/kubeflow-ci/kfserving/image-transformer:latest',
                  name='kfserving-container',
                  resources=V1ResourceRequirements(
                    requests={'cpu': '100m', 'memory': '256Mi'},
                    limits={'cpu': '100m', 'memory': '256Mi'})))))

    isvc = V1alpha2InferenceService(api_version=api_version,
                                    kind=constants.KFSERVING_KIND,
                                    metadata=client.V1ObjectMeta(
                                        name=service_name, namespace=KFSERVING_TEST_NAMESPACE),
                                    spec=V1alpha2InferenceServiceSpec(default=default_endpoint_spec))

    KFServing.create(isvc)
    wait_for_isvc_ready(service_name)
    probs = predict(service_name, './data/transformer.json')
    assert(np.argmax(probs) == 3)
    KFServing.delete(service_name, KFSERVING_TEST_NAMESPACE)
Example No. 5
    def setUp(self):
        super().setUp()
        self.cluster_dict = getExampleClusterDefinition()
        self.cluster_object = V1MongoClusterConfiguration(**self.cluster_dict)
        self.name = self.cluster_object.metadata.name
        self.namespace = self.cluster_object.metadata.namespace

        self.stateful_set = V1beta1StatefulSet(
            metadata=self._createMeta(self.name),
            spec=V1beta1StatefulSetSpec(
                replicas=3,
                service_name=self.name,
                template=V1PodTemplateSpec(
                    metadata=V1ObjectMeta(labels=KubernetesResources.
                                          createDefaultLabels(self.name)),
                    spec=V1PodSpec(containers=[
                        V1Container(
                            name="mongodb",
                            env=[
                                V1EnvVar(name="POD_IP",
                                         value_from=V1EnvVarSource(
                                             field_ref=V1ObjectFieldSelector(
                                                 api_version="v1",
                                                 field_path="status.podIP")))
                            ],
                            command=[
                                "mongod", "--replSet", self.name, "--bind_ip",
                                "0.0.0.0", "--smallfiles", "--noprealloc"
                            ],
                            image="mongo:3.6.4",
                            ports=[
                                V1ContainerPort(name="mongodb",
                                                container_port=27017,
                                                protocol="TCP")
                            ],
                            volume_mounts=[
                                V1VolumeMount(name="mongo-storage",
                                              read_only=False,
                                              mount_path="/data/db")
                            ],
                            resources=V1ResourceRequirements(limits={
                                "cpu": "100m",
                                "memory": "64Mi"
                            },
                                                             requests={
                                                                 "cpu": "100m",
                                                                 "memory":
                                                                 "64Mi"
                                                             }))
                    ])),
                volume_claim_templates=[
                    V1PersistentVolumeClaim(
                        metadata=V1ObjectMeta(name="mongo-storage"),
                        spec=V1PersistentVolumeClaimSpec(
                            access_modes=["ReadWriteOnce"],
                            resources=V1ResourceRequirements(
                                requests={"storage": "30Gi"})))
                ],
            ),
        )
Example No. 6
def get_template(
    input_topics,
    output_topic,
    error_topic,
    multiple_inputs=None,
    multiple_outputs=None,
    env_prefix="APP_",
    consumer_group=None,
) -> V1PodTemplateSpec:
    env = [
        V1EnvVar(name="ENV_PREFIX", value=env_prefix),
        V1EnvVar(name=env_prefix + "OUTPUT_TOPIC", value=output_topic),
        V1EnvVar(name=env_prefix + "ERROR_TOPIC", value=error_topic),
    ]
    if input_topics:
        env.append(
            V1EnvVar(name=env_prefix + "INPUT_TOPICS", value=input_topics))
    if multiple_inputs:
        env.append(
            V1EnvVar(name=env_prefix + "EXTRA_INPUT_TOPICS",
                     value=multiple_inputs))
    if multiple_outputs:
        env.append(
            V1EnvVar(name=env_prefix + "EXTRA_OUTPUT_TOPICS",
                     value=multiple_outputs))

    container = V1Container(name="test-container", env=env)
    pod_spec = V1PodSpec(containers=[container])
    spec_metadata = None
    if consumer_group is not None:
        spec_metadata = V1ObjectMeta(
            annotations={"consumerGroup": consumer_group}, )
    return V1PodTemplateSpec(spec=pod_spec, metadata=spec_metadata)
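A small usage sketch for the helper above; the topic names and consumer group are placeholder values:

template = get_template(
    input_topics="input-topic",
    output_topic="output-topic",
    error_topic="error-topic",
    consumer_group="my-group",
)
assert template.metadata.annotations == {"consumerGroup": "my-group"}
assert any(var.name == "APP_INPUT_TOPICS" for var in template.spec.containers[0].env)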
Example No. 7
    def test_job_logs_not_ready(self, mock_core_client):
        namespace = "notready"
        manager = JobManager(
            namespace=namespace, signer=Mock(), register=StaticJobDefinitionsRegister()
        )
        pod_name = "p"
        container_name = "c"
        mock_core_client.list_namespaced_pod.return_value.items = [
            V1Pod(
                metadata=V1ObjectMeta(name=pod_name),
                spec=V1PodSpec(containers=[V1Container(name=container_name)]),
            )
        ]
        mock_core_client.read_namespaced_pod_log.side_effect = ApiException(
            http_resp=Mock(
                data={
                    "message": f'container "{container_name}" in pod "{pod_name}" is waiting to start: ContainerCreating'
                }
            )
        )

        # No exception
        logs = manager.job_logs("whatever")

        assert logs == {pod_name: {container_name: ["ContainerCreating"]}}
Example No. 8
def test_transformer():
    service_name = 'isvc-transformer'
    predictor = V1beta1PredictorSpec(
        min_replicas=1,
        pytorch=V1beta1TorchServeSpec(
            storage_uri='gs://kfserving-samples/models/pytorch/cifar10',
            model_class_name="Net",
            resources=V1ResourceRequirements(requests={
                'cpu': '100m',
                'memory': '256Mi'
            },
                                             limits={
                                                 'cpu': '100m',
                                                 'memory': '256Mi'
                                             })),
    )
    transformer = V1beta1TransformerSpec(
        min_replicas=1,
        containers=[
            V1Container(
                image=
                '809251082950.dkr.ecr.us-west-2.amazonaws.com/kfserving/image-transformer:latest',
                name='kfserving-container',
                resources=V1ResourceRequirements(requests={
                    'cpu': '100m',
                    'memory': '256Mi'
                },
                                                 limits={
                                                     'cpu': '100m',
                                                     'memory': '256Mi'
                                                 }))
        ])

    isvc = V1beta1InferenceService(
        api_version=constants.KFSERVING_V1BETA1,
        kind=constants.KFSERVING_KIND,
        metadata=client.V1ObjectMeta(name=service_name,
                                     namespace=KFSERVING_TEST_NAMESPACE),
        spec=V1beta1InferenceServiceSpec(predictor=predictor,
                                         transformer=transformer))

    KFServing.create(isvc)
    try:
        KFServing.wait_isvc_ready(service_name,
                                  namespace=KFSERVING_TEST_NAMESPACE)
    except RuntimeError as e:
        print(
            KFServing.api_instance.get_namespaced_custom_object(
                "serving.knative.dev", "v1", KFSERVING_TEST_NAMESPACE,
                "services", service_name + "-predictor-default"))
        pods = KFServing.core_api.list_namespaced_pod(
            KFSERVING_TEST_NAMESPACE,
            label_selector='serving.kubeflow.org/inferenceservice={}'.format(
                service_name))
        for pod in pods.items:
            print(pod)
        raise e
    res = predict(service_name, './data/transformer.json')
    assert (np.argmax(res["predictions"]) == 3)
    KFServing.delete(service_name, KFSERVING_TEST_NAMESPACE)
Example No. 9
def pod_with_preferred_affinity():
    return V1Pod(
        status=V1PodStatus(phase='Pending',
                           conditions=[
                               V1PodCondition(status='False',
                                              type='PodScheduled',
                                              reason='Unschedulable')
                           ]),
        spec=V1PodSpec(
            containers=[
                V1Container(
                    name='container',
                    resources=V1ResourceRequirements(requests={'cpu': '1.5'}))
            ],
            affinity=V1Affinity(node_affinity=V1NodeAffinity(
                required_during_scheduling_ignored_during_execution=
                V1NodeSelector(node_selector_terms=[
                    V1NodeSelectorTerm(match_expressions=[
                        V1NodeSelectorRequirement(
                            key='clusterman.com/scheduler', operator='Exists')
                    ])
                ]),
                preferred_during_scheduling_ignored_during_execution=[
                    V1PreferredSchedulingTerm(
                        weight=10,
                        preference=V1NodeSelectorTerm(match_expressions=[
                            V1NodeSelectorRequirement(
                                key='clusterman.com/pool',
                                operator='In',
                                values=['bar'])
                        ]))
                ]))))
Example No. 10
def test_sdk_e2e():

    container = V1Container(
        name="tensorflow",
        image="gcr.io/kubeflow-ci/tf-mnist-with-summaries:1.0",
        command=[
            "python", "/var/tf_mnist/mnist_with_summaries.py",
            "--log_dir=/train/logs", "--learning_rate=0.01", "--batch_size=150"
        ])

    worker = V1ReplicaSpec(
        replicas=1,
        restart_policy="Never",
        template=V1PodTemplateSpec(spec=V1PodSpec(containers=[container])))

    tfjob = V1TFJob(api_version="kubeflow.org/v1",
                    kind="TFJob",
                    metadata=V1ObjectMeta(name="mnist-ci-test",
                                          namespace=SDK_TEST_NAMESPACE),
                    spec=V1TFJobSpec(clean_pod_policy="None",
                                     tf_replica_specs={"Worker": worker}))

    TFJOB_CLIENT.create(tfjob, namespace=SDK_TEST_NAMESPACE)

    TFJOB_CLIENT.wait_for_job("mnist-ci-test", namespace=SDK_TEST_NAMESPACE)
    if not TFJOB_CLIENT.if_job_succeeded("mnist-ci-test",
                                         namespace=SDK_TEST_NAMESPACE):
        raise RuntimeError("The TFJob has not succeeded.")

    TFJOB_CLIENT.delete("mnist-ci-test", namespace=SDK_TEST_NAMESPACE)
Example No. 11
def mock_cluster_connector():
    with mock.patch('clusterman.kubernetes.kubernetes_cluster_connector.kubernetes'), \
            mock.patch('clusterman.kubernetes.kubernetes_cluster_connector.staticconf'):
        mock_cluster_connector = KubernetesClusterConnector(
            'kubernetes-test', 'bar')
        mock_cluster_connector._nodes_by_ip = {
            '10.10.10.1':
            KubernetesNode(metadata=V1ObjectMeta(name='node1'),
                           status=V1NodeStatus(allocatable={
                               'cpu': '4',
                               'gpu': 2
                           },
                                               capacity={
                                                   'cpu': '4',
                                                   'gpu': '2'
                                               })),
            '10.10.10.2':
            KubernetesNode(metadata=V1ObjectMeta(name='node2'),
                           status=V1NodeStatus(allocatable={'cpu': '6.5'},
                                               capacity={'cpu': '8'}))
        }
        mock_cluster_connector._pods_by_ip = {
            '10.10.10.1': [],
            '10.10.10.2': [
                V1Pod(metadata=V1ObjectMeta(name='pod1'),
                      status=V1PodStatus(phase='Running'),
                      spec=V1PodSpec(containers=[
                          V1Container(name='container1',
                                      resources=V1ResourceRequirements(
                                          requests={'cpu': '1.5'}))
                      ])),
            ]
        }
        return mock_cluster_connector
Example No. 12
    def get_sidecar_containers(
            self,
            system_paasta_config: SystemPaastaConfig) -> List[V1Container]:
        registrations = " ".join(self.get_registrations())
        # s_m_j currently asserts that services are healthy in smartstack before
        # continuing a bounce. this readiness check lets us achieve the same thing
        readiness_probe: Optional[V1Probe]
        if system_paasta_config.get_enable_nerve_readiness_check():
            readiness_probe = V1Probe(
                _exec=V1ExecAction(command=[
                    system_paasta_config.get_nerve_readiness_check_script(),
                ] + self.get_registrations(), ),
                initial_delay_seconds=10,
                period_seconds=10,
            )
        else:
            readiness_probe = None

        hacheck_sidecar = V1Container(
            image=system_paasta_config.get_hacheck_sidecar_image_url(),
            lifecycle=V1Lifecycle(pre_stop=V1Handler(_exec=V1ExecAction(
                command=[
                    "/bin/sh",
                    "-c",
                    f"/usr/bin/hadown {registrations}; sleep 31",
                ], ), ), ),
            name="hacheck",
            env=self.get_kubernetes_environment(),
            ports=[
                V1ContainerPort(container_port=6666, ),
            ],
            readiness_probe=readiness_probe,
        )
        return [hacheck_sidecar]
Example No. 13
 def get_kubernetes_containers(
     self,
     docker_volumes: Sequence[DockerVolume],
     system_paasta_config: SystemPaastaConfig,
     aws_ebs_volumes: Sequence[AwsEbsVolume],
     service_namespace_config: ServiceNamespaceConfig,
 ) -> Sequence[V1Container]:
     service_container = V1Container(
         image=self.get_docker_url(),
         command=self.get_cmd(),
         args=self.get_args(),
         env=self.get_container_env(),
         resources=self.get_resource_requirements(),
         lifecycle=V1Lifecycle(pre_stop=V1Handler(_exec=V1ExecAction(
             command=[
                 "/bin/sh",
                 "-c",
                 "sleep 30",
             ], ), ), ),
         name=self.get_sanitised_deployment_name(),
         liveness_probe=self.get_liveness_probe(service_namespace_config),
         ports=[
             V1ContainerPort(container_port=8888, ),
         ],
         volume_mounts=self.get_volume_mounts(
             docker_volumes=docker_volumes,
             aws_ebs_volumes=aws_ebs_volumes,
             persistent_volumes=self.get_persistent_volumes(),
         ),
     )
     containers = [service_container] + self.get_sidecar_containers(
         system_paasta_config=system_paasta_config)
     return containers
Example No. 14
 def create_work_pod(self):
   broker.coreV1.create_namespaced_pod(
     namespace='nectar',
     body=V1Pod(
       metadata=V1ObjectMeta(
         name=self.pod_name,
         labels=self.pod_labels()
       ),
       spec=V1PodSpec(
         restart_policy='Never',
         containers=[
           V1Container(
             name='docker',
             image='docker:latest',
             command=["/bin/sh"],
             args=["-c", self.command()],
             env=[
               V1EnvVar(
                 name='DOCKER_HOST',
                 value=self.daemon_host()
               )
             ]
           )
         ]
       )
     )
   )
Example No. 15
def get_streaming_app_cronjob(
    name: str = "test-cronjob",
    input_topics: Optional[str] = None,
    output_topic: Optional[str] = "output-topic",
    error_topic: Optional[str] = "error-topic",
    env_prefix: str = "APP_",
    pipeline: Optional[str] = None,
) -> V1beta1CronJob:
    env = get_env(
        input_topics,
        output_topic,
        error_topic,
        env_prefix=env_prefix,
    )
    container = V1Container(name="test-container", env=env)
    pod_spec = V1PodSpec(containers=[container])
    pod_template_spec = V1PodTemplateSpec(spec=pod_spec)
    job_spec = V1JobSpec(
        template=pod_template_spec,
        selector=None,
    )
    job_template = V1beta1JobTemplateSpec(spec=job_spec)
    spec = V1beta1CronJobSpec(job_template=job_template, schedule="* * * * *")
    metadata = get_metadata(name, pipeline=pipeline)
    return V1beta1CronJob(metadata=metadata, spec=spec)
Example No. 16
def test_sdk_e2e():
    container = V1Container(
        name="pytorch",
        image="gcr.io/kubeflow-ci/pytorch-dist-mnist-test:v1.0",
        args=["--backend", "gloo"],
    )

    master = V1ReplicaSpec(
        replicas=1,
        restart_policy="OnFailure",
        template=V1PodTemplateSpec(spec=V1PodSpec(containers=[container])))

    worker = V1ReplicaSpec(
        replicas=1,
        restart_policy="OnFailure",
        template=V1PodTemplateSpec(spec=V1PodSpec(containers=[container])))

    pytorchjob = V1PyTorchJob(api_version="kubeflow.org/v1",
                              kind="PyTorchJob",
                              metadata=V1ObjectMeta(
                                  name="pytorchjob-mnist-ci-test",
                                  namespace='default'),
                              spec=V1PyTorchJobSpec(clean_pod_policy="None",
                                                    pytorch_replica_specs={
                                                        "Master": master,
                                                        "Worker": worker
                                                    }))

    PYTORCH_CLIENT.create(pytorchjob)
    wait_for_pytorchjob_ready("pytorchjob-mnist-ci-test")

    PYTORCH_CLIENT.delete('pytorchjob-mnist-ci-test', namespace='default')
Example No. 17
def pending_pods():
    return [
        (
            V1Pod(
                metadata=V1ObjectMeta(name='pod1'),
                status=V1PodStatus(
                    phase='Pending',
                    conditions=[
                        V1PodCondition(status='False', type='PodScheduled', reason='Unschedulable')
                    ],
                ),
                spec=V1PodSpec(containers=[
                    V1Container(
                        name='container1',
                        resources=V1ResourceRequirements(requests={'cpu': '1.5', 'memory': '150MB'})
                    ),
                    V1Container(
                        name='container1',
                        resources=V1ResourceRequirements(requests={'cpu': '1.5', 'memory': '350MB'})
                    )
                ]),
            ),
            PodUnschedulableReason.InsufficientResources,
        ),
        (
            V1Pod(
                metadata=V1ObjectMeta(name='pod2'),
                status=V1PodStatus(
                    phase='Pending',
                    conditions=[
                        V1PodCondition(status='False', type='PodScheduled', reason='Unschedulable')
                    ],
                ),
                spec=V1PodSpec(containers=[
                    V1Container(
                        name='container1',
                        resources=V1ResourceRequirements(requests={'cpu': '1.5'})
                    ),
                    V1Container(
                        name='container1',
                        resources=V1ResourceRequirements(requests={'cpu': '1.5', 'mem': '300MB'})
                    )
                ]),
            ),
            PodUnschedulableReason.Unknown,
        )
    ]
Example No. 18
 def get_reference_object(self) -> V1Deployment:
     """Get deployment object for outpost"""
     # Generate V1ContainerPort objects
     container_ports = []
     for port in self.controller.deployment_ports:
         container_ports.append(
             V1ContainerPort(
                 container_port=port.port,
                 name=port.name,
                 protocol=port.protocol.upper(),
             ))
     meta = self.get_object_meta(name=self.name)
     secret_name = f"authentik-outpost-{self.controller.outpost.uuid.hex}-api"
     image_prefix = CONFIG.y("outposts.docker_image_base")
     return V1Deployment(
         metadata=meta,
         spec=V1DeploymentSpec(
             replicas=self.outpost.config.kubernetes_replicas,
             selector=V1LabelSelector(match_labels=self.get_pod_meta()),
             template=V1PodTemplateSpec(
                 metadata=V1ObjectMeta(labels=self.get_pod_meta()),
                 spec=V1PodSpec(containers=[
                     V1Container(
                         name=str(self.outpost.type),
                         image=
                         f"{image_prefix}-{self.outpost.type}:{__version__}",
                         ports=container_ports,
                         env=[
                             V1EnvVar(
                                 name="AUTHENTIK_HOST",
                                 value_from=V1EnvVarSource(
                                     secret_key_ref=V1SecretKeySelector(
                                         name=secret_name,
                                         key="authentik_host",
                                     )),
                             ),
                             V1EnvVar(
                                 name="AUTHENTIK_TOKEN",
                                 value_from=V1EnvVarSource(
                                     secret_key_ref=V1SecretKeySelector(
                                         name=secret_name,
                                         key="token",
                                     )),
                             ),
                             V1EnvVar(
                                 name="AUTHENTIK_INSECURE",
                                 value_from=V1EnvVarSource(
                                     secret_key_ref=V1SecretKeySelector(
                                         name=secret_name,
                                         key="authentik_host_insecure",
                                     )),
                             ),
                         ],
                     )
                 ]),
             ),
         ),
     )
Example No. 19
def running_pod_1():
    return V1Pod(metadata=V1ObjectMeta(name='running_pod_1'),
                 status=V1PodStatus(phase='Running', host_ip='10.10.10.2'),
                 spec=V1PodSpec(containers=[
                     V1Container(name='container1',
                                 resources=V1ResourceRequirements(
                                     requests={'cpu': '1.5'}))
                 ],
                                node_selector={'clusterman.com/pool': 'bar'}))
Example No. 20
def test_transformer():
    service_name = 'isvc-transformer'
    default_endpoint_spec = V1alpha2EndpointSpec(
        predictor=V1alpha2PredictorSpec(
            min_replicas=1,
            pytorch=V1alpha2PyTorchSpec(
                storage_uri='gs://kfserving-samples/models/pytorch/cifar10',
                model_class_name="Net",
                resources=V1ResourceRequirements(requests={
                    'cpu': '100m',
                    'memory': '256Mi'
                },
                                                 limits={
                                                     'cpu': '100m',
                                                     'memory': '256Mi'
                                                 }))),
        transformer=V1alpha2TransformerSpec(
            min_replicas=1,
            custom=V1alpha2CustomSpec(container=V1Container(
                image='gcr.io/kubeflow-ci/kfserving/image-transformer:latest',
                name='kfserving-container',
                resources=V1ResourceRequirements(requests={
                    'cpu': '100m',
                    'memory': '256Mi'
                },
                                                 limits={
                                                     'cpu': '100m',
                                                     'memory': '256Mi'
                                                 })))))

    isvc = V1alpha2InferenceService(
        api_version=api_version,
        kind=constants.KFSERVING_KIND,
        metadata=client.V1ObjectMeta(name=service_name,
                                     namespace=KFSERVING_TEST_NAMESPACE),
        spec=V1alpha2InferenceServiceSpec(default=default_endpoint_spec))

    KFServing.create(isvc)
    try:
        KFServing.wait_isvc_ready(service_name,
                                  namespace=KFSERVING_TEST_NAMESPACE)
    except RuntimeError as e:
        print(
            KFServing.api_instance.get_namespaced_custom_object(
                "serving.knative.dev", "v1", KFSERVING_TEST_NAMESPACE,
                "services", service_name + "-predictor"))
        pods = KFServing.core_api.list_namespaced_pod(
            KFSERVING_TEST_NAMESPACE,
            label_selector='serving.kubeflow.org/inferenceservice={}'.format(
                service_name))
        for pod in pods.items:
            print(pod)
        raise e
    res = predict(service_name, './data/transformer.json')
    assert (np.argmax(res["predictions"]) == 3)
    KFServing.delete(service_name, KFSERVING_TEST_NAMESPACE)
Example No. 21
def resource_container(cpu, gpu, memory):
    c = V1Container(name='container')
    settings = {
        "cpu": str(cpu),
        "memory": str(memory),
        "nvidia.com/gpu": str(gpu)

    }
    c.resources = V1ResourceRequirements(limits=settings, requests=settings)
    return c
Example No. 22
def add_trigger_environment_variable(container: V1Container):
    if container.env is None:
        container.env = []
    env_var = _get_env_var(container.env, "CHAOS_TOOLKIT_TRIGGER_ROLLOUT")
    if env_var is not None:
        env_var.value = str(uuid.uuid4())
    else:
        container.env.append(
            V1EnvVar("CHAOS_TOOLKIT_TRIGGER_ROLLOUT", str(uuid.uuid4()))
        )
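A hedged usage sketch: bumping the trigger variable mutates the pod template, so patching the Deployment afterwards makes Kubernetes perform a rolling restart. The deployment name and namespace below are placeholders:

from kubernetes import client, config

config.load_kube_config()
apps = client.AppsV1Api()
deployment = apps.read_namespaced_deployment("my-app", "default")  # placeholder name/namespace
add_trigger_environment_variable(deployment.spec.template.spec.containers[0])
apps.patch_namespaced_deployment("my-app", "default", body=deployment)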
Example No. 23
    def __init__(self) -> None:
        metadata = V1ObjectMeta(name="testdrive")

        container = V1Container(
            name="testdrive",
            image=self.image("testdrive"),
            command=["sleep", "infinity"],
        )

        pod_spec = V1PodSpec(containers=[container])
        self.pod = V1Pod(metadata=metadata, spec=pod_spec)
Example No. 24
 def __create_app_deployment(self, labels):
     container_port = V1ContainerPort(container_port=self.container_port)
     config_map_ref = V1ConfigMapEnvSource(name=INFRA_DB_CONFIG)
     container = V1Container(name=self.container_name, image=self.image_name, image_pull_policy='IfNotPresent',
                             ports=[container_port], env_from=[V1EnvFromSource(config_map_ref=config_map_ref)])
     pod_spec = V1PodSpec(containers=[container])
     pod_temp_spec = V1PodTemplateSpec(metadata=V1ObjectMeta(name=self.container_name, labels=labels), spec=pod_spec)
     deployment_spec = V1DeploymentSpec(replicas=1, selector=V1LabelSelector(match_labels=labels),
                                        template=pod_temp_spec)
     deployment = V1Deployment(metadata=V1ObjectMeta(name=self.container_name), spec=deployment_spec)
     self.appsApi.create_namespaced_deployment(namespace=TODO_APP_NAMESPACE, body=deployment)
Example No. 25
def unevictable_pod():
    return V1Pod(metadata=V1ObjectMeta(
        name='unevictable_pod',
        annotations={'clusterman.com/safe_to_evict': 'false'},
        owner_references=[]),
                 status=V1PodStatus(phase='Running', host_ip='10.10.10.2'),
                 spec=V1PodSpec(containers=[
                     V1Container(name='container1',
                                 resources=V1ResourceRequirements(
                                     requests={'cpu': '1.5'}))
                 ]))
Example No. 26
def ensure_whoami(api_apps_v1, api_core_v1, api_custom, domain):
    name = 'whoami'
    port_name = 'web'
    ensure_single_container_deployment(
        api_apps_v1,
        V1Container(
            name=name,
            image='containous/whoami',
            ports=[V1ContainerPort(name=port_name, container_port=8000)]),
        name, 'default')
    ensure_ingress_routed_svc(api_core_v1, api_custom, domain, name, name,
                              name, 'default', port_name, 80, 8000)
Example No. 27
    def create(self):
        pod = V1Pod(api_version='v1',
                    metadata=V1ObjectMeta(name=self.pod_name,
                                          labels=self.labels()),
                    spec=V1PodSpec(containers=[
                        V1Container(name="primary",
                                    image=self.image(),
                                    image_pull_policy="Always")
                    ]))

        return broker.coreV1.create_namespaced_pod(body=pod,
                                                   namespace=self.namespace)
Example No. 28
def create(subs):
    pod = broker.client.V1Pod(api_version='v1',
                              metadata=V1ObjectMeta(name=subs.get('name'),
                                                    labels=subs.get('labels')),
                              spec=V1PodSpec(containers=[
                                  V1Container(name="primary",
                                              image=subs.get('image', 'nginx'),
                                              image_pull_policy="Always")
                              ]))

    return broker.coreV1.create_namespaced_pod(body=pod,
                                               namespace=subs.get('ns'))
Example No. 29
def _create_flush_job(
    batch_api: BatchV1Api,
    command: List[str],
    env: List[V1EnvVar],
    image: str,
    name: str,
    namespace: str,
    service_account_name: str,
) -> V1Job:
    logger.info(f"creating job: {name}")
    try:
        return batch_api.create_namespaced_job(
            namespace=namespace,
            body=V1Job(
                api_version="batch/v1",
                kind="Job",
                metadata=V1ObjectMeta(name=name, namespace=namespace),
                spec=V1JobSpec(
                    template=V1PodTemplateSpec(
                        spec=V1PodSpec(
                            containers=[
                                V1Container(
                                    image=image,
                                    command=command,
                                    name="flush",
                                    volume_mounts=[
                                        V1VolumeMount(mount_path="/data", name="queue")
                                    ],
                                    env=env,
                                )
                            ],
                            restart_policy="OnFailure",
                            volumes=[
                                V1Volume(
                                    name="queue",
                                    persistent_volume_claim=(
                                        V1PersistentVolumeClaimVolumeSource(
                                            claim_name=name
                                        )
                                    ),
                                )
                            ],
                            service_account_name=service_account_name,
                        )
                    )
                ),
            ),
        )
    except ApiException as e:
        if e.reason == CONFLICT and json.loads(e.body)["reason"] == ALREADY_EXISTS:
            logger.info(f"using existing job: {name}")
            return batch_api.read_namespaced_job(name, namespace)
        raise
Example No. 30
 def create_test_pod():
     core_v1.create_namespaced_pod(
         "default",
         V1Pod(
             metadata=V1ObjectMeta(name=name, ),
             spec=V1PodSpec(containers=[
                 V1Container(
                     name="test",
                     image="alpine",
                     tty=True,
                 )
             ]),
         ))