Example #1
 def create_iperf(self, name='server'):
     self.core_v1_api.create_namespaced_pod(
         namespace="default",
         body=client.V1Pod(
             api_version="v1",
             kind="Pod",
             metadata=client.V1ObjectMeta(
                 name=name,
                 namespace="default"
             ),
             spec=client.V1PodSpec(
                 containers=[
                     client.V1Container(
                         name="iperf",
                         tty=True,
                         image="zhuangweikang/k8stc:latest",
                         image_pull_policy="IfNotPresent",
                         security_context=client.V1SecurityContext(
                             capabilities=client.V1Capabilities(add=["NET_ADMIN"])),
                         resources=client.V1ResourceRequirements(
                             limits={"cpu": "100m", "memory": "1Gi"},
                             requests={"cpu": "100m", "memory": "1Gi"})
                     )
                 ],
                 restart_policy="Never"
             )
         ), async_req=False)
def create_deployment_object(deployment_name, deployment_image,
                             deployment_replicas):
    # Configure Pod template container
    container = client.V1Container(
        name=deployment_name,
        image=deployment_image,
        #ports=[client.V1ContainerPort(container_port=80)],
        resources=client.V1ResourceRequirements(requests={
            "cpu": "100m",
            "memory": "200Mi"
        },
                                                limits={
                                                    "cpu": "500m",
                                                    "memory": "500Mi"
                                                }))
    # Create and configure a spec section
    template = client.V1PodTemplateSpec(
        metadata=client.V1ObjectMeta(labels={"app": deployment_name}),
        spec=client.V1PodSpec(containers=[container]))
    # Create the specification of deployment
    spec = client.V1DeploymentSpec(
        replicas=deployment_replicas,
        template=template,
        selector={'matchLabels': {
            'app': deployment_name
        }})
    # Instantiate the deployment object
    deployment = client.V1Deployment(
        api_version="apps/v1",
        kind="Deployment",
        metadata=client.V1ObjectMeta(name=deployment_name),
        spec=spec)

    return deployment
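The object returned by create_deployment_object above is only an in-memory spec; a minimal sketch of actually submitting it (assuming local kubeconfig credentials and the default namespace, neither shown in the original) might look like:

from kubernetes import client, config

config.load_kube_config()  # or config.load_incluster_config() when running inside a cluster
apps_v1 = client.AppsV1Api()

# illustrative arguments; the image is borrowed from a later example in this list
deployment = create_deployment_object("echo-server", "hashicorp/http-echo", 1)
apps_v1.create_namespaced_deployment(namespace="default", body=deployment)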
Example #3
File: api.py Project: xuw10/pipeline
def create_datavol_pvc(body, i):
    # It always deletes the current one and creates a new one
    pvc_nm = body['vol_name' + i]
    size = body['vol_size' + i] + 'Gi'

    annotations = {
        "rok/creds-secret-name": rok_secret_name(),
        "jupyter-dataset": pvc_nm,
    }

    if body["ws_type"] == "Existing":
        annotations['rok/origin'] = body["vol_rok_url" + i]

    labels = {"component": "singleuser-storage"}

    # Create a PVC for the New/Existing Type
    pvc = client.V1PersistentVolumeClaim(
        metadata=client.V1ObjectMeta(
            name=pvc_nm,
            namespace=body['ns'],
            annotations=annotations,
            labels=labels,
        ),
        spec=client.V1PersistentVolumeClaimSpec(
            access_modes=['ReadWriteOnce'],
            resources=client.V1ResourceRequirements(
                requests={'storage': size})))

    delete_existing_pvc(pvc.metadata.name, pvc.metadata.namespace)
    provision_new_pvc(pvc)
Example #4
def create_pod_object_migration(migration_number):
    # Configure Resources
    resources = client.V1ResourceRequirements(requests={"cpu": "250m"},
                                              limits={"cpu": "250m"})

    # Configure Pod container
    container = client.V1Container(
        name="migration-hybrid",
        image=
        "docker-energylab.iai-artifactory.iai.kit.edu/opt-framework-scheduling-hybrid/migration-hybrid:latest",
        image_pull_policy="Always",
        resources=resources,
        args=["--island.number=" + str(migration_number)])
    # Create the specification of pod
    #v1localObjectReference = client.V1LocalObjectReference(name="myregistrykey")
    spec = client.V1PodSpec(containers=[container], restart_policy="Always")
    # Instantiate the pod object
    pod_migration = client.V1Pod(
        api_version="v1",
        kind="Pod",
        metadata=client.V1ObjectMeta(name=JOB_NAME_migration +
                                     str(migration_number),
                                     namespace=NAMESPACE),
        spec=spec)
    return pod_migration
def customEndpointSpec(custom_model_spec, service_account, min_replicas,
                       max_replicas):
    env = ([
        client.V1EnvVar(name=i["name"], value=i["value"])
        for i in custom_model_spec["env"]
    ] if custom_model_spec.get("env", "") else None)
    ports = ([
        client.V1ContainerPort(container_port=int(
            custom_model_spec.get("port", "")),
                               protocol="TCP")
    ] if custom_model_spec.get("port", "") else None)
    resources = (client.V1ResourceRequirements(
        requests=(
            custom_model_spec["resources"]["requests"] if
            custom_model_spec.get('resources', {}).get('requests') else None),
        limits=(
            custom_model_spec["resources"]["limits"] if custom_model_spec.get(
                'resources', {}).get('limits') else None),
    ) if custom_model_spec.get("resources", {}) else None)
    containerSpec = client.V1Container(
        name=custom_model_spec.get("name", "custom-container"),
        image=custom_model_spec["image"],
        env=env,
        ports=ports,
        command=custom_model_spec.get("command", None),
        args=custom_model_spec.get("args", None),
        image_pull_policy=custom_model_spec.get("image_pull_policy", None),
        working_dir=custom_model_spec.get("working_dir", None),
        resources=resources)
    return V1alpha2EndpointSpec(predictor=V1alpha2PredictorSpec(
        custom=V1alpha2CustomSpec(container=containerSpec),
        service_account_name=service_account,
        min_replicas=(min_replicas if min_replicas >= 0 else None),
        max_replicas=(max_replicas if max_replicas > 0
                      and max_replicas >= min_replicas else None)))
Example #6
 def _make_container(self, player_id):
     return client.V1Container(
         env=[
             client.V1EnvVar(name="DATA_URL",
                             value="%s/player/%d" %
                             (self.game_url, player_id)),
             client.V1EnvVar(name="PORT", value="5000"),
         ],
         name="aimmo-game-worker",
         image="ocadotechnology/aimmo-game-worker:%s" %
         os.environ.get("IMAGE_SUFFIX", "latest"),
         ports=[
             client.V1ContainerPort(container_port=5000, protocol="TCP")
         ],
         resources=client.V1ResourceRequirements(
             limits={
                 "cpu": "10m",
                 "memory": "64Mi"
             },
             requests={
                 "cpu": "6m",
                 "memory": "32Mi"
             },
         ),
         security_context=client.V1SecurityContext(
             capabilities=client.V1Capabilities(drop=["all"],
                                                add=["NET_BIND_SERVICE"])),
     )
def create_job_object(message, environment_image):
    """Function to create the AWS EKS Job object

    Arguments:
        message {[dict]} -- Submission message from AWS SQS queue

    Returns:
        [AWS EKS Job class object] -- AWS EKS Job class object
    """

    PYTHONUNBUFFERED_ENV = client.V1EnvVar(name="PYTHONUNBUFFERED", value="1")
    AUTH_TOKEN_ENV = client.V1EnvVar(name="AUTH_TOKEN", value=AUTH_TOKEN)
    EVALAI_API_SERVER_ENV = client.V1EnvVar(
        name="EVALAI_API_SERVER", value=EVALAI_API_SERVER
    )
    MESSAGE_BODY_ENV = client.V1EnvVar(name="BODY", value=json.dumps(message))
    submission_pk = message["submission_pk"]
    image = message["submitted_image_uri"]
    # Configure Pod agent container
    agent_container = client.V1Container(
        name="agent", image=image, env=[PYTHONUNBUFFERED_ENV]
    )
    volume_mount_list = get_volume_mount_list("/dataset")
    # Configure Pod environment container
    environment_container = client.V1Container(
        name="environment",
        image=environment_image,
        env=[
            PYTHONUNBUFFERED_ENV,
            AUTH_TOKEN_ENV,
            EVALAI_API_SERVER_ENV,
            MESSAGE_BODY_ENV,
        ],
        resources=client.V1ResourceRequirements(
            limits={"nvidia.com/gpu": "1"}
        ),
        volume_mounts=volume_mount_list,
    )
    volume_list = get_volume_list()
    # Create and configure a spec section
    template = client.V1PodTemplateSpec(
        metadata=client.V1ObjectMeta(labels={"app": "evaluation"}),
        spec=client.V1PodSpec(
            containers=[environment_container, agent_container],
            restart_policy="Never",
            volumes=volume_list,
        ),
    )
    # Create the specification of deployment
    spec = client.V1JobSpec(backoff_limit=1, template=template)
    # Instantiate the job object
    job = client.V1Job(
        api_version="batch/v1",
        kind="Job",
        metadata=client.V1ObjectMeta(
            name="submission-{0}".format(submission_pk)
        ),
        spec=spec,
    )
    return job
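A brief, hypothetical follow-up (not part of the original): the returned V1Job still has to be posted to the cluster, e.g. via BatchV1Api. The message values below are placeholders shaped like the fields the function reads, and the module-level settings (AUTH_TOKEN, EVALAI_API_SERVER, the volume helpers) are assumed to be configured elsewhere.

from kubernetes import client, config

config.load_incluster_config()  # assumes the submitter runs inside the cluster
batch_v1 = client.BatchV1Api()

message = {"submission_pk": 42, "submitted_image_uri": "example/agent:latest"}  # placeholder values
job = create_job_object(message, environment_image="example/environment:latest")
batch_v1.create_namespaced_job(namespace="default", body=job)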
Example #8
def create_datavol_pvc(body, i):
  # body: Dict (request body)
  pvc_nm = body["vol_name" + i]

  # Create a PVC if it's a new Data Volume
  if body["vol_type" + i] == "New":
    size = body["vol_size" + i] + "Gi"
    mode = body["vol_access_modes" + i]

    pvc = client.V1PersistentVolumeClaim(
        metadata=client.V1ObjectMeta(
            name=pvc_nm,
            namespace=body["ns"]
        ),
        spec=client.V1PersistentVolumeClaimSpec(
            access_modes=[mode],
            resources=client.V1ResourceRequirements(
                requests={
                    "storage": size
                }
            )
        )
    )

    create_pvc(pvc)

  return
Example #9
 def _create_pvc_for_pv(self, vol):
     name = vol.metadata.name
     namespace = self.get_user_namespace()
     pvcname = name
     pvd = client.V1PersistentVolumeClaim(
         spec=client.V1PersistentVolumeClaimSpec(
             volume_name=name,
             access_modes=vol.spec.access_modes,
             resources=client.V1ResourceRequirements(
                 requests=vol.spec.capacity),
             selector=client.V1LabelSelector(match_labels={"name": name}),
             storage_class_name=vol.spec.storage_class_name))
     md = client.V1ObjectMeta(name=pvcname, labels={"name": pvcname})
     pvd.metadata = md
     self.log.info("Creating PVC '%s' in namespace '%s'" %
                   (pvcname, namespace))
     try:
         self.api.create_namespaced_persistent_volume_claim(namespace, pvd)
     except ApiException as e:
         if e.status != 409:
             self.log.exception("Create PVC '%s' " % pvcname +
                                "in namespace '%s' " % namespace +
                                "failed: %s" % str(e))
             raise
         else:
             self.log.info("PVC '%s' " % pvcname +
                           "in namespace '%s' " % namespace +
                           "already exists.")
Example #10
def get_resources(resources):
    """Create resources requirements.

    Args:
        resources: `PodResourcesConfig`

    Return:
        `V1ResourceRequirements`
    """
    limits = {}
    requests = {}
    if resources is None:
        return None
    if resources.cpu:
        if resources.cpu.limits:
            limits['cpu'] = resources.cpu.limits
        if resources.cpu.requests:
            requests['cpu'] = resources.cpu.requests

    if resources.memory:
        if resources.memory.limits:
            limits['memory'] = '{}Mi'.format(resources.memory.limits)
        if resources.memory.requests:
            requests['memory'] = '{}Mi'.format(resources.memory.requests)

    if resources.gpu:
        if resources.gpu.limits:
            limits['alpha.kubernetes.io/nvidia-gpu'] = resources.gpu.limits
        if resources.gpu.requests:
            requests['alpha.kubernetes.io/nvidia-gpu'] = resources.gpu.requests
    return client.V1ResourceRequirements(limits=limits or None,
                                         requests=requests or None)
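A hypothetical usage sketch (names are illustrative, not from the original): the returned V1ResourceRequirements, which may be None, is simply attached to a container spec.

requirements = get_resources(pod_resources)  # pod_resources: a PodResourcesConfig assumed to exist
container = client.V1Container(
    name="worker",
    image="python:3.10-slim",
    resources=requirements,  # None means no explicit requests/limits
)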
Example #11
def fxt_pvc(test_namespace):
    pvc_name = "pvc-test"
    api = client.CoreV1Api()
    pvc_body = client.V1PersistentVolumeClaim(
        api_version="v1",
        kind="PersistentVolumeClaim",
        metadata=client.V1ObjectMeta(name=pvc_name, ),
        spec=client.V1PersistentVolumeClaimSpec(
            storage_class_name="nfss1",
            access_modes=["ReadWriteMany"],
            resources=client.V1ResourceRequirements(
                requests={"storage": "1Gi"}),
        ),
    )

    try:
        response = api.create_namespaced_persistent_volume_claim(
            namespace=test_namespace, body=pvc_body)
    except ApiException as e:
        logging.error("That didn't work: %s" % e)

    pvcs = api.list_namespaced_persistent_volume_claim(
        namespace=test_namespace)
    logging.info(
        f"PVC {pvcs.items[0].metadata.name} currently {pvcs.items[0].status.phase}"
    )
    assert len(pvcs.items) == 1

    yield pvc_name
    logging.info("Destroying PersistentVolumeClaim")
    delete_pvc = api.delete_namespaced_persistent_volume_claim(
        name="pvc-test", namespace=test_namespace)
    assert delete_pvc, "Unable to delete PVC"
Example #12
 def deploy_volume_claim(
     self,
     labels={},
     name_suffix="",
     ports=None,
     env=None,
 ):
     volume_claim = self.get_volume_claim(labels)
     if not volume_claim:
         self.logger.info("creating volume claim (%s) ..." % name_suffix)
         storage_class_name = None
         if self.environment.storage_class:
             storage_class_name = self.environment.storage_class
         volume_claim = self.core_api.create_namespaced_persistent_volume_claim(
             namespace=self.environment.namespace,
             body=kubernetes_client.V1PersistentVolumeClaim(
                 api_version="v1",
                 kind="PersistentVolumeClaim",
                 metadata=kubernetes_client.V1ObjectMeta(
                     name=self.generate_object_name(name_suffix),
                     namespace=self.environment.namespace,
                     labels=self.generate_object_labels(labels),
                 ),
                 spec=kubernetes_client.V1PersistentVolumeClaimSpec(
                     access_modes=["ReadWriteOnce"],
                     resources=kubernetes_client.V1ResourceRequirements(
                         requests={
                             "storage": "1Gi",
                         }, ),
                     storage_class_name=storage_class_name,
                 ),
             ),
         )
     return volume_claim
 def _make_container(self, player_id):
     return client.V1Container(
         env=[
             client.V1EnvVar(name='DATA_URL',
                             value='%s/player/%d' %
                             (self.game_url, player_id)),
             client.V1EnvVar(name='PORT', value='5000')
         ],
         name='aimmo-game-worker',
         image='ocadotechnology/aimmo-game-worker:%s' %
         os.environ.get('IMAGE_SUFFIX', 'latest'),
         ports=[
             client.V1ContainerPort(container_port=5000, protocol='TCP')
         ],
         resources=client.V1ResourceRequirements(limits={
             'cpu': '10m',
             'memory': '64Mi'
         },
                                                 requests={
                                                     'cpu': '6m',
                                                     'memory': '32Mi'
                                                 }),
         security_context=client.V1SecurityContext(
             capabilities=client.V1Capabilities(drop=['all'],
                                                add=['NET_BIND_SERVICE'])))
Example #14
 def create_iperf_deploy(self, name):
     self.apps_v1_api.create_namespaced_deployment(
         namespace='default',
         body=client.V1Deployment(
             api_version="apps/v1",
             kind="Deployment",
             metadata=client.V1ObjectMeta(
                 name=name,
                 namespace="default",
                 labels={'app': name}
             ),
             spec=client.V1DeploymentSpec(
                 replicas=10,
                 selector=client.V1LabelSelector(match_labels={'app': name}),
                 template=client.V1PodTemplateSpec(
                     metadata=client.V1ObjectMeta(labels={'app': name}),
                     spec=client.V1PodSpec(
                         containers=[
                             client.V1Container(
                                 name=name,
                                 tty=True,
                                 image="zhuangweikang/k8stc:latest",
                                 image_pull_policy="IfNotPresent",
                                 security_context=client.V1SecurityContext(
                                     capabilities=client.V1Capabilities(add=["NET_ADMIN"])),
                                 resources=client.V1ResourceRequirements(
                                     limits={"cpu": "100m", "memory": "1Gi"},
                                     requests={"cpu": "100m", "memory": "1Gi"})
                             )
                         ]
                     )
                 )
             )
         )
     )
def create_volume_claim_template():
    return client.V1PersistentVolumeClaim(
        metadata=client.V1ObjectMeta(name="data"),
        spec=client.V1PersistentVolumeClaimSpec(
            access_modes=["ReadWriteOnce"],
            resources=client.V1ResourceRequirements(
                requests={"storage": "1Gi"})))
Example #16
def create_clone_pvc(pvc_values, sc_name, pvc_name, from_pvc_name,
                     created_objects):
    api_instance = client.CoreV1Api()
    pvc_metadata = client.V1ObjectMeta(name=pvc_name)
    pvc_resources = client.V1ResourceRequirements(
        requests={"storage": pvc_values["storage"]})
    pvc_data_source = client.V1TypedLocalObjectReference(
        kind="PersistentVolumeClaim", name=from_pvc_name)

    pvc_spec = client.V1PersistentVolumeClaimSpec(
        access_modes=[pvc_values["access_modes"]],
        resources=pvc_resources,
        storage_class_name=sc_name,
        data_source=pvc_data_source)

    pvc_body = client.V1PersistentVolumeClaim(api_version="v1",
                                              kind="PersistentVolumeClaim",
                                              metadata=pvc_metadata,
                                              spec=pvc_spec)

    try:
        LOGGER.info(
            f'PVC Create from Clone : Creating pvc {pvc_name} with parameters {str(pvc_values)} and storageclass {str(sc_name)} from PVC {from_pvc_name}'
        )
        api_response = api_instance.create_namespaced_persistent_volume_claim(
            namespace=namespace_value, body=pvc_body, pretty=True)
        LOGGER.debug(str(api_response))
        created_objects["clone_pvc"].append(pvc_name)
    except ApiException as e:
        LOGGER.info(f'PVC {pvc_name} creation operation has failed')
        LOGGER.error(
            f"Exception when calling CoreV1Api->create_namespaced_persistent_volume_claim: {e}"
        )
        cleanup.clean_with_created_objects(created_objects)
        assert False
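An illustrative call (all values are placeholders, not from the original test suite); pvc_values mirrors the keys the function reads and created_objects is the bookkeeping dict it appends to:

create_clone_pvc(
    pvc_values={"storage": "1Gi", "access_modes": "ReadWriteOnce"},
    sc_name="sample-storageclass",
    pvc_name="clone-pvc-1",
    from_pvc_name="source-pvc-1",
    created_objects={"clone_pvc": []},
)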
Example #17
def create_k8s_pvc(namespace: str, name: str, access_modes: tuple,
                   storage_class_name: str, size: str, user):
    """
    Create a Kubernetes persistent volume claim in a namespace in the cluster.
    An existing pvc with this name in this namespace leads to a no-op.

    Returns the new pvc.
    """
    core_v1 = get_user_core_v1(user)
    try:
        k8s_spec = client.V1PersistentVolumeClaimSpec(
            access_modes=access_modes,
            storage_class_name=storage_class_name,
            resources=client.V1ResourceRequirements(
                requests={'storage': size}))
        k8s_pvc = client.V1PersistentVolumeClaim(
            api_version="v1",
            kind="PersistentVolumeClaim",
            metadata=client.V1ObjectMeta(name=name),
            spec=k8s_spec)
        core_v1.create_namespaced_persistent_volume_claim(namespace=namespace,
                                                          body=k8s_pvc)
        logger.info(f"Created Kubernetes pvc '{namespace}:{name}'")
    except client.rest.ApiException as e:
        if e.status == 409:
            logger.warning(
                f"Tried to create already existing Kubernetes pvc '{namespace}:{name}'. Skipping the creation and using the existing one."
            )
        else:
            raise e
    return core_v1.read_namespaced_persistent_volume_claim(namespace=namespace,
                                                           name=name)
Example #18
def test_pod_with_gpus(mocker):
    args_list = [
        '--name',
        'test-wait-success',
        '--k8s-namespace',
        'test-namespace',
        '--shards',
        '1',
        '--replicas',
        '1',
        '--gpus',
        '3',
    ]
    args = set_pod_parser().parse_args(args_list)
    container = client.V1Container(
        name='test-container',
        resources=client.V1ResourceRequirements(limits={'nvidia.com/gpu': 3}),
    )
    spec = client.V1PodSpec(containers=[container])
    mocker.patch(
        'jina.peapods.pods.k8s.K8sPod._K8sDeployment._read_namespaced_deployment',
        return_value=client.V1Deployment(
            status=client.V1DeploymentStatus(replicas=1, ready_replicas=1), spec=spec
        ),
    )
    mocker.patch(
        'jina.peapods.pods.k8slib.kubernetes_deployment.deploy_service',
        return_value=f'test-wait-success.test-namespace.svc',
    )
    mocker.patch(
        'jina.peapods.pods.k8s.K8sPod._K8sDeployment._delete_namespaced_deployment',
        return_value=client.V1Status(status=200),
    )
    with K8sPod(args) as pod:
        assert pod.args.gpus == '3'
Example #19
    def create_cronjob(self, schedule: str, pipeline_uuid: str) -> str:
        run_job_endpoint = f'{kalytical_config.kalytical_api_endpoint}/pipeline/dispatcher/run_by_pipeline_uuid?pipeline_uuid={pipeline_uuid}'
        job_name = f'kalytical-api-trigger-{pipeline_uuid}'
        container = client.V1Container(
            name=job_name,
            image=kalytical_config.ext_cron_image_uri,
            env=[client.V1EnvVar(name='KALYTICAL_API_ENDPOINT', value=run_job_endpoint),
                 client.V1EnvVar(name='KALYTICAL_API_AUTH_SECRET', value=kalytical_config.kalytical_api_token)],
            resources=client.V1ResourceRequirements(limits={'cpu':'.1', 'memory':'50Mi'}))
        
        pod_template = client.V1PodTemplateSpec(
            metadata=client.V1ObjectMeta(labels={'kalytical-api-pipeline': job_name}),
            spec=client.V1PodSpec(restart_policy="Never", containers=[container]))
        
        job_spec = client.V1JobSpec(
            completions=1, backoff_limit=0, template=pod_template)
        job_template = client.V1beta1JobTemplateSpec(spec=job_spec)
        cron_spec = client.V1beta1CronJobSpec(
            job_template=job_template, schedule=schedule)
        cron_body = client.V1beta1CronJob(
            spec=cron_spec, metadata=client.V1ObjectMeta(name=job_name)
        )

        try:
            self.log.debug(f"Attempting to write namespaced cronjob with namespace={self._k8s_namespace} parameters={str(cron_body)}")
            self._k8s_batch_client.create_namespaced_cron_job(
                namespace=self._k8s_namespace, body=cron_body)

        except ApiException as e:
            if e.status == 409:
                self.log.warn("This job already existed. We will re-create it.")
                self.delete_cronjob(job_name=job_name) #TODO Instead use patching
                self.create_cronjob(schedule=schedule, pipeline_uuid=pipeline_uuid)
            else: 
                raise e
        return job_name
def create(namespace_name, service_name, container_port, commands, replicas):
    # Create an instance of V1ReplicationController
    body = client.V1ReplicationController()

    # Name the replication controller after the service given in the function call
    body.metadata = client.V1ObjectMeta(name=str(service_name)+"-controller")
    
    port_obj = []

    # Creating Port object
    for ports in container_port:
        port = client.V1ContainerPort(container_port=ports["port"],name=ports["name"])
        port_obj.append(port)

    #containers
    container_obj= client.V1Container(name=service_name,image="registry.gokul.com:5000/gokul/" + str(namespace_name) ,command=commands,
    ports=port_obj, resources=client.V1ResourceRequirements(requests={"cpu":"100m"}))

    #pod spec
    pod_spec = client.V1PodSpec(containers=[container_obj])

    #spec template 
    template = client.V1PodTemplateSpec(client.V1ObjectMeta(labels={"app":service_name}),spec=pod_spec)

    #replication specs
    body.spec = client.V1ReplicationControllerSpec(replicas=replicas,selector={"app":service_name},template=template)

    v1.create_namespaced_replication_controller(namespace=namespace_name,body=body)

    return "replication controller created"
Example #21
def get_resources(resources):
    """Create resources requirements.

    Args:
        resources: `PodResourcesConfig`

    Return:
        `V1ResourceRequirements`
    """
    limits = {}
    requests = {}
    if resources is None:
        return None
    if resources.cpu:
        if resources.cpu.limits:
            limits['cpu'] = resources.cpu.limits
        if resources.cpu.requests:
            requests['cpu'] = resources.cpu.requests

    if resources.memory:
        if resources.memory.limits:
            limits['memory'] = '{}Mi'.format(resources.memory.limits)
        if resources.memory.requests:
            requests['memory'] = '{}Mi'.format(resources.memory.requests)

    if resources.gpu:
        if resources.gpu.limits:
            limits[settings.K8S_GPU_RESOURCE_KEY] = resources.gpu.limits
        if resources.gpu.requests:
            requests[settings.K8S_GPU_RESOURCE_KEY] = resources.gpu.requests
    return client.V1ResourceRequirements(limits=limits or None,
                                         requests=requests or None)
Example #22
def _define_webserver_deployment() -> k8s.V1Deployment:
    # Define Pod container template
    container = k8s.V1Container(
        name='echo',
        image='hashicorp/http-echo',
        command=['/http-echo', '-listen=:{}'.format(CONTAINER_PORT), '-text="Echo server on port {}"'.format(CONTAINER_PORT)],
        ports=[k8s.V1ContainerPort(container_port=CONTAINER_PORT)],
        resources=k8s.V1ResourceRequirements(
            requests={'cpu': '100m', 'memory': '200Mi'},
            limits={'cpu': '500m', 'memory': '500Mi'},
        ),
    )

    # Create and configure a spec section
    template = k8s.V1PodTemplateSpec(
        metadata=k8s.V1ObjectMeta(labels={'app': APP_NAME}),
        spec=k8s.V1PodSpec(containers=[container]),
    )

    # Create the specification of deployment
    spec = k8s.V1DeploymentSpec(
        replicas=1, template=template, selector=k8s.V1LabelSelector(match_labels={'app': APP_NAME}))

    # Instantiate the deployment object
    deployment = k8s.V1Deployment(
        metadata=k8s.V1ObjectMeta(name=DEPLOYMENT_NAME),
        spec=spec,
    )

    return deployment
Example #23
def create_deployment_object():
    # Configure Pod template container
    container = client.V1Container(
        name="zaproxy",
        image="owasp/zap2docker-stable",
        command=["zap.sh"],
        args=["-daemon", "-host", "0.0.0.0", "-port", "8090", "-config", "api.disablekey=true", "-config",
              "api.addrs.addr.name=.*", "-config", "api.addrs.addr.regex=true"],
        ports=[client.V1ContainerPort(container_port=8090)],
        resources=client.V1ResourceRequirements(
            requests={"cpu": "100m", "memory": "200Mi"},
            limits={"cpu": "500m", "memory": "500Mi"}
        )
    )
    # Create and configure a spec section
    template = client.V1PodTemplateSpec(
        metadata=client.V1ObjectMeta(labels={'app': 'zap-app', 'name': 'zap-application'}),
        spec=client.V1PodSpec(containers=[container]))
    # Create the specification of deployment
    spec = client.V1DeploymentSpec(
        replicas=1,
        template=template,
        selector={'matchLabels': {'app': 'zap-app', 'name': 'zap-application'}})
    # Instantiate the deployment object
    deployment = client.V1Deployment(
        api_version="apps/v1",
        kind="Deployment",
        metadata=client.V1ObjectMeta(name=DEPLOYMENT_NAME, labels={'app': 'archerysec-app'}),
        spec=spec)

    return deployment
    def generate_app_migrator_job(self, tag: str, source: str):
        log.debug("Generating app-migrator job: tag={} source={}".format(tag, source))
        deployment = self.appsV1Api.read_namespaced_deployment(source, self.namespace)
        metadata = client.V1ObjectMeta(
            labels={"app": APP_MIGRATOR}, name=APP_MIGRATOR, namespace=self.namespace
        )
        new_image = generate_image(
            old_image=deployment.spec.template.spec.containers[0].image, new_tag=tag
        )
        job = client.V1Job(
            api_version="batch/v1",
            kind="Job",
            metadata=metadata,
            spec=client.V1JobSpec(
                template=client.V1PodTemplateSpec(
                    spec=deployment.spec.template.spec, metadata=metadata
                )
            ),
        )
        job.spec.template.spec.containers[0].image = new_image
        job.spec.template.spec.restart_policy = "Never"
        job.spec.template.spec.containers[0].command = config.APP_MIGRATOR_COMMAND
        job.spec.template.spec.containers[0].args = config.APP_MIGRATOR_ARGS
        job.spec.template.spec.containers[0].resources = client.V1ResourceRequirements()

        self.batchV1Api.create_namespaced_job(self.namespace, job)
        log.debug(
            "Generation of app-migrator job complete: tag={} source={}".format(
                tag, source
            )
        )
Example #25
 def create_notification_job(zip_code: int) -> client.V1Job:
     """Creates job to fulfill requested notification"""
     return bot.k8s_batch.create_namespaced_job(
         namespace=namespace,
         body=client.V1Job(
             api_version='batch/v1',
             kind='Job',
             metadata=client.V1ObjectMeta(generate_name=JOB_NAME_PREFIX),
             spec=client.V1JobSpec(
                 ttl_seconds_after_finished=JOB_TTL_SECONDS_AFTER_FINISHED,
                 backoff_limit=JOB_MAX_RETRIES,
                 template=client.V1PodTemplateSpec(spec=client.V1PodSpec(
                     restart_policy=JOB_RESTART_POLICY,
                     containers=[
                         client.V1Container(
                             name='worker',
                             image=job_image,
                             resources=client.V1ResourceRequirements(
                                 requests=JOB_RESOURCE_REQUESTS),
                             args=['--worker', '--zip_code',
                                   str(zip_code)],
                             env=[
                                 client.V1EnvVar(name=key, value=value)
                                 for key, value in {
                                     MONGO_USER: mongodb_user,
                                     MONGO_PASSWORD: mongodb_password,
                                     MONGO_HOST: mongodb_host,
                                     MONGO_PORT: mongodb_port,
                                     MY_TURN_API_KEY: my_turn_api_key
                                 }.items()
                             ])
                     ])))))
Example #26
def create_deployment(namespace, name, cpulim, memlim, podlim,priorityclass):
    try:
        config.load_kube_config()
    except:
        config.load_incluster_config()

    api = client.ExtensionsV1beta1Api()

    container = client.V1Container(
        name=name,
        image="ansi/lookbusy",
        resources=client.V1ResourceRequirements(
                  requests={'memory': memlim, 'cpu': cpulim}))

    body = client.ExtensionsV1beta1Deployment(
            api_version="extensions/v1beta1",
            kind="Deployment",
            metadata=client.V1ObjectMeta(name=name, namespace=namespace),
            spec = client.V1DeploymentSpec(
                selector=client.V1LabelSelector(match_labels={"app":name}),
                template = client.V1PodTemplateSpec(
                       metadata=client.V1ObjectMeta(name=name, namespace=namespace,labels={"app": name}),
                       spec=client.V1PodSpec(containers=[container],priority_class_name=priorityclass)
                       )
            )
        )
    pretty = 'true'

    try:
        api_response = api.create_namespaced_deployment(namespace, body, pretty=pretty)
    except ApiException as e:
        pprint("Exception when calling AppsV1Api->create_namespaced_deployment: %s\n" % e)
Example #27
File: api.py Project: xuw10/pipeline
def create_workspace_pvc(body):
    # If the type is New, then create a new PVC, else use an existing one
    # It always deletes the current one and creates a new one
    annotations = {
        "rok/creds-secret-name": rok_secret_name(),
        "jupyter-workspace": body["ws_name"],
    }

    if body["ws_type"] == "Existing":
        annotations['rok/origin'] = body["ws_rok_url"]

    labels = {"component": "singleuser-storage"}

    # Create a PVC for the New/Existing Type
    pvc = client.V1PersistentVolumeClaim(
        metadata=client.V1ObjectMeta(
            name=body['ws_name'],
            namespace=body['ns'],
            annotations=annotations,
            labels=labels,
        ),
        spec=client.V1PersistentVolumeClaimSpec(
            access_modes=['ReadWriteOnce'],
            resources=client.V1ResourceRequirements(
                requests={'storage': body['ws_size'] + 'Gi'})))

    delete_existing_pvc(pvc.metadata.name, pvc.metadata.namespace)
    provision_new_pvc(pvc)
Example #28
def create_cronjob(username, namespace, dbhost):
    try:
        config.load_kube_config()
    except:
        config.load_incluster_config()

    api = client.BatchV1beta1Api()

    container = client.V1Container(
        name="scheduler",
        image="sahandha/lsstscheduler",
        args=["/bin/bash", "-c",
              "python /sched.py {} {} {};".format(username, namespace, dbhost)],
        resources=client.V1ResourceRequirements(
            requests={'memory': "200Mi", 'cpu': "100m"}))

    body = client.V1beta1CronJob(
        metadata=client.V1ObjectMeta(name=namespace),
        spec=client.V1beta1CronJobSpec(
            schedule="*/1 * * * *",
            job_template=client.V1beta1JobTemplateSpec(
                spec=client.V1JobSpec(
                    template=client.V1PodTemplateSpec(
                        spec=client.V1PodSpec(
                            containers=[container],
                            restart_policy="OnFailure"))))))

    try:
        api_response = api.create_namespaced_cron_job(namespace, body)
    except ApiException as e:
        print("Exception when calling BatchV1beta1Api->create_namespaced_cron_job: %s\n" % e)
Example #29
def create_resource_spec(limits, requests):

    GKE_TPU_DESIGNATORS = [
        "cloud-tpus.google.com/v2", "cloud-tpus.google.com/preemptible-v2",
        "cloud-tpus.google.com/v3", "cloud-tpus.google.com/preemptible-v3"
    ]

    allowed_keys = ["cpu", "memory", "nvidia.com/gpu"]

    allowed_keys.extend(GKE_TPU_DESIGNATORS)

    # Define container resource requirements (limits and requests default to
    # None, so initialize them as dicts before assigning keys below)
    resources = client.V1ResourceRequirements(limits={}, requests={})

    def _raise_if_disallowed(key):
        if key not in allowed_keys:
            raise ValueError("Saw resource request or limit key %s "
                             "which is not in allowed keys %s" %
                             (key, allowed_keys))

    for key, value in limits.items():
        _raise_if_disallowed(key)
        resources.limits[key] = value

    for key, value in requests.items():
        _raise_if_disallowed(key)
        resources.requests[key] = value

    return resources
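Example call for create_resource_spec (values are illustrative): the GPU key is accepted because "nvidia.com/gpu" is in allowed_keys, while an unknown key would raise ValueError.

resources = create_resource_spec(
    limits={"cpu": "500m", "memory": "1Gi", "nvidia.com/gpu": "1"},
    requests={"cpu": "250m", "memory": "512Mi"},
)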
    def with_resources(
            self, limit_cpu: str, limit_memory: str, request_cpu: str,
            request_memory: str) -> Optional['HmlInferenceDeployment']:
        """
        Set the Resource Limits and Requests for the Container running the `HmlInferenceApp`

        Args:
            limit_cpu (str): Maximum amount of CPU to use
            limit_memory (str): Maximum amount of Memory to use
            request_cpu (str): The desired amount of CPU to reserve
            request_memory (str): The desired amount of Memory to reserve

        Returns:
            A reference to the current `HmlInferenceDeployment` (self)
        """

        self.k8s_container.resources = client.V1ResourceRequirements(
            limits={
                "cpu": limit_cpu,
                "memory": limit_memory
            },
            requests={
                "cpu": request_cpu,
                "memory": request_memory
            })
        return self
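A hypothetical chained call, assuming an HmlInferenceDeployment instance named deployment exists elsewhere:

deployment.with_resources(
    limit_cpu="500m", limit_memory="512Mi",
    request_cpu="250m", request_memory="256Mi",
)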