Example #1
def create_workflow_object():
    """
    Creates the diamond DAG from the workflow examples
    Looks something like this where we flow from left to right
    one -> (two, three) -> four
    """
    container = client.V1Container(name="busybox",
                                   image="gcr.io/google-containers/busybox",
                                   command=[
                                       "sh", "-c",
                                       "echo Starting on: $(date); sleep 5; \
                 echo Goodbye cruel world at: $(date)"
                                   ])
    stepone = DagV1WorkflowStep(
        name="stepone",
        job_template=client.V1beta1JobTemplateSpec(spec=client.V1JobSpec(
            template=client.V1PodTemplateSpec(
                metadata=client.V1ObjectMeta(labels={"workflow": "stepone"}),
                spec=client.V1PodSpec(containers=[container],
                                      restart_policy="Never")))))
    steptwo = DagV1WorkflowStep(
        name="steptwo",
        dependencies=['stepone'],
        job_template=client.V1beta1JobTemplateSpec(spec=client.V1JobSpec(
            template=client.V1PodTemplateSpec(
                metadata=client.V1ObjectMeta(labels={"workflow": "steptwo"}),
                spec=client.V1PodSpec(containers=[container],
                                      restart_policy="Never")))))
    stepthree = DagV1WorkflowStep(
        name="stepthree",
        dependencies=['stepone'],
        job_template=client.V1beta1JobTemplateSpec(spec=client.V1JobSpec(
            template=client.V1PodTemplateSpec(
                metadata=client.V1ObjectMeta(labels={"workflow": "stepthree"}),
                spec=client.V1PodSpec(containers=[container],
                                      restart_policy="Never")))))
    stepfour = DagV1WorkflowStep(
        name="stepfour",
        dependencies=['steptwo', 'stepthree'],
        job_template=client.V1beta1JobTemplateSpec(spec=client.V1JobSpec(
            template=client.V1PodTemplateSpec(
                metadata=client.V1ObjectMeta(labels={"workflow": "stepfour"}),
                spec=client.V1PodSpec(containers=[container],
                                      restart_policy="Never")))))

    workflow = DagV1Workflow(
        metadata=client.V1ObjectMeta(name="diamond"),
        spec=DagV1WorkflowSpec(selector=client.V1LabelSelector(
            match_labels={"workflow": "diamond"}),
                               steps=[stepone, steptwo, stepthree, stepfour]))
    return workflow
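
The function above only builds the DagV1Workflow object; it does not submit anything to the cluster. DagV1Workflow comes from a custom workflow client, so submission depends on how that operator registers its CRD. A minimal sketch under assumed CRD coordinates (the group, version and plural below are placeholders, not taken from the example):

# Hedged sketch: push the Workflow custom resource to the cluster.
# The group/version/plural values are assumptions; use whatever the
# workflow operator's CRD actually registers.
from kubernetes import client, config

config.load_kube_config()
workflow = create_workflow_object()
client.CustomObjectsApi().create_namespaced_custom_object(
    group="dag.example.com",   # assumed CRD group
    version="v1",              # assumed CRD version
    namespace="default",
    plural="workflows",        # assumed CRD plural
    body=client.ApiClient().sanitize_for_serialization(workflow))
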
Example #2
    def create_cronjob(self, schedule: str, pipeline_uuid: str) -> str:
        run_job_endpoint = (
            f'{kalytical_config.kalytical_api_endpoint}'
            f'/pipeline/dispatcher/run_by_pipeline_uuid?pipeline_uuid={pipeline_uuid}')
        job_name = f'kalytical-api-trigger-{pipeline_uuid}'
        container = client.V1Container(
            name=job_name,
            image=kalytical_config.ext_cron_image_uri,
            env=[client.V1EnvVar(name='KALYTICAL_API_ENDPOINT', value=run_job_endpoint),
                 client.V1EnvVar(name='KALYTICAL_API_AUTH_SECRET', value=kalytical_config.kalytical_api_token)],
            resources=client.V1ResourceRequirements(limits={'cpu':'.1', 'memory':'50Mi'}))
        
        pod_template = client.V1PodTemplateSpec(
            metadata=client.V1ObjectMeta(labels={'kalytical-api-pipeline': job_name}),
            spec=client.V1PodSpec(restart_policy="Never", containers=[container]))
        
        job_spec = client.V1JobSpec(
            completions=1, backoff_limit=0, template=pod_template)
        job_template = client.V1beta1JobTemplateSpec(spec=job_spec)
        cron_spec = client.V1beta1CronJobSpec(
            job_template=job_template, schedule=schedule)
        cron_body = client.V1beta1CronJob(
            spec=cron_spec, metadata=client.V1ObjectMeta(name=job_name))

        try:
            self.log.debug(f"Attempting to write namespaced cronjob with namespace={self._k8s_namespace} parameters={str(cron_body)}")
            self._k8s_batch_client.create_namespaced_cron_job(
                namespace=self._k8s_namespace, body=cron_body)

        except ApiException as e:
            if e.status == 409:
                self.log.warn("This job already existed. We will re-create it.")
                self.delete_cronjob(job_name=job_name) #TODO Instead use patching
                self.create_cronjob(schedule=schedule, pipeline_uuid=pipeline_uuid)
            else: 
                raise e
        return job_name
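
The TODO in the 409 branch above suggests patching instead of delete-and-recreate. A minimal sketch of that alternative, assuming the same cron_body, job_name and batch client as in create_cronjob():

        # Hedged sketch: on a 409 conflict, update the existing CronJob
        # in place rather than deleting and re-creating it.
        try:
            self._k8s_batch_client.create_namespaced_cron_job(
                namespace=self._k8s_namespace, body=cron_body)
        except ApiException as e:
            if e.status == 409:
                self.log.warn("CronJob already exists; patching it in place.")
                self._k8s_batch_client.patch_namespaced_cron_job(
                    name=job_name, namespace=self._k8s_namespace, body=cron_body)
            else:
                raise
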
Example #3
def create_cronjob(username, namespace, dbhost):
    try:
        config.load_kube_config()
    except config.ConfigException:
        config.load_incluster_config()

    api = client.BatchV1beta1Api()

    body = client.V1beta1CronJob(
        metadata=client.V1ObjectMeta(name=namespace),
        spec=client.V1beta1CronJobSpec(
            schedule="*/1 * * * *",
            job_template=client.V1beta1JobTemplateSpec(
                spec=client.V1JobSpec(
                    template=client.V1PodTemplateSpec(
                        spec=client.V1PodSpec(
                            containers=[
                                client.V1Container(
                                    name="scheduler",
                                    image="sahandha/lsstscheduler",
                                    args=["/bin/bash", "-c",
                                          "python /sched.py {} {} {};".format(username, namespace, dbhost)],
                                    resources=client.V1ResourceRequirements(
                                        requests={'memory': "200Mi", 'cpu': "100m"}))
                            ],
                            restart_policy="OnFailure"))))))

    try:
        api_response = api.create_namespaced_cron_job(namespace, body)
    except ApiException as e:
        print("Exception when calling BatchV1beta1Api->create_namespaced_cron_job: %s\n" % e)
Example #4
def create_cron_job(
    pod_name: Text,
    schedule: Text,
    dry_run: bool,
    job_spec: client.V1JobSpec,
) -> Text:
    cron_job = client.V1beta1CronJob(
        api_version="batch/v1beta1",
        kind="CronJob",
        metadata=client.V1ObjectMeta(name=_cronjob_name(pod_name)),
        spec=client.V1beta1CronJobSpec(
            schedule=schedule,
            concurrency_policy="Forbid",
            successful_jobs_history_limit=1,
            failed_jobs_history_limit=1,
            job_template=client.V1beta1JobTemplateSpec(spec=job_spec),
        ),
    )
    options = {"namespace": "default", "body": cron_job, "pretty": "true"}
    _set_dry_run(options, dry_run)
    try:
        api_response = client.BatchV1beta1Api().patch_namespaced_cron_job(
            **gamla.add_key_value("name", _cronjob_name(pod_name))(options))
    except rest.ApiException:
        logging.info(
            f"CronJob {options.get('name')} doesn't exist, creating...")
        api_response = client.BatchV1beta1Api().create_namespaced_cron_job(
            **options)
    logging.info(f"CronJob updated: {api_response}.")

    return pod_name
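
_cronjob_name and _set_dry_run are helpers that are not shown in this snippet. One plausible reconstruction of _set_dry_run, assuming it only adds the dry_run keyword that the generated Batch API methods accept:

def _set_dry_run(options: dict, dry_run: bool) -> None:
    # Hedged sketch of the undefined helper: "All" asks the API server to
    # validate the request without persisting the CronJob.
    if dry_run:
        options["dry_run"] = "All"
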
Example #5
def configure_workflow_cronjob(
    cron_schedule: str,
    namespace: str,
    project_name: str,
    project_repo_url: str,
    project_repo_branch: str = "master",
    retries: int = 2,
    successful_jobs_history_limit: int = 1,
    failed_jobs_history_limit: int = 1,
    image: str = BODYWORK_DOCKER_IMAGE,
) -> k8s.V1beta1CronJob:
    """Configure a Bodywork workflow cronjob.

    A cronjob is a k8s job that is executed on a cron-like schedule. In
    this particular instance, the job will execute the `run_workflow`
    function that will orchestrate the required jobs and deployments.

    :param cron_schedule: A valid cron schedule definition.
    :param namespace: The namespace to deploy the cronjob to.
    :param project_name: The name of the Bodywork project that the stage
        belongs to.
    :param project_repo_url: The URL for the Bodywork project Git
        repository.
    :param project_repo_branch: The Bodywork project Git repository
        branch to use, defaults to 'master'.
    :param retries: Number of times to retry running the stage to
        completion (if necessary), defaults to 2.
    :param successful_jobs_history_limit: The number of successful job
        runs (pods) to keep, defaults to 1.
    :param failed_jobs_history_limit: The number of unsuccessful job
        runs (pods) to keep, defaults to 1.
    :param image: Docker image to use for running the stage within,
        defaults to BODYWORK_DOCKER_IMAGE.
    :return: A configured k8s cronjob object.
    """
    job = configure_workflow_job(
        namespace=namespace,
        project_name=project_name,
        project_repo_url=project_repo_url,
        project_repo_branch=project_repo_branch,
        retries=retries,
        image=image,
    )
    job_template = k8s.V1beta1JobTemplateSpec(metadata=job.metadata,
                                              spec=job.spec)
    cronjob_spec = k8s.V1beta1CronJobSpec(
        schedule=cron_schedule,
        successful_jobs_history_limit=successful_jobs_history_limit,
        failed_jobs_history_limit=failed_jobs_history_limit,
        job_template=job_template,
    )
    cronjob = k8s.V1beta1CronJob(metadata=job.metadata, spec=cronjob_spec)
    return cronjob
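
As with the workflow job helpers, configure_workflow_cronjob only returns the object; deploying it is a separate call. A minimal sketch of that step (the schedule, namespace, project name and repository URL below are illustrative placeholders):

# Hedged sketch: submit the configured cronjob returned above.
cronjob = configure_workflow_cronjob(
    cron_schedule="0 * * * *",
    namespace="bodywork-dev",
    project_name="my-project",
    project_repo_url="git@github.com:org/repo.git")
k8s.BatchV1beta1Api().create_namespaced_cron_job(
    namespace="bodywork-dev", body=cronjob)
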
Example #6
    def create_cron_job_object(self,
                               cron_job_name,
                               schedule,
                               container_image,
                               args,
                               envs,
                               resources,
                               label_key,
                               label_value,
                               backoff_limit=Constants.BACKOFF_LIMIT):
        try:
            pod_template_spec = self.create_pod_template_spec(
                cron_job_name, container_image, args, envs, resources)
            # Create and configurate a spec section
            template = client.V1PodTemplateSpec(
                metadata=client.V1ObjectMeta(labels={label_key: label_value}),
                spec=pod_template_spec)

            job_spec = client.V1JobSpec(
                template=template,
                backoff_limit=backoff_limit,
                ttl_seconds_after_finished=3 * 24 * 60 * 60)  # keep finished jobs for 3 days
            # Create the specification of deployment
            spec = client.V1beta1JobTemplateSpec(
                metadata=client.V1ObjectMeta(labels={label_key: label_value}),
                spec=job_spec)
            # Create the specification of cron job with schedule
            cron_job_spec = client.V1beta1CronJobSpec(job_template=spec,
                                                      schedule=schedule)
            # Instantiate the cron job object
            cron_job = client.V1beta1CronJob(
                api_version="batch/v1beta1",
                kind="CronJob",
                metadata=client.V1ObjectMeta(name=cron_job_name),
                spec=cron_job_spec)

            return cron_job
        except Exception as exc:
            print(f'error create_cron_job_object: {exc}')
Example #7
def create_cron_job(name,
                    configmap_name,
                    init_container_name,
                    init_container_image,
                    init_container_command,
                    container_name,
                    container_image,
                    container_command,
                    schedule,
                    namespace="default",
                    env_vars={}):
    try:
        # The body is the object to submit
        body = client.V1beta1CronJob(api_version="batch/v1beta1",
                                     kind="CronJob")
        # The object needs metadata; every job must have a unique name!
        body.metadata = client.V1ObjectMeta(namespace=namespace, name=name)
        # Add a status
        body.status = client.V1beta1CronJobStatus()

        template = client.V1PodTemplateSpec()

        # Pass arguments via environment variables:
        env_list = []
        for env_name, env_value in env_vars.items():
            env_list.append(client.V1EnvVar(name=env_name, value=env_value))

        container = client.V1Container(command=container_command,
                                       env=env_list,
                                       image=container_image,
                                       image_pull_policy="IfNotPresent",
                                       name=container_name)

        volume_mount = client.V1VolumeMount(name="share-volume",
                                            mount_path=mount_path)
        container.volume_mounts = [volume_mount]
        container.args = [mount_path + '']

        init_container = client.V1Container(command=init_container_command,
                                            image=init_container_image,
                                            image_pull_policy="IfNotPresent",
                                            name=init_container_name)

        init_volume_mount = client.V1VolumeMount(name="config-volume",
                                                 mount_path=init_mount_path)
        init_container.volume_mounts = [volume_mount, init_volume_mount]

        share_volume = client.V1Volume(name="share-volume", empty_dir={})

        config_map = client.V1ConfigMapVolumeSource(name=configmap_name)
        config_map_volume = client.V1Volume(name="config-volume",
                                            config_map=config_map)

        vlor = client.V1LocalObjectReference(name='ceph-secret')
        cephfs = client.V1CephFSVolumeSource(
            monitors=[
                '192.168.4.21:6789', '192.168.4.22:6789', '192.168.4.29:6789'
            ],
            user='******',
            secret_ref=vlor,
            path='/k8svolume/ai-algo')
        config_map_volume_1 = client.V1Volume(name='demo-path', cephfs=cephfs)
        template.spec = client.V1PodSpec(
            active_deadline_seconds=600,
            containers=[container],
            restart_policy='OnFailure',
            volumes=[config_map_volume, share_volume, config_map_volume_1],
            init_containers=[init_container])

        job_template = client.V1beta1JobTemplateSpec()
        job_template.spec = client.V1JobSpec(template=template)

        body.spec = client.V1beta1CronJobSpec(starting_deadline_seconds=600,
                                              job_template=job_template,
                                              schedule=schedule)

        # To make an asynchronous HTTP request
        thread = batch_v1_beta1_api.create_namespaced_cron_job(namespace,
                                                               body,
                                                               async_req=True,
                                                               pretty=True)
        result = thread.get()

        return True, result

    except Exception as ex:
        print(ex)
        return False, ""
Example #8
    def create_cronjob_object(self):
        container = client.V1Container(
            name=self.container_name,
            args=self.args,
            image=self.image,
            image_pull_policy='IfNotPresent',
            resources={"limits": {
                "cpu": "1",
                "memory": "512Mi"
            }},
            termination_message_policy='File',
            termination_message_path='/dev/termination-log',
            security_context={
                "allowPrivilegeEscalation": False,
                "capabilities": {},
                "privileged": False,
                "readOnlyRootFilesystem": False,
                "runAsNonRoot": False
            })

        node_affinity = {
            "nodeAffinity": {
                "requiredDuringSchedulingIgnoredDuringExecution": {
                    "nodeSelectorTerms": [{
                        "matchExpressions": [{
                            "key": "node-type",
                            "operator": "In",
                            "values": [self.server_type]
                        }]
                    }]
                }
            }
        }

        job_template = client.V1beta1JobTemplateSpec(spec=client.V1JobSpec(
            backoff_limit=1,
            completions=1,
            parallelism=1,
            template=client.V1PodTemplateSpec(
                metadata=client.V1ObjectMeta(annotations=self.annotations),
                spec=client.V1PodSpec(
                    affinity=node_affinity,
                    containers=[container],
                    dns_policy='ClusterFirst',
                    image_pull_secrets=[{'name': self.image_pull_secrets}],
                    restart_policy='Never',
                    scheduler_name='default-scheduler',
                    security_context={},
                    termination_grace_period_seconds=30))))

        spec = client.V1beta1CronJobSpec(concurrency_policy=self.concurrency,
                                         failed_jobs_history_limit=3,
                                         job_template=job_template,
                                         starting_deadline_seconds=300,
                                         schedule=self.schedule,
                                         successful_jobs_history_limit=3,
                                         suspend=False)

        cronjob = client.V1beta1CronJob(
            api_version='batch/v1beta1',
            kind='CronJob',
            metadata=client.V1ObjectMeta(
                labels={'cattle.io/creator': 'norman'},
                name=self.cronjob_name,
                namespace=self.namespace),
            spec=spec)

        return cronjob
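
The spec above sets suspend=False; pausing an existing CronJob later does not require rebuilding the whole object. A hedged sketch of toggling just that field with a merge patch (the function name is illustrative):

def set_cronjob_suspend(name, namespace, suspend=True):
    # Hedged sketch: patch only spec.suspend on an existing CronJob.
    client.BatchV1beta1Api().patch_namespaced_cron_job(
        name=name,
        namespace=namespace,
        body={"spec": {"suspend": suspend}})
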
Example #9
def configure_cronjob(
    cron_schedule: str,
    namespace: str,
    project_name: str,
    project_repo_url: str,
    project_repo_branch: str = 'master',
    retries: int = 2,
    successful_jobs_history_limit: int = 1,
    failed_jobs_history_limit: int = 1,
    image: str = BODYWORK_DOCKER_IMAGE,
) -> k8s.V1beta1CronJob:
    """Configure a Bodywork batch stage k8s cron-job.

    A cron-job is a k8s job that is executed on a cron-like schedule. In
    this particular instance, the job will execute the `run_workflow`
    function that will orchestrate the required jobs and deployments.

    :param cron_schedule: A valid cron schedule definition.
    :param namespace: The namespace to deploy the cronjob to.
    :param project_name: The name of the Bodywork project that the stage
        belongs to.
    :param project_repo_url: The URL for the Bodywork project Git
        repository.
    :param project_repo_branch: The Bodywork project Git repository
        branch to use, defaults to 'master'.
    :param retries: Number of times to retry running the stage to
        completion (if necessary), defaults to 2.
    :param successful_jobs_history_limit: The number of successful job
        runs (pods) to keep, defaults to 1.
    :param failed_jobs_history_limit: The number of unsuccessful job
        runs (pods) to keep, defaults to 1.
    :param image: Docker image to use for running the stage within,
        defaults to BODYWORK_DOCKER_IMAGE.
    :return: A configured k8s cronjob object.
    """
    vcs_env_vars = [
        k8s.V1EnvVar(name=SSH_GITHUB_KEY_ENV_VAR,
                     value_from=k8s.V1EnvVarSource(
                         secret_key_ref=k8s.V1SecretKeySelector(
                             key=SSH_GITHUB_KEY_ENV_VAR,
                             name=SSH_GITHUB_SECRET_NAME,
                             optional=True)))
    ]
    container = k8s.V1Container(name='bodywork',
                                image=image,
                                image_pull_policy='Always',
                                env=vcs_env_vars,
                                command=['bodywork', 'workflow'],
                                args=[
                                    f'--namespace={namespace}',
                                    project_repo_url, project_repo_branch
                                ])
    pod_spec = k8s.V1PodSpec(
        service_account_name=BODYWORK_WORKFLOW_SERVICE_ACCOUNT,
        containers=[container],
        restart_policy='Never')
    pod_template_spec = k8s.V1PodTemplateSpec(spec=pod_spec)
    job_spec = k8s.V1JobSpec(template=pod_template_spec,
                             completions=1,
                             backoff_limit=retries)
    job_template = k8s.V1beta1JobTemplateSpec(spec=job_spec)
    cronjob_spec = k8s.V1beta1CronJobSpec(
        schedule=cron_schedule,
        successful_jobs_history_limit=successful_jobs_history_limit,
        failed_jobs_history_limit=failed_jobs_history_limit,
        job_template=job_template)
    cronjob = k8s.V1beta1CronJob(
        metadata=k8s.V1ObjectMeta(name=project_name, namespace=namespace),
        spec=cronjob_spec)
    return cronjob
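
For completeness, a matching teardown sketch: the cronjob above is named after the project, so removing it only needs the project name and namespace (the function name is illustrative, not part of Bodywork):

def delete_workflow_cronjob(project_name: str, namespace: str) -> None:
    # Hedged sketch: delete the cronjob configured above, with foreground
    # propagation so the jobs and pods it spawned are cleaned up too.
    k8s.BatchV1beta1Api().delete_namespaced_cron_job(
        name=project_name,
        namespace=namespace,
        body=k8s.V1DeleteOptions(propagation_policy='Foreground'))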