Example #1

    def Run(self, args):
        client = self.context['dataproc_client']
        messages = self.context['dataproc_messages']

        job_ref = util.ParseJob(args.id, self.context)
        request = messages.DataprocProjectsRegionsJobsCancelRequest(
            projectId=job_ref.projectId,
            region=job_ref.region,
            jobId=job_ref.jobId,
            cancelJobRequest=messages.CancelJobRequest())

        # TODO(user) Check if Job is still running and fail or handle 401.

        if not console_io.PromptContinue(
                message="The job '{0}' will be killed.".format(args.id)):
            raise exceptions.ToolException('Cancellation aborted by user.')

        job = client.projects_regions_jobs.Cancel(request)
        log.status.Print('Job cancellation initiated for [{0}].'.format(
            job_ref.jobId))

        job = util.WaitForJobTermination(
            job,
            self.context,
            message='Waiting for job cancellation',
            goal_state=messages.JobStatus.StateValueValuesEnum.CANCELLED)

        log.status.Print('Killed [{0}].'.format(job_ref))

        return job
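
The waiting logic above lives in util.WaitForJobTermination, which is not shown on this page. As a rough mental model only (fetch_job, poll_interval, and timeout below are hypothetical names of our own, not the SDK's API), the helper amounts to a polling loop that stops on a goal or error state:

import time

def wait_for_termination(fetch_job, goal_state, error_state=None,
                         poll_interval=5, timeout=600):
    """Poll a job until it reaches goal_state.

    fetch_job is a zero-argument callable returning the current job
    message; this sketch is a mental model, not the SDK implementation.
    """
    deadline = time.time() + timeout
    while time.time() < deadline:
        job = fetch_job()
        state = job.status.state
        if state == goal_state:
            return job
        if error_state is not None and state == error_state:
            raise RuntimeError('Job entered error state {0}.'.format(state))
        time.sleep(poll_interval)
    raise RuntimeError('Timed out waiting for job termination.')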
Example #2
  def Run(self, args):
    dataproc = dp.Dataproc(self.ReleaseTrack())

    job_ref = args.CONCEPTS.job.Parse()
    request = dataproc.messages.DataprocProjectsRegionsJobsCancelRequest(
        projectId=job_ref.projectId,
        region=job_ref.region,
        jobId=job_ref.jobId,
        cancelJobRequest=dataproc.messages.CancelJobRequest())

    # TODO(b/36049788) Check if Job is still running and fail or handle 401.

    console_io.PromptContinue(
        message="The job '{0}' will be killed.".format(args.job),
        cancel_on_no=True,
        cancel_string='Cancellation aborted by user.')

    job = dataproc.client.projects_regions_jobs.Cancel(request)
    log.status.Print(
        'Job cancellation initiated for [{0}].'.format(job_ref.jobId))

    job = util.WaitForJobTermination(
        dataproc,
        job,
        job_ref,
        message='Waiting for job cancellation',
        goal_state=dataproc.messages.JobStatus.StateValueValuesEnum.CANCELLED)

    log.status.Print('Killed [{0}].'.format(job_ref))

    return job
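
Note the change from Example 1: cancel_on_no=True folds the abort check into the prompt itself. Assuming the googlecloudsdk import paths below (they are not shown in the snippets), the two idioms are roughly equivalent; with cancel_on_no=True the prompt raises console_io.OperationCancelledError itself instead of returning False:

from googlecloudsdk.calliope import exceptions
from googlecloudsdk.core.console import console_io

def confirm_or_abort(job_id):
    """Both idioms abort unless the user confirms; illustration only."""
    message = "The job '{0}' will be killed.".format(job_id)

    # Example 1 style: inspect the return value and raise by hand.
    if not console_io.PromptContinue(message=message):
        raise exceptions.ToolException('Cancellation aborted by user.')

    # Example 2 style: let PromptContinue raise OperationCancelledError
    # on its own when the user declines.
    console_io.PromptContinue(
        message=message,
        cancel_on_no=True,
        cancel_string='Cancellation aborted by user.')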
Example #3
    def Run(self, args):
        """This is what gets called when the user runs this command."""
        dataproc = dp.Dataproc(self.ReleaseTrack())

        request_id = util.GetUniqueId()
        job_id = args.id if args.id else request_id

        # Don't use ResourceArgument, because --id is hidden by default
        job_ref = util.ParseJob(job_id, dataproc)

        self.PopulateFilesByType(args)

        cluster_ref = util.ParseCluster(args.cluster, dataproc)
        request = dataproc.messages.DataprocProjectsRegionsClustersGetRequest(
            projectId=cluster_ref.projectId,
            region=cluster_ref.region,
            clusterName=cluster_ref.clusterName)

        cluster = dataproc.client.projects_regions_clusters.Get(request)

        self._staging_dir = self.GetStagingDir(cluster,
                                               job_ref.jobId,
                                               bucket=args.bucket)
        self.ValidateAndStageFiles()

        job = dataproc.messages.Job(
            reference=dataproc.messages.JobReference(
                projectId=job_ref.projectId, jobId=job_ref.jobId),
            placement=dataproc.messages.JobPlacement(clusterName=args.cluster))
        self.ConfigureJob(dataproc.messages, job, args)

        if args.max_failures_per_hour:
            scheduling = dataproc.messages.JobScheduling(
                maxFailuresPerHour=args.max_failures_per_hour)
            job.scheduling = scheduling

        request = dataproc.messages.DataprocProjectsRegionsJobsSubmitRequest(
            projectId=job_ref.projectId,
            region=job_ref.region,
            submitJobRequest=dataproc.messages.SubmitJobRequest(
                job=job, requestId=request_id))

        job = dataproc.client.projects_regions_jobs.Submit(request)

        log.status.Print('Job [{0}] submitted.'.format(job_id))

        if not args.async_:
            job = util.WaitForJobTermination(
                dataproc,
                job,
                job_ref,
                message='Waiting for job completion',
                goal_state=dataproc.messages.JobStatus.StateValueValuesEnum.
                DONE,
                error_state=dataproc.messages.JobStatus.StateValueValuesEnum.
                ERROR,
                stream_driver_log=True)
            log.status.Print('Job [{0}] finished successfully.'.format(job_id))

        return job
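
Unlike the other submit examples, Example 3 sends a requestId with the SubmitJobRequest. Per the Dataproc API, a resubmission carrying the same id returns the already-created job instead of spawning a duplicate, which is what makes client-side retries safe. A minimal sketch of that retry pattern, with illustrative names and backoff numbers of our own choosing:

import time

def submit_with_retries(submit, max_attempts=3, backoff_seconds=2):
    """Retry a submit callable that reuses the same requestId.

    Safe only because Dataproc deduplicates SubmitJobRequests by
    requestId; submit is a zero-argument callable issuing the request
    built above. Illustrative sketch, not SDK code.
    """
    for attempt in range(1, max_attempts + 1):
        try:
            return submit()
        except Exception:  # real code should catch transient HTTP errors only
            if attempt == max_attempts:
                raise
            time.sleep(backoff_seconds * attempt)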
Example #4
  def Run(self, args):
    dataproc = dp.Dataproc(self.ReleaseTrack())

    job_ref = args.CONCEPTS.job.Parse()

    job = dataproc.client.projects_regions_jobs.Get(
        dataproc.messages.DataprocProjectsRegionsJobsGetRequest(
            projectId=job_ref.projectId,
            region=job_ref.region,
            jobId=job_ref.jobId))

    # TODO(b/36050945) Check if Job is still running and fail or handle 401.

    job = util.WaitForJobTermination(
        dataproc,
        job,
        job_ref,
        message='Waiting for job completion',
        goal_state=dataproc.messages.JobStatus.StateValueValuesEnum.DONE,
        error_state=dataproc.messages.JobStatus.StateValueValuesEnum.ERROR,
        stream_driver_log=True)

    log.status.Print('Job [{0}] finished successfully.'.format(args.job))

    return job
Example #5
    def Run(self, args):
        """This is what gets called when the user runs this command."""
        client = self.context['dataproc_client']
        messages = self.context['dataproc_messages']

        job_id = util.GetJobId(args.id)
        job_ref = util.ParseJob(job_id, self.context)

        self.PopulateFilesByType(args)

        cluster_ref = util.ParseCluster(args.cluster, self.context)
        request = messages.DataprocProjectsRegionsClustersGetRequest(
            projectId=cluster_ref.projectId,
            region=cluster_ref.region,
            clusterName=cluster_ref.clusterName)

        try:
            cluster = client.projects_regions_clusters.Get(request)
        except apitools_exceptions.HttpError as error:
            raise exceptions.HttpException(error)

        self._staging_dir = self.GetStagingDir(cluster, job_ref.jobId)
        self.ValidateAndStageFiles()

        job = messages.Job(
            reference=messages.JobReference(projectId=job_ref.projectId,
                                            jobId=job_ref.jobId),
            placement=messages.JobPlacement(clusterName=args.cluster))

        self.ConfigureJob(job, args)

        request = messages.DataprocProjectsRegionsJobsSubmitRequest(
            projectId=job_ref.projectId,
            region=job_ref.region,
            submitJobRequest=messages.SubmitJobRequest(job=job))

        try:
            job = client.projects_regions_jobs.Submit(request)
        except apitools_exceptions.HttpError as error:
            raise exceptions.HttpException(error)

        log.status.Print('Job [{0}] submitted.'.format(job_id))

        if not args.async_:  # 'async' is a reserved word in Python 3; newer snippets use the 'async_' dest
            job = util.WaitForJobTermination(
                job,
                self.context,
                message='Waiting for job completion',
                goal_state=messages.JobStatus.StateValueValuesEnum.DONE,
                stream_driver_log=True)
            log.status.Print('Job [{0}] finished successfully.'.format(job_id))

        return job
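
Example 5 translates apitools transport failures into calliope HttpExceptions at each call site. When that wrapping recurs, it can be factored into a small helper; a hedged sketch (the call_api name is ours, not the SDK's):

from apitools.base.py import exceptions as apitools_exceptions
from googlecloudsdk.calliope import exceptions

def call_api(method, request):
    """Invoke an apitools service method, translating transport errors.

    Illustrative helper only, mirroring the try/except blocks above.
    """
    try:
        return method(request)
    except apitools_exceptions.HttpError as error:
        raise exceptions.HttpException(error)

# e.g. cluster = call_api(client.projects_regions_clusters.Get, request)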
Example #6
  def Run(self, args):
    """This is what gets called when the user runs this command."""
    dataproc = dp.Dataproc(self.ReleaseTrack())

    job_id = util.GetJobId(args.id)
    job_ref = util.ParseJob(job_id, dataproc)

    self.PopulateFilesByType(args)

    cluster_ref = util.ParseCluster(args.cluster, dataproc)
    request = dataproc.messages.DataprocProjectsRegionsClustersGetRequest(
        projectId=cluster_ref.projectId,
        region=cluster_ref.region,
        clusterName=cluster_ref.clusterName)

    cluster = dataproc.client.projects_regions_clusters.Get(request)

    self._staging_dir = self.GetStagingDir(
        cluster, job_ref.jobId, bucket=args.bucket)
    self.ValidateAndStageFiles()

    job = dataproc.messages.Job(
        reference=dataproc.messages.JobReference(
            projectId=job_ref.projectId,
            jobId=job_ref.jobId),
        placement=dataproc.messages.JobPlacement(
            clusterName=args.cluster))

    self.ConfigureJob(dataproc.messages, job, args)

    request = dataproc.messages.DataprocProjectsRegionsJobsSubmitRequest(
        projectId=job_ref.projectId,
        region=job_ref.region,
        submitJobRequest=dataproc.messages.SubmitJobRequest(
            job=job))

    job = dataproc.client.projects_regions_jobs.Submit(request)

    log.status.Print('Job [{0}] submitted.'.format(job_id))

    if not args.async_:  # 'async' is reserved in Python 3; use the 'async_' dest as in Example 3
      job = util.WaitForJobTermination(
          dataproc,
          job,
          message='Waiting for job completion',
          goal_state=dataproc.messages.JobStatus.StateValueValuesEnum.DONE,
          stream_driver_log=True)
      log.status.Print('Job [{0}] finished successfully.'.format(job_id))

    return job
Example #7
  def Run(self, args):
    client = self.context['dataproc_client']
    messages = self.context['dataproc_messages']

    job_ref = util.ParseJob(args.id, self.context)
    request = job_ref.Request()

    job = client.projects_regions_jobs.Get(request)
    # TODO(user) Check if Job is still running and fail or handle 401.

    job = util.WaitForJobTermination(
        job,
        self.context,
        message='Waiting for job completion',
        goal_state=messages.JobStatus.StateValueValuesEnum.DONE,
        stream_driver_log=True)

    log.status.Print('Job [{0}] finished successfully.'.format(args.id))

    return job