def Run(self, args):
        """Attach a managed cluster to a workflow template and persist it."""
        dataproc = dp.Dataproc(self.ReleaseTrack())

        template_ref = args.CONCEPTS.template.Parse()

        # Fetch the template at the requested version (or latest when unset).
        workflow_template = dataproc.GetRegionsWorkflowTemplate(
            template_ref, args.version)

        # The managed cluster's name defaults to the template id.
        cluster_name = args.cluster_name or template_ref.workflowTemplatesId

        compute_resources = compute_helpers.GetComputeResources(
            self.ReleaseTrack(), cluster_name)

        is_beta = self.ReleaseTrack() == base.ReleaseTrack.BETA
        cluster_config = clusters.GetClusterConfig(
            args, dataproc, template_ref.projectsId, compute_resources,
            is_beta)

        cluster_labels = labels_util.ParseCreateArgs(
            args, dataproc.messages.ManagedCluster.LabelsValue)

        # Embed the managed cluster into the template's placement and write
        # the updated template back to the service.
        workflow_template.placement = dataproc.messages.WorkflowTemplatePlacement(
            managedCluster=dataproc.messages.ManagedCluster(
                clusterName=cluster_name,
                config=cluster_config,
                labels=cluster_labels))

        return dataproc.client.projects_regions_workflowTemplates.Update(
            workflow_template)
# Example 2
    def Run(self, args):
        """Create a Dataproc cluster from the parsed command-line args."""
        self.ValidateArgs(args)

        dataproc = dp.Dataproc(self.ReleaseTrack())
        cluster_ref = args.CONCEPTS.cluster.Parse()

        # Compute-resource refs (zone, machine types, ...) for this region.
        compute_resources = compute_helpers.GetComputeResources(
            self.ReleaseTrack(), cluster_ref.clusterName, cluster_ref.region)

        cluster_config = clusters.GetClusterConfig(
            args, dataproc, cluster_ref.projectId, compute_resources,
            self.BETA, include_ttl_config=True)

        cluster = dataproc.messages.Cluster(
            clusterName=cluster_ref.clusterName,
            projectId=cluster_ref.projectId,
            config=cluster_config)

        # Let subclasses apply track-specific tweaks before creation.
        self.ConfigureCluster(dataproc.messages, args, cluster)

        return clusters.CreateCluster(dataproc, cluster_ref, cluster,
                                      args.async_, args.timeout)
    def Run(self, args):
        """Set a managed cluster on a workflow template (legacy flag form)."""
        dataproc = dp.Dataproc(self.ReleaseTrack())

        template = util.ParseWorkflowTemplates(args.template, dataproc)
        workflow_template = dataproc.GetRegionsWorkflowTemplate(
            template, args.version)

        # The managed cluster is named after the template itself.
        cluster_name = template.workflowTemplatesId
        compute_resources = compute_helpers.GetComputeResources(
            self.ReleaseTrack(), cluster_name)

        # Accelerators, auto-delete TTL and min CPU platform are all gated on
        # the BETA track; compute the gate once and pass it for each feature.
        is_beta = self.ReleaseTrack() == base.ReleaseTrack.BETA
        cluster_config = clusters.GetClusterConfig(
            args, dataproc, template.projectsId, compute_resources,
            is_beta, is_beta, is_beta)

        cluster_labels = labels_util.ParseCreateArgs(
            args, dataproc.messages.ManagedCluster.LabelsValue)

        # Embed the managed cluster in the placement and save the template.
        workflow_template.placement = dataproc.messages.WorkflowTemplatePlacement(
            managedCluster=dataproc.messages.ManagedCluster(
                clusterName=cluster_name,
                config=cluster_config,
                labels=cluster_labels))

        return dataproc.client.projects_regions_workflowTemplates.Update(
            workflow_template)
  def Run(self, args):
    """Create a Dataproc cluster, honoring GA-only worker-failure handling."""
    self.ValidateArgs(args)

    dataproc = dp.Dataproc(self.ReleaseTrack())
    cluster_ref = args.CONCEPTS.cluster.Parse()

    # Compute-resource refs (zone, machine types, ...) for this region.
    compute_resources = compute_helpers.GetComputeResources(
        self.ReleaseTrack(), cluster_ref.clusterName, cluster_ref.region)

    cluster_config = clusters.GetClusterConfig(
        args,
        dataproc,
        cluster_ref.projectId,
        compute_resources,
        self.BETA,
        include_ttl_config=True,
        include_gke_platform_args=self.BETA)

    # --action-on-failed-primary-workers is only surfaced outside BETA;
    # convert the user's choice into the request enum on that path.
    if self.BETA:
      action_on_failed_primary_workers = None
    else:
      action_on_failed_primary_workers = arg_utils.ChoiceToEnum(
          args.action_on_failed_primary_workers,
          dataproc.messages.DataprocProjectsRegionsClustersCreateRequest
          .ActionOnFailedPrimaryWorkersValueValuesEnum)

    cluster = dataproc.messages.Cluster(
        clusterName=cluster_ref.clusterName,
        projectId=cluster_ref.projectId,
        config=cluster_config)

    # Let subclasses apply track-specific tweaks before creation.
    self.ConfigureCluster(dataproc.messages, args, cluster)

    return clusters.CreateCluster(
        dataproc,
        cluster_ref,
        cluster,
        args.async_,
        args.timeout,
        enable_create_on_gke=self.BETA,
        action_on_failed_primary_workers=action_on_failed_primary_workers)
# Example 5
  def Run(self, args):
    """Create a Dataproc cluster (legacy positional-name form).

    Returns whatever clusters.CreateCluster returns (the created cluster on
    the synchronous path).
    """
    self.ValidateArgs(args)

    dataproc = dp.Dataproc(self.ReleaseTrack())

    cluster_ref = util.ParseCluster(args.name, dataproc)

    compute_resources = compute_helpers.GetComputeResources(
        self.ReleaseTrack(), args.name)

    beta = self.ReleaseTrack() == base.ReleaseTrack.BETA
    cluster_config = clusters.GetClusterConfig(
        args, dataproc, cluster_ref.projectId, compute_resources, beta)

    cluster = dataproc.messages.Cluster(
        config=cluster_config,
        clusterName=cluster_ref.clusterName,
        projectId=cluster_ref.projectId)

    # Let subclasses apply track-specific tweaks before creation.
    self.ConfigureCluster(dataproc.messages, args, cluster)

    # BUG FIX: 'args.async' is a SyntaxError on Python 3 ('async' is a
    # reserved keyword since 3.7); the flag is exposed as 'async_', matching
    # the other create commands in this file.
    return clusters.CreateCluster(dataproc, cluster, args.async_, args.timeout)
# Example 6
    def Run(self, args):
        """Create a cluster and, unless async, wait for it to be running.

        Returns:
            The final Cluster resource on the synchronous path, or None when
            the async flag is set (only the operation is logged).
        """
        self.ValidateArgs(args)

        dataproc = dp.Dataproc(self.ReleaseTrack())

        cluster_ref = util.ParseCluster(args.name, dataproc)

        compute_resources = compute_helpers.GetComputeResources(
            self.ReleaseTrack(), args.name)

        beta = self.ReleaseTrack() == base.ReleaseTrack.BETA
        cluster_config = clusters.GetClusterConfig(args, dataproc,
                                                   cluster_ref.projectId,
                                                   compute_resources, beta)

        cluster = dataproc.messages.Cluster(
            config=cluster_config,
            clusterName=cluster_ref.clusterName,
            projectId=cluster_ref.projectId)

        # Let subclasses apply track-specific tweaks before creation.
        self.ConfigureCluster(dataproc.messages, args, cluster)

        operation = dataproc.client.projects_regions_clusters.Create(
            dataproc.messages.DataprocProjectsRegionsClustersCreateRequest(
                projectId=cluster_ref.projectId,
                region=cluster_ref.region,
                cluster=cluster))

        # BUG FIX: 'args. async' was a SyntaxError (stray space, and 'async'
        # is a reserved keyword on Python 3); the flag is exposed as
        # 'async_', matching the other create commands in this file.
        if args.async_:
            log.status.write('Creating [{0}] with operation [{1}].'.format(
                cluster_ref, operation.name))
            return

        operation = util.WaitForOperation(
            dataproc,
            operation,
            message='Waiting for cluster creation operation',
            timeout_s=args.timeout)

        # Re-fetch the cluster to report its final state.
        get_request = dataproc.messages.DataprocProjectsRegionsClustersGetRequest(
            projectId=cluster_ref.projectId,
            region=cluster_ref.region,
            clusterName=cluster_ref.clusterName)
        cluster = dataproc.client.projects_regions_clusters.Get(get_request)
        if cluster.status.state == (
                dataproc.messages.ClusterStatus.StateValueValuesEnum.RUNNING):

            zone_uri = cluster.config.gceClusterConfig.zoneUri
            zone_short_name = zone_uri.split('/')[-1]

            # Log the URL of the cluster, also indicating which zone it was
            # placed in. This is helpful when the server picked a zone
            # (auto zone).
            log.CreatedResource(
                cluster_ref,
                details='Cluster placed in zone [{0}]'.format(zone_short_name))
        else:
            log.error('Create cluster failed!')
            if operation.details:
                log.error('Details:\n' + operation.details)
        return cluster