Example 1
  def GenerateKubeconfig(self, project_id, membership):
    project_number = project_util.GetProjectNumber(project_id)
    kwargs = {
        'membership':
            membership,
        'project_id':
            project_id,
        'server':
            SERVER_FORMAT.format(
                env=self.get_url_prefix(),
                project_number=project_number,
                membership=membership),
        'auth_provider':
            'gcp',
    }
    user_kwargs = {
        'auth_provider': 'gcp',
    }

    cluster_kwargs = {}
    context = KUBECONTEXT_FORMAT.format(
        project=project_id, membership=membership)
    kubeconfig = kconfig.Kubeconfig.Default()
    # Use same key for context, cluster, and user.
    kubeconfig.contexts[context] = kconfig.Context(context, context, context)
    kubeconfig.users[context] = kconfig.User(context, **user_kwargs)
    kubeconfig.clusters[context] = kconfig.Cluster(context, kwargs['server'],
                                                   **cluster_kwargs)
    kubeconfig.SetCurrentContext(context)
    kubeconfig.SaveToFile()
    return kubeconfig
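For orientation, the entries written above all share one key for the context, cluster, and user, so the resulting kubeconfig fragment has roughly the shape sketched below. This is only an illustration: the context name and server URL are hypothetical stand-ins for whatever KUBECONTEXT_FORMAT and SERVER_FORMAT actually produce.

# Rough shape of what GenerateKubeconfig writes (illustrative only, not the
# exact kconfig serialization).
name = 'example-context'  # hypothetical KUBECONTEXT_FORMAT output
sketch = {
    'contexts': {name: {'cluster': name, 'user': name}},
    'users': {name: {'auth-provider': 'gcp'}},
    'clusters': {name: {'server': 'https://<env>/projects/<number>/memberships/<membership>'}},
}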
Example 2
def _GKEConnectNamespace(kube_client, project_id):
  """Returns the namespace into which to install or update the connect agent.

  Connect namespaces are identified by the presence of the hub.gke.io/project
  label. If there is one existing namespace with this label in the cluster, its
  name is returned; otherwise, a connect agent namespace with the project
  number as a suffix is returned. If there are multiple namespaces with the
  hub.gke.io/project label, an error is raised.

  Args:
    kube_client: a KubernetesClient
    project_id: A GCP project identifier

  Returns:
    a string, the namespace

  Raises:
    exceptions.Error: if there are multiple Connect namespaces in the cluster
  """
  selector = '{}={}'.format(CONNECT_RESOURCE_LABEL, project_id)
  namespaces = kube_client.NamespacesWithLabelSelector(selector)
  if not namespaces:
    return 'gke-connect-{}'.format(p_util.GetProjectNumber(project_id))
  if len(namespaces) == 1:
    return namespaces[0]
  raise exceptions.Error(
      'Multiple GKE Connect namespaces in cluster: {}'.format(namespaces))
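A minimal sketch of how this selection behaves, using a stubbed client in place of the real KubernetesClient. The stub, the namespace name, and the project ID below are made up, and it assumes _GKEConnectNamespace and its module constants are in scope.

class FakeKubeClient:
  """Stand-in for KubernetesClient; illustration only."""

  def __init__(self, namespaces):
    self._namespaces = namespaces

  def NamespacesWithLabelSelector(self, selector):
    # The real client would query the cluster with this label selector.
    return list(self._namespaces)

# Exactly one labeled namespace: its name is returned unchanged.
_GKEConnectNamespace(FakeKubeClient(['gke-connect-123456789']), 'my-project')
# No labeled namespace: falls back to 'gke-connect-<project number>' (requires a
# GetProjectNumber lookup). Two or more labeled namespaces: raises exceptions.Error.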
Example 3
    def Run(self, args):
        """Removes the Dataplex service agent from the given resource's IAM policy."""
        lake_ref = args.CONCEPTS.project.Parse()
        service_account = 'service-' + str(
            project_util.GetProjectNumber(lake_ref.projectsId)
        ) + '@gcp-sa-dataplex.iam.gserviceaccount.com'
        if args.IsSpecified('storage_bucket_resource'):
            return lake.RemoveServiceAccountFromBucketPolicy(
                storage_util.BucketReference(args.storage_bucket_resource),
                'serviceAccount:' + service_account,
                'roles/dataplex.serviceAgent')
        if args.IsSpecified('bigquery_dataset_resource'):
            get_dataset_request = apis.GetMessagesModule(
                'bigquery', 'v2').BigqueryDatasetsGetRequest(
                    datasetId=args.bigquery_dataset_resource,
                    projectId=args.secondary_project)
            dataset = apis.GetClientInstance(
                'bigquery', 'v2').datasets.Get(request=get_dataset_request)
            lake.RemoveServiceAccountFromDatasetPolicy(
                dataset, service_account, 'roles/dataplex.serviceAgent')
            return apis.GetClientInstance('bigquery', 'v2').datasets.Patch(
                apis.GetMessagesModule(
                    'bigquery', 'v2').BigqueryDatasetsPatchRequest(
                        datasetId=args.bigquery_dataset_resource,
                        projectId=args.secondary_project,
                        dataset=dataset))
        if args.IsSpecified('project_resource'):
            return projects_api.RemoveIamPolicyBinding(
                project_util.ParseProject(args.project_resource),
                'serviceAccount:' + service_account,
                'roles/dataplex.serviceAgent')
Example 4
def GetFeedParent(organization, project, folder):
  """Get the parent name from organization Number, project Id, or folder Number."""
  if organization:
    return 'organizations/{0}'.format(organization)
  if folder:
    return 'folders/{0}'.format(folder)
  return 'projects/{0}'.format(project_util.GetProjectNumber(project))
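A few hypothetical calls showing the three parent formats (the numbers and project ID are made up; only the project case triggers a GetProjectNumber lookup):

GetFeedParent(organization='123456', project=None, folder=None)  # 'organizations/123456'
GetFeedParent(organization=None, project=None, folder='987654')  # 'folders/987654'
GetFeedParent(organization=None, project='my-project', folder=None)  # 'projects/<project number>'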
def _ConnectGatewayKubeconfig(kubeconfig, cluster, context, cmd_path):
    """Generates the Connect Gateway kubeconfig entry.

  Args:
    kubeconfig: object, Kubeconfig object.
    cluster: object, Anthos Multi-cloud cluster.
    context: str, context for the kubeconfig entry.
    cmd_path: str, authentication provider command path.

  Raises:
      errors.MissingClusterField: cluster is missing required fields.
  """
    if cluster.fleet is None:
        raise errors.MissingClusterField('fleet')
    if cluster.fleet.membership is None:
        raise errors.MissingClusterField('fleet.membership')
    membership_resource = resources.REGISTRY.ParseRelativeName(
        cluster.fleet.membership,
        collection='gkehub.projects.locations.memberships')
    # Connect Gateway only supports project number.
    # TODO(b/198380839): Use the url with locations once rolled out.
    server = 'https://{}/v1/projects/{}/memberships/{}'.format(
        _GetConnectGatewayEndpoint(),
        project_util.GetProjectNumber(membership_resource.projectsId),
        membership_resource.membershipsId)
    user_kwargs = {'auth_provider': 'gcp', 'auth_provider_cmd_path': cmd_path}
    kubeconfig.users[context] = kubeconfig_util.User(context, **user_kwargs)
    kubeconfig.clusters[context] = gwkubeconfig_util.Cluster(context, server)
Example 6
def _PurgeAlphaInstaller(kube_client, namespace, project_id):
    """Purge the Alpha installation resources if exists.

  Args:
    kube_client: Kubernetes client to operate on the cluster.
    namespace: the namespace of Alpha installation.
    project_id: the GCP project ID.

  Raises:
    exceptions.Error: if Alpha resources deletion failed.
  """
    project_number = p_util.GetProjectNumber(project_id)
    err = kube_client.Delete(
        INSTALL_ALPHA_TEMPLATE.format(
            namespace=namespace,
            connect_resource_label=CONNECT_RESOURCE_LABEL,
            project_id=project_id,
            project_number=project_number,
            membership_name='',
            proxy='',
            image='',
            gcp_sa_key='',
            gcp_sa_key_secret_name=GCP_SA_KEY_SECRET_NAME,
            agent_install_deployment_name=AGENT_INSTALL_DEPLOYMENT_NAME,
            agent_install_app_label=AGENT_INSTALL_APP_LABEL))
    if err:
        if 'NotFound' not in err:
            raise exceptions.Error(
                'failed to delete Alpha installation: {}'.format(err))
Example 7
def SwapProjectName(parent_string):
    """Replaces the project ID in the second path component with its project number."""
    components = parent_string.split('/')
    if len(components) < 2:
        return parent_string
    components[1] = six.text_type(
        projects_command_util.GetProjectNumber(components[1]))
    return '/'.join(components)
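As a rough illustration (values made up), SwapProjectName only rewrites the second path component and passes short inputs through unchanged:

SwapProjectName('projects/my-project/feeds/feed1')  # -> 'projects/<project number>/feeds/feed1'
SwapProjectName('my-project')  # -> 'my-project' (fewer than two components, returned as-is)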
  def BuildInstanceSpec(self,
                        name,
                        zone,
                        machine_type,
                        disk_size,
                        preemptible,
                        network,
                        use_with_notebook,
                        source_image=None):
    """Builds an instance spec to be used for Instance creation."""

    disk = self.messages.AttachedDisk(
        boot=True,
        autoDelete=True,
        initializeParams=self.messages.AttachedDiskInitializeParams(
            sourceImage=source_image,
            diskSizeGb=disk_size
        ))
    project_number = p_util.GetProjectNumber(
        properties.VALUES.core.project.Get(required=True))
    network_interface = self.messages.NetworkInterface(
        network='projects/{}/global/networks/{}'.format(
            project_number, network),
        accessConfigs=[self.messages.AccessConfig(
            name='External NAT',
            type=self.messages.AccessConfig.TypeValueValuesEnum.ONE_TO_ONE_NAT)]
        )
    metadata = [self.messages.Metadata.ItemsValueListEntry(
        key='ctpu',
        value=name)]

    if use_with_notebook:
      metadata.append(
          self.messages.Metadata.ItemsValueListEntry(
              key='proxy-mode', value='project_editors'))

    service_account = self.messages.ServiceAccount(
        email='default',
        scopes=[
            'https://www.googleapis.com/auth/devstorage.read_write',
            'https://www.googleapis.com/auth/logging.write',
            'https://www.googleapis.com/auth/monitoring.write',
            'https://www.googleapis.com/auth/cloud-platform'
        ])
    labels = self.messages.Instance.LabelsValue(additionalProperties=[
        self.messages.Instance.LabelsValue.AdditionalProperty(
            key='ctpu', value=name)
    ])

    return self.messages.Instance(
        name=name,
        metadata=self.messages.Metadata(items=metadata),
        machineType='zones/{}/machineTypes/{}'.format(zone, machine_type),
        disks=[disk],
        scheduling=self.messages.Scheduling(preemptible=preemptible),
        networkInterfaces=[network_interface],
        labels=labels,
        serviceAccounts=[service_account])
Example 9
def CheckServiceAccountPermission(unused_repo_ref, repo_args, request):
    """Checks and grants key encrypt/decrypt permission for service account.

  Checks if Artifact Registry service account has encrypter/decrypter or owner
  role for the given key. If not, prompts users to grant key encrypter/decrypter
  permission to the service account. The operation fails if users do not grant
  the permission.

  Args:
    unused_repo_ref: Repo reference input.
    repo_args: User input arguments.
    request: Create repository request.

  Returns:
    Create repository request.
  """
    if repo_args.kms_key:
        project_num = project_util.GetProjectNumber(GetProject(repo_args))
        service_account = _AR_SERVICE_ACCOUNT.format(project_num=project_num)
        policy = ar_requests.GetCryptoKeyPolicy(repo_args.kms_key)
        has_permission = False
        for binding in policy.bindings:
            if "serviceAccount:" + service_account in binding.members and (
                    binding.role
                    == "roles/cloudkms.cryptoKeyEncrypterDecrypter"
                    or binding.role == "roles/owner"):
                has_permission = True
                break
        if not has_permission:
            console_io.PromptContinue(
                prompt_string=
                ("\nGrant the Artifact Registry Service Account "
                 "permission to encrypt/decrypt with the selected key [{key_name}]"
                 .format(key_name=repo_args.kms_key)),
                cancel_on_no=True,
                cancel_string=
                ("The Artifact Registry Service Account needs permissions to "
                 "encrypt/decrypt on the selected key.\n"
                 "Learn more: https://cloud.google.com/artifact-registry/docs/cmek"
                 ))
            try:
                ar_requests.AddCryptoKeyPermission(
                    repo_args.kms_key, "serviceAccount:" + service_account)
            # We have checked the existence of the key when checking IAM bindings
            # So all 400s should be because the service account is problematic.
            # We are moving the permission check to the backend fairly soon anyway.
            except apitools_exceptions.HttpBadRequestError:
                msg = (
                    "The Artifact Registry service account may not exist, please "
                    "create the service account.\nLearn more: "
                    "https://cloud.google.com/artifact-registry/docs/cmek")
                raise ar_exceptions.ArtifactRegistryError(msg)

            log.status.Print(
                "Added Cloud KMS CryptoKey Encrypter/Decrypter Role to [{key_name}]"
                .format(key_name=repo_args.kms_key))
    return request
Example 10
def GenerateInstallManifest(project_id, namespace, image, sa_key_data,
                            image_pull_secret_data, membership_name, proxy):
  """Generates the contents of the GKE Connect agent install manifest.

  Args:
    project_id: The GCP project identifier.
    namespace: The namespace into which to deploy the Connect agent.
    image: The container image to use in the Connect agent install deployment
      (and, later, runtime deployment).
    sa_key_data: The contents of a GCP SA keyfile, base64-encoded.
    image_pull_secret_data: The contents of a secret that will be used as an
      image pull secret for the provided Docker image.
    membership_name: The name of the membership that this manifest is being
      generated for.
    proxy: The HTTP proxy that the agent should use, in the form
      http[s]://<proxy>

  Returns:
    A tuple, containing (
      a string, a YAML manifest that can be used to install the agent,
      the name of the Connect agent install Deployment
    )
  """
  project_number = p_util.GetProjectNumber(project_id)
  agent_install_deployment_name = 'gke-connect-agent-installer'

  install_manifest = INSTALL_MANIFEST_TEMPLATE.format(
      namespace=namespace,
      connect_resource_label=CONNECT_RESOURCE_LABEL,
      project_id=project_id,
      project_number=project_number,
      membership_name=membership_name or '',
      proxy=proxy or '',
      image=image,
      gcp_sa_key=sa_key_data,
      agent_install_deployment_name=agent_install_deployment_name,
      agent_install_app_label=AGENT_INSTALL_APP_LABEL)

  if image_pull_secret_data:
    # The indentation of this string literal is important: it must be
    # appendable to the bottom of the deployment_manifest.
    image_pull_secret_section = """\
      imagePullSecrets:
        - name: {}""".format(IMAGE_PULL_SECRET_NAME)

    install_manifest = '{}\n{}\n---\n{}'.format(
        install_manifest, image_pull_secret_section,
        IMAGE_PULL_SECRET_TEMPLATE.format(
            name=IMAGE_PULL_SECRET_NAME,
            namespace=namespace,
            connect_resource_label=CONNECT_RESOURCE_LABEL,
            project_id=project_id,
            image_pull_secret=image_pull_secret_data))

  return install_manifest, agent_install_deployment_name
Example 11
def _ParseProjectNumberFromNetwork(network, user_project):
    """Retrieves the project field from the provided network value."""
    try:
        registry = resources.REGISTRY.Clone()
        network_ref = registry.Parse(network, collection='compute.networks')
        project_identifier = network_ref.project
    except resources.Error:
        # If not a parseable resource string, then use user_project
        project_identifier = user_project

    return projects_command_util.GetProjectNumber(project_identifier)
def Project(number=False):
    """Simple helper for getting the current project.

    Args:
      number: Boolean, whether to return the project number instead of the ID.

    Returns:
      The project ID or project number, as a string.
    """
    project = properties.VALUES.core.project.GetOrFail()
    if number:
        return project_util.GetProjectNumber(project)
    return project
Example 13
  def Run(self, args):
    """Run 'services vpc-peerings list'.

    Args:
      args: argparse.Namespace, The arguments that this command was invoked
          with.

    Returns:
      The list of connections.
    """
    project = properties.VALUES.core.project.Get(required=True)
    project_number = projects_util.GetProjectNumber(project)
    conns = peering.ListConnections(project_number, args.service, args.network)
    return iter(conns)
Example 14
    def ListReleasesByTarget(self, target_ref):
        """Lists the releases in a target.

    Args:
      target_ref: target object.

    Returns:
      a list of release messages.
    """
        target_dict = target_ref.AsDict()
        project_number = p_util.GetProjectNumber(target_dict['projectsId'])
        request = self.messages.ClouddeployProjectsLocationsDeliveryPipelinesReleasesListRequest(
            parent=target_ref.Parent().RelativeName(),
            filter=TARGET_FILTER_TEMPLATE.format(
                project_number, target_dict['locationsId'],
                target_dict['deliveryPipelinesId'], target_dict['targetsId']))
        return self._service.List(request).releases
Example 15
def CheckServiceAccountPermission(response, args):
    """Checks and grants key encrypt/decrypt permission for service account.

  Checks if Artifact Registry service account has encrypter/decrypter or owner
  role for the given key. If not, prompts users to grant key encrypter/decrypter
  permission to the service account. If users say no to the prompt, logs a
  message and points to the official documentation.

  Args:
    response: Create repository response.
    args: User input arguments.

  Returns:
    Create repository response.
  """
    if args.kms_key:
        project_num = project_util.GetProjectNumber(GetProject(args))
        service_account = _AR_SERVICE_ACCOUNT.format(project_num=project_num)

        policy = ar_requests.GetCryptoKeyPolicy(args.kms_key)
        has_permission = False
        for binding in policy.bindings:
            if service_account in binding.members and (
                    binding.role
                    == "roles/cloudkms.cryptoKeyEncrypterDecrypter"
                    or binding.role == "roles/owner"):
                has_permission = True
                break
        if not has_permission:
            cont = console_io.PromptContinue(prompt_string=(
                "\nDo you want to grant the Artifact Registry Service Account "
                "permission to encrypt/decrypt with the selected key [{key_name}]"
                .format(key_name=args.kms_key)),
                                             cancel_on_no=False)
            if not cont:
                log.status.Print(
                    "Note: You will need to grant the Artifact Registry Service "
                    "Account permissions to encrypt/decrypt on the selected key.\n"
                    "Learn more: https://cloud.google.com/artifact-registry/docs/cmek"
                )
                return response
            ar_requests.AddCryptoKeyPermission(args.kms_key, service_account)
            log.status.Print(
                "Added Cloud KMS CryptoKey Encrypter/Decrypter Role to [{key_name}]"
                .format(key_name=args.kms_key))
    return response
def _ApplySecretsArgsToFunction(function, args):
    """Populates cloud function message with secrets payload if applicable.

  It compares the CLI args with the existing secrets configuration to compute
  the effective secrets configuration.

  Args:
    function: Cloud function message to be checked and populated.
    args: All CLI arguments.

  Returns:
    updated_fields: update mask containing the list of fields to be updated.
  """
    if not secrets_config.IsArgsSpecified(args):
        return []

    old_secrets = secrets_util.GetSecretsAsDict(
        function.secretEnvironmentVariables, function.secretVolumes)
    new_secrets = {}
    try:
        new_secrets = secrets_config.ApplyFlags(
            old_secrets, args, _GetProject(),
            project_util.GetProjectNumber(_GetProject()))
    except ArgumentTypeError as error:
        exceptions.reraise(function_exceptions.FunctionsError(error))

    if new_secrets:
        _LogSecretsPermissionMessage(_GetProject(),
                                     function.serviceAccountEmail)

    old_secret_env_vars, old_secret_volumes = secrets_config.SplitSecretsDict(
        old_secrets)
    new_secret_env_vars, new_secret_volumes = secrets_config.SplitSecretsDict(
        new_secrets)

    updated_fields = []
    if old_secret_env_vars != new_secret_env_vars:
        function.secretEnvironmentVariables = secrets_util.SecretEnvVarsToMessages(
            new_secret_env_vars, api_util.GetApiMessagesModule())
        updated_fields.append('secretEnvironmentVariables')
    if old_secret_volumes != new_secret_volumes:
        function.secretVolumes = secrets_util.SecretVolumesToMessages(
            new_secret_volumes, api_util.GetApiMessagesModule())
        updated_fields.append('secretVolumes')
    return updated_fields
def GetFleetProject(args):
  """Gets and parses the fleet project argument.

  A project ID, if specified, is converted to a project number. The parsed fleet
  project has the format projects/<project-number>.

  Args:
    args: Arguments parsed from the command.

  Returns:
    The fleet project in the format projects/<project-number>,
    or None if the fleet project is not specified.
  """
  p = getattr(args, 'fleet_project', None)
  if not p:
    return None
  if not p.isdigit():
    return 'projects/{}'.format(project_util.GetProjectNumber(p))
  return 'projects/{}'.format(p)
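A quick sketch of the three outcomes of GetFleetProject, using argparse.Namespace as a stand-in for the parsed command arguments (values are made up):

import argparse

GetFleetProject(argparse.Namespace())                           # None: fleet project not set
GetFleetProject(argparse.Namespace(fleet_project='123456789'))  # 'projects/123456789' (already a number)
GetFleetProject(argparse.Namespace(fleet_project='my-project')) # 'projects/<number>' via GetProjectNumber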
Example 18
def SetDefaultScopeIfEmpty(unused_ref, args, request):
  """Update the request scope to fall back to core project if not specified.

  Used by Asset Search gcloud `modify_request_hooks`. When the --scope flag is not
  specified, it modifies request.scope to fall back to the core properties
  project.

  Args:
    unused_ref: unused.
    args: The argument namespace.
    request: The request to modify.

  Returns:
    The modified request.
  """
  if not args.IsSpecified('scope'):
    project_id = properties.VALUES.core.project.GetOrFail()
    request.scope = 'projects/{0}'.format(
        project_util.GetProjectNumber(project_id))
  return request
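Behaviorally, the hook is a pass-through when --scope was provided and only rewrites request.scope otherwise. A minimal sketch with stand-in args and request objects (not the real gcloud types):

class FakeArgs:
  def __init__(self, scope_specified):
    self._scope_specified = scope_specified

  def IsSpecified(self, name):
    return self._scope_specified


class FakeRequest:
  scope = None


# --scope given: the request is returned untouched.
SetDefaultScopeIfEmpty(None, FakeArgs(True), FakeRequest())
# --scope omitted: scope becomes 'projects/<number of the core project>', which
# needs the core/project property and a GetProjectNumber call.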
    def GenerateKubeconfig(self, service_name, project_id, location,
                           resource_type, membership):
        project_number = project_util.GetProjectNumber(project_id)
        kwargs = {
            'membership':
            membership,
            'location':
            location,
            'project_id':
            project_id,
            'server':
            SERVER_FORMAT.format(service_name=service_name,
                                 version=self.GetVersion(),
                                 project_number=project_number,
                                 location=location,
                                 resource_type=resource_type,
                                 membership=membership),
            'auth_provider':
            'gcp',
        }
        user_kwargs = {
            'auth_provider': 'gcp',
        }

        cluster_kwargs = {}
        context = KUBECONTEXT_FORMAT.format(project=project_id,
                                            location=location,
                                            membership=membership)
        kubeconfig = kconfig.Kubeconfig.Default()
        # Use same key for context, cluster, and user.
        kubeconfig.contexts[context] = kconfig.Context(context, context,
                                                       context)
        kubeconfig.users[context] = kconfig.User(context, **user_kwargs)
        kubeconfig.clusters[context] = kconfig.Cluster(context,
                                                       kwargs['server'],
                                                       **cluster_kwargs)
        kubeconfig.SetCurrentContext(context)
        kubeconfig.SaveToFile()
        return kubeconfig
Example 20
def GenerateInstallManifest(project_id, namespace, image, sa_key_data,
                            image_pull_secret_data, membership_name, proxy):
    """Generates the contents of the GKE Connect agent install manifest.

  Args:
    project_id: The GCP project identifier.
    namespace: The namespace into which to deploy the Connect agent.
    image: The container image to use in the Connect agent pod (and, later,
      deployment).
    sa_key_data: The contents of a GCP SA keyfile, base64-encoded.
    image_pull_secret_data: The contents of a secret that will be used as an
      image pull secret for the provided Docker image.
    membership_name: The name of the membership that this manifest is being
      generated for.
    proxy: The HTTP proxy that the agent should use, in the form
      http[s]://<proxy>

  Returns:
    A tuple, containing (
      a string, a YAML manifest that can be used to install the agent,
      a string, the subset of the manifest that relates to the agent install
        pod, and can be reverted,
      the name of the connect agent install pod
    )
  """
    project_number = p_util.GetProjectNumber(project_id)
    agent_pod_name = 'gke-connect-agent-{}'.format(uuid.uuid4().hex)

    namespace_manifest = NAMESPACE_MANIFEST_TEMPLATE.format(
        connect_resource_label=CONNECT_RESOURCE_LABEL,
        namespace=namespace,
        project_id=project_id)

    pod_manifest = INSTALL_POD_MANIFEST_TEMPLATE.format(
        namespace=namespace,
        agent_pod_name=agent_pod_name,
        agent_app_label=AGENT_POD_LABEL,
        project_id=project_id,
        image=image)

    non_deleted_resources_manifest = MANIFEST_TEMPLATE_FOR_NON_DELETED_RESOURCES.format(
        connect_resource_label=CONNECT_RESOURCE_LABEL,
        namespace=namespace,
        project_id=project_id,
        project_number=project_number,
        membership_name=membership_name or '',
        proxy=proxy or '',
        image=image,
        gcp_sa_key=sa_key_data)

    if image_pull_secret_data:
        # The indentation of this string literal is important: it must be
        # appendable to the bottom of the pod_manifest.
        image_pull_secret_section = """\
  imagePullSecrets:
    - name: {}""".format(IMAGE_PULL_SECRET_NAME)

        pod_manifest = '{}\n{}\n---\n{}'.format(
            pod_manifest, image_pull_secret_section,
            IMAGE_PULL_SECRET_TEMPLATE.format(
                name=IMAGE_PULL_SECRET_NAME,
                connect_resource_label=CONNECT_RESOURCE_LABEL,
                namespace=namespace,
                project_id=project_id,
                image_pull_secret=image_pull_secret_data))

    return '{}\n---\n{}\n---\n{}'.format(
        namespace_manifest, pod_manifest,
        non_deleted_resources_manifest), pod_manifest, agent_pod_name
Example 21
  def Run(self, args):
    """This is what gets called when the user runs this command.

    Args:
      args: an argparse namespace. All the arguments that were provided to this
        command invocation.

    Returns:
      Some value that we want to have printed later.
    """

    if args.pull_request_preview:
      if args.repo_type != 'github':
        raise c_exceptions.InvalidArgumentException(
            '--repo-type',
            "Repo type must be 'github' to configure pull request previewing.")
      if args.namespace:
        raise c_exceptions.InvalidArgumentException(
            '--namespace',
            'Namespace must not be provided to configure pull request '
            'previewing. --namespace must only be provided when configuring '
            'automated deployments with the --branch-pattern or --tag-pattern '
            'flags.')
      if args.preview_expiry <= 0:
        raise c_exceptions.InvalidArgumentException(
            '--preview-expiry',
            'Preview expiry must be > 0.')

    # Determine github or csr
    github_repo_name = None
    github_repo_owner = None
    csr_repo_name = None

    if args.repo_type == 'github':
      if not args.repo_owner:
        raise c_exceptions.RequiredArgumentException(
            '--repo-owner',
            'Repo owner is required for --repo-type=github.')
      github_repo_name = args.repo_name
      github_repo_owner = args.repo_owner
      # We do not have to verify that this repo exists because the request to
      # create the BuildTrigger will fail with the appropriate message asking
      # the user to connect their repo, if the repo is not found.

    elif args.repo_type == 'csr':
      if args.repo_owner:
        raise c_exceptions.InvalidArgumentException(
            '--repo-owner',
            'Repo owner must not be provided for --repo-type=csr.')
      csr_repo_name = args.repo_name
      self._VerifyCSRRepoExists(csr_repo_name)

    elif args.repo_type == 'bitbucket_mirrored':
      if not args.repo_owner:
        raise c_exceptions.RequiredArgumentException(
            '--repo-owner',
            'Repo owner is required for --repo-type=bitbucket_mirrored.')
      csr_repo_name = 'bitbucket_{}_{}'.format(args.repo_owner, args.repo_name)
      self._VerifyBitbucketCSRRepoExists(
          csr_repo_name, args.repo_owner, args.repo_name)

    elif args.repo_type == 'github_mirrored':
      if not args.repo_owner:
        raise c_exceptions.RequiredArgumentException(
            '--repo-owner',
            'Repo owner is required for --repo-type=github_mirrored.')
      csr_repo_name = 'github_{}_{}'.format(args.repo_owner, args.repo_name)
      self._VerifyGitHubCSRRepoExists(
          csr_repo_name, args.repo_owner, args.repo_name)

    self._VerifyClusterExists(args.cluster, args.location)

    # Determine app_name
    if args.app_name:
      app_name = args.app_name
    else:
      app_name = args.repo_name

    # Determine gcs_config_staging_dir_bucket, gcs_config_staging_dir_object
    if args.gcs_config_staging_dir is None:
      gcs_config_staging_dir_bucket = \
        staging_bucket_util.GetDefaultStagingBucket()
      gcs_config_staging_dir_object = 'deploy/config'
    else:
      try:
        gcs_config_staging_dir_ref = resources.REGISTRY.Parse(
            args.gcs_config_staging_dir, collection='storage.objects')
        gcs_config_staging_dir_object = gcs_config_staging_dir_ref.object
      except resources.WrongResourceCollectionException:
        gcs_config_staging_dir_ref = resources.REGISTRY.Parse(
            args.gcs_config_staging_dir, collection='storage.buckets')
        gcs_config_staging_dir_object = None
      gcs_config_staging_dir_bucket = gcs_config_staging_dir_ref.bucket

    gcs_client = storage_api.StorageClient()
    gcs_client.CreateBucketIfNotExists(gcs_config_staging_dir_bucket)

    # If we are using a default bucket check that it is owned by user project
    # (b/33046325)
    if (args.gcs_config_staging_dir is None
        and not staging_bucket_util.BucketIsInProject(
            gcs_client, gcs_config_staging_dir_bucket)):
      raise c_exceptions.RequiredArgumentException(
          '--gcs-config-staging-dir',
          'A bucket with name {} already exists and is owned by '
          'another project. Specify a bucket using '
          '--gcs-config-staging-dir.'.format(gcs_config_staging_dir_bucket))

    if gcs_config_staging_dir_object:
      gcs_config_staging_path = '{}/{}'.format(
          gcs_config_staging_dir_bucket, gcs_config_staging_dir_object)
    else:
      gcs_config_staging_path = gcs_config_staging_dir_bucket

    project = properties.VALUES.core.project.Get(required=True)
    project_number = projects_util.GetProjectNumber(project)
    cloudbuild_service_account = '{}@cloudbuild.gserviceaccount.com'.format(
        project_number)
    log.status.Print(
        'Add the roles/container.developer role to your Cloud Build '
        'service agent account, if you have not already done so. This allows '
        'the account to deploy to your cluster:\n\n'
        'gcloud projects add-iam-policy-binding {project} '
        '--member=serviceAccount:{service_account_email} '
        '--role=roles/container.developer --project={project}\n'.format(
            project=project,
            service_account_email=cloudbuild_service_account
        ))

    if args.pull_request_preview:
      self._ConfigurePRPreview(
          repo_owner=github_repo_owner,
          repo_name=github_repo_name,
          pull_request_pattern=args.pull_request_pattern,
          preview_expiry=args.preview_expiry,
          comment_control=args.comment_control,
          dockerfile_path=args.dockerfile,
          app_name=app_name,
          config_path=args.config,
          expose_port=args.expose,
          gcs_config_staging_path=gcs_config_staging_path,
          cluster=args.cluster,
          location=args.location)
    else:
      self._ConfigureGitPushBuildTrigger(
          repo_type=args.repo_type,
          csr_repo_name=csr_repo_name,
          github_repo_owner=github_repo_owner,
          github_repo_name=github_repo_name,
          branch_pattern=args.branch_pattern,
          tag_pattern=args.tag_pattern,
          dockerfile_path=args.dockerfile,
          app_name=app_name,
          config_path=args.config,
          namespace=args.namespace,
          expose_port=args.expose,
          gcs_config_staging_path=gcs_config_staging_path,
          cluster=args.cluster,
          location=args.location)
Example 22
    def Run(self, args):
        """Create or Update service from YAML."""
        conn_context = connection_context.GetConnectionContext(
            args, flags.Product.RUN, self.ReleaseTrack())

        with serverless_operations.Connect(conn_context) as client:
            try:
                new_service = service.Service(
                    messages_util.DictToMessageWithErrorCheck(
                        args.FILE, client.messages_module.Service),
                    client.messages_module)
            except messages_util.ScalarTypeMismatchError as e:
                exceptions.MaybeRaiseCustomFieldMismatch(e)

            # If managed, namespace must match project (or will default to project if
            # not specified).
            # If not managed, namespace simply must not conflict if specified in
            # multiple places (or will default to "default" if not specified).
            namespace = args.CONCEPTS.namespace.Parse().Name(
            )  # From flag or default
            if new_service.metadata.namespace is not None:
                if (args.IsSpecified('namespace')
                        and namespace != new_service.metadata.namespace):
                    raise exceptions.ConfigurationError(
                        'Namespace specified in file does not match passed flag.'
                    )
                namespace = new_service.metadata.namespace
                if flags.GetPlatform() == flags.PLATFORM_MANAGED:
                    project = properties.VALUES.core.project.Get()
                    project_number = projects_util.GetProjectNumber(project)
                    if namespace != project and namespace != str(
                            project_number):
                        raise exceptions.ConfigurationError(
                            'Namespace must be project ID [{}] or quoted number [{}] for '
                            'Cloud Run (fully managed).'.format(
                                project, project_number))
            new_service.metadata.namespace = namespace

            changes = [
                config_changes.ReplaceServiceChange(new_service),
                config_changes.SetLaunchStageAnnotationChange(
                    self.ReleaseTrack())
            ]
            service_ref = resources.REGISTRY.Parse(
                new_service.metadata.name,
                params={'namespacesId': new_service.metadata.namespace},
                collection='run.namespaces.services')
            service_obj = client.GetService(service_ref)

            pretty_print.Info(
                run_messages_util.GetStartDeployMessage(
                    conn_context,
                    service_ref,
                    operation='Applying new configuration'))

            deployment_stages = stages.ServiceStages()
            header = ('Deploying...'
                      if service_obj else 'Deploying new service...')
            with progress_tracker.StagedProgressTracker(
                    header,
                    deployment_stages,
                    failure_message='Deployment failed',
                    suppress_output=args.async_) as tracker:
                service_obj = client.ReleaseService(service_ref,
                                                    changes,
                                                    tracker,
                                                    asyn=args.async_,
                                                    allow_unauthenticated=None,
                                                    for_replace=True)
            if args.async_:
                pretty_print.Success(
                    'New configuration for [{{bold}}{serv}{{reset}}] is being applied '
                    'asynchronously.'.format(serv=service_obj.name))
            else:
                service_obj = client.GetService(service_ref)
                pretty_print.Success(
                    'New configuration has been applied to service '
                    '[{{bold}}{serv}{{reset}}].\n'
                    'URL: {{bold}}{url}{{reset}}'.format(
                        serv=service_obj.name, url=service_obj.domain))
            return service_obj
Example 23
    def Run(self, args):
        """Create or Update service from YAML."""
        conn_context = connection_context.GetConnectionContext(
            args, product=flags.Product.RUN)

        with serverless_operations.Connect(conn_context) as client:
            new_service = service.Service(
                messages_util.DictToMessageWithErrorCheck(
                    args.FILE, client.messages_module.Service),
                client.messages_module)

            # If managed, namespace must match project (or will default to project if
            # not specified).
            # If not managed, namespace simply must not conflict if specified in
            # multiple places (or will default to "default" if not specified).
            namespace = args.CONCEPTS.namespace.Parse().Name(
            )  # From flag or default
            if new_service.metadata.namespace is not None:
                if (args.IsSpecified('namespace')
                        and namespace != new_service.metadata.namespace):
                    raise exceptions.ConfigurationError(
                        'Namespace specified in file does not match passed flag.'
                    )
                namespace = new_service.metadata.namespace
                if flags.GetPlatform() == flags.PLATFORM_MANAGED:
                    project = properties.VALUES.core.project.Get()
                    project_number = projects_util.GetProjectNumber(project)
                    if namespace != project and namespace != str(
                            project_number):
                        raise exceptions.ConfigurationError(
                            'Namespace must be project ID [{}] or quoted number [{}] for '
                            'Cloud Run (fully managed).'.format(
                                project, project_number))
            new_service.metadata.namespace = namespace

            changes = [config_changes.ReplaceServiceChange(new_service)]
            service_ref = resources.REGISTRY.Parse(
                new_service.metadata.name,
                params={'namespacesId': new_service.metadata.namespace},
                collection='run.namespaces.services')
            original_service = client.GetService(service_ref)

            pretty_print.Info(
                deploy.GetStartDeployMessage(conn_context, service_ref))

            deployment_stages = stages.ServiceStages()
            header = ('Deploying...'
                      if original_service else 'Deploying new service...')
            with progress_tracker.StagedProgressTracker(
                    header,
                    deployment_stages,
                    failure_message='Deployment failed',
                    suppress_output=args.async_) as tracker:
                client.ReleaseService(service_ref,
                                      changes,
                                      tracker,
                                      asyn=args.async_,
                                      allow_unauthenticated=None,
                                      for_replace=True)
            if args.async_:
                pretty_print.Success(
                    'Service [{{bold}}{serv}{{reset}}] is deploying '
                    'asynchronously.'.format(serv=service_ref.servicesId))
            else:
                pretty_print.Success(
                    deploy.GetSuccessMessageForSynchronousDeploy(
                        client, service_ref))
def _EnsureProjectNumber(p):
    """Converts a project ID to its project number, passing through None and digits."""
    if p is None:
        return p
    if p.isdigit():
        return p
    return project_util.GetProjectNumber(p)
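_EnsureProjectNumber simply normalizes whatever identifier it is given; for example (values made up):

_EnsureProjectNumber(None)          # None
_EnsureProjectNumber('123456789')   # '123456789' (already a number)
_EnsureProjectNumber('my-project')  # project number looked up via GetProjectNumber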
    def Run(self, args):
        """Create or Update service from YAML."""
        run_messages = apis.GetMessagesModule(
            global_methods.SERVERLESS_API_NAME,
            global_methods.SERVERLESS_API_VERSION)
        service_dict = dict(args.FILE)
        # Clear the status to make migration from k8s deployments easier.
        # Since a Deployment status will have several fields that Cloud Run doesn't
        # support, trying to convert it to a message as-is will fail even though
        # status is ignored by the server.
        if 'status' in service_dict:
            del service_dict['status']

        # For cases where YAML contains the project number as metadata.namespace,
        # preemptively convert them to a string to avoid validation failures.
        namespace = service_dict.get('metadata', {}).get('namespace', None)
        if namespace is not None and not isinstance(namespace, str):
            service_dict['metadata']['namespace'] = str(namespace)

        try:
            raw_service = messages_util.DictToMessageWithErrorCheck(
                service_dict, run_messages.Service)
            new_service = service.Service(raw_service, run_messages)
        except messages_util.ScalarTypeMismatchError as e:
            exceptions.MaybeRaiseCustomFieldMismatch(
                e,
                help_text=
                'Please make sure that the YAML file matches the Knative '
                'service definition spec in https://kubernetes.io/docs/'
                'reference/kubernetes-api/services-resources/service-v1/'
                '#Service.')

        # If managed, namespace must match project (or will default to project if
        # not specified).
        # If not managed, namespace simply must not conflict if specified in
        # multiple places (or will default to "default" if not specified).
        namespace = args.CONCEPTS.namespace.Parse().Name(
        )  # From flag or default
        if new_service.metadata.namespace is not None:
            if (args.IsSpecified('namespace')
                    and namespace != new_service.metadata.namespace):
                raise exceptions.ConfigurationError(
                    'Namespace specified in file does not match passed flag.')
            namespace = new_service.metadata.namespace
            if platforms.GetPlatform() == platforms.PLATFORM_MANAGED:
                project = properties.VALUES.core.project.Get()
                project_number = projects_util.GetProjectNumber(project)
                if namespace != project and namespace != str(project_number):
                    raise exceptions.ConfigurationError(
                        'Namespace must be project ID [{}] or quoted number [{}] for '
                        'Cloud Run (fully managed).'.format(
                            project, project_number))
        new_service.metadata.namespace = namespace

        changes = [
            config_changes.ReplaceServiceChange(new_service),
            config_changes.SetLaunchStageAnnotationChange(self.ReleaseTrack())
        ]
        service_ref = resources.REGISTRY.Parse(
            new_service.metadata.name,
            params={'namespacesId': new_service.metadata.namespace},
            collection='run.namespaces.services')

        region_label = new_service.region if new_service.is_managed else None

        conn_context = connection_context.GetConnectionContext(
            args,
            flags.Product.RUN,
            self.ReleaseTrack(),
            region_label=region_label)

        with serverless_operations.Connect(conn_context) as client:
            service_obj = client.GetService(service_ref)

            pretty_print.Info(
                run_messages_util.GetStartDeployMessage(
                    conn_context,
                    service_ref,
                    operation='Applying new configuration to'))

            deployment_stages = stages.ServiceStages()
            header = ('Deploying...'
                      if service_obj else 'Deploying new service...')
            with progress_tracker.StagedProgressTracker(
                    header,
                    deployment_stages,
                    failure_message='Deployment failed',
                    suppress_output=args.async_) as tracker:
                service_obj = client.ReleaseService(service_ref,
                                                    changes,
                                                    tracker,
                                                    asyn=args.async_,
                                                    allow_unauthenticated=None,
                                                    for_replace=True)
            if args.async_:
                pretty_print.Success(
                    'New configuration for [{{bold}}{serv}{{reset}}] is being applied '
                    'asynchronously.'.format(serv=service_obj.name))
            else:
                service_obj = client.GetService(service_ref)
                pretty_print.Success(
                    'New configuration has been applied to service '
                    '[{{bold}}{serv}{{reset}}].\n'
                    'URL: {{bold}}{url}{{reset}}'.format(
                        serv=service_obj.name, url=service_obj.domain))
            return service_obj
def _ModifyInstanceTemplate(args, is_mcp, metadata_args):
    """Modify the instance template to include the service proxy metadata."""

    if metadata_args.asm_labels:
        asm_labels = metadata_args.asm_labels
    else:
        asm_labels = collections.OrderedDict()

    asm_labels[
        _ISTIO_CANONICAL_SERVICE_NAME_LABEL] = metadata_args.canonical_service
    asm_labels[
        _ISTIO_CANONICAL_SERVICE_REVISION_LABEL] = metadata_args.canonical_revision

    asm_labels_string = json.dumps(asm_labels, sort_keys=True)

    service_proxy_config = collections.OrderedDict()
    service_proxy_config['mode'] = 'ON'

    service_proxy_config['proxy-spec'] = {
        'network': metadata_args.network,
        'api-server': metadata_args.service_proxy_api_server,
        'log-level': 'info',
    }

    service_proxy_config['service'] = {}

    proxy_config = metadata_args.asm_proxy_config
    if not proxy_config:
        proxy_config = collections.OrderedDict()
    if 'proxyMetadata' not in proxy_config:
        proxy_config['proxyMetadata'] = collections.OrderedDict()
    else:
        proxy_config['proxyMetadata'] = collections.OrderedDict(
            proxy_config['proxyMetadata'])

    proxy_metadata = proxy_config['proxyMetadata']
    proxy_metadata['ISTIO_META_WORKLOAD_NAME'] = metadata_args.workload_name
    proxy_metadata['POD_NAMESPACE'] = metadata_args.workload_namespace
    proxy_metadata['USE_TOKEN_FOR_CSR'] = 'true'
    proxy_metadata['ISTIO_META_DNS_CAPTURE'] = 'true'
    proxy_metadata[
        'ISTIO_META_AUTO_REGISTER_GROUP'] = metadata_args.workload_name
    proxy_metadata['SERVICE_ACCOUNT'] = metadata_args.service_account
    proxy_metadata[
        'CREDENTIAL_IDENTITY_PROVIDER'] = metadata_args.identity_provider
    if metadata_args.trust_domain:
        proxy_metadata['TRUST_DOMAIN'] = metadata_args.trust_domain
    if metadata_args.mesh_id:
        proxy_metadata['ISTIO_META_MESH_ID'] = metadata_args.mesh_id
    proxy_metadata['ISTIO_META_NETWORK'] = '{}-{}'.format(
        metadata_args.project_id, metadata_args.network)
    proxy_metadata['CANONICAL_SERVICE'] = metadata_args.canonical_service
    proxy_metadata['CANONICAL_REVISION'] = metadata_args.canonical_revision
    proxy_metadata['ISTIO_METAJSON_LABELS'] = asm_labels_string

    if metadata_args.asm_revision == 'default':
        proxy_metadata['ASM_REVISION'] = ''
    else:
        proxy_metadata['ASM_REVISION'] = metadata_args.asm_revision

    gce_software_declaration = collections.OrderedDict()
    service_proxy_agent_recipe = collections.OrderedDict()

    service_proxy_agent_recipe['name'] = 'install-gce-service-proxy-agent'
    service_proxy_agent_recipe['desired_state'] = 'INSTALLED'

    if is_mcp:
        service_proxy_agent_recipe['installSteps'] = [{
            'scriptRun': {
                'script':
                service_proxy_aux_data.
                startup_script_for_asm_service_proxy_installer
            }
        }]
        proxy_metadata.update(metadata_args.mcp_env_config)
        # ISTIO_META_CLOUDRUN_ADDR must be set to generate node metadata on VM.
        if _CLOUDRUN_ADDR_KEY in proxy_metadata:
            proxy_metadata[_ISTIO_META_CLOUDRUN_ADDR_KEY] = proxy_metadata[
                _CLOUDRUN_ADDR_KEY]
        if 'gce-service-proxy-installer-bucket' not in args.metadata:
            args.metadata['gce-service-proxy-installer-bucket'] = (
                _SERVICE_PROXY_INSTALLER_BUCKET_NAME)
    else:
        service_proxy_agent_recipe['installSteps'] = [{
            'scriptRun': {
                'script':
                service_proxy_aux_data.startup_script_for_asm_service_proxy.
                format(ingress_ip=metadata_args.expansionagateway_ip,
                       asm_revision=metadata_args.asm_revision)
            }
        }]
        proxy_metadata['ISTIO_META_ISTIO_VERSION'] = metadata_args.asm_version
        args.metadata['rootcert'] = metadata_args.root_cert
        if _GCE_SERVICE_PROXY_AGENT_BUCKET_METADATA not in args.metadata:
            args.metadata[_GCE_SERVICE_PROXY_AGENT_BUCKET_METADATA] = (
                _SERVICE_PROXY_BUCKET_NAME.format(metadata_args.asm_version))

    gce_software_declaration['softwareRecipes'] = [service_proxy_agent_recipe]

    service_proxy_config['asm-config'] = proxy_config

    args.metadata['enable-osconfig'] = 'true'
    args.metadata['enable-guest-attributes'] = 'true'
    args.metadata['osconfig-disabled-features'] = 'tasks'
    args.metadata['gce-software-declaration'] = json.dumps(
        gce_software_declaration)
    args.metadata['gce-service-proxy'] = json.dumps(service_proxy_config,
                                                    sort_keys=True)

    if args.labels is None:
        args.labels = collections.OrderedDict()
    args.labels['asm_service_name'] = metadata_args.canonical_service
    args.labels['asm_service_namespace'] = metadata_args.workload_namespace
    if metadata_args.mesh_id:
        args.labels['mesh_id'] = metadata_args.mesh_id
    else:
        # This works for now because we only support adding the VM to the Fleet
        # project, but it should use the Fleet project instead.
        project_number = project_util.GetProjectNumber(
            metadata_args.project_id)
        args.labels['mesh_id'] = 'proj-{}'.format(project_number)
    # For ASM VM usage tracking.
    args.labels['gce-service-proxy'] = 'asm-istiod'