Example No. 1
    def Run(self, args):
        """This is what ts called when the user runs this command.

    Args:
      args: an argparse namespace. All the arguments that were provided to this
        command invocation.

    Raises:
      InvalidImageNameError: If the user specified an invalid image name.

    Returns:
      A list of the deleted docker_name.Tag and docker_name.Digest objects.
    """
        # IMAGE_NAME: The fully-qualified image name to delete (with a digest).
        # Deletes the layers. Ex. gcr.io/google-appengine/java(@DIGEST|:TAG).

        http_obj = util.Http()
        with util.WrapExpectedDockerlessErrors():
            # collect input/validate
            digests, explicit_tags = self._ProcessImageNames(args.image_names)

            # Resolve tags to digests.
            for tag in explicit_tags:
                digests.add(util.GetDigestFromName(str(tag)))

            # Find all the tags that reference digests to be deleted.
            all_tags = set()
            for digest in digests:
                all_tags.update(util.GetDockerTagsForDigest(digest, http_obj))

            # Find all the tags that weren't specified explicitly.
            implicit_tags = all_tags.difference(explicit_tags)

            if implicit_tags and not args.force_delete_tags:
                log.error('Tags:')
                for tag in implicit_tags:
                    log.error('- ' + str(tag))
                raise exceptions.Error(
                    'This operation will implicitly delete the tags listed above. '
                    'Please manually remove with the `untag` command or re-run with '
                    '--force-delete-tags to confirm.')

            # Print the digests to be deleted.
            if digests:
                log.status.Print('Digests:')
            for digest in digests:
                self._PrintDigest(digest, http_obj)

            # Print the tags to be deleted.
            if explicit_tags:
                log.status.Print('Tags:')
            for tag in explicit_tags:
                log.status.Print('- ' + str(tag))

            # Prompt the user for consent to delete all the above.
            console_io.PromptContinue(
                'This operation will delete the tags and images identified by the '
                'digests above.',
                default=True,
                cancel_on_no=True)

            # The user has given explicit consent, merge the tags.
            explicit_tags.update(implicit_tags)

            # delete and collect output
            result = []
            for tag in explicit_tags:  # tags must be deleted before digests
                self._DeleteDockerTagOrDigest(tag, http_obj)
                result.append({'name': str(tag)})
            for digest in digests:
                self._DeleteDockerTagOrDigest(digest, http_obj)
                result.append({'name': str(digest)})
            return result
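
All of these examples gate a mutating operation behind console_io.PromptContinue. A minimal sketch of that shared pattern, assuming only the standard googlecloudsdk.core.console import path (the resource name and the _DoDelete helper are hypothetical):

from googlecloudsdk.core.console import console_io

def _DeleteWithConfirmation(resource_name):
    # With cancel_on_no=True, answering "n" raises
    # console_io.OperationCancelledError instead of returning False, so the
    # code after the prompt runs only on explicit consent.
    console_io.PromptContinue(
        message='This will delete [{}].'.format(resource_name),
        default=True,
        cancel_on_no=True)
    _DoDelete(resource_name)  # hypothetical deletion helper

Without cancel_on_no, PromptContinue returns a bool; Example No. 9 below checks that return value and raises its own error on refusal.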
Example No. 2
    def Run(self, args):
        project = arg_utils.GetFromNamespace(args,
                                             '--project',
                                             use_defaults=True)

        # This incidentally verifies that the kubeconfig and context args are valid.
        kube_client = kube_util.KubernetesClient(args)
        uuid = kube_util.GetClusterUUID(kube_client)

        self._VerifyClusterExclusivity(kube_client, project, args.context,
                                       uuid)

        # Read the service account files provided in the arguments early, in order
        # to catch invalid files before performing mutating operations.
        try:
            service_account_key_data = hub_util.Base64EncodedFileContents(
                args.service_account_key_file)
        except files.Error as e:
            raise exceptions.Error('Could not process {}: {}'.format(
                SERVICE_ACCOUNT_KEY_FILE_FLAG, e))

        docker_credential_data = None
        if args.docker_credential_file:
            try:
                docker_credential_data = hub_util.Base64EncodedFileContents(
                    args.docker_credential_file)
            except files.Error as e:
                raise exceptions.Error('Could not process {}: {}'.format(
                    DOCKER_CREDENTIAL_FILE_FLAG, e))

        gke_cluster_self_link = api_util.GKEClusterSelfLink(args)

        # The full resource name of the membership for this registration flow.
        name = 'projects/{}/locations/global/memberships/{}'.format(
            project, uuid)
        # Attempt to create a membership.
        already_exists = False
        try:
            exclusivity_util.ApplyMembershipResources(kube_client, project)
            obj = api_util.CreateMembership(project, uuid, args.CLUSTER_NAME,
                                            gke_cluster_self_link)
        except apitools_exceptions.HttpConflictError as e:
            # If the error is not due to the object already existing, re-raise.
            error = core_api_exceptions.HttpErrorPayload(e)
            if error.status_description != 'ALREADY_EXISTS':
                raise

            # The membership already exists. Check to see if it has the same
            # description (i.e., user-visible cluster name).
            #
            # This intentionally does not verify that the gke_cluster_self_link is
            # equivalent: this check is meant to prevent the user from updating the
            # Connect agent in a cluster that is different from the one that they
            # expect, and is not required for the proper functioning of the agent or
            # the Hub.
            obj = api_util.GetMembership(name)
            if obj.description != args.CLUSTER_NAME:
                # A membership exists, but does not have the same description. This is
                # possible if two different users attempt to register the same
                # cluster, or if the user is upgrading and has passed a different
                # cluster name. Treat this as an error: even in the upgrade case,
                # this is useful to prevent the user from upgrading the wrong cluster.
                raise exceptions.Error(
                    'There is an existing membership, [{}], that conflicts with [{}]. '
                    'Please delete it before continuing:\n\n'
                    '  gcloud {}container memberships delete {}'.format(
                        obj.description, args.CLUSTER_NAME,
                        hub_util.ReleaseTrackCommandPrefix(
                            self.ReleaseTrack()), name))

            # The membership exists and has the same description.
            already_exists = True
            console_io.PromptContinue(
                message='A membership for [{}] already exists. Continuing will '
                'reinstall the Connect agent deployment to use a new image (if one '
                'is available).'.format(args.CLUSTER_NAME),
                cancel_on_no=True)

        # A membership exists. Attempt to update the existing agent deployment, or
        # install a new agent if necessary.
        if already_exists:
            obj = api_util.GetMembership(name)
            agent_util.DeployConnectAgent(args, service_account_key_data,
                                          docker_credential_data, name)
            return obj

        # No membership exists. Attempt to create a new one, and install a new
        # agent.
        try:
            agent_util.DeployConnectAgent(args, service_account_key_data,
                                          docker_credential_data, name)
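        # On any failure, roll back the membership and its in-cluster
        # resources before re-raising, so a partial registration is not left
        # behind.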
        except:
            api_util.DeleteMembership(name)
            exclusivity_util.DeleteMembershipResources(kube_client)
            raise
        return obj
Example No. 3
  def Run(self, args):
    holder = base_classes.ComputeApiHolder(self.ReleaseTrack())
    client = holder.client
    start = time_util.CurrentTimeSec()

    # Set up Encryption utilities.
    openssl_executable = files.FindExecutableOnPath('openssl')
    if windows_encryption_utils:
      crypt = windows_encryption_utils.WinCrypt()
    elif openssl_executable:
      crypt = openssl_encryption_utils.OpensslCrypt(openssl_executable)
    else:
      raise utils.MissingDependencyError(
          'Your platform does not support OpenSSL.')

    # Get Authenticated email address and default username.
    email = properties.VALUES.core.account.GetOrFail()
    if args.user:
      user = args.user
    else:
      user = gaia.MapGaiaEmailToDefaultAccountName(email)

    if args.instance_name == user:
      raise utils.InvalidUserError(
          MACHINE_USERNAME_SAME_ERROR.format(user, args.instance_name))

    # Warn user (This warning doesn't show for non-interactive sessions).
    message = RESET_PASSWORD_WARNING.format(user)
    prompt_string = ('Would you like to set or reset the password for [{0}]'
                     .format(user))
    console_io.PromptContinue(
        message=message,
        prompt_string=prompt_string,
        cancel_on_no=True)

    log.status.Print('Resetting and retrieving password for [{0}] on [{1}]'
                     .format(user, args.instance_name))

    # Get Encryption Keys.
    key = crypt.GetKeyPair()
    modulus, exponent = crypt.GetModulusExponentFromPublicKey(
        crypt.GetPublicKey(key))

    # Create Windows key entry.
    self.windows_key_entry = self._ConstructWindowsKeyEntry(
        user, modulus, exponent, email)

    # Call ReadWriteCommand.Run() which will fetch the instance and update
    # the metadata (using the data in self.windows_key_entry).
    instance_ref = self.CreateReference(client, holder.resources, args)
    get_request = self.GetGetRequest(client, instance_ref)

    objects = client.MakeRequests([get_request])

    new_object = self.Modify(client, objects[0])

    # If existing object is equal to the proposed object or if
    # Modify() returns None, then there is no work to be done, so we
    # print the resource and return.
    if objects[0] == new_object:
      log.status.Print(
          'No change requested; skipping update for [{0}].'.format(
              objects[0].name))
      return objects

    updated_instance = client.MakeRequests(
        [self.GetSetRequest(client, instance_ref, new_object)])[0]

    # Retrieve and Decrypt the password from the serial console.
    enc_password = self._GetEncryptedPasswordFromSerialPort(
        client, instance_ref, modulus)
    password = crypt.DecryptMessage(key, enc_password)

    # Get External IP address.
    try:
      access_configs = updated_instance.networkInterfaces[0].accessConfigs
      external_ip_address = access_configs[0].natIP
    except (KeyError, IndexError) as _:
      log.warning(NO_IP_WARNING.format(updated_instance.name))
      external_ip_address = None

    # Check for old Windows credentials.
    if self.old_metadata_keys:
      log.warning(OLD_KEYS_WARNING.format(instance_ref.instance,
                                          instance_ref.instance,
                                          instance_ref.zone,
                                          ','.join(self.old_metadata_keys)))

    log.info('Total Elapsed Time: {0}'
             .format(time_util.CurrentTimeSec() - start))

    # The connection info resource.
    connection_info = {'username': user,
                       'password': password,
                       'ip_address': external_ip_address}
    return connection_info
Example No. 4
def RunDeploy(
        args,
        enable_endpoints=False,
        use_beta_stager=False,
        runtime_builder_strategy=runtime_builders.RuntimeBuilderStrategy.NEVER,
        use_service_management=False,
        check_for_stopped=False):
    """Perform a deployment based on the given args.

  Args:
    args: argparse.Namespace, An object that contains the values for the
        arguments specified in the ArgsDeploy() function.
    enable_endpoints: Enable Cloud Endpoints for the deployed app.
    use_beta_stager: Use the stager registry defined for the beta track rather
        than the default stager registry.
    runtime_builder_strategy: runtime_builders.RuntimeBuilderStrategy, when to
      use the new CloudBuild-based runtime builders (alternative is old
      externalized runtimes).
    use_service_management: bool, whether to use servicemanagement API to
      enable the Appengine Flexible API for a Flexible deployment.
    check_for_stopped: bool, whether to check if the app is stopped before
      deploying.

  Returns:
    A dict of the form `{'versions': new_versions, 'configs': updated_configs}`
    where new_versions is a list of version_util.Version, and updated_configs
    is a list of config file identifiers, see yaml_parsing.ConfigYamlInfo.
  """
    project = properties.VALUES.core.project.Get(required=True)
    deploy_options = DeployOptions.FromProperties(
        enable_endpoints, runtime_builder_strategy=runtime_builder_strategy)

    with files.TemporaryDirectory() as staging_area:
        if args.skip_staging:
            stager = staging.GetNoopStager(staging_area)
        elif use_beta_stager:
            stager = staging.GetBetaStager(staging_area)
        else:
            stager = staging.GetStager(staging_area)
        services, configs = deployables.GetDeployables(
            args.deployables, stager, deployables.GetPathMatchers())
        service_infos = [d.service_info for d in services]

        if not args.skip_image_url_validation:
            flags.ValidateImageUrl(args.image_url, service_infos)

        # The new API client.
        api_client = appengine_api_client.GetApiClient()
        # pylint: disable=protected-access
        log.debug(
            'API endpoint: [{endpoint}], API version: [{version}]'.format(
                endpoint=api_client.client.url,
                version=api_client.client._VERSION))
        # The legacy admin console API client.
        # The Admin Console API existed long before the App Engine Admin API, and
        # isn't being improved. We're in the process of migrating all of the calls
        # over to the Admin API, but a few things (notably config deployments)
        # haven't been ported over yet.
        ac_client = appengine_client.AppengineClient(args.server,
                                                     args.ignore_bad_certs)

        app = _PossiblyCreateApp(api_client, project)
        if check_for_stopped:
            _RaiseIfStopped(api_client, app)
        app = _PossiblyRepairApp(api_client, app)

        # Tell the user what is going to happen, and ask them to confirm.
        version_id = args.version or util.GenerateVersionId()
        deployed_urls = output_helpers.DisplayProposedDeployment(
            app, project, services, configs, version_id,
            deploy_options.promote)
        console_io.PromptContinue(cancel_on_no=True)
        if service_infos:
            # Do generic app setup if deploying any services.
            # All deployment paths for a service involve uploading source to GCS.
            metrics.CustomTimedEvent(metric_names.GET_CODE_BUCKET_START)
            code_bucket_ref = args.bucket or flags.GetCodeBucket(app, project)
            metrics.CustomTimedEvent(metric_names.GET_CODE_BUCKET)
            log.debug(
                'Using bucket [{b}].'.format(b=code_bucket_ref.ToBucketUrl()))

            # Prepare Flex if any service is going to deploy an image.
            if any(s.RequiresImage() for s in service_infos):
                if use_service_management:
                    deploy_command_util.PossiblyEnableFlex(project)
                else:
                    deploy_command_util.DoPrepareManagedVms(ac_client)

            all_services = {s.id: s for s in api_client.ListServices()}
        else:
            code_bucket_ref = None
            all_services = {}
        new_versions = []
        deployer = ServiceDeployer(api_client, deploy_options)

        # Track whether a service has been deployed yet, for metrics.
        service_deployed = False
        for service in services:
            if not service_deployed:
                metrics.CustomTimedEvent(
                    metric_names.FIRST_SERVICE_DEPLOY_START)
            new_version = version_util.Version(project, service.service_id,
                                               version_id)
            deployer.Deploy(service, new_version, code_bucket_ref,
                            args.image_url, all_services, app.gcrDomain)
            new_versions.append(new_version)
            log.status.Print('Deployed service [{0}] to [{1}]'.format(
                service.service_id, deployed_urls[service.service_id]))
            if not service_deployed:
                metrics.CustomTimedEvent(metric_names.FIRST_SERVICE_DEPLOY)
            service_deployed = True

    # Deploy config files.
    if configs:
        metrics.CustomTimedEvent(metric_names.UPDATE_CONFIG_START)
        for config in configs:
            message = 'Updating config [{config}]'.format(config=config.name)
            with progress_tracker.ProgressTracker(message):
                ac_client.UpdateConfig(config.name, config.parsed)
        metrics.CustomTimedEvent(metric_names.UPDATE_CONFIG)

    updated_configs = [c.name for c in configs]

    PrintPostDeployHints(new_versions, updated_configs)

    # Return all the things that were deployed.
    return {'versions': new_versions, 'configs': updated_configs}
Example No. 5
    def Run(self, args):
        """Promotes Cloud SQL read replica to a stand-alone instance.

    Args:
      args: argparse.Namespace, The arguments that this command was invoked
          with.

    Returns:
      A dict object representing the operations resource describing the
      promote-replica operation if the promote-replica was successful.
    """
        client = api_util.SqlClient(api_util.API_VERSION_DEFAULT)
        sql_client = client.sql_client
        sql_messages = client.sql_messages

        validate.ValidateInstanceName(args.replica)
        instance_ref = client.resource_parser.Parse(
            args.replica,
            params={'project': properties.VALUES.core.project.GetOrFail},
            collection='sql.instances')

        instance_resource = sql_client.instances.Get(
            sql_messages.SqlInstancesGetRequest(
                project=instance_ref.project, instance=instance_ref.instance))

        if instances.InstancesV1Beta4.IsMysqlDatabaseVersion(
                instance_resource.databaseVersion):
            database_type_fragment = 'mysql'
        elif instances.InstancesV1Beta4.IsPostgresDatabaseVersion(
                instance_resource.databaseVersion):
            database_type_fragment = 'postgres'
        else:
            # TODO(b/144067325): currently the link below goes to extremely
            # database-specific instructions to query the replication lag, so in case
            # we (...somehow...) end up here for a db other than mysql or postgres,
            # it's probably better to show nothing than to link to misleading info.
            # Once the metrics are made uniform in b/144067325, then we could default
            # to something here as we'd be giving the same instructions for all dbs
            # anyway.
            database_type_fragment = None
        promote_replica_docs_link = None
        if database_type_fragment:
            promote_replica_docs_link = (
                'Learn more:\n' +
                'https://cloud.google.com/sql/docs/{}/replication/manage-replicas#promote-replica\n\n'
                .format(database_type_fragment))

        # Format the message ourselves here rather than supplying it as part of the
        # 'message' to PromptContinue. Having the whole paragraph be automatically
        # formatted by PromptContinue would leave it with a line break in the middle
        # of the URL, rendering it unclickable.
        sys.stderr.write(textwrap.TextWrapper().fill(
            'Promoting a read replica stops replication and converts the instance '
            'to a standalone primary instance with read and write capabilities. '
            'This can\'t be undone. To avoid loss of data, before promoting the '
            'replica, you should verify that the replica has applied all '
            'transactions received from the primary.') + '\n\n')
        if promote_replica_docs_link:
            sys.stderr.write(promote_replica_docs_link)

        console_io.PromptContinue(message='', default=True, cancel_on_no=True)

        result = sql_client.instances.PromoteReplica(
            sql_messages.SqlInstancesPromoteReplicaRequest(
                project=instance_ref.project, instance=instance_ref.instance))
        operation_ref = client.resource_parser.Create(
            'sql.operations',
            operation=result.name,
            project=instance_ref.project)

        if args.async_:
            return sql_client.operations.Get(
                sql_messages.SqlOperationsGetRequest(
                    project=operation_ref.project,
                    operation=operation_ref.operation))

        operations.OperationsV1Beta4.WaitForOperation(
            sql_client, operation_ref, 'Promoting Cloud SQL replica')

        log.status.write(
            'Promoted [{instance}].\n'.format(instance=instance_ref))
Example No. 6
def _CheckIamPermissions(project_id, cloudbuild_service_account_roles,
                         compute_service_account_roles,
                         custom_compute_service_account=''):
  """Check for needed IAM permissions and prompt to add if missing.

  Args:
    project_id: A string with the id of the project.
    cloudbuild_service_account_roles: A set of roles required for cloudbuild
      service account.
    compute_service_account_roles: A set of roles required for compute service
      account.
    custom_compute_service_account: Custom compute service account.
  """
  project = projects_api.Get(project_id)
  # If the user's project doesn't have cloudbuild enabled yet, then the service
  # account won't even exist. If so, then ask to enable it before continuing.
  # Also prompt them to enable Cloud Logging if they haven't yet.
  expected_services = ['cloudbuild.googleapis.com', 'logging.googleapis.com',
                       'compute.googleapis.com']
  for service_name in expected_services:
    if not services_api.IsServiceEnabled(project.projectId, service_name):
      # TODO(b/112757283): Split this out into a separate library.
      prompt_message = (
          'The "{0}" service is not enabled for this project. '
          'It is required for this operation.\n').format(service_name)
      enable_service = console_io.PromptContinue(
          prompt_message,
          'Would you like to enable this service?',
          throw_if_unattended=True)
      if enable_service:
        services_api.EnableService(project.projectId, service_name)
      else:
        log.warning(
            'If import fails, manually enable {0} before retrying. For '
            'instructions on enabling services, see '
            'https://cloud.google.com/service-usage/docs/enable-disable.'
            .format(service_name))

  build_account = 'serviceAccount:{0}@cloudbuild.gserviceaccount.com'.format(
      project.projectNumber)
  # https://cloud.google.com/compute/docs/access/service-accounts#default_service_account
  compute_account = (
      'serviceAccount:{0}-compute@developer.gserviceaccount.com'.format(
          project.projectNumber))
  if custom_compute_service_account:
    compute_account = 'serviceAccount:{0}'.format(
        custom_compute_service_account)

  # Now that we're sure the service account exists, actually check permissions.
  try:
    policy = projects_api.GetIamPolicy(project_id)
  except apitools_exceptions.HttpForbiddenError:
    log.warning(
        'Your account does not have permission to check roles for the '
        'service account {0}. If import fails, '
        'ensure "{0}" has the roles "{1}" and "{2}" has the roles "{3}" before '
        'retrying.'.format(build_account, cloudbuild_service_account_roles,
                           compute_account, compute_service_account_roles))
    return

  _VerifyRolesAndPromptIfMissing(project_id, build_account,
                                 _CurrentRolesForAccount(policy, build_account),
                                 frozenset(cloudbuild_service_account_roles))

  current_compute_account_roles = _CurrentRolesForAccount(
      policy, compute_account)

  # By default, the Compute Engine service account has the role `roles/editor`
  # applied to it, which is sufficient for import and export. If that's not
  # present, then request the minimal number of permissions.
  if ROLE_EDITOR not in current_compute_account_roles:
    _VerifyRolesAndPromptIfMissing(
        project_id, compute_account, current_compute_account_roles,
        compute_service_account_roles)
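
The _VerifyRolesAndPromptIfMissing and _CurrentRolesForAccount helpers are elided above. As a hedged sketch of what the prompt-and-grant step might look like, assuming projects_api.AddIamPolicyBinding from the Cloud Resource Manager API library (the prompt wording is hypothetical):

def _VerifyRolesAndPromptIfMissing(project_id, account, current_roles,
                                   required_roles):
  """Sketch: offer to grant any required roles the account is missing."""
  missing_roles = sorted(set(required_roles) - set(current_roles))
  if not missing_roles:
    return
  prompt_message = (
      'The service account {0} is missing these roles: {1}.\n'
      'Importing images requires these roles.'.format(
          account, ', '.join(missing_roles)))
  if console_io.PromptContinue(prompt_message,
                               'Would you like to add them?',
                               throw_if_unattended=True):
    for role in missing_roles:
      # `account` already carries the 'serviceAccount:' member prefix.
      projects_api.AddIamPolicyBinding(project_id, account, role)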
Example No. 7
    def Run(self, args):
        project_ref = resources.REGISTRY.Parse(
            properties.VALUES.core.project.Get(required=True),
            collection='cloudresourcemanager.projects',
        )
        normalized_artifact_url = binauthz_command_util.NormalizeArtifactUrl(
            args.artifact_url)

        attestor_ref = args.CONCEPTS.attestor.Parse()
        key_ref = args.CONCEPTS.keyversion.Parse()

        # NOTE: This will hit the alpha Binauthz API until we promote this command
        # to the beta surface or hardcode it e.g. to Beta.
        api_version = apis.GetApiVersion(self.ReleaseTrack())
        attestor = attestors.Client(api_version).Get(attestor_ref)
        # TODO(b/79709480): Add other types of attestors if/when supported.
        note_ref = resources.REGISTRY.ParseResourceId(
            'containeranalysis.projects.notes',
            attestor.userOwnedDrydockNote.noteReference, {})

        key_id = args.public_key_id_override or kms.GetKeyUri(key_ref)
        if key_id not in set(
                pubkey.id
                for pubkey in attestor.userOwnedDrydockNote.publicKeys):
            log.warning('No public key with ID [%s] found on attestor [%s]',
                        key_id, attestor.name)
            console_io.PromptContinue(
                prompt_string='Create and upload Attestation anyway?',
                cancel_on_no=True)

        payload = binauthz_command_util.MakeSignaturePayload(args.artifact_url)

        kms_client = kms.Client()
        pubkey_response = kms_client.GetPublicKey(key_ref.RelativeName())

        sign_response = kms_client.AsymmetricSign(
            key_ref.RelativeName(),
            kms.GetAlgorithmDigestType(pubkey_response.algorithm), payload)

        ca_api_version = ca_apis.GetApiVersion(self.ReleaseTrack())
        # TODO(b/138859339): Remove when remainder of surface migrated to V1 API.
        if ca_api_version == ca_apis.V1:
            return containeranalysis.Client(
                ca_api_version).CreateAttestationOccurrence(
                    project_ref=project_ref,
                    note_ref=note_ref,
                    artifact_url=normalized_artifact_url,
                    public_key_id=key_id,
                    signature=sign_response.signature,
                    plaintext=payload,
                )
        else:
            return containeranalysis.Client(
                ca_api_version).CreateGenericAttestationOccurrence(
                    project_ref=project_ref,
                    note_ref=note_ref,
                    artifact_url=normalized_artifact_url,
                    public_key_id=key_id,
                    signature=sign_response.signature,
                    plaintext=payload,
                )
Example No. 8
    def Run(self, args):
        """Connect to a running flex instance.

    Args:
      args: argparse.Namespace, the args the command was invoked with.

    Raises:
      InvalidInstanceTypeError: The instance is not supported for SSH.
      MissingVersionError: The version specified does not exist.
      MissingInstanceError: The instance specified does not exist.
      UnattendedPromptError: Not running in a tty.
      OperationCancelledError: User cancelled the operation.
      ssh.CommandError: The SSH command exited with SSH exit code, which
        usually implies that a connection problem occurred.

    Returns:
      int, The exit code of the SSH command.
    """
        env = ssh.Environment.Current()
        env.RequireSSH()
        keys = ssh.Keys.FromFilename()

        api_client = appengine_api_client.GetApiClient()
        try:
            version = api_client.GetVersionResource(service=args.service,
                                                    version=args.version)
        except api_exceptions.NotFoundError:
            raise command_exceptions.MissingVersionError('{}/{}'.format(
                args.service, args.version))
        version = version_util.Version.FromVersionResource(version, None)
        if version.environment is not util.Environment.FLEX:
            if version.environment is util.Environment.MANAGED_VMS:
                environment = 'Managed VMs'
                msg = 'Use `gcloud compute ssh` for Managed VMs instances.'
            else:
                environment = 'Standard'
                msg = None
            raise command_exceptions.InvalidInstanceTypeError(environment, msg)
        res = resources.REGISTRY.Parse(
            args.instance,
            params={
                'versionsId': args.version,
                'instancesId': args.instance,
                'servicesId': args.service
            },
            collection='appengine.apps.services.versions.instances')
        rel_name = res.RelativeName()
        try:
            instance = api_client.GetInstanceResource(res)
        except api_exceptions.NotFoundError:
            raise command_exceptions.MissingInstanceError(rel_name)

        if not instance.vmDebugEnabled:
            log.warning(ENABLE_DEBUG_WARNING)
            console_io.PromptContinue(cancel_on_no=True,
                                      throw_if_unattended=True)
        user = ssh.GetDefaultSshUsername()
        remote = ssh.Remote(instance.vmIp, user=user)
        public_key = keys.GetPublicKey().ToEntry()
        ssh_key = '{user}:{key} {user}'.format(user=user, key=public_key)
        log.status.Print(
            'Sending public key to instance [{}].'.format(rel_name))
        api_client.DebugInstance(res, ssh_key)
        options = {
            'IdentitiesOnly': 'yes',  # No ssh-agent as of yet
            'UserKnownHostsFile': ssh.KnownHosts.DEFAULT_PATH
        }
        cmd = ssh.SSHCommand(remote,
                             identity_file=keys.key_file,
                             options=options)
        if args.container:
            cmd.tty = True
            cmd.remote_command = ['container_exec', args.container, '/bin/sh']
        return cmd.Run(env)
Example No. 9
    def WarnForZonalCreation(self, resource_refs):
        """Warns the user if a zone has upcoming maintanence or deprecation."""
        zones = self.GetZones(resource_refs)
        if not zones:
            return

        prompts = []
        zones_with_upcoming_maintenance = []
        zones_with_deprecated = []
        for zone in zones:
            if zone.maintenanceWindows:
                zones_with_upcoming_maintenance.append(zone)
            if zone.deprecated:
                zones_with_deprecated.append(zone)

        if not zones_with_upcoming_maintenance and not zones_with_deprecated:
            return

        if zones_with_upcoming_maintenance:
            phrases = []
            if len(zones_with_upcoming_maintenance) == 1:
                phrases = ('a zone', 'window is')
            else:
                phrases = ('zones', 'windows are')
            title = ('You have selected {0} with upcoming '
                     'maintenance. During maintenance, resources are '
                     'temporarily unavailable. The next scheduled '
                     '{1} as follows:'.format(phrases[0], phrases[1]))
            printable_maintenance_zones = []
            for zone in zones_with_upcoming_maintenance:
                next_event = min(zone.maintenanceWindows,
                                 key=lambda x: x.beginTime)
                window = '[{0}]: {1} -- {2}'.format(zone.name,
                                                    next_event.beginTime,
                                                    next_event.endTime)
                printable_maintenance_zones.append(window)
            prompts.append(
                utils.ConstructList(title, printable_maintenance_zones))

        if zones_with_deprecated:
            phrases = []
            if len(zones_with_deprecated) == 1:
                phrases = ('zone is', 'this zone', 'the')
            else:
                phrases = ('zones are', 'these zones', 'their')
            title = ('\n'
                     'WARNING: The following selected {0} deprecated.'
                     ' All resources in {1} will be deleted after'
                     ' {2} turndown date.'.format(phrases[0], phrases[1],
                                                  phrases[2]))
            printable_deprecated_zones = []
            for zone in zones_with_deprecated:
                if zone.deprecated.deleted:
                    printable_deprecated_zones.append(
                        ('[{0}] {1}').format(zone.name,
                                             zone.deprecated.deleted))
                else:
                    printable_deprecated_zones.append('[{0}]'.format(
                        zone.name))
            prompts.append(
                utils.ConstructList(title, printable_deprecated_zones))

        final_message = ' '.join(prompts)
        if not console_io.PromptContinue(message=final_message):
            raise calliope_exceptions.ToolException(
                'Creation aborted by user.')
Example No. 10
    def Run(self, args):
        """Creates a new Cloud SQL instance.

    Args:
      args: argparse.Namespace, The arguments that this command was invoked
          with.

    Returns:
      A dict object representing the operations resource describing the create
      operation if the create was successful.
    Raises:
      HttpException: An http error response was received while executing the
          api request.
      ToolException: An error other than an http error occurred while
          executing the command.
    """
        client = api_util.SqlClient(api_util.API_VERSION_DEFAULT)
        sql_client = client.sql_client
        sql_messages = client.sql_messages

        validate.ValidateInstanceName(args.instance)
        instance_ref = client.resource_parser.Parse(
            args.instance,
            params={'project': properties.VALUES.core.project.GetOrFail},
            collection='sql.instances')
        instance_resource = instances.InstancesV1Beta4.ConstructInstanceFromArgs(
            sql_messages, args, instance_ref=instance_ref)

        if args.pricing_plan == 'PACKAGE':
            if not console_io.PromptContinue(
                    'Charges will begin accruing immediately. Really create Cloud '
                    'SQL instance?'):
                raise exceptions.ToolException('canceled by the user.')

        operation_ref = None
        try:
            result_operation = sql_client.instances.Insert(instance_resource)

            operation_ref = client.resource_parser.Create(
                'sql.operations',
                operation=result_operation.name,
                project=instance_ref.project)

            if args.async_:
                if not args.IsSpecified('format'):
                    args.format = 'default'
                return sql_client.operations.Get(
                    sql_messages.SqlOperationsGetRequest(
                        project=operation_ref.project,
                        operation=operation_ref.operation))

            operations.OperationsV1Beta4.WaitForOperation(
                sql_client, operation_ref, 'Creating Cloud SQL instance')

            log.CreatedResource(instance_ref)

            new_resource = sql_client.instances.Get(
                sql_messages.SqlInstancesGetRequest(
                    project=instance_ref.project,
                    instance=instance_ref.instance))
            return new_resource
        except apitools_exceptions.HttpError as error:
            log.debug('operation : %s', str(operation_ref))
            exc = exceptions.HttpException(error)
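            # Surface a friendlier message when the failure reason is the
            # per-label instance quota ('errorMaxInstancePerLabel'); otherwise
            # re-raise the original error.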
            if resource_property.Get(
                    exc.payload.content,
                    resource_lex.ParseKey('error.errors[0].reason'),
                    None) == 'errorMaxInstancePerLabel':
                msg = resource_property.Get(
                    exc.payload.content,
                    resource_lex.ParseKey('error.message'), None)
                raise exceptions.HttpException(msg)
            raise
Example No. 11
def _Run(args, holder, url_map_arg, release_track):
    """Issues requests necessary to import URL maps."""
    client = holder.client
    resources = holder.resources

    url_map_ref = url_map_arg.ResolveAsResource(
        args,
        resources,
        default_scope=compute_scope.ScopeEnum.GLOBAL,
        scope_lister=compute_flags.GetDefaultScopeLister(client))

    data = console_io.ReadFromFileOrStdin(args.source or '-', binary=False)

    try:
        url_map = export_util.Import(message_type=client.messages.UrlMap,
                                     stream=data,
                                     schema_path=_GetSchemaPath(release_track))
    except yaml_validator.ValidationError as e:
        raise compute_exceptions.ValidationError(str(e))

    if url_map.name != url_map_ref.Name():
        # Replace warning and raise error after 10/01/2021
        log.warning(
            'The name of the Url Map must match the value of the ' +
            '\'name\' attribute in the YAML file. Future versions of ' +
            'gcloud will fail with an error.')
    # Get existing URL map.
    try:
        url_map_old = url_maps_utils.SendGetRequest(client, url_map_ref)
    except apitools_exceptions.HttpError as error:
        if error.status_code != 404:
            raise error
        # Url Map does not exist, create a new one.
        return _SendInsertRequest(client, resources, url_map_ref, url_map)

    # No change, do not send requests to server.
    if url_map_old == url_map:
        return

    console_io.PromptContinue(
        message=('Url Map [{0}] will be overwritten.').format(
            url_map_ref.Name()),
        cancel_on_no=True)

    # Populate id and fingerprint fields when YAML files don't contain them.
    if not url_map.id:
        url_map.id = url_map_old.id
    if not url_map.fingerprint:
        # Replace warning and raise error after 10/01/2021
        log.warning(
            'An up-to-date fingerprint must be provided to ' +
            'update the Url Map. Future versions of gcloud will fail ' +
            'with an error \'412 conditionNotMet\'')
        url_map.fingerprint = url_map_old.fingerprint
    # Unspecified fields are assumed to be cleared.
    # TODO(b/182287403) Replace with proto reflection and update scenario tests.
    cleared_fields = []
    if not url_map.description:
        cleared_fields.append('description')
    if not url_map.hostRules:
        cleared_fields.append('hostRules')
    if not url_map.pathMatchers:
        cleared_fields.append('pathMatchers')
    if not url_map.tests:
        cleared_fields.append('tests')
    if not url_map.defaultService:
        cleared_fields.append('defaultService')
    if not url_map.defaultRouteAction:
        cleared_fields.append('defaultRouteAction')
    else:
        cleared_fields = cleared_fields + _GetClearedFieldsForRoutAction(
            url_map.defaultRouteAction, 'defaultRouteAction.')
    if not url_map.defaultUrlRedirect:
        cleared_fields.append('defaultUrlRedirect')
    else:
        cleared_fields = cleared_fields + _GetClearedFieldsForUrlRedirect(
            url_map.defaultUrlRedirect, 'defaultUrlRedirect.')
    if not url_map.headerAction:
        cleared_fields.append('headerAction')
    else:
        cleared_fields = cleared_fields + _GetClearedFieldsForHeaderAction(
            url_map.headerAction, 'headerAction.')

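    # IncludeFields forces the named empty fields to be serialized in the
    # patch request body, so the server clears them rather than ignoring them.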
    with client.apitools_client.IncludeFields(cleared_fields):
        return _SendPatchRequest(client, resources, url_map_ref, url_map)
Example No. 12
def UpdateRC(command_completion, path_update, rc_path, bin_path, sdk_root):
    """Update the system path to include bin_path.

  Args:
    command_completion: bool, Whether or not to do command completion. If None,
      ask.
    path_update: bool, Whether or not to update PATH. If None, ask.
    rc_path: str, The path to the rc file to update. If None, ask.
    bin_path: str, The absolute path to the directory that will contain
      Cloud SDK binaries.
    sdk_root: str, The path to the Cloud SDK root.
  """

    host_os = platforms.OperatingSystem.Current()
    if host_os == platforms.OperatingSystem.WINDOWS:
        if path_update is None:
            path_update = console_io.PromptContinue(
                prompt_string='Update %PATH% to include Cloud SDK binaries?')
        if path_update:
            _UpdatePathForWindows(bin_path)
        return

    if command_completion is None:
        if path_update is None:  # Ask only one question if both were not set.
            path_update = console_io.PromptContinue(
                prompt_string=('\nModify profile to update your $PATH '
                               'and enable shell command completion?'))
            command_completion = path_update
        else:
            command_completion = console_io.PromptContinue(
                prompt_string=('\nModify profile to enable shell command '
                               'completion?'))
    elif path_update is None:
        path_update = console_io.PromptContinue(
            prompt_string=('\nModify profile to update your $PATH?'))

    rc_paths = _GetRcPaths(command_completion, path_update, rc_path, sdk_root,
                           host_os)

    if rc_paths.rc_path:
        if os.path.exists(rc_paths.rc_path):
            with open(rc_paths.rc_path) as rc_file:
                rc_data = rc_file.read()
                cached_rc_data = rc_data
        else:
            rc_data = ''
            cached_rc_data = ''

        if path_update:
            rc_data = _GetRcData(
                '# The next line updates PATH for the Google Cloud'
                ' SDK.', rc_paths.path, rc_data)

        if command_completion:
            rc_data = _GetRcData(
                '# The next line enables shell command completion'
                ' for gcloud.',
                rc_paths.completion,
                rc_data,
                pattern='# The next line enables [a-z][a-z]*'
                'completion for gcloud.')

        if cached_rc_data == rc_data:
            print(
                'No changes necessary for [{rc}].'.format(rc=rc_paths.rc_path))
            return

        if os.path.exists(rc_paths.rc_path):
            rc_backup = rc_paths.rc_path + '.backup'
            print('Backing up [{rc}] to [{backup}].'.format(
                rc=rc_paths.rc_path, backup=rc_backup))
            shutil.copyfile(rc_paths.rc_path, rc_backup)

        with open(rc_paths.rc_path, 'w') as rc_file:
            rc_file.write(rc_data)

        print("""\
[{rc_path}] has been updated.
Start a new shell for the changes to take effect.
""".format(rc_path=rc_paths.rc_path))

    if not command_completion:
        print("""\
Source [{rc}]
in your profile to enable shell command completion for gcloud.
""".format(rc=rc_paths.completion))

    if not path_update:
        print("""\
Source [{rc}]
in your profile to add the Google Cloud SDK command line tools to your $PATH.
""".format(rc=rc_paths.path))
Example No. 13
def _Run(args,
         track=None,
         enable_runtime=True,
         enable_build_worker_pool=False):
    """Run a function deployment with the given args."""
    # Check for labels that start with `deployment`, which is not allowed.
    labels_util.CheckNoDeploymentLabels('--remove-labels', args.remove_labels)
    labels_util.CheckNoDeploymentLabels('--update-labels', args.update_labels)

    # Check that exactly one trigger type is specified properly.
    trigger_util.ValidateTriggerArgs(args.trigger_event, args.trigger_resource,
                                     args.IsSpecified('retry'),
                                     args.IsSpecified('trigger_http'))
    trigger_params = trigger_util.GetTriggerEventParams(
        args.trigger_http, args.trigger_bucket, args.trigger_topic,
        args.trigger_event, args.trigger_resource)

    function_ref = args.CONCEPTS.name.Parse()
    function_url = function_ref.RelativeName()

    messages = api_util.GetApiMessagesModule(track)

    # Get an existing function or create a new one.
    function = api_util.GetFunction(function_url)
    is_new_function = function is None
    had_vpc_connector = bool(
        function.vpcConnector) if not is_new_function else False
    if is_new_function:
        trigger_util.CheckTriggerSpecified(args)
        function = messages.CloudFunction()
        function.name = function_url
    elif trigger_params:
        # If the new deployment would implicitly change the trigger_event
        # type, raise an error.
        trigger_util.CheckLegacyTriggerUpdate(function.eventTrigger,
                                              trigger_params['trigger_event'])

    # Keep track of which fields are updated in the case of patching.
    updated_fields = []

    # Populate function properties based on args.
    if args.entry_point:
        function.entryPoint = args.entry_point
        updated_fields.append('entryPoint')
    if args.timeout:
        function.timeout = '{}s'.format(args.timeout)
        updated_fields.append('timeout')
    if args.memory:
        function.availableMemoryMb = utils.BytesToMb(args.memory)
        updated_fields.append('availableMemoryMb')
    if args.service_account:
        function.serviceAccountEmail = args.service_account
        updated_fields.append('serviceAccountEmail')
    if (args.IsSpecified('max_instances')
            or args.IsSpecified('clear_max_instances')):
        max_instances = 0 if args.clear_max_instances else args.max_instances
        function.maxInstances = max_instances
        updated_fields.append('maxInstances')
    if enable_runtime:
        if args.IsSpecified('runtime'):
            function.runtime = args.runtime
            updated_fields.append('runtime')
            if args.runtime in ['nodejs6']:
                log.warning(
                    'The Node.js 6 runtime is deprecated on Cloud Functions. '
                    'Please migrate to Node.js 8 (--runtime=nodejs8) or Node.js 10 '
                    '(--runtime=nodejs10). '
                    'See https://cloud.google.com/functions/docs/migrating/nodejs-runtimes'
                )
        elif is_new_function:
            raise exceptions.RequiredArgumentException(
                'runtime', 'Flag `--runtime` is required for new functions.')
    if args.vpc_connector or args.clear_vpc_connector:
        function.vpcConnector = ('' if args.clear_vpc_connector else
                                 args.vpc_connector)
        updated_fields.append('vpcConnector')
    if args.IsSpecified('egress_settings'):
        will_have_vpc_connector = ((had_vpc_connector
                                    and not args.clear_vpc_connector)
                                   or args.vpc_connector)
        if not will_have_vpc_connector:
            raise exceptions.RequiredArgumentException(
                'vpc-connector', 'Flag `--vpc-connector` is '
                'required for setting `egress-settings`.')
        egress_settings_enum = arg_utils.ChoiceEnumMapper(
            arg_name='egress_settings',
            message_enum=function.VpcConnectorEgressSettingsValueValuesEnum,
            custom_mappings=flags.EGRESS_SETTINGS_MAPPING).GetEnumForChoice(
                args.egress_settings)
        function.vpcConnectorEgressSettings = egress_settings_enum
        updated_fields.append('vpcConnectorEgressSettings')
    if args.IsSpecified('ingress_settings'):
        ingress_settings_enum = arg_utils.ChoiceEnumMapper(
            arg_name='ingress_settings',
            message_enum=function.IngressSettingsValueValuesEnum,
            custom_mappings=flags.INGRESS_SETTINGS_MAPPING).GetEnumForChoice(
                args.ingress_settings)
        function.ingressSettings = ingress_settings_enum
        updated_fields.append('ingressSettings')
    if enable_build_worker_pool:
        if args.build_worker_pool or args.clear_build_worker_pool:
            function.buildWorkerPool = ('' if args.clear_build_worker_pool else
                                        args.build_worker_pool)
            updated_fields.append('buildWorkerPool')
    # Populate trigger properties of function based on trigger args.
    if args.trigger_http:
        function.httpsTrigger = messages.HttpsTrigger()
        function.eventTrigger = None
        updated_fields.extend(['eventTrigger', 'httpsTrigger'])
    if trigger_params:
        function.eventTrigger = trigger_util.CreateEventTrigger(
            **trigger_params)
        function.httpsTrigger = None
        updated_fields.extend(['eventTrigger', 'httpsTrigger'])
    if args.IsSpecified('retry'):
        updated_fields.append('eventTrigger.failurePolicy')
        if args.retry:
            function.eventTrigger.failurePolicy = messages.FailurePolicy()
            function.eventTrigger.failurePolicy.retry = messages.Retry()
        else:
            function.eventTrigger.failurePolicy = None
    elif function.eventTrigger:
        function.eventTrigger.failurePolicy = None

    # Populate source properties of function based on source args.
    # Only add source to the function if it is explicitly provided, this is a
    # new function, a stage bucket is in use, or an existing function that
    # previously used local source is being redeployed.
    if (args.source or args.stage_bucket or is_new_function
            or function.sourceUploadUrl):
        updated_fields.extend(
            source_util.SetFunctionSourceProps(function,
                                               function_ref,
                                               args.source,
                                               args.stage_bucket,
                                               args.ignore_file,
                                               update_date=True))

    # Apply label args to function
    if labels_util.SetFunctionLabels(function, args.update_labels,
                                     args.remove_labels, args.clear_labels):
        updated_fields.append('labels')

    # Apply environment variables args to function
    updated_fields.extend(_ApplyEnvVarsArgsToFunction(function, args))

    ensure_all_users_invoke = flags.ShouldEnsureAllUsersInvoke(args)
    deny_all_users_invoke = flags.ShouldDenyAllUsersInvoke(args)

    if is_new_function:
        if (not ensure_all_users_invoke and not deny_all_users_invoke and
                api_util.CanAddFunctionIamPolicyBinding(_GetProject(args))):
            ensure_all_users_invoke = console_io.PromptContinue(
                prompt_string=('Allow unauthenticated invocations of new '
                               'function [{}]?'.format(args.NAME)),
                default=False)

        op = api_util.CreateFunction(function,
                                     function_ref.Parent().RelativeName())
        if (not ensure_all_users_invoke and not deny_all_users_invoke):
            template = ('Function created with limited-access IAM policy. '
                        'To enable unauthorized access consider "%s"')
            log.warning(template %
                        _CreateBindPolicyCommand(args.NAME, args.region))
            deny_all_users_invoke = True

    elif updated_fields:
        op = api_util.PatchFunction(function, updated_fields)

    else:
        op = None  # Nothing to wait for
        if not ensure_all_users_invoke and not deny_all_users_invoke:
            log.status.Print('Nothing to update.')
            return

    stop_trying_perm_set = [False]
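    # A one-element list lets the nested callback below mutate this flag
    # without `nonlocal`, which is unavailable in Python 2.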

    # The server asynchronously sets allUsers invoker permissions some time
    # after we create the function. That means, to remove it, we need to do so
    # after the server adds it. We can remove this mess after the default
    # changes.
    # TODO(b/139026575): Remove the "remove" path, only bother adding. Remove the
    # logic from the polling loop. Remove the ability to add logic like this to
    # the polling loop.
    def TryToSetInvokerPermission():
        """Try to make the invoker permission be what we said it should.

    This is for executing in the polling loop, and will stop trying as soon as
    it succeeds at making a change.
    """
        if stop_trying_perm_set[0]:
            return
        try:
            if ensure_all_users_invoke:
                api_util.AddFunctionIamPolicyBinding(function.name)
                stop_trying_perm_set[0] = True
            elif deny_all_users_invoke:
                stop_trying_perm_set[0] = (
                    api_util.RemoveFunctionIamPolicyBindingIfFound(
                        function.name))
        except exceptions.HttpException:
            stop_trying_perm_set[0] = True
            log.warning('Setting IAM policy failed, try "%s"' %
                        _CreateBindPolicyCommand(args.NAME, args.region))

    log_stackdriver_url = [True]

    def TryToLogStackdriverURL(op):
        """Logs stackdriver URL.

    This is for executing in the polling loop, and will stop trying as soon as
    it succeeds at making a change.

    Args:
      op: the operation
    """
        if log_stackdriver_url[0] and op.metadata:
            metadata = encoding.PyValueToMessage(
                messages.OperationMetadataV1,
                encoding.MessageToPyValue(op.metadata))
            if metadata.buildId:
                sd_info_template = '\nFor Cloud Build Stackdriver Logs, visit: %s'
                log.status.Print(sd_info_template %
                                 _CreateStackdriverURLforBuildLogs(
                                     metadata.buildId, _GetProject(args)))
                log_stackdriver_url[0] = False

    if op:
        api_util.WaitForFunctionUpdateOperation(
            op,
            try_set_invoker=TryToSetInvokerPermission,
            on_every_poll=[TryToLogStackdriverURL])
    return api_util.GetFunction(function.name)
Example No. 14
class Create(base.CreateCommand):
    """Create a cluster for running containers."""
    @staticmethod
    def Args(parser):
        _Args(parser)
        _AddAdditionalZonesFlag(parser, deprecated=True)
        flags.AddNodeLocationsFlag(parser)
        flags.AddAddonsFlags(parser)
        flags.AddClusterAutoscalingFlags(parser)
        flags.AddEnableAutoRepairFlag(parser)
        flags.AddEnableKubernetesAlphaFlag(parser)
        flags.AddEnableLegacyAuthorizationFlag(parser)
        flags.AddIPAliasFlags(parser)
        flags.AddLabelsFlag(parser)
        flags.AddLocalSSDFlag(parser)
        flags.AddMaintenanceWindowFlag(parser)
        flags.AddMasterAuthorizedNetworksFlags(parser)
        flags.AddMinCpuPlatformFlag(parser)
        flags.AddNetworkPolicyFlags(parser)
        flags.AddNodeTaintsFlag(parser)
        flags.AddPreemptibleFlag(parser)
        flags.AddDeprecatedClusterNodeIdentityFlags(parser)

    def ParseCreateOptions(self, args):
        return ParseCreateOptionsBase(args)

    def Run(self, args):
        """This is what gets called when the user runs this command.

    Args:
      args: an argparse namespace. All the arguments that were provided to this
        command invocation.

    Returns:
      Cluster message for the successfully created cluster.

    Raises:
      util.Error, if creation failed.
    """
        if args.async_ and not args.IsSpecified('format'):
            args.format = util.OPERATIONS_FORMAT

        util.CheckKubectlInstalled()

        adapter = self.context['api_adapter']
        location_get = self.context['location_get']
        location = location_get(args)

        cluster_ref = adapter.ParseCluster(args.name, location)
        options = self.ParseCreateOptions(args)

        if options.enable_kubernetes_alpha:
            console_io.PromptContinue(
                message=constants.KUBERNETES_ALPHA_PROMPT,
                throw_if_unattended=True,
                cancel_on_no=True)

        if getattr(args, 'region', None):
            # TODO(b/68496825): Remove this completely after regional clusters beta
            # launch.
            if self._release_track == base.ReleaseTrack.ALPHA:
                console_io.PromptContinue(
                    message=constants.KUBERNETES_REGIONAL_CHARGES_PROMPT,
                    throw_if_unattended=True,
                    cancel_on_no=True)

        if options.enable_autorepair is not None:
            log.status.Print(
                messages.AutoUpdateUpgradeRepairMessage(
                    options.enable_autorepair, 'autorepair'))

        if options.enable_autoupgrade is not None:
            log.status.Print(
                messages.AutoUpdateUpgradeRepairMessage(
                    options.enable_autoupgrade, 'autoupgrade'))

        if options.accelerators is not None:
            log.status.Print(constants.KUBERNETES_GPU_LIMITATION_MSG)

        operation = None
        try:
            operation_ref = adapter.CreateCluster(cluster_ref, options)
            if args.async_:
                return adapter.GetCluster(cluster_ref)

            operation = adapter.WaitForOperation(operation_ref,
                                                 'Creating cluster {0}'.format(
                                                     cluster_ref.clusterId),
                                                 timeout_s=args.timeout)
            cluster = adapter.GetCluster(cluster_ref)
        except apitools_exceptions.HttpError as error:
            raise exceptions.HttpException(error, util.HTTP_ERROR_FORMAT)

        log.CreatedResource(cluster_ref)
        cluster_url = util.GenerateClusterUrl(cluster_ref)
        log.status.Print('To inspect the contents of your cluster, go to: ' +
                         cluster_url)
        if operation.detail:
            # Non-empty detail on a DONE create operation should be surfaced as
            # a warning to end user.
            log.warning(operation.detail)

        try:
            util.ClusterConfig.Persist(cluster, cluster_ref.projectId)
        except kconfig.MissingEnvVarError as error:
            log.warning(error)

        return [cluster]
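`adapter.WaitForOperation` above blocks until the create operation completes or `args.timeout` elapses. A rough stand-in for that kind of poll-until-done loop, assuming a hypothetical `get_operation` callable that returns a dict with a `status` field (this is not the real adapter API):

import time

def WaitForOperation(get_operation, timeout_s, poll_interval_s=5):
    """Polls get_operation() until its status is DONE or timeout_s elapses."""
    deadline = time.monotonic() + timeout_s
    while time.monotonic() < deadline:
        op = get_operation()
        if op.get('status') == 'DONE':
            return op
        time.sleep(poll_interval_s)
    raise TimeoutError('operation did not finish within %s seconds' % timeout_s)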
Example No. 15
def RunDeploy(
    args, enable_endpoints=False, use_beta_stager=False, upload_strategy=None,
    runtime_builder_strategy=runtime_builders.RuntimeBuilderStrategy.NEVER):
  """Perform a deployment based on the given args.

  Args:
    args: argparse.Namespace, An object that contains the values for the
        arguments specified in the ArgsDeploy() function.
    enable_endpoints: Enable Cloud Endpoints for the deployed app.
    use_beta_stager: Use the stager registry defined for the beta track rather
        than the default stager registry.
    upload_strategy: deploy_app_command_util.UploadStrategy, the parallelism
      strategy to use for uploading files, or None to use the default.
    runtime_builder_strategy: runtime_builders.RuntimeBuilderStrategy, when to
      use the new CloudBuild-based runtime builders (alternative is old
      externalized runtimes).

  Returns:
    A dict of the form `{'versions': new_versions, 'configs': updated_configs}`
    where new_versions is a list of version_util.Version, and updated_configs
    is a list of config file identifiers, see yaml_parsing.ConfigYamlInfo.
  """
  project = properties.VALUES.core.project.Get(required=True)
  deploy_options = DeployOptions.FromProperties(
      enable_endpoints, upload_strategy=upload_strategy,
      runtime_builder_strategy=runtime_builder_strategy)

  # Parse existing app.yamls or try to generate a new one if the directory is
  # empty.
  if not args.deployables:
    yaml_path = deploy_command_util.DEFAULT_DEPLOYABLE
    if not os.path.exists(deploy_command_util.DEFAULT_DEPLOYABLE):
      log.warning('Automatic app detection is currently in Beta')
      yaml_path = deploy_command_util.CreateAppYamlForAppDirectory(os.getcwd())
    app_config = yaml_parsing.AppConfigSet([yaml_path])
  else:
    app_config = yaml_parsing.AppConfigSet(args.deployables)

  # If applicable, sort services by order they were passed to the command.
  services = app_config.Services()

  if not args.skip_image_url_validation:
    flags.ValidateImageUrl(args.image_url, services)

  # The new API client.
  api_client = appengine_api_client.GetApiClient()
  # pylint: disable=protected-access
  log.debug('API endpoint: [{endpoint}], API version: [{version}]'.format(
      endpoint=api_client.client.url,
      version=api_client.client._VERSION))
  # The legacy admin console API client.
  # The Admin Console API existed long before the App Engine Admin API, and
  # isn't being improved. We're in the process of migrating all of the calls
  # over to the Admin API, but a few things (notably config deployments) haven't
  # been ported over yet.
  ac_client = appengine_client.AppengineClient(
      args.server, args.ignore_bad_certs)

  app = _PossiblyCreateApp(api_client, project)
  app = _PossiblyRepairApp(api_client, app)

  # Tell the user what is going to happen, and ask them to confirm.
  version_id = args.version or util.GenerateVersionId()
  deployed_urls = output_helpers.DisplayProposedDeployment(
      app, project, app_config, version_id, deploy_options.promote)
  console_io.PromptContinue(cancel_on_no=True)
  if services:
    # Do generic app setup if deploying any services.
    # All deployment paths for a service involve uploading source to GCS.
    code_bucket_ref = args.bucket or flags.GetCodeBucket(app, project)
    metrics.CustomTimedEvent(metric_names.GET_CODE_BUCKET)
    log.debug('Using bucket [{b}].'.format(b=code_bucket_ref.ToBucketUrl()))

    # Prepare Flex if any service is going to deploy an image.
    if any([m.RequiresImage() for m in services.values()]):
      deploy_command_util.DoPrepareManagedVms(ac_client)

    all_services = dict([(s.id, s) for s in api_client.ListServices()])
  else:
    code_bucket_ref = None
    all_services = {}
  new_versions = []
  if args.skip_staging:
    stager = staging.GetNoopStager()
  elif use_beta_stager:
    stager = staging.GetBetaStager()
  else:
    stager = staging.GetStager()
  deployer = ServiceDeployer(api_client, stager, deploy_options)

  for name, service in services.items():
    new_version = version_util.Version(project, name, version_id)
    deployer.Deploy(service, new_version, code_bucket_ref, args.image_url,
                    all_services, app.gcrDomain)
    new_versions.append(new_version)
    log.status.Print('Deployed service [{0}] to [{1}]'.format(
        name, deployed_urls[name]))

  # Deploy config files.
  for (name, config) in app_config.Configs().items():
    message = 'Updating config [{config}]'.format(config=name)
    with progress_tracker.ProgressTracker(message):
      ac_client.UpdateConfig(name, config.parsed)

  updated_configs = app_config.Configs().keys()

  PrintPostDeployHints(new_versions, updated_configs)

  # Return all the things that were deployed.
  return {
      'versions': new_versions,
      'configs': updated_configs
  }
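Because `RunDeploy` returns `{'versions': ..., 'configs': ...}`, a caller can report what was deployed without another API round trip. A short usage sketch (the `service` and `id` attributes follow `version_util.Version` as used above):

result = RunDeploy(args)
for version in result['versions']:
    print('deployed service %s, version %s' % (version.service, version.id))
for config in result['configs']:
    print('updated config: %s' % config)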
Example No. 16
    def Run(self, args):
        """Deletes an SSL certificate for a Cloud SQL instance.

    Args:
      args: argparse.Namespace, The arguments that this command was invoked
          with.

    Returns:
      A dict object representing the operations resource describing the delete
      operation if the api request was successful.
    Raises:
      ResourceNotFoundError: The ssl cert could not be found for the instance.
    """
        client = api_util.SqlClient(api_util.API_VERSION_DEFAULT)
        sql_client = client.sql_client
        sql_messages = client.sql_messages

        validate.ValidateInstanceName(args.instance)
        instance_ref = client.resource_parser.Parse(
            args.instance,
            params={'project': properties.VALUES.core.project.GetOrFail},
            collection='sql.instances')

        # TODO(b/36050482): figure out how to rectify the common_name and the
        # sha1fingerprint, so that things can work with the resource parser.

        console_io.PromptContinue(
            message='{0} will be deleted. New connections can no longer be made '
            'using this certificate. Existing connections are not affected.'.
            format(args.common_name),
            default=True,
            cancel_on_no=True)

        cert_ref = cert.GetCertRefFromName(sql_client, sql_messages,
                                           client.resource_parser,
                                           instance_ref, args.common_name)
        if not cert_ref:
            raise exceptions.ResourceNotFoundError(
                'no ssl cert named [{name}] for instance [{instance}]'.format(
                    name=args.common_name, instance=instance_ref))

        result = sql_client.sslCerts.Delete(
            sql_messages.SqlSslCertsDeleteRequest(
                project=cert_ref.project,
                instance=cert_ref.instance,
                sha1Fingerprint=cert_ref.sha1Fingerprint))

        operation_ref = client.resource_parser.Create('sql.operations',
                                                      operation=result.name,
                                                      project=cert_ref.project)

        if args.async_:
            return sql_client.operations.Get(
                sql_messages.SqlOperationsGetRequest(
                    project=operation_ref.project,
                    instance=operation_ref.instance,
                    operation=operation_ref.operation))

        operations.OperationsV1Beta4.WaitForOperation(sql_client,
                                                      operation_ref,
                                                      'Deleting sslCert')

        log.DeletedResource(cert_ref)
Example No. 17
    def Run(self, args):
        client = registrations.RegistrationsClient()

        registration_ref = args.CONCEPTS.registration.Parse()
        location_ref = registration_ref.Parent()

        labels = labels_util.ParseCreateArgs(
            args, client.messages.Registration.LabelsValue)

        name_servers = util.ParseNameServers(args.name_servers,
                                             args.cloud_dns_zone,
                                             registration_ref.registrationsId)
        registrant_contact = util.ParseWhoisContact(
            args.registrant_contact_from_file)
        if registrant_contact is None:
            registrant_contact = util.PromptForWhoisContact()
        if registrant_contact is None:
            raise exceptions.Error(
                'Registrant contact is required. It can be provided interactively or '
                'through the --registrant-contact-from-file flag.')

        availability = client.CheckAvailability(
            location_ref, registration_ref.registrationsId).availability

        if availability.available != client.availability_enum.AVAILABLE:
            raise exceptions.Error(
                'Domain [{}] is not available for registration: [{}]'.format(
                    registration_ref.registrationsId, availability.available))

        whois_privacy = util.ParseWhoisPrivacy(args.whois_privacy)
        if whois_privacy is None:
            whois_privacy = util.PromptForWhoisPrivacy(
                availability.supportedWhoisPrivacy)

        hsts_notice_accepted = False
        if client.notices_enum.HSTS_PRELOADED in availability.notices:
            console_io.PromptContinue((
                '{} is a secure namespace. You may purchase {} now but it will '
                'require an SSL certificate for website connection.').format(
                    util.DomainNamespace(availability.domainName),
                    availability.domainName),
                                      throw_if_unattended=True,
                                      cancel_on_no=True)
            hsts_notice_accepted = True

        console_io.PromptContinue('Yearly price: {}\n'.format(
            util.TransformMoneyType(availability.yearlyPrice)),
                                  throw_if_unattended=True,
                                  cancel_on_no=True)

        response = client.Create(location_ref,
                                 registration_ref.registrationsId,
                                 name_servers=name_servers,
                                 registrant_contact=registrant_contact,
                                 whois_privacy=whois_privacy,
                                 yearly_price=availability.yearlyPrice,
                                 hsts_notice_accepted=hsts_notice_accepted,
                                 labels=labels,
                                 validate_only=args.validate_only)

        if args.validate_only:
            # TODO(b/110077203): Log something sensible.
            return

        if args.async_:
            # TODO(b/110077203): Log something sensible.
            return response

        operations_client = operations.Client.FromApiVersion('v1alpha1')
        operation_ref = util.ParseOperation(response.name)
        response = operations_client.WaitForOperation(
            operation_ref,
            'Waiting for [{}] to complete'.format(operation_ref.Name()))

        log.CreatedResource(registration_ref.Name(), 'registration')
        return response
Example No. 18
    def testEOFInteractive(self):
        self.StartObjectPatch(console_io, 'IsInteractive').return_value = True
        result = console_io.PromptContinue()
        self.assertTrue(result)
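The test above patches `console_io.IsInteractive` so `PromptContinue` believes it is running interactively, then verifies that hitting EOF on stdin falls back to the default answer (True). Outside the gcloud test base class, roughly the same check could be written with `unittest.mock`; this sketch assumes the standard `googlecloudsdk.core.console.console_io` import path and stdin at EOF:

from unittest import mock

from googlecloudsdk.core.console import console_io

def test_eof_returns_default():
    with mock.patch.object(console_io, 'IsInteractive', return_value=True):
        # With stdin at EOF, PromptContinue should return its default (True).
        assert console_io.PromptContinue()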
Example No. 19
    def Run(self, args):
        """This is what gets called when the user runs this command.

    Args:
      args: an argparse namespace. All the arguments that were provided to this
        command invocation.

    Returns:
      Some value that we want to have printed later.
    """
        adapter = self.context['api_adapter']
        location_get = self.context['location_get']
        location = location_get(args)
        cluster_ref = adapter.ParseCluster(args.name, location)
        project_id = properties.VALUES.core.project.Get(required=True)
        concurrent_node_count = getattr(args, 'concurrent_node_count', None)

        try:
            cluster = adapter.GetCluster(cluster_ref)
        except (exceptions.HttpException,
                apitools_exceptions.HttpForbiddenError, util.Error) as error:
            log.warning(
                ('Problem loading details of cluster to upgrade:\n\n{}\n\n'
                 'You can still attempt to upgrade the cluster.\n').format(
                     console_attr.SafeText(error)))
            cluster = None

        try:
            server_conf = adapter.GetServerConfig(project_id, location)
        except (exceptions.HttpException,
                apitools_exceptions.HttpForbiddenError, util.Error) as error:
            log.warning(
                ('Problem loading server config:\n\n{}\n\n'
                 'You can still attempt to upgrade the cluster.\n').format(
                     console_attr.SafeText(error)))
            server_conf = None

        upgrade_message = container_command_util.ClusterUpgradeMessage(
            name=args.name,
            server_conf=server_conf,
            cluster=cluster,
            main=args.main,
            node_pool_name=args.node_pool,
            new_version=args.cluster_version,
            concurrent_node_count=concurrent_node_count)

        console_io.PromptContinue(message=upgrade_message,
                                  throw_if_unattended=True,
                                  cancel_on_no=True)

        options = self.ParseUpgradeOptions(args)

        try:
            op_ref = adapter.UpdateCluster(cluster_ref, options)
        except apitools_exceptions.HttpError as error:
            raise exceptions.HttpException(error, util.HTTP_ERROR_FORMAT)

        if not args.async_:
            adapter.WaitForOperation(op_ref,
                                     'Upgrading {0}'.format(
                                         cluster_ref.clusterId),
                                     timeout_s=args.timeout)

            log.UpdatedResource(cluster_ref)
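Both lookups above degrade to a warning rather than aborting, so the upgrade prompt can still be shown with partial information. That best-effort fetch could be factored into a small helper, sketched here with a generic exception tuple standing in for the gcloud-specific error types:

import logging

def BestEffort(fetch, description, errors=(Exception,)):
    """Returns fetch(), or None with a warning if it raises one of errors."""
    try:
        return fetch()
    except errors as error:
        logging.warning(
            'Problem loading %s:\n\n%s\n\n'
            'You can still attempt to upgrade the cluster.', description, error)
        return None

# cluster = BestEffort(lambda: adapter.GetCluster(cluster_ref), 'cluster details')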
Example No. 20
    def Run(self, args):
        client = appengine_api_client.GetApiClientForTrack(self.ReleaseTrack())

        services = client.ListServices()

        # If a service is supplied, only list versions for that service
        if args.service:
            services = [s for s in services if s.id == args.service]

        all_versions = client.ListVersions(services)
        # Sort versions to make behavior deterministic enough for unit testing.
        versions = sorted(version_util.GetMatchingVersions(
            all_versions, args.versions, args.service),
                          key=str)

        services_to_delete = []
        for service in sorted(services):
            service_versions = len(
                [v for v in all_versions if v.service == service.id])
            versions_to_delete = len(
                [v for v in versions if v.service == service.id])
            if service_versions == versions_to_delete and service_versions > 0:
                if service.id == 'default':
                    raise VersionsDeleteError(
                        'The default service (module) may not be deleted, and must '
                        'comprise at least one version.')
                else:
                    services_to_delete.append(service)
                for version in copy.copy(versions):
                    if version.service == service.id:
                        versions.remove(version)

        for version in versions:
            if version.traffic_split:
                # TODO(b/32869800): collect info on all versions before raising.
                raise VersionsDeleteError(
                    'Version [{version}] is currently serving {allocation:.2f}% of '
                    'traffic for service [{service}].\n\n'
                    'Please move all traffic away via one of the following methods:\n'
                    ' - deploying a new version with the `--promote` argument\n'
                    ' - running `gcloud app services set-traffic`\n'
                    ' - running `gcloud app versions migrate`'.format(
                        version=version.id,
                        allocation=version.traffic_split * 100,
                        service=version.service))

        if services_to_delete:
            word = text.Pluralize(len(services_to_delete), 'service')
            log.warning(
                'Requested deletion of all existing versions for the following {0}:'
                .format(word))
            resource_printer.Print(services_to_delete, 'list', out=log.status)
            console_io.PromptContinue(prompt_string=(
                '\nYou cannot delete all versions of a service. Would you like to '
                'delete the entire {0} instead?').format(word),
                                      cancel_on_no=True)
            service_util.DeleteServices(client, services_to_delete)

        if versions:
            fmt = 'list[title="Deleting the following versions:"]'
            resource_printer.Print(versions, fmt, out=log.status)
            console_io.PromptContinue(cancel_on_no=True)
        else:
            if not services_to_delete:
                log.warning('No matching versions found.')

        version_util.DeleteVersions(client, versions)
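The service-versus-version bookkeeping above reduces to: if every remaining version of a service is slated for deletion, offer to delete the whole service instead. A compact sketch of that counting step using `collections.Counter`, with plain (service, version) tuples standing in for the API objects:

from collections import Counter

all_versions = [('default', 'v1'), ('api', 'v1'), ('api', 'v2')]
to_delete = [('api', 'v1'), ('api', 'v2')]

totals = Counter(service for service, _ in all_versions)
doomed = Counter(service for service, _ in to_delete)

services_to_delete = [s for s in totals
                      if totals[s] == doomed[s] and totals[s] > 0]
print(services_to_delete)  # ['api']: every version of 'api' would be deleted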
Example No. 21
def PopulatePublicKey(api_client, service_id, version_id, instance_id,
                      public_key, release_track):
    """Enable debug mode on and send SSH keys to a flex instance.

  Common method for SSH-like commands, does the following:
  - Makes sure that the service/version/instance specified exists and is of the
    right type (Flexible).
  - If not already done, prompts and enables debug on the instance.
  - Populates the public key onto the instance.

  Args:
    api_client: An appengine_api_client.AppEngineApiClient.
    service_id: str, The service ID.
    version_id: str, The version ID.
    instance_id: str, The instance ID.
    public_key: ssh.Keys.PublicKey, Public key to send.
    release_track: calliope.base.ReleaseTrack, The current release track.

  Raises:
    InvalidInstanceTypeError: The instance is not supported for SSH.
    MissingVersionError: The version specified does not exist.
    MissingInstanceError: The instance specified does not exist.
    UnattendedPromptError: Not running in a tty.
    OperationCancelledError: User cancelled the operation.

  Returns:
    ConnectionDetails, the details to use for SSH/SCP for the SSH
    connection.
  """
    try:
        version = api_client.GetVersionResource(service=service_id,
                                                version=version_id)
    except apitools_exceptions.HttpNotFoundError:
        raise command_exceptions.MissingVersionError('{}/{}'.format(
            service_id, version_id))
    version = version_util.Version.FromVersionResource(version, None)
    if version.environment is not env.FLEX:
        if version.environment is env.MANAGED_VMS:
            environment = 'Managed VMs'
            msg = 'Use `gcloud compute ssh` for Managed VMs instances.'
        else:
            environment = 'Standard'
            msg = None
        raise command_exceptions.InvalidInstanceTypeError(environment, msg)
    res = resources.REGISTRY.Parse(
        instance_id,
        params={
            'appsId': properties.VALUES.core.project.GetOrFail,
            'versionsId': version_id,
            'instancesId': instance_id,
            'servicesId': service_id,
        },
        collection='appengine.apps.services.versions.instances')
    rel_name = res.RelativeName()
    try:
        instance = api_client.GetInstanceResource(res)
    except apitools_exceptions.HttpNotFoundError:
        raise command_exceptions.MissingInstanceError(rel_name)

    if not instance.vmDebugEnabled:
        log.warning(_ENABLE_DEBUG_WARNING)
        console_io.PromptContinue(cancel_on_no=True, throw_if_unattended=True)
    user = ssh.GetDefaultSshUsername()
    project = _GetComputeProject(release_track)
    user, use_oslogin = ssh.CheckForOsloginAndGetUser(None, project, user,
                                                      public_key.ToEntry(),
                                                      None, release_track)
    remote = ssh.Remote(instance.vmIp, user=user)
    if not use_oslogin:
        ssh_key = '{user}:{key} {user}'.format(user=user,
                                               key=public_key.ToEntry())
        log.status.Print(
            'Sending public key to instance [{}].'.format(rel_name))
        api_client.DebugInstance(res, ssh_key)
    options = {
        'IdentitiesOnly':
        'yes',  # No ssh-agent as of yet
        'UserKnownHostsFile':
        ssh.KnownHosts.DEFAULT_PATH,
        'CheckHostIP':
        'no',
        'HostKeyAlias':
        _HOST_KEY_ALIAS.format(project=api_client.project,
                               instance_id=instance_id)
    }
    return ConnectionDetails(remote, options)
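The options dict above maps one-to-one onto `ssh -o Key=value` flags. A small sketch of that translation step (the real gcloud SSH wrapper does considerably more, such as platform and key handling):

def OptionsToFlags(options):
    """Renders an SSH options dict as a flat ['-o', 'Key=value', ...] list."""
    flags = []
    for key, value in sorted(options.items()):
        flags.extend(['-o', '{}={}'.format(key, value)])
    return flags

print(OptionsToFlags({'IdentitiesOnly': 'yes', 'CheckHostIP': 'no'}))
# ['-o', 'CheckHostIP=no', '-o', 'IdentitiesOnly=yes']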
Example No. 22
  def Run(self, args):
    project = arg_utils.GetFromNamespace(args, '--project', use_defaults=True)
    # This incidentally verifies that the kubeconfig and context args are valid.
    with kube_util.KubernetesClient(args) as kube_client:
      kube_client.CheckClusterAdminPermissions()
      kube_util.ValidateClusterIdentifierFlags(kube_client, args)
      uuid = kube_util.GetClusterUUID(kube_client)
      # Read the service account files provided in the arguments early, in order
      # to catch invalid files before performing mutating operations.
      # Service Account key file is required if Workload Identity is not
      # enabled.
      # If Workload Identity is enabled, then the Connect Agent uses
      # a Kubernetes Service Account token instead and hence a GCP Service
      # Account key is not required.
      service_account_key_data = ''
      if args.service_account_key_file:
        try:
          service_account_key_data = hub_util.Base64EncodedFileContents(
              args.service_account_key_file)
        except files.Error as e:
          raise exceptions.Error('Could not process {}: {}'.format(
              SERVICE_ACCOUNT_KEY_FILE_FLAG, e))

      docker_credential_data = None
      if args.docker_credential_file:
        try:
          docker_credential_data = hub_util.Base64EncodedFileContents(
              args.docker_credential_file)
        except files.Error as e:
          raise exceptions.Error('Could not process {}: {}'.format(
              DOCKER_CREDENTIAL_FILE_FLAG, e))

      gke_cluster_self_link = kube_client.processor.gke_cluster_self_link
      issuer_url = None
      # enable_workload_identity, public_issuer_url, and
      # manage_workload_identity_bucket are only properties if we are on the
      # alpha or beta track
      if (self.ReleaseTrack() is not base.ReleaseTrack.GA
          and args.enable_workload_identity):
        # public_issuer_url can be None, given by the user, or gke_cluster_uri
        # (in case of a GKE cluster).
        # args.public_issuer_url takes precedence over gke_cluster_uri.
        public_issuer_url = args.public_issuer_url or kube_client.processor.gke_cluster_uri or None

        try:
          openid_config_json = kube_client.GetOpenIDConfiguration(
              issuer_url=public_issuer_url)
        except Exception as e:  # pylint: disable=broad-except
          raise exceptions.Error(
              'Error getting the OpenID Provider Configuration: '
              '{}'.format(e))

        # Extract the issuer URL from the discovery doc.
        issuer_url = json.loads(openid_config_json).get('issuer')
        if not issuer_url:
          raise exceptions.Error('Invalid OpenID Config: '
                                 'missing issuer: {}'.format(
                                     openid_config_json))
        # Ensure public_issuer_url (only non-empty) matches what came back in
        # the discovery doc.
        if public_issuer_url and (public_issuer_url != issuer_url):
          raise exceptions.Error('--public-issuer-url {} did not match issuer '
                                 'returned in discovery doc: {}'.format(
                                     public_issuer_url, issuer_url))

        # Set up the GCS bucket that serves OpenID Provider Config and JWKS.
        if self.ReleaseTrack(
        ) is base.ReleaseTrack.ALPHA and args.manage_workload_identity_bucket:
          openid_keyset_json = kube_client.GetOpenIDKeyset()
          api_util.CreateWorkloadIdentityBucket(project, issuer_url,
                                                openid_config_json,
                                                openid_keyset_json)

      # Attempt to create a membership.
      already_exists = False

      obj = None
      # For backward compatibility, check if a membership was previously created
      # using the cluster uuid.
      parent = api_util.ParentRef(project, 'global')
      membership_id = uuid
      resource_name = api_util.MembershipRef(project, 'global', uuid)
      obj = self._CheckMembershipWithUUID(resource_name, args.CLUSTER_NAME)
      if obj:
        # The membership exists and has the same description.
        already_exists = True
      else:
        # Attempt to create a new membership using cluster_name.
        membership_id = args.CLUSTER_NAME
        resource_name = api_util.MembershipRef(project, 'global',
                                               args.CLUSTER_NAME)
        try:
          self._VerifyClusterExclusivity(kube_client, parent, membership_id)
          obj = api_util.CreateMembership(project, args.CLUSTER_NAME,
                                          args.CLUSTER_NAME,
                                          gke_cluster_self_link, uuid,
                                          self.ReleaseTrack(),
                                          issuer_url)
        except apitools_exceptions.HttpConflictError as e:
          # If the error is not due to the object already existing, re-raise.
          error = core_api_exceptions.HttpErrorPayload(e)
          if error.status_description != 'ALREADY_EXISTS':
            raise
          obj = api_util.GetMembership(resource_name, self.ReleaseTrack())
          if not obj.externalId:
            raise exceptions.Error(
                'invalid membership {0} does not have '
                'external_id field set. We cannot determine '
                'if registration is requested against a '
                'valid existing Membership. Consult the '
                'documentation on container hub memberships '
                'update for more information or run gcloud '
                'container hub memberships delete {0} if you '
                'are sure that this is an invalid or '
                'otherwise stale Membership'.format(membership_id))
          if obj.externalId != uuid:
            raise exceptions.Error(
                'membership {0} already exists in the project'
                ' with another cluster. If this operation is'
                ' intended, please run `gcloud container '
                'hub memberships delete {0}` and register '
                'again.'.format(membership_id))

          # The membership exists with same cluster_name.
          already_exists = True

      # In case of an existing membership, check with the user to upgrade the
      # Connect-Agent.
      if already_exists:
        # Update Membership if issuer is updated by the user from an empty value
        # to a non-empty value or vice versa. UpdateMembership API will error
        # out if the user tries to modify the issuer URL.
        if self.ReleaseTrack() is not base.ReleaseTrack.GA and (
            (obj.authority and not issuer_url) or
            (issuer_url and not obj.authority) or
            (obj.authority and (obj.authority.issuer != issuer_url))):
          console_io.PromptContinue(
              message=hub_util.GenerateWIUpdateMsgString(
                  obj, issuer_url, resource_name, args.CLUSTER_NAME),
              cancel_on_no=True)
          try:
            api_util.UpdateMembership(resource_name, obj, 'authority',
                                      self.ReleaseTrack(), issuer_url)
            log.status.Print(
                'Updated the membership [{}] for the cluster [{}]'.format(
                    resource_name, args.CLUSTER_NAME))
          except Exception as e:
            raise exceptions.Error(
                'Error in updating the membership [{}]:{}'.format(
                    resource_name, e))
        else:
          console_io.PromptContinue(
              message='A membership [{}] for the cluster [{}] already exists. '
              'Continuing will reinstall the Connect agent deployment to use a '
              'new image (if one is available).'.format(resource_name,
                                                        args.CLUSTER_NAME),
              cancel_on_no=True)
      else:
        log.status.Print(
            'Created a new membership [{}] for the cluster [{}]'.format(
                resource_name, args.CLUSTER_NAME))

      # Attempt to update the existing agent deployment, or install a new agent
      # if necessary.
      try:
        self._InstallOrUpdateExclusivityArtifacts(kube_client, resource_name)
        agent_util.DeployConnectAgent(kube_client, args,
                                      service_account_key_data,
                                      docker_credential_data, resource_name,
                                      self.ReleaseTrack())
      except Exception as e:
        log.status.Print('Error in installing the Connect Agent: {}'.format(e))
        # In case of a new membership, we need to clean up membership and
        # resources if we failed to install the Connect Agent.
        if not already_exists:
          api_util.DeleteMembership(resource_name, self.ReleaseTrack())
          exclusivity_util.DeleteMembershipResources(kube_client)
        raise
      log.status.Print(
          'Finished registering the cluster [{}] with the Hub.'.format(
              args.CLUSTER_NAME))
      return obj
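The workload-identity branch above fetches the cluster's OpenID discovery document, extracts its `issuer`, and cross-checks it against any user-supplied `--public-issuer-url`. That validation step in isolation, sketched over a raw JSON string:

import json

def ValidateIssuer(openid_config_json, public_issuer_url=None):
    """Returns the issuer URL from a discovery doc, checking a user override."""
    issuer_url = json.loads(openid_config_json).get('issuer')
    if not issuer_url:
        raise ValueError('Invalid OpenID Config: missing issuer: %s'
                         % openid_config_json)
    if public_issuer_url and public_issuer_url != issuer_url:
        raise ValueError('--public-issuer-url %s did not match issuer '
                         'returned in discovery doc: %s'
                         % (public_issuer_url, issuer_url))
    return issuer_url

print(ValidateIssuer('{"issuer": "https://issuer.example"}'))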
Example No. 23
    def Run(self, args):
        """Imports data into a Cloud SQL instance from Google Cloud Storage.

    Args:
      args: argparse.Namespace, The arguments that this command was invoked
          with.

    Returns:
      A dict object representing the operations resource describing the import
      operation if the import was successful.
    Raises:
      HttpException: A http error response was received while executing api
          request.
      ToolException: An error other than http error occurred while executing the
          command.
    """
        client = api_util.SqlClient(api_util.API_VERSION_FALLBACK)
        sql_client = client.sql_client
        sql_messages = client.sql_messages

        validate.ValidateInstanceName(args.instance)

        console_io.PromptContinue(
            message='Data from {0} will be imported to {1}.'.format(
                args.uri[0], args.instance),
            default=True,
            cancel_on_no=True)
        instance_ref = client.resource_parser.Parse(args.instance,
                                                    collection='sql.instances')

        import_request = sql_messages.SqlInstancesImportRequest(
            instance=instance_ref.instance,
            project=instance_ref.project,
            instancesImportRequest=sql_messages.InstancesImportRequest(
                importContext=sql_messages.ImportContext(
                    uri=args.uri,
                    database=args.database,
                ), ),
        )

        result = sql_client.instances.Import(import_request)

        operation_ref = client.resource_parser.Create(
            'sql.operations',
            operation=result.operation,
            project=instance_ref.project,
            instance=instance_ref.instance,
        )

        if args.async_:
            return sql_client.operations.Get(
                sql_messages.SqlOperationsGetRequest(
                    project=operation_ref.project,
                    instance=operation_ref.instance,
                    operation=operation_ref.operation))

        operations.OperationsV1Beta3.WaitForOperation(
            sql_client, operation_ref, 'Importing Cloud SQL instance')

        log.status.write('Imported [{instance}] from [{buckets}].\n'.format(
            instance=instance_ref, buckets=','.join(args.uri)))

        return None
Example No. 24
  def _PickAccount(self, console_only, preselected=None):
    """Checks if current credentials are valid, if not runs auth login.

    Args:
      console_only: bool, True if the auth flow shouldn't use the browser
      preselected: str, disable prompts and use this value if not None

    Returns:
      bool, True if valid credentials are setup.
    """

    new_credentials = False
    accounts = c_store.AvailableAccounts()
    if accounts:
      # There is at least one credentialed account.
      if preselected:
        # Try to use the preselected account. Fail if it's not credentialed.
        account = preselected
        if account not in accounts:
          log.status.write('\n[{0}] is not one of your credentialed accounts '
                           '[{1}].\n'.format(account, ','.join(accounts)))
          return False
        # Fall through to set the account property.
      else:
        # Prompt for the account to use.
        idx = console_io.PromptChoice(
            accounts + ['Log in with a new account'],
            message='Choose the account you would like to use to perform '
                    'operations for this configuration:',
            prompt_string=None)
        if idx is None:
          return False
        if idx < len(accounts):
          account = accounts[idx]
        else:
          new_credentials = True
    elif preselected:
      # Preselected account specified but there are no credentialed accounts.
      log.status.write('\n[{0}] is not a credentialed account.\n'.format(
          preselected))
      return False
    else:
      # Must log in with new credentials.
      answer = console_io.PromptContinue(
          prompt_string='You must log in to continue. Would you like to log in')
      if not answer:
        return False
      new_credentials = True
    if new_credentials:
      # Call `gcloud auth login` to get new credentials.
      # `gcloud auth login` may have user interaction, do not suppress it.
      browser_args = ['--no-launch-browser'] if console_only else []
      if not self._RunCmd(['auth', 'login'],
                          ['--force', '--brief'] + browser_args,
                          disable_user_output=False):
        return False
      # `gcloud auth login` already did `gcloud config set account`.
    else:
      # Set the config account to the already credentialed account.
      properties.PersistProperty(properties.VALUES.core.account, account)

    log.status.write('You are logged in as: [{0}].\n\n'
                     .format(properties.VALUES.core.account.Get()))
    return True
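The account-selection logic above is essentially "pick from a numbered list, with a trailing sentinel entry meaning log in fresh". Stripped of gcloud's console helpers, the shape is roughly the following, with plain `input()` standing in for `console_io.PromptChoice` (no input validation, as a sketch):

def PickAccount(accounts):
    """Returns a chosen account, or None to signal that a new login is needed."""
    choices = accounts + ['Log in with a new account']
    for i, choice in enumerate(choices, start=1):
        print('[%d] %s' % (i, choice))
    idx = int(input('Please enter your numeric choice: ')) - 1
    if idx < len(accounts):
        return accounts[idx]
    return None  # caller should run the login flow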
Example No. 25
    def Run(self, args):
        """This is what gets called when the user runs this command.

    Args:
      args: an argparse namespace. All the arguments that were provided to this
        command invocation.

    Returns:
      Some value that we want to have printed later.
    """
        adapter = self.context['api_adapter']
        location_get = self.context['location_get']
        location = location_get(args)
        cluster_ref = adapter.ParseCluster(args.name, location)
        cluster_name = args.name
        cluster_node_count = None
        cluster_zone = cluster_ref.zone
        try:
            # Attempt to get cluster for better prompts and to validate args.
            # Errors here are only warnings, not fatal; the command should fail
            # only on the actual update API calls below.
            cluster = adapter.GetCluster(cluster_ref)
            cluster_name = cluster.name
            cluster_node_count = cluster.currentNodeCount
            cluster_zone = cluster.zone
        except (exceptions.HttpException,
                apitools_exceptions.HttpForbiddenError, util.Error) as error:
            log.warning(
                ('Problem loading details of cluster to update:\n\n{}\n\n'
                 'You can still attempt updates to the cluster.\n').format(
                     console_attr.SafeText(error)))

        # locations will be None if additional-zones was not specified, an
        # empty list if it was specified with no argument, or a populated list
        # if zones were provided. We want to distinguish between the case where
        # it isn't specified (and thus shouldn't be passed on to the API) and
        # the case where it's specified as wanting no additional zones, in
        # which case we must pass the cluster's primary zone to the API.
        # TODO(b/29578401): Remove the hasattr once the flag is GA.
        locations = None
        if hasattr(args,
                   'additional_zones') and args.additional_zones is not None:
            locations = sorted([cluster_ref.zone] + args.additional_zones)
        if hasattr(args, 'node_locations') and args.node_locations is not None:
            locations = sorted(args.node_locations)

        if args.IsSpecified('username') or args.IsSpecified(
                'enable_basic_auth'):
            flags.MungeBasicAuthFlags(args)
            options = api_adapter.SetMainAuthOptions(
                action=api_adapter.SetMainAuthOptions.SET_USERNAME,
                username=args.username,
                password=args.password)

            try:
                op_ref = adapter.SetMainAuth(cluster_ref, options)
            except apitools_exceptions.HttpError as error:
                raise exceptions.HttpException(error, util.HTTP_ERROR_FORMAT)
        elif (args.generate_password or args.set_password
              or args.IsSpecified('password')):
            if args.generate_password:
                password = ''
                options = api_adapter.SetMainAuthOptions(
                    action=api_adapter.SetMainAuthOptions.GENERATE_PASSWORD,
                    password=password)
            else:
                password = args.password
                if not args.IsSpecified('password'):
                    password = input('Please enter the new password:')
                options = api_adapter.SetMainAuthOptions(
                    action=api_adapter.SetMainAuthOptions.SET_PASSWORD,
                    password=password)
            try:
                op_ref = adapter.SetMainAuth(cluster_ref, options)
            except apitools_exceptions.HttpError as error:
                raise exceptions.HttpException(error, util.HTTP_ERROR_FORMAT)
        elif args.enable_network_policy is not None:
            console_io.PromptContinue(
                message='Enabling/Disabling Network Policy causes a rolling '
                'update of all cluster nodes, similar to performing a cluster '
                'upgrade.  This operation is long-running and will block other '
                'operations on the cluster (including delete) until it has run '
                'to completion.',
                cancel_on_no=True)
            options = api_adapter.SetNetworkPolicyOptions(
                enabled=args.enable_network_policy)
            try:
                op_ref = adapter.SetNetworkPolicy(cluster_ref, options)
            except apitools_exceptions.HttpError as error:
                raise exceptions.HttpException(error, util.HTTP_ERROR_FORMAT)
        elif args.start_ip_rotation or args.start_credential_rotation:
            if args.start_ip_rotation:
                msg_tmpl = """This will start an IP Rotation on cluster [{name}]. The \
main will be updated to serve on a new IP address in addition to the current \
IP address. Kubernetes Engine will then recreate all nodes ({num_nodes} nodes) \
to point to the new IP address. This operation is long-running and will block \
other operations on the cluster (including delete) until it has run to \
completion."""
                rotate_credentials = False
            elif args.start_credential_rotation:
                msg_tmpl = """This will start an IP and Credentials Rotation on cluster\
 [{name}]. The main will be updated to serve on a new IP address in addition \
to the current IP address, and cluster credentials will be rotated. Kubernetes \
Engine will then recreate all nodes ({num_nodes} nodes) to point to the new IP \
address. This operation is long-running and will block other operations on the \
cluster (including delete) until it has run to completion."""
                rotate_credentials = True
            console_io.PromptContinue(message=msg_tmpl.format(
                name=cluster_name,
                num_nodes=cluster_node_count if cluster_node_count else '?'),
                                      cancel_on_no=True)
            try:
                op_ref = adapter.StartIpRotation(
                    cluster_ref, rotate_credentials=rotate_credentials)
            except apitools_exceptions.HttpError as error:
                raise exceptions.HttpException(error, util.HTTP_ERROR_FORMAT)
        elif args.complete_ip_rotation or args.complete_credential_rotation:
            if args.complete_ip_rotation:
                msg_tmpl = """This will complete the in-progress IP Rotation on \
cluster [{name}]. The main will be updated to stop serving on the old IP \
address and only serve on the new IP address. Make sure all API clients have \
been updated to communicate with the new IP address (e.g. by running `gcloud \
container clusters get-credentials --project {project} --zone {zone} {name}`). \
This operation is long-running and will block other operations on the cluster \
(including delete) until it has run to completion."""
            elif args.complete_credential_rotation:
                msg_tmpl = """This will complete the in-progress Credential Rotation on\
 cluster [{name}]. The main will be updated to stop serving on the old IP \
address and only serve on the new IP address. Old cluster credentials will be \
invalidated. Make sure all API clients have been updated to communicate with \
the new IP address (e.g. by running `gcloud container clusters get-credentials \
--project {project} --zone {zone} {name}`). This operation is long-running and \
will block other operations on the cluster (including delete) until it has run \
to completion."""
            console_io.PromptContinue(message=msg_tmpl.format(
                name=cluster_name,
                project=cluster_ref.projectId,
                zone=cluster_zone),
                                      cancel_on_no=True)
            try:
                op_ref = adapter.CompleteIpRotation(cluster_ref)
            except apitools_exceptions.HttpError as error:
                raise exceptions.HttpException(error, util.HTTP_ERROR_FORMAT)
        elif args.update_labels is not None:
            try:
                op_ref = adapter.UpdateLabels(cluster_ref, args.update_labels)
            except apitools_exceptions.HttpError as error:
                raise exceptions.HttpException(error, util.HTTP_ERROR_FORMAT)
        elif args.remove_labels is not None:
            try:
                op_ref = adapter.RemoveLabels(cluster_ref, args.remove_labels)
            except apitools_exceptions.HttpError as error:
                raise exceptions.HttpException(error, util.HTTP_ERROR_FORMAT)
        elif args.logging_service is not None and args.monitoring_service is None:
            try:
                op_ref = adapter.SetLoggingService(cluster_ref,
                                                   args.logging_service)
            except apitools_exceptions.HttpError as error:
                raise exceptions.HttpException(error, util.HTTP_ERROR_FORMAT)
        elif args.maintenance_window is not None:
            try:
                op_ref = adapter.SetMaintenanceWindow(cluster_ref,
                                                      args.maintenance_window)
            except apitools_exceptions.HttpError as error:
                raise exceptions.HttpException(error, util.HTTP_ERROR_FORMAT)
        else:
            if args.enable_legacy_authorization is not None:
                op_ref = adapter.SetLegacyAuthorization(
                    cluster_ref, args.enable_legacy_authorization)
            else:
                options = self.ParseUpdateOptions(args, locations)
                op_ref = adapter.UpdateCluster(cluster_ref, options)

        if not args.async_:
            adapter.WaitForOperation(op_ref,
                                     'Updating {0}'.format(
                                         cluster_ref.clusterId),
                                     timeout_s=1800)

            log.UpdatedResource(cluster_ref)
            cluster_url = util.GenerateClusterUrl(cluster_ref)
            log.status.Print(
                'To inspect the contents of your cluster, go to: ' +
                cluster_url)

            if (args.start_ip_rotation or args.complete_ip_rotation
                    or args.start_credential_rotation
                    or args.complete_credential_rotation):
                cluster = adapter.GetCluster(cluster_ref)
                try:
                    util.ClusterConfig.Persist(cluster, cluster_ref.projectId)
                except kconfig.MissingEnvVarError as error:
                    log.warning(error)
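The `locations` comment in the example above leans on argparse's tri-state behavior for list flags: an unspecified flag stays `None`, `nargs='*'` with no values yields `[]`, and provided values yield a populated list. A minimal demonstration of the distinction (the flag name here is illustrative only):

import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--additional-zones', nargs='*')

print(parser.parse_args([]).additional_zones)                       # None
print(parser.parse_args(['--additional-zones']).additional_zones)   # []
print(parser.parse_args(
    ['--additional-zones', 'us-central1-b']).additional_zones)      # ['us-central1-b']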
Example No. 26
  def _PickDefaultRegionAndZone(self):
    """Pulls metadata properties for region and zone and sets them in gcloud."""
    try:
      # Use --quiet flag to skip the enable api prompt.
      project_info = self._RunCmd(['compute', 'project-info', 'describe'],
                                  params=['--quiet'])
    except Exception:  # pylint:disable=broad-except
      log.status.write("""\
Not setting default zone/region (this feature makes it easier to use
[gcloud compute] by setting an appropriate default value for the
--zone and --region flag).
See https://cloud.google.com/compute/docs/gcloud-compute section on how to set
default compute region and zone manually. If you would like [gcloud init] to be
able to do this for you the next time you run it, make sure the
Compute Engine API is enabled for your project on the
https://console.developers.google.com/apis page.

""")
      return None

    default_zone = None
    default_region = None
    if project_info is not None:
      project_info = resource_projector.MakeSerializable(project_info)
      metadata = project_info.get('commonInstanceMetadata', {})
      for item in metadata.get('items', []):
        if item['key'] == 'google-compute-default-zone':
          default_zone = item['value']
        elif item['key'] == 'google-compute-default-region':
          default_region = item['value']

    # We could not determine the zone automatically. Before offering choices
    # for zone and/or region, ask the user whether to configure defaults at all.
    if not default_zone:
      answer = console_io.PromptContinue(
          prompt_string=('Do you want to configure a default Compute '
                         'Region and Zone?'))
      if not answer:
        return

    # Same logic applies to region and zone properties.
    def SetProperty(name, default_value, list_command):
      """Set named compute property to default_value or get via list command."""
      if not default_value:
        values = self._RunCmd(list_command)
        if values is None:
          return
        values = list(values)
        message = (
            'Which Google Compute Engine {0} would you like to use as project '
            'default?\n'
            'If you do not specify a {0} via a command line flag while working '
            'with Compute Engine resources, the default is assumed.').format(
                name)
        idx = console_io.PromptChoice(
            [value['name'] for value in values]
            + ['Do not set default {0}'.format(name)],
            message=message, prompt_string=None, allow_freeform=True,
            freeform_suggester=usage_text.TextChoiceSuggester())
        if idx is None or idx == len(values):
          return
        default_value = values[idx]
      properties.PersistProperty(properties.VALUES.compute.Property(name),
                                 default_value['name'])
      log.status.write('Your project default Compute Engine {0} has been set '
                       'to [{1}].\nYou can change it by running '
                       '[gcloud config set compute/{0} NAME].\n\n'
                       .format(name, default_value['name']))
      return default_value

    if default_zone:
      default_zone = self._RunCmd(['compute', 'zones', 'describe'],
                                  [default_zone])
    zone = SetProperty('zone', default_zone, ['compute', 'zones', 'list'])
    if zone and not default_region:
      default_region = zone['region']
    if default_region:
      default_region = self._RunCmd(['compute', 'regions', 'describe'],
                                    [default_region])
    SetProperty('region', default_region, ['compute', 'regions', 'list'])
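`commonInstanceMetadata` arrives as a list of `{'key': ..., 'value': ...}` items, which is why the code above walks the list looking for the two well-known keys. The same lookup, factored into a small helper (a sketch, not the SDK's API):

def GetMetadataValue(project_info, key):
    """Finds a value in commonInstanceMetadata items, or returns None."""
    metadata = project_info.get('commonInstanceMetadata', {})
    for item in metadata.get('items', []):
        if item['key'] == key:
            return item['value']
    return None

project_info = {'commonInstanceMetadata': {'items': [
    {'key': 'google-compute-default-zone', 'value': 'us-central1-a'}]}}
print(GetMetadataValue(project_info, 'google-compute-default-zone'))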
Example No. 27
class Create(base.CreateCommand):
    """Create a cluster for running containers."""
    @staticmethod
    def Args(parser):
        _Args(parser)
        _AddAdditionalZonesFlag(parser, deprecated=True)
        flags.AddNodeLocationsFlag(parser)
        flags.AddAddonsFlags(parser)
        flags.AddClusterAutoscalingFlags(parser)
        flags.AddMaxPodsPerNodeFlag(parser)
        flags.AddEnableAutoRepairFlag(parser, for_create=True)
        flags.AddEnableKubernetesAlphaFlag(parser)
        flags.AddEnableStackdriverKubernetesFlag(parser)
        flags.AddEnableLegacyAuthorizationFlag(parser)
        flags.AddIPAliasFlags(parser)
        flags.AddLabelsFlag(parser)
        flags.AddLocalSSDFlag(parser)
        flags.AddMaintenanceWindowFlag(parser)
        flags.AddMasterAuthorizedNetworksFlags(parser)
        flags.AddMinCpuPlatformFlag(parser)
        flags.AddNetworkPolicyFlags(parser)
        flags.AddNodeTaintsFlag(parser)
        flags.AddPreemptibleFlag(parser)
        flags.AddClusterNodeIdentityFlags(parser)
        flags.AddPrivateClusterFlags(parser,
                                     with_deprecated=False,
                                     with_alpha=False)
        flags.AddClusterVersionFlag(parser)
        flags.AddNodeVersionFlag(parser)
        flags.AddEnableAutoUpgradeFlag(parser)
        flags.AddTpuFlags(parser, hidden=False)
        flags.AddResourceUsageExportFlags(parser)

    def ParseCreateOptions(self, args):
        flags.WarnGAForFutureAutoUpgradeChange()
        return ParseCreateOptionsBase(args)

    def Run(self, args):
        """This is what gets called when the user runs this command.

    Args:
      args: an argparse namespace. All the arguments that were provided to this
        command invocation.

    Returns:
      Cluster message for the successfully created cluster.

    Raises:
      util.Error, if creation failed.
    """
        if args.async_ and not args.IsSpecified('format'):
            args.format = util.OPERATIONS_FORMAT

        util.CheckKubectlInstalled()

        adapter = self.context['api_adapter']
        location_get = self.context['location_get']
        location = location_get(args)

        cluster_ref = adapter.ParseCluster(args.name, location)
        options = self.ParseCreateOptions(args)

        if options.private_cluster and not (
                options.enable_master_authorized_networks
                or options.master_authorized_networks):
            log.warning(
                '`--private-cluster` makes the master inaccessible from '
                'cluster-external IP addresses, by design. To allow limited '
                'access to the master, see the `--master-authorized-networks` flags '
                'and our documentation on setting up private clusters: '
                'https://cloud.google.com'
                '/kubernetes-engine/docs/how-to/private-clusters')

        if not (options.metadata
                and 'disable-legacy-endpoints' in options.metadata):
            log.warning(
                'Starting in 1.12, default node pools in new clusters '
                'will have their legacy Compute Engine instance metadata '
                'endpoints disabled by default. To create a cluster with '
                'legacy instance metadata endpoints disabled in the default '
                'node pool, run `clusters create` with the flag '
                '`--metadata disable-legacy-endpoints=true`.')

        if options.enable_ip_alias:
            log.warning(
                'The Pod address range limits the maximum size of the cluster. '
                'Please refer to https://cloud.google.com/kubernetes-engine/docs/how-to/flexible-pod-cidr to learn how to optimize IP address allocation.'
            )
        else:
            max_node_number = util.CalculateMaxNodeNumberByPodRange(
                options.cluster_ipv4_cidr)
            if max_node_number > 0:
                log.warning(
                    'Your Pod address range (`--cluster-ipv4-cidr`) can accommodate at most %d node(s). '
                    % max_node_number)

        if options.enable_kubernetes_alpha:
            console_io.PromptContinue(
                message=constants.KUBERNETES_ALPHA_PROMPT,
                throw_if_unattended=True,
                cancel_on_no=True)

        if options.enable_autorepair is not None:
            log.status.Print(
                messages.AutoUpdateUpgradeRepairMessage(
                    options.enable_autorepair, 'autorepair'))

        if options.accelerators is not None:
            log.status.Print(constants.KUBERNETES_GPU_LIMITATION_MSG)

        operation = None
        try:
            operation_ref = adapter.CreateCluster(cluster_ref, options)
            if args.async_:
                return adapter.GetCluster(cluster_ref)

            operation = adapter.WaitForOperation(
                operation_ref,
                'Creating cluster {0} in {1}'.format(cluster_ref.clusterId,
                                                     cluster_ref.zone),
                timeout_s=args.timeout)
            cluster = adapter.GetCluster(cluster_ref)
        except apitools_exceptions.HttpError as error:
            raise exceptions.HttpException(error, util.HTTP_ERROR_FORMAT)

        log.CreatedResource(cluster_ref)
        cluster_url = util.GenerateClusterUrl(cluster_ref)
        log.status.Print('To inspect the contents of your cluster, go to: ' +
                         cluster_url)
        if operation.detail:
            # Non-empty detail on a DONE create operation should be surfaced as
            # a warning to end user.
            log.warning(operation.detail)

        try:
            util.ClusterConfig.Persist(cluster, cluster_ref.projectId)
        except kconfig.MissingEnvVarError as error:
            log.warning(error)

        return [cluster]
Example No. 28
def RunDeploy(
        args,
        api_client,
        use_beta_stager=False,
        runtime_builder_strategy=runtime_builders.RuntimeBuilderStrategy.NEVER,
        parallel_build=True,
        flex_image_build_option=FlexImageBuildOptions.ON_CLIENT,
        use_legacy_apis=False,
        service_account=None):
    """Perform a deployment based on the given args.

  Args:
    args: argparse.Namespace, An object that contains the values for the
      arguments specified in the ArgsDeploy() function.
    api_client: api_lib.app.appengine_api_client.AppengineClient, App Engine
      Admin API client.
    use_beta_stager: bool, whether to use the stager registry defined for the
      beta track rather than the default stager registry.
    runtime_builder_strategy: runtime_builders.RuntimeBuilderStrategy, when to
      use the new CloudBuild-based runtime builders (alternative is old
      externalized runtimes).
    parallel_build: bool, whether to use parallel build and deployment path.
      Only supported in v1beta and v1alpha App Engine Admin API.
    flex_image_build_option: FlexImageBuildOptions, whether a flex deployment
      should upload files so that the server can build the image, or build the
      image on the client.
    use_legacy_apis: bool, if true, use the legacy deprecated admin-console-hr
      superapp for queue.yaml and cron.yaml uploads instead of Cloud Tasks &
      Cloud Scheduler FEs.
    service_account: string, the identity that the deployed version will run
      as. If not set, the App Engine default service account is used.

  Returns:
    A dict of the form `{'versions': new_versions, 'configs': updated_configs}`
    where new_versions is a list of version_util.Version and updated_configs is
    a list of config file identifiers; see yaml_parsing.ConfigYamlInfo.
  """
    project = properties.VALUES.core.project.Get(required=True)
    deploy_options = DeployOptions.FromProperties(
        runtime_builder_strategy=runtime_builder_strategy,
        parallel_build=parallel_build,
        flex_image_build_option=flex_image_build_option)

    with files.TemporaryDirectory() as staging_area:
        stager = _MakeStager(args.skip_staging, use_beta_stager,
                             args.staging_command, staging_area)
        services, configs = deployables.GetDeployables(
            args.deployables, stager, deployables.GetPathMatchers(),
            args.appyaml)

        wait_for_stop_version = _CheckIfConfigsContainDispatch(configs)

        service_infos = [d.service_info for d in services]

        flags.ValidateImageUrl(args.image_url, service_infos)

        # pylint: disable=protected-access
        log.debug(
            'API endpoint: [{endpoint}], API version: [{version}]'.format(
                endpoint=api_client.client.url,
                version=api_client.client._VERSION))
        # The legacy admin console API client.
        # The Admin Console API existed long before the App Engine Admin API, and
        # isn't being improved. We're in the process of migrating all of the calls
        # over to the Admin API, but a few things (notably config deployments)
        # haven't been ported over yet.
        # Import only when necessary, to decrease startup time.
        # pylint: disable=g-import-not-at-top
        from googlecloudsdk.api_lib.app import appengine_client
        # pylint: enable=g-import-not-at-top
        ac_client = appengine_client.AppengineClient(args.server,
                                                     args.ignore_bad_certs)

        app = _PossiblyCreateApp(api_client, project)
        _RaiseIfStopped(api_client, app)

        # Call _PossiblyRepairApp when --bucket param is unspecified
        if not args.bucket:
            app = _PossiblyRepairApp(api_client, app)

        # Tell the user what is going to happen, and ask them to confirm.
        version_id = args.version or util.GenerateVersionId()
        deployed_urls = output_helpers.DisplayProposedDeployment(
            app, project, services, configs, version_id,
            deploy_options.promote, service_account)
        console_io.PromptContinue(cancel_on_no=True)
        if service_infos:
            # Do generic app setup if deploying any services.
            # All deployment paths for a service involve uploading source to GCS.
            metrics.CustomTimedEvent(metric_names.GET_CODE_BUCKET_START)
            code_bucket_ref = args.bucket or flags.GetCodeBucket(app, project)
            metrics.CustomTimedEvent(metric_names.GET_CODE_BUCKET)
            log.debug('Using bucket [{b}].'.format(b=code_bucket_ref.ToUrl()))

            # Prepare Flex if any service is going to deploy an image.
            if any(s.RequiresImage() for s in service_infos):
                deploy_command_util.PossiblyEnableFlex(project)

            all_services = {s.id: s for s in api_client.ListServices()}
        else:
            code_bucket_ref = None
            all_services = {}
        new_versions = []
        deployer = ServiceDeployer(api_client, deploy_options)

        # Track whether a service has been deployed yet, for metrics.
        service_deployed = False
        for service in services:
            if not service_deployed:
                metrics.CustomTimedEvent(
                    metric_names.FIRST_SERVICE_DEPLOY_START)
            new_version = version_util.Version(project, service.service_id,
                                               version_id)
            deployer.Deploy(service,
                            new_version,
                            code_bucket_ref,
                            args.image_url,
                            all_services,
                            app.gcrDomain,
                            disable_build_cache=args.no_cache,
                            wait_for_stop_version=wait_for_stop_version,
                            flex_image_build_option=flex_image_build_option,
                            ignore_file=args.ignore_file,
                            service_account=service_account)
            new_versions.append(new_version)
            log.status.Print('Deployed service [{0}] to [{1}]'.format(
                service.service_id, deployed_urls[service.service_id]))
            if not service_deployed:
                metrics.CustomTimedEvent(metric_names.FIRST_SERVICE_DEPLOY)
            service_deployed = True

    # Deploy config files.
    if configs:
        metrics.CustomTimedEvent(metric_names.UPDATE_CONFIG_START)
        for config in configs:
            message = 'Updating config [{config}]'.format(config=config.name)
            with progress_tracker.ProgressTracker(message):
                if config.name == 'dispatch':
                    api_client.UpdateDispatchRules(config.GetRules())
                elif config.name == yaml_parsing.ConfigYamlInfo.INDEX:
                    index_api.CreateMissingIndexes(project, config.parsed)
                elif (not use_legacy_apis
                      and config.name == yaml_parsing.ConfigYamlInfo.QUEUE):
                    RunDeployCloudTasks(config)
                elif (not use_legacy_apis
                      and config.name == yaml_parsing.ConfigYamlInfo.CRON):
                    RunDeployCloudScheduler(config)
                else:
                    ac_client.UpdateConfig(config.name, config.parsed)
        metrics.CustomTimedEvent(metric_names.UPDATE_CONFIG)

    updated_configs = [c.name for c in configs]

    PrintPostDeployHints(new_versions, updated_configs)

    # Return all the things that were deployed.
    return {'versions': new_versions, 'configs': updated_configs}
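
A minimal sketch of a caller consuming RunDeploy's return value (hypothetical code, not from the SDK; `args` and `api_client` are assumed to be prepared as the docstring describes, and the `Version.id` attribute name is inferred from the Version(project, service_id, version_id) constructor used above):

# Sketch: deploy, then report what changed.
result = RunDeploy(args, api_client)
for version in result['versions']:      # version_util.Version objects
    log.status.Print('Deployed version: {0}'.format(version.id))
for config_name in result['configs']:   # e.g. 'cron', 'dispatch', 'queue'
    log.status.Print('Updated config: {0}'.format(config_name))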
Example No. 29
def PromptForDeletionHelper(resource_name, prompt_list, prompt_title=None):
    prompt_title = (prompt_title or
                    'The following {0} will be deleted:'.format(resource_name))
    prompt_message = ConstructList(prompt_title, prompt_list)
    if not console_io.PromptContinue(message=prompt_message):
        raise calliope_exceptions.ToolException('Deletion aborted by user.')
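
A small usage sketch (hypothetical; the resource name and items are invented, and the exact prompt layout depends on ConstructList, defined elsewhere in this module):

# Prompts with roughly "The following instances will be deleted:" plus the
# two names, and raises ToolException if the user declines.
PromptForDeletionHelper('instances', ['instance-1', 'instance-2'])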
Example No. 30
    def Run(self, args):
        start = time_utils.CurrentTimeSec()

        # Set up Encryption utilities.
        openssl_executable = files.FindExecutableOnPath('openssl')
        if windows_encryption_utils:
            crypt = windows_encryption_utils.WinCrypt()
        elif openssl_executable:
            crypt = openssl_encryption_utils.OpensslCrypt(openssl_executable)
        else:
            raise utils.MissingDependencyError(
                'Your platform does not support OpenSSL.')

        # Get Authenticated email address and default username.
        email = gaia_utils.GetAuthenticatedGaiaEmail(self.http)
        if args.user:
            user = args.user
        else:
            user = gaia_utils.MapGaiaEmailToDefaultAccountName(email)

        if args.instance == user:
            raise utils.InvalidUserError(
                MACHINE_USERNAME_SAME_ERROR.format(user, args.instance))

        # Warn user (This warning doesn't show for non-interactive sessions).
        message = RESET_PASSWORD_WARNING.format(user)
        prompt_string = (
            'Would you like to set or reset the password for [{0}]'.format(
                user))
        console_io.PromptContinue(message=message,
                                  prompt_string=prompt_string,
                                  cancel_on_no=True)

        log.status.Print(
            'Resetting and retrieving password for [{0}] on [{1}]'.format(
                user, args.instance))

        # Get Encryption Keys.
        key = crypt.GetKeyPair()
        modulus, exponent = crypt.GetModulusExponentFromPublicKey(
            crypt.GetPublicKey(key))

        # Create Windows key entry.
        self.windows_key_entry = self._ConstructWindowsKeyEntry(
            user, modulus, exponent, email)
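        # Per GCE's documented Windows password-reset flow (summarized here as
        # an assumption, not verified against this SDK version), the entry is
        # a JSON object with roughly the keys 'userName', 'modulus',
        # 'exponent', 'email' and 'expireOn'; the in-guest agent uses the RSA
        # public key to encrypt the freshly generated password.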

        # Call ReadWriteCommand.Run(), which will fetch the instance and update
        # the metadata (using the data in self.windows_key_entry).
        objects = super(ResetWindowsPassword, self).Run(args)
        updated_instance = list(objects)[0]

        # Retrieve and Decrypt the password from the serial console.
        enc_password = self._GetEncryptedPasswordFromSerialPort(modulus)
        password = crypt.DecryptMessage(key, enc_password)

        # Get External IP address.
        try:
            access_configs = updated_instance['networkInterfaces'][0][
                'accessConfigs']
            external_ip_address = access_configs[0]['natIP']
        except KeyError:
            log.warning(NO_IP_WARNING.format(updated_instance['name']))
            external_ip_address = None

        # Check for old Windows credentials.
        if self.old_metadata_keys:
            log.warning(
                OLD_KEYS_WARNING.format(self.ref.Name(), self.ref.Name(),
                                        self.ref.zone,
                                        ','.join(self.old_metadata_keys)))

        log.info('Total Elapsed Time: {0}'.format(time_utils.CurrentTimeSec() -
                                                  start))

        # The connection info resource.
        connection_info = {
            'username': user,
            'password': password,
            'ip_address': external_ip_address
        }
        return connection_info