Example #1
  def DeleteModule(self, module, version):
    """Deletes the given version of the module.

    Args:
      module: str, The module to delete.
      version: str, The version of the module to delete.

    Returns:
      bool, whether the deletion was successful
    """
    rpcserver = self._GetRpcServer()
    response = rpcserver.Send('/api/versions/delete',
                              app_id=self.project, module=module,
                              version_match=version)
    # These strings have been seen in production, and verified against the code
    # of the AppEngine API endpoint being hit.
    if response == 'Module deleted.' or response.endswith('Success.\n'):
      # This is the normal case: deletion succeeded
      return True
    elif response == 'Cannot delete default version.\n':
      # This is a known error case.
      log.error('Cannot delete default version [{0}] of module [{1}].'.format(
          version, module))
      return False
    else:
      # This is the unknown case. The safest behavior is to pass the message
      # through to the user as a warning, but assume the deletion succeeded.
      log.warning('Version [{0}] of module [{1}]: '.format(version, module) +
                  response)
      return True
Example #2
def PrepareGCDDataDir(args):
  """Prepares the given directory using gcd create.

  Args:
    args: The arguments passed to the command.

  Raises:
    UnableToPrepareDataDir: If the gcd create execution fails.
  """
  data_dir = args.data_dir
  if os.path.isdir(data_dir) and os.listdir(data_dir):
    log.warning('Reusing existing data in [{0}].'.format(data_dir))
    return

  gcd_create_args = ['create']
  project = properties.VALUES.core.project.Get(required=True)
  gcd_create_args.append('--project_id={0}'.format(project))
  gcd_create_args.append(data_dir)
  exec_args = ArgsForGCDEmulator(gcd_create_args, args)

  log.status.Print('Executing: {0}'.format(' '.join(exec_args)))
  with util.Exec(exec_args) as process:
    util.PrefixOutput(process, DATASTORE)
    failed = process.poll()  # None while still running; nonzero exit means failure.
    if failed:
      raise UnableToPrepareDataDir()
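
ArgsForGCDEmulator is referenced above but not shown. A plausible minimal
shape (hypothetical, not the SDK's actual helper) just prepends the emulator
launcher script to the gcd arguments:

import os

def ArgsForGCDEmulator(gcd_args, args):
  # Hypothetical: resolve the emulator install dir from an assumed flag and
  # prepend the launcher script to the gcd subcommand arguments.
  gcd_dir = getattr(args, 'gcd_dir', '/usr/local/gcd')
  return [os.path.join(gcd_dir, 'gcd.sh')] + list(gcd_args)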
Example #3
def _ReadArgGroupsFromFile(arg_file):
  """Collects all the arg groups defined in the yaml file into a dictionary.

  Each dictionary key is an arg-group name whose corresponding value is a nested
  dictionary containing arg-name: arg-value pairs defined in that group.

  Args:
    arg_file: str, the name of the YAML argument file to open and parse.

  Returns:
    A dict containing all arg-groups found in the arg_file.

  Raises:
    yaml.Error: If the YAML file could not be read or parsed.
    BadFileException: If the contents of the file are not valid.
  """
  all_groups = {}
  for d in yaml.load_all_path(arg_file):
    if d is None:
      log.warning('Ignoring empty yaml document.')
    elif isinstance(d, dict):
      all_groups.update(d)
    else:
      raise calliope_exceptions.BadFileException(
          'Failed to parse YAML file [{}]: [{}] is not a valid argument '
          'group.'.format(arg_file, str(d)))
  return all_groups
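
For context, a minimal sketch (requires PyYAML; the group names are made up)
of the multi-document arg-file layout this function consumes:

import yaml  # PyYAML

ARG_FILE_TEXT = """
quick-test:
  type: robo
  timeout: 5m
---
nightly:
  type: instrumentation
"""

all_groups = {}
for doc in yaml.safe_load_all(ARG_FILE_TEXT):
  if isinstance(doc, dict):
    all_groups.update(doc)
assert sorted(all_groups) == ['nightly', 'quick-test']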
Example #4
def FindSentinel(filename, blocksize=2 ** 16):
  """Return the sentinel line from the output file.

  Args:
    filename: The filename of the output file.  (We'll read this file.)
    blocksize: Optional block size for buffering, for unit testing.

  Returns:
    The contents of the last line in the file that doesn't start with
    a tab, with its trailing newline stripped; or None if the file
    couldn't be opened or no such line could be found by inspecting
    the last 'blocksize' bytes of the file.
  """
  try:
    fp = open(filename, 'rb')
  except IOError as err:
    log.warning('Append mode disabled: can\'t read [%r]: %s', filename, err)
    return None
  try:
    fp.seek(0, 2)  # EOF
    fp.seek(max(0, fp.tell() - blocksize))
    lines = fp.readlines()
    del lines[:1]  # First line may be partial, throw it away
    sentinel = None
    for line in lines:
      if not line.startswith(b'\t'):  # The file was opened in binary mode.
        sentinel = line
    if not sentinel:
      return None
    return sentinel.rstrip(b'\n')
  finally:
    fp.close()
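
A self-contained sketch of the sentinel convention FindSentinel relies on:
continuation lines begin with a tab, so the sentinel is the last line that
does not (here, 'second request').

lines = ['first request\n', '\tdetail\n', 'second request\n', '\tdetail\n']
sentinel = None
for line in lines:
  if not line.startswith('\t'):
    sentinel = line
assert sentinel.rstrip('\n') == 'second request'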
Example #5
  def _WarnOrReadFirstKeyLine(self, path, kind):
    """Returns the first line from the key file path.

    A KeyFileStatus return indicates an error and is always accompanied by a
    log.warning message.

    Args:
      path: The path of the file to read from.
      kind: The kind of key file, 'private' or 'public'.

    Returns:
      The first line of the file, utf8 decoded, if the file exists and is
      non-empty. Otherwise logs a warning and returns the KeyFileStatus
      (ABSENT if the file does not exist, BROKEN if it is unreadable or empty).
    """
    try:
      with open(path, 'rb') as f:
        # Decode to utf8 to handle any unicode characters. Key data is base64
        # encoded so it cannot contain any unicode. Comments may contain
        # unicode, but they are ignored in the key file analysis here, so
        # replacing invalid chars with ? is OK.
        line = f.readline().strip().decode('utf8', 'replace')
        if line:
          return line
        msg = 'is empty'
        status = KeyFileStatus.BROKEN
    except IOError as e:
      if e.errno == errno.ENOENT:
        msg = 'does not exist'
        status = KeyFileStatus.ABSENT
      else:
        msg = 'is not readable'
        status = KeyFileStatus.BROKEN
    log.warning('The %s SSH key file for gcloud %s.', kind, msg)
    return status
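
KeyFileStatus is referenced above but not defined here; a minimal stand-in
(assumed shape; the SDK's real enum may carry more members) would be:

import enum

class KeyFileStatus(enum.Enum):
  PRESENT = 'OK'
  ABSENT = 'missing'
  BROKEN = 'broken'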
Example #6
  def ParseStringsAndWarn(self, value):
    # Print a warning once, then return the value as-is.
    if not self.warned:
      self.warned = True
      log.warning(
          "Delimiter '=' is deprecated for properties flag. Use ':' instead.")
    return value
Example #7
  def _PossiblyBuildAndPush(self, new_version, service, image, code_bucket_ref):
    """Builds and Pushes the Docker image if necessary for this service.

    Args:
      new_version: version_util.Version describing where to deploy the service
      service: yaml_parsing.ServiceYamlInfo, service configuration to be
        deployed
      image: str or None, the URL for the Docker image to be deployed (if image
        already exists).
      code_bucket_ref: cloud_storage.BucketReference where the service's files
        have been uploaded

    Returns:
      str, The name of the pushed or given container image or None if the
        service does not require an image.
    """
    if service.RequiresImage():
      if service.env == util.Environment.FLEXIBLE:
        log.warning('Deployment of App Engine Flexible Environment apps is '
                    'currently in Beta')
      if not image:
        image = deploy_command_util.BuildAndPushDockerImage(
            new_version.project, service, new_version.id, code_bucket_ref)
    else:
      image = None
    return image
Example #8
  def _CloneRepo(self, repo_name):
    """Queries user for output path and clones selected repo to it."""
    default_clone_path = os.path.join(os.getcwd(), repo_name)
    while True:
      clone_path = console_io.PromptResponse(
          'Where would you like to clone [{0}] repository to [{1}]:'
          .format(repo_name, default_clone_path))
      if not clone_path:
        clone_path = default_clone_path
      if os.path.exists(clone_path):
        log.status.write('Directory [{0}] already exists\n'.format(clone_path))
        continue
      clone_path = os.path.abspath(clone_path)
      parent_dir = os.path.dirname(clone_path)
      if not os.path.isdir(parent_dir):
        log.status.write('No such directory [{0}]\n'.format(parent_dir))
        answer = console_io.PromptContinue(
            prompt_string='Would you like to create it')
        if answer:
          files.MakeDir(parent_dir)
          break
      else:
        break

    # Show output from this command in case there are errors.
    try:
      self._RunCmd(['source', 'repos', 'clone'], [repo_name, clone_path],
                   disable_user_output=False)
    except c_exc.FailedSubCommand:
      log.warning(
          'Was not able to run\n  '
          '[gcloud source repos clone {0} {1}]\n'
          'at this time. You can try running this command any time later.\n'
          .format(repo_name, clone_path))
Example #9
  def Filter(self, tool_context, args):

    if config.INSTALLATION_CONFIG.IsAlternateReleaseChannel():
      log.warning('You are using alternate release channel: [%s]',
                  config.INSTALLATION_CONFIG.release_channel)
      # Always show the URL if using a non standard release channel.
      log.warning('Snapshot URL for this release channel is: [%s]',
                  config.INSTALLATION_CONFIG.snapshot_url)

    try:
      os_override = platforms.OperatingSystem.FromId(
          args.operating_system_override)
      arch_override = platforms.Architecture.FromId(args.architecture_override)
    except platforms.InvalidEnumValue:
      raise exceptions.ToolException.FromCurrent()

    platform = platforms.Platform.Current(os_override, arch_override)
    root = (os.path.expanduser(args.sdk_root_override)
            if args.sdk_root_override else None)
    url = (os.path.expanduser(args.snapshot_url_override)
           if args.snapshot_url_override else None)

    manager = update_manager.UpdateManager(
        sdk_root=root, url=url, platform_filter=platform,
        out_stream=log.out)
    tool_context[config.CLOUDSDK_UPDATE_MANAGER_KEY] = manager
Example #10
  def _LogWarnings(warnings):
    new_warnings = warnings[warnings_so_far:]
    if new_warnings:
      # Drop a line to print nicely with the progress tracker.
      log.err.write(tracker_separator)
      for warning in new_warnings:
        log.warning(warning)
Example #11
def ExitCodeFromRollupOutcome(outcome, summary_enum):
  """Map a test roll-up outcome into the appropriate gcloud test exit_code.

  Args:
    outcome: a toolresults_v1.Outcome message.
    summary_enum: a toolresults.Outcome.SummaryValueValuesEnum reference.

  Returns:
    The exit_code which corresponds to the test execution's rolled-up outcome.

  Raises:
    TestOutcomeError: If Tool Results service returns an invalid outcome value.
  """
  if not outcome or not outcome.summary:
    log.warning('Tool Results service did not provide a roll-up test outcome.')
    return INCONCLUSIVE
  if outcome.summary == summary_enum.success:
    return ROLLUP_SUCCESS
  if outcome.summary == summary_enum.failure:
    return ROLLUP_FAILURE
  if outcome.summary == summary_enum.skipped:
    return UNSUPPORTED_ENV
  if outcome.summary == summary_enum.inconclusive:
    return INCONCLUSIVE
  raise TestOutcomeError(
      "Unknown test outcome summary value '{0}'".format(outcome.summary))
Example #12
  def __call__(self, parser, namespace, value, option_string=None):
    if value is None:
      log.warning(self.deprecation_message)
      if self.none_arg:
        setattr(namespace, self.none_arg, True)

    setattr(namespace, self.dest, value)
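
For context, a sketch of how an action like this is typically wired into
argparse (names are illustrative, not the SDK's). With nargs='?', passing the
flag bare yields value=None, which is what triggers the deprecation path:

import argparse

class DeprecationAction(argparse.Action):

  def __init__(self, *args, **kwargs):
    # Pop custom kwargs before delegating to argparse.Action.
    self.deprecation_message = kwargs.pop('deprecation_message', '')
    self.none_arg = kwargs.pop('none_arg', None)
    super(DeprecationAction, self).__init__(*args, **kwargs)

  def __call__(self, parser, namespace, value, option_string=None):
    if value is None:
      print(self.deprecation_message)  # stand-in for log.warning
      if self.none_arg:
        setattr(namespace, self.none_arg, True)
    setattr(namespace, self.dest, value)

parser = argparse.ArgumentParser()
parser.add_argument('--old-flag', nargs='?', action=DeprecationAction,
                    deprecation_message='--old-flag is deprecated')
assert parser.parse_args(['--old-flag']).old_flag is None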
Example #13
def UnidentifiedDirMatcher(path, stager):
  """Generate a Service from a potential app directory.

  This function is a path matcher that returns a Service if and only if:
  - `path` points to an `<app-dir>` where the fingerprinter identifies a runtime
    and the user opts in to writing an `app.yaml` into `<app-dir>`.

  If the runtime and environment match an entry in the stager, the service will
  be staged into a directory.

  Args:
    path: str, Unsanitized absolute path, may point to a directory or a file of
        any type. There is no guarantee that it exists.
    stager: staging.Stager, stager that will be invoked if there is a runtime
        and environment match.

  Raises:
    staging.StagingCommandFailedError, staging command failed.

  Returns:
    Service, fully populated with entries that respect a potentially
        staged deployable service, or None if the path does not fulfill the
        requirements described above.
  """
  if os.path.isdir(path):
    log.warning('Automatic app detection is currently in Beta')
    yaml = deploy_command_util.CreateAppYamlForAppDirectory(path)
    return ServiceYamlMatcher(yaml, stager)
Example #14
def CredentialsFromP12Key(private_key, account, password=None):
  """Creates creadentials object from given private key and account name."""
  log.warning('.p12 service account keys are not recomended unless it is '
              'necessary for backwards compatability. Please switch to '
              'a newer .json service account key for this account.')

  try:
    cred = service_account.ServiceAccountCredentials.from_p12_keyfile_buffer(
        service_account_email=account,
        file_buffer=io.BytesIO(private_key),
        private_key_password=password,
        scopes=config.CLOUDSDK_SCOPES)
  except NotImplementedError:
    if not encoding.GetEncodedValue(os.environ, 'CLOUDSDK_PYTHON_SITEPACKAGES'):
      raise UnsupportedCredentialsType(
          ('PyOpenSSL is not available. If you have already installed '
           'PyOpenSSL, you will need to enable site packages by '
           'setting the environment variable CLOUDSDK_PYTHON_SITEPACKAGES '
           'to 1. If that does not work, see '
           'https://developers.google.com/cloud/sdk/crypto for details '
           'or consider using .json private key instead.'))
    else:
      raise UnsupportedCredentialsType(
          ('PyOpenSSL is not available. See '
           'https://developers.google.com/cloud/sdk/crypto for details '
           'or consider using .json private key instead.'))

  # pylint: disable=protected-access
  cred.user_agent = cred._user_agent = config.CLOUDSDK_USER_AGENT

  return cred
Example #15
def _ApplyArgDefaults(args, defaults_dict, issue_warning=False):
  """Apply default values from a dictionary to args with no values.

  Args which already have a value are never modified by this function. Thus,
  if there are multiple sets of default args, they should be applied in order
  from highest-to-lowest precedence.

  Args:
    args: an argparse namespace. All the arguments that were provided to the
      command invocation (i.e. group and command arguments combined), plus any
      arg defaults already applied to the namespace.
    defaults_dict: a map of args to their default values.
    issue_warning: (boolean) issue a warning if an arg already has a value and
      we do not apply the provided default (used for arg-files where any args
      specified in the file are lower-priority than the CLI args).
  """
  for arg in defaults_dict:
    if getattr(args, arg, None) is None:
      log.debug('Applying default {0}: {1}'
                .format(arg, str(defaults_dict[arg])))
      setattr(args, arg, defaults_dict[arg])
    elif issue_warning and getattr(args, arg) != defaults_dict[arg]:
      ext_name = arg_validate.ExternalArgNameFrom(arg)
      log.warning(
          'Command-line argument "--{0} {1}" overrides file argument "{2}: {3}"'
          .format(ext_name, _FormatArgValue(getattr(args, arg)),
                  ext_name, _FormatArgValue(defaults_dict[arg])))
Example #16
def GetStreamLogs(async_, stream_logs):
  """Return, based on the command line arguments, whether we should stream logs.

  Both arguments cannot be set (they're mutually exclusive flags) and the
  default is False.

  Args:
    async_: bool, the value of the --async flag.
    stream_logs: bool, the value of the --stream-logs flag.

  Returns:
    bool, whether to stream the logs

  Raises:
    ValueError: if both async_ and stream_logs are True.
  """
  if async_ and stream_logs:
    # Doesn't have to be a nice error; they're mutually exclusive so we should
    # never get here.
    raise ValueError('--async and --stream-logs cannot both be set.')

  if async_:
    # TODO(b/36195821): Use the flag deprecation machinery when it supports the
    # store_true action
    log.warning('The --async flag is deprecated, as the default behavior is to '
                'submit the job asynchronously; it can be omitted. '
                'For synchronous behavior, please pass --stream-logs.\n')
  return stream_logs
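
Illustrative calls (assuming GetStreamLogs is importable; the deprecated
--async path also needs the module's log): only --stream-logs decides the
result, and setting both flags raises.

assert GetStreamLogs(async_=False, stream_logs=True) is True
assert GetStreamLogs(async_=False, stream_logs=False) is False
try:
  GetStreamLogs(async_=True, stream_logs=True)
  raise AssertionError('expected ValueError')
except ValueError:
  pass  # the flags are mutually exclusive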
Example #17
def _ReadArgGroupsFromFile(arg_file):
  """Collects all the arg groups defined in the yaml file into a dictionary.

  Each dictionary key is an arg-group name whose corresponding value is a nested
  dictionary containing arg-name: arg-value pairs defined in that group.

  Args:
    arg_file: str, the name of the YAML argument file to open and parse.

  Returns:
    A dict containing all arg-groups found in the arg_file.

  Raises:
    BadFileException: the yaml package encountered a ScannerError.
  """
  with open(arg_file, 'r') as data:
    yaml_generator = yaml.safe_load_all(data)
    all_groups = {}
    try:
      for d in yaml_generator:
        if d is None:
          log.warning('Ignoring empty yaml document.')
        elif isinstance(d, dict):
          all_groups.update(d)
        else:
          raise yaml.scanner.ScannerError(
              '[{0}] is not a valid argument group.'.format(str(d)))
    except yaml.scanner.ScannerError as error:
      raise calliope_exceptions.BadFileException(
          'Error parsing YAML file [{0}]: {1}'.format(arg_file, str(error)))
  return all_groups
Example #18
  def Shutdown(self, wait_secs=10):
    """Shuts down the broker server.

    Args:
      wait_secs: (float) The maximum time to wait for the broker to shut down.

    Raises:
      BrokerError: If shutdown failed.
    """
    if self._process:
      try:
        execution_utils.KillSubprocess(self._process)
        self._process = None
        if self._comm_thread:
          self._comm_thread.join()
          self._comm_thread = None
      except RuntimeError as e:
        log.warning('Failed to shut down broker: %s' % e)
        raise BrokerError('Broker failed to shut down: %s' % e)
    else:
      # Invoke the /shutdown handler.
      try:
        self._SendJsonRequest('POST', '/shutdown')
      except RequestSocketError as e:
        if e.errno not in (SocketConnRefusedErrno(), SocketConnResetErrno()):
          raise
        # We may get an exception reading the response to the shutdown
        # request, because the shutdown may preempt the response.

    if not _Await(lambda: not self.IsRunning(), wait_secs):
      log.warning('Failed to shut down broker: still running after {0}s'.format(
          wait_secs))
      raise BrokerError('Broker failed to shut down: timed out')

    log.info('Shut down broker.')
Example #19
def GetDefaultSshUsername(warn_on_account_user=False):
  """Returns the default username for ssh.

  The default username is the local username, unless that username is invalid.
  In that case, the default username is the username portion of the current
  account.

  Emits a warning if it's not using the local account username.

  Args:
    warn_on_account_user: bool, whether to warn if using the current account
      instead of the local username.

  Returns:
    str, the default SSH username.
  """
  user = getpass.getuser()
  if not _IsValidSshUsername(user):
    full_account = properties.VALUES.core.account.Get(required=True)
    account_user = gaia.MapGaiaEmailToDefaultAccountName(full_account)
    if warn_on_account_user:
      log.warning(
          'Invalid characters in local username [{0}]. '
          'Using username corresponding to active account: [{1}]'.format(
              user, account_user))
    user = account_user
  return user
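
_IsValidSshUsername is not shown above; a plausible minimal check
(hypothetical, not the SDK's exact rule) accepts only conservative POSIX
username characters:

import re

def _IsValidSshUsername(user):
  # Hypothetical rule: a lowercase ASCII letter first, then letters, digits,
  # underscores, or hyphens.
  return bool(re.match(r'^[a-z][-a-z0-9_]*$', user))

assert _IsValidSshUsername('jane_doe')
assert not _IsValidSshUsername('Jane Döe')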
Example #20
  def Format(self, entry):
    """Safely formats a log entry into human readable text.

    Args:
      entry: A log entry message emitted from the V2 API client.

    Returns:
      A string without line breaks respecting the `max_length` property.
    """
    text = self._LogEntryToText(entry)
    text = text.strip().replace('\n', '  ')

    try:
      time = times.FormatDateTime(times.ParseDateTime(entry.timestamp),
                                  self.api_time_format)
    except times.Error:
      log.warning('Received timestamp [{0}] does not match expected'
                  ' format.'.format(entry.timestamp))
      time = '????-??-?? ??:??:??'

    out = u'{timestamp} {log_text}'.format(
        timestamp=time,
        log_text=text)
    if self.max_length and len(out) > self.max_length:
      out = out[:self.max_length - 3] + '...'
    return out
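
A worked illustration of the max_length contract above (values made up): an
over-long line is ellipsized to exactly max_length characters.

out = '2024-01-01 00:00:00 some very long log text'
max_length = 20
if max_length and len(out) > max_length:
  out = out[:max_length - 3] + '...'
assert out == '2024-01-01 00:00:...'
assert len(out) == 20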
Example #21
def _ApplyLowerPriorityArgs(args, lower_pri_args, issue_cli_warning=False):
  """Apply lower-priority arg values from a dictionary to args without values.

  May be used to apply arg default values, or to merge args from another source,
  such as an arg-file. Args which already have a value are never modified by
  this function. Thus, if there are multiple sets of lower-priority args, they
  should be applied in order from highest-to-lowest precedence.

  Args:
    args: the existing argparse.Namespace. All the arguments that were provided
      to the command invocation (i.e. group and command arguments combined),
      plus any arg defaults already applied to the namespace. These args have
      higher priority than the lower_pri_args.
    lower_pri_args: a dict mapping lower-priority arg names to their values.
    issue_cli_warning: (boolean) issue a warning if an arg already has a value
      from the command line and we do not apply the lower-priority arg value
      (used for arg-files where any args specified in the file are lower in
      priority than the CLI args).
  """
  for arg in lower_pri_args:
    if getattr(args, arg, None) is None:
      log.debug('Applying default {0}: {1}'
                .format(arg, str(lower_pri_args[arg])))
      setattr(args, arg, lower_pri_args[arg])
    elif issue_cli_warning and getattr(args, arg) != lower_pri_args[arg]:
      ext_name = arg_validate.ExternalArgNameFrom(arg)
      log.warning(
          'Command-line argument "--{0} {1}" overrides file argument "{2}: {3}"'
          .format(ext_name, _FormatArgValue(getattr(args, arg)),
                  ext_name, _FormatArgValue(lower_pri_args[arg])))
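
A hedged usage sketch with a bare argparse.Namespace standing in for parsed
CLI args (the arg names are invented; the module's log must be available):
'timeout' keeps its command-line value while the unset 'results_dir' is
filled from the lower-priority dict.

import argparse

args = argparse.Namespace(timeout='10m', results_dir=None)
_ApplyLowerPriorityArgs(args, {'timeout': '5m', 'results_dir': '/tmp/res'})
assert args.timeout == '10m'
assert args.results_dir == '/tmp/res'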
Example #22
def _AddPositionalToSummary(command, summary, length_per_snippet,
                            location, terms):
  """Adds summary of arg, given location such as ['positionals']['myarg']."""
  positionals = command.get(lookup.POSITIONALS)
  lines = []
  line = ''
  if _FormatHeader(lookup.POSITIONALS) not in summary:
    lines.append(_FormatHeader(lookup.POSITIONALS))

  # Add specific positional if given in location.
  if len(location) > 1:
    lines.append(_FormatItem(location[1]))
    positionals = [p for p in positionals if p[lookup.NAME] == location[1]]
    if positionals:
      positional = positionals[0]
      line = positional.get(lookup.DESCRIPTION, '')
      line = _Snip(line, length_per_snippet, terms)
    else:
      log.warning('Attempted to look up a location [{}] that was not '
                  'found.'.format(location[1]))

  # If no specific positional given, just add list of all available.
  else:
    line = ', '.join(sorted([p[lookup.NAME] for p in positionals]))
  if line:
    lines.append(line)
    summary += lines
Example #23
  def ListEmulators(self):
    """Returns the list of emulators, or None.

    Returns:
      A list of Json dicts representing google.emulators.Emulator proto
      messages, or None if the list operation fails.

    Raises:
      BrokerNotRunningError: If the broker is not running.
    """
    if not self.IsRunning():
      raise BrokerNotRunningError('Failed to list emulators')

    try:
      response, data = self._SendJsonRequest('GET', _EmulatorPath())
      if response.status != httplib.OK:
        log.warning('Failed to list emulators: {0} ({1})'
                    .format(response.reason, response.status))
        return
    except RequestError:
      return

    list_response = json.loads(data)
    try:
      return list_response['emulators']
    except KeyError:
      # The expected values were not present.
      return
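
Note that httplib is the Python 2 module name; under Python 3 the same
constant lives in http.client, and six.moves bridges both:

from six.moves import http_client

assert http_client.OK == 200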
Example #24
def CreateApp(api_client, project, region, suppress_warning=False):
  """Create an App Engine app in the given region.

  Prints info about the app being created and displays a progress tracker.

  Args:
    api_client: The App Engine Admin API client
    project: The GCP project
    region: The region to create the app
    suppress_warning: True if user doesn't need to be warned this is
        irreversible.

  Raises:
    AppAlreadyExistsError: If app already exists.
  """
  if not suppress_warning:
    log.status.Print('You are creating an app for project [{project}].'.format(
        project=project))
    log.warning(APP_CREATE_WARNING)
  try:
    api_client.CreateApp(region)
  except apitools_exceptions.HttpConflictError:
    raise AppAlreadyExistsError(
        'The project [{project}] already contains an App Engine application. '
        'You can deploy your application using `gcloud app deploy`.'.format(
            project=project))
Example #25
  def Install(self, components, allow_no_backup=False,
              throw_if_unattended=False):
    """Installs the given components at the version you are current on.

    Args:
      components: [str], A list of component ids to install.
      allow_no_backup: bool, True if we want to allow the updater to run
        without creating a backup.  This lets us be in the root directory of the
        SDK and still do an update.  It is more fragile if there is a failure,
        so we only do it if necessary.
      throw_if_unattended: bool, True to throw an exception on prompts when
        not running in interactive mode.

    Raises:
      InvalidComponentError: If any of the given component ids do not exist.

    Returns:
      bool, True if the update succeeded (or there was nothing to do), False
      if it was cancelled by the user.
    """
    if not components:
      raise InvalidComponentError('You must specify components to install')

    version = config.INSTALLATION_CONFIG.version
    if properties.VALUES.component_manager.additional_repositories.Get():
      log.warning('Additional component repositories are currently active.  '
                  'Running `update` instead of `install`.')
      version = None

    return self.Update(
        components,
        allow_no_backup=allow_no_backup,
        throw_if_unattended=throw_if_unattended,
        version=version)
Example #26
  def Run(self, args):
    """This is what gets called when the user runs this command.

    Args:
      args: an argparse namespace. All the arguments that were provided to this
        command invocation.

    Returns:
      Cluster message for the successfully created cluster.

    Raises:
      util.Error, if creation failed.
    """
    util.CheckKubectlInstalled()
    if not args.password:
      args.password = ''.join(random.SystemRandom().choice(
          string.ascii_letters + string.digits) for _ in range(16))

    adapter = self.context['api_adapter']

    if not args.scopes:
      args.scopes = []
    cluster_ref = adapter.ParseCluster(args.name)
    options = self.ParseCreateOptions(args)

    operation = None
    try:
      operation_ref = adapter.CreateCluster(cluster_ref, options)
      if not args.wait:
        return adapter.GetCluster(cluster_ref)

      operation = adapter.WaitForOperation(
          operation_ref,
          'Creating cluster {0}'.format(cluster_ref.clusterId),
          timeout_s=args.timeout)
      cluster = adapter.GetCluster(cluster_ref)
    except apitools_exceptions.HttpError as error:
      raise exceptions.HttpException(util.GetError(error))

    log.CreatedResource(cluster_ref)
    if operation.detail:
      # Non-empty detail on a DONE create operation should be surfaced as
      # a warning to end user.
      log.warning(operation.detail)
    # Persist cluster config
    current_context = kconfig.Kubeconfig.Default().current_context
    c_config = util.ClusterConfig.Persist(cluster, cluster_ref.projectId)
    if not c_config.has_certs:
      # Purge config so we retry the cert fetch on next kubectl command
      util.ClusterConfig.Purge(
          cluster.name, cluster.zone, cluster_ref.projectId)
      # reset current context
      if current_context:
        kubeconfig = kconfig.Kubeconfig.Default()
        kubeconfig.SetCurrentContext(current_context)
        kubeconfig.SaveToFile()
      raise util.Error(NO_CERTS_ERROR_FMT.format(command=' '.join(
          args.command_path[:-1] + ['get-credentials', cluster.name])))
    return cluster
Example #27
def _MergeArgGroupIntoArgs(
    args_from_file, group_name, all_arg_groups, all_test_args_set,
    already_included_set=None):
  """Merges args from an arg group into the given args_from_file dictionary.

  Args:
    args_from_file: dict of arg:value pairs already loaded from the arg-file.
    group_name: str, the name of the arg-group to merge into args_from_file.
    all_arg_groups: dict containing all arg-groups loaded from the arg-file.
    all_test_args_set: set of str, all possible test arg names.
    already_included_set: set of str, all group names which were already
      included. Used to detect 'include:' cycles.

  Raises:
    BadFileException: an undefined arg-group name was encountered.
    InvalidArgException: a valid argument name has an invalid value, or
      use of include: led to cyclic references.
    InvalidTestArgError: an undefined argument name was encountered.
  """
  if already_included_set is None:
    already_included_set = set()
  elif group_name in already_included_set:
    raise exceptions.InvalidArgException(
        _INCLUDE,
        'Detected cyclic reference to arg group [{g}]'.format(g=group_name))
  if group_name not in all_arg_groups:
    raise calliope_exceptions.BadFileException(
        'Could not find argument group [{g}] in argument file.'
        .format(g=group_name))

  arg_group = all_arg_groups[group_name]
  if not arg_group:
    log.warning('Argument group [{0}] is empty.'.format(group_name))
    return

  for arg_name in arg_group:
    arg = arg_validate.InternalArgNameFrom(arg_name)
    # Must process include: groups last in order to follow precedence rules.
    if arg == _INCLUDE:
      continue

    if arg not in all_test_args_set:
      raise exceptions.InvalidTestArgError(arg_name)
    if arg in args_from_file:
      log.info(
          'Skipping include: of arg [{0}] because it already had value [{1}].'
          .format(arg_name, args_from_file[arg]))
    else:
      args_from_file[arg] = arg_validate.ValidateArgFromFile(
          arg, arg_group[arg_name])

  already_included_set.add(group_name)  # Prevent "include:" cycles

  if _INCLUDE in arg_group:
    included_groups = arg_validate.ValidateStringList(_INCLUDE,
                                                      arg_group[_INCLUDE])
    for included_group in included_groups:
      _MergeArgGroupIntoArgs(args_from_file, included_group, all_arg_groups,
                             all_test_args_set, already_included_set)
Example #28
def Create(models_client, model, regions=None, enable_logging=None,
           labels=None, description=None):
  if regions is None:
    log.warning('`--regions` flag will soon be required. Please explicitly '
                'specify a region. Using [us-central1] by default.')
    regions = ['us-central1']
  return models_client.Create(model, regions, enable_logging, labels=labels,
                              description=description)
Example #29
  def Filter(self, unused_tool_context, args):

    if config.INSTALLATION_CONFIG.IsAlternateReleaseChannel():
      log.warning('You are using alternate release channel: [%s]',
                  config.INSTALLATION_CONFIG.release_channel)
      # Always show the URL if using a non standard release channel.
      log.warning('Snapshot URL for this release channel is: [%s]',
                  config.INSTALLATION_CONFIG.snapshot_url)
Example #30
def IsDefaultRegionalBackendServicePropertyNoneWarnOtherwise():
  """Warns if core/default_regional_backend_service property is set."""
  default_regional = (
      properties.VALUES.core.default_regional_backend_service.GetBool())
  if default_regional is not None:
    # Print a warning if it is set.
    log.warning(
        'core/default_regional_backend_service property is deprecated and '
        'has no meaning.')
Example #31
  def Validate(self):
    """Displays warnings and raises exceptions for non-schema errors.

    Raises:
      YamlValidationError: If validation of parsed info fails.
    """
    if self.parsed.runtime == 'vm':
      vm_runtime = self.parsed.GetEffectiveRuntime()
    else:
      vm_runtime = None
      if self.parsed.runtime == 'python':
        raise YamlValidationError(
            'Service [{service}] uses unsupported Python 2.5 runtime. '
            'Please use [runtime: python27] instead.'.format(
                service=(self.parsed.service or
                         ServiceYamlInfo.DEFAULT_SERVICE_NAME)))
      elif self.parsed.runtime == 'python-compat':
        raise YamlValidationError(
            '"python-compat" is not a supported runtime.')
      elif self.parsed.runtime == 'custom' and not self.parsed.env:
        raise YamlValidationError(
            'runtime "custom" requires that env be explicitly specified.')

    if self.env is env.MANAGED_VMS:
      log.warning(MANAGED_VMS_DEPRECATION_WARNING)

    if (self.env is env.FLEX and self.parsed.beta_settings and
        self.parsed.beta_settings.get('enable_app_engine_apis')):
      log.warning(APP_ENGINE_APIS_DEPRECATION_WARNING)

    if self.env is env.FLEX and vm_runtime == 'python27':
      raise YamlValidationError(
          'The "python27" is not a valid runtime in env: flex.  '
          'Please use [python] instead.')

    if self.env is env.FLEX and vm_runtime == 'python-compat':
      log.warning('[runtime: {}] is deprecated.  Please use [runtime: python] '
                  'instead.  See {} for more info.'
                  .format(vm_runtime, UPGRADE_FLEX_PYTHON_URL))

    for warn_text in self.parsed.GetWarnings():
      log.warning('In file [{0}]: {1}'.format(self.file, warn_text))

    if (self.env is env.STANDARD and
        self.parsed.runtime == 'python27' and
        HasLib(self.parsed, 'ssl', '2.7')):
      log.warning(PYTHON_SSL_WARNING)

    if (self.env is env.FLEX and
        vm_runtime == 'python' and
        GetRuntimeConfigAttr(self.parsed, 'python_version') == '3.4'):
      log.warning(FLEX_PY34_WARNING)

    if self.is_ti_runtime:
      _CheckIllegalAttribute(
          name='threadsafe',
          yaml_info=self.parsed,
          extractor_func=lambda yaml: yaml.threadsafe,
          file_path=self.file,
          msg=HINT_THREADSAFE.format(self.runtime))

    _CheckIllegalAttribute(
        name='application',
        yaml_info=self.parsed,
        extractor_func=lambda yaml: yaml.application,
        file_path=self.file,
        msg=HINT_PROJECT)

    _CheckIllegalAttribute(
        name='version',
        yaml_info=self.parsed,
        extractor_func=lambda yaml: yaml.version,
        file_path=self.file,
        msg=HINT_VERSION)
Example #32
    def _PossiblyBuildAndPush(self, new_version, service, upload_dir,
                              source_files, image, code_bucket_ref, gcr_domain,
                              flex_image_build_option):
        """Builds and Pushes the Docker image if necessary for this service.

        Args:
          new_version: version_util.Version describing where to deploy the service
          service: yaml_parsing.ServiceYamlInfo, service configuration to be
            deployed
          upload_dir: str, path to the service's upload directory
          source_files: [str], relative paths to upload.
          image: str or None, the URL for the Docker image to be deployed (if image
            already exists).
          code_bucket_ref: cloud_storage.BucketReference where the service's files
            have been uploaded
          gcr_domain: str, Cloud Registry domain, determines the physical location
            of the image. E.g. `us.gcr.io`.
          flex_image_build_option: FlexImageBuildOptions, whether a flex deployment
            should upload files so that the server can build the image, build the
            image on the client, or build the image on the client using buildpacks.

        Returns:
          BuildArtifact, a wrapper which contains either the build ID for
            an in-progress build, or the name of the container image for a serial
            build. Possibly None if the service does not require an image.

        Raises:
          RequiredFileMissingError: if a required file is not uploaded.
        """
        build = None
        if image:
            if service.RequiresImage() and service.parsed.skip_files.regex:
                log.warning(
                    'Deployment of service [{0}] will ignore the skip_files '
                    'field in the configuration file, because the image has '
                    'already been built.'.format(new_version.service))
            return app_cloud_build.BuildArtifact.MakeImageArtifact(image)
        elif service.RequiresImage():
            if not _AppYamlInSourceFiles(source_files,
                                         service.GetAppYamlBasename()):
                raise RequiredFileMissingError(service.GetAppYamlBasename())

            if flex_image_build_option == FlexImageBuildOptions.ON_SERVER:
                cloud_build_options = {
                    'appYamlPath': service.GetAppYamlBasename(),
                }
                timeout = properties.VALUES.app.cloud_build_timeout.Get()
                if timeout:
                    build_timeout = int(
                        times.ParseDuration(timeout,
                                            default_suffix='s').total_seconds)
                    cloud_build_options['cloudBuildTimeout'] = six.text_type(
                        build_timeout) + 's'
                build = app_cloud_build.BuildArtifact.MakeBuildOptionsArtifact(
                    cloud_build_options)
            else:
                build = deploy_command_util.BuildAndPushDockerImage(
                    new_version.project, service, upload_dir, source_files,
                    new_version.id, code_bucket_ref, gcr_domain,
                    self.deploy_options.runtime_builder_strategy,
                    self.deploy_options.parallel_build, flex_image_build_option
                    == FlexImageBuildOptions.BUILDPACK_ON_CLIENT)

        return build
Example #33
    def __init__(self,
                 sdk_root=None,
                 url=None,
                 platform_filter=None,
                 out_stream=None,
                 warn=True):
        """Creates a new UpdateManager.

        Args:
          sdk_root: str, The path to the root directory of the Cloud SDK
            installation.  If None, the updater will search for the install
            directory based on the current directory.
          url: str, The URL to get the latest component snapshot from.  If None,
            the default will be used.
          platform_filter: platforms.Platform, A platform that components must match
            in order to be considered for any operations.  If None, all components
            will match.
          out_stream: a file-like object, The place to write more dynamic or
            interactive user output.  If not provided, sys.stdout will be used.
          warn: bool, True to warn about overridden configuration like an alternate
            snapshot file, fixed SDK version, or additional repo.  Should be set
            to False when using this class for background operations like checking
            for updates so the user only sees the warnings when they are actually
            dealing directly with the component manager.

        Raises:
          local_state.InvalidSDKRootError: If the Cloud SDK root cannot be found.
        """

        if not url:
            url = properties.VALUES.component_manager.snapshot_url.Get()
        if url:
            if warn:
                log.warning('You are using an overridden snapshot URL: [%s]',
                            url)
        else:
            url = config.INSTALLATION_CONFIG.snapshot_url

        # Change the snapshot URL to point to a fixed SDK version if specified.
        fixed_version = properties.VALUES.component_manager.fixed_sdk_version.Get(
        )
        if fixed_version:
            urls = url.split(',')
            urls[0] = (
                os.path.dirname(urls[0]) + '/' +
                UpdateManager.VERSIONED_SNAPSHOT_FORMAT.format(fixed_version))
            if warn:
                log.warning(
                    'You have configured your Cloud SDK installation to be '
                    'fixed to version [{0}].'.format(fixed_version))
            url = ','.join(urls)

        # Add in any additional repositories that have been registered.
        repos = properties.VALUES.component_manager.additional_repositories.Get(
        )
        if repos:
            if warn:
                for repo in repos.split(','):
                    log.warning(
                        'You are using additional component repository: [%s]',
                        repo)
            url = ','.join([url, repos])

        self.__sdk_root = sdk_root
        if not self.__sdk_root:
            self.__sdk_root = config.Paths().sdk_root
        if not self.__sdk_root:
            raise local_state.InvalidSDKRootError()
        self.__sdk_root = os.path.realpath(self.__sdk_root)
        self.__url = url
        self.__platform_filter = platform_filter
        self.__out_stream = out_stream if out_stream else log.out
        self.__text_wrapper = textwrap.TextWrapper(replace_whitespace=False,
                                                   drop_whitespace=False)
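
A worked illustration of the fixed-version URL rewrite above (the URLs and
the VERSIONED_SNAPSHOT_FORMAT value are assumptions for this sketch): only
the first comma-separated URL is rewritten.

import os

VERSIONED_SNAPSHOT_FORMAT = 'components-v{0}.json'  # assumed format string

url = ('https://dl.example.com/components-2.json,'
       'https://repo.example.com/extra.json')
fixed_version = '150.0.0'
urls = url.split(',')
urls[0] = (os.path.dirname(urls[0]) + '/' +
           VERSIONED_SNAPSHOT_FORMAT.format(fixed_version))
assert urls[0] == 'https://dl.example.com/components-v150.0.0.json'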
Example #34
def Run(flow,
        launch_browser=True,
        http=None,
        auth_host_name='localhost',
        auth_host_port_start=8085):
    """Run a web flow to get oauth2 credentials.

    Args:
      flow: oauth2client.OAuth2WebServerFlow, A flow that is ready to run.
      launch_browser: bool, If False, give the user a URL to copy into
          a browser. Requires that they paste the refresh token back into the
          terminal. If True, opens a web browser in a new window.
      http: httplib2.Http, The http transport to use for authentication.
      auth_host_name: str, Host name for the redirect server.
      auth_host_port_start: int, First port to try for serving the redirect. If
          this port is taken, it will keep trying incrementing ports until 100
          have been tried, then fail.

    Returns:
      oauth2client.Credential, A ready-to-go credential that has already been
      put in the storage.

    Raises:
      AuthRequestRejectedError: If the request was rejected.
      AuthRequestFailedError: If the request fails.
    """

    if launch_browser:
        success = False
        port_number = auth_host_port_start

        while True:
            try:
                httpd = tools.ClientRedirectServer(
                    (auth_host_name, port_number), ClientRedirectHandler)
            except socket.error as e:
                if port_number > auth_host_port_start + 100:
                    success = False
                    break
                port_number += 1
            else:
                success = True
                break

        if success:
            flow.redirect_uri = ('http://%s:%s/' %
                                 (auth_host_name, port_number))

            authorize_url = flow.step1_get_authorize_url()
            webbrowser.open(authorize_url, new=1, autoraise=True)
            message = 'Your browser has been opened to visit:'
            log.err.Print('{message}\n\n    {url}\n\n'.format(
                message=message,
                url=authorize_url,
            ))

            httpd.handle_request()
            if 'error' in httpd.query_params:
                raise AuthRequestRejectedError('Unable to authenticate.')
            if 'code' in httpd.query_params:
                code = httpd.query_params['code']
            else:
                raise AuthRequestFailedError(
                    'Failed to find "code" in the query parameters of the redirect.'
                )
        else:
            message = (
                'Failed to start a local webserver listening on any port '
                'between {start_port} and {end_port}. Please check your '
                'firewall settings or locally running programs that may be '
                'blocking or using those ports.')
            log.warning(
                message.format(
                    start_port=auth_host_port_start,
                    end_port=port_number,
                ))

            launch_browser = False
            log.warning('Defaulting to URL copy/paste mode.')

    if not launch_browser:
        flow.redirect_uri = client.OOB_CALLBACK_URN
        authorize_url = flow.step1_get_authorize_url()
        message = 'Go to the following link in your browser:'
        log.err.Print('{message}\n\n    {url}\n\n'.format(
            message=message,
            url=authorize_url,
        ))
        try:
            code = input('Enter verification code: ').strip()
        except EOFError as e:
            raise AuthRequestRejectedError(e)

    try:
        credential = flow.step2_exchange(code, http=http)
    except client.FlowExchangeError as e:
        raise AuthRequestFailedError(e)
    except ResponseNotReady:
        raise AuthRequestFailedError(
            'Could not reach the login server. A potential cause of this could be '
            'because you are behind a proxy. Please set the environment variables '
            'HTTPS_PROXY and HTTP_PROXY to the address of the proxy in the format '
            '"protocol://address:port" (without quotes) and try again.\n'
            'Example: HTTPS_PROXY=https://192.168.0.1:8080')

    return credential
Example #35
    def Run(self, args):
        """Run 'deployments update'.

        Args:
          args: argparse.Namespace, The arguments that this command was invoked
              with.

        Returns:
          If --async=true, returns Operation to poll.
          Else, returns a struct containing the list of resources and list of
            outputs in the deployment.

        Raises:
          HttpException: An http error response was received while executing api
              request.
        """
        deployment_ref = self.resources.Parse(
            args.deployment_name,
            params={'project': properties.VALUES.core.project.GetOrFail},
            collection='deploymentmanager.deployments')
        if not args.IsSpecified('format') and args.async_:
            args.format = flags.OPERATION_FORMAT

        patch_request = False
        deployment = self.messages.Deployment(name=deployment_ref.deployment)

        if not (args.config is None and args.template is None
                and args.composite_type is None):
            deployment.target = importer.BuildTargetConfig(
                self.messages,
                config=args.config,
                template=args.template,
                composite_type=args.composite_type,
                properties=args.properties)
        elif (self.ReleaseTrack()
              in [base.ReleaseTrack.ALPHA, base.ReleaseTrack.BETA]
              and args.manifest_id):
            deployment.target = importer.BuildTargetConfigFromManifest(
                self.client, self.messages, dm_base.GetProject(),
                deployment_ref.deployment, args.manifest_id, args.properties)
        # Get the fingerprint from the deployment to update.
        try:
            current_deployment = self.client.deployments.Get(
                self.messages.DeploymentmanagerDeploymentsGetRequest(
                    project=dm_base.GetProject(),
                    deployment=deployment_ref.deployment))

            if args.fingerprint:
                deployment.fingerprint = dm_util.DecodeFingerprint(
                    args.fingerprint)
            else:
                # If no fingerprint is present, default to an empty fingerprint.
                # TODO(b/34966984): Remove the empty default after cleaning up all
                # deployments that have no fingerprint.
                deployment.fingerprint = current_deployment.fingerprint or b''

            # Get the credential from the deployment to update.
            if self.ReleaseTrack() in [base.ReleaseTrack.ALPHA
                                       ] and args.credential:
                deployment.credential = dm_util.CredentialFrom(
                    self.messages, args.credential)

            # Update the labels of the deployment

            deployment.labels = self._GetUpdatedDeploymentLabels(
                args, current_deployment)
            # If no config or manifest_id is specified but labels are being
            # updated, only add the patch_request header when directly updating
            # a non-previewed deployment.

            no_manifest = (self.ReleaseTrack() is
                           base.ReleaseTrack.GA) or not args.manifest_id
            patch_request = not args.config and no_manifest and (bool(
                args.update_labels) or bool(args.remove_labels))
            if args.description is None:
                deployment.description = current_deployment.description
            elif not args.description or args.description.isspace():
                deployment.description = None
            else:
                deployment.description = args.description
        except apitools_exceptions.HttpError as error:
            raise exceptions.HttpException(error,
                                           dm_api_util.HTTP_ERROR_FORMAT)

        if patch_request:
            args.format = flags.DEPLOYMENT_FORMAT
        try:
            # Necessary to handle API Version abstraction below
            parsed_delete_flag = Update._delete_policy_flag_map.GetEnumForChoice(
                args.delete_policy).name
            if self.ReleaseTrack() in [
                    base.ReleaseTrack.ALPHA, base.ReleaseTrack.BETA
            ]:
                parsed_create_flag = (
                    Update._create_policy_v2beta_flag_map.GetEnumForChoice(
                        args.create_policy).name)
            else:
                parsed_create_flag = (
                    Update._create_policy_flag_map.GetEnumForChoice(
                        args.create_policy).name)
            request = self.messages.DeploymentmanagerDeploymentsUpdateRequest(
                deploymentResource=deployment,
                project=dm_base.GetProject(),
                deployment=deployment_ref.deployment,
                preview=args.preview,
                createPolicy=(
                    self.messages.DeploymentmanagerDeploymentsUpdateRequest.
                    CreatePolicyValueValuesEnum(parsed_create_flag)),
                deletePolicy=(
                    self.messages.DeploymentmanagerDeploymentsUpdateRequest.
                    DeletePolicyValueValuesEnum(parsed_delete_flag)))
            client = self.client
            client.additional_http_headers['X-Cloud-DM-Patch'] = patch_request
            operation = client.deployments.Update(request)

            # Fetch and print the latest fingerprint of the deployment.
            updated_deployment = dm_api_util.FetchDeployment(
                self.client, self.messages, dm_base.GetProject(),
                deployment_ref.deployment)
            if patch_request:
                if args.async_:
                    log.warning(
                        'Updating Deployment metadata is synchronous, --async flag '
                        'is ignored.')
                log.status.Print(
                    'Update deployment metadata completed successfully.')
                return updated_deployment
            dm_util.PrintFingerprint(updated_deployment.fingerprint)
        except apitools_exceptions.HttpError as error:
            raise exceptions.HttpException(error,
                                           dm_api_util.HTTP_ERROR_FORMAT)
        if args.async_:
            return operation
        else:
            op_name = operation.name
            try:
                operation = dm_write.WaitForOperation(
                    self.client,
                    self.messages,
                    op_name,
                    'update',
                    dm_base.GetProject(),
                    timeout=OPERATION_TIMEOUT)
                dm_util.LogOperationStatus(operation, 'Update')
            except apitools_exceptions.HttpError as error:
                raise exceptions.HttpException(error,
                                               dm_api_util.HTTP_ERROR_FORMAT)

            return dm_api_util.FetchResourcesAndOutputs(
                self.client, self.messages, dm_base.GetProject(),
                deployment_ref.deployment,
                self.ReleaseTrack() is base.ReleaseTrack.ALPHA)
Example #36
def WarnOnDeprecatedFlags(args):
    if getattr(args, 'zone', None):  # TODO(b/28518663).
        log.warning(
            'The --zone flag is deprecated, please use --instance-group-zone'
            ' instead. It will be removed in a future release.')
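
A minimal check (hedged; the module's log must be available): the warning
fires only when a truthy 'zone' attribute is present on the namespace.

import argparse

WarnOnDeprecatedFlags(argparse.Namespace())             # no zone attr: silent
WarnOnDeprecatedFlags(argparse.Namespace(zone=None))    # falsy: silent
WarnOnDeprecatedFlags(argparse.Namespace(zone='us-a'))  # warns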
Example #37
  def Run(self, args):
    """This is what gets called when the user runs this command.

    Args:
      args: an argparse namespace. All the arguments that were provided to this
        command invocation.

    Returns:
      Some value that we want to have printed later.
    """
    adapter = self.context['api_adapter']
    location_get = self.context['location_get']
    location = location_get(args)
    cluster_ref = adapter.ParseCluster(args.name, location)
    cluster_name = args.name
    cluster_node_count = None
    cluster_zone = cluster_ref.zone
    try:
      # Attempt to get cluster for better prompts and to validate args.
      # Error is a warning but not fatal. Should only exit with a failure on
      # the actual update API calls below.
      cluster = adapter.GetCluster(cluster_ref)
      cluster_name = cluster.name
      cluster_node_count = cluster.currentNodeCount
      cluster_zone = cluster.zone
    except (exceptions.HttpException, apitools_exceptions.HttpForbiddenError,
            util.Error) as error:
      log.warning(('Problem loading details of cluster to update:\n\n{}\n\n'
                   'You can still attempt updates to the cluster.\n').format(
                       console_attr.SafeText(error)))

    # locations will be None if additional-zones was specified, an empty list
    # if it was specified with no argument, or a populated list if zones were
    # provided. We want to distinguish between the case where it isn't
    # specified (and thus shouldn't be passed on to the API) and the case where
    # it's specified as wanting no additional zones, in which case we must pass
    # the cluster's primary zone to the API.
    # TODO(b/29578401): Remove the hasattr once the flag is GA.
    locations = None
    if hasattr(args, 'additional_zones') and args.additional_zones is not None:
      locations = sorted([cluster_ref.zone] + args.additional_zones)
    if hasattr(args, 'node_locations') and args.node_locations is not None:
      locations = sorted(args.node_locations)

    if args.IsSpecified('username') or args.IsSpecified('enable_basic_auth'):
      flags.MungeBasicAuthFlags(args)
      options = api_adapter.SetMasterAuthOptions(
          action=api_adapter.SetMasterAuthOptions.SET_USERNAME,
          username=args.username,
          password=args.password)

      try:
        op_ref = adapter.SetMasterAuth(cluster_ref, options)
      except apitools_exceptions.HttpError as error:
        raise exceptions.HttpException(error, util.HTTP_ERROR_FORMAT)
    elif (args.generate_password or args.set_password or
          args.IsSpecified('password')):
      if args.generate_password:
        password = ''
        options = api_adapter.SetMasterAuthOptions(
            action=api_adapter.SetMasterAuthOptions.GENERATE_PASSWORD,
            password=password)
      else:
        password = args.password
        if not args.IsSpecified('password'):
          password = input('Please enter the new password: ')
        options = api_adapter.SetMasterAuthOptions(
            action=api_adapter.SetMasterAuthOptions.SET_PASSWORD,
            password=password)
      try:
        op_ref = adapter.SetMasterAuth(cluster_ref, options)
      except apitools_exceptions.HttpError as error:
        raise exceptions.HttpException(error, util.HTTP_ERROR_FORMAT)
    elif args.enable_network_policy is not None:
      console_io.PromptContinue(
          message='Enabling/Disabling Network Policy causes a rolling '
          'update of all cluster nodes, similar to performing a cluster '
          'upgrade.  This operation is long-running and will block other '
          'operations on the cluster (including delete) until it has run '
          'to completion.',
          cancel_on_no=True)
      options = api_adapter.SetNetworkPolicyOptions(
          enabled=args.enable_network_policy)
      try:
        op_ref = adapter.SetNetworkPolicy(cluster_ref, options)
      except apitools_exceptions.HttpError as error:
        raise exceptions.HttpException(error, util.HTTP_ERROR_FORMAT)
    elif args.start_ip_rotation or args.start_credential_rotation:
      if args.start_ip_rotation:
        msg_tmpl = """This will start an IP Rotation on cluster [{name}]. The \
master will be updated to serve on a new IP address in addition to the current \
IP address. Kubernetes Engine will then recreate all nodes ({num_nodes} nodes) \
to point to the new IP address. This operation is long-running and will block \
other operations on the cluster (including delete) until it has run to \
completion."""
        rotate_credentials = False
      elif args.start_credential_rotation:
        msg_tmpl = """This will start an IP and Credentials Rotation on cluster\
 [{name}]. The master will be updated to serve on a new IP address in addition \
to the current IP address, and cluster credentials will be rotated. Kubernetes \
Engine will then recreate all nodes ({num_nodes} nodes) to point to the new IP \
address. This operation is long-running and will block other operations on the \
cluster (including delete) until it has run to completion."""
        rotate_credentials = True
      console_io.PromptContinue(
          message=msg_tmpl.format(
              name=cluster_name,
              num_nodes=cluster_node_count if cluster_node_count else '?'),
          cancel_on_no=True)
      try:
        op_ref = adapter.StartIpRotation(
            cluster_ref, rotate_credentials=rotate_credentials)
      except apitools_exceptions.HttpError as error:
        raise exceptions.HttpException(error, util.HTTP_ERROR_FORMAT)
    elif args.complete_ip_rotation or args.complete_credential_rotation:
      if args.complete_ip_rotation:
        msg_tmpl = """This will complete the in-progress IP Rotation on \
cluster [{name}]. The master will be updated to stop serving on the old IP \
address and only serve on the new IP address. Make sure all API clients have \
been updated to communicate with the new IP address (e.g. by running `gcloud \
container clusters get-credentials --project {project} --zone {zone} {name}`). \
This operation is long-running and will block other operations on the cluster \
(including delete) until it has run to completion."""
      elif args.complete_credential_rotation:
        msg_tmpl = """This will complete the in-progress Credential Rotation on\
 cluster [{name}]. The master will be updated to stop serving on the old IP \
address and only serve on the new IP address. Old cluster credentials will be \
invalidated. Make sure all API clients have been updated to communicate with \
the new IP address (e.g. by running `gcloud container clusters get-credentials \
--project {project} --zone {zone} {name}`). This operation is long-running and \
will block other operations on the cluster (including delete) until it has run \
to completion."""
      console_io.PromptContinue(
          message=msg_tmpl.format(
              name=cluster_name,
              project=cluster_ref.projectId,
              zone=cluster_zone),
          cancel_on_no=True)
      try:
        op_ref = adapter.CompleteIpRotation(cluster_ref)
      except apitools_exceptions.HttpError as error:
        raise exceptions.HttpException(error, util.HTTP_ERROR_FORMAT)
    elif args.update_labels is not None:
      try:
        op_ref = adapter.UpdateLabels(cluster_ref, args.update_labels)
      except apitools_exceptions.HttpError as error:
        raise exceptions.HttpException(error, util.HTTP_ERROR_FORMAT)
    elif args.remove_labels is not None:
      try:
        op_ref = adapter.RemoveLabels(cluster_ref, args.remove_labels)
      except apitools_exceptions.HttpError as error:
        raise exceptions.HttpException(error, util.HTTP_ERROR_FORMAT)
    elif args.logging_service is not None and args.monitoring_service is None:
      try:
        op_ref = adapter.SetLoggingService(cluster_ref, args.logging_service)
      except apitools_exceptions.HttpError as error:
        raise exceptions.HttpException(error, util.HTTP_ERROR_FORMAT)
    elif args.maintenance_window is not None:
      try:
        op_ref = adapter.SetMaintenanceWindow(cluster_ref,
                                              args.maintenance_window)
      except apitools_exceptions.HttpError as error:
        raise exceptions.HttpException(error, util.HTTP_ERROR_FORMAT)
    else:
      if args.enable_legacy_authorization is not None:
        op_ref = adapter.SetLegacyAuthorization(
            cluster_ref, args.enable_legacy_authorization)
      else:
        options = self.ParseUpdateOptions(args, locations)
        op_ref = adapter.UpdateCluster(cluster_ref, options)

    if not args.async:
      adapter.WaitForOperation(op_ref,
                               'Updating {0}'.format(cluster_ref.clusterId),
                               timeout_s=1800)

      log.UpdatedResource(cluster_ref)
      cluster_url = util.GenerateClusterUrl(cluster_ref)
      log.status.Print('To inspect the contents of your cluster, go to: ' +
                       cluster_url)

      if (args.start_ip_rotation or args.complete_ip_rotation or
          args.start_credential_rotation or args.complete_credential_rotation):
        cluster = adapter.GetCluster(cluster_ref)
        try:
          util.ClusterConfig.Persist(cluster, cluster_ref.projectId)
        except kconfig.MissingEnvVarError as error:
          log.warning(error)
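The locations derivation above hinges on distinguishing "flag absent" (None,
don't send to the API) from "flag given but empty" (send only the primary
zone). A minimal standalone sketch of that rule, with stand-in zone names:

def derive_locations(primary_zone, additional_zones=None, node_locations=None):
    # None means the flag was not passed and must not be sent to the API;
    # an empty additional_zones list still sends the cluster's primary zone.
    locations = None
    if additional_zones is not None:
        locations = sorted([primary_zone] + additional_zones)
    if node_locations is not None:
        locations = sorted(node_locations)
    return locations

print(derive_locations('us-central1-a'))                       # None
print(derive_locations('us-central1-a', additional_zones=[]))  # ['us-central1-a']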
Example #38
0
def _MatchOneWordInText(backend, key, op, warned_attribute, value, pattern):
  """Returns True if value word matches pattern.

  Args:
    backend: The parser backend object.
    key: The parsed expression key.
    op: The expression operator string.
    warned_attribute: Deprecation warning Boolean attribute name.
    value: The value to be matched by pattern.
    pattern: An (operand, standard_regex, deprecated_regex) tuple.

  Raises:
    ValueError: To catch codebase reliance on deprecated usage.

  Returns:
    True if pattern matches value.

  Examples:
    See surface/topic/filters.py for a table of example matches.
  """
  operand, standard_regex, deprecated_regex = pattern
  if isinstance(value, float):
    try:
      if value == float(operand):
        return True
    except ValueError:
      pass
    if value == 0 and operand.lower() == 'false':
      return True
    if value == 1 and operand.lower() == 'true':
      return True
    # Stringize float with trailing .0's stripped.
    text = re.sub(r'\.0*$', '', _Stringize(value))  # pytype: disable=wrong-arg-types
  elif value == operand:
    return True
  elif value is None:
    # if operand == '':  # pylint: disable=g-explicit-bool-comparison
    if operand in ('', None):
      return True
    if operand == '*' and op == ':':
      return False
    text = 'null'
  else:
    text = NormalizeForSearch(value, html=True)

  # TODO(b/64595527): OnePlatform : and = operator deprecation train.
  # Phase 1: return deprecated_matched and warn if different from matched.
  # Phase 2: return matched and warn if different from deprecated_matched.
  # Phase 3: drop deprecated logic.
  matched = bool(standard_regex.search(text))
  if not deprecated_regex:
    return matched

  deprecated_matched = bool(deprecated_regex.search(text))
  if (matched != deprecated_matched and
      not getattr(backend, warned_attribute, False)):
    setattr(backend, warned_attribute, True)
    old_match = 'matches' if deprecated_matched else 'does not match'
    new_match = 'will match' if matched else 'will not match'
    log.warning('--filter : operator evaluation is changing for '
                'consistency across Google APIs.  {key}{op}{operand} currently '
                '{old_match} but {new_match} in the near future.  Run '
                '`gcloud topic filters` for details.'.format(
                    key=resource_lex.GetKeyName(key),
                    op=op,
                    operand=operand,
                    old_match=old_match,
                    new_match=new_match))
  return deprecated_matched
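For context, the pattern argument above is an (operand, standard_regex,
deprecated_regex) tuple. A self-contained sketch with hypothetical regexes
(the real tuples are compiled by the filter-expression backend):

import re

operand = 'prod'
standard_regex = re.compile(r'\bprod\b', re.IGNORECASE)    # whole-word match
deprecated_regex = re.compile(r'\bprod', re.IGNORECASE)    # legacy prefix match
pattern = (operand, standard_regex, deprecated_regex)

text = 'production cluster'
matched = bool(pattern[1].search(text))             # False under the new rules
deprecated_matched = bool(pattern[2].search(text))  # True under the old rules
print(matched, deprecated_matched)                  # the divergence that warns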
Example #39
0
def WarnIfPartialRequestFail(problems):
    errors = []
    for _, message in problems:
        errors.append(six.text_type(message))

    log.warning(ConstructList('Some requests did not succeed.', errors))
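A quick illustration of the (resource, message) pairs this helper expects, with
ConstructList approximated by plain string formatting (stand-in data):

problems = [
    ('instance-1', 'Permission denied'),
    ('instance-2', 'Not found'),
]
errors = [str(message) for _, message in problems]
print('\n'.join(['Some requests did not succeed.'] +
                [' - {}'.format(e) for e in errors]))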
Example #40
0
def _Run(args,
         track=None,
         enable_runtime=True,
         enable_max_instances=False,
         enable_connected_vpc=False,
         enable_service_account=False):
    """Run a function deployment with the given args."""
    # Check for labels that start with `deployment`, which is not allowed.
    labels_util.CheckNoDeploymentLabels('--remove-labels', args.remove_labels)
    labels_util.CheckNoDeploymentLabels('--update-labels', args.update_labels)

    # Check that exactly one trigger type is specified properly.
    trigger_util.ValidateTriggerArgs(args.trigger_event, args.trigger_resource,
                                     args.IsSpecified('retry'),
                                     args.IsSpecified('trigger_http'))

    trigger_params = trigger_util.GetTriggerEventParams(
        args.trigger_http, args.trigger_bucket, args.trigger_topic,
        args.trigger_event, args.trigger_resource)

    function_ref = args.CONCEPTS.name.Parse()
    function_url = function_ref.RelativeName()

    messages = api_util.GetApiMessagesModule(track)

    # Get an existing function or create a new one.
    function = api_util.GetFunction(function_url)
    is_new_function = function is None
    if is_new_function:
        trigger_util.CheckTriggerSpecified(args)
        function = messages.CloudFunction()
        function.name = function_url
    elif trigger_params:
        # If the new deployment would implicitly change the trigger_event
        # type, raise an error.
        trigger_util.CheckLegacyTriggerUpdate(function.eventTrigger,
                                              trigger_params['trigger_event'])

    # Keep track of which fields are updated in the case of patching.
    updated_fields = []

    # Populate function properties based on args.
    if args.entry_point:
        function.entryPoint = args.entry_point
        updated_fields.append('entryPoint')
    if args.timeout:
        function.timeout = '{}s'.format(args.timeout)
        updated_fields.append('timeout')
    if args.memory:
        function.availableMemoryMb = utils.BytesToMb(args.memory)
        updated_fields.append('availableMemoryMb')
    if enable_service_account and args.service_account:
        function.serviceAccountEmail = args.service_account
        updated_fields.append('serviceAccountEmail')
    if enable_runtime:
        if args.IsSpecified('runtime'):
            function.runtime = args.runtime
            updated_fields.append('runtime')
        elif is_new_function:
            log.warning('Flag `--runtime` will become a required flag soon. '
                        'Please specify the value for this flag.')
    if enable_max_instances:
        if (args.IsSpecified('max_instances')
                or args.IsSpecified('clear_max_instances')):
            max_instances = 0 if args.clear_max_instances else args.max_instances
            function.maxInstances = max_instances
            updated_fields.append('maxInstances')
    if enable_connected_vpc:
        if args.connected_vpc:
            function.network = args.connected_vpc
            updated_fields.append('network')
        if args.vpc_connector:
            function.vpcConnector = args.vpc_connector
            updated_fields.append('vpcConnector')

    # Populate trigger properties of function based on trigger args.
    if args.trigger_http:
        function.httpsTrigger = messages.HttpsTrigger()
        function.eventTrigger = None
        updated_fields.extend(['eventTrigger', 'httpsTrigger'])
    if trigger_params:
        function.eventTrigger = trigger_util.CreateEventTrigger(
            **trigger_params)
        function.httpsTrigger = None
        updated_fields.extend(['eventTrigger', 'httpsTrigger'])
    if args.IsSpecified('retry'):
        updated_fields.append('eventTrigger.failurePolicy')
        if args.retry:
            function.eventTrigger.failurePolicy = messages.FailurePolicy()
            function.eventTrigger.failurePolicy.retry = messages.Retry()
        else:
            function.eventTrigger.failurePolicy = None
    elif function.eventTrigger:
        function.eventTrigger.failurePolicy = None

    # Populate source properties of function based on source args.
    # Only add source to the function if it is explicitly provided, this is a
    # new function, a stage bucket is in use, or an existing function that
    # previously used local source is being redeployed.
    if (args.source or args.stage_bucket or is_new_function
            or function.sourceUploadUrl):
        updated_fields.extend(
            source_util.SetFunctionSourceProps(function, function_ref,
                                               args.source, args.stage_bucket))

    # Apply label args to function
    if labels_util.SetFunctionLabels(function, args.update_labels,
                                     args.remove_labels, args.clear_labels):
        updated_fields.append('labels')

    # Apply environment variables args to function
    updated_fields.extend(_ApplyEnvVarsArgsToFunction(function, args))

    if is_new_function:
        return api_util.CreateFunction(function,
                                       function_ref.Parent().RelativeName())
    if updated_fields:
        return api_util.PatchFunction(function, updated_fields)
    log.status.Print('Nothing to update.')
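The deploy flow above mutates the function message while accumulating an
update mask of field names for the eventual PATCH. A stripped-down sketch of
that pattern with a hypothetical stand-in class (not the real messages
module):

class Function(object):
    """Hypothetical stand-in for messages.CloudFunction."""
    def __init__(self):
        self.entryPoint = None
        self.timeout = None

def build_patch(function, entry_point=None, timeout_s=None):
    # Record the name of every field actually set; the list becomes the
    # update mask sent alongside the patched resource.
    updated_fields = []
    if entry_point:
        function.entryPoint = entry_point
        updated_fields.append('entryPoint')
    if timeout_s:
        function.timeout = '{}s'.format(timeout_s)
        updated_fields.append('timeout')
    return updated_fields

f = Function()
print(build_patch(f, entry_point='main', timeout_s=60))  # ['entryPoint', 'timeout']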
Example #41
0
def RunBaseCreateCommand(args, release_track):
    """Creates a new Cloud SQL instance.

  Args:
    args: argparse.Namespace, The arguments that this command was invoked with.
    release_track: base.ReleaseTrack, the release track that this was run under.

  Returns:
    A dict object representing the operations resource describing the create
    operation if the create was successful.
  Raises:
    HttpException: A http error response was received while executing api
        request.
  """
    client = common_api_util.SqlClient(common_api_util.API_VERSION_DEFAULT)
    sql_client = client.sql_client
    sql_messages = client.sql_messages

    validate.ValidateInstanceName(args.instance)
    instance_ref = client.resource_parser.Parse(
        args.instance,
        params={'project': properties.VALUES.core.project.GetOrFail},
        collection='sql.instances')

    # Get the region, tier, and database version from the master if these fields
    # are not specified.
    # TODO(b/64266672): Remove once API does not require these fields.
    if args.IsSpecified('master_instance_name'):
        master_instance_ref = client.resource_parser.Parse(
            args.master_instance_name,
            params={'project': properties.VALUES.core.project.GetOrFail},
            collection='sql.instances')
        try:
            master_instance_resource = sql_client.instances.Get(
                sql_messages.SqlInstancesGetRequest(
                    project=instance_ref.project,
                    instance=master_instance_ref.instance))
        except apitools_exceptions.HttpError as error:
            # TODO(b/64292220): Remove once API gives helpful error message.
            log.debug('operation : %s', six.text_type(master_instance_ref))
            exc = exceptions.HttpException(error)
            if resource_property.Get(
                    exc.payload.content,
                    resource_lex.ParseKey('error.errors[0].reason'),
                    None) == 'notAuthorized':
                msg = (
                    'You are either not authorized to access the master instance or '
                    'it does not exist.')
                raise exceptions.HttpException(msg)
            raise
        if not args.IsSpecified('region'):
            args.region = master_instance_resource.region
        if not args.IsSpecified('database_version'):
            args.database_version = master_instance_resource.databaseVersion
        if not args.IsSpecified('tier') and master_instance_resource.settings:
            args.tier = master_instance_resource.settings.tier
        # Check for CMEK usage; warn the user about replica inheriting the setting.
        if master_instance_resource.diskEncryptionConfiguration:
            command_util.ShowCmekWarning('replica', 'the master instance')

    # --root-password is required when creating SQL Server instances
    if args.IsSpecified(
            'database_version') and args.database_version.startswith(
                'SQLSERVER') and not args.IsSpecified('root_password'):
        raise exceptions.RequiredArgumentException(
            '--root-password',
            '`--root-password` is required when creating SQL Server instances.'
        )

    instance_resource = (
        command_util.InstancesV1Beta4.ConstructCreateInstanceFromArgs(
            sql_messages,
            args,
            instance_ref=instance_ref,
            release_track=release_track))

    # TODO(b/122660263): Remove when V1 instances are no longer supported.
    # V1 instances are deprecated. Prompt to continue if one is being created.
    if api_util.IsInstanceV1(instance_resource):
        log.warning(
            'First Generation instances will be automatically upgraded '
            'to Second Generation starting March 4th, 2020, and First Generation '
            'will be fully decommissioned on March 25, 2020. We recommend you '
            'create a Second Generation instance.')
        console_io.PromptContinue(cancel_on_no=True)

    if args.pricing_plan == 'PACKAGE':
        console_io.PromptContinue(
            'Charges will begin accruing immediately. Really create Cloud '
            'SQL instance?',
            cancel_on_no=True)

    operation_ref = None
    try:
        result_operation = sql_client.instances.Insert(instance_resource)

        operation_ref = client.resource_parser.Create(
            'sql.operations',
            operation=result_operation.name,
            project=instance_ref.project)

        if args.async_:
            if not args.IsSpecified('format'):
                args.format = 'default'
            return sql_client.operations.Get(
                sql_messages.SqlOperationsGetRequest(
                    project=operation_ref.project,
                    operation=operation_ref.operation))

        operations.OperationsV1Beta4.WaitForOperation(
            sql_client,
            operation_ref,
            'Creating Cloud SQL instance',
            # TODO(b/138403566): Remove the override once we improve creation times.
            max_wait_seconds=680)

        log.CreatedResource(instance_ref)

        new_resource = sql_client.instances.Get(
            sql_messages.SqlInstancesGetRequest(
                project=instance_ref.project, instance=instance_ref.instance))
        return new_resource
    except apitools_exceptions.HttpError as error:
        log.debug('operation : %s', six.text_type(operation_ref))
        exc = exceptions.HttpException(error)
        if resource_property.Get(
                exc.payload.content,
                resource_lex.ParseKey('error.errors[0].reason'),
                None) == 'errorMaxInstancePerLabel':
            msg = resource_property.Get(exc.payload.content,
                                        resource_lex.ParseKey('error.message'),
                                        None)
            raise exceptions.HttpException(msg)
        raise
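Both error handlers above key off error.errors[0].reason inside the parsed
HTTP error payload. A minimal sketch of that lookup over a plain dict
(hypothetical payload shape; the real code goes through resource_property.Get
and resource_lex.ParseKey):

payload = {
    'error': {
        'message': 'Too many instances with this label.',
        'errors': [{'reason': 'errorMaxInstancePerLabel'}],
    }
}

def get_reason(content):
    # Equivalent of resource_property.Get(content, 'error.errors[0].reason', None).
    try:
        return content['error']['errors'][0]['reason']
    except (KeyError, IndexError, TypeError):
        return None

print(get_reason(payload))  # errorMaxInstancePerLabel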
Example #42
0
  def __PushGrpcConfigFiles(self, files, service_name, project_id, config_id):
    """Creates a new ServiceConfig in SerivceManagement from gRPC files.

    Args:
      files: Files to be pushed to Service Management
      service_name: Name of the service to push configs to
      project_id: Project the service belongs to
      config_id: ID to assign to the new ServiceConfig

    Returns:
      ServiceConfig Id

    Raises:
      BadFileException: If there is something wrong with the files
    """
    messages = endpoints.GetMessagesModule()
    file_types = messages.ConfigFile.FileTypeValueValuesEnum
    # TODO(b/77867100): remove .proto support and deprecation warning.
    give_proto_deprecate_warning = False
    config_files = []

    for config_file in files:
      config_contents = endpoints.ReadServiceConfigFile(config_file)

      config_dict = self.__ValidJsonOrYaml(config_file, config_contents)
      if config_dict:
        if config_dict.get('type') == 'google.api.Service':
          config_files.append(
              self.__MakeConfigFileMessage(config_contents, config_file,
                                           file_types.SERVICE_CONFIG_YAML))
        elif 'name' in config_dict:
          # This is a special case. If we have been provided a Google Service
          # Configuration file which has a service 'name' field, but no 'type'
          # field, we have to assume that this is a normalized service config,
          # and can be uploaded via the CreateServiceConfig API. Therefore,
          # we can short-circuit the process here.
          if len(files) > 1:
            raise calliope_exceptions.BadFileException(
                ('Ambiguous input. Found normalized service configuration in '
                 'file [{0}], but received multiple input files. To upload '
                 'normalized service config, please provide it separately from '
                 'other input files to avoid ambiguity.'
                ).format(config_file))

          return self.__PushServiceConfigFiles(
              files, service_name, project_id, config_id, normalized=True)
        else:
          raise calliope_exceptions.BadFileException(
              'The file {} is not a valid api configuration file'.format(
                  config_file))
      elif endpoints.IsProtoDescriptor(config_file):
        config_files.append(
            self.__MakeConfigFileMessage(config_contents, config_file,
                                         file_types.FILE_DESCRIPTOR_SET_PROTO))
      elif endpoints.IsRawProto(config_file):
        give_proto_deprecate_warning = True
        config_files.append(
            self.__MakeConfigFileMessage(config_contents, config_file,
                                         file_types.PROTO_FILE))
      else:
        raise calliope_exceptions.BadFileException(
            ('Could not determine the content type of file [{0}]. Supported '
             'extensions are .json .yaml .yml .pb and .descriptor'
            ).format(config_file))

    if give_proto_deprecate_warning:
      log.warning(
          'Support for uploading uncompiled .proto files is deprecated and '
          'will soon be removed. Use compiled descriptor sets (.pb) instead.\n')

    return self.__PushServiceConfigFiles(
        config_files, service_name, project_id, config_id)
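The dispatch above classifies each input file before upload. A compact sketch
of the same decision by extension (hypothetical helper; the real checks also
inspect file contents):

def classify_config_file(filename):
    # Structured service config vs. compiled descriptor set vs. raw .proto.
    if filename.endswith(('.json', '.yaml', '.yml')):
        return 'SERVICE_CONFIG_YAML'
    if filename.endswith(('.pb', '.descriptor')):
        return 'FILE_DESCRIPTOR_SET_PROTO'
    if filename.endswith('.proto'):
        return 'PROTO_FILE'  # deprecated upload path, warned about above
    raise ValueError('Unsupported extension: {}'.format(filename))

print(classify_config_file('api_descriptor.pb'))  # FILE_DESCRIPTOR_SET_PROTO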
Example #43
0
    def Run(self, args):
        """This is what gets called when the user runs this command.

    Args:
      args: an argparse namespace. All the arguments that were provided to this
        command invocation.

    Returns:
      Some value that we want to have printed later.
    """
        adapter = self.context['api_adapter']
        location_get = self.context['location_get']
        location = location_get(args)
        cluster_ref = adapter.ParseCluster(args.name, location)
        # Make sure it exists (will raise appropriate error if not)
        cluster = adapter.GetCluster(cluster_ref)

        # locations will be None if additional-zones was specified, an empty list
        # if it was specified with no argument, or a populated list if zones were
        # provided. We want to distinguish between the case where it isn't
        # specified (and thus shouldn't be passed on to the API) and the case where
        # it's specified as wanting no additional zones, in which case we must pass
        # the cluster's primary zone to the API.
        # TODO(b/29578401): Remove the hasattr once the flag is GA.
        locations = None
        if hasattr(args,
                   'additional_zones') and args.additional_zones is not None:
            locations = sorted([cluster_ref.zone] + args.additional_zones)
        if hasattr(args, 'node_locations') and args.node_locations is not None:
            locations = sorted(args.node_locations)

        if args.username is not None or args.enable_basic_auth is not None:
            flags.MungeBasicAuthFlags(args)
            options = api_adapter.SetMasterAuthOptions(
                action=api_adapter.SetMasterAuthOptions.SET_USERNAME,
                username=args.username,
                password=args.password)

            try:
                op_ref = adapter.SetMasterAuth(cluster_ref, options)
            except apitools_exceptions.HttpError as error:
                raise exceptions.HttpException(error, util.HTTP_ERROR_FORMAT)
        elif (args.generate_password or args.set_password
              or args.password is not None):
            if args.generate_password:
                password = ''
                options = api_adapter.SetMasterAuthOptions(
                    action=api_adapter.SetMasterAuthOptions.GENERATE_PASSWORD,
                    password=password)
            else:
                password = args.password
                if args.password is None:
                    password = raw_input('Please enter the new password:')
                # Reconstructed: the scrubbed span applied the new password
                # via the SET_PASSWORD action and issued the SetMasterAuth
                # call.
                options = api_adapter.SetMasterAuthOptions(
                    action=api_adapter.SetMasterAuthOptions.SET_PASSWORD,
                    password=password)
            try:
                op_ref = adapter.SetMasterAuth(cluster_ref, options)
            except apitools_exceptions.HttpError as error:
                raise exceptions.HttpException(error, util.HTTP_ERROR_FORMAT)
        elif args.enable_network_policy is not None:
            console_io.PromptContinue(
                message=
                'Enabling/Disabling Network Policy causes a rolling '
                'update of all cluster nodes, similar to performing a cluster '
                'upgrade.  This operation is long-running and will block other '
                'operations on the cluster (including delete) until it has run '
                'to completion.',
                cancel_on_no=True)
            options = api_adapter.SetNetworkPolicyOptions(
                enabled=args.enable_network_policy)
            try:
                op_ref = adapter.SetNetworkPolicy(cluster_ref, options)
            except apitools_exceptions.HttpError as error:
                raise exceptions.HttpException(error, util.HTTP_ERROR_FORMAT)
        elif args.start_ip_rotation:
            console_io.PromptContinue(
                message=
                'This will start an IP Rotation on cluster [{name}]. The '
                'master will be updated to serve on a new IP address in addition to '
                'the current IP address. Kubernetes Engine will then recreate all '
                'nodes ({num_nodes} nodes) to point to the new IP address. This '
                'operation is long-running and will block other operations on the '
                'cluster (including delete) until it has run to completion.'.
                format(name=cluster.name, num_nodes=cluster.currentNodeCount),
                cancel_on_no=True)
            try:
                op_ref = adapter.StartIpRotation(cluster_ref)
            except apitools_exceptions.HttpError as error:
                raise exceptions.HttpException(error, util.HTTP_ERROR_FORMAT)
        elif args.complete_ip_rotation:
            console_io.PromptContinue(
                message=
                'This will complete the in-progress IP Rotation on cluster '
                '[{name}]. The master will be updated to stop serving on the old IP '
                'address and only serve on the new IP address. Make sure all API '
                'clients have been updated to communicate with the new IP address '
                '(e.g. by running `gcloud container clusters get-credentials '
                '--project {project} --zone {zone} {name}`). This operation is long-'
                'running and will block other operations on the cluster (including '
                'delete) until it has run to completion.'.format(
                    name=cluster.name,
                    project=cluster_ref.projectId,
                    zone=cluster.zone),
                cancel_on_no=True)
            try:
                op_ref = adapter.CompleteIpRotation(cluster_ref)
            except apitools_exceptions.HttpError as error:
                raise exceptions.HttpException(error, util.HTTP_ERROR_FORMAT)
        elif args.update_labels is not None:
            try:
                op_ref = adapter.UpdateLabels(cluster_ref, args.update_labels)
            except apitools_exceptions.HttpError as error:
                raise exceptions.HttpException(error, util.HTTP_ERROR_FORMAT)
        elif args.remove_labels is not None:
            try:
                op_ref = adapter.RemoveLabels(cluster_ref, args.remove_labels)
            except apitools_exceptions.HttpError as error:
                raise exceptions.HttpException(error, util.HTTP_ERROR_FORMAT)
        elif args.logging_service is not None:
            try:
                op_ref = adapter.SetLoggingService(cluster_ref,
                                                   args.logging_service)
            except apitools_exceptions.HttpError as error:
                raise exceptions.HttpException(error, util.HTTP_ERROR_FORMAT)
        elif args.maintenance_window is not None:
            try:
                op_ref = adapter.SetMaintenanceWindow(cluster_ref,
                                                      args.maintenance_window)
            except apitools_exceptions.HttpError as error:
                raise exceptions.HttpException(error, util.HTTP_ERROR_FORMAT)
        else:
            if args.enable_legacy_authorization is not None:
                op_ref = adapter.SetLegacyAuthorization(
                    cluster_ref, args.enable_legacy_authorization)
            else:
                options = self.ParseUpdateOptions(args, locations)
                op_ref = adapter.UpdateCluster(cluster_ref, options)

        if not args.async:
            adapter.WaitForOperation(
                op_ref, 'Updating {0}'.format(cluster_ref.clusterId))

            log.UpdatedResource(cluster_ref)
            cluster_url = util.GenerateClusterUrl(cluster_ref)
            log.status.Print(
                'To inspect the contents of your cluster, go to: ' +
                cluster_url)

            if args.start_ip_rotation or args.complete_ip_rotation:
                cluster = adapter.GetCluster(cluster_ref)
                try:
                    util.ClusterConfig.Persist(cluster, cluster_ref.projectId)
                except kconfig.MissingEnvVarError as error:
                    log.warning(error.message)
Example #44
0
    def Update(self, update_seed=None, allow_no_backup=False):
        """Performs an update of the given components.

    If no components are provided, it will attempt to update everything you have
    installed.

    Args:
      update_seed: list of str, A list of component ids to update.
      allow_no_backup: bool, True if we want to allow the updater to run
        without creating a backup.  This lets us be in the root directory of the
        SDK and still do an update.  It is more fragile if there is a failure,
        so we only do it if necessary.

    Raises:
      InvalidComponentError: If any of the given component ids do not exist.
    """
        self._EnsureNotDisabled()
        try:
            install_state, diff = self._GetStateAndDiff()
        except snapshots.IncompatibleSchemaVersionError as e:
            return self._DoFreshInstall(e)

        if update_seed:
            invalid_seeds = diff.InvalidUpdateSeeds(update_seed)
            if invalid_seeds:
                if os.environ.get('CLOUDSDK_REINSTALL_COMPONENTS'):
                    # We are doing a reinstall.  Ignore any components that no longer
                    # exist.
                    update_seed = set(update_seed) - invalid_seeds
                else:
                    raise InvalidComponentError(
                        'The following components are unknown [{invalid_seeds}]'
                        .format(invalid_seeds=', '.join(invalid_seeds)))
        else:
            update_seed = diff.current.components.keys()

        to_remove = diff.ToRemove(update_seed)
        to_install = diff.ToInstall(update_seed)

        self.__Write()
        if not to_remove and not to_install:
            self.__Write('All components are up to date.')
            with install_state.LastUpdateCheck() as update_check:
                update_check.SetFromSnapshot(diff.latest, force=True)
            return

        disable_backup = self._CheckCWD(allow_no_backup=allow_no_backup)
        self._PrintPendingAction(
            diff.DetailsForCurrent(to_remove - to_install), 'removed')
        self._PrintPendingAction(diff.DetailsForLatest(to_remove & to_install),
                                 'updated')
        self._PrintPendingAction(diff.DetailsForLatest(to_install - to_remove),
                                 'installed')
        self.__Write()

        if not console_io.PromptContinue():
            return

        components_to_install = diff.DetailsForLatest(to_install)
        components_to_remove = diff.DetailsForCurrent(to_remove)

        for c in components_to_install:
            metrics.Installs(c.id, c.version.version_string)

        if disable_backup:
            self.__Write('Performing in place update...\n')
            self._UpdateAndPrint(components_to_remove, 'Uninstalling',
                                 install_state.Uninstall)
            self._UpdateAndPrint(components_to_install, 'Installing',
                                 self._InstallFunction(install_state, diff))
        else:
            self.__Write('Creating update staging area...\n')
            staging_state = install_state.CloneToStaging()
            self._UpdateAndPrint(components_to_remove, 'Uninstalling',
                                 staging_state.Uninstall)
            self._UpdateAndPrint(components_to_install, 'Installing',
                                 self._InstallFunction(staging_state, diff))
            self.__Write('Creating backup and activating new installation...')
            install_state.ReplaceWith(staging_state)

        with install_state.LastUpdateCheck() as update_check:
            update_check.SetFromSnapshot(diff.latest, force=True)
        self.__Write('\nDone!\n')

        bad_commands = self.FindAllOldToolsOnPath()
        if bad_commands:
            log.warning("""\
There are older versions of Google Cloud Platform tools on your system PATH.
Please remove the following to avoid accidentally invoking these old tools:

{0}

""".format('\n'.join(bad_commands)))
Example #45
0
 def WrappedRun(*args, **kw):
     if is_removed:
         raise DeprecationException(error)
     log.warning(warning)
     return run_func(*args, **kw)
Example #46
0
 def Run(self, args):
     queues_client = queues.Queues()
     queue_ref = parsers.ParseQueue(args.queue, args.location)
     log.warning(constants.QUEUE_MANAGEMENT_WARNING)
     queues_client.Resume(queue_ref)
     log.status.Print('Resumed queue [{}].'.format(queue_ref.Name()))
Example #47
0
def _AllowlistClientIP(instance_ref,
                       sql_client,
                       sql_messages,
                       resources,
                       minutes=5):
    """Add CLIENT_IP to the authorized networks list.

  Makes an API call to add CLIENT_IP to the authorized networks list.
  The server knows to interpret the string CLIENT_IP as the address with which
  the client reaches the server. This IP will be allowlisted for the given
  number of minutes (5 by default).

  Args:
    instance_ref: resources.Resource, The instance we're connecting to.
    sql_client: apitools.BaseApiClient, A working client for the sql version to
      be used.
    sql_messages: module, The module that defines the messages for the sql
      version to be used.
    resources: resources.Registry, The registry that can create resource refs
      for the sql version to be used.
    minutes: How long the client IP will be allowlisted for, in minutes.

  Returns:
    string, The name of the authorized network rule. Callers can use this name
    to find out the IP the client reached the server with.
  Raises:
    HttpException: An http error response was received while executing api
        request.
    ResourceNotFoundError: The SQL instance was not found.
  """
    time_of_connection = network.GetCurrentTime()

    acl_name = 'sql connect at time {0}'.format(time_of_connection)
    user_acl = sql_messages.AclEntry(
        kind='sql#aclEntry',
        name=acl_name,
        expirationTime=iso_duration.Duration(
            minutes=minutes).GetRelativeDateTime(time_of_connection)
        # TODO(b/122989827): Remove this once the datetime parsing is fixed.
        # Setting the microseconds component to 10 milliseconds. This complies
        # with backend formatting restrictions, since backend requires a microsecs
        # component and anything less than 1 milli will get truncated.
        .replace(microsecond=10000).isoformat(),
        value='CLIENT_IP')

    try:
        original = sql_client.instances.Get(
            sql_messages.SqlInstancesGetRequest(
                project=instance_ref.project, instance=instance_ref.instance))
    except apitools_exceptions.HttpError as error:
        if error.status_code == six.moves.http_client.FORBIDDEN:
            raise exceptions.ResourceNotFoundError(
                'There was no instance found at {} or you are not authorized to '
                'connect to it.'.format(instance_ref.RelativeName()))
        raise calliope_exceptions.HttpException(error)

    # TODO(b/122989827): Remove this once the datetime parsing is fixed.
    original.serverCaCert = None

    original.settings.ipConfiguration.authorizedNetworks.append(user_acl)
    try:
        patch_request = sql_messages.SqlInstancesPatchRequest(
            databaseInstance=original,
            project=instance_ref.project,
            instance=instance_ref.instance)
        result = sql_client.instances.Patch(patch_request)
    except apitools_exceptions.HttpError as error:
        log.warning(
            "If you're connecting from an IPv6 address, or are "
            "constrained by certain organization policies (restrictPublicIP, "
            "restrictAuthorizedNetworks), consider running the beta version of this "
            "command by connecting through the Cloud SQL proxy: "
            "gcloud beta sql connect")
        raise calliope_exceptions.HttpException(error)

    operation_ref = resources.Create('sql.operations',
                                     operation=result.name,
                                     project=instance_ref.project)
    message = ('Allowlisting your IP for incoming connection for '
               '{0} {1}'.format(minutes, text.Pluralize(minutes, 'minute')))

    operations.OperationsV1Beta4.WaitForOperation(sql_client, operation_ref,
                                                  message)

    return acl_name
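The expiration timestamp above forces a 10 ms microsecond component so the
backend parser always sees a fractional-seconds field. A standard-library
sketch of the value it produces (fixed example time; the real code uses
iso_duration.Duration):

import datetime

time_of_connection = datetime.datetime(2020, 1, 1, 12, 0, 0)
minutes = 5
expiration = time_of_connection + datetime.timedelta(minutes=minutes)
# Comply with backend formatting: a microseconds component must be present,
# and anything under one millisecond would be truncated away.
expiration = expiration.replace(microsecond=10000)
print(expiration.isoformat())  # 2020-01-01T12:05:00.010000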
Example #48
0
def Load(account=None,
         scopes=None,
         prevent_refresh=False,
         allow_account_impersonation=True):
    """Get the credentials associated with the provided account.

  This loads credentials regardless of whether credentials have been disabled
  via properties. Only use this when the functionality of the caller absolutely
  requires credentials (like printing out a token) vs logically requiring
  credentials (like for an http request).

  Credential information may come from the stored credential file (representing
  the last gcloud auth command), or the credential cache (representing the last
  time the credentials were refreshed). If they come from the cache, the
  token_response field will be None, as the full server response from the cached
  request was not stored.

  Args:
    account: str, The account address for the credentials being fetched. If
        None, the account stored in the core.account property is used.
    scopes: tuple, Custom auth scopes to request. By default CLOUDSDK_SCOPES
        are requested.
    prevent_refresh: bool, If True, do not refresh the access token even if it
        is out of date. (For use with operations that do not require a current
        access token, such as credential revocation.)
    allow_account_impersonation: bool, True to allow use of impersonated service
      account credentials (if that is configured). If False, the active user
      credentials will always be loaded.

  Returns:
    oauth2client.client.Credentials, The specified credentials.

  Raises:
    NoActiveAccountException: If account is not provided and there is no
        active account.
    NoCredentialsForAccountException: If there are no valid credentials
        available for the provided or active account.
    c_gce.CannotConnectToMetadataServerException: If the metadata server cannot
        be reached.
    TokenRefreshError: If the credentials fail to refresh.
    TokenRefreshReauthError: If the credentials fail to refresh due to reauth.
    AccountImpersonationError: If impersonation is requested but an
      impersonation provider is not configured.
  """
    cred = _Load(account, scopes, prevent_refresh)
    if not allow_account_impersonation:
        return cred
    impersonate_service_account = (
        properties.VALUES.auth.impersonate_service_account.Get())
    if not impersonate_service_account:
        return cred
    if not IMPERSONATION_TOKEN_PROVIDER:
        raise AccountImpersonationError(
            'gcloud is configured to impersonate service account [{}] but '
            'impersonation support is not available.'.format(
                impersonate_service_account))
    log.warning(
        'This command is using service account impersonation. All API calls will '
        'be executed as [{}].'.format(impersonate_service_account))
    return IMPERSONATION_TOKEN_PROVIDER.GetElevationAccessToken(
        impersonate_service_account, scopes or config.CLOUDSDK_SCOPES)
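The impersonation gate above reduces to a small decision: keep the user
credential unless impersonation is both configured and backed by a registered
token provider. A sketch with stand-in values (hypothetical provider object):

def pick_credentials(cred, impersonate_service_account, provider):
    if not impersonate_service_account:
        return cred
    if provider is None:
        # Mirrors the AccountImpersonationError raised above.
        raise RuntimeError('impersonation configured for [{}] but no '
                           'provider is available'
                           .format(impersonate_service_account))
    return provider.GetElevationAccessToken(impersonate_service_account)

print(pick_credentials('user-credential', None, None))  # user-credential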
Example #49
0
    def Run(self, args):
        """This is what gets called when the user runs this command.

    Args:
      args: an argparse namespace. All the arguments that were provided to this
        command invocation.

    Returns:
      Some value that we want to have printed later.
    """
        adapter = self.context['api_adapter']
        location_get = self.context['location_get']
        location = location_get(args, ignore_property=True, required=False)
        project = properties.VALUES.core.project.Get(required=True)

        def sort_key(cluster):
            return (cluster.zone, cluster.name)

        try:
            clusters = adapter.ListClusters(project, location)
            clusters.clusters = sorted(clusters.clusters, key=sort_key)

            if clusters.missingZones:
                log.warning(
                    'The following zones did not respond: {0}. List results may be '
                    'incomplete.'.format(', '.join(clusters.missingZones)))

            upgrade_available = False
            support_ending = False
            unsupported = False
            expiring = False
            self._upgrade_hint = ''
            self._expire_warning = ''
            self._degraded_warning = ''
            vv = VersionVerifier()
            for c in clusters.clusters:
                time_left = transforms.ParseExpireTime(c.expireTime)
                if time_left and time_left.days < constants.EXPIRE_WARNING_DAYS:
                    expiring = True
                if adapter.IsDegraded(c):
                    self._degraded_warning = constants.DEGRADED_WARNING
                if c.enableKubernetesAlpha:
                    # Don't print upgrade hints for alpha clusters, they aren't
                    # upgradeable.
                    continue
                ver_status = vv.Compare(c.currentMasterVersion,
                                        c.currentNodeVersion)
                if ver_status == VersionVerifier.UPGRADE_AVAILABLE:
                    c.currentNodeVersion += ' *'

                    upgrade_available = True
                elif ver_status == VersionVerifier.SUPPORT_ENDING:
                    c.currentNodeVersion += ' **'
                    support_ending = True
                elif ver_status == VersionVerifier.UNSUPPORTED:
                    c.currentNodeVersion += ' ***'
                    unsupported = True

            if upgrade_available:
                self._upgrade_hint += UpgradeHelpText.UPGRADE_AVAILABLE
            if support_ending:
                self._upgrade_hint += UpgradeHelpText.SUPPORT_ENDING
            if unsupported:
                self._upgrade_hint += UpgradeHelpText.UNSUPPORTED
            if self._upgrade_hint:
                self._upgrade_hint += UpgradeHelpText.UPGRADE_COMMAND.format(
                    name='NAME')
            if expiring:
                self._expire_warning = constants.EXPIRE_WARNING

            return clusters.clusters
        except apitools_exceptions.HttpError as error:
            raise exceptions.HttpException(error, util.HTTP_ERROR_FORMAT)
Example #50
0
 def DeprecationFunc(value):
     if show_message(value):
         if removed:
             raise parser_errors.ArgumentError(add_help.message)
         else:
             log.warning(add_help.message)
Example #51
0
def GetAsyncValueFromAsyncAndWaitFlags(async, wait):
  # TODO(b/28523509): Remove this function after July 2017.
  """Derives --async value from --async and --wait flags for gcloud container.

  Args:
    async: The --async flag value
    wait: The --wait flag value.

  Returns:
    boolean representing derived async value
  """
  async_was_set = async is not None
  wait_was_set = wait is not None

  if wait_was_set:
    log.warning('\nThe --wait flag is deprecated and will be removed in a '
                'future release. Use --async or --no-async instead.\n')

  if not async_was_set and not wait_was_set:
    return False  # Waiting is the 'default' value for cloud sdk
  elif async_was_set and not wait_was_set:
    return async
  elif not async_was_set and wait_was_set:
    return not wait
  else:  # async_was_set and wait_was_set
    if (async and wait) or (not async and not wait):
      raise exceptions.InvalidArgumentException('--async',
                                                'You cannot set both the '
                                                '--async and --wait flags.')
    elif async and not wait:
      return True
    else:  # not async or wait
      return False
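For reference, the flag combinations above reduce to a small truth table. A
standalone check (the parameter is renamed is_async so the sketch runs on
Python 3.7+, where async is a reserved word):

def derive_async(is_async, wait):
    # Same decision logic as above, with `async` renamed.
    if is_async is None and wait is None:
        return False           # waiting is the default
    if wait is None:
        return is_async
    if is_async is None:
        return not wait
    if is_async == (not wait):
        return is_async
    raise ValueError('You cannot set both the --async and --wait flags.')

for a in (None, True, False):
    for w in (None, True, False):
        try:
            print(a, w, '->', derive_async(a, w))
        except ValueError as e:
            print(a, w, '->', e)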
Example #52
0
  def Run(self, args):
    """Run 'endpoints services deploy'.

    Args:
      args: argparse.Namespace, The arguments that this command was invoked
          with.

    Returns:
      The response from the Update API call.

    Raises:
      BadFileException: if the provided service configuration files are
          invalid or cannot be read.
    """
    messages = services_util.GetMessagesModule()
    client = services_util.GetClientInstance()

    file_types = messages.ConfigFile.FileTypeValueValuesEnum
    self.service_name = self.service_version = config_contents = None
    config_files = []

    self.validate_only = args.validate_only

    # TODO(b/77867100): remove .proto support and deprecation warning.
    give_proto_deprecate_warning = False

    # If we're not doing a validate-only run, we don't want to output the
    # resource directly unless the user specifically requests it using the
    # --format flag. The Epilog will show useful information after deployment
    # is complete.
    if not self.validate_only and not args.IsSpecified('format'):
      args.format = 'none'

    for service_config_file in args.service_config_file:
      config_contents = services_util.ReadServiceConfigFile(service_config_file)

      if services_util.FilenameMatchesExtension(
          service_config_file, ['.json', '.yaml', '.yml']):
        # Try to load the file as JSON. If that fails, try YAML.
        service_config_dict = services_util.LoadJsonOrYaml(config_contents)
        if not service_config_dict:
          raise calliope_exceptions.BadFileException(
              'Could not read JSON or YAML from service config file '
              '[{0}].'.format(service_config_file))

        if 'swagger' in service_config_dict:
          if 'host' not in service_config_dict:
            raise calliope_exceptions.BadFileException((
                'Malformed input. Found Swagger service config in file [{}], '
                'but no host was specified. Add a host specification to the '
                'config file.').format(
                    service_config_file))
          if not self.service_name and service_config_dict.get('host'):
            self.service_name = service_config_dict.get('host')

          # Always use YAML for Open API because JSON is a subset of YAML.
          config_files.append(
              self.MakeConfigFileMessage(config_contents, service_config_file,
                                         file_types.OPEN_API_YAML))
        elif service_config_dict.get('type') == 'google.api.Service':
          if not self.service_name and service_config_dict.get('name'):
            self.service_name = service_config_dict.get('name')

          config_files.append(
              self.MakeConfigFileMessage(config_contents, service_config_file,
                                         file_types.SERVICE_CONFIG_YAML))
        elif 'name' in service_config_dict:
          # This is a special case. If we have been provided a Google Service
          # Configuration file which has a service 'name' field, but no 'type'
          # field, we have to assume that this is a normalized service config,
          # and can be uploaded via the CreateServiceConfig API. Therefore,
          # we can short-circuit the process here.
          if len(args.service_config_file) > 1:
            raise calliope_exceptions.BadFileException((
                'Ambiguous input. Found normalized service configuration in '
                'file [{0}], but received multiple input files. To upload '
                'normalized service config, please provide it separately from '
                'other input files to avoid ambiguity.').format(
                    service_config_file))

          # If this is a validate-only run, abort now, since this is not
          # supported in the ServiceConfigs.Create API
          if self.validate_only:
            raise exceptions.InvalidFlagError(
                'The --validate-only flag is not supported when using '
                'normalized service configs as input.')

          self.service_name = service_config_dict.get('name')
          config_files = []
          break
        else:
          raise calliope_exceptions.BadFileException((
              'Unable to parse Open API, or Google Service Configuration '
              'specification from {0}').format(service_config_file))

      elif services_util.IsProtoDescriptor(service_config_file):
        config_files.append(
            self.MakeConfigFileMessage(config_contents, service_config_file,
                                       file_types.FILE_DESCRIPTOR_SET_PROTO))
      elif services_util.IsRawProto(service_config_file):
        give_proto_deprecate_warning = True
        config_files.append(
            self.MakeConfigFileMessage(config_contents, service_config_file,
                                       file_types.PROTO_FILE))
      else:
        raise calliope_exceptions.BadFileException((
            'Could not determine the content type of file [{0}]. Supported '
            'extensions are .json .yaml .yml .pb and .descriptor').format(
                service_config_file))

    if give_proto_deprecate_warning:
      log.warning(
          'Support for uploading uncompiled .proto files is deprecated and '
          'will soon be removed. Use compiled descriptor sets (.pb) instead.\n')

    # Check if we need to create the service.
    was_service_created = False
    if not services_util.DoesServiceExist(self.service_name):
      project_id = properties.VALUES.core.project.Get(required=True)
      # Deploying, even with validate-only, cannot succeed without the service
      # being created
      if self.validate_only:
        if not console_io.CanPrompt():
          raise exceptions.InvalidConditionError(VALIDATE_NEW_ERROR.format(
              service_name=self.service_name, project_id=project_id))
        if not console_io.PromptContinue(
            VALIDATE_NEW_PROMPT.format(
                service_name=self.service_name, project_id=project_id)):
          return None
      services_util.CreateService(self.service_name, project_id)
      was_service_created = True

    if config_files:
      push_config_result = services_util.PushMultipleServiceConfigFiles(
          self.service_name, config_files, args.async,
          validate_only=self.validate_only)
      self.service_config_id = (
          services_util.GetServiceConfigIdFromSubmitConfigSourceResponse(
              push_config_result)
      )
    else:
      push_config_result = services_util.PushNormalizedGoogleServiceConfig(
          self.service_name,
          properties.VALUES.core.project.Get(required=True),
          services_util.LoadJsonOrYaml(config_contents))
      self.service_config_id = push_config_result.id

    if not self.service_config_id:
      raise exceptions.InvalidConditionError(
          'Failed to retrieve Service Configuration Id.')

    # Run the Push Advisor to see if we need to warn the user of any
    # potentially hazardous changes to the service configuration.
    if self.CheckPushAdvisor(args.force):
      return None

    # Create a Rollout for the new service configuration
    if not self.validate_only:
      percentages = messages.TrafficPercentStrategy.PercentagesValue()
      percentages.additionalProperties.append(
          (messages.TrafficPercentStrategy.PercentagesValue.AdditionalProperty(
              key=self.service_config_id, value=100.0)))
      traffic_percent_strategy = messages.TrafficPercentStrategy(
          percentages=percentages)
      rollout = messages.Rollout(
          serviceName=self.service_name,
          trafficPercentStrategy=traffic_percent_strategy,)
      rollout_create = messages.ServicemanagementServicesRolloutsCreateRequest(
          rollout=rollout,
          serviceName=self.service_name,
      )
      rollout_operation = client.services_rollouts.Create(rollout_create)
      services_util.ProcessOperationResult(rollout_operation, args.async)

      if was_service_created:
        self.AttemptToEnableService(
            services_util.GetEndpointsServiceName(), args.async)
        self.AttemptToEnableService(self.service_name, args.async)

    return push_config_result
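The rollout created above pins 100% of traffic to the newly pushed config id.
A minimal sketch of the same strategy as plain data (hypothetical service
name and config id, mirroring the Rollout message shape):

service_config_id = '2019-01-01r0'
rollout = {
    'serviceName': 'echo.endpoints.example-project.cloud.goog',
    'trafficPercentStrategy': {
        'percentages': {service_config_id: 100.0},
    },
}
print(rollout)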
Example #53
0
  def Run(self, args):
    """This is what gets called when the user runs this command.

    Args:
      args: an argparse namespace. All the arguments that were provided to this
        command invocation.

    Returns:
      Some value that we want to have printed later.
    """
    adapter = self.context['api_adapter']
    location_get = self.context['location_get']
    location = location_get(args)
    cluster_refs = []
    for name in args.names:
      cluster_refs.append(adapter.ParseCluster(name, location))
    console_io.PromptContinue(
        message=util.ConstructList('The following clusters will be deleted.', [
            '[{name}] in [{zone}]'.format(
                name=ref.clusterId, zone=adapter.Zone(ref))
            for ref in cluster_refs
        ]),
        throw_if_unattended=True,
        cancel_on_no=True)

    operations = []
    errors = []
    # Issue all deletes first
    for cluster_ref in cluster_refs:
      try:
        op_ref = adapter.DeleteCluster(cluster_ref)
        operations.append((op_ref, cluster_ref))
      except apitools_exceptions.HttpError as error:
        errors.append(
            str(exceptions.HttpException(error, util.HTTP_ERROR_FORMAT)))
      except util.Error as error:
        errors.append(error)
    if not args.async:
      # Poll each operation for completion
      for operation_ref, cluster_ref in operations:
        try:
          adapter.WaitForOperation(
              operation_ref,
              'Deleting cluster {0}'.format(cluster_ref.clusterId),
              timeout_s=args.timeout)
          # Purge cached config files
          try:
            util.ClusterConfig.Purge(cluster_ref.clusterId,
                                     adapter.Zone(cluster_ref),
                                     cluster_ref.projectId)
          except kconfig.MissingEnvVarError as error:
            log.warning(error)

          if properties.VALUES.container.cluster.Get() == cluster_ref.clusterId:
            properties.PersistProperty(properties.VALUES.container.cluster,
                                       None)
          log.DeletedResource(cluster_ref)
        except apitools_exceptions.HttpError as error:
          errors.append(exceptions.HttpException(error, util.HTTP_ERROR_FORMAT))
        except util.Error as error:
          errors.append(error)

    if errors:
      raise util.Error(
          util.ConstructList('Some requests did not succeed:', errors))
Example #54
0
  def __init__(self):
    resources.REGISTRY.RegisterApiByName('ml', 'v1beta1')
    # TODO(b/36712515) Remove this warning and cut over.
    log.warning(flags.V1BETA1_DEPRECATION_WARNING)
Example #55
0
  def Epilog(self, resources_were_displayed):
    if resources_were_displayed and types.IsAuditLogType(self._event_type):
      log.warning(
          'It may take up to {} minutes for the new trigger to become active.'
          .format(triggers.MAX_ACTIVE_DELAY_MINUTES))
Example #56
0
  def WarnIfDeprecated(self):
    """Warns that this runtime is deprecated (if it has been marked as such)."""
    if self.deprecation_message:
      log.warning(self.deprecation_message)
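WarnIfDeprecated assumes its host class carries a deprecation_message attribute, which may be None. A minimal sketch of that contract (Runtime here is illustrative, not the SDK's actual class):

# Minimal sketch of the attribute contract WarnIfDeprecated relies on.
class Runtime(object):

  def __init__(self, deprecation_message=None):
    self.deprecation_message = deprecation_message

  def WarnIfDeprecated(self):
    if self.deprecation_message:
      log.warning(self.deprecation_message)

Runtime('python27 is deprecated').WarnIfDeprecated()  # logs a warning
Runtime().WarnIfDeprecated()                          # no-op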
Example #57
0
class Create(base.CreateCommand):
  """Create a cluster for running containers."""

  @staticmethod
  def Args(parser):
    _Args(parser)
    _AddAdditionalZonesFlag(parser, deprecated=True)
    flags.AddNodeLocationsFlag(parser)
    flags.AddAddonsFlags(parser)
    flags.AddClusterAutoscalingFlags(parser)
    flags.AddEnableAutoRepairFlag(parser, for_create=True)
    flags.AddEnableKubernetesAlphaFlag(parser)
    flags.AddEnableLegacyAuthorizationFlag(parser)
    flags.AddIPAliasFlags(parser)
    flags.AddLabelsFlag(parser)
    flags.AddLocalSSDFlag(parser)
    flags.AddMaintenanceWindowFlag(parser)
    flags.AddMasterAuthorizedNetworksFlags(parser)
    flags.AddMinCpuPlatformFlag(parser)
    flags.AddNetworkPolicyFlags(parser)
    flags.AddNodeTaintsFlag(parser)
    flags.AddPreemptibleFlag(parser)
    flags.AddDeprecatedClusterNodeIdentityFlags(parser)
    flags.AddPrivateClusterFlags(
        parser, with_deprecated=False, with_alpha=False)

  def ParseCreateOptions(self, args):
    flags.WarnGAForFutureAutoUpgradeChange()
    return ParseCreateOptionsBase(args)

  def Run(self, args):
    """This is what gets called when the user runs this command.

    Args:
      args: an argparse namespace. All the arguments that were provided to this
        command invocation.

    Returns:
      The created Cluster message, wrapped in a list, or the in-progress
      cluster resource when run with --async.

    Raises:
      util.Error, if creation failed.
    """
    if args.async and not args.IsSpecified('format'):
      args.format = util.OPERATIONS_FORMAT

    util.CheckKubectlInstalled()

    adapter = self.context['api_adapter']
    location_get = self.context['location_get']
    location = location_get(args)

    cluster_ref = adapter.ParseCluster(args.name, location)
    options = self.ParseCreateOptions(args)

    if options.private_cluster and not (
        options.enable_master_authorized_networks or
        options.master_authorized_networks):
      log.warning(
          '`--private-cluster` makes the master inaccessible from '
          'cluster-external IP addresses, by design. To allow limited '
          'access to the master, see the `--master-authorized-networks` flag '
          'and our documentation on setting up private clusters: '
          'https://cloud.google.com'
          '/kubernetes-engine/docs/how-to/private-clusters')

    if not (options.metadata and
            'disable-legacy-endpoints' in options.metadata):
      log.warning('Starting in 1.12, default node pools in new clusters '
                  'will have their legacy Compute Engine instance metadata '
                  'endpoints disabled by default. To create a cluster with '
                  'legacy instance metadata endpoints disabled in the default '
                  'node pool, run `clusters create` with the flag '
                  '`--metadata disable-legacy-endpoints=true`.')

    if options.enable_kubernetes_alpha:
      console_io.PromptContinue(
          message=constants.KUBERNETES_ALPHA_PROMPT,
          throw_if_unattended=True,
          cancel_on_no=True)

    if options.enable_autorepair is not None:
      log.status.Print(
          messages.AutoUpdateUpgradeRepairMessage(options.enable_autorepair,
                                                  'autorepair'))

    if options.enable_autoupgrade is not None:
      log.status.Print(
          messages.AutoUpdateUpgradeRepairMessage(options.enable_autoupgrade,
                                                  'autoupgrade'))

    if options.accelerators is not None:
      log.status.Print(constants.KUBERNETES_GPU_LIMITATION_MSG)

    operation = None
    try:
      operation_ref = adapter.CreateCluster(cluster_ref, options)
      if args.async:
        return adapter.GetCluster(cluster_ref)

      operation = adapter.WaitForOperation(
          operation_ref,
          'Creating cluster {0} in {1}'.format(cluster_ref.clusterId,
                                               cluster_ref.zone),
          timeout_s=args.timeout)
      cluster = adapter.GetCluster(cluster_ref)
    except apitools_exceptions.HttpError as error:
      raise exceptions.HttpException(error, util.HTTP_ERROR_FORMAT)

    log.CreatedResource(cluster_ref)
    cluster_url = util.GenerateClusterUrl(cluster_ref)
    log.status.Print('To inspect the contents of your cluster, go to: ' +
                     cluster_url)
    if operation.detail:
      # Non-empty detail on a DONE create operation should be surfaced as
      # a warning to end user.
      log.warning(operation.detail)

    try:
      util.ClusterConfig.Persist(cluster, cluster_ref.projectId)
    except kconfig.MissingEnvVarError as error:
      log.warning(error)

    return [cluster]
Example #58
0
def PopulatePublicKey(api_client, service_id, version_id, instance_id,
                      public_key):
  """Enables debug mode and sends SSH keys to a Flex instance.

  Common method for SSH-like commands, does the following:
  - Makes sure that the service/version/instance specified exists and is of the
    right type (Flexible).
  - If not already done, prompts and enables debug on the instance.
  - Populates the public key onto the instance.

  Args:
    api_client: An appengine_api_client.AppEngineApiClient.
    service_id: str, The service ID.
    version_id: str, The version ID.
    instance_id: str, The instance ID.
    public_key: ssh.Keys.PublicKey, Public key to send.

  Raises:
    InvalidInstanceTypeError: The instance is not supported for SSH.
    MissingVersionError: The version specified does not exist.
    MissingInstanceError: The instance specified does not exist.
    UnattendedPromptError: Not running in a tty.
    OperationCancelledError: User cancelled the operation.

  Returns:
    ConnectionDetails, the details to use for SSH/SCP for the SSH
    connection.
  """
  try:
    version = api_client.GetVersionResource(service=service_id,
                                            version=version_id)
  except apitools_exceptions.HttpNotFoundError:
    raise command_exceptions.MissingVersionError('{}/{}'.format(
        service_id, version_id))
  version = version_util.Version.FromVersionResource(version, None)
  if version.environment is not util.Environment.FLEX:
    if version.environment is util.Environment.MANAGED_VMS:
      environment = 'Managed VMs'
      msg = 'Use `gcloud compute ssh` for Managed VMs instances.'
    else:
      environment = 'Standard'
      msg = None
    raise command_exceptions.InvalidInstanceTypeError(environment, msg)
  res = resources.REGISTRY.Parse(
      instance_id,
      params={
          'appsId': properties.VALUES.core.project.GetOrFail,
          'versionsId': version_id,
          'instancesId': instance_id,
          'servicesId': service_id,
      },
      collection='appengine.apps.services.versions.instances')
  rel_name = res.RelativeName()
  try:
    instance = api_client.GetInstanceResource(res)
  except apitools_exceptions.HttpNotFoundError:
    raise command_exceptions.MissingInstanceError(rel_name)

  if not instance.vmDebugEnabled:
    log.warning(_ENABLE_DEBUG_WARNING)
    console_io.PromptContinue(cancel_on_no=True, throw_if_unattended=True)
  user = ssh.GetDefaultSshUsername()
  remote = ssh.Remote(instance.vmIp, user=user)
  ssh_key = '{user}:{key} {user}'.format(user=user, key=public_key.ToEntry())
  log.status.Print('Sending public key to instance [{}].'.format(rel_name))
  api_client.DebugInstance(res, ssh_key)
  options = {
      'IdentitiesOnly': 'yes',  # No ssh-agent as of yet
      'UserKnownHostsFile': ssh.KnownHosts.DEFAULT_PATH,
      'CheckHostIP': 'no',
      'HostKeyAlias': _HOST_KEY_ALIAS.format(project=api_client.project,
                                             instance_id=instance_id),
  }
  return ConnectionDetails(remote, options)
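The options dict above maps one-to-one onto ssh -o Key=value flags. A sketch of how such a dict could be flattened into an argument vector (OptionsToSshArgs is illustrative, not the SDK's actual helper):

# Illustrative only: turns an options dict into `-o Key=value` arguments.
def OptionsToSshArgs(remote_host, options):
  args = ['ssh']
  for key, value in sorted(options.items()):
    args.extend(['-o', '{0}={1}'.format(key, value)])
  args.append(remote_host)
  return args

# OptionsToSshArgs('user@10.0.0.2', {'CheckHostIP': 'no'})
# -> ['ssh', '-o', 'CheckHostIP=no', 'user@10.0.0.2']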
Example #59
0
def ParseCreateOptionsBase(args):
  """Parses the flags provided with the cluster creation command."""
  if not (args.IsSpecified('enable_basic_auth') or
          args.IsSpecified('username')):
    log.warning('Starting in 1.12, new clusters will have basic '
                'authentication disabled by default. Basic authentication '
                'can be enabled (or disabled) manually using the '
                '`--[no-]enable-basic-auth` flag.')
  if not args.IsSpecified('issue_client_certificate'):
    log.warning('Starting in 1.12, new clusters will not have a client '
                'certificate issued. You can manually enable (or disable) the '
                'issuance of the client certificate using the '
                '`--[no-]issue-client-certificate` flag.')

  flags.MungeBasicAuthFlags(args)

  if args.IsSpecified('issue_client_certificate') and not (
      args.IsSpecified('enable_basic_auth') or args.IsSpecified('username')):
    log.warning('If `--issue-client-certificate` is specified but '
                '`--enable-basic-auth` or `--username` is not, our API will '
                'treat that as `--no-enable-basic-auth`.')

  if (args.IsSpecified('enable_cloud_endpoints') and
      properties.VALUES.container.new_scopes_behavior.GetBool()):
    raise util.Error('Flag --[no-]enable-cloud-endpoints is not allowed if '
                     'property container/new_scopes_behavior is set to true.')
  if args.IsSpecified('enable_autorepair'):
    enable_autorepair = args.enable_autorepair
  else:
    # Node pools using COS support auto repairs, so enable it for them by
    # default. Node pools using other images (Ubuntu, custom images) don't
    # support node auto repairs; attempting to enable autorepair for them
    # would make the API call fail, so don't.
    enable_autorepair = ((args.image_type or '').lower() in ['', 'cos'])
  flags.WarnForUnspecifiedIpAllocationPolicy(args)
  flags.WarnForNodeModification(args, enable_autorepair)
  metadata = metadata_utils.ConstructMetadataDict(args.metadata,
                                                  args.metadata_from_file)
  return api_adapter.CreateClusterOptions(
      accelerators=args.accelerator,
      additional_zones=args.additional_zones,
      addons=args.addons,
      cluster_ipv4_cidr=args.cluster_ipv4_cidr,
      cluster_secondary_range_name=args.cluster_secondary_range_name,
      cluster_version=args.cluster_version,
      node_version=args.node_version,
      create_subnetwork=args.create_subnetwork,
      disk_type=args.disk_type,
      enable_autorepair=enable_autorepair,
      enable_autoscaling=args.enable_autoscaling,
      enable_autoupgrade=args.enable_autoupgrade,
      enable_cloud_endpoints=args.enable_cloud_endpoints,
      enable_cloud_logging=args.enable_cloud_logging,
      enable_cloud_monitoring=args.enable_cloud_monitoring,
      enable_ip_alias=args.enable_ip_alias,
      enable_kubernetes_alpha=args.enable_kubernetes_alpha,
      enable_legacy_authorization=args.enable_legacy_authorization,
      enable_master_authorized_networks=args.enable_master_authorized_networks,
      enable_network_policy=args.enable_network_policy,
      enable_private_nodes=args.enable_private_nodes,
      enable_private_endpoint=args.enable_private_endpoint,
      image_type=args.image_type,
      image=args.image,
      image_project=args.image_project,
      image_family=args.image_family,
      issue_client_certificate=args.issue_client_certificate,
      labels=args.labels,
      local_ssd_count=args.local_ssd_count,
      maintenance_window=args.maintenance_window,
      master_authorized_networks=args.master_authorized_networks,
      master_ipv4_cidr=args.master_ipv4_cidr,
      max_nodes=args.max_nodes,
      max_nodes_per_pool=args.max_nodes_per_pool,
      min_cpu_platform=args.min_cpu_platform,
      min_nodes=args.min_nodes,
      network=args.network,
      node_disk_size_gb=utils.BytesToGb(args.disk_size),
      node_labels=args.node_labels,
      node_locations=args.node_locations,
      node_machine_type=args.machine_type,
      node_taints=args.node_taints,
      num_nodes=args.num_nodes,
      password=args.password,
      preemptible=args.preemptible,
      scopes=args.scopes,
      service_account=args.service_account,
      services_ipv4_cidr=args.services_ipv4_cidr,
      services_secondary_range_name=args.services_secondary_range_name,
      subnetwork=args.subnetwork,
      tags=args.tags,
      user=args.username,
      metadata=metadata)
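The enable_autorepair fallback above keys off the node image: an unset image type defaults to COS, which supports auto repair, while Ubuntu and custom images do not. The predicate in isolation (a sketch; _DefaultAutorepair is a hypothetical name):

# Sketch of the default-autorepair predicate used above.
def _DefaultAutorepair(image_type):
  return (image_type or '').lower() in ['', 'cos']

assert _DefaultAutorepair(None)          # unset -> COS -> autorepair on
assert _DefaultAutorepair('COS')         # case-insensitive match
assert not _DefaultAutorepair('ubuntu')  # unsupported image -> off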
Example #60
0
def PrintBetaResourceDeletionDisclaimer(resource_type_plural):
  log.warning(
      _BETA_RESOURCE_DELETION_DISCLAIMER.format(
          resource_type=resource_type_plural))
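Usage is a one-liner at the top of a beta delete command's Run method, e.g. (the _BETA_RESOURCE_DELETION_DISCLAIMER constant is defined elsewhere in the module, and 'clusters' is a placeholder resource type):

PrintBetaResourceDeletionDisclaimer('clusters')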