Example #1
  def _GetRules(self, path):
    """Returns the set of rules that apply to a given path.

    Searches all parent paths for rules, returning the concatenation of all the
    rules.

    Args:
      path: The path to check.

    Returns:
      A list of (RegEx, Bool) pairs indicating file name patterns to include/
      exclude.
    """
    # Build an array of the parent directories of the given path, from root
    # down. On Windows, drive letters will be handled as if they were
    # directories under root.
    dirs = ['/']
    pos = str.find(path, '/')
    if pos == 0:
      pos = str.find(path, '/', 1)
    while pos != -1:
      dirs.append(path[0:pos])
      pos = str.find(path, '/', pos+1)
    dirs.append(path)
    rules = []
    for d in dirs:
      if d in self._ignore_rules:
        log.debug('{0}: Applying rules for {1}'.format(path, d))
        rules.extend(self._ignore_rules[d])
    return rules
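
The helper above expands a path into its chain of parent directories and concatenates the ignore rules registered for each one. A minimal standalone sketch of that expansion (the helper name and sample path are illustrative, not part of the SDK):

def parent_dirs(path):
    # Build the list of parent directories from root down, mirroring _GetRules.
    dirs = ['/']
    pos = path.find('/')
    if pos == 0:
        pos = path.find('/', 1)
    while pos != -1:
        dirs.append(path[0:pos])
        pos = path.find('/', pos + 1)
    dirs.append(path)
    return dirs

print(parent_dirs('/src/app/main.py'))
# ['/', '/src', '/src/app', '/src/app/main.py']
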
Example #2
  def ReportMetrics(self, wait_for_report=False):
    """Reports the collected metrics using a separate async process."""
    if not self._metrics:
      return

    temp_metrics_file = tempfile.NamedTemporaryFile(delete=False)
    with temp_metrics_file:
      pickle.dump(self._metrics, temp_metrics_file)
      self._metrics = []

    reporting_script_path = os.path.join(
        config.GoogleCloudSDKPackageRoot(), 'core', 'metrics_reporter.py')
    execution_args = execution_utils.ArgsForPythonTool(
        reporting_script_path, temp_metrics_file.name)

    exec_env = os.environ.copy()
    python_path_var = 'PYTHONPATH'
    python_path = exec_env.get(python_path_var)
    if python_path:
      python_path += os.pathsep + config.LibraryRoot()
    else:
      python_path = config.LibraryRoot()
    exec_env[python_path_var] = python_path

    p = subprocess.Popen(execution_args, env=exec_env, **self._async_popen_args)
    if wait_for_report:
      # NOTE: p.wait() can cause a deadlock. p.communicate() is recommended.
      # See python docs for more information.
      p.communicate()
    log.debug('Metrics reporting process started...')
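
The PYTHONPATH handling above lets the child reporting process import the SDK's bundled libraries. A minimal sketch of that environment merge, assuming a placeholder library directory rather than the real config.LibraryRoot():

import os

def env_with_library(extra_lib_dir):
    # Copy the current environment and append the extra directory to PYTHONPATH.
    env = os.environ.copy()
    existing = env.get('PYTHONPATH')
    env['PYTHONPATH'] = (existing + os.pathsep + extra_lib_dir
                         if existing else extra_lib_dir)
    return env

# Usage (hypothetical path): subprocess.Popen(args, env=env_with_library('/opt/sdk/lib'))
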
Example #3
  def HandleEndpointsError(self, user_error):
    """Handle an error state returned by IsEndpointsConfigUpdated.

    Args:
      user_error: Either None or a string with a message from the server
        that indicates what the error was and how the user should resolve it.

    Raises:
      Error: The update state is fatal and the user hasn't chosen
        to ignore Endpoints errors.
    """
    detailed_error = user_error or (
        "Check the app's AppEngine logs for errors: %s" % self.GetLogUrl())
    error_message = ('Failed to update Endpoints configuration.  %s' %
                     detailed_error)
    log.error(error_message)

    # Also display a link to the Python troubleshooting documentation.
    doc_link = ('https://developers.google.com/appengine/docs/python/'
                'endpoints/test_deploy#troubleshooting_a_deployment_failure')
    log.error('See the deployment troubleshooting documentation for more '
              'information: %s' % doc_link)

    if self.ignore_endpoints_failures:
      log.debug('Ignoring Endpoints failure and proceeding with update.')
    else:
      raise Error(error_message)
Example #4
  def CreateApp(self, location):
    """Creates an App Engine app within the current cloud project.

    Creates a new singleton app within the currently selected Cloud Project.
    The action is one-time and irreversible.

    Args:
      location: str, The location (region) of the app, i.e. "us-central"

    Raises:
      apitools_exceptions.HttpConflictError if app already exists

    Returns:
      A long running operation.
    """
    create_request = self.messages.Application(id=self.project,
                                               locationId=location)

    operation = self.client.apps.Create(create_request)

    log.debug('Received operation: [{operation}]'.format(
        operation=operation.name))

    message = ('Creating App Engine application in project [{project}] and '
               'region [{region}].'.format(project=self.project,
                                           region=location))
    return operations_util.WaitForOperation(self.client.apps_operations,
                                            operation, message=message)
Example #5
  def Call(self, request, global_params=None, raw=False,
           limit=None, page_size=None):
    """Executes this method with the given arguments.

    Args:
      request: The apitools request object to send.
      global_params: {str: str}, A dictionary of global parameters to send with
        the request.
      raw: bool, True to not do any processing of the response, False to maybe
        do processing for List results.
      limit: int, The max number of items to return if this is a List method.
      page_size: int, The max number of items to return in a page if this API
        supports paging.

    Returns:
      The response from the API.
    """
    client = apis.GetClientInstance(
        self.collection.api_name, self.collection.api_version)
    service = _GetService(client, self.collection.name)
    request_func = self._GetRequestFunc(
        service, request, raw=raw, limit=limit, page_size=page_size)
    try:
      return request_func(global_params=global_params)
    except apitools_exc.InvalidUserInputError as e:
      log.debug('', exc_info=True)
      raise APICallError(e.message)
Example #6
    def CloneFiles(url, files, file_type):
      """Sends files to the given url.

      Args:
        url: the server URL to use.
        files: a list of files
        file_type: the type of the files
      """
      if not files:
        return

      log.debug('Cloning %d %s file%s.' %
                (len(files), file_type, len(files) != 1 and 's' or ''))
      # Do only N files at a time to avoid huge requests and responses.
      max_files = self.resource_limits['max_files_to_clone']
      for i in xrange(0, len(files), max_files):
        if i > 0 and i % max_files == 0:
          log.debug('Cloned %d files.' % i)

        chunk = files[i:min(len(files), i + max_files)]
        result = self.logging_context.Send(url,
                                           payload=BuildClonePostBody(chunk))
        if result:
          to_upload = {}
          for f in result.split(LIST_DELIMITER):
            for entry in files:
              real_path, upload_path = entry[:2]
              if f == upload_path:
                to_upload[real_path] = self.files[real_path]
                break
          files_to_upload.update(to_upload)
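
CloneFiles above deliberately sends at most max_files entries per request. A small standalone sketch of the same batching pattern (Python 3 range instead of xrange; the chunk size and file names are made up):

def chunks(items, size):
    # Yield successive slices of at most `size` items.
    for i in range(0, len(items), size):
        yield items[i:i + size]

files = ['f%d' % n for n in range(7)]
for batch in chunks(files, 3):
    print(batch)
# ['f0', 'f1', 'f2'], then ['f3', 'f4', 'f5'], then ['f6']
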
Example #7
  def Deploy(self):
    """Deploys the new app version but does not make it default.

    All the files returned by Begin() must have been uploaded with UploadFile()
    before Deploy() can be called.

    Returns:
      An appinfo.AppInfoSummary if one was returned from the Deploy, None
      otherwise.

    Raises:
      Error: Some required files were not uploaded.
    """
    assert self.in_transaction, 'Begin() must be called before Deploy().'
    if self.files:
      raise Error('Not all required files have been uploaded.')

    log.debug('Starting deployment.')
    result = self.logging_context.Send('/api/appversion/deploy')
    self.deployed = True

    if result:
      return yaml_object.BuildSingleObject(appinfo.AppInfoSummary, result)
    else:
      return None
Example #8
    def RequestLogLines(self, tf, offset, valid_dates, sentinel):
        """Make a single roundtrip to the server.

    Args:
      tf: Writable binary stream to which the log lines returned by
        the server are written, stripped of headers, and excluding
        lines skipped due to self.sentinel or self.valid_dates filtering.
      offset: Offset string for a continued request; None for the first.
      valid_dates: (datetime.date, datetime.date), A tuple of start and end
        dates to get the logs between.
      sentinel: str, The last line in the log file we are appending to, or None.

    Returns:
      The offset string to be used for the next request, if another
      request should be issued; or None, if not.
    """
        log.debug("Request with offset %r.", offset)
        params = dict(self._params)
        if offset:
            params["offset"] = offset

        response = self.rpcserver.Send("/api/request_logs", payload=None, **params)
        response = response.replace("\r", "\0")
        lines = response.splitlines()
        log.info("Received %d bytes, %d records.", len(response), len(lines))

        # Move all references to self.<anything> out of the loop.
        is_skipping = True
        (start, end) = valid_dates
        next_offset_regex = self._next_offset_regex
        len_sentinel = len(sentinel) if sentinel else None

        next_offset = None
        for line in lines:
            if line.startswith("#"):
                match = next_offset_regex.match(line)
                # We are now (May 2014) frequently seeing None instead of a blank or
                # not-present next_offset at all. This extra check handles that.
                if match and match.group(1) != "None":
                    next_offset = match.group(1)
                continue

            if sentinel and line.startswith(sentinel) and line[len_sentinel : len_sentinel + 1] in ("", "\0"):
                return None

            linedate = self.DateOfLogLine(line)
            # We don't write unparseable log lines, ever.
            if not linedate:
                continue
            if is_skipping:
                if linedate > end:
                    continue
                else:
                    # We are in the good date range, stop doing date comparisons.
                    is_skipping = False

            if start and linedate < start:
                return None
            tf.write(line + "\n")
        return next_offset
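
RequestLogLines is designed to be called repeatedly, feeding each returned offset into the next call until the server stops returning one. A hedged sketch of that driver loop, with a stub function standing in for the actual RPC round trip:

def fetch_all(request_page):
    """request_page(offset) -> (lines, next_offset or None); stands in for the RPC."""
    offset = None
    collected = []
    while True:
        lines, offset = request_page(offset)
        collected.extend(lines)
        if offset is None:
            return collected

# Fake two-page source to exercise the loop:
pages = {None: (['line 1', 'line 2'], 'off-1'), 'off-1': (['line 3'], None)}
print(fetch_all(lambda off: pages[off]))  # ['line 1', 'line 2', 'line 3']
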
Example #9
  def GenKubeconfig(self):
    """Generate kubeconfig for this cluster."""
    context = self.kube_context
    kubeconfig = kconfig.Kubeconfig.Default()
    cluster_kwargs = {}
    user_kwargs = {
        'token': self.token,
        'username': self.username,
        'password': self.password,
        'auth_provider': self.auth_provider,
    }
    if self.has_ca_cert:
      cluster_kwargs['ca_data'] = self.ca_data
    if self.has_cert_data:
      user_kwargs['cert_data'] = self.client_cert_data
      user_kwargs['key_data'] = self.client_key_data

    # Use same key for context, cluster, and user
    kubeconfig.contexts[context] = kconfig.Context(context, context, context)
    kubeconfig.users[context] = kconfig.User(context, **user_kwargs)
    kubeconfig.clusters[context] = kconfig.Cluster(
        context, self.server, **cluster_kwargs)
    kubeconfig.SetCurrentContext(context)
    kubeconfig.SaveToFile()

    path = kconfig.Kubeconfig.DefaultPath()
    log.debug('Saved kubeconfig to %s', path)
    log.status.Print(KUBECONFIG_USAGE_FMT.format(
        cluster=self.cluster_name, context=context))
Example #10
  def _SendJsonRequest(self, method, url, body=None):
    """Sends a request to the broker.

    Args:
      method: (str) The HTTP method.
      url: (str) The URL path.
      body: (str) The request body.

    Returns:
      (HTTPResponse, str) or (None, None).

    Raises:
      IOError: The request could not be sent.
    """
    conn = self._NewConnection()
    headers = {}
    if body is not None:
      headers['Content-Type'] = 'application/json'
    try:
      conn.request(method, url, body=body, headers=headers)
      resp = conn.getresponse()
      data = resp.read()
      return (resp, data)
    except IOError as e:
      log.debug('Error sending request: %r', e)
      raise
    finally:
      conn.close()
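
For reference, a standalone sketch of the same send-and-close pattern using only the standard library (http.client in Python 3); the host and URL path below are placeholders, not the broker's real endpoint:

import http.client

def send_json_request(host, method, url, body=None):
    # Open a connection, send an optional JSON body, and always close the
    # connection, even if the request raises.
    conn = http.client.HTTPConnection(host)
    headers = {'Content-Type': 'application/json'} if body is not None else {}
    try:
        conn.request(method, url, body=body, headers=headers)
        resp = conn.getresponse()
        return resp, resp.read()
    finally:
        conn.close()

# Usage (placeholder endpoint):
# resp, data = send_json_request('localhost:8080', 'POST', '/start', body='{}')
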
Example #11
def RunGsutilCommand(command_name, command_arg_str, run_concurrent=False):
    """Runs the specified gsutil command and returns the command's exit code.

  Args:
    command_name: The gsutil command to run.
    command_arg_str: Arguments to pass to the command.
    run_concurrent: Whether concurrent uploads should be enabled while running
      the command.

  Returns:
    The exit code of the call to the gsutil command.
  """
    command_path = _GetGsutilPath()

    if run_concurrent:
        command_args = ["-m", command_name]
    else:
        command_args = [command_name]

    command_args += command_arg_str.split(" ")

    if platforms.OperatingSystem.Current() == platforms.OperatingSystem.WINDOWS:
        gsutil_args = execution_utils.ArgsForCMDTool(command_path + ".cmd", *command_args)
    else:
        gsutil_args = execution_utils.ArgsForExecutableTool(command_path, *command_args)
    log.debug("Running command: [{args}]".format(args=" ".join(gsutil_args)))
    return execution_utils.Exec(
        gsutil_args, no_exit=True, out_func=log.file_only_logger.debug, err_func=log.file_only_logger.debug
    )
Example #12
def HandleError(exc, command_path, known_error_handler=None):
  """Handles an error that occurs during command execution.

  It calls ConvertKnownError to convert exceptions to known types before
  processing. If it is a known type, it is printed nicely as an error. If not,
  it is raised as a crash.

  Args:
    exc: Exception, The original exception that occurred.
    command_path: str, The name of the command that failed (for error
      reporting).
    known_error_handler: f(exc): A function to process known errors.
  """
  known_exc, print_error = ConvertKnownError(exc)
  if known_exc:
    msg = u'({0}) {1}'.format(
        console_attr.SafeText(command_path),
        console_attr.SafeText(known_exc))
    log.debug(msg, exc_info=sys.exc_info())
    if print_error:
      log.error(msg)
    # Uncaught errors will be handled in gcloud_main.
    if known_error_handler:
      known_error_handler(exc)
    if properties.VALUES.core.print_handled_tracebacks.GetBool():
      raise
    _Exit(known_exc)
  else:
    # Make sure any uncaught exceptions still make it into the log file.
    log.debug(console_attr.SafeText(exc), exc_info=sys.exc_info())
    raise
Example #13
 def RequestWithErrHandling(*args, **kwargs):
   try:
     return orig_request(*args, **kwargs)
   except client.AccessTokenRefreshError as e:
     log.debug('Exception caught during HTTP request: %s', e.message,
               exc_info=True)
     raise CannotRefreshAuthTokenError(e.message)
Example #14
 def GetVersion(image):
   """Extracts the "20140718" from an image name like "debian-v20140718"."""
   parts = image.name.rsplit('v', 1)
   if len(parts) != 2:
     log.debug('Skipping image with malformed name [%s].', image.name)
     return None
   return parts[1]
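
A quick illustration of the rsplit('v', 1) trick above, splitting only on the last 'v' in the image name:

name = 'debian-v20140718'
parts = name.rsplit('v', 1)
print(parts)     # ['debian-', '20140718']
print(parts[1])  # '20140718'
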
Example #15
  def DeployService(
      self, service_name, version_id, service_config, manifest, image):
    """Updates and deploys new app versions based on given config.

    Args:
      service_name: str, The service to deploy.
      version_id: str, The version of the service to deploy.
      service_config: AppInfoExternal, Service info parsed from a service yaml
        file.
      manifest: Dictionary mapping source files to Google Cloud Storage
        locations.
      image: The name of the container image.
    Returns:
      A Version resource representing the deployed version.
    """
    version_resource = self._CreateVersionResource(service_config, manifest,
                                                   version_id, image)
    create_request = self.messages.AppengineAppsModulesVersionsCreateRequest(
        name=self._FormatService(app_id=self.project,
                                 service_name=service_name),
        version=version_resource)

    operation = requests.MakeRequest(
        self.client.apps_modules_versions.Create, create_request)

    log.debug('Received operation: [{operation}]'.format(
        operation=operation.name))

    return operations.WaitForOperation(self.client.apps_operations, operation)
Example #16
  def RunOpenSSL(self, cmd_args, cmd_input=None):
    """Run an openssl command with optional input and return the output."""

    command = [self.openssl_executable]
    command.extend(cmd_args)

    try:
      p = subprocess.Popen(command, stdin=subprocess.PIPE,
                           stdout=subprocess.PIPE, stderr=subprocess.PIPE)
      output, stderr = p.communicate(cmd_input)
      log.debug('Ran command "{0}" with standard error of:\n{1}'
                .format(' '.join(command), stderr))
    except OSError as e:
      # This should be rare. Generally, OSError will show up when openssl
      # doesn't exist or can't be executed. However, in the code, we use
      # "FindExecutableOnPath" which already checks for these things.
      raise OpenSSLException(
          '[{0}] exited with [{1}].'.format(command[0], e.strerror))

    if p.returncode:
      # This will happen whenever there is an openssl failure (e.g. a failure
      # to decrypt a message with the given key).
      raise OpenSSLException('[{0}] exited with return code [{1}]:\n{2}.'
                             .format(command[0], p.returncode, stderr))
    return output
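
A hedged, standalone variant of the call pattern above using subprocess.run from Python 3 (the function name is illustrative, and it assumes an openssl binary on PATH):

import subprocess

def run_openssl(args, cmd_input=None):
    # Run an openssl subcommand, return its stdout, and raise on failure.
    p = subprocess.run(['openssl'] + list(args), input=cmd_input,
                       capture_output=True, text=True)
    if p.returncode:
        raise RuntimeError('openssl exited with {0}:\n{1}'.format(
            p.returncode, p.stderr))
    return p.stdout

# Usage: print(run_openssl(['version']))
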
Example #17
def RunGsutilCommand(command_name, command_args=None, run_concurrent=False):
  """Runs the specified gsutil command and returns the command's exit code.

  This is more reliable than storage_api.StorageClient.CopyFilesToGcs, especially
  for large files.

  Args:
    command_name: The gsutil command to run.
    command_args: List of arguments to pass to the command.
    run_concurrent: Whether concurrent uploads should be enabled while running
      the command.

  Returns:
    The exit code of the call to the gsutil command.
  """
  command_path = _GetGsutilPath()

  args = ['-m', command_name] if run_concurrent else [command_name]
  if command_args is not None:
    args += command_args

  if platforms.OperatingSystem.Current() == platforms.OperatingSystem.WINDOWS:
    gsutil_args = execution_utils.ArgsForCMDTool(command_path + '.cmd', *args)
  else:
    gsutil_args = execution_utils.ArgsForExecutableTool(command_path, *args)
  log.debug('Running command: [{args}]'.format(args=' '.join(gsutil_args)))
  return execution_utils.Exec(gsutil_args, no_exit=True,
                              out_func=log.file_only_logger.debug,
                              err_func=log.file_only_logger.debug)
Example #18
  def SetFromIncompatibleSchema(self):
    """Sets that we just did an update check and found a new schema version.

    An incompatible schema version means there are definitely updates available
    but we can't read the notifications to correctly notify the user.  This will
    install a default notification for the incompatible schema.

    You must call Save() to persist these changes or use this as a context
    manager.
    """
    log.debug('Incompatible schema found.  Activating default notification.')

    # Nag once a week to update if the schema changed and we don't know what's
    # going on anymore.
    notification_spec = schemas.NotificationSpec(
        id='incompatible',
        condition=schemas.Condition(None, None, None, None, False),
        trigger=schemas.Trigger(frequency=604800, command_regex=None),
        notification=schemas.Notification(None, None, None)
    )
    self._data.notifications = [notification_spec]
    self._CleanUpLastNagTimes()

    self._data.last_update_check_time = time.time()
    self._data.last_update_check_revision = 0  # Doesn't matter
    self._dirty = True
Example #19
def _AddSSHKeyToMetadataMessage(message_classes, user, public_key, metadata,
                                legacy=False):
  """Adds the public key material to the metadata if it's not already there.

  Args:
    message_classes: An object containing API message classes.
    user: The username for the SSH key.
    public_key: The SSH public key to add to the metadata.
    metadata: The existing metadata.
    legacy: If true, store the key in the legacy "sshKeys" metadata entry.

  Returns:
    An updated metadata API message.
  """
  entry = u'{user}:{public_key}'.format(
      user=user, public_key=public_key)

  ssh_keys, ssh_legacy_keys = _GetSSHKeysFromMetadata(metadata)
  all_ssh_keys = ssh_keys + ssh_legacy_keys
  log.debug('Current SSH keys in project: {0}'.format(all_ssh_keys))

  if entry in all_ssh_keys:
    return metadata

  if legacy:
    metadata_key = constants.SSH_KEYS_LEGACY_METADATA_KEY
    updated_ssh_keys = ssh_legacy_keys
  else:
    metadata_key = constants.SSH_KEYS_METADATA_KEY
    updated_ssh_keys = ssh_keys
  updated_ssh_keys.append(entry)
  return metadata_utils.ConstructMetadataMessage(
      message_classes=message_classes,
      metadata={metadata_key: _PrepareSSHKeysValue(updated_ssh_keys)},
      existing_metadata=metadata)
Example #20
def _GetGitRemoteUrls(source_directory):
    """Finds the list of git remotes for the given source directory.

  Args:
    source_directory: The path to directory containing the source code.
  Returns:
    A dictionary of remote name to remote URL, empty if no remotes are found.
  """
    remote_url_config_output = _GetGitRemoteUrlConfigs(source_directory)
    if not remote_url_config_output:
        return {}

    result = {}
    config_lines = remote_url_config_output.split("\n")
    for config_line in config_lines:
        if not config_line:
            continue  # Skip blank lines.

        # Each line looks like "remote.<name>.url <url>".
        config_line_parts = config_line.split(" ")
        if len(config_line_parts) != 2:
            log.debug("Skipping unexpected config line, incorrect segments: %s", config_line)
            continue

        # Extract the two parts, then find the name of the remote.
        remote_url_config_name = config_line_parts[0]
        remote_url = config_line_parts[1]
        remote_url_name_match = _REMOTE_URL_RE.match(remote_url_config_name)
        if not remote_url_name_match:
            log.debug("Skipping unexpected config line, could not match remote: %s", config_line)
            continue
        remote_url_name = remote_url_name_match.group(1)

        result[remote_url_name] = remote_url
    return result
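
The _REMOTE_URL_RE pattern is not shown above; a self-contained sketch of the same parsing, assuming config lines of the form "remote.<name>.url <url>" and a plausible regex for the remote name:

import re

REMOTE_URL_RE = re.compile(r'remote\.(.+)\.url')  # assumed pattern

def parse_remote_urls(config_output):
    # Turn "remote.<name>.url <url>" lines into a {name: url} dict.
    result = {}
    for line in config_output.splitlines():
        parts = line.split(' ')
        if len(parts) != 2:
            continue
        match = REMOTE_URL_RE.match(parts[0])
        if match:
            result[match.group(1)] = parts[1]
    return result

sample = 'remote.origin.url https://github.com/example/repo.git'
print(parse_remote_urls(sample))  # {'origin': 'https://github.com/example/repo.git'}
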
Example #21
def UpdateCheck(command_path, **unused_kwargs):
  try:
    update_manager.UpdateManager.PerformUpdateCheck(command_path=command_path)
  # pylint:disable=broad-except, We never want this to escape, ever. Only
  # messages printed should reach the user.
  except Exception:
    log.debug('Failed to perform update check.', exc_info=True)
Example #22
  def ReportMetrics(self, wait_for_report=False):
    """Reports the collected metrics using a separate async process."""
    if not self._metrics:
      return

    temp_metrics_file = tempfile.NamedTemporaryFile(delete=False)
    with temp_metrics_file:
      pickle.dump(self._metrics, temp_metrics_file)
      self._metrics = []

    # TODO(user): make this not depend on the file.
    reporting_script_path = os.path.join(os.path.dirname(__file__),
                                         'metrics_reporter.py')
    execution_args = execution_utils.ArgsForPythonTool(
        reporting_script_path, temp_metrics_file.name)

    exec_env = os.environ.copy()
    exec_env['PYTHONPATH'] = os.pathsep.join(sys.path)

    p = subprocess.Popen(execution_args, env=exec_env, **self._async_popen_args)
    if wait_for_report:
      # NOTE: p.wait() can cause a deadlock. p.communicate() is recommended.
      # See python docs for more information.
      p.communicate()
    log.debug('Metrics reporting process started...')
Example #23
  def CopyCerts(self):
    """Copies certificates from the VM for secure access.

    This can fail if the function is called before the VM is ready for SSH, or
    before the certificates are generated, so some retries are needed.

    Raises:
      exceptions.ToolException: If the certificates cannot be copied after all
        the retries.
    """
    for i in range(_RETRIES):
      try:
        self._cli.Execute(
            ['compute', 'copy-files', '--zone', self._zone,
             '--verbosity', 'none', '--no-user-output-enabled', '--quiet',
             '--project', self._project,
             _REMOTE_CERT_FORMAT.format(name=self._name), self.cert_dir])
        break
      except (SystemExit, exceptions.ToolException):
        log.debug(
            'Error copying certificates. Retry {retry} of {retries}.'.format(
                retry=i, retries=_RETRIES))
        time.sleep(_RETRY_TIME)
        continue
    else:
      raise exceptions.ToolException('Unable to copy certificates.')
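
CopyCerts relies on Python's for/else idiom: the else clause runs only when the loop finishes without hitting break, i.e. when every attempt failed. A minimal standalone sketch of that retry shape (names, exception type, and the usage line are illustrative):

import time

def with_retries(operation, retries=3, delay=0.0):
    for attempt in range(retries):
        try:
            operation()
            break  # success: skip the else clause
        except Exception:  # best effort, like the copy-files retry above
            time.sleep(delay)
    else:
        raise RuntimeError('Operation failed after {0} attempts.'.format(retries))

# Usage (hypothetical): with_retries(lambda: shutil.copy(src, dst), retries=5, delay=2)
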
Example #24
  def _DictFromURL(url, command_path, is_extra_repo=False):
    """Loads a json dictionary from a URL.

    Args:
      url: str, The URL to the file to load.
      command_path: the command path to include in the User-Agent header if the
        URL is HTTP
      is_extra_repo: bool, True if this is not the primary repository.

    Returns:
      A ComponentSnapshot object.

    Raises:
      URLFetchError: If the URL cannot be fetched.
    """
    extra_repo = url if is_extra_repo else None
    try:
      response = installers.ComponentInstaller.MakeRequest(url, command_path)
    except (urllib2.HTTPError, urllib2.URLError, ssl.SSLError):
      log.debug('Could not fetch [{url}]'.format(url=url), exc_info=True)
      response = None
    except ValueError as e:
      if not e.message or 'unknown url type' not in e.message:
        raise e
      log.debug('Bad repository url: [{url}]'.format(url=url), exc_info=True)
      raise URLFetchError(malformed=True, extra_repo=extra_repo)

    if not response:
      raise URLFetchError(extra_repo=extra_repo)
    code = response.getcode()
    if code and code != 200:
      raise URLFetchError(code=code, extra_repo=extra_repo)
    data = json.loads(response.read())
    return data
Example #25
  def CallKubectl(self, c_config, kubectl_args):
    """Shell out to call to kubectl tool.

    Args:
      c_config: ClusterConfig object for cluster.
      kubectl_args: specific args to call kubectl with (not including args
        for authentication).
    Returns:
      (output, error), where
        output: str, raw output of the kubectl command.
        error: subprocess.CalledProcessError, if the command exited with
          non-zero status, None if command exited with success.
    """
    base_args = [
        '--kubeconfig=%s' % kconfig.Kubeconfig.DefaultPath(),
        '--context=%s' % c_config.kube_context,
    ]
    if not c_config.has_certs:
      log.warn('No certificate files found in %s. Certificate checking '
               'disabled for calls to cluster master.', c_config.config_dir)
    args = ['kubectl'] + base_args + kubectl_args
    try:
      log.debug('Calling \'%s\'', repr(args))
      output = subprocess.check_output(args, stderr=subprocess.STDOUT)
      return (output, None)
    except subprocess.CalledProcessError as error:
      return (error.output, error)
Example #26
def _RunGsutilCommand(command_name, command_args, run_concurrent=False):
  """Runs the specified gsutil command and returns the command's exit code.

  Args:
    command_name: The gsutil command to run.
    command_args: List of arguments to pass to the command.
    run_concurrent: Whether concurrent uploads should be enabled while running
      the command.

  Returns:
    The exit code of the call to the gsutil command.
  """
  gsutil_path = _GetGsutilPath()

  gsutil_args = []
  if run_concurrent:
    gsutil_args += ['-m']
  gsutil_args += [command_name]
  gsutil_args += command_args
  env = None

  gsutil_cmd = execution_utils.ArgsForBinaryTool(gsutil_path, *gsutil_args)
  log.debug('Running command: [{args}], Env: [{env}]'.format(
      args=' '.join(gsutil_cmd),
      env=env))
  return execution_utils.Exec(gsutil_cmd, no_exit=True, env=env)
Example #27
def _ApplyLowerPriorityArgs(args, lower_pri_args, issue_cli_warning=False):
  """Apply lower-priority arg values from a dictionary to args without values.

  May be used to apply arg default values, or to merge args from another source,
  such as an arg-file. Args which already have a value are never modified by
  this function. Thus, if there are multiple sets of lower-priority args, they
  should be applied in order from highest-to-lowest precedence.

  Args:
    args: the existing argparse.Namespace. All the arguments that were provided
      to the command invocation (i.e. group and command arguments combined),
      plus any arg defaults already applied to the namespace. These args have
      higher priority than the lower_pri_args.
    lower_pri_args: a dict mapping lower-priority arg names to their values.
    issue_cli_warning: (boolean) issue a warning if an arg already has a value
      from the command line and we do not apply the lower-priority arg value
      (used for arg-files where any args specified in the file are lower in
      priority than the CLI args).
  """
  for arg in lower_pri_args:
    if getattr(args, arg, None) is None:
      log.debug('Applying default {0}: {1}'
                .format(arg, str(lower_pri_args[arg])))
      setattr(args, arg, lower_pri_args[arg])
    elif issue_cli_warning and getattr(args, arg) != lower_pri_args[arg]:
      ext_name = arg_validate.ExternalArgNameFrom(arg)
      log.warning(
          'Command-line argument "--{0} {1}" overrides file argument "{2}: {3}"'
          .format(ext_name, _FormatArgValue(getattr(args, arg)),
                  ext_name, _FormatArgValue(lower_pri_args[arg])))
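
A small self-contained sketch of the default-application step above, using a plain argparse.Namespace and made-up argument names:

import argparse

def apply_defaults(args, defaults):
    # Fill in unset attributes from the defaults dict; never overwrite a value
    # that is already present.
    for name, value in defaults.items():
        if getattr(args, name, None) is None:
            setattr(args, name, value)

args = argparse.Namespace(timeout=None, locale='en')
apply_defaults(args, {'timeout': 900, 'locale': 'fr'})
print(args.timeout, args.locale)  # 900 en
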
Example #28
  def __init__(self):
    """Initialize a new MetricsCollector.

    This should only be invoked through the static GetCollector() function.
    """
    current_platform = platforms.Platform.Current()
    self._user_agent = 'CloudSDK/{version} {fragment}'.format(
        version=config.CLOUD_SDK_VERSION,
        fragment=current_platform.UserAgentFragment())
    self._async_popen_args = current_platform.AsycPopenArgs()
    self._project_ids = {}

    hostname = socket.getfqdn()
    install_type = 'Google' if hostname.endswith('.google.com') else 'External'
    self._ga_params = [('v', '1'),
                       ('tid', _GA_TID),
                       ('cid', _MetricsCollector._GetCID()),
                       ('t', 'event'),
                       ('cd1', config.INSTALLATION_CONFIG.release_channel),
                       ('cd2', install_type)]

    self._csi_params = [('s', _CSI_ID),
                        ('v', '2'),
                        ('rls', config.CLOUD_SDK_VERSION)]

    self.StartTimer(time.time())
    self._metrics = []

    log.debug('Metrics collector initialized...')
Example #29
  def ShouldIgnoreFile(self, path):
    """Test if a file should be ignored based on the given patterns.

    Compares the path to each pattern in ignore_patterns, in order. If it
    matches a pattern whose Bool is True, the file should be excluded unless it
    also matches a later pattern which has a bool of False. Similarly, if a name
    matches a pattern with a Bool that is False, it should be included
    unless it also matches a later pattern which has a bool of True.

    Args:
      path: The file name to test.
    Returns:
      True if the file should be ignored, False otherwise.
    """
    # Normalize separators, but leave any trailing '/' to allow explicit
    # directory matches.
    path = _NormalizeToUnixPath(path, strip=False)
    rules = self._GetRules(path)
    ret = False
    for pattern, should_ignore in rules:
      if pattern.match(path):
        log.debug('{0}: matches {1} => ignore=={2}'.format(
            path, pattern.pattern, should_ignore))
        ret = should_ignore
    return ret
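
Because the loop above keeps overwriting ret, the last matching rule wins, so a later include pattern can re-admit files excluded by an earlier one. A standalone sketch with illustrative patterns:

import re

rules = [
    (re.compile(r'.*\.log$'), True),        # ignore all .log files...
    (re.compile(r'.*/keep\.log$'), False),  # ...except any keep.log
]

def should_ignore(path):
    ret = False
    for pattern, ignore in rules:
        if pattern.match(path):
            ret = ignore  # last match wins
    return ret

print(should_ignore('build/out.log'))   # True
print(should_ignore('build/keep.log'))  # False
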
Example #30
  def Start(self, *positional, **flags):
    """Start the dev_appserver.

    Args:
      *positional: str, The positional arguments to be passed to dev_appserver.
      **flags: str, The flags to be passed to dev_appserver.

    Raises:
      DevappserverExecutionError: If devappserver execution returns an error.
    """
    all_flags = dict(self._global_flags)
    all_flags.update(flags)
    # Don't include the script name in argv because we are hijacking the parse
    # method.
    argv = (self._GenerateFlags(all_flags) +
            [arg for arg in positional if arg is not None])
    log.debug('Running [dev_appserver.py] with: {cmd}'.format(
        cmd=' '.join(argv)))
    run_args = execution_utils.ArgsForPythonTool(self.__executable_path, *argv)
    # TODO(user): Take this out (b/19485297).  This is because the
    # devappserver is depending on our pythonpath right now (it should not in
    # the future (b/19443812).  We need to do this because if something invokes
    # gcloud.py directly, the sys.path is updated but is never put in the env.
    # If you call gcloud.sh then it does put it in the env so this is not
    # required.
    env = dict(os.environ)
    env['PYTHONPATH'] = os.pathsep.join(sys.path)
    return_code = subprocess.call(run_args, env=env)
    if return_code != 0:
      raise DevappserverExecutionError(return_code, argv)
Example #31
def RunBaseCreateCommand(args, release_track):
    """Creates a new Cloud SQL instance.

  Args:
    args: argparse.Namespace, The arguments that this command was invoked with.
    release_track: base.ReleaseTrack, the release track that this was run under.

  Returns:
    A dict object representing the operations resource describing the create
    operation if the create was successful.
  Raises:
    HttpException: A http error response was received while executing api
        request.
    ArgumentError: An argument supplied by the user was incorrect, such as
      specifying an invalid CMEK configuration or attempting to create a V1
      instance.
    RequiredArgumentException: A required argument was not supplied by the user,
      such as omitting --root-password on a SQL Server instance.
  """
    client = common_api_util.SqlClient(common_api_util.API_VERSION_DEFAULT)
    sql_client = client.sql_client
    sql_messages = client.sql_messages

    validate.ValidateInstanceName(args.instance)
    instance_ref = client.resource_parser.Parse(
        args.instance,
        params={'project': properties.VALUES.core.project.GetOrFail},
        collection='sql.instances')

    # Get the region, tier, and database version from the master if these fields
    # are not specified.
    # TODO(b/64266672): Remove once API does not require these fields.
    if args.IsSpecified('master_instance_name'):
        master_instance_ref = client.resource_parser.Parse(
            args.master_instance_name,
            params={'project': properties.VALUES.core.project.GetOrFail},
            collection='sql.instances')
        try:
            master_instance_resource = sql_client.instances.Get(
                sql_messages.SqlInstancesGetRequest(
                    project=instance_ref.project,
                    instance=master_instance_ref.instance))
        except apitools_exceptions.HttpError as error:
            # TODO(b/64292220): Remove once API gives helpful error message.
            log.debug('operation : %s', six.text_type(master_instance_ref))
            exc = exceptions.HttpException(error)
            if resource_property.Get(
                    exc.payload.content,
                    resource_lex.ParseKey('error.errors[0].reason'),
                    None) == 'notAuthorized':
                msg = (
                    'You are either not authorized to access the master instance or '
                    'it does not exist.')
                raise exceptions.HttpException(msg)
            raise
        if not args.IsSpecified('region'):
            args.region = master_instance_resource.region
        if not args.IsSpecified('database_version'):
            args.database_version = master_instance_resource.databaseVersion.name
        if not args.IsSpecified('tier') and master_instance_resource.settings:
            args.tier = master_instance_resource.settings.tier

        # Validate master/replica CMEK configurations.
        if master_instance_resource.diskEncryptionConfiguration:
            if args.region == master_instance_resource.region:
                # Warn user that same-region replicas inherit their master's CMEK
                # configuration.
                command_util.ShowCmekWarning('replica', 'the master instance')
            elif not args.IsSpecified('disk_encryption_key'):
                # Raise error that cross-region replicas require their own CMEK key if
                # the master is CMEK.
                raise exceptions.RequiredArgumentException(
                    '--disk-encryption-key',
                    '`--disk-encryption-key` is required when creating a cross-region '
                    'replica of an instance with customer-managed encryption.')
            else:
                command_util.ShowCmekWarning('replica')
        elif args.IsSpecified('disk_encryption_key'):
            # Raise error that cross-region replicas cannot be CMEK encrypted if their
            # master is not.
            raise sql_exceptions.ArgumentError(
                '`--disk-encryption-key` cannot be specified when creating a replica '
                'of an instance without customer-managed encryption.')

    # --root-password is required when creating SQL Server instances
    if args.IsSpecified(
            'database_version') and args.database_version.startswith(
                'SQLSERVER') and not args.IsSpecified('root_password'):
        raise exceptions.RequiredArgumentException(
            '--root-password',
            '`--root-password` is required when creating SQL Server instances.'
        )

    instance_resource = (
        command_util.InstancesV1Beta4.ConstructCreateInstanceFromArgs(
            sql_messages,
            args,
            instance_ref=instance_ref,
            release_track=release_track))

    # TODO(b/122660263): Remove when V1 instances are no longer supported.
    # V1 instances are deprecated.
    # Note that the exception type is intentionally vague because the user may not
    # have directly supplied the offending argument.  For example, creating a read
    # replica defaults its tier to that of its master.
    if api_util.IsInstanceV1(sql_messages, instance_resource):
        raise sql_exceptions.ArgumentError(
            'First Generation instances can no longer be created.')

    operation_ref = None
    try:
        result_operation = sql_client.instances.Insert(instance_resource)

        operation_ref = client.resource_parser.Create(
            'sql.operations',
            operation=result_operation.name,
            project=instance_ref.project)

        if args.async_:
            if not args.IsSpecified('format'):
                args.format = 'default'
            return sql_client.operations.Get(
                sql_messages.SqlOperationsGetRequest(
                    project=operation_ref.project,
                    operation=operation_ref.operation))

        operations.OperationsV1Beta4.WaitForOperation(
            sql_client,
            operation_ref,
            'Creating Cloud SQL instance',
            # TODO(b/138403566): Remove the override once we improve creation times.
            max_wait_seconds=680)

        log.CreatedResource(instance_ref)

        new_resource = sql_client.instances.Get(
            sql_messages.SqlInstancesGetRequest(
                project=instance_ref.project, instance=instance_ref.instance))
        return new_resource
    except apitools_exceptions.HttpError as error:
        log.debug('operation : %s', six.text_type(operation_ref))
        exc = exceptions.HttpException(error)
        if resource_property.Get(
                exc.payload.content,
                resource_lex.ParseKey('error.errors[0].reason'),
                None) == 'errorMaxInstancePerLabel':
            msg = resource_property.Get(exc.payload.content,
                                        resource_lex.ParseKey('error.message'),
                                        None)
            raise exceptions.HttpException(msg)
        raise
Example #32
    def _UpdateWindowsKeysValue(self, existing_metadata):
        """Returns a string appropriate for the metadata.

    Values are removed if they have expired and non-expired keys are removed
    from the head of the list only if the total key size is greater than
    MAX_METADATA_VALUE_SIZE_IN_BYTES.

    Args:
      existing_metadata: The existing metadata for the instance to be updated.

    Returns:
      A new-line-joined string of Windows keys.
    """
        # Get existing keys from metadata.
        windows_keys = []
        self.old_metadata_keys = []
        for item in existing_metadata.items:
            if item.key == METADATA_KEY:
                windows_keys = [
                    key.strip() for key in item.value.split('\n') if key
                ]
            if item.key in OLD_METADATA_KEYS:
                self.old_metadata_keys.append(item.key)

        # Append new key.
        windows_keys.append(self.windows_key_entry)

        # Remove expired and excess key entries.
        keys = []
        bytes_consumed = 0

        for key in reversed(
                windows_keys):  # Keys should be removed in FIFO order.
            num_bytes = len(key + '\n')
            key_expired = False

            # Try to determine if key is expired. Ignore any errors.
            try:
                key_data = json.loads(key)
                if time_utils.IsExpired(key_data['expireOn']):
                    key_expired = True
            # Errors should come in two forms: Invalid JSON (ValueError) or missing
            # 'expireOn' key (KeyError).
            except (ValueError, KeyError):
                pass

            if key_expired:
                log.debug(
                    'The following Windows key has expired and will be removed '
                    'from your project: {0}'.format(key))
            elif (bytes_consumed + num_bytes >
                  constants.MAX_METADATA_VALUE_SIZE_IN_BYTES):
                log.debug(
                    'The following Windows key will be removed from your project '
                    'because your windows keys metadata value has reached its '
                    'maximum allowed size of {0} bytes: {1}'.format(
                        constants.MAX_METADATA_VALUE_SIZE_IN_BYTES, key))
            else:
                keys.append(key)
                bytes_consumed += num_bytes

        log.debug('Number of Windows Keys: {0}'.format(len(keys)))
        keys.reverse()
        return '\n'.join(keys)
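
A trimmed-down sketch of the expiry check above: each key entry is a JSON blob carrying an 'expireOn' timestamp, expired entries are dropped, and unparseable entries are kept, mirroring the ValueError/KeyError handling (the timestamp format here is an illustrative ISO-8601 string, and timestamps are assumed to be timezone-aware):

import json
from datetime import datetime, timezone

def prune_expired(keys, now=None):
    # Drop entries whose 'expireOn' is in the past; keep anything unparseable.
    now = now or datetime.now(timezone.utc)
    kept = []
    for key in keys:
        try:
            expire_on = datetime.fromisoformat(json.loads(key)['expireOn'])
            if expire_on <= now:
                continue  # expired: skip it
        except (ValueError, KeyError):
            pass  # invalid JSON or missing field: leave the entry alone
        kept.append(key)
    return kept

keys = ['{"userName": "alice", "expireOn": "2099-01-01T00:00:00+00:00"}',
        '{"userName": "bob", "expireOn": "2000-01-01T00:00:00+00:00"}']
print(len(prune_expired(keys)))  # 1
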
Example #33
    def Load(cls, cluster_name, zone_id, project_id):
        """Load and verify config for given cluster.

    Args:
      cluster_name: name of cluster to load config for.
      zone_id: compute zone the cluster is running in.
      project_id: project in which the cluster is running.
    Returns:
      ClusterConfig for the cluster, or None if config data is missing or
      incomplete.
    """
        log.debug('Loading cluster config for cluster=%s, zone=%s project=%s',
                  cluster_name, zone_id, project_id)
        k = kconfig.Kubeconfig.Default()

        key = cls.KubeContext(cluster_name, zone_id, project_id)

        cluster = k.clusters.get(key) and k.clusters[key].get('cluster')
        user = k.users.get(key) and k.users[key].get('user')
        context = k.contexts.get(key) and k.contexts[key].get('context')
        if not cluster or not user or not context:
            log.debug('missing kubeconfig entries for %s', key)
            return None
        if context.get('user') != key or context.get('cluster') != key:
            log.debug('invalid context %s', context)
            return None

        # Verify cluster data
        server = cluster.get('server')
        insecure = cluster.get('insecure-skip-tls-verify')
        ca_data = cluster.get('certificate-authority-data')
        if not server:
            log.debug('missing cluster.server entry for %s', key)
            return None
        if insecure:
            if ca_data:
                log.debug(
                    'cluster cannot specify both certificate-authority-data '
                    'and insecure-skip-tls-verify')
                return None
        elif not ca_data:
            log.debug('cluster must specify one of certificate-authority-data|'
                      'insecure-skip-tls-verify')
            return None

        # Verify user data
        auth_provider = user.get('auth-provider')
        cert_data = user.get('client-certificate-data')
        key_data = user.get('client-key-data')
        cert_auth = cert_data and key_data
        has_valid_auth = auth_provider or cert_auth
        if not has_valid_auth:
            log.debug('missing auth info for user %s: %s', key, user)
            return None
        # Construct ClusterConfig
        kwargs = {
            'cluster_name': cluster_name,
            'zone_id': zone_id,
            'project_id': project_id,
            'server': server,
            'auth_provider': auth_provider,
            'ca_data': ca_data,
            'client_key_data': key_data,
            'client_cert_data': cert_data,
        }
        return cls(**kwargs)
Example #34
def BuildPackages(package_path, output_dir):
    """Builds Python packages from the given package source.

  That is, builds Python packages from the code in package_path, using its
  parent directory (the 'package root') as its context using the setuptools
  `sdist` command.

  If there is a `setup.py` file in the package root, use that. Otherwise,
  use a simple, temporary one made for this package.

  We try to be as unobtrusive as possible (see _RunSetupTools for details):

  - setuptools writes some files to the package root--we move as many temporary
    generated files out of the package root as possible
  - the final output gets written to output_dir
  - any temporary setup.py file is written outside of the package root.
  - if the current directory isn't writable, we silently make a temporary copy

  Args:
    package_path: str. Path to the package. This should be the path to
      the directory containing the Python code to be built, *not* its parent
      (which optionally contains setup.py and other metadata).
    output_dir: str, path to a long-lived directory in which the built packages
      should be created.

  Returns:
    list of str. The full local path to all built Python packages.

  Raises:
    SetuptoolsFailedError: If the setup.py file fails to successfully build.
    MissingInitError: If the package doesn't contain an `__init__.py` file.
  """
    package_path = os.path.abspath(package_path)
    with files.TemporaryDirectory() as temp_dir:
        package_root = _CopyIfNotWritable(os.path.dirname(package_path),
                                          temp_dir)
        if not os.path.exists(os.path.join(package_path, '__init__.py')):
            # We could drop `__init__.py` in here, but it's pretty likely that this
            # indicates an incorrect directory or some bigger problem and we don't
            # want to obscure that.
            #
            # Note that we could more strictly validate here by checking each package
            # in the `--module-name` argument, but this should catch most issues.
            raise MissingInitError(package_path)

        provided_setup_py_path = os.path.join(package_root, 'setup.py')
        package_name = os.path.basename(package_path)
        setup_py_path = _GenerateSetupPyIfNeeded(provided_setup_py_path,
                                                 temp_dir, package_name)
        generated = provided_setup_py_path != setup_py_path
        try:
            return _RunSetupTools(package_root, setup_py_path, output_dir)
        except RuntimeError as err:
            raise SetuptoolsFailedError(str(err), generated)
        finally:
            if generated:
                # For some reason, this artifact gets generated in the package root by
                # setuptools, even after setting PYTHONDONTWRITEBYTECODE or running
                # `python setup.py clean --all`. It's weird to leave someone a .pyc for
                # a file they never had, so we clean it up.
                pyc_file = os.path.join(package_root, 'setup.pyc')
                try:
                    os.unlink(pyc_file)
                except OSError:
                    log.debug(
                        "Couldn't remove file [%s] (it may never have been created).",
                        pyc_file)
Example #35
def _BuildStagingDirectory(source_dir, staging_dir, bucket_ref,
                           excluded_regexes):
    """Creates a staging directory to be uploaded to Google Cloud Storage.

  The staging directory will contain a symlink for each file in the original
  directory. Each symlink is named after the sha1 hash of the original file and
  points back to the original file.

  Consider the following original structure:
    app/
      main.py
      tools/
        foo.py
   Assume main.py has SHA1 hash 123 and foo.py has SHA1 hash 456. The resultant
   staging directory will look like:
     /tmp/staging/
       123 -> app/main.py
       456 -> app/tools/foo.py
   (Note: "->" denotes a symlink)

   If the staging directory is then copied to a GCS bucket at
   gs://staging-bucket/ then the resulting manifest will be:
     {
       "app/main.py": {
         "sourceUrl": "https://storage.googleapis.com/staging-bucket/123",
         "sha1Sum": "123"
       },
       "app/tools/foo.py": {
         "sourceUrl": "https://storage.googleapis.com/staging-bucket/456",
         "sha1Sum": "456"
       }
     }

  Args:
    source_dir: The original directory containing the application's source
      code.
    staging_dir: The directory where the staged files will be created.
    bucket_ref: A reference to the GCS bucket where the files will be uploaded.
    excluded_regexes: List of file patterns to skip while building the staging
      directory.

  Raises:
    LargeFileError: if one of the files to upload exceeds the maximum App Engine
    file size.

  Returns:
    A dictionary which represents the file manifest.
  """
    manifest = {}
    bucket_url = bucket_ref.GetPublicUrl()

    def AddFileToManifest(manifest_path, input_path):
        """Adds the given file to the current manifest.

    Args:
      manifest_path: The path to the file as it will be stored in the manifest.
      input_path: The location of the file to be added to the manifest.
    Returns:
      If the target was already in the manifest with different contents,
      returns None. In all other cases, returns a target location to which the
      caller must copy, move, or link the file.
    """
        file_ext = os.path.splitext(input_path)[1]
        sha1_hash = file_utils.Checksum().AddFileContents(
            input_path).HexDigest()

        target_filename = sha1_hash + file_ext
        target_path = os.path.join(staging_dir, target_filename)

        dest_path = '/'.join([bucket_url, target_filename])
        old_url = manifest.get(manifest_path, {}).get('sourceUrl', '')
        if old_url and old_url != dest_path:
            return None
        manifest[manifest_path] = {
            'sourceUrl': dest_path,
            'sha1Sum': sha1_hash,
        }
        return target_path

    for relative_path in util.FileIterator(source_dir, excluded_regexes):
        local_path = os.path.join(source_dir, relative_path)
        size = os.path.getsize(local_path)
        if size > _MAX_FILE_SIZE:
            raise LargeFileError(local_path, size, _MAX_FILE_SIZE)
        target_path = AddFileToManifest(relative_path, local_path)
        if not os.path.exists(target_path):
            _CopyOrSymlink(local_path, target_path)

    context_files = context_util.CreateContextFiles(staging_dir,
                                                    None,
                                                    overwrite=True,
                                                    source_dir=source_dir)
    for context_file in context_files:
        manifest_path = os.path.basename(context_file)
        target_path = AddFileToManifest(manifest_path, context_file)
        if not target_path:
            log.status.Print(
                'Not generating {0} because a user-generated '
                'file with the same name exists.'.format(manifest_path))
        if not target_path or os.path.exists(target_path):
            # If we get here, it probably means that the user already generated the
            # context file manually and put it either in the top directory or in some
            # subdirectory. The new context file is useless and may confuse later
            # stages of the upload (it is in the staging directory with a
            # nonconformant name), so delete it. The entry in the manifest will point
            # at the existing file.
            os.remove(context_file)
        else:
            # Rename the source-context*.json file (which is in the staging directory)
            # to the hash-based name in the same directory.
            os.rename(context_file, target_path)

    log.debug('Generated deployment manifest: "{0}"'.format(
        json.dumps(manifest, indent=2, sort_keys=True)))
    return manifest
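
A self-contained sketch of the content-addressed naming used by AddFileToManifest above: the staged file name is the SHA1 of the file's contents plus its original extension (the paths and bucket URL in the usage comment are hypothetical):

import hashlib
import os

def manifest_entry(relative_path, local_path, bucket_url):
    # Hash the file contents and map the original path to the staged URL.
    with open(local_path, 'rb') as f:
        sha1 = hashlib.sha1(f.read()).hexdigest()
    target = sha1 + os.path.splitext(local_path)[1]
    return {relative_path: {'sourceUrl': '/'.join([bucket_url, target]),
                            'sha1Sum': sha1}}

# Usage (hypothetical file and bucket):
# print(manifest_entry('app/main.py', '/src/app/main.py',
#                      'https://storage.googleapis.com/staging-bucket'))
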
Example #36
  def Run(self, args):
    """Run the helper command."""

    if args.method not in GitHelper.METHODS:
      if args.ignore_unknown:
        return
      raise auth_exceptions.GitCredentialHelperError(
          'Unexpected method [{meth}]. One of [{methods}] expected.'
          .format(meth=args.method, methods=', '.join(GitHelper.METHODS)))

    info = self._ParseInput()
    credentialed_domains = [
        'source.developers.google.com',
        GitHelper.GOOGLESOURCE,  # Requires a different username value.
    ]
    credentialed_domains_suffix = [
        '.'+GitHelper.GOOGLESOURCE,
    ]
    extra = properties.VALUES.core.credentialed_hosted_repo_domains.Get()
    if extra:
      credentialed_domains.extend(extra.split(','))
    host = info.get('host')

    def _ValidateHost(host):
      if host in credentialed_domains:
        return True
      for suffix in credentialed_domains_suffix:
        if host.endswith(suffix):
          return True
      return False

    if not _ValidateHost(host):
      if not args.ignore_unknown:
        raise auth_exceptions.GitCredentialHelperError(
            'Unknown host [{host}].'.format(host=host))
      return

    if args.method == GitHelper.GET:
      account = properties.VALUES.core.account.Get()
      try:
        cred = c_store.Load(account)
        c_store.Refresh(cred)
      except c_store.Error as e:
        sys.stderr.write(textwrap.dedent("""\
            ERROR: {error}
            Run 'gcloud auth login' to log in.
            """.format(error=str(e))))
        return

      self._CheckNetrc()

      # For googlesource.com, any username beginning with "git-" is accepted
      # and the identity of the user is extracted from the token server-side.
      if (host == GitHelper.GOOGLESOURCE
          or host.endswith('.'+GitHelper.GOOGLESOURCE)):
        sent_account = 'git-account'
      else:
        sent_account = account

      sys.stdout.write(textwrap.dedent("""\
          username={username}
          password={password}
          """).format(username=sent_account, password=cred.access_token))
    elif args.method == GitHelper.STORE:
      # On OSX, there is an additional credential helper that gets called before
      # ours does.  When we return a token, it gets cached there.  Git continues
      # to get it from there first until it expires.  That command then fails,
      # and the token is deleted, but it does not retry the operation.  The next
      # command gets a new token from us and it starts working again, for an
      # hour.  This erases our credential from the other cache whenever 'store'
      # is called on us.  Because they are called first, the token will already
      # be stored there, and so we can successfully erase it to prevent caching.
      if (platforms.OperatingSystem.Current() ==
          platforms.OperatingSystem.MACOSX):
        log.debug('Clearing OSX credential cache.')
        try:
          input_string = 'protocol={protocol}\nhost={host}\n\n'.format(
              protocol=info.get('protocol'), host=info.get('host'))
          log.debug('Calling erase with input:\n%s', input_string)
          p = subprocess.Popen(['git-credential-osxkeychain', 'erase'],
                               stdin=subprocess.PIPE,
                               stdout=subprocess.PIPE,
                               stderr=subprocess.PIPE)
          (out, err) = p.communicate(input_string)
          if p.returncode:
            log.debug(
                'Failed to clear OSX keychain:\nstdout: {%s}\nstderr: {%s}',
                out, err)
        # pylint:disable=broad-except, This can fail and should only be done as
        # best effort.
        except Exception as e:
          log.debug('Failed to clear OSX keychain', exc_info=True)
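_ParseInput is not shown in this excerpt; it presumably reads the git-credential protocol's key=value attribute lines from stdin. A minimal hypothetical sketch of such a parser (the name and exact behavior are assumptions, not the actual gcloud implementation):

import sys

def _ParseInputSketch(stream=sys.stdin):
  # Hypothetical stand-in for GitHelper._ParseInput: read "key=value" lines
  # until a blank line, as the git-credential protocol sends them.
  info = {}
  for line in stream:
    line = line.strip()
    if not line:
      break
    key, _, value = line.partition('=')
    info[key] = value
  return info

# For a GET request, git would send something like:
#   protocol=https
#   host=source.developers.google.com
# and the helper answers with "username=..." and "password=..." lines, as the
# GET branch above does.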
Exemple #37
0
def _RunSetupTools(package_root, setup_py_path, output_dir):
    """Executes the setuptools `sdist` command.

  Specifically, runs `python setup.py sdist` (with the full path to `setup.py`
  given by setup_py_path) with arguments to put the final output in output_dir
  and all possible temporary files in a temporary directory. package_root is
  used as the working directory.

  May attempt to run setup.py multiple times with different
  environments/commands if any execution fails:

  1. Using the Cloud SDK Python environment, with a full setuptools invocation
     (`egg_info`, `build`, and `sdist`).
  2. Using the system Python environment, with a full setuptools invocation
     (`egg_info`, `build`, and `sdist`).
  3. Using the Cloud SDK Python environment, with an intermediate setuptools
     invocation (`build` and `sdist`).
  4. Using the system Python environment, with an intermediate setuptools
     invocation (`build` and `sdist`).
  5. Using the Cloud SDK Python environment, with a simple setuptools
     invocation which will also work for plain distutils-based setup.py (just
     `sdist`).
  6. Using the system Python environment, with a simple setuptools
     invocation which will also work for plain distutils-based setup.py (just
     `sdist`).

  This order is chosen because we first prefer the setup.py invocations that
  leave the fewest files on disk, and then prefer the Cloud SDK execution
  environment, as it is the most stable.

  package_root must be writable, or setuptools will fail (there are
  temporary files from setuptools that get put in the CWD).

  Args:
    package_root: str, the directory containing the package (that is, the
      *parent* of the package itself).
    setup_py_path: str, the path to the `setup.py` file to execute.
    output_dir: str, path to a directory in which the built packages should be
      created.

  Returns:
    list of str, the full paths to the generated packages.

  Raises:
    SysExecutableMissingError: if sys.executable is None
    RuntimeError: if the execution of setuptools exited non-zero.
  """
    # Unfortunately, there doesn't seem to be any easy way to move *all*
    # temporary files out of the current directory, so we'll fail here if we
    # can't write to it.
    with _TempDirOrBackup(package_root) as working_dir:
        # Simpler, but messier (leaves artifacts on disk) command. This will
        # work for both distutils- and setuptools-based setup.py files.
        sdist_args = ['sdist', '--dist-dir', output_dir]
        # The 'build' and 'egg_info' commands (which are invoked anyway as
        # subcommands of 'sdist') are included to ensure that the fewest
        # possible artifacts are left on disk.
        build_args = [
            'build', '--build-base', working_dir, '--build-temp', working_dir
        ]
        # Some setuptools versions don't support directly running the egg_info
        # command
        egg_info_args = ['egg_info', '--egg-base', working_dir]
        setup_py_arg_sets = (egg_info_args + build_args + sdist_args,
                             build_args + sdist_args, sdist_args)

        # See docstring for the reasoning behind this order.
        setup_py_commands = []
        for setup_py_args in setup_py_arg_sets:
            setup_py_commands.append(
                _CloudSdkPythonSetupPyCommand(setup_py_path, setup_py_args,
                                              package_root))
            setup_py_commands.append(
                _SystemPythonSetupPyCommand(setup_py_path, setup_py_args,
                                            package_root))

        for setup_py_command in setup_py_commands:
            out = io.StringIO()
            return_code = setup_py_command.Execute(out)
            if not return_code:
                break
        else:
            raise RuntimeError(out.getvalue())

    local_paths = [
        os.path.join(output_dir, rel_file)
        for rel_file in os.listdir(output_dir)
    ]
    log.debug('Python packaging resulted in [%s]', ', '.join(local_paths))
    return local_paths
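A hedged usage sketch of _RunSetupTools; the paths below are placeholders, not values taken from the surrounding code:

# Build an sdist for a package rooted at /tmp/my_pkg, collecting the results
# in /tmp/out; the returned list holds the full paths of the built archives.
built_packages = _RunSetupTools(
    package_root='/tmp/my_pkg',
    setup_py_path='/tmp/my_pkg/setup.py',
    output_dir='/tmp/out')
for package_path in built_packages:
    print(package_path)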
Exemple #38
0
def RunBaseCreateCommand(args):
    """Creates a new Cloud SQL instance.

  Args:
    args: argparse.Namespace, The arguments that this command was invoked
        with.

  Returns:
    A dict object representing the operations resource describing the create
    operation if the create was successful.

  Raises:
    HttpException: An HTTP error response was received while executing the API
        request.
    ToolException: An error other than an HTTP error occurred while executing
        the command.
  """
    client = api_util.SqlClient(api_util.API_VERSION_DEFAULT)
    sql_client = client.sql_client
    sql_messages = client.sql_messages

    validate.ValidateInstanceName(args.instance)
    instance_ref = client.resource_parser.Parse(
        args.instance,
        params={'project': properties.VALUES.core.project.GetOrFail},
        collection='sql.instances')
    instance_resource = instances.InstancesV1Beta4.ConstructInstanceFromArgs(
        sql_messages, args, instance_ref=instance_ref)

    if args.pricing_plan == 'PACKAGE':
        if not console_io.PromptContinue(
                'Charges will begin accruing immediately. Really create Cloud '
                'SQL instance?'):
            raise exceptions.ToolException('canceled by the user.')

    operation_ref = None
    try:
        result_operation = sql_client.instances.Insert(instance_resource)

        operation_ref = client.resource_parser.Create(
            'sql.operations',
            operation=result_operation.name,
            project=instance_ref.project)

        if args.async:
            if not args.IsSpecified('format'):
                args.format = 'default'
            return sql_client.operations.Get(
                sql_messages.SqlOperationsGetRequest(
                    project=operation_ref.project,
                    operation=operation_ref.operation))

        operations.OperationsV1Beta4.WaitForOperation(
            sql_client, operation_ref, 'Creating Cloud SQL instance')

        log.CreatedResource(instance_ref)

        new_resource = sql_client.instances.Get(
            sql_messages.SqlInstancesGetRequest(
                project=instance_ref.project, instance=instance_ref.instance))
        return new_resource
    except apitools_exceptions.HttpError as error:
        log.debug('operation : %s', str(operation_ref))
        exc = exceptions.HttpException(error)
        if resource_property.Get(
                exc.payload.content,
                resource_lex.ParseKey('error.errors[0].reason'),
                None) == 'errorMaxInstancePerLabel':
            msg = resource_property.Get(exc.payload.content,
                                        resource_lex.ParseKey('error.message'),
                                        None)
            raise exceptions.HttpException(msg)
        raise
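The reason/message lookups in the except block above navigate a nested error payload; a minimal sketch of the assumed shape using plain dict access (the payload contents are illustrative, not a real API response):

payload_content = {
    'error': {
        'errors': [{'reason': 'errorMaxInstancePerLabel'}],
        'message': 'Too many instances for this label.',
    }
}
reason = payload_content['error']['errors'][0]['reason']    # error.errors[0].reason
message = payload_content['error']['message']               # error.message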
Exemple #39
0
    def _UpsertBuildTrigger(self, build_trigger, add_gcb_trigger_id):
        """Creates a BuildTrigger using the CloudBuild API if it doesn't exist, else updates it.

    A BuildTrigger "exists" if one with the same name already exists in the
    project.

    Args:
      build_trigger: Config of BuildTrigger to create.
      add_gcb_trigger_id: If True, adds the gcb-trigger-id=<trigger-id>
        annotation to the deployed Kubernetes objects. The annotation must be
        added to an existing trigger because the trigger-id is only known after
        the trigger is created.

    Returns:
      The upserted trigger.
    """
        client = cloudbuild_util.GetClientInstance()
        messages = cloudbuild_util.GetMessagesModule()
        project = properties.VALUES.core.project.Get(required=True)

        # Check if trigger with this name already exists.
        existing = self._GetTriggerIfExists(client, messages, project,
                                            build_trigger.name)
        if existing:
            trigger_id = existing.id

            if add_gcb_trigger_id:
                # Use the existing trigger's id to patch the trigger object we created
                # to add gcb-trigger-id=<trigger-id> annotation for its deployed
                # resources.
                build_util.AddAnnotationToPrepareDeployStep(
                    build_trigger, 'gcb-trigger-id', trigger_id)

            upserted_build_trigger = client.projects_triggers.Patch(
                messages.CloudbuildProjectsTriggersPatchRequest(
                    buildTrigger=build_trigger,
                    projectId=project,
                    triggerId=trigger_id))
            log.debug('updated existing BuildTrigger: ' +
                      six.text_type(upserted_build_trigger))

        else:
            upserted_build_trigger = client.projects_triggers.Create(
                messages.CloudbuildProjectsTriggersCreateRequest(
                    buildTrigger=build_trigger, projectId=project))
            log.debug('created BuildTrigger: ' +
                      six.text_type(upserted_build_trigger))

            trigger_id = upserted_build_trigger.id
            if add_gcb_trigger_id:
                # Since <trigger-id> is only known after a BuildTrigger is created, we
                # must patch the newly created trigger to add
                # gcb-trigger-id=<trigger-id> annotation for its deployed resources.
                build_util.AddAnnotationToPrepareDeployStep(
                    upserted_build_trigger, 'gcb-trigger-id', trigger_id)
                upserted_build_trigger = client.projects_triggers.Patch(
                    messages.CloudbuildProjectsTriggersPatchRequest(
                        buildTrigger=upserted_build_trigger,
                        projectId=project,
                        triggerId=trigger_id))
                log.debug(
                    'updated BuildTrigger with gcb-trigger-id annotation: ' +
                    six.text_type(upserted_build_trigger))

        # Log trigger full name
        build_trigger_ref = resources.REGISTRY.Parse(
            None,
            collection='cloudbuild.projects.triggers',
            api_version='v1',
            params={
                'projectId': project,
                'triggerId': trigger_id,
            })

        if existing:
            log.UpdatedResource(build_trigger_ref)
        else:
            log.CreatedResource(build_trigger_ref)

        return upserted_build_trigger
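The flow above is a get-then-patch-or-create upsert, with an extra patch for newly created triggers because the trigger id is only known after creation. A minimal, API-agnostic sketch of that shape (all names here are hypothetical):

def UpsertSketch(get_existing, create, patch, annotate, resource):
  # get_existing/create/patch/annotate are stand-ins for the API calls above.
  existing = get_existing(resource.name)
  if existing:
    annotate(resource, existing.id)          # id already known: annotate first
    return patch(existing.id, resource)
  created = create(resource)                 # id only known after creation
  annotate(created, created.id)
  return patch(created.id, created)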
Exemple #40
0
    def Run(self, args):
        """Creates a new Cloud SQL instance.

    Args:
      args: argparse.Namespace, The arguments that this command was invoked
          with.

    Returns:
      A dict object representing the operations resource describing the create
      operation if the create was successful.

    Raises:
      HttpException: An HTTP error response was received while executing the
          API request.
      ToolException: An error other than an HTTP error occurred while
          executing the command.
    """

        # Added this temporarily for debugging SQL instance creation failures
        log.SetVerbosity(logging.DEBUG)
        sql_client = self.context['sql_client']
        sql_messages = self.context['sql_messages']
        resources = self.context['registry']

        util.ValidateInstanceName(args.instance)
        instance_ref = resources.Parse(args.instance,
                                       collection='sql.instances')

        instance_resource = util.ConstructInstanceFromArgs(sql_messages, args)

        if args.master_instance_name:
            replication = 'ASYNCHRONOUS'
            activation_policy = 'ALWAYS'
        else:
            replication = 'SYNCHRONOUS'
            activation_policy = 'ON_DEMAND'
        if not args.replication:
            instance_resource.settings.replicationType = replication
        if not args.activation_policy:
            instance_resource.settings.activationPolicy = activation_policy

        instance_resource.project = instance_ref.project
        instance_resource.instance = instance_ref.instance
        operation_ref = None

        if args.pricing_plan == 'PACKAGE':
            if not console_io.PromptContinue(
                    'Charges will begin accruing immediately. Really create Cloud '
                    'SQL instance?'):
                raise exceptions.ToolException('canceled by the user.')

        try:
            result = sql_client.instances.Insert(instance_resource)

            operation_ref = resources.Create(
                'sql.operations',
                operation=result.operation,
                project=instance_ref.project,
                instance=instance_ref.instance,
            )

            if args.async:
                return sql_client.operations.Get(operation_ref.Request())

            util.WaitForOperation(sql_client, operation_ref,
                                  'Creating Cloud SQL instance')

            log.CreatedResource(instance_ref)

            rsource = sql_client.instances.Get(instance_ref.Request())
            cache = remote_completion.RemoteCompletion()
            cache.AddToCache(instance_ref.SelfLink())
            return rsource
        except apitools_base.HttpError:
            log.debug('operation : %s', str(operation_ref))
            raise
Exemple #41
0
def RunDeploy(
        args,
        api_client,
        enable_endpoints=False,
        use_beta_stager=False,
        runtime_builder_strategy=runtime_builders.RuntimeBuilderStrategy.NEVER,
        use_service_management=None,
        parallel_build=True,
        flex_image_build_option=FlexImageBuildOptions.ON_CLIENT):
    """Perform a deployment based on the given args.

  Args:
    args: argparse.Namespace, An object that contains the values for the
        arguments specified in the ArgsDeploy() function.
    api_client: api_lib.app.appengine_api_client.AppengineClient, App Engine
        Admin API client.
    enable_endpoints: Enable Cloud Endpoints for the deployed app.
    use_beta_stager: Use the stager registry defined for the beta track rather
        than the default stager registry.
    runtime_builder_strategy: runtime_builders.RuntimeBuilderStrategy, when to
      use the new CloudBuild-based runtime builders (alternative is old
      externalized runtimes).
    use_service_management: ServiceManagementOption, enum declaring whether
      to use Service Management to prepare Flexible deployments, or to
      default to the app.use_deprecated_preparation property.
    parallel_build: bool, whether to use parallel build and deployment path.
      Only supported in v1beta and v1alpha App Engine Admin API.
    flex_image_build_option: FlexImageBuildOptions, whether a flex deployment
      should upload files so that the server can build the image or build the
      image on client.

  Returns:
    A dict on the form `{'versions': new_versions, 'configs': updated_configs}`
    where new_versions is a list of version_util.Version, and updated_configs
    is a list of config file identifiers, see yaml_parsing.ConfigYamlInfo.
  """
    project = properties.VALUES.core.project.Get(required=True)
    deploy_options = DeployOptions.FromProperties(
        enable_endpoints,
        runtime_builder_strategy=runtime_builder_strategy,
        parallel_build=parallel_build,
        use_service_management=use_service_management,
        flex_image_build_option=flex_image_build_option)

    with files.TemporaryDirectory() as staging_area:
        stager = _MakeStager(args.skip_staging, use_beta_stager,
                             args.staging_command, staging_area)
        services, configs = deployables.GetDeployables(
            args.deployables, stager, deployables.GetPathMatchers())
        service_infos = [d.service_info for d in services]

        flags.ValidateImageUrl(args.image_url, service_infos)

        # pylint: disable=protected-access
        log.debug(
            'API endpoint: [{endpoint}], API version: [{version}]'.format(
                endpoint=api_client.client.url,
                version=api_client.client._VERSION))
        # The legacy admin console API client.
        # The Admin Console API existed long before the App Engine Admin API, and
        # isn't being improved. We're in the process of migrating all of the calls
        # over to the Admin API, but a few things (notably config deployments)
        # haven't been ported over yet.
        ac_client = appengine_client.AppengineClient(args.server,
                                                     args.ignore_bad_certs)

        app = _PossiblyCreateApp(api_client, project)
        _RaiseIfStopped(api_client, app)
        app = _PossiblyRepairApp(api_client, app)

        # Tell the user what is going to happen, and ask them to confirm.
        version_id = args.version or util.GenerateVersionId()
        deployed_urls = output_helpers.DisplayProposedDeployment(
            app, project, services, configs, version_id,
            deploy_options.promote)
        console_io.PromptContinue(cancel_on_no=True)
        if service_infos:
            # Do generic app setup if deploying any services.
            # All deployment paths for a service involve uploading source to GCS.
            metrics.CustomTimedEvent(metric_names.GET_CODE_BUCKET_START)
            code_bucket_ref = args.bucket or flags.GetCodeBucket(app, project)
            metrics.CustomTimedEvent(metric_names.GET_CODE_BUCKET)
            log.debug(
                'Using bucket [{b}].'.format(b=code_bucket_ref.ToBucketUrl()))

            # Prepare Flex if any service is going to deploy an image.
            if any([s.RequiresImage() for s in service_infos]):
                if deploy_options.use_service_management:
                    deploy_command_util.PossiblyEnableFlex(project)
                else:
                    deploy_command_util.DoPrepareManagedVms(ac_client)

            all_services = dict([(s.id, s) for s in api_client.ListServices()])
        else:
            code_bucket_ref = None
            all_services = {}
        new_versions = []
        deployer = ServiceDeployer(api_client, deploy_options)

        # Track whether a service has been deployed yet, for metrics.
        service_deployed = False
        for service in services:
            if not service_deployed:
                metrics.CustomTimedEvent(
                    metric_names.FIRST_SERVICE_DEPLOY_START)
            new_version = version_util.Version(project, service.service_id,
                                               version_id)
            deployer.Deploy(service,
                            new_version,
                            code_bucket_ref,
                            args.image_url,
                            all_services,
                            app.gcrDomain,
                            flex_image_build_option=flex_image_build_option)
            new_versions.append(new_version)
            log.status.Print('Deployed service [{0}] to [{1}]'.format(
                service.service_id, deployed_urls[service.service_id]))
            if not service_deployed:
                metrics.CustomTimedEvent(metric_names.FIRST_SERVICE_DEPLOY)
            service_deployed = True

    # Deploy config files.
    if configs:
        metrics.CustomTimedEvent(metric_names.UPDATE_CONFIG_START)
        for config in configs:
            message = 'Updating config [{config}]'.format(config=config.name)
            with progress_tracker.ProgressTracker(message):
                ac_client.UpdateConfig(config.name, config.parsed)
        metrics.CustomTimedEvent(metric_names.UPDATE_CONFIG)

    updated_configs = [c.name for c in configs]

    PrintPostDeployHints(new_versions, updated_configs)

    # Return all the things that were deployed.
    return {'versions': new_versions, 'configs': updated_configs}
Exemple #42
0
    def _ConfigureCleanPreviewSchedulerJob(self, repo_owner, repo_name,
                                           pull_request_pattern,
                                           clean_preview_trigger_id,
                                           scheduler_location):

        log.status.Print(
            'Upserting Cloud Scheduler to run Cloud Build trigger to '
            'clean expired preview deployments of your application.')

        messages = apis.GetMessagesModule('cloudscheduler', 'v1')
        client = apis.GetClientInstance('cloudscheduler', 'v1')
        project = properties.VALUES.core.project.Get(required=True)
        service_account_email = project + '@appspot.gserviceaccount.com'

        # Generate deterministic scheduler job name (id)
        job_id = self._FixSchedulerName(
            self._GenerateResourceName(
                function_code='cp',  # Clean Preview
                repo_type='github',  # Only supports github for now.
                full_repo_name=repo_owner + '-' + repo_name))

        name = 'projects/{}/locations/{}/jobs/{}'.format(
            project, scheduler_location, job_id)

        job = messages.Job(
            name=name,
            description='Every day, run trigger to clean expired preview '
            'deployments for PRs against "{}" in {}/{}'.format(
                pull_request_pattern, repo_owner, repo_name),
            schedule=_CLEAN_PREVIEW_SCHEDULE,
            timeZone='UTC',
            httpTarget=messages.HttpTarget(
                uri=
                'https://cloudbuild.googleapis.com/v1/projects/{}/triggers/{}:run'
                .format(project, clean_preview_trigger_id),
                httpMethod=messages.HttpTarget.HttpMethodValueValuesEnum.POST,
                body=bytes(
                    # We don't actually use the branchName value but it has to be
                    # set to an existing branch, so set it to master.
                    '{{"projectId":"{}","repoName":"{}","branchName":"master"}}'
                    .format(project, repo_name).encode('utf-8')),
                oauthToken=messages.OAuthToken(
                    serviceAccountEmail=service_account_email)))

        existing = None
        try:
            existing = client.projects_locations_jobs.Get(
                messages.CloudschedulerProjectsLocationsJobsGetRequest(
                    name=name))

            upserted_job = client.projects_locations_jobs.Patch(
                messages.CloudschedulerProjectsLocationsJobsPatchRequest(
                    name=name, job=job))
            log.debug('updated existing CloudScheduler job: ' +
                      six.text_type(upserted_job))

        except HttpNotFoundError:
            upserted_job = client.projects_locations_jobs.Create(
                messages.CloudschedulerProjectsLocationsJobsCreateRequest(
                    parent='projects/{}/locations/{}'.format(
                        project, scheduler_location),
                    job=job))
            log.debug('created CloudScheduler job: ' +
                      six.text_type(upserted_job))

        job_id = upserted_job.name.split('/')[-1]
        job_ref = resources.REGISTRY.Parse(
            None,
            collection='cloudscheduler.projects.locations.jobs',
            api_version='v1',
            params={
                'projectsId': project,
                'locationsId': scheduler_location,
                'jobsId': job_id,
            })

        if existing:
            log.UpdatedResource(job_ref)
        else:
            log.CreatedResource(job_ref)

        return upserted_job
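For reference, the HttpTarget above posts a small JSON body to the trigger's :run endpoint; a hedged illustration with placeholder values (not real project data):

import json

run_trigger_body = json.dumps({
    'projectId': 'my-project',
    'repoName': 'my-repo',
    # branchName is not used by the cleanup trigger, but it has to name an
    # existing branch, so it is pinned to master (per the comment above).
    'branchName': 'master',
}).encode('utf-8')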
    def _SubmitBuild(self, client, messages, build_config,
                     gcs_config_staging_path, suggest_configs, async_):
        """Submits the build.

    Args:
      client: Client used to make calls to Cloud Build API.
      messages: Cloud Build messages module. This is the value returned from
        cloudbuild_util.GetMessagesModule().
      build_config: Build to submit.
      gcs_config_staging_path: A path to a GCS subdirectory where deployed
        configs will be saved to. This value will be printed to the user.
      suggest_configs: If True, suggest YAML configs for the user to add to
        their repo.
      async_: If true, exit immediately after submitting Build, rather than
        waiting for it to complete or fail.

    Raises:
      FailedDeployException: If the build is completed and not 'SUCCESS'.
    """
        project = properties.VALUES.core.project.Get(required=True)
        op = client.projects_builds.Create(
            messages.CloudbuildProjectsBuildsCreateRequest(build=build_config,
                                                           projectId=project))
        log.debug('submitting build: ' + six.text_type(build_config))

        json = encoding.MessageToJson(op.metadata)
        build = encoding.JsonToMessage(messages.BuildOperationMetadata,
                                       json).build

        build_ref = resources.REGISTRY.Create(
            collection='cloudbuild.projects.builds',
            projectId=build.projectId,
            id=build.id)

        log.status.Print(
            'Starting Cloud Build to build and deploy to the target '
            'Google Kubernetes Engine cluster...\n')

        log.CreatedResource(build_ref)
        if build.logUrl:
            log.status.Print('Logs are available at [{log_url}].'.format(
                log_url=build.logUrl))
        else:
            log.status.Print('Logs are available in the Cloud Console.')

        suggested_configs_path = build_util.SuggestedConfigsPath(
            gcs_config_staging_path, build.id)
        expanded_configs_path = build_util.ExpandedConfigsPath(
            gcs_config_staging_path, build.id)

        if async_:
            log.status.Print(
                '\nIf successful, you can find the configuration files of the deployed '
                'Kubernetes objects stored at gs://{expanded} or by visiting '
                'https://console.cloud.google.com/storage/browser/{expanded}/.'
                .format(expanded=expanded_configs_path))
            if suggest_configs:
                log.status.Print(
                    '\nYou will also be able to find the suggested base Kubernetes '
                    'configuration files at gs://{suggested} or by visiting '
                    'https://console.cloud.google.com/storage/browser/{suggested}/.'
                    .format(suggested=suggested_configs_path))

            # Return here, otherwise, logs are streamed from GCS.
            return

        mash_handler = execution.MashHandler(
            execution.GetCancelBuildHandler(client, messages, build_ref))

        with execution_utils.CtrlCSection(mash_handler):
            build = cb_logs.CloudBuildClient(client,
                                             messages).Stream(build_ref)

        if build.status == messages.Build.StatusValueValuesEnum.TIMEOUT:
            log.status.Print(
                'Your build and deploy timed out. Use the [--timeout=DURATION] flag '
                'to change the timeout threshold.')

        if build.status != messages.Build.StatusValueValuesEnum.SUCCESS:
            if build_util.SaveConfigsBuildStepIsSuccessful(messages, build):
                log.status.Print(
                    'You can find the configuration files for this attempt at gs://{}.'
                    .format(expanded_configs_path))
            raise FailedDeployException(build)

        log.status.Print(
            'Successfully deployed to your Google Kubernetes Engine cluster.\n\n'
            'You can find the configuration files of the deployed Kubernetes '
            'objects stored at gs://{expanded} or by visiting '
            'https://console.cloud.google.com/storage/browser/{expanded}/.'.
            format(expanded=expanded_configs_path))
        if suggest_configs:
            log.status.Print(
                '\nYou can also find suggested base Kubernetes configuration files at '
                'gs://{suggested} or by visiting '
                'https://console.cloud.google.com/storage/browser/{suggested}/.'
                .format(suggested=suggested_configs_path))
Exemple #44
0
def WaitForOperations(operations_data,
                      http,
                      batch_url,
                      warnings,
                      errors,
                      progress_tracker=None,
                      timeout=None):
    """Blocks until the given operations are done or until a timeout is reached.

  Args:
    operations_data: A list of OperationData objects holding Operations to poll.
    http: An HTTP object.
    batch_url: The URL to which batch requests should be sent.
    warnings: An output parameter for capturing warnings.
    errors: An output parameter for capturing errors.
    progress_tracker: progress tracker to tick while waiting for operations to
                      finish.
    timeout: The maximum amount of time, in seconds, to wait for the
      operations to reach the DONE state.

  Yields:
    The resources pointed to by the operations' targetLink fields if
    the operation type is not delete. Only resources whose
    corresponding operations reach done are yielded.
  """
    timeout = timeout or _POLLING_TIMEOUT_SEC

    # Operation -> OperationData mapping will be used to reify
    # operation_service and resource_service from the
    # operation_service.Get(operation) response. This is necessary because
    # polling an operation returns only the response, but we also need the
    # operation details to know which service to poll for each unfinished
    # operation.
    operation_details = {}
    unfinished_operations = []
    for operation in operations_data:
        operation_details[operation.operation.selfLink] = operation
        unfinished_operations.append(operation.operation)

    responses = []
    start = time_util.CurrentTimeSec()
    sleep_sec = 0

    while unfinished_operations:
        if progress_tracker:
            progress_tracker.Tick()
        resource_requests = []
        operation_requests = []

        log.debug('Operations to inspect: %s', unfinished_operations)
        for operation in unfinished_operations:
            # Reify operation
            data = operation_details[operation.selfLink]
            project = data.project
            operation_service = data.operation_service
            resource_service = data.resource_service

            operation_type = operation_service.GetResponseType('Get')

            if operation.status == operation_type.StatusValueValuesEnum.DONE:
                # The operation has reached the DONE state, so we record any
                # problems it contains (if any) and proceed to get the target
                # resource if there were no problems and the operation is not
                # a deletion.

                _RecordProblems(operation, warnings, errors)

                # We shouldn't attempt to get the target resource if there was
                # anything wrong with the operation. Note that
                # httpErrorStatusCode is set only when the operation is not
                # successful.
                if (operation.httpErrorStatusCode and
                        operation.httpErrorStatusCode != 200):  # httplib.OK
                    continue

                # Just in case the server did not set httpErrorStatusCode but
                # the operation did fail, we check the "error" field.
                if operation.error:
                    continue

                target_link = operation.targetLink

                # We shouldn't get the target resource if the operation type
                # is delete because there will be no resource left.
                if not _IsDeleteOp(operation.operationType):
                    request = resource_service.GetRequestType('Get')(
                        project=project)
                    if operation.zone:
                        request.zone = path_simplifier.Name(operation.zone)
                    elif operation.region:
                        request.region = path_simplifier.Name(operation.region)
                    name_field = resource_service.GetMethodConfig(
                        'Get').ordered_params[-1]
                    setattr(request, name_field,
                            path_simplifier.Name(operation.targetLink))
                    resource_requests.append(
                        (resource_service, 'Get', request))

                log.status.write('{0} [{1}].\n'.format(
                    _HumanFrieldlyNameForOpPastTense(
                        operation.operationType).capitalize(), target_link))

            else:
                # The operation has not reached the DONE state, so we add a
                # get request to poll the operation.
                request = operation_service.GetRequestType('Get')(
                    operation=operation.name, project=project)
                if operation.zone:
                    request.zone = path_simplifier.Name(operation.zone)
                elif operation.region:
                    request.region = path_simplifier.Name(operation.region)
                operation_requests.append((operation_service, 'Get', request))

        requests = resource_requests + operation_requests
        if not requests:
            break

        responses, request_errors = batch_helper.MakeRequests(
            requests=requests, http=http, batch_url=batch_url)
        errors.extend(request_errors)

        unfinished_operations = []
        for response in responses:
            if isinstance(response, operation_type):
                unfinished_operations.append(response)
            else:
                yield response

        # If there are no more operations, we are done.
        if not unfinished_operations:
            break

        # Did we time out? If so, record the operations that timed out so
        # they can be reported to the user.
        if time_util.CurrentTimeSec() - start > timeout:
            log.debug('Timeout of %ss reached.', timeout)
            _RecordUnfinishedOperations(unfinished_operations, errors)
            break

        # Sleeps before trying to poll the operations again.
        sleep_sec += 1
        # Don't re-use sleep_sec, since we want to keep the same time increment
        sleep_time = min(sleep_sec, _MAX_TIME_BETWEEN_POLLS_SEC)
        log.debug('Sleeping for %ss.', sleep_time)
        time_util.Sleep(sleep_time)
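The polling delay above grows linearly and is capped at _MAX_TIME_BETWEEN_POLLS_SEC; a minimal sketch of the resulting schedule, assuming a hypothetical cap of 5 seconds:

_MAX_TIME_BETWEEN_POLLS_SEC = 5   # assumed value for illustration only
sleep_sec = 0
delays = []
for _ in range(8):
    sleep_sec += 1
    delays.append(min(sleep_sec, _MAX_TIME_BETWEEN_POLLS_SEC))
# delays == [1, 2, 3, 4, 5, 5, 5, 5]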
Exemple #45
0
    def _DoFreshInstall(self, e):
        """Do a reinstall of what we have based on a fresh download of the SDK.

    Args:
      e: snapshots.IncompatibleSchemaVersionError, The exception we got with
        information about the new schema version.

    Returns:
      bool, True if the update succeeded, False if it was cancelled.
    """
        self._EnsureNotDisabled()
        if os.environ.get('CLOUDSDK_REINSTALL_COMPONENTS'):
            # We are already reinstalling but got here somehow.  Something is very
            # wrong and we want to avoid the infinite loop.
            self._RaiseReinstallationFailedError()

        # Print out an arbitrary message that we wanted to show users for this
        # update.
        message = e.schema_version.message
        if message:
            self.__Write(log.status, msg=message, word_wrap=True)

        # We can decide that for some reason we just never want to update past this
        # version of the schema.
        if e.schema_version.no_update:
            return False

        answer = console_io.PromptContinue(
            message=
            '\nThe component manager must perform a self update before you '
            'can continue.  It and all components will be updated to their '
            'latest versions.')
        if not answer:
            return False

        self._CheckCWD()
        install_state = self._GetInstallState()
        self.__Write(log.status,
                     'Downloading and extracting updated components...\n')
        download_url = e.schema_version.url
        try:
            staging_state = install_state.CreateStagingFromDownload(
                download_url)
        except local_state.Error:
            log.error('An updated Cloud SDK failed to download')
            log.debug('Handling re-installation error', exc_info=True)
            self._RaiseReinstallationFailedError()

        # shell out to install script
        installed_component_ids = sorted(
            install_state.InstalledComponents().keys())
        env = dict(os.environ)
        env['CLOUDSDK_REINSTALL_COMPONENTS'] = ','.join(
            installed_component_ids)
        installer_path = os.path.join(staging_state.sdk_root, 'bin',
                                      'bootstrapping', 'install.py')
        p = subprocess.Popen([sys.executable, '-S', installer_path], env=env)
        ret_val = p.wait()
        if ret_val:
            self._RaiseReinstallationFailedError()

        self.__Write(log.status,
                     'Creating backup and activating new installation...')
        install_state.ReplaceWith(staging_state)

        self.__Write(log.status, '\nDone!\n')
        return True
def SubmitTraining(jobs_client,
                   job,
                   job_dir=None,
                   staging_bucket=None,
                   packages=None,
                   package_path=None,
                   scale_tier=None,
                   config=None,
                   module_name=None,
                   runtime_version=None,
                   stream_logs=None,
                   user_args=None):
    """Submit a training job."""
    region = properties.VALUES.compute.region.Get(required=True)
    staging_location = jobs_prep.GetStagingLocation(
        staging_bucket=staging_bucket, job_id=job, job_dir=job_dir)
    try:
        uris = jobs_prep.UploadPythonPackages(
            packages=packages,
            package_path=package_path,
            staging_location=staging_location)
    except jobs_prep.NoStagingLocationError:
        raise flags.ArgumentError(
            'If local packages are provided, the `--staging-bucket` or '
            '`--job-dir` flag must be given.')
    log.debug('Using {0} as trainer uris'.format(uris))

    scale_tier_enum = jobs_client.training_input_class.ScaleTierValueValuesEnum
    scale_tier = scale_tier_enum(scale_tier) if scale_tier else None

    job = jobs_client.BuildTrainingJob(
        path=config,
        module_name=module_name,
        job_name=job,
        trainer_uri=uris,
        region=region,
        job_dir=job_dir.ToUrl() if job_dir else None,
        scale_tier=scale_tier,
        user_args=user_args,
        runtime_version=runtime_version)

    project_ref = resources.REGISTRY.Parse(
        properties.VALUES.core.project.Get(required=True),
        collection='ml.projects')
    job = jobs_client.Create(project_ref, job)
    if not stream_logs:
        PrintSubmitFollowUp(job.jobId, print_follow_up_message=True)
        return job
    else:
        PrintSubmitFollowUp(job.jobId, print_follow_up_message=False)

    log_fetcher = stream.LogFetcher(
        filters=log_utils.LogFilters(job.jobId),
        polling_interval=properties.VALUES.ml_engine.polling_interval.GetInt(),
        continue_interval=_CONTINUE_INTERVAL,
        continue_func=log_utils.MakeContinueFunction(job.jobId))

    printer = resource_printer.Printer(log_utils.LOG_FORMAT, out=log.err)
    with execution_utils.RaisesKeyboardInterrupt():
        try:
            printer.Print(log_utils.SplitMultiline(log_fetcher.YieldLogs()))
        except KeyboardInterrupt:
            log.status.Print('Received keyboard interrupt.\n')
            log.status.Print(
                _FOLLOW_UP_MESSAGE.format(job_id=job.jobId,
                                          project=project_ref.Name()))
        except exceptions.HttpError as err:
            log.status.Print('Polling logs failed:\n{}\n'.format(str(err)))
            log.info('Failure details:', exc_info=True)
            log.status.Print(
                _FOLLOW_UP_MESSAGE.format(job_id=job.jobId,
                                          project=project_ref.Name()))

    job_ref = resources.REGISTRY.Parse(
        job.jobId,
        params={'projectsId': properties.VALUES.core.project.GetOrFail},
        collection='ml.projects.jobs')
    job = jobs_client.Get(job_ref)

    return job
Exemple #47
0
 def Wrapper(*args, **kwds):
   try:
     return func(*args, **kwds)
   # pylint:disable=bare-except
   except:
     log.debug('Exception captured in %s', func.func_name, exc_info=True)
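Wrapper above is presumably the inner function of an exception-swallowing decorator; a hypothetical sketch of how such a decorator could be assembled and applied, using the stdlib logging module in place of the SDK's log wrapper:

import functools
import logging

def _SwallowAndLogExceptions(func):
  @functools.wraps(func)
  def Wrapper(*args, **kwds):
    try:
      return func(*args, **kwds)
    # pylint:disable=broad-except
    except Exception:
      logging.debug('Exception captured in %s', func.__name__, exc_info=True)
  return Wrapper

@_SwallowAndLogExceptions
def ReportMetricsSafely():
  raise RuntimeError('swallowed and logged, never propagated')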
Exemple #48
0
def Exec(args,
         env=None,
         no_exit=False,
         out_func=None,
         err_func=None,
         in_str=None,
         **extra_popen_kwargs):
    """Emulates the os.exec* set of commands, but uses subprocess.

  This executes the given command, waits for it to finish, and then exits this
  process with the exit code of the child process.

  Args:
    args: [str], The arguments to execute.  The first argument is the command.
    env: {str: str}, An optional environment for the child process.
    no_exit: bool, True to just return the exit code of the child instead of
      exiting.
    out_func: str->None, a function to call with the stdout of the executed
      process. This can be e.g. log.file_only_logger.debug or log.out.write.
    err_func: str->None, a function to call with the stderr of the executed
      process. This can be e.g. log.file_only_logger.debug or log.err.write.
    in_str: bytes or str, input to send to the subprocess' stdin.
    **extra_popen_kwargs: Any additional kwargs will be passed through directly
      to subprocess.Popen

  Returns:
    int, The exit code of the child if no_exit is True, else this method does
    not return.

  Raises:
    PermissionError: if user does not have execute permission for cloud sdk bin
    files.
    InvalidCommandError: if the command entered cannot be found.
  """
    log.debug('Executing command: %s', args)
    # We use subprocess instead of execv because windows does not support process
    # replacement.  The result of execv on windows is that a new process is
    # started and the original is killed.  When running in a shell, the prompt
    # returns as soon as the parent is killed even though the child is still
    # running.  subprocess waits for the new process to finish before returning.
    env = encoding.EncodeEnv(_GetToolEnv(env=env))

    process_holder = _ProcessHolder()
    with _ReplaceSignal(signal.SIGTERM, process_holder.Handler):
        with _ReplaceSignal(signal.SIGINT, process_holder.Handler):
            if out_func:
                extra_popen_kwargs['stdout'] = subprocess.PIPE
            if err_func:
                extra_popen_kwargs['stderr'] = subprocess.PIPE
            if in_str:
                extra_popen_kwargs['stdin'] = subprocess.PIPE
            try:
                if args and isinstance(args, list):
                    # On Python 2.x on Windows, the first arg can't be unicode.
                    # We encode it anyway because there is really nothing else
                    # we can do if that happens.
                    # https://bugs.python.org/issue19264
                    args = [encoding.Encode(a) for a in args]
                p = subprocess.Popen(args, env=env, **extra_popen_kwargs)
            except OSError as err:
                if err.errno == errno.EACCES:
                    raise PermissionError(err.strerror)
                elif err.errno == errno.ENOENT:
                    raise InvalidCommandError(args[0])
                raise
            process_holder.process = p

            if isinstance(in_str, six.text_type):
                in_str = in_str.encode('utf-8')
            stdout, stderr = list(
                map(encoding.Decode, p.communicate(input=in_str)))

            if out_func:
                out_func(stdout)
            if err_func:
                err_func(stderr)
            ret_val = p.returncode

    if no_exit:
        return ret_val
    sys.exit(ret_val)
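A hedged usage sketch of Exec: capture the child's stdout via out_func and get the exit code back instead of exiting the parent process (the command is a placeholder):

captured_output = []
exit_code = Exec(
    ['echo', 'hello'],
    no_exit=True,                     # return the exit code instead of exiting
    out_func=captured_output.append)  # receives the decoded stdout as a string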
    def WaitForComputeOperations(self,
                                 project,
                                 zone,
                                 operation_ids,
                                 message,
                                 timeout_s=1200,
                                 poll_period_s=5):
        """Poll Compute Operations until their status is done or timeout reached.

    Args:
      project: project on which the operation is performed
      zone: zone on which the operation is performed
      operation_ids: list/set of ids of the compute operations to wait for
      message: str, message to display to user while polling.
      timeout_s: number, seconds to poll with retries before timing out.
      poll_period_s: number, delay in seconds between requests.

    Returns:
      Operations: list of the last successful operations.getrequest for each op.

    Raises:
      Error: if the operation times out or finishes with an error.
    """
        operation_ids = deque(operation_ids)
        operations = {}
        errors = []
        with console_io.ProgressTracker(message, autotick=True):
            start_time = time.clock()
            ops_to_retry = []
            while timeout_s > (time.clock() - start_time) and operation_ids:
                op_id = operation_ids.popleft()
                try:
                    operation = self.GetComputeOperation(project, zone, op_id)
                    operations[op_id] = operation
                    if not self.IsComputeOperationFinished(operation):
                        # Operation is still in progress.
                        ops_to_retry.append(op_id)
                        continue

                    log.debug('Operation %s succeeded after %.3f seconds',
                              operation, (time.clock() - start_time))
                    error = self.GetOperationError(operation)
                    if error:
                        # Operation Failed!
                        msg = 'Operation [{0}] finished with error: {1}'.format(
                            op_id, error)
                        log.debug(msg)
                        errors.append(msg)
                except apitools_exceptions.HttpError as error:
                    log.debug('GetComputeOperation failed: %s', error)
                    # Keep trying until we timeout in case error is transient.
                    # TODO(user): add additional backoff if server is returning 500s
                if not operation_ids and ops_to_retry:
                    operation_ids = deque(ops_to_retry)
                    ops_to_retry = []
                    time.sleep(poll_period_s)

        operation_ids.extend(ops_to_retry)
        for op_id in operation_ids:
            errors.append('Operation [{0}] is still running'.format(op_id))
        if errors:
            raise util.Error(linesep.join(errors))

        return operations.values()
  def Run(self, args):
    """See ssh_utils.BaseSSHCommand.Run."""
    holder = base_classes.ComputeApiHolder(self.ReleaseTrack())
    client = holder.client

    ssh_helper = ssh_utils.BaseSSHHelper()
    ssh_helper.Run(args)
    ssh_helper.keys.EnsureKeysExist(args.force_key_file_overwrite,
                                    allow_passphrase=True)

    ssh_config_file = files.ExpandHomeDir(
        args.ssh_config_file or ssh.PER_USER_SSH_CONFIG_FILE)

    instances = None
    try:
      existing_content = files.ReadFileContents(ssh_config_file)
    except files.Error as e:
      existing_content = ''
      log.debug('SSH Config File [{0}] could not be opened: {1}'
                .format(ssh_config_file, e))

    if args.remove:
      compute_section = ''
      try:
        new_content = _RemoveComputeSection(existing_content)
      except MultipleComputeSectionsError:
        raise MultipleComputeSectionsError(ssh_config_file)
    else:
      ssh_helper.EnsureSSHKeyIsInProject(
          client, ssh.GetDefaultSshUsername(warn_on_account_user=True))
      instances = list(self.GetInstances(client))
      if instances:
        compute_section = _BuildComputeSection(
            instances, ssh_helper.keys.key_file, ssh.KnownHosts.DEFAULT_PATH)
      else:
        compute_section = ''

    if existing_content and not args.remove:
      try:
        new_content = _MergeComputeSections(existing_content, compute_section)
      except MultipleComputeSectionsError:
        raise MultipleComputeSectionsError(ssh_config_file)
    elif not existing_content:
      new_content = compute_section

    if args.dry_run:
      log.out.write(new_content or '')
      return

    if new_content != existing_content:
      if (os.path.exists(ssh_config_file) and
          platforms.OperatingSystem.Current() is not
          platforms.OperatingSystem.WINDOWS):
        ssh_config_perms = os.stat(ssh_config_file).st_mode
        # From `man 5 ssh_config`:
        #    this file must have strict permissions: read/write for the user,
        #    and not accessible by others.
        # We check that here:
        if not (
            ssh_config_perms & stat.S_IRWXU == stat.S_IWUSR | stat.S_IRUSR and
            ssh_config_perms & stat.S_IWGRP == 0 and
            ssh_config_perms & stat.S_IWOTH == 0):
          log.warning(
              'Invalid permissions on [{0}]. Please change to match ssh '
              'requirements (see man 5 ssh).'.format(ssh_config_file))
      # TODO(b/36050483): This write will not work very well if there is
      # a lot of write contention for the SSH config file. We should
      # add a function to do a better job at "atomic file writes".
      files.WriteFileContents(ssh_config_file, new_content, private=True)

    if compute_section:
      log.out.write(textwrap.dedent("""\
          You should now be able to use ssh/scp with your instances.
          For example, try running:

            $ ssh {alias}

          """.format(alias=_CreateAlias(instances[0]))))

    elif not instances and not args.remove:
      log.warning(
          'No host aliases were added to your SSH configs because you do not '
          'have any instances. Try running this command again after creating '
          'some instances.')
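The permission check above requires the SSH config to be readable and writable only by its owner, with no group or other write access; a small worked example of that bitmask logic (standalone, with assumed modes):

import stat

def _HasStrictSshConfigPerms(mode):
  # User bits must be exactly read+write, and neither group nor other may
  # have write permission (read by group/other is tolerated, as above).
  return (mode & stat.S_IRWXU == stat.S_IWUSR | stat.S_IRUSR and
          mode & stat.S_IWGRP == 0 and
          mode & stat.S_IWOTH == 0)

assert _HasStrictSshConfigPerms(0o600)
assert not _HasStrictSshConfigPerms(0o700)   # user execute bit set
assert not _HasStrictSshConfigPerms(0o620)   # group-writable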
Exemple #51
0
 def _RunCommand(self, cmd, dry_run):
   log.debug('Executing %s', cmd)
   if dry_run:
     log.out.Print(' '.join(cmd))
   else:
     subprocess.check_call(cmd)
Exemple #52
0
def read_or_create_download_tracker_file(source_object_resource,
                                         destination_url,
                                         existing_file_size=None,
                                         slice_start_byte=None,
                                         component_number=None,
                                         total_components=None,
                                         create=True):
    """Checks for a download tracker file and creates one if it does not exist.

  For normal downloads, if the tracker file exists, existing_file_size bytes
  are presumed to have already been downloaded from the server. Therefore,
  existing_file_size becomes the download start point.

  For sliced downloads, the number of bytes previously retrieved from the server
  cannot be determined from existing_file_size. Therefore, it is retrieved
  from the tracker file.

  Args:
    source_object_resource (resource_reference.ObjectResource): Needed for
      object etag and generation.
    destination_url (storage_url.StorageUrl): Destination URL for tracker file.
    existing_file_size (int): Amount of file on disk that already exists.
    slice_start_byte (int|None): Start byte to use if we cannot find a
      matching tracker file for a download slice.
    component_number (int|None): The download component number to find the start
      point for. Indicates part of a multi-component download.
    total_components (int|None): The number of components in a sliced download.
      Indicates this is the master tracker for a multi-component operation.
    create (bool): Creates tracker file if one could not be found.

  Returns:
    tracker_file_path (str|None): The path to the tracker file, if one was used.
    download_start_byte (int|None): The first byte that still needs to be
      downloaded, if not a sliced download.

  Raises:
    ValueCannotBeDeterminedError: Source object resource does not have
      necessary metadata to decide on download start byte.
  """
    if not source_object_resource.etag:
        raise errors.ValueCannotBeDeterminedError(
            'Source object resource is missing etag.')
    if total_components and (slice_start_byte is not None
                             or component_number is not None):
        raise ValueError(
            'total_components indicates this is the master tracker file for a'
            ' multi-component operation. slice_start_byte and component_number'
            ' cannot be present since this is not for an individual component.'
        )

    if component_number:
        download_name_for_logger = '{} component {}'.format(
            destination_url.object_name, component_number)
        tracker_file_type = TrackerFileType.DOWNLOAD_COMPONENT
    else:
        download_name_for_logger = destination_url.object_name
        if total_components:
            tracker_file_type = TrackerFileType.SLICED_DOWNLOAD
        else:
            tracker_file_type = TrackerFileType.DOWNLOAD

    tracker_file_path = get_tracker_file_path(
        destination_url, tracker_file_type, component_number=component_number)
    tracker_file = None
    # Check to see if we already have a matching tracker file.
    try:
        tracker_file = files.FileReader(tracker_file_path)
        if tracker_file_type is TrackerFileType.DOWNLOAD:
            etag_value = tracker_file.readline().rstrip('\n')
            if etag_value == source_object_resource.etag:
                log.debug(
                    'Found tracker file starting at byte {} for {}.'.format(
                        existing_file_size, download_name_for_logger))
                return tracker_file_path, existing_file_size
        else:
            component_data = json.loads(tracker_file.read())
            if (component_data['etag'] == source_object_resource.etag
                    and component_data['generation']
                    == source_object_resource.generation):
                if (tracker_file_type is TrackerFileType.SLICED_DOWNLOAD
                        and component_data['total_components']
                        == total_components):
                    log.debug(
                        'Found tracker file for sliced download {}.'.format(
                            download_name_for_logger))
                    return tracker_file_path, None
                elif tracker_file_type is TrackerFileType.DOWNLOAD_COMPONENT:
                    # Component of a sliced download; resume from its
                    # recorded start byte.
                    start_byte = int(component_data['download_start_byte'])
                    log.debug('Found tracker file starting at byte {} for {}.'.
                              format(start_byte, download_name_for_logger))
                    return tracker_file_path, start_byte

    except files.MissingFileError:
        # Cannot read from file.
        pass

    finally:
        if tracker_file:
            tracker_file.close()

    log.debug(
        'No matching tracker file for {}.'.format(download_name_for_logger))

    start_byte = 0
    if create:
        if tracker_file_type is TrackerFileType.DOWNLOAD:
            _write_tracker_file(tracker_file_path,
                                source_object_resource.etag + '\n')
        elif tracker_file_type is TrackerFileType.DOWNLOAD_COMPONENT:
            write_tracker_file_with_component_data(
                tracker_file_path,
                source_object_resource,
                download_start_byte=slice_start_byte)
            start_byte = slice_start_byte
        elif tracker_file_type is TrackerFileType.SLICED_DOWNLOAD:
            # Delete component tracker files to reset full sliced download.
            delete_download_tracker_files(destination_url)
            write_tracker_file_with_component_data(
                tracker_file_path,
                source_object_resource,
                total_components=total_components)
            start_byte = None

    return tracker_file_path, start_byte
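For reference, the tracker file contents implied by the reads above: a plain etag line for whole-object downloads, and a small JSON document for component and sliced-download trackers. A hedged illustration with placeholder values (the field names come from the json.loads access in the code; the values are invented):

import json

# TrackerFileType.DOWNLOAD: a single line holding the source object's etag.
download_tracker_contents = 'CJqt...example-etag\n'

# TrackerFileType.DOWNLOAD_COMPONENT: JSON with the component's resume point.
component_tracker_contents = json.dumps({
    'etag': 'CJqt...example-etag',
    'generation': 1234567890,
    'download_start_byte': 1048576,
})

# TrackerFileType.SLICED_DOWNLOAD: JSON describing the overall operation.
sliced_master_tracker_contents = json.dumps({
    'etag': 'CJqt...example-etag',
    'generation': 1234567890,
    'total_components': 4,
})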
Exemple #53
0
    def __init__(self, ga_tid=_GA_TID):
        """Initialize a new MetricsCollector.

    This should only be invoked through the static GetCollector() function or
    the static ResetCollectorInstance() function.

    Args:
      ga_tid: The Google Analytics tracking ID to use for metrics collection.
              Defaults to _GA_TID.
    """
        current_platform = platforms.Platform.Current()
        self._user_agent = _MetricsCollector._GetUserAgent(current_platform)
        self._async_popen_args = current_platform.AsyncPopenArgs()
        self._project_ids = {}

        hostname = socket.gethostname()
        install_type = 'Google' if hostname.endswith(
            '.google.com') else 'External'
        cid = _MetricsCollector._GetCID()

        # Table of common params to send to both GA and CSI.
        # First column is GA name, second column is CSI name, third is the value.
        common_params = [
            ('cd1', 'release_channel',
             config.INSTALLATION_CONFIG.release_channel),
            ('cd2', 'install_type', install_type),
            ('cd3', 'environment', properties.GetMetricsEnvironment()),
            ('cd4', 'interactive',
             console_io.IsInteractive(error=True, heuristic=True)),
            ('cd5', 'python_version', platform.python_version()),
            # cd6 passed as argument to _GAEvent - cd6 = Flag Names
            ('cd7', 'environment_version',
             properties.VALUES.metrics.environment_version.Get()),
            # cd8 passed as argument to _GAEvent - cd8 = Error
            # cd9 passed as argument to _GAEvent - cd9 = Error Extra Info
        ]

        self._ga_event_params = [('v', '1'), ('tid', ga_tid), ('cid', cid),
                                 ('t', 'event')]
        self._ga_event_params.extend([(param[0], param[2])
                                      for param in common_params])
        self._ga_events = []

        self._ga_timing_params = [('v', '1'), ('tid', ga_tid), ('cid', cid),
                                  ('t', 'timing')]
        self._ga_timing_params.extend([(param[0], param[2])
                                       for param in common_params])

        self._csi_params = [('s', _CSI_ID), ('v', '2'),
                            ('rls', config.CLOUD_SDK_VERSION), ('c', cid)]
        self._csi_params.extend([(param[1], param[2])
                                 for param in common_params])
        self._timer = _CommandTimer()

        self._metrics = []

        # Tracking the level so we can only report metrics for the top level action
        # (and not other actions executed within an action). Zero is the top level.
        self._action_level = 0

        log.debug('Metrics collector initialized...')
Exemple #54
0
 def StatusUpdate(unused_result, unused_state):
     log.debug('Retrying request...')