Example #1
  def ReadObject(self, object_ref):
    """Read a file from the given Cloud Storage bucket.

    Args:
      object_ref: storage_util.ObjectReference, The object to read from.

    Raises:
      BadFileException: If the file read is not successful.

    Returns:
      file-like object containing the data read.
    """
    data = cStringIO.StringIO()
    download = transfer.Download.FromStream(data)
    get_req = self.messages.StorageObjectsGetRequest(
        bucket=object_ref.bucket,
        object=object_ref.name)

    log.info('Reading [%s]', object_ref)
    try:
      self.client.objects.Get(get_req, download=download)
    except api_exceptions.HttpError as err:
      raise exceptions.BadFileException(
          'Could not read [{object_}]: {err}. Please retry.'.format(
              object_=object_ref, err=err))

    data.seek(0)
    return data
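
A minimal, self-contained sketch of the buffering pattern used above (no Cloud Storage calls; the writer callback is a stand-in for the Apitools download):

import io

def read_into_stream(writer):
  """Buffer whatever `writer` produces and return a rewound file-like object."""
  data = io.BytesIO()
  writer(data)  # stand-in for transfer.Download.FromStream + objects.Get
  data.seek(0)  # rewind so the caller reads from the beginning
  return data

# Hypothetical usage:
stream = read_into_stream(lambda f: f.write(b'hello world'))
print(stream.read())  # b'hello world'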
Example #2
  def __init__(self, project, bucket_name,
               tr_client, tr_messages, storage_client,
               clock=datetime.datetime.now):
    """Construct a ResultsBucketOps object to be used with a single matrix run.

    Args:
      project: string containing the Google Developers Console project id.
      bucket_name: string containing the name of the GCS bucket.
      tr_client: ToolResults API client library generated by Apitools.
      tr_messages: ToolResults API messages library generated by Apitools.
      storage_client: Cloud Storage API client library generated by Apitools.
      clock: injected function which will return a datetime object to be used
        as a timestamp for the test's results storage in GCS. We default to
        local time so the timestamp feels 'normal' to the user.
    """
    self._project = project
    self._storage_client = storage_client

    # Get a current timestamp string in the format YYYY-MM-DD_hh:mm:ss.sss
    self._timestamp = clock().isoformat('_')[:-3]
    log.info('Test timestamp is {t}'.format(t=self._timestamp))

    # If the user supplied a results bucket, make sure it exists. Otherwise,
    # call the SettingsService to get the project's existing default bucket.
    if bucket_name:
      self.EnsureBucketExists(bucket_name)
    else:
      bucket_name = self._GetDefaultBucket(tr_client, tr_messages)

    self._results_bucket = bucket_name
    self.gcs_results_root = ('gs://{b}/{t}/'
                             .format(b=bucket_name, t=self._timestamp))
    self._gcs_results_url = (
        'https://console.developers.google.com/storage/browser/{b}/{t}/'
        .format(b=bucket_name, t=self._timestamp))
Example #3
def GetArgsFromArgFile(argspec, all_test_args_set):
  """Loads a group of test args from an optional user-supplied arg file.

  Args:
    argspec: string containing an ARG_FILE:ARG_GROUP_NAME pair, where ARG_FILE
      is the path to a file containing groups of test arguments in yaml format,
      and ARG_GROUP_NAME is a yaml object name of a group of arg:value pairs.
    all_test_args_set: a set of strings for every gcloud-test argument. Used
      for validation.

  Returns:
    A dictionary created from the file which maps arg names to arg values.

  Raises:
    ToolException: If the argument name is not a gcloud test arg.
    InvalidArgException: If an argument has an invalid value or no value.
  """
  if argspec is None:
    return {}

  arg_file, group_name = _SplitArgFileAndGroup(argspec)
  try:
    all_arg_groups = _ReadArgGroupsFromFile(arg_file)
  except IOError as err:
    raise exceptions.BadFileException(
        'Error reading argument file [{f}]: {e}'.format(f=arg_file, e=err))

  args_from_file = {}
  _MergeArgGroupIntoArgs(args_from_file, group_name, all_arg_groups,
                         all_test_args_set)
  log.info('Args loaded from file: ' + str(args_from_file))
  return args_from_file
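
As a rough illustration of the ARG_FILE:ARG_GROUP_NAME idea, here is a hedged, self-contained sketch that loads one named group from a YAML file (the real _SplitArgFileAndGroup and _MergeArgGroupIntoArgs also validate names and support group inclusion, which is omitted here):

import yaml  # PyYAML

def load_arg_group(argspec):
  """Split 'path/to/args.yaml:group-name' and return that group's arg dict."""
  arg_file, group_name = argspec.rsplit(':', 1)
  with open(arg_file) as f:
    all_groups = yaml.safe_load(f) or {}
  return all_groups.get(group_name, {})

# Hypothetical arg file contents:
#   smoke-tests:
#     type: robo
#     timeout: 5m
# load_arg_group('args.yaml:smoke-tests') -> {'type': 'robo', 'timeout': '5m'}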
Example #4
def Fingerprint(path, params):
    """Check for a Python app.

  Args:
    path: (str) Application path.
    params: (ext_runtime.Params) Parameters passed through to the
      fingerprinters.

  Returns:
    (PythonConfigurator or None) Returns a module if the path contains a
    python app.
  """
    log.info("Checking for Python Compat.")

    # The only way we select these runtimes is if either the user has specified
    # it or a matching runtime is specified in the app.yaml.
    if not params.runtime and (not params.appinfo or params.appinfo.GetEffectiveRuntime() not in ALLOWED_RUNTIME_NAMES):
        return None

    if params.appinfo:
        runtime = params.appinfo.GetEffectiveRuntime()
    else:
        runtime = params.runtime

    log.info('Python Compat matches ([{0}] specified in "runtime" field)'.format(runtime))
    return PythonConfigurator(path, params, runtime)
Example #5
def _GetSourceContextsForUpload(source_dir):
  """Gets source context file information.

  Args:
    source_dir: str, path to the service's source directory
  Returns:
    A dict of filename to (str) source context file contents.
  """
  source_contexts = {}
  # Error message in case of failure.
  m = ('Could not generate [{name}]: {error}\n'
       'Stackdriver Debugger may not be configured or enabled on this '
       'application. See https://cloud.google.com/debugger/ for more '
       'information.')
  try:
    contexts = context_util.CalculateExtendedSourceContexts(source_dir)
    source_contexts[context_util.EXT_CONTEXT_FILENAME] = json.dumps(contexts)
  except context_util.GenerateSourceContextError as e:
    log.info(m.format(name=context_util.EXT_CONTEXT_FILENAME, error=e))
    # It's OK if source contexts can't be found, we just stop looking.
    return source_contexts
  try:
    context = context_util.BestSourceContext(contexts)
    source_contexts[context_util.CONTEXT_FILENAME] = json.dumps(context)
  except KeyError as e:
    log.info(m.format(name=context_util.CONTEXT_FILENAME, error=e))
  return source_contexts
Example #6
  def __init__(self, collection=None, api_version=None, param=None, **kwargs):
    """Constructor.

    Args:
      collection: The resource collection name.
      api_version: The API version for collection, None for the default version.
      param: The updated parameter column name.
      **kwargs: Base class kwargs.
    """
    self.api_version = api_version
    if collection:
      self.collection_info = resources.REGISTRY.GetCollectionInfo(
          collection, api_version=api_version)
      params = self.collection_info.GetParams('')
      log.info(u'cache collection=%s api_version=%s params=%s' % (
          collection, self.collection_info.api_version, params))
      parameters = [resource_cache.Parameter(name=name, column=column)
                    for column, name in enumerate(params)]
      parse = resources.REGISTRY.Parse

      def _Parse(string):
        return parse(string, collection=collection, enforce_collection=False,
                     validate=False).AsList()

      self.parse = _Parse
    else:
      params = []
      parameters = []

    super(ResourceCompleter, self).__init__(
        collection=collection,
        columns=len(params),
        column=params.index(param) if param else 0,
        parameters=parameters,
        **kwargs)
Example #7
  def __init__(self, project, bucket_name, unique_obj_name,
               tr_client, tr_messages, storage_client):
    """Construct a ResultsBucketOps object to be used with a single matrix run.

    Args:
      project: string containing the Google Developers Console project id.
      bucket_name: string containing the name of the GCS bucket.
      unique_obj_name: the name of a unique GCS object to hold the raw test
        results within the supplied bucket_name.
      tr_client: ToolResults API client library generated by Apitools.
      tr_messages: ToolResults API messages library generated by Apitools.
      storage_client: Cloud Storage API client library generated by Apitools.
    """
    self._project = project
    self._storage_client = storage_client
    self._gcs_object_name = unique_obj_name

    # If the user supplied a results bucket, make sure it exists. Otherwise,
    # call the SettingsService to get the project's existing default bucket.
    if bucket_name:
      self.EnsureBucketExists(bucket_name)
    else:
      bucket_name = self._GetDefaultBucket(tr_client, tr_messages)

    self._results_bucket = bucket_name
    self._gcs_results_url = (
        'https://console.developers.google.com/storage/browser/{b}/{t}/'
        .format(b=bucket_name, t=self._gcs_object_name))
    self.gcs_results_root = ('gs://{b}/{t}/'
                             .format(b=bucket_name, t=self._gcs_object_name))
    log.info('Raw results root path is: [{0}]'.format(self.gcs_results_root))
Example #8
def Fingerprint(path, params):
  """Check for a Go app.

  Args:
    path: (str) Application path.
    params: (fingerprinting.Params) Parameters passed through to the
      fingerprinters.

  Returns:
    (GoConfigurator or None) Returns a module if the path contains a
    Go app.
  """
  log.info('Checking for Go.')

  # Test #1 - are there any '*.go' files at or below 'path'?
  go_files = _GoFiles(path)
  if not go_files:
    return None

  # Test #2 - check that one of these files has "package main" and "func main".
  main_found = False
  for f in go_files:
    if _FindMain(f):
      log.info('Found Go main in %s', f)
      main_found = True
      break
  if not main_found:
    return None

  return GoConfigurator(path, params)
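
The two tests rely on helpers that are not shown above; a rough sketch of what they could look like follows (names match the calls above, but the bodies here are assumptions, not the real implementation):

import os
import re

def _GoFiles(path):
  """Return the paths of all *.go files at or below path."""
  go_files = []
  for root, _, files in os.walk(path):
    go_files.extend(os.path.join(root, f) for f in files if f.endswith('.go'))
  return go_files

def _FindMain(filename):
  """Return True if the file declares 'package main' and a 'func main'."""
  with open(filename) as f:
    contents = f.read()
  return bool(re.search(r'^package\s+main\b', contents, re.MULTILINE) and
              re.search(r'^func\s+main\s*\(', contents, re.MULTILINE))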
Example #9
 def ExtendValues(self, values, perm, selected):
   """Add selected values to a template and extend the selected rows."""
   vals = [row[self.column] for row in selected]
   log.info('cache collection={} adding values={}'.format(
       self.collection, vals))
   v = [perm + [val] for val in vals]
   values.extend(v)
Example #10
def RewriteFilter(args, message=None, frontend_fields=None):
  """Rewrites args.filter into client and server filter expression strings.

  Usage:

    args.filter, request_filter = flags.RewriteFilter(args)

  Args:
    args: The parsed args namespace containing the filter expression args.filter
      and display_info.
    message: The response resource message proto for the request.
    frontend_fields: A set of dotted key names supported client side only.

  Returns:
    A (client_filter, server_filter) tuple of filter expression strings.
    None means the filter does not need to be applied on the respective
    client/server side.
  """
  if not args.filter:
    return None, None
  display_info = args.GetDisplayInfo()
  defaults = resource_projection_spec.ProjectionSpec(
      symbols=display_info.transforms,
      aliases=display_info.aliases)
  client_filter, server_filter = filter_rewrite.Rewriter(
      message=message, frontend_fields=frontend_fields).Rewrite(
          args.filter, defaults=defaults)
  log.info('client_filter=%r server_filter=%r', client_filter, server_filter)
  return client_filter, server_filter
Example #11
  def Shutdown(self, wait_secs=10):
    """Shuts down the broker server."""
    if self._process:
      try:
        exec_utils.KillSubprocess(self._process)
        self._process = None
        if self._comm_thread:
          self._comm_thread.join()
          self._comm_thread = None
      except RuntimeError as e:
        log.warn('Failed to shutdown broker: %s' % e)
        raise BrokerError('Broker failed to shutdown: %s' % e)
    else:
      # Invoke the /shutdown handler.
      try:
        self._SendJsonRequest('POST', '/shutdown')
      except IOError:
        raise BrokerError('Broker failed to shutdown: '
                          'failed to send shutdown request')
      except httplib.HTTPException:
        # We may get an exception reading the response to the shutdown request,
        # because the shutdown may preempt the response.
        pass

    if not _Await(lambda: not self.IsRunning(), wait_secs):
      log.warn('Failed to shutdown broker: still running after {0}s'.format(
          wait_secs))
      raise BrokerError('Broker failed to shutdown: timed-out')

    log.info('Shutdown broker.')
Example #12
  def SelectTable(self, table, row_template, parameter_info, aggregations=None):
    """Returns the list of rows matching row_template in table.

    Refreshes expired tables by calling the updater.

    Args:
      table: The persistent table object.
      row_template: A row template to match in Select().
      parameter_info: A ParameterInfo object for accessing parameter values in
        the program state.
      aggregations: A list of aggregation Parameter objects.

    Returns:
      The list of rows matching row_template in table.
    """
    if not aggregations:
      aggregations = []
    log.info('cache table=%s aggregations=[%s]',
             table.name,
             ' '.join(['{}={}'.format(x.name, x.value) for x in aggregations]))
    try:
      return table.Select(row_template)
    except exceptions.CacheTableExpired:
      rows = self.Update(parameter_info, aggregations)
      table.DeleteRows()
      table.AddRows(rows)
      table.Validate()
      return table.Select(row_template, ignore_expiration=True)
Example #13
def ProgressHandler(action, func_with_output_lines):
  """Handles the streaming output of the docker client.

  Args:
    action: str, action verb for logging purposes, for example "push" or "pull".
    func_with_output_lines: a function streaming output from the docker client.
  Raises:
    Error: if a problem occurred during the operation with an explanation
           string if possible.
  """
  for line in func_with_output_lines():
    line = line.strip()
    if not line:
      continue
    log_record = json.loads(line)
    if 'status' in log_record:
      feedback = log_record['status'].strip()
      if 'progress' in log_record:
        feedback += ': ' + log_record['progress'] + '\r'
      else:
        feedback += '\n'
      log.info(feedback)
    elif 'error' in log_record:
      error = log_record['error'].strip()
      log.error(error)
      raise Error('Unable to %s the image to/from the registry: "%s"' %
                  (action, error))
    elif 'errorDetail' in log_record:
      error_detail = log_record['errorDetail'] or 'Unknown Error'
      raise Error('Unable to %s the image to/from the registry: "%s"'
                  % (action, error_detail))
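
For illustration, one docker status line and how the handler above renders it (the JSON shape is typical of docker progress records; exact fields vary by docker version):

import json

line = '{"status": "Pushing", "progress": "[==>   ] 12.3MB/45.6MB", "id": "f1b5933fe4b5"}'
log_record = json.loads(line)
feedback = log_record['status'].strip()
if 'progress' in log_record:
  feedback += ': ' + log_record['progress'] + '\r'
print(repr(feedback))  # 'Pushing: [==>   ] 12.3MB/45.6MB\r'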
Example #14
 def _DownloadLogs(self, valid_dates, sentinel, output_file, mode):
   """Common utility method for both normal and append modes."""
   # A temporary file is used because the API for requesting logs
   # gives us the newest logs first.  We write them in this order to
   # the temporary file and then read the temporary file backwards,
   # copying to the output file line by line (special-casing null
   # bytes).
   tf = tempfile.TemporaryFile()
   last_offset = None
   try:
     while True:
       new_offset = self.RequestLogLines(
           tf, last_offset, valid_dates, sentinel)
       if not new_offset or new_offset == last_offset:
         break
       last_offset = new_offset
     if output_file == '-':
       of = log.out
     else:
       try:
         of = open(output_file, mode)
       except IOError as e:
         raise CannotOpenFileError(output_file, e)
     try:
       line_count = CopyReversedLines(tf, of)
     finally:
       of.flush()
       if of is not log.out:
         of.close()
   finally:
     tf.close()
   log.info('Copied %d records.', line_count)
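
CopyReversedLines is not shown above; a simplified sketch of its contract follows (the real helper streams the file backwards and special-cases null bytes, while this version just reads everything into memory):

def CopyReversedLines(input_file, output_file):
  """Copy lines from input_file to output_file in reverse order.

  Returns the number of lines copied.
  """
  input_file.seek(0)
  lines = input_file.readlines()
  for line in reversed(lines):
    output_file.write(line)
  return len(lines)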
Example #15
def _ProcessStreamingOutput(line):
  """Handles the streaming output of the docker client.

  Args:
    line: a single line of streamed output.
  Raises:
    Error: if a problem occurred during the operation with an explanation
           string if possible.
  """
  line = line.strip()
  if not line:
    return
  log_record = json.loads(line)
  if 'status' in log_record:
    feedback = log_record['status'].strip()
    if 'progress' in log_record:
      feedback += ': ' + log_record['progress'] + '\r'
    else:
      feedback += '\n'
    log.info(feedback)
  elif 'error' in log_record:
    error = log_record['error'].strip()
    log.error(error)
    raise Error('Unable to push the image to the registry: "%s"' % error)
  elif 'errorDetail' in log_record:
    error_detail = log_record['errorDetail'] or 'Unknown Error'
    raise Error('Unable to push the image to the registry: "%s"'
                % error_detail)
Example #16
  def Build(self):
    """Calls "docker build".

    Raises:
      ImageBuildError: if the image could not be built.
    """
    log.info('Building docker image %s from %s/Dockerfile:',
             self.tag, self._image_opts.dockerfile_dir)

    width, _ = console_attr_os.GetTermSize()
    log.status.Print(DOCKER_OUTPUT_BEGIN.center(width, DOCKER_OUTPUT_LINE_CHAR))

    build_res = self._docker_client.build(
        path=self._image_opts.dockerfile_dir,
        tag=self.tag,
        quiet=False, fileobj=None, nocache=self._image_opts.nocache,
        rm=self._image_opts.rm)

    info = None
    error = None
    error_detail = None
    log_records = []
    try:
      for line in build_res:
        line = line.strip()
        if not line:
          continue
        log_record = json.loads(line)
        log_records.append(log_record)
        if 'stream' in log_record:
          info = log_record['stream'].strip()
          log.status.Print(info)
        if 'error' in log_record:
          error = log_record['error'].strip()
          # will be logged to log.error in the thrown exception
          log.status.Print(error)
        if 'errorDetail' in log_record:
          error_detail = log_record['errorDetail']['message'].strip()
          log.status.Print(error_detail)
    except docker.errors.APIError as e:
      log.error(e.explanation)
      error = e.explanation
      error_detail = ''
    finally:
      log.status.Print(DOCKER_OUTPUT_LINE_CHAR * width + '\n')

    if not log_records:
      raise ImageBuildError(
          'Error building docker image {0} [with no output]'.format(self.tag))

    success_message = log_records[-1].get(_STREAM)
    if success_message:
      m = _SUCCESSFUL_BUILD_PATTERN.match(success_message)
      if m:
        # The build was successful.
        self._id = m.group(1)
        log.info('Image %s built, id = %s', self.tag, self.id)
        return

    raise ImageBuildError('Docker build aborted: ' + error)
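
The success check depends on module-level constants that are not shown; the values below are a plausible sketch only, based on the docker client's "Successfully built <id>" message, not the real definitions:

import re

_STREAM = 'stream'
DOCKER_OUTPUT_BEGIN = ' DOCKER BUILD OUTPUT '
DOCKER_OUTPUT_LINE_CHAR = '-'
# docker reports a short image id on success, e.g. "Successfully built 3d5a0b1c9e2f".
_SUCCESSFUL_BUILD_PATTERN = re.compile(r'Successfully built ([a-z0-9]{12})')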
Example #17
  def _GetIncludedPatterns(cls, line, dirname, recurse):
    """Gets the patterns from an '#!include' line.

    Args:
      line: str, the line containing the '#!include' directive
      dirname: str, the name of the base directory from which to include files
      recurse: int, how many layers of "#!include" directives to respect. 0
        means don't respect the directives, 1 means to respect the directives,
        but *not* in any "#!include"d files, etc.

    Returns:
      list of Pattern, the patterns recursively included from the specified
        file.

    Raises:
      ValueError: if dirname is not provided
      BadIncludedFileError: if the file being included does not exist or is not
        in the same directory.
    """
    if not dirname:
      raise ValueError('dirname must be provided in order to include a file.')
    start_idx = line.find(cls._INCLUDE_DIRECTIVE)
    included_file = line[start_idx + len(cls._INCLUDE_DIRECTIVE):]
    if _GCLOUDIGNORE_PATH_SEP in included_file:
      raise BadIncludedFileError(
          'May only include files in the same directory.')
    if not recurse:
      log.info('Not respecting `#!include` directive: [%s].', line)
      return []

    included_path = os.path.join(dirname, included_file)
    try:
      return cls.FromFile(included_path, recurse - 1).patterns
    except BadFileError as err:
      raise BadIncludedFileError(err.message)
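
For context, a small runnable sketch of the directive parsing this method relies on (the _INCLUDE_DIRECTIVE and _GCLOUDIGNORE_PATH_SEP values here are assumptions, not the class's real constants):

_INCLUDE_DIRECTIVE = '#!include:'
_GCLOUDIGNORE_PATH_SEP = '/'

line = '#!include:.gitignore'
start_idx = line.find(_INCLUDE_DIRECTIVE)
included_file = line[start_idx + len(_INCLUDE_DIRECTIVE):]
print(included_file)                            # .gitignore
print(_GCLOUDIGNORE_PATH_SEP in included_file)  # False, so the file may be included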
Example #18
def GetArgsFromArgFile(argspec, all_test_args_set):
  """Loads a group of test args from an optional user-supplied arg file.

  Args:
    argspec: string containing an ARG_FILE:ARG_GROUP_NAME pair, where ARG_FILE
      is the path to a file containing groups of test arguments in yaml format,
      and ARG_GROUP_NAME is a yaml object name of a group of arg:value pairs.
    all_test_args_set: a set of strings for every possible gcloud-test argument
      name regardless of test type. Used for validation.

  Returns:
    A {str:str} dict created from the file which maps arg names to arg values.

  Raises:
    BadFileException: the YAML parser encountered an I/O error or syntax error
      while reading the arg-file.
    InvalidTestArgError: an argument name was not a valid gcloud test arg.
    InvalidArgException: an argument has an invalid value or no value.
  """
  if argspec is None:
    return {}

  arg_file, group_name = _SplitArgFileAndGroup(argspec)
  try:
    all_arg_groups = _ReadArgGroupsFromFile(arg_file)
  except IOError as err:
    raise calliope_exceptions.BadFileException(
        'Error reading argument file [{f}]: {e}'.format(f=arg_file, e=err))
  _ValidateArgGroupNames(all_arg_groups.keys())

  args_from_file = {}
  _MergeArgGroupIntoArgs(args_from_file, group_name, all_arg_groups,
                         all_test_args_set)
  log.info('Args loaded from file: ' + str(args_from_file))
  return args_from_file
Example #19
    def RequestLogLines(self, tf, offset, valid_dates, sentinel):
        """Make a single roundtrip to the server.

    Args:
      tf: Writable binary stream to which the log lines returned by
        the server are written, stripped of headers, and excluding
        lines skipped due to the sentinel or valid_dates filtering.
      offset: Offset string for a continued request; None for the first.
      valid_dates: (datetime.date, datetime.date), A tuple of start and end
        dates to get the logs between.
      sentinel: str, The last line in the log file we are appending to, or None.

    Returns:
      The offset string to be used for the next request, if another
      request should be issued; or None, if not.
    """
        log.debug("Request with offset %r.", offset)
        params = dict(self._params)
        if offset:
            params["offset"] = offset

        response = self.rpcserver.Send("/api/request_logs", payload=None, **params)
        response = response.replace("\r", "\0")
        lines = response.splitlines()
        log.info("Received %d bytes, %d records.", len(response), len(lines))

        # Move all references to self.<anything> out of the loop.
        is_skipping = True
        (start, end) = valid_dates
        next_offset_regex = self._next_offset_regex
        len_sentinel = len(sentinel) if sentinel else None

        next_offset = None
        for line in lines:
            if line.startswith("#"):
                match = next_offset_regex.match(line)
                # We are now (May 2014) frequently seeing None instead of a blank or
                # not-present next_offset at all. This extra check handles that.
                if match and match.group(1) != "None":
                    next_offset = match.group(1)
                continue

            if sentinel and line.startswith(sentinel) and line[len_sentinel : len_sentinel + 1] in ("", "\0"):
                return None

            linedate = self.DateOfLogLine(line)
            # We don't write unparseable log lines, ever.
            if not linedate:
                continue
            if is_skipping:
                if linedate > end:
                    continue
                else:
                    # We are in the good date range, stop doing date comparisons.
                    is_skipping = False

            if start and linedate < start:
                return None
            tf.write(line + "\n")
        return next_offset
Example #20
def _CheckForRubyRuntime(path, appinfo):
  """Determines whether to treat this application as runtime:ruby.

  Honors the appinfo runtime setting; otherwise looks at the contents of the
  current directory and confirms with the user.

  Args:
    path: (str) Application path.
    appinfo: (apphosting.api.appinfo.AppInfoExternal or None) The parsed
      app.yaml file for the module if it exists.

  Returns:
    (bool) Whether this app should be treated as runtime:ruby.
  """
  if appinfo and appinfo.GetEffectiveRuntime() == 'ruby':
    return True

  log.info('Checking for Ruby.')

  gemfile_path = os.path.join(path, 'Gemfile')
  if not os.path.isfile(gemfile_path):
    return False

  got_ruby_message = 'This looks like a Ruby application.'
  if console_io.CanPrompt():
    return console_io.PromptContinue(
        message=got_ruby_message,
        prompt_string='Proceed to configure deployment for Ruby?')
  else:
    log.info(got_ruby_message)
    return True
Example #21
  def Shutdown(self, wait_secs=10):
    """Shuts down the broker server.

    Args:
      wait_secs: (float) The maximum time to wait for the broker to shutdown.

    Raises:
      BrokerError: If shutdown failed.
    """
    if self._process:
      try:
        execution_utils.KillSubprocess(self._process)
        self._process = None
        if self._comm_thread:
          self._comm_thread.join()
          self._comm_thread = None
      except RuntimeError as e:
        log.warn('Failed to shutdown broker: %s' % e)
        raise BrokerError('Broker failed to shutdown: %s' % e)
    else:
      # Invoke the /shutdown handler.
      try:
        self._SendJsonRequest('POST', '/shutdown')
      except RequestSocketError as e:
        if e.errno not in (SocketConnRefusedErrno(), SocketConnResetErrno()):
          raise
        # We may get an exception reading the response to the shutdown
        # request, because the shutdown may preempt the response.

    if not _Await(lambda: not self.IsRunning(), wait_secs):
      log.warn('Failed to shutdown broker: still running after {0}s'.format(
          wait_secs))
      raise BrokerError('Broker failed to shutdown: timed-out')

    log.info('Shutdown broker.')
Example #22
def ValidateOsVersions(args, catalog):
  """Validate os-version-ids strings against the TestingEnvironmentCatalog.

  Also allow users to alternatively specify OS version strings (e.g. '5.1.x')
  but translate them here to their corresponding version IDs (e.g. '22').
  The final list of validated version IDs is sorted in ascending order.

  Args:
    args: an argparse namespace. All the arguments that were provided to the
      command invocation (i.e. group and command arguments combined).
    catalog: the TestingEnvironmentCatalog which includes all valid OS versions
      accepted by the Testing service.
  """
  validated_versions = set()  # Using a set will remove duplicates
  version_ids = [v.id for v in catalog.versions]
  # TODO(user): use dict comprehensions if py2.6 compatibility is dropped.
  # version_to_id_map = {v.versionString: v.id for v in catalog.versions}
  version_to_id_map = dict((v.versionString, v.id) for v in catalog.versions)

  for vers in args.os_version_ids:
    if vers in version_ids:
      validated_versions.add(vers)
    else:
      version_id = version_to_id_map.get(vers, None)
      if version_id is None:
        raise exceptions.InvalidArgumentException('os-version-ids', vers)
      validated_versions.add(version_id)
  args.os_version_ids = sorted(validated_versions)
  log.info('Testing against OS versions: {0}'.format(args.os_version_ids))
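
A small illustration of the version-string translation using hypothetical catalog entries (the real catalog comes from the Testing service's TestingEnvironmentCatalog):

import collections

Version = collections.namedtuple('Version', ['id', 'versionString'])
catalog_versions = [Version('21', '5.0.x'), Version('22', '5.1.x')]

version_ids = [v.id for v in catalog_versions]
version_to_id_map = dict((v.versionString, v.id) for v in catalog_versions)

requested = ['22', '5.1.x']  # mixes an ID with a version string
validated = {v if v in version_ids else version_to_id_map[v] for v in requested}
print(sorted(validated))  # ['22'] -- both inputs resolve to the same ID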
Example #23
  def Flush(self):
    """Flush the current batch.

    This first attempts to send the batch as a single request; if that
    fails because the server doesn't support batching, the files are
    sent one by one, and self.batching is reset to False.

    At the end, self.batch and self.batch_size are reset.
    """
    if not self.batch:
      return
    try:
      self.SendBatch()
    except util.RPCError as err:
      if err.url_error.code != 404:
        raise

      # Assume it's an old server.  Disable batching.
      log.info('Old server detected; turning off %s batching.', self.what)
      self.batching = False

      # Send the files individually now.
      for path, payload, mime_type in self.batch:
        self.SendSingleFile(path, payload, mime_type)

      # And reset the batch info.
      self.batch = []
      self.batch_size = 0
Example #24
  def Run(self, staging_area, descriptor, app_dir):
    """Invokes a staging command with a given <service>.yaml and temp dir.

    Args:
      staging_area: str, path to the staging area.
      descriptor: str, path to the unstaged <service>.yaml or appengine-web.xml
      app_dir: str, path to the unstaged app directory

    Returns:
      str, the path to the staged directory or None if staging was not required.

    Raises:
      StagingCommandFailedError: if the staging command process exited non-zero.
    """
    staging_dir = tempfile.mkdtemp(dir=staging_area)
    args = self.GetArgs(descriptor, app_dir, staging_dir)
    log.info('Executing staging command: [{0}]\n\n'.format(' '.join(args)))
    out = cStringIO.StringIO()
    err = cStringIO.StringIO()
    return_code = execution_utils.Exec(args, no_exit=True, out_func=out.write,
                                       err_func=err.write)
    message = _STAGING_COMMAND_OUTPUT_TEMPLATE.format(out=out.getvalue(),
                                                      err=err.getvalue())
    log.info(message)
    if return_code:
      raise StagingCommandFailedError(args, return_code, message)
    return staging_dir
Example #25
def _StopPreviousVersionIfApplies(old_default_version, api_client):
    """Stop the previous default version if applicable.

  Cases where a version will not be stopped:

  * If the previous default version is not serving, there is no need to stop it.
  * If the previous default version is an automatically scaled standard
    environment app, it cannot be stopped.

  Args:
    old_default_version: Version, The old default version to stop.
    api_client: appengine_api_client.AppengineApiClient to use to make requests.
  """
    version_object = old_default_version.version
    status_enum = api_client.messages.Version.ServingStatusValueValuesEnum
    if version_object.servingStatus != status_enum.SERVING:
        log.info("Previous default version [{0}] not serving, so not stopping " "it.".format(old_default_version))
        return
    is_standard = not (version_object.vm or version_object.env == "flex" or version_object.env == "flexible")
    if is_standard and not version_object.basicScaling and not version_object.manualScaling:
        log.info(
            "Previous default version [{0}] is an automatically scaled "
            "standard environment app, so not stopping it.".format(old_default_version)
        )
        return

    try:
        api_client.StopVersion(service_name=old_default_version.service, version_id=old_default_version.id)
    except (calliope_exceptions.HttpException, operations.OperationError, operations.OperationTimeoutError) as err:
        log.warn("Error stopping version [{0}]: {1}".format(old_default_version, str(err)))
        log.warn(
            "Version [{0}] is still running and you must stop or delete it "
            "yourself in order to turn it off. (If you do not, you may be "
            "charged.)".format(old_default_version)
        )
Example #26
  def _RunCmd(self, cmd, params=None, disable_user_output=True):
    if not self.cli.IsValidCommand(cmd):
      log.info('Command %s does not exist.', cmd)
      return None
    if params is None:
      params = []
    args = cmd + params
    log.info('Executing: [gcloud %s]', ' '.join(args))
    try:
      # Disable output from individual commands, so that we get
      # command run results, and don't clutter output of init.
      if disable_user_output:
        args.append('--no-user-output-enabled')

      if (properties.VALUES.core.verbosity.Get() is None and
          disable_user_output):
        # Unless user explicitly set verbosity, suppress from subcommands.
        args.append('--verbosity=none')

      result = self.cli.Execute(args)
      # Best effort to force result of Execute eagerly.  Don't just check
      # that result is iterable to avoid category errors (e.g., accidentally
      # converting a string or dict to a list).
      if type(result) is types.GeneratorType:
        return list(result)
      return result

    except SystemExit as exc:
      log.status.write('[{0}] has failed\n'.format(' '.join(cmd + params)))
      raise c_exc.FailedSubCommand(cmd + params, exc.code)
    except BaseException:
      log.status.write('Failed to run [{0}]\n'.format(' '.join(cmd + params)))
      raise
Example #27
def _CheckoutLatestVersion(target_dir, url):
  """Pull tags and checkout the latest version of the target directory.

  Args:
    target_dir: (str) Directory name.
    url: (str) Git repository URL.

  Raises:
    errors.HangupException: Hangup during communication to a remote repository.
  """
  local_repo = repo.Repo(target_dir)
  try:
    # We don't get the tags with a clone or a fetch, so we have to get them
    # after the fact.
    client_wrapper = WrapClient(url)
    local_repo = repo.Repo(target_dir)
    tag, revision = _PullTags(local_repo, client_wrapper, target_dir)

    log.info('Checking out revision [%s] of [%s] into [%s]', tag, url,
             target_dir)
    try:
      # Checkout the specified revision of the runtime definition from git.
      index.build_index_from_tree(local_repo.path, local_repo.index_path(),
                                  local_repo.object_store,
                                  revision.tree)
    except (IOError, OSError, WindowsError) as ex:
      raise InvalidTargetDirectoryError(
          'Unable to checkout directory {0}: {1}'.format(target_dir,
                                                         ex.message))
  finally:
    local_repo.close()
Example #28
def _SubmitBuild(build, image, project, parallel_build):
  """Builds and pushes a set of docker images.

  Args:
    build: A fixed up Build object.
    image: docker_image.Image, A docker image.
    project: str, The project being deployed to.
    parallel_build: bool, if True, enable parallel build and deploy.

  Returns:
    BuildArtifact, Representing the pushed container image or in-progress build.
  """
  build_timeout = properties.VALUES.app.cloud_build_timeout.Get()
  if build_timeout and build_timeout > MAX_PARALLEL_BUILD_TIME:
    parallel_build = False
    log.info(
        'Property cloud_build_timeout configured to [{0}], which exceeds '
        'the maximum build time for parallelized beta deployments of [{1}] '
        'seconds. Performing serial deployment.'.format(
            build_timeout, MAX_PARALLEL_BUILD_TIME))

  if parallel_build:
    build_id = cloudbuild_build.CloudBuildClient().ExecuteCloudBuildAsync(
        build, project=project)
    return BuildArtifact.MakeBuildIdArtifact(build_id)
  else:
    metrics.CustomTimedEvent(metric_names.CLOUDBUILD_EXECUTE_START)
    cloudbuild_build.CloudBuildClient().ExecuteCloudBuild(
        build, project=project)
    metrics.CustomTimedEvent(metric_names.CLOUDBUILD_EXECUTE)
    return BuildArtifact.MakeImageArtifact(image.tagged_repo)
Example #29
  def Filter(self, context, args):
    """Modify the context that will be given to this group's commands when run.

    Args:
      context: {str:object}, The current context, which is a set of key-value
          pairs that can be used for common initialization among commands.
      args: argparse.Namespace: The same Namespace given to the corresponding
          .Run() invocation.

    Returns:
      The refined command context.
    """
    # Get service endpoints and ensure they are compatible with each other
    testing_url = properties.VALUES.api_endpoint_overrides.testing.Get()
    toolresults_url = properties.VALUES.api_endpoint_overrides.toolresults.Get()
    log.info('Test Service endpoint: [{0}]'.format(testing_url))
    log.info('Tool Results endpoint: [{0}]'.format(toolresults_url))
    if ((toolresults_url is None or 'apis.com/toolresults' in toolresults_url)
        != (testing_url is None or 'testing.googleapis' in testing_url)):
      raise exceptions.ToolException(
          'Service endpoints [{0}] and [{1}] are not compatible.'
          .format(testing_url, toolresults_url))

    http = self.Http()

    # Create the Testing service client
    resources.SetParamDefault(
        api='test', collection=None, param='project',
        resolver=resolvers.FromProperty(properties.VALUES.core.project))
    # TODO(user) Support multiple versions when they exist
    testing_client_v1 = testing_v1.TestingV1(
        get_credentials=False,
        url=testing_url,
        http=http)
    testing_registry = resources.REGISTRY.CloneAndSwitchAPIs(testing_client_v1)
    context['testing_client'] = testing_client_v1
    context['testing_messages'] = testing_v1
    context['testing_registry'] = testing_registry

    # Create the Tool Results service client.
    resources.SetParamDefault(
        api='toolresults', collection=None, param='project',
        resolver=resolvers.FromProperty(properties.VALUES.core.project))
    toolresults_client_v1 = toolresults_v1beta3.ToolresultsV1beta3(
        get_credentials=False,
        url=toolresults_url,
        http=http)
    tr_registry = resources.REGISTRY.CloneAndSwitchAPIs(toolresults_client_v1)
    context['toolresults_client'] = toolresults_client_v1
    context['toolresults_messages'] = toolresults_v1beta3
    context['toolresults_registry'] = tr_registry

    # TODO(user): remove this message for general release.
    log.status.Print(
        '\nHave questions, feedback, or issues? Please let us know by using '
        'this Google Group:\n  https://groups.google.com/forum/#!forum'
        '/google-cloud-test-lab-external\n')

    return context
Example #30
def Load(account=None):
  """Get the credentials associated with the provided account.

  Args:
    account: str, The account address for the credentials being fetched. If
        None, the account stored in the core.account property is used.

  Returns:
    oauth2client.client.Credentials, The specified credentials.

  Raises:
    NoActiveAccountException: If account is not provided and there is no
        active account.
    NoCredentialsForAccountException: If there are no valid credentials
        available for the provided or active account.
    c_gce.CannotConnectToMetadataServerException: If the metadata server cannot
        be reached.
    RefreshError: If the credentials fail to refresh.
  """
  # If a credential file is set, just use that and ignore the active account
  # and whatever is in the credential store.
  cred_file_override = properties.VALUES.auth.credential_file_override.Get()
  if cred_file_override:
    log.info('Using alternate credentials from file: [%s]',
             cred_file_override)
    try:
      cred = client.GoogleCredentials.from_stream(cred_file_override)
      if cred.create_scoped_required():
        cred = cred.create_scoped(config.CLOUDSDK_SCOPES)
      return cred
    except client.Error as e:
      raise InvalidCredentialFileException(cred_file_override, e)

  if not account:
    account = properties.VALUES.core.account.Get()

  if not account:
    raise NoActiveAccountException()

  devshell_creds = c_devshell.LoadDevshellCredentials()
  if devshell_creds and (
      devshell_creds.devshell_response.user_email == account):
    return devshell_creds

  if account in c_gce.Metadata().Accounts():
    return AcquireFromGCE(account)

  store = _StorageForAccount(account)
  if not store:
    raise NoCredentialsForAccountException(account)
  cred = store.get()
  if not cred:
    raise NoCredentialsForAccountException(account)

  # cred.token_expiry is in UTC time.
  if not cred.token_expiry or cred.token_expiry < cred.token_expiry.utcnow():
    Refresh(cred)

  return cred
Example #31
  def Display(self):
    """The default display method."""

    if not log.IsUserOutputEnabled():
      log.info('Display disabled.')
      # NOTICE: Do not consume resources here. Some commands use this case to
      # access the results of Run() via the return value of Execute(). However,
      # to satisfy callers who are only interested in silent side effects,
      # generators/iterators must be converted to a list here.
      if resource_property.IsListLike(self._resources):
        return list(self._resources)
      return self._resources

    # Initialize the printer.
    self._InitPrinter()

    # Add a URI cache update tap if needed.
    self._AddUriCacheTap()

    # Add a resource page tap if needed.
    self._AddPageTap()

    # Add a resource flatten tap if needed.
    self._AddFlattenTap()

    # Add a sort tap if needed.
    self._AddSortByTap()

    # Add a resource filter tap if needed.
    self._AddFilterTap()

    # Add a resource limit tap if needed.
    self._AddLimitTap()

    # Add the URI replace tap if needed.
    self._AddUriReplaceTap()

    resources_were_displayed = True
    if self._printer:
      # Most command output will end up here.
      log.info('Display format "%s".', self._format)
      self._printer.Print(self._resources)
      resources_were_displayed = self._printer.ResourcesWerePrinted()
    elif hasattr(self._command, 'Display'):
      # This will eventually be rare.
      log.info('Explicit Display.')
      self._command.Display(self._args, self._resources)

    # Resource display is done.
    log.out.flush()

    # If the default format was used then display the epilog.
    if not self._args.IsSpecified('format'):
      self._command.Epilog(resources_were_displayed)

    return self._resources
Example #32
    def PrintLog(self, build_ref):
        """Print the logs for a build.

    Args:
      build_ref: Build reference, The build whose logs shall be streamed.

    Raises:
      NoLogsBucketException: If the build does not specify a logsBucket.
    """
        build = self.GetBuild(build_ref)
        if build.options and build.options.logging in [
                self.messages.BuildOptions.LoggingValueValuesEnum.NONE,
                self.messages.BuildOptions.LoggingValueValuesEnum.
                STACKDRIVER_ONLY,
                self.messages.BuildOptions.LoggingValueValuesEnum.
                CLOUD_LOGGING_ONLY,
        ]:
            log.info(
                'GCS logs not available: build logging mode is {0}.'.format(
                    build.options.logging))
            return

        log_tailer = LogTailer.FromBuild(build)
        log_tailer.Poll(is_last=True)
Example #33
    def LogClientDeploy(self, runtime, start_time_usec, success):
        """Logs a client deployment attempt.

    Args:
      runtime: The runtime for the app being deployed.
      start_time_usec: The start time of the deployment in micro seconds.
      success: True if the deployment succeeded otherwise False.
    """
        if not self.usage_reporting:
            log.info('Skipping usage reporting.')
            return
        end_time_usec = self.GetCurrentTimeUsec()
        try:
            info = client_deployinfo.ClientDeployInfoExternal(
                runtime=runtime,
                start_time_usec=start_time_usec,
                end_time_usec=end_time_usec,
                requests=self.requests,
                success=success,
                sdk_version=config.CLOUD_SDK_VERSION)
            self.Send('/api/logclientdeploy', info.ToYAML())
        except BaseException as e:  # pylint: disable=broad-except
            log.debug(
                'Exception logging deploy info continuing - {0}'.format(e))
Example #34
def detect_and_set_best_config(is_estimated_multi_file_workload):
    """Determines best app config based on system and workload."""
    if is_estimated_multi_file_workload:
        _set_if_not_user_set('sliced_object_download_component_size',
                             COMPONENT_SIZE)
        _set_if_not_user_set('sliced_object_download_max_components',
                             MULTI_FILE_SLICED_OBJECT_DOWNLOAD_MAX_COMPONENTS)
        if multiprocessing.cpu_count() < 4:
            log.info('Using low CPU count, multi-file workload config.')
            _set_if_not_user_set('process_count',
                                 MULTI_FILE_LOW_CPU_PROCESS_COUNT)
            _set_if_not_user_set('thread_count',
                                 MULTI_FILE_LOW_CPU_THREAD_COUNT)
            _set_if_not_user_set(
                'sliced_object_download_threshold',
                MULTI_FILE_LOW_CPU_SLICED_OBJECT_DOWNLOAD_THRESHOLD)
        else:
            log.info('Using high CPU count, multi-file workload config.')
            _set_if_not_user_set('process_count',
                                 MULTI_FILE_HIGH_CPU_PROCESS_COUNT)
            _set_if_not_user_set('thread_count',
                                 MULTI_FILE_HIGH_CPU_THREAD_COUNT)
            _set_if_not_user_set(
                'sliced_object_download_threshold',
                MULTI_FILE_HIGH_CPU_SLICED_OBJECT_DOWNLOAD_THRESHOLD)
    else:
        _set_if_not_user_set('sliced_object_download_threshold',
                             SINGLE_FILE_SLICED_OBJECT_DOWNLOAD_THRESHOLD)
        _set_if_not_user_set('sliced_object_download_component_size',
                             COMPONENT_SIZE)
        if multiprocessing.cpu_count() < 8:
            log.info('Using low CPU count, single-file workload config.')
            _set_if_not_user_set('process_count',
                                 SINGLE_FILE_LOW_CPU_PROCESS_COUNT)
            _set_if_not_user_set('thread_count', SINGLE_FILE_THREAD_COUNT)
            _set_if_not_user_set(
                'sliced_object_download_max_components',
                SINGLE_FILE_LOW_CPU_SLICED_OBJECT_DOWNLOAD_MAX_COMPONENTS)
        else:
            log.info('Using high CPU count, single-file workload config.')
            _set_if_not_user_set('process_count',
                                 SINGLE_FILE_HIGH_CPU_PROCESS_COUNT)
            _set_if_not_user_set('thread_count', SINGLE_FILE_THREAD_COUNT)
            _set_if_not_user_set(
                'sliced_object_download_max_components',
                SINGLE_FILE_HIGH_CPU_SLICED_OBJECT_DOWNLOAD_MAX_COMPONENTS)
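
_set_if_not_user_set is not shown; the sketch below only illustrates the guard it implies (a tuned default is applied unless the user already configured the property), using a plain dict rather than gcloud's properties machinery:

_user_settings = {'process_count': 2}   # hypothetical user-provided overrides
_effective_settings = {}

def _set_if_not_user_set(name, value):
  """Apply a tuned default only when the user has not set the property."""
  if name in _user_settings:
    _effective_settings[name] = _user_settings[name]
    return
  _effective_settings[name] = value

_set_if_not_user_set('process_count', 8)   # keeps the user's 2
_set_if_not_user_set('thread_count', 16)   # applies the tuned default
print(_effective_settings)  # {'process_count': 2, 'thread_count': 16}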
Example #35
    def Run(self, args):
        client = privateca_base.GetClientInstance()
        messages = privateca_base.GetMessagesModule()

        client_filter, server_filter = filter_rewrite.BackendFilterRewrite(
        ).Rewrite(args.filter)
        log.info('original_filter=%r, client_filter=%r, server_filter=%r',
                 args.filter, client_filter, server_filter)
        # Overwrite client filter used by gcloud.
        args.filter = client_filter
        parent = args.CONCEPTS.issuer.Parse()
        request = messages.PrivatecaProjectsLocationsCertificateAuthoritiesCertificatesListRequest(
            parent=parent.RelativeName(),
            orderBy=common_args.ParseSortByArg(args.sort_by),
            filter=server_filter)

        return list_pager.YieldFromList(
            client.projects_locations_certificateAuthorities_certificates,
            request,
            field='certificates',
            limit=args.limit,
            batch_size_attribute='pageSize',
            batch_size=args.page_size,
            get_field_func=response_utils.GetFieldAndLogUnreachable)
Example #36
    def _GetIncludedPatterns(cls, line, dirname, recurse):
        """Gets the patterns from an '#!include' line.

    Args:
      line: str, the line containing the '#!include' directive
      dirname: str, the name of the base directory from which to include files
      recurse: int, how many layers of "#!include" directives to respect. 0
        means don't respect the directives, 1 means to respect the directives,
        but *not* in any "#!include"d files, etc.

    Returns:
      list of Pattern, the patterns recursively included from the specified
        file.

    Raises:
      ValueError: if dirname is not provided
      BadIncludedFileError: if the file being included does not exist or is not
        in the same directory.
    """
        if not dirname:
            raise ValueError(
                'dirname must be provided in order to include a file.')
        start_idx = line.find(cls._INCLUDE_DIRECTIVE)
        included_file = line[start_idx + len(cls._INCLUDE_DIRECTIVE):]
        if _GCLOUDIGNORE_PATH_SEP in included_file:
            raise BadIncludedFileError(
                'May only include files in the same directory.')
        if not recurse:
            log.info('Not respecting `#!include` directive: [%s].', line)
            return []

        included_path = os.path.join(dirname, included_file)
        try:
            return cls.FromFile(included_path, recurse - 1).patterns
        except BadFileError as err:
            raise BadIncludedFileError(six.text_type(err))
Example #37
  def GetVersionResource(self, api_client):
    """Attempts to load the Version resource for this version.

    Returns the cached Version resource if it exists. Otherwise, attempts to
    load it from the server. Errors are logged and ignored.

    Args:
      api_client: An AppengineApiClient.

    Returns:
      The Version resource, or None if it could not be loaded.
    """
    if not self.version:
      try:
        self.version = api_client.GetVersionResource(self.service, self.id)
        if not self.version:
          log.info('Failed to retrieve resource for version [{0}]'.format(self))
      except apitools_exceptions.Error as e:
        # Log and drop the exception so we don't introduce a new failure mode
        # into the app deployment flow. If we find this isn't happening very
        # often, we could choose to propagate the error.
        log.warning('Error retrieving Version resource [{0}]: {1}'
                    .format(six.text_type(self), six.text_type(e)))
    return self.version
Example #38
    def InitiateConnection(self):
        """Initiate the WebSocket connection."""
        utils.CheckPythonVersion(self._ignore_certs)
        utils.ValidateParameters(self._tunnel_target)
        self._ca_certs = utils.CheckCACertsFile(self._ignore_certs)

        self._connect_url = utils.CreateWebSocketUrl(CONNECT_ENDPOINT,
                                                     self._tunnel_target)
        headers = [
            'User-Agent: ' + http.MakeUserAgentString(),
            'Sec-WebSocket-Protocol: ' + utils.SUBPROTOCOL_NAME
        ]
        if self._access_token:
            headers += ['Authorization: Bearer ' + self._access_token]
        log.info('Connecting with URL %r', self._connect_url)
        self._websocket_open = False
        self._websocket_errors = []
        self._connection_sid = None

        if log.GetVerbosity() == logging.DEBUG:
            websocket.enableTrace(True)
        else:
            websocket_logger = logging.getLogger('websocket')
            websocket_logger.setLevel(logging.CRITICAL)

        self._websocket = websocket.WebSocketApp(self._connect_url,
                                                 header=headers,
                                                 on_open=self._OnOpen,
                                                 on_error=self._OnError,
                                                 on_close=self._OnClose,
                                                 on_data=self._OnData)
        log.info('Starting WebSocket receive thread.')
        self._websocket_thread = threading.Thread(
            target=self._ReceiveFromWebSocket)
        self._websocket_thread.daemon = True
        self._websocket_thread.start()
Example #39
 def _ReceiveFromWebSocket(self):
   """Receive data from WebSocket connection."""
   try:
     if self._proxy_info:
       http_proxy_auth = None
       if self._proxy_info.proxy_user or self._proxy_info.proxy_pass:
         # The websocket library ultimately expects the proxy username and
         # password to be strings, unlike httplib2's ProxyInfo which encodes
         # these as bytes. So we need to ensure they're decoded here before
         # calling run_forever.
         http_proxy_auth = (encoding.Decode(self._proxy_info.proxy_user),
                            encoding.Decode(self._proxy_info.proxy_pass))
       self._websocket.run_forever(
           origin=TUNNEL_CLOUDPROXY_ORIGIN, sslopt=self._sslopt,
           http_proxy_host=self._proxy_info.proxy_host,
           http_proxy_port=self._proxy_info.proxy_port,
           http_proxy_auth=http_proxy_auth)
     else:
       self._websocket.run_forever(origin=TUNNEL_CLOUDPROXY_ORIGIN,
                                   sslopt=self._sslopt)
   except:  # pylint: disable=bare-except
     try:
       log.info('Error while receiving from WebSocket.', exc_info=True)
     except:
       # This is a daemon thread, so it could be running while the interpreter
       # is exiting, so logging could fail. At that point the only thing to do
       # is ignore the exception. Ideally we would make this a non-daemon
       # thread.
       pass
   try:
     self.Close()
   except:  # pylint: disable=bare-except
     try:
       log.info('Error while closing in receiving thread.', exc_info=True)
     except:
       pass
Example #40
    def AddToBatch(self, path, payload, mime_type):
        """Batch a file, possibly flushing first, or perhaps upload it directly.

    Args:
      path: The name of the file.
      payload: The contents of the file.
      mime_type: The MIME Content-type of the file, or None.

    If mime_type is None, application/octet-stream is substituted.
    """
        if not mime_type:
            mime_type = 'application/octet-stream'
        size = len(payload)
        if size <= MAX_BATCH_FILE_SIZE:
            if (len(self.batch) >= MAX_BATCH_COUNT
                    or self.batch_size + size > MAX_BATCH_SIZE):
                self.Flush()
            if self.batching:
                log.info('Adding %s %s (%s bytes, type=%s) to batch.',
                         self.what, path, size, mime_type)
                self.batch.append((path, payload, mime_type))
                self.batch_size += size + BATCH_OVERHEAD
                return
        self.SendSingleFile(path, payload, mime_type)
Example #41
    def Parse(self, parent_params, parameter_info, aggregations_dict):
        """Parse the parent resource from parameter info and aggregations.

    Args:
      parent_params: [str], a list of params in the current collection's parent
        collection.
      parameter_info: the runtime ResourceParameterInfo object.
      aggregations_dict: {str: str}, a dict of params to values that are
        being aggregated from earlier updates.

    Returns:
      resources.Resource | None, the parsed parent reference or None if there
        is not enough information to parse.
    """
        param_values = {
            self.param_translation.get(p, p): parameter_info.GetValue(p)
            for p in parent_params
        }
        for p, value in six.iteritems(aggregations_dict):
            translated_name = self.param_translation.get(p, p)
            if value and not param_values.get(translated_name, None):
                param_values[translated_name] = value
        try:
            return resources.Resource(
                resources.REGISTRY,
                collection_info=resources.REGISTRY.GetCollectionInfo(
                    self.collection),
                subcollection='',
                param_values=param_values,
                endpoint_url=None)
        # Not all completion list calls may need to have a parent, so even if we
        # can't parse a parent, we log the error and attempt to send an update call
        # without one. (Any error returned by the API will be raised.)
        except resources.Error as e:
            log.info(six.text_type(e).rstrip())
            return None
Example #42
    def CopyTarballToGCS(self, storage_client, gcs_object):
        """Copy a tarball of the snapshot to GCS.

    Args:
      storage_client: storage_api.StorageClient, The storage client to use for
                      uploading.
      gcs_object: storage.objects Resource, The GCS object to write.

    Returns:
      storage_v1_messages.Object, The written GCS object.
    """
        with files.ChDir(self.src_dir):
            with files.TemporaryDirectory() as tmp:
                archive_path = os.path.join(tmp, 'file.tgz')
                tf = self._MakeTarball(archive_path)
                tf.close()
                ignore_file_path = os.path.join(self.src_dir,
                                                gcloudignore.IGNORE_FILE_NAME)
                if self.any_files_ignored:
                    if os.path.exists(ignore_file_path):
                        log.info('Using gcloudignore file [{}]'.format(
                            ignore_file_path))
                    else:
                        log.status.Print(
                            _IGNORED_FILE_MESSAGE.format(
                                log_file=log.GetLogFilePath()))
                log.status.write(
                    'Uploading tarball of [{src_dir}] to '
                    '[gs://{bucket}/{object}]\n'.format(
                        src_dir=self.src_dir,
                        bucket=gcs_object.bucket,
                        object=gcs_object.object,
                    ), )
                return storage_client.CopyFileToGCS(
                    storage_util.BucketReference.FromBucketUrl(
                        gcs_object.bucket), archive_path, gcs_object.object)
Beispiel #43
0
def _GenerateSetupPyIfNeeded(setup_py_path, temp_dir, package_name):
    """Generates a temporary setup.py file if there is none at the given path.

  Args:
    setup_py_path: str, a path to the expected setup.py location.
    temp_dir: str, the temporary directory in which to generate setup.py, if
      necessary.
    package_name: str, the name of the Python package for which to write a
      setup.py file (used in the generated file contents).

  Returns:
    str, path to a working setup.py file (possibly generated)
  """
    log.debug('Looking for setup.py file at [%s]', setup_py_path)
    if os.path.isfile(setup_py_path):
        log.info('Using existing setup.py file at [%s]', setup_py_path)
        return setup_py_path

    setup_contents = DEFAULT_SETUP_FILE.format(package_name=package_name)
    log.info('Generating temporary setup.py file:\n%s', setup_contents)
    generated_path = os.path.join(temp_dir, 'setup.py')
    with open(generated_path, 'w') as setup_file:
        setup_file.write(setup_contents)
    return generated_path
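A hypothetical call site for the helper above; the trainer directory and package name are placeholders, and the temporary directory must outlive any use of the returned path:

import os
import tempfile

# Hypothetical usage of _GenerateSetupPyIfNeeded as defined above; the
# surrounding module's DEFAULT_SETUP_FILE and log are assumed importable.
with tempfile.TemporaryDirectory() as temp_dir:
  setup_py = _GenerateSetupPyIfNeeded(
      os.path.join('trainer', 'setup.py'),  # expected location (placeholder)
      temp_dir,
      package_name='trainer')
  # setup_py points at either the existing file or a generated one inside
  # temp_dir; package the sources before temp_dir is cleaned up.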
Beispiel #44
0
 def Update(self, parameter_info, aggregations):
   if self.method is None:
     return None
   log.info(
       'Cache query parameters={} aggregations={} '
       'resource info={}'.format(
           [(p, parameter_info.GetValue(p))
            for p in self.collection_info.GetParams('')],
           [(p.name, p.value) for p in aggregations],
           parameter_info.resource_info.attribute_to_args_map))
   parent_translator = self._GetParentTranslator(parameter_info, aggregations)
   try:
     query = self.BuildListQuery(parameter_info, aggregations,
                                 parent_translator=parent_translator)
   except Exception as e:  # pylint: disable=broad-except
     if properties.VALUES.core.print_completion_tracebacks.GetBool():
       raise
     log.info(six.text_type(e).rstrip())
     raise Error('Could not build query to list completions: {} {}'.format(
         type(e), six.text_type(e).rstrip()))
   try:
     response = self.method.Call(query)
     response_collection = self.method.collection
     items = [self._ParseResponse(r, response_collection,
                                  parameter_info=parameter_info,
                                  aggregations=aggregations,
                                  parent_translator=parent_translator)
              for r in response]
     log.info('cache items={}'.format(
         [i.RelativeName() for i in items]))
   except Exception as e:  # pylint: disable=broad-except
     if properties.VALUES.core.print_completion_tracebacks.GetBool():
       raise
     log.info(six.text_type(e).rstrip())
     # Give user more information if they hit an apitools validation error,
     # which probably means that they haven't provided enough information
     # for us to complete.
     if isinstance(e, messages.ValidationError):
       raise Error('Update query failed, may not have enough information to '
                   'list existing resources: {} {}'.format(
                       type(e), six.text_type(e).rstrip()))
     raise Error('Update query [{}]: {} {}'.format(
         query, type(e), six.text_type(e).rstrip()))
   return [self.StringToRow(item.RelativeName()) for item in items]
Beispiel #45
0
 def SendClose(self):
   """Send WebSocket Close message."""
   try:
     if log.GetVerbosity() == logging.DEBUG:
       log.info('CLOSE')
     self._websocket.sock.send_close()
   except (EnvironmentError,
           websocket.WebSocketConnectionClosedException) as e:
     log.info('Unable to send WebSocket Close message [%s].', str(e))
     self.Close()
   except:  # pylint: disable=bare-except
     log.info('Error during WebSocket send of Close message.', exc_info=True)
     self.Close()
Beispiel #46
0
def FindOrCopyDockerfile(runtime, dst, cleanup=True):
    """Copies default Dockerfile for a given runtime into destination directory.

  The default Dockerfile for the runtime is used if there is no user-provided
  Dockerfile in the destination directory.

  Args:
    runtime: str, Runtime that we're looking for the Dockerfile for.
    dst: str, Directory path where to check for and copy to the Dockerfile.
    cleanup: bool, If true, delete the file on gcloud exit.

  Raises:
    IOError: raised by pkg_resources.GetData if the Dockerfile doesn't exist
      in the expected location.

  Returns:
    callable(), A function to be called to clean up the generated Dockerfile.
  """
    log.info('Looking for the %s in %s', config.DOCKERFILE, dst)
    if os.path.exists(os.path.join(dst, config.DOCKERFILE)):
        log.info('Using %s found in %s', config.DOCKERFILE, dst)
        return lambda: None
    log.info('Looking for the default %s for runtime [%s]', config.DOCKERFILE,
             runtime)
    runtime = _GetCanonicalRuntime(runtime)
    default_dockerfiles_dir = GetGCloudDockerfilesDir()
    src = os.path.join(default_dockerfiles_dir,
                       '{runtime}_app'.format(runtime=runtime),
                       config.DOCKERFILE)
    src_data = pkg_resources.GetData(src)
    log.info(
        '%s for runtime [%s] is found in %s. Copying it into application '
        'directory.', config.DOCKERFILE, runtime, default_dockerfiles_dir)
    with open(os.path.join(dst, os.path.basename(src)), 'w') as dst_file:
        dst_file.write(src_data)
    # Delete the file after we're done if necessary.
    if cleanup:
        full_name = os.path.join(dst, config.DOCKERFILE)
        atexit.register(Clean, full_name)
        return lambda: Clean(full_name)
    return lambda: None
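The returned callable lets a caller remove a copied Dockerfile as soon as it is no longer needed, rather than relying on the atexit hook. A hypothetical call site (the runtime name and directory are placeholders):

# Hypothetical usage of FindOrCopyDockerfile as defined above.
app_dir = '/path/to/app'  # placeholder
cleanup = FindOrCopyDockerfile('python27', app_dir, cleanup=True)
try:
  # Build the image while the Dockerfile is present in app_dir.
  pass
finally:
  cleanup()  # no-op if the user supplied their own Dockerfile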
Beispiel #47
0
  def Display(self):
    """The default display method."""

    if not log.IsUserOutputEnabled():
      log.info('Display disabled.')
      # NOTICE: Do not consume resources here. Some commands use this case to
      # access the results of Run() via the return value of Execute().
      return self._resources

    # Initialize the printer.
    self._InitPrinter()

    # Add a URI cache update tap if needed.
    self._AddUriCacheTap()

    # Add a resource page tap if needed.
    self._AddPageTap()

    # Add a resource flatten tap if needed.
    self._AddFlattenTap()

    # Add a resource filter tap if needed.
    self._AddFilterTap()

    # Add a resource limit tap if needed.
    self._AddLimitTap()

    # Add the URI replace tap if needed.
    self._AddUriReplaceTap()

    resources_were_displayed = True
    if self._printer:
      # Most command output will end up here.
      log.info('Display format "%s".', self._format)
      self._printer.Print(self._resources)
      resources_were_displayed = self._printer.ResourcesWerePrinted()
    elif hasattr(self._command, 'Display'):
      # This will eventually be rare.
      log.info('Explicit Display.')
      self._command.Display(self._args, self._resources)

    # Resource display is done.
    log.out.flush()

    # If the default format was used then display the epilog.
    if self._default_format_used:
      self._command.Epilog(resources_were_displayed)

    return self._resources
Beispiel #48
0
    def _RunCmd(self, cmd, params=None, disable_user_output=True):
        if not self.cli.IsValidCommand(cmd):
            log.info('Command %s does not exist.', cmd)
            return None
        if params is None:
            params = []
        args = cmd + params
        log.info('Executing: [gcloud %s]', ' '.join(args))
        try:
            # Disable output from individual commands, so that we get
            # command run results, and don't clutter output of init.
            if disable_user_output:
                args.append('--no-user-output-enabled')

            if (properties.VALUES.core.verbosity.Get() is None
                    and disable_user_output):
                # Unless user explicitly set verbosity, suppress from subcommands.
                args.append('--verbosity=none')

            if properties.VALUES.core.log_http.GetBool():
                args.append('--log-http')

            result = self.cli.Execute(args)
            # Best effort to force result of Execute eagerly.  Don't just check
            # that result is iterable to avoid category errors (e.g., accidentally
            # converting a string or dict to a list).
            if isinstance(result, types.GeneratorType):
                return list(result)
            return result

        except SystemExit as exc:
            log.info('[%s] has failed\n', ' '.join(cmd + params))
            raise c_exc.FailedSubCommand(cmd + params, exc.code)
        except BaseException:
            log.info('Failed to run [%s]\n', ' '.join(cmd + params))
            raise
Beispiel #49
0
    def Display(self):
        """The default display method."""

        if not log.IsUserOutputEnabled():
            log.info('Display disabled.')
            # NOTICE: Do not consume resources here. Some commands use this case to
            # access the results of Run() via the return value of Execute().
            return self._resources

        # Determine the format.
        fmt = self._GetFormat()

        # Add a URI cache update tap if needed.
        self._AddUriCacheTap()

        # Add a resource page tap if needed.
        self._AddPageTap()

        # Add a resource flatten tap if needed.
        self._AddFlattenTap()

        # Add a resource filter tap if needed.
        self._AddFilterTap()

        # Add a resource limit tap if needed.
        self._AddLimitTap()

        if fmt:
            # Most command output will end up here.
            log.info('Display format "%s".', fmt)
            resource_printer.Print(self._resources,
                                   fmt,
                                   defaults=self._defaults,
                                   out=log.out)
        elif hasattr(self._command, 'Display'):
            # This will eventually be rare.
            log.info('Explicit Display.')
            self._command.Display(self._args, self._resources)

        # If the default format was used then display the epilog.
        if self._default_format_used:
            self._command.Epilog(self._args)

        return self._resources
Beispiel #50
0
 def _OnData(self, unused_websocket_app, binary_data, opcode, unused_finished):
   """Callback for WebSocket Data messages."""
   if log.GetVerbosity() == logging.DEBUG:
     log.info('RECV opcode [%r] data_len [%d] binary_data[:20] [%r]', opcode,
              len(binary_data), binary_data[:20])
   try:
     # Even though we will only be processing BINARY messages, a bug in the
     # underlying websocket library will report the last opcode in a
     # multi-frame message instead of the first opcode - so CONT instead of
     # BINARY.
     if opcode not in (websocket.ABNF.OPCODE_CONT,
                       websocket.ABNF.OPCODE_BINARY):
       raise WebSocketInvalidOpcodeError('Unexpected WebSocket opcode [%r].' %
                                         opcode)
     self._on_data(binary_data)
   except EnvironmentError as e:
     log.info('Error [%s] while sending to client.', str(e))
     self.Close()
     raise
   except:  # pylint: disable=bare-except
     log.info('Error while processing Data message.', exc_info=True)
     self.Close()
     raise
Beispiel #51
0
 def _RunReceiveLocalData(self, conn, socket_address, websocket_conn):
   """Receive data from provided local connection and send over WebSocket."""
   data = None
   try:
     while True:
       data = conn.recv(utils.SUBPROTOCOL_MAX_DATA_FRAME_SIZE)
       if not data:
         break
       websocket_conn.Send(data)
     if self._server_shutdown:
       log.info('Terminating connection to [%r].', socket_address)
     else:
       log.info('Client closed connection from [%r].', socket_address)
   except socket.error as e:
     if e.errno != errno.EBADF:
       raise
   finally:
     websocket_conn.Close()
     if data:  # Then this was not previously logged.
       log.info('Closing connection from [%r].', socket_address)
     try:
       conn.close()
     except (EnvironmentError, socket.error):
       pass
Beispiel #52
0
def GetDefaultPolicy():
  """Gets the ID of the default policy for the current account."""
  account = properties.VALUES.core.account.Get()
  if not account:
    log.info('Unable to automatically resolve policy since account property '
             'is not set.')
    return None

  domain = _GetDomain(account)
  if not domain:
    log.info('Unable to resolve domain for account [%s]', account)
    return None

  with meta_cache_util.GetCache('resource://', create=True) as cache:
    try:
      # pylint: disable=too-many-function-args
      organization_ref = _GetOrganization(cache, domain)
      policy_ref = _GetPolicy(cache, organization_ref.RelativeName(),
                              (organization_ref,))
    except DefaultPolicyResolutionError as err:
      log.info('Unable to automatically resolve policy: %s', err)
      return None

  return policy_ref.Name()
Beispiel #53
0
def BuildAndPushDockerImage(
    project,
    service,
    source_dir,
    version_id,
    code_bucket_ref,
    gcr_domain,
    runtime_builder_strategy=runtime_builders.RuntimeBuilderStrategy.NEVER,
    parallel_build=False):
  """Builds and pushes a set of docker images.

  Args:
    project: str, The project being deployed to.
    service: ServiceYamlInfo, The parsed service config.
    source_dir: str, path to the service's source directory
    version_id: The version id to deploy these services under.
    code_bucket_ref: The reference to the GCS bucket where the source will be
      uploaded.
    gcr_domain: str, Cloud Registry domain, determines the physical location
      of the image. E.g. `us.gcr.io`.
    runtime_builder_strategy: runtime_builders.RuntimeBuilderStrategy, whether
      to use the new CloudBuild-based runtime builders (alternative is old
      externalized runtimes).
    parallel_build: bool, if True, enable parallel build and deploy.

  Returns:
    BuildArtifact, Representing the pushed container image or in-progress build.

  Raises:
    DockerfileError: if a Dockerfile is present, but the runtime is not
      "custom".
    NoDockerfileError: Raised if a user didn't supply a Dockerfile and chose a
      custom runtime.
    UnsatisfiedRequirementsError: Raised if the code in the directory doesn't
      satisfy the requirements of the specified runtime type.
    ValueError: if an unrecognized runtime_builder_strategy is given
  """
  needs_dockerfile = _NeedsDockerfile(service, source_dir)
  use_runtime_builders = ShouldUseRuntimeBuilders(service,
                                                  runtime_builder_strategy,
                                                  needs_dockerfile)

  # Nothing to do if this is not an image-based deployment.
  if not service.RequiresImage():
    return None
  log.status.Print(
      'Building and pushing image for service [{service}]'
      .format(service=service.module))

  gen_files = dict(_GetSourceContextsForUpload(source_dir))
  if needs_dockerfile and not use_runtime_builders:
    # The runtime builders will generate a Dockerfile in the Cloud, so we only
    # need to do this when we are not using the runtime builders.
    gen_files.update(_GetDockerfiles(service, source_dir))

  image = docker_image.Image(
      dockerfile_dir=source_dir,
      repo=_GetImageName(project, service.module, version_id, gcr_domain),
      nocache=False,
      tag=config.DOCKER_IMAGE_TAG)

  metrics.CustomTimedEvent(metric_names.CLOUDBUILD_UPLOAD_START)
  object_ref = storage_util.ObjectReference(code_bucket_ref, image.tagged_repo)
  relative_yaml_path = _GetYamlPath(source_dir, service.file,
                                    service.parsed.skip_files, gen_files)

  try:
    cloud_build.UploadSource(image.dockerfile_dir, object_ref,
                             gen_files=gen_files,
                             skip_files=service.parsed.skip_files.regex)
  except (OSError, IOError) as err:
    if platforms.OperatingSystem.IsWindows():
      if err.filename and len(err.filename) > _WINDOWS_MAX_PATH:
        raise WindowMaxPathError(err.filename)
    raise
  metrics.CustomTimedEvent(metric_names.CLOUDBUILD_UPLOAD)

  if use_runtime_builders:
    builder_reference = runtime_builders.FromServiceInfo(service, source_dir)
    log.info('Using runtime builder [%s]', builder_reference.build_file_uri)
    builder_reference.WarnIfDeprecated()
    yaml_path = util.ConvertToPosixPath(relative_yaml_path)
    build = builder_reference.LoadCloudBuild(
        {'_OUTPUT_IMAGE': image.tagged_repo,
         '_GAE_APPLICATION_YAML_PATH': yaml_path})
  else:
    build = cloud_build.GetDefaultBuild(image.tagged_repo)

  build = cloud_build.FixUpBuild(build, object_ref)
  return _SubmitBuild(build, image, project, parallel_build)
Beispiel #54
0
def BuildImage(base_image,
               host_workdir,
               main_script,
               output_image_name,
               python_module=None,
               requirements=None,
               extra_packages=None,
               container_workdir=None,
               container_home=None,
               no_cache=True,
               **kwargs):
  """Builds a Docker image.

  Generates a Dockerfile and passes it to `docker build` via stdin.
  All output from the `docker build` process prints to stdout.

  Args:
    base_image: (str) ID or name of the base image to initialize the build
      stage.
    host_workdir: (str) A path indicating where all the required sources are
      located.
    main_script: (str) A string that identifies the executable script under the
      working directory.
    output_image_name: (str) Name of the built image.
    python_module: (str) The executable main_script expressed in the form of a
      Python module, if applicable.
    requirements: (List[str]) Required dependencies to install from PyPI.
    extra_packages: (List[str]) User custom dependency packages to install.
    container_workdir: (str) Working directory in the container.
    container_home: (str) the $HOME directory in the container.
    no_cache: (bool) Do not use cache when building the image.
    **kwargs: Other arguments to pass to underlying method that generates the
      Dockerfile.

  Returns:
    An Image object containing info about the built image.

  Raises:
    DockerError: An error occurred when executing `docker build`
  """

  tag_options = ["-t", output_image_name]

  cache_args = ["--no-cache"] if no_cache else []
  command = ["docker", "build"
            ] + cache_args + tag_options + ["--rm", "-f-", host_workdir]

  setup_path = _DEFAULT_SETUP_PATH if os.path.exists(
      _DEFAULT_SETUP_PATH) else None
  requirements_path = _DEFAULT_REQUIREMENTS_PATH if os.path.exists(
      _DEFAULT_REQUIREMENTS_PATH) else None

  home_dir = container_home or _DEFAULT_HOME
  work_dir = container_workdir or _DEFAULT_WORKDIR

  # The package will be used in Docker, thus norm it to POSIX path format.
  main_package = Package(
      script=main_script.replace(os.sep, posixpath.sep),
      package_path=host_workdir.replace(os.sep, posixpath.sep),
      python_module=python_module)

  dockerfile = _MakeDockerfile(
      base_image,
      main_package=main_package,
      container_home=home_dir,
      container_workdir=work_dir,
      requirements_path=requirements_path,
      setup_path=setup_path,
      extra_requirements=requirements,
      extra_packages=extra_packages,
      **kwargs)

  joined_command = " ".join(command)
  log.info("Running command: {}".format(joined_command))

  return_code = local_util.ExecuteCommand(command, input_str=dockerfile)
  if return_code == 0:
    return Image(output_image_name, home_dir, work_dir)
  else:
    error_msg = textwrap.dedent("""
        Docker failed with error code {code}.
        Command: {cmd}
        """.format(code=return_code, cmd=joined_command))
    raise errors.DockerError(error_msg, command, return_code)
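A hypothetical invocation of BuildImage above; it assumes the Docker CLI is on PATH and that the placeholder paths and image name make sense in the caller's environment:

# Hypothetical usage of BuildImage as defined above; all names are placeholders.
image = BuildImage(
    base_image='python:3.9',
    host_workdir='./trainer',
    main_script='task.py',
    output_image_name='gcr.io/my-project/trainer:latest',
    requirements=['pandas>=1.0'],
    no_cache=False)
# On success, `image` carries the image name plus the container's home and
# working directories; on failure, errors.DockerError is raised instead.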
Beispiel #55
0
def _NeedsDockerfile(info, source_dir):
  """Returns True if the given directory needs a Dockerfile for this app.

  A Dockerfile is necessary when there is no Dockerfile in source_dir,
  regardless of whether we generate it here on the client-side, or in Cloud
  Container Builder server-side.

  The reason this function is more complicated than that is that it additionally
  verifies the sanity of the provided configuration by raising an exception if:

  - The runtime is "custom", but no Dockerfile is present
  - The runtime is not "custom", and a Dockerfile or cloudbuild.yaml is present
  - The runtime is "custom", and has both a cloudbuild.yaml and a Dockerfile.

  (The reason cloudbuild.yaml is tied into this method is that its use should be
  mutually exclusive with the Dockerfile.)

  Args:
    info: (googlecloudsdk.api_lib.app.yaml_parsing.ServiceYamlInfo). The
      configuration for the service.
    source_dir: str, the path to the service's source directory

  Raises:
    CloudbuildYamlError: if a cloudbuild.yaml is present, but the runtime is not
      "custom".
    DockerfileError: if a Dockerfile is present, but the runtime is not
      "custom".
    NoDockerfileError: Raised if a user didn't supply a Dockerfile and chose a
      custom runtime.
    CustomRuntimeFilesError: if a custom runtime had both a Dockerfile and a
      cloudbuild.yaml file.

  Returns:
    bool, whether Dockerfile generation is necessary.
  """
  has_dockerfile = os.path.exists(
      os.path.join(source_dir, config.DOCKERFILE))
  has_cloudbuild = os.path.exists(
      os.path.join(source_dir, runtime_builders.Resolver.CLOUDBUILD_FILE))
  if info.runtime == 'custom':
    if has_dockerfile and has_cloudbuild:
      raise CustomRuntimeFilesError(
          ('A custom runtime must have exactly one of [{}] and [{}] in the '
           'source directory; [{}] contains both').format(
               config.DOCKERFILE, runtime_builders.Resolver.CLOUDBUILD_FILE,
               source_dir))
    elif has_dockerfile:
      log.info('Using %s found in %s', config.DOCKERFILE, source_dir)
      return False
    elif has_cloudbuild:
      log.info('Not using %s because cloudbuild.yaml was found instead.',
               config.DOCKERFILE)
      return True
    else:
      raise NoDockerfileError(
          'You must provide your own Dockerfile when using a custom runtime. '
          'Otherwise provide a "runtime" field with one of the supported '
          'runtimes.')
  else:
    if has_dockerfile:
      raise DockerfileError(
          'There is a Dockerfile in the current directory, and the runtime '
          'field in {0} is currently set to [runtime: {1}]. To use your '
          'Dockerfile to build a custom runtime, set the runtime field to '
          '[runtime: custom]. To continue using the [{1}] runtime, please '
          'remove the Dockerfile from this directory.'.format(info.file,
                                                              info.runtime))
    elif has_cloudbuild:
      raise CloudbuildYamlError(
          'There is a cloudbuild.yaml in the current directory, and the '
          'runtime field in {0} is currently set to [runtime: {1}]. To use '
          'your cloudbuild.yaml to build a custom runtime, set the runtime '
          'field to [runtime: custom]. To continue using the [{1}] runtime, '
          'please remove the cloudbuild.yaml from this directory.'.format(
              info.file, info.runtime))
    log.info('Need Dockerfile to be generated for runtime %s', info.runtime)
    return True
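The decision matrix in the docstring can be summarized in a few lines. This standalone sketch mirrors the documented behavior, with a plain ValueError standing in for the SDK's specific exception types:

# Standalone illustration of the decision matrix above; it mirrors the
# docstring rather than calling the SDK implementation.
def needs_dockerfile(runtime, has_dockerfile, has_cloudbuild):
  if runtime == 'custom':
    if has_dockerfile and has_cloudbuild:
      raise ValueError('custom runtime: provide exactly one of the two files')
    if has_dockerfile:
      return False   # the user's Dockerfile is used as-is
    if has_cloudbuild:
      return True    # cloudbuild.yaml drives the build instead
    raise ValueError('custom runtime: a Dockerfile or cloudbuild.yaml is required')
  if has_dockerfile or has_cloudbuild:
    raise ValueError('non-custom runtime: remove the Dockerfile/cloudbuild.yaml')
  return True        # a Dockerfile will be generated for the runtime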
Beispiel #56
0
def _MergeArgGroupIntoArgs(args_from_file,
                           group_name,
                           all_arg_groups,
                           all_test_args_set,
                           already_included_set=None):
    """Merges args from an arg group into the given args_from_file dictionary.

  Args:
    args_from_file: dict of arg:value pairs already loaded from the arg-file.
    group_name: str, the name of the arg-group to merge into args_from_file.
    all_arg_groups: dict containing all arg-groups loaded from the arg-file.
    all_test_args_set: set of str, all possible test arg names.
    already_included_set: set of str, all group names which were already
      included. Used to detect 'include:' cycles.

  Raises:
    BadFileException: an undefined arg-group name was encountered.
    InvalidArgException: a valid argument name has an invalid value, or
      use of include: led to cyclic references.
    ToolException: an undefined argument name was encountered.
  """
    if already_included_set is None:
        already_included_set = set()
    elif group_name in already_included_set:
        raise arg_validate.InvalidArgException(
            _INCLUDE, 'Detected cyclic reference to arg group [{g}]'.format(
                g=group_name))
    if group_name not in all_arg_groups:
        raise exceptions.BadFileException(
            'Could not find argument group [{g}] in argument file.'.format(
                g=group_name))

    arg_group = all_arg_groups[group_name]
    if not arg_group:
        log.warning('Argument group [{0}] is empty.'.format(group_name))
        return

    for arg_name in arg_group:
        arg = arg_validate.InternalArgNameFrom(arg_name)
        # Must process include: groups last in order to follow precedence rules.
        if arg == _INCLUDE:
            continue

        if arg not in all_test_args_set:
            raise exceptions.ToolException(
                '[{0}] is not a valid argument name for: gcloud test run.'.
                format(arg_name))
        if arg in args_from_file:
            log.info(
                'Skipping include: of arg [{0}] because it already had value [{1}].'
                .format(arg_name, args_from_file[arg]))
        else:
            args_from_file[arg] = arg_validate.ValidateArgFromFile(
                arg, arg_group[arg_name])

    already_included_set.add(group_name)  # Prevent "include:" cycles

    if _INCLUDE in arg_group:
        included_groups = arg_validate.ValidateStringList(
            _INCLUDE, arg_group[_INCLUDE])
        for included_group in included_groups:
            _MergeArgGroupIntoArgs(args_from_file, included_group,
                                   all_arg_groups, all_test_args_set,
                                   already_included_set)
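Precedence is easiest to see against a concrete arg-file shape. A hypothetical example of the already-parsed YAML the function walks, with placeholder group and argument names (whether individual values pass arg_validate depends on the real validators):

# Hypothetical parsed arg-file contents; names and values are placeholders.
all_arg_groups = {
    'smoke-tests': {
        'type': 'robo',
        'timeout': '5m',
        'include': ['common'],      # included groups are merged last
    },
    'common': {
        'timeout': '10m',           # skipped: smoke-tests already set timeout
        'results-bucket': 'gs://my-results',
    },
}

args_from_file = {}
_MergeArgGroupIntoArgs(
    args_from_file, 'smoke-tests', all_arg_groups,
    all_test_args_set={'type', 'timeout', 'results_bucket'})
# args_from_file ends up with type/timeout from smoke-tests plus
# results_bucket contributed by the included 'common' group.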
Beispiel #57
0
 def _LogTestComplete(self, matrix_state):
     """Let the user know that their test matrix has completed running."""
     log.info('Test matrix completed in state: {0}'.format(matrix_state))
     log.status.Print('\n{0} testing complete.'.format(
         self._test_type.capitalize()))
Beispiel #58
0
def SubmitTraining(jobs_client,
                   job,
                   job_dir=None,
                   staging_bucket=None,
                   packages=None,
                   package_path=None,
                   scale_tier=None,
                   config=None,
                   module_name=None,
                   runtime_version=None,
                   python_version=None,
                   stream_logs=None,
                   user_args=None,
                   labels=None,
                   custom_train_server_config=None):
    """Submit a training job."""
    region = properties.VALUES.compute.region.Get(required=True)
    staging_location = jobs_prep.GetStagingLocation(
        staging_bucket=staging_bucket, job_id=job, job_dir=job_dir)
    try:
        uris = jobs_prep.UploadPythonPackages(
            packages=packages,
            package_path=package_path,
            staging_location=staging_location)
    except jobs_prep.NoStagingLocationError:
        raise flags.ArgumentError(
            'If local packages are provided, the `--staging-bucket` or '
            '`--job-dir` flag must be given.')
    log.debug('Using {0} as trainer uris'.format(uris))

    scale_tier_enum = jobs_client.training_input_class.ScaleTierValueValuesEnum
    scale_tier = scale_tier_enum(scale_tier) if scale_tier else None

    try:
        job = jobs_client.BuildTrainingJob(
            path=config,
            module_name=module_name,
            job_name=job,
            trainer_uri=uris,
            region=region,
            job_dir=job_dir.ToUrl() if job_dir else None,
            scale_tier=scale_tier,
            user_args=user_args,
            runtime_version=runtime_version,
            python_version=python_version,
            labels=labels,
            custom_train_server_config=custom_train_server_config)
    except jobs_prep.NoStagingLocationError:
        raise flags.ArgumentError(
            'If `--package-path` is not specified, at least one Python package '
            'must be specified via `--packages`.')

    project_ref = resources.REGISTRY.Parse(
        properties.VALUES.core.project.Get(required=True),
        collection='ml.projects')
    job = jobs_client.Create(project_ref, job)
    if not stream_logs:
        PrintSubmitFollowUp(job.jobId, print_follow_up_message=True)
        return job
    else:
        PrintSubmitFollowUp(job.jobId, print_follow_up_message=False)

    log_fetcher = stream.LogFetcher(
        filters=log_utils.LogFilters(job.jobId),
        polling_interval=properties.VALUES.ml_engine.polling_interval.GetInt(),
        continue_interval=_CONTINUE_INTERVAL,
        continue_func=log_utils.MakeContinueFunction(job.jobId))

    printer = resource_printer.Printer(log_utils.LOG_FORMAT, out=log.err)
    with execution_utils.RaisesKeyboardInterrupt():
        try:
            printer.Print(log_utils.SplitMultiline(log_fetcher.YieldLogs()))
        except KeyboardInterrupt:
            log.status.Print('Received keyboard interrupt.\n')
            log.status.Print(
                _FOLLOW_UP_MESSAGE.format(job_id=job.jobId,
                                          project=project_ref.Name()))
        except exceptions.HttpError as err:
            log.status.Print('Polling logs failed:\n{}\n'.format(
                six.text_type(err)))
            log.info('Failure details:', exc_info=True)
            log.status.Print(
                _FOLLOW_UP_MESSAGE.format(job_id=job.jobId,
                                          project=project_ref.Name()))

    job_ref = resources.REGISTRY.Parse(
        job.jobId,
        params={'projectsId': properties.VALUES.core.project.GetOrFail},
        collection='ml.projects.jobs')
    job = jobs_client.Get(job_ref)

    return job
Beispiel #59
0
 def TryJson():
   try:
     return json.loads(input_string)
   except ValueError:
     log.info('No JSON detected in service config. Trying YAML...')
Beispiel #60
0
    def CopyFileToGCS(self, local_path, target_obj_ref):
        """Upload a file to the GCS results bucket using the storage API.

    Args:
      local_path: str, the path of the file to upload. File must be on the local
        filesystem.
      target_obj_ref: storage_util.ObjectReference, the path of the file on GCS.

    Returns:
      Object, the storage object that was copied to.

    Raises:
      BucketNotFoundError if the user-specified bucket does not exist.
      UploadError if the file upload is not successful.
      exceptions.BadFileException if the uploaded file size does not match the
          size of the local file.
    """
        file_size = _GetFileSize(local_path)
        src_obj = self.messages.Object(size=file_size)
        mime_type = _GetMimetype(local_path)

        chunksize = self._GetChunkSize()
        upload = transfer.Upload.FromFile(local_path,
                                          mime_type=mime_type,
                                          chunksize=chunksize)
        insert_req = self.messages.StorageObjectsInsertRequest(
            bucket=target_obj_ref.bucket,
            name=target_obj_ref.object,
            object=src_obj)

        gsc_path = '{bucket}/{target_path}'.format(
            bucket=target_obj_ref.bucket,
            target_path=target_obj_ref.object,
        )

        log.info('Uploading [{local_file}] to [{gcs}]'.format(
            local_file=local_path, gcs=gsc_path))
        try:
            response = self.client.objects.Insert(insert_req, upload=upload)
        except api_exceptions.HttpNotFoundError:
            raise BucketNotFoundError(
                'Could not upload file: [{bucket}] bucket does not exist.'.
                format(bucket=target_obj_ref.bucket))
        except api_exceptions.HttpError as err:
            log.debug(
                'Could not upload file [{local_file}] to [{gcs}]: {e}'.format(
                    local_file=local_path,
                    gcs=gsc_path,
                    e=http_exc.HttpException(err)))
            raise UploadError(
                '{code} Could not upload file [{local_file}] to [{gcs}]: {message}'
                .format(code=err.status_code,
                        local_file=local_path,
                        gcs=gsc_path,
                        message=http_exc.HttpException(
                            err, error_format='{status_message}')))

        if response.size != file_size:
            log.debug('Response size: {0} bytes, but local file is {1} bytes.'.
                      format(response.size, file_size))
            raise exceptions.BadFileException(
                'Cloud storage upload failure. Uploaded file does not match local '
                'file: {0}. Please retry.'.format(local_path))
        return response
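A hypothetical call to the method above; `client` is an instance of the storage client class this method belongs to, and construction of the object reference is omitted:

# Hypothetical usage; `client` and `target_ref` (a storage_util.ObjectReference
# for gs://my-results-bucket/results/app.tgz) are assumed to exist already.
uploaded = client.CopyFileToGCS('dist/app.tgz', target_ref)
log.info('Uploaded object of %d bytes', uploaded.size)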