Example 1
def RunSteps(api, properties):
  try:
    _validate_props(properties)
  except ValueError as exc:
    raise recipe_api.InfraFailure('Bad input properties: %s' % exc)

  # Checkout either the committed code or a pending CL, depending on the mode.
  # This also calculates metadata (labels, tags) to apply to images built from
  # this code.
  if properties.mode == PROPERTIES.MODE_CI:
    co, meta = _checkout_ci(api, properties.project)
  elif properties.mode == PROPERTIES.MODE_CL:
    co, meta = _checkout_cl(api, properties.project)
  else:
    raise recipe_api.InfraFailure(
        '%s is not implemented yet' % PROPERTIES.Mode.Name(properties.mode))
  co.gclient_runhooks()

  # Discover what *.yaml manifests (full paths to them) we need to build.
  manifests = _discover_manifests(api, co.path, properties.manifests)
  if not manifests:  # pragma: no cover
    raise recipe_api.InfraFailure('Found no manifests to build')

  with co.go_env():
    # Use the 'cloudbuildhelper' binary that comes with the infra checkout
    # (it's in PATH) to make sure builders use the same version as developers.
    api.cloudbuildhelper.command = 'cloudbuildhelper'

    # Report the exact version we picked up from the infra checkout.
    api.cloudbuildhelper.report_version()

    # Build, tag and upload corresponding images.
    fails = []
    for m in manifests:
      # TODO(vadimsh): Run this in parallel when it's possible.
      try:
        api.cloudbuildhelper.build(
            manifest=m,
            canonical_tag=meta.canonical_tag,
            build_id=api.buildbucket.build_url(),
            infra=properties.infra,
            labels=meta.labels,
            tags=meta.tags,
        )
      except api.step.StepFailure:
        fails.append(api.path.basename(m))

  if fails:
    raise recipe_api.StepFailure('Failed to build: %s' % ', '.join(fails))
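
The helper _discover_manifests is referenced but not shown. A minimal sketch of what it might look like, assuming the recipe engine's api.file.glob_paths call is available (the helper body and its test data are illustrative, not the actual implementation):

  def _discover_manifests(api, root, dirs):
    # Hypothetical: glob for *.yaml manifests under each configured directory.
    paths = []
    for d in dirs:
      paths += api.file.glob_paths(
          'list %s' % d, root.join(d), '*.yaml',
          test_data=['target.yaml'])  # illustrative test data
    return paths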
Example 2
    def _isolated_script_collect_step(self, task, **kwargs):
        step_test_data = kwargs.pop('step_test_data', None)
        if not step_test_data:
            step_test_data = self.m.test_utils.test_api.canned_isolated_script_output(
                passing=True, is_win=self.m.platform.is_win, swarming=True)

        args = self.get_collect_cmd_args(task)
        args.extend(['--task-output-dir', self.m.raw_io.output_dir()])

        try:
            self.m.python(
                name=self._get_step_name('', task),
                script=self.m.swarming_client.path.join('swarming.py'),
                args=args,
                step_test_data=lambda: step_test_data,
                **kwargs)
        finally:
            # Regardless of the outcome of the test (pass or fail), we try to
            # parse the results. If any error occurs while parsing the results,
            # we set them to None, which the caller should treat as invalid
            # results. Note that the try-except block below will not mask the
            # recipe_api.StepFailure exception from the collect step above;
            # instead it is allowed to propagate after the results have been
            # parsed.
            try:
                step_result = self.m.step.active_result
                outdir_json = self.m.json.dumps(step_result.raw_io.output_dir,
                                                indent=2)
                step_result.presentation.logs[
                    'outdir_json'] = outdir_json.splitlines()

                # Check if it's an internal failure.
                summary = self.m.json.loads(
                    step_result.raw_io.output_dir['summary.json'])
                if any(shard['internal_failure']
                       for shard in summary['shards']):
                    raise recipe_api.InfraFailure('Internal swarming failure.')

                # Always show the shards' links in the collect step. (It looks
                # like show_isolated_out_in_collect_step is false by default
                # in recipe runs.)
                links = step_result.presentation.links
                for index in xrange(task.shards):
                    url = task.get_shard_view_url(index)
                    if url:
                        links['shard #%d' % index] = url

                step_result.isolated_script_results = \
                  self._merge_isolated_script_shards(task, step_result)

                self._display_pending(summary, step_result.presentation)
            except Exception as e:
                self.m.step.active_result.presentation.logs[
                    'no_results_exc'] = [str(e)]
                self.m.step.active_result.isolated_script_results = None
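
The only summary.json fields read directly here are shards and each shard's internal_failure flag, so the minimal document shape this parsing relies on looks like (values illustrative):

    {
      "shards": [
        {"internal_failure": false},
        {"internal_failure": false}
      ]
    }

_merge_isolated_script_shards (shown in Example 7) and _display_pending may consume more of the task output directory.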
Example 3
    def _isolated_script_collect_step(self, task, **kwargs):
        step_test_data = kwargs.pop('step_test_data', None)
        if not step_test_data:
            step_test_data = self.m.test_utils.test_api.canned_isolated_script_output(
                passing=True, is_win=self.m.platform.is_win, swarming=True)

        args = self.get_collect_cmd_args(task)
        args.extend(['--task-output-dir', self.m.raw_io.output_dir()])

        try:
            self.m.python(
                name=self._get_step_name('', task),
                script=self.m.swarming_client.path.join('swarming.py'),
                args=args,
                step_test_data=lambda: step_test_data,
                **kwargs)
        finally:
            # Regardless of the outcome of the test (pass or fail), we try to
            # parse the results. If any error occurs while parsing the results,
            # we set them to None, which the caller should treat as invalid
            # results. Note that the try-except block below will not mask the
            # recipe_api.StepFailure exception from the collect step above;
            # instead it is allowed to propagate after the results have been
            # parsed.
            try:
                step_result = self.m.step.active_result
                outdir_json = self.m.json.dumps(step_result.raw_io.output_dir,
                                                indent=2)
                step_result.presentation.logs[
                    'outdir_json'] = outdir_json.splitlines()

                # Check if it's an internal failure.
                summary = self.m.json.loads(
                    step_result.raw_io.output_dir['summary.json'])
                if any(shard['internal_failure']
                       for shard in summary['shards']):
                    raise recipe_api.InfraFailure('Internal swarming failure.')

                # TODO(nednguyen, kbr): Combine isolated script results from multiple
                # shards rather than assuming that there is always just one shard.
                assert len(summary['shards']) == 1
                results_raw = step_result.raw_io.output_dir[self.m.path.join(
                    '0', 'output.json')]
                step_result.isolated_script_results = self.m.json.loads(
                    results_raw)

                self._display_pending(summary, step_result.presentation)
            except Exception as e:
                self.m.step.active_result.presentation.logs[
                    'no_results_exc'] = [str(e)]
                self.m.step.active_result.isolated_script_results = None
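
This is an earlier revision of the collect step in Example 2: instead of merging results across shards via _merge_isolated_script_shards, it asserts that exactly one shard exists (see the TODO) and loads 0/output.json directly.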
Example 4
  def _handle_summary_json(self, task, summary, step_result):
    # We collect failures now and add links to all shards first, before
    # failing the build. Each entry is a (shard index, error message) tuple.
    infra_failures = []
    links = step_result.presentation.links
    for index, shard in enumerate(summary['shards']):
      url = task.get_shard_view_url(index)
      display_text = 'shard #%d' % index

      if not shard or shard.get('internal_failure'):  # pragma: no cover
        display_text = (
          'shard #%d had an internal swarming failure' % index)
        infra_failures.append((index, 'Internal swarming failure'))
      elif self._is_expired(shard):
        display_text = (
          'shard #%d expired, not enough capacity' % index)
        infra_failures.append((
            index, 'There isn\'t enough capacity to run your test'))
      elif self._is_timed_out(shard):
        display_text = (
          'shard #%d timed out, took too much time to complete' % index)
      elif self._get_exit_code(shard) != '0':  # pragma: no cover
        display_text = 'shard #%d (failed)' % index

      if self.show_isolated_out_in_collect_step:
        isolated_out = shard.get('isolated_out')
        if isolated_out:
          link_name = 'shard #%d isolated out' % index
          links[link_name] = isolated_out['view_url']

      if url and self.show_shards_in_collect_step:
        links[display_text] = url

    self._display_pending(summary, step_result.presentation)

    if infra_failures:
      template = 'Shard #%s failed: %s'

      # Done so that raising an InfraFailure doesn't cause an error.
      # TODO(martiniss): Remove this hack. Requires recipe engine change
      step_result._retcode = 2
      step_result.presentation.status = self.m.step.EXCEPTION
      raise recipe_api.InfraFailure(
          '\n'.join(template % f for f in infra_failures), result=step_result)
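
For example, with infra_failures = [(1, "There isn't enough capacity to run your test")], the step status is set to EXCEPTION and the raised InfraFailure message reads:

    Shard #1 failed: There isn't enough capacity to run your test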
Example 5
def _checkout_ci(api, project):
  """Checks out some committed revision (based on Buildbucket properties).

  Args:
    api: recipes API.
    project: PROPERTIES.Project enum.

  Returns:
    (infra_checkout.Checkout, Metadata).
  """
  conf, internal, repo_url = {
    PROPERTIES.PROJECT_INFRA: (
        'infra',
        False,
        'https://chromium.googlesource.com/infra/infra',
    ),
    PROPERTIES.PROJECT_INFRA_INTERNAL: (
        'infra_internal',
        True,
        'https://chrome-internal.googlesource.com/infra/infra_internal',
    ),
  }[project]

  co = api.infra_checkout.checkout(gclient_config_name=conf, internal=internal)
  rev = co.bot_update_step.presentation.properties['got_revision']
  cp = co.bot_update_step.presentation.properties['got_revision_cp']

  cp_ref, cp_num = api.commit_position.parse(cp)
  if cp_ref != 'refs/heads/master':  # pragma: no cover
    raise recipe_api.InfraFailure(
        'Only refs/heads/master commits are supported for now, got %r' % cp_ref)

  return co, Metadata(
      canonical_tag='ci-%s-%d-%s' % (
          _date(api),
          cp_num,
          rev[:7],
      ),
      labels={
          'org.opencontainers.image.source': repo_url,
          'org.opencontainers.image.revision': rev,
      },
      tags=['latest'])
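
To illustrate the tag format: if _date(api) returned '2019.10.08' (a hypothetical value; its format is defined elsewhere), the commit position number were 24501, and the revision began with 0f2b7a1, the canonical tag would be ci-2019.10.08-24501-0f2b7a1.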
Example 6
    def get_fuchsia_version(self, flutter_bin):
        """Get the Fuchsia SDK version from the given Flutter SDK.

    Args:
      flutter_bin: Path to Flutter bin with internal/fuchsia-linux.version.

    Returns:
      String of the Fuchsia SDK version to pull artifacts from GCP.
    """
        # Flutter SDK only stores the CIPD version, so CIPD must be queried to
        # find the SDK version tag for this ref.
        version_path = flutter_bin.join('internal', 'fuchsia-linux.version')
        version = self.m.file.read_text('Read fuchsia cipd version',
                                        version_path)
        fuchsia_cipd = self.m.cipd.describe(FUCHSIA_SDK_CIPD, version=version)
        # There are multiple tags in a Fuchsia SDK CIPD description requiring
        # a search through the tags tuple for the version tag.
        for tag in fuchsia_cipd.tags:
            if 'version:' in tag.tag:
                return tag.tag.replace('version:', '')
        raise recipe_api.InfraFailure('No version tag on Fuchsia SDK CIPD ref')
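
The tag scan is plain string matching; a standalone sketch with a hypothetical tag list (a CIPD instance carries several tags, only one of which is the version tag):

    tags = ['git_revision:0f2b7a1...', 'version:10.20191008.2.1']  # hypothetical
    version = next(
        (t.replace('version:', '') for t in tags if 'version:' in t), None)
    # version == '10.20191008.2.1'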
Example 7
    def _merge_isolated_script_shards(self, task, step_result):
        # This code is unfortunately specialized to the "simplified"
        # JSON format that used to be the standard for recipes. The
        # isolated scripts should be changed to use the now-standard
        # Chromium JSON test results format:
        # https://www.chromium.org/developers/the-json-test-results-format
        # Note that gtests, above, don't seem to conform to this
        # format yet, so it didn't seem like a good prerequisite to
        # switch the isolated tests over when adding sharding support.
        #
        # These are the only keys we pay attention to in the output JSON.
        merged_results = {
            'successes': [],
            'failures': [],
            'valid': True,
        }
        for i in xrange(task.shards):
            path = self.m.path.join(str(i), 'output.json')
            if path not in step_result.raw_io.output_dir:
                raise Exception('no results from shard #%d' % i)
            results_raw = step_result.raw_io.output_dir[path]
            try:
                results_json = self.m.json.loads(results_raw)
            except Exception as e:
                raise Exception(
                    'error decoding JSON results from shard #%d: %s' % (i, e))
            for key in merged_results:
                if key in results_json:
                    if isinstance(merged_results[key], list):
                        merged_results[key].extend(results_json[key])
                    elif isinstance(merged_results[key], bool):
                        merged_results[key] = (merged_results[key]
                                               and results_json[key])
                    else:  # pragma: no cover
                        raise recipe_api.InfraFailure(
                            'Unknown key type %s when handling key %s.' %
                            (type(merged_results[key]), key))
        return merged_results
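
To illustrate the merge semantics with two hypothetical shard outputs:

    shard0 = {'successes': ['a'], 'failures': [], 'valid': True}
    shard1 = {'successes': ['b'], 'failures': ['c'], 'valid': False}
    # List-valued keys are concatenated and 'valid' is ANDed, so the loop
    # above yields:
    #   {'successes': ['a', 'b'], 'failures': ['c'], 'valid': False}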
Example 8
  def count_objects(self, previous_result=None, can_fail_build=False, **kwargs):
    """Returns `git count-objects` result as a dict.

    Args:
      previous_result (dict): the result of previous count_objects call.
        If passed, delta is reported in the log and step text.
      can_fail_build (bool): if True, may fail the build and/or raise an
        exception. Defaults to False.

    Returns:
      A dict of count-object values, or None if count-object run failed.
    """
    if previous_result:
      assert isinstance(previous_result, dict)
      assert all(isinstance(v, long) for v in previous_result.itervalues())
      assert 'size' in previous_result
      assert 'size-pack' in previous_result

    step_result = None
    try:
      step_result = self(
          'count-objects', '-v', stdout=self.m.raw_io.output(),
          can_fail_build=can_fail_build, **kwargs)

      if not step_result.stdout:
        return None

      result = {}
      for line in step_result.stdout.splitlines():
        name, value = line.split(':', 1)
        result[name] = long(value.strip())

      def results_to_text(results):
        return ['  %s: %s' % (k, v) for k, v in results.iteritems()]

      step_result.presentation.logs['result'] = results_to_text(result)

      if previous_result:
        delta = {
            key: value - previous_result[key]
            for key, value in result.iteritems()
            if key in previous_result}
        step_result.presentation.logs['delta'] = (
            ['before:'] + results_to_text(previous_result) +
            ['', 'after:'] + results_to_text(result) +
            ['', 'delta:'] + results_to_text(delta)
        )

        size_delta = (
            result['size'] + result['size-pack']
            - previous_result['size'] - previous_result['size-pack'])
        # size_delta is in KiB.
        step_result.presentation.step_text = (
            'size delta: %+.2f MiB' % (size_delta / 1024.0))

      return result
    except Exception as ex:
      if step_result:
        step_result.presentation.logs['exception'] = ['%r' % ex]
        step_result.presentation.status = self.m.step.WARNING
      if can_fail_build:
        raise recipe_api.InfraFailure('count-objects failed: %s' % ex)
      return None
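
A sketch of the intended call pattern, assuming the method is exposed on a git recipe module (the module path is illustrative):

    before = api.git.count_objects()
    # ... run steps that may grow the repository, e.g. a fetch ...
    if before:
        # Logs before/after/delta and shows 'size delta: +X.XX MiB' as
        # step text; with can_fail_build=False (the default) it returns
        # None instead of raising if `git count-objects` fails.
        api.git.count_objects(previous_result=before)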
Example 9
  def build(self,
            manifest,
            canonical_tag=None,
            build_id=None,
            infra=None,
            labels=None,
            tags=None,
            step_test_image=None):
    """Calls `cloudbuildhelper build <manifest>` interpreting the result.

    Args:
      * manifest (Path) - path to YAML file with definition of what to build.
      * canonical_tag (str) - tag to push the image to if we built a new image.
      * build_id (str) - identifier of the CI build to put into metadata.
      * infra (str) - what section to pick from 'infra' field in the YAML.
      * labels ({str: str}) - labels to attach to the docker image.
      * tags ([str]) - tags to unconditionally push the image to.
      * step_test_image (Image) - image to produce in training mode.

    Returns:
      Image instance or NotUploadImage if the YAML doesn't specify a registry.

    Raises:
      StepFailure on failures.
    """
    name, _ = self.m.path.splitext(self.m.path.basename(manifest))

    cmd = [self.command, 'build', manifest]
    if canonical_tag:
      cmd += ['-canonical-tag', canonical_tag]
    if build_id:
      cmd += ['-build-id', build_id]
    if infra:
      cmd += ['-infra', infra]
    for k in sorted(labels or {}):
      cmd += ['-label', '%s=%s' % (k, labels[k])]
    for t in (tags or []):
      cmd += ['-tag', t]
    cmd += ['-json-output', self.m.json.output()]

    # Expected JSON output (may be produced even on failures).
    #
    # {
    #   "error": "...",  # error message on errors
    #   "image": {
    #     "image": "registry/name",
    #     "digest": "sha256:...",
    #     "tag": "its-canonical-tag",
    #   },
    #   "view_image_url": "https://...",  # for humans
    #   "view_build_url": "https://...",  # for humans
    # }
    try:
      res = self.m.step(
          name='cloudbuildhelper build %s' % name,
          cmd=cmd,
          step_test_data=lambda: self.test_api.output(
              step_test_image, name, canonical_tag,
          ),
      )
      if not res.json.output:  # pragma: no cover
        res.presentation.status = self.m.step.FAILURE
        raise recipe_api.InfraFailure(
            'Call succeeded, but didn\'t produce -json-output')
      img = res.json.output.get('image')
      if not img:
        return self.NotUploadImage
      return self.Image(
          image=img['image'],
          digest=img['digest'],
          tag=img.get('tag'),
      )
    finally:
      self._make_step_pretty(self.m.step.active_result, tags)
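
Example 1 shows this method being invoked in a loop over manifests. A minimal sketch of consuming the return value (assuming the NotUploadImage sentinel is exposed on the module, as the code above suggests):

    img = api.cloudbuildhelper.build(manifest=m, canonical_tag='v1')
    if img is not api.cloudbuildhelper.NotUploadImage:
        # img.image is 'registry/name', img.digest is 'sha256:...',
        # img.tag is the canonical tag (possibly None).
        pass  # e.g. record img for a later deployment step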
Example 10
    def _telemetry_gpu_collect_step(self, task, **kwargs):
        step_test_data = kwargs.pop('step_test_data', None)
        if not step_test_data:
            step_test_data = self.m.test_utils.test_api.canned_telemetry_gpu_output(
                passing=True, is_win=self.m.platform.is_win, swarming=True)

        args = self.get_collect_cmd_args(task)
        args.extend(['--task-output-dir', self.m.raw_io.output_dir()])

        try:
            self.m.python(
                name=self._get_step_name('', task),
                script=self.m.swarming_client.path.join('swarming.py'),
                args=args,
                step_test_data=lambda: step_test_data,
                **kwargs)
        finally:
            # Regardless of the outcome of the test (pass or fail), we try to
            # parse the results. If any error occurs while parsing the results,
            # we set them to None, which the SwarmingTelemetryGPUTest class in
            # recipe_modules/chromium/steps.py treats as invalid test results.
            # Note that the try-except block below will not mask the
            # recipe_api.StepFailure exception from the collect step above;
            # instead it is allowed to propagate after the results have been
            # parsed.
            try:
                step_result = self.m.step.active_result
                outdir_json = self.m.json.dumps(step_result.raw_io.output_dir,
                                                indent=2)
                step_result.presentation.logs[
                    'outdir_json'] = outdir_json.splitlines()

                # Check if it's an internal failure.
                summary = self.m.json.loads(
                    step_result.raw_io.output_dir['summary.json'])
                if any(shard['internal_failure']
                       for shard in summary['shards']):
                    raise recipe_api.InfraFailure('Internal swarming failure.')

                # TODO(sergiyb): Combine telemetry results from multiple shards rather
                # than assuming that there is always just one shard.
                assert len(summary['shards']) == 1
                results_raw = step_result.raw_io.output_dir[self.m.path.join(
                    '0', 'results.json')]

                # GPU test launcher may bail out early with return code 0 and empty
                # results file if there were no tests to run, e.g. when all tests are
                # disabled on current platform.
                # TODO(sergiyb): We should instead rewrite run_gpu_test.py to always
                # write valid results.json regardless of the return code.
                if step_result.retcode == 0 and results_raw == '':
                    step_result.telemetry_results = {
                        'per_page_values': [],
                        'pages': []
                    }
                else:
                    step_result.telemetry_results = self.m.json.loads(
                        results_raw)

                self._display_pending(summary, step_result.presentation)
            except Exception as e:
                self.m.step.active_result.presentation.logs[
                    'no_results_exc'] = [str(e)]
                self.m.step.active_result.telemetry_results = None
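
This is the telemetry-GPU variant of the collect step in Examples 2 and 3. It differs in the canned test data, the results file name (results.json rather than output.json), and the special case that treats return code 0 with an empty results file as a valid empty result set instead of an error.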