Example #1
0
def GetBuildExtraEnv(build_type):
    """Gets the extra_env for building a package.

  Args:
    build_type: The type of build we want to do.

  Returns:
    The extra_env to use when building.
  """
    if build_type is None:
        build_type = BuildType.ASAN

    use_flags = os.environ.get('USE', '').split()
    # Check that the user hasn't already set USE flags that we can set.
    # No good way to iterate over an enum in python2.
    for use_flag in BuildType.CHOICES:
        if use_flag in use_flags:
            logging.warn('%s in USE flags. Please use --build_type instead.',
                         use_flag)

    # Set USE flags.
    fuzzer_build_type = 'fuzzer'
    use_flags += [fuzzer_build_type, build_type]
    features_flags = os.environ.get('FEATURES', '').split()
    if build_type == BuildType.COVERAGE:
        # We must use ASan when doing coverage builds.
        use_flags.append(BuildType.ASAN)
        # Use noclean so that a coverage report can be generated based on the source
        # code.
        features_flags.append('noclean')

    return {
        'FEATURES': ' '.join(features_flags),
        'USE': ' '.join(use_flags),
    }
Example #2
0
def StripFuzzerPrefixes(fuzzer_name):
    """Strip the prefix ClusterFuzz uses in case they are specified.

  Strip the prefixes used by ClusterFuzz if the users has included them by
  accident.

  Args:
    fuzzer_name: The fuzzer who's name may contain prefixes.

  Returns:
    The name of the fuzz target without prefixes.
  """
    initial_name = fuzzer_name

    def StripPrefix(prefix):
        if fuzzer_name.startswith(prefix):
            return fuzzer_name[len(prefix):]
        return fuzzer_name

    clusterfuzz_prefixes = ['libFuzzer_', 'chromeos_']

    for prefix in clusterfuzz_prefixes:
        fuzzer_name = StripPrefix(prefix)

    if initial_name != fuzzer_name:
        logging.warn(
            '%s contains a prefix from ClusterFuzz (one or more of %s) that is not '
            "part of the fuzzer's name. Interpreting --fuzzer as %s.",
            initial_name, clusterfuzz_prefixes, fuzzer_name)

    return fuzzer_name
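A standalone sketch (not part of the original module) that mirrors the prefix-stripping behavior above; the fuzzer names and helper name are hypothetical:

def _strip_clusterfuzz_prefixes(name, prefixes=('libFuzzer_', 'chromeos_')):
    # Strip each known prefix at most once, in order, as StripFuzzerPrefixes does.
    for prefix in prefixes:
        if name.startswith(prefix):
            name = name[len(prefix):]
    return name

assert _strip_clusterfuzz_prefixes('libFuzzer_chromeos_foo_fuzzer') == 'foo_fuzzer'
assert _strip_clusterfuzz_prefixes('foo_fuzzer') == 'foo_fuzzer'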
Example #3
0
    def _DeleteExistingResources(self):
        """Deletes instances, image and the tarball on GCS if they exist."""
        steps = []

        if self.tarball_remote:
            steps.append(partial(self.gscontext.DoCommand, ["rm", self.tarball_remote]))
        if self.image:
            steps.append(partial(self.gce_context.DeleteImage, self.image))

        for instance in self.instances.values():
            steps.append(
                partial(
                    self._DeleteExistingResouce,
                    resource=instance,
                    existence_checker=self.gce_context.InstanceExists,
                    deletor=self._DeleteInstance,
                )
            )

        # Delete all resources in parallel.
        try:
            parallel.RunParallelSteps(steps)
        except Exception as e:
            logging.warn("Infrastructure failure. Error: %r" % e)

        # Reset variables.
        self.tarball_remote = None
        self.image = None
        self.image_link = None
        self.instances = {}
Example #4
0
  def _DeleteExistingResources(self):
    """Deletes instances, image and the tarball on GCS if they exist."""
    steps = []

    if self.tarball_remote:
      steps.append(partial(self.gscontext.DoCommand,
                           ['rm', self.tarball_remote]))
    if self.image:
      steps.append(partial(self.gce_context.DeleteImage, self.image))

    for instance in self.instances.values():
      steps.append(partial(
          self._DeleteExistingResouce,
          resource=instance,
          existence_checker=self.gce_context.InstanceExists,
          deletor=self._DeleteInstance))

    # Delete all resources in parallel.
    try:
      parallel.RunParallelSteps(steps)
    except Exception as e:
      logging.warn('Infrastructure failure. Error: %r' % e)

    # Reset variables.
    self.tarball_remote = None
    self.image = None
    self.image_link = None
    self.instances = {}
Example #5
0
    def Start(self):
        """Starts the nebraska process remotely on the remote device."""
        if self.is_alive():
            logging.warn('Nebraska is already running, not running again.')
            return

        self.start()
        self._WaitUntilStarted()
Example #6
0
def CopyToArcBucket(android_bucket_url, build_branch, build_id, subpaths,
                    targets, arc_bucket_url, acls):
    """Copies from source Android bucket to ARC++ specific bucket.

  Copies each build to the ARC bucket eliminating the subpath.
  Applies build specific ACLs for each file.

  Args:
    android_bucket_url: URL of Android build gs bucket
    build_branch: branch of Android builds
    build_id: A string. The Android build id number to check.
    subpaths: Subpath dictionary for each build to copy.
    targets: Dict from build key to (target build suffix, artifact file pattern)
        pair.
    arc_bucket_url: URL of the target ARC build gs bucket
    acls: ACLs dictionary for each build to copy.
  """
    gs_context = gs.GSContext()
    for build, subpath in subpaths.iteritems():
        target, pattern = targets[build]
        build_dir = '%s-%s' % (build_branch, target)
        android_dir = os.path.join(android_bucket_url, build_dir, build_id,
                                   subpath)
        arc_dir = os.path.join(arc_bucket_url, build_dir, build_id)

        # Copy all target files from android_dir to arc_dir, setting ACLs.
        for targetfile in gs_context.List(android_dir):
            if re.search(pattern, targetfile.url):
                basename = os.path.basename(targetfile.url)
                arc_path = os.path.join(arc_dir, basename)
                acl = acls[build]
                needs_copy = True

                # Compare any pre-existing file against the original source.
                if gs_context.Exists(arc_path):
                    if (gs_context.Stat(targetfile.url).hash_crc32c !=
                            gs_context.Stat(arc_path).hash_crc32c):
                        logging.warn('Removing incorrect file %s', arc_path)
                        gs_context.Remove(arc_path)
                    else:
                        logging.info('Skipping already copied file %s',
                                     arc_path)
                        needs_copy = False

                # Copy if necessary, and set the ACL unconditionally.
                # The Stat() call above doesn't verify the ACL is correct and
                # the ChangeACL should be relatively cheap compared to the copy.
                # This covers the following cases:
                # - handling an interrupted copy from a previous run.
                # - rerunning the copy in case one of the googlestorage_acl_X.txt
                #   files changes (e.g. we add a new variant which reuses a build).
                if needs_copy:
                    logging.info('Copying %s -> %s (acl %s)', targetfile.url,
                                 arc_path, acl)
                    gs_context.Copy(targetfile.url, arc_path, version=0)
                gs_context.ChangeACL(arc_path, acl_args_file=acl)
Example #7
0
 def testTimeZones(self):
   for r in regions.BuildRegionsDict(include_all=True).values():
     for tz in r.time_zones:
       if tz not in self.time_zones:
         if r.region_code in regions.REGIONS:
           self.fail(
               'Missing time zones: %r; does a new time zone need to be added '
               'to CrOS, or does testdata need to be updated?' % tz)
         else:
           # This is an unconfirmed region; just print a warning.
           logging.warn(_WARN_UNKNOWN_DATA_IN_UNCONFIRMED_REGION, 'time zone',
                        tz, r.region_code)
Example #8
0
    def setupMockBuild(self, key, version, valid=True):
        """Helper to mock a build."""
        def _RaiseGSNoSuchKey(*_args, **_kwargs):
            raise gs.GSNoSuchKey('file does not exist')

        target = constants.ANDROID_BUILD_TARGETS[key][0]
        src_url = self.makeSrcUrl(target, version)
        if valid:
            # Show source subpath directory.
            src_subdir = os.path.join(src_url,
                                      self.makeSubpath(target, version))
            self.gs_mock.AddCmdResult(['ls', '--', src_url], output=src_subdir)

            # Show files.
            mock_file_template_list = {
                'ARM': ['file-%(version)s.zip'],
                'X86': ['file-%(version)s.zip'],
                'CTS': ['android-cts.zip'],
                'SDK_TOOLS': ['aapt', 'adb']
            }
            filelist = [
                template % {
                    'version': version
                } for template in mock_file_template_list[key]
            ]
            src_filelist = [
                os.path.join(src_subdir, filename) for filename in filelist
            ]
            self.gs_mock.AddCmdResult(['ls', '--', src_subdir],
                                      output='\n'.join(src_filelist))
            for src_file in src_filelist:
                self.gs_mock.AddCmdResult(['stat', '--', src_file],
                                          output=(self.STAT_OUTPUT) % src_url)

            # Show nothing in destination.
            dst_url = self.makeDstUrl(target, version)
            dst_filelist = [
                os.path.join(dst_url, filename) for filename in filelist
            ]
            for dst_file in dst_filelist:
                self.gs_mock.AddCmdResult(['stat', '--', dst_file],
                                          side_effect=_RaiseGSNoSuchKey)
            logging.warn('mocking no %s', dst_url)

            # Allow copying of source to dest.
            for src_file, dst_file in itertools.izip(src_filelist,
                                                     dst_filelist):
                self.gs_mock.AddCmdResult(
                    ['cp', '-v', '--', src_file, dst_file])
        else:
            self.gs_mock.AddCmdResult(['ls', '--', src_url],
                                      side_effect=_RaiseGSNoSuchKey)
Example #9
0
 def testLocales(self):
   missing = []
   for r in regions.BuildRegionsDict(include_all=True).values():
     for l in r.locales:
       if l not in self.locales:
         if r.region_code in regions.REGIONS:
           missing.append(l)
         else:
           logging.warn(_WARN_UNKNOWN_DATA_IN_UNCONFIRMED_REGION, 'locale', l,
                        r.region_code)
   self.assertFalse(missing,
                    ('Missing locale; does testdata need to be updated?: %r' %
                     missing))
Example #10
0
def ExtractKernel(image, kern_out):
    """Extracts the kernel from the given image.

  Args:
    image: The image containing the kernel partition.
    kern_out: The output kernel file.
  """
    ExtractPartition(image, constants.PART_KERN_B, kern_out)
    with open(kern_out, 'r') as kern:
        if not any(kern.read(65536)):
            logging.warn('%s: Kernel B is empty, patching kernel A.', image)
            ExtractPartition(image, constants.PART_KERN_A, kern_out)
            PatchKernel(image, kern_out)
Example #11
0
 def testInputMethods(self):
   # Verify that every region is present in the dict.
   for r in regions.BuildRegionsDict(include_all=True).values():
     for k in r.keyboards:
       resolved_method = self._ResolveInputMethod(k)
       # Make sure the keyboard method is present.
       if resolved_method not in self.input_methods:
         if r.region_code in regions.REGIONS:
           self.fail('Missing keyboard layout %r (resolved from %r)' % (
               resolved_method, k))
         else:
           # This is an unconfirmed region; just print a warning.
           logging.warn(_WARN_UNKNOWN_DATA_IN_UNCONFIRMED_REGION, 'keyboard',
                        k, r.region_code)
Example #12
0
def make_job_entry(tko, job, parent=None, suite_job=False, job_entries=None):
    """Generate a Suite or HWTest event log entry.

    @param tko: TKO database handle.
    @param job: A frontend.Job to generate an entry for.
    @param parent: A (Kind, id) tuple representing the parent key.
    @param suite_job: A boolean indicating whether this represents a suite job.
    @param job_entries: A dictionary mapping job id to earlier job entries.

    @return A dictionary representing the entry suitable for dumping via JSON.
    """
    statuses = tko.get_job_test_statuses_from_db(job.id)
    status = 'pass'
    dut = None
    for s in statuses:
        parsed_status = parse_tko_status_string(s.status)
        # TODO: Improve this generation of status.
        if parsed_status != 'pass':
            status = parsed_status
        if s.hostname:
            dut = s.hostname
        if s.test_started_time == 'None' or s.test_finished_time == 'None':
            logging.warn('TKO entry for %d missing time: %s' %
                         (job.id, str(s)))
    start_time, finish_time = find_start_finish_times(statuses)
    entry = make_entry(('Suite' if suite_job else 'HWTest', int(job.id)),
                       job.name.split('/')[-1],
                       status,
                       start_time,
                       finish_time=finish_time,
                       parent=parent)

    entry['job_id'] = int(job.id)
    if dut:
        entry['dut'] = dut
    if job.shard:
        entry['shard'] = job.shard
    # Determine the try of this job by looking back through what the
    # original job id is.
    if 'retry_original_job_id' in job.keyvals:
        original_job_id = int(job.keyvals['retry_original_job_id'])
        original_job = job_entries.get(original_job_id, None)
        if original_job:
            entry['try'] = original_job['try'] + 1
        else:
            entry['try'] = 0
    else:
        entry['try'] = 1
    entry['gs_url'] = status_history.get_job_gs_url(job)
    return entry
Example #13
0
def _WhiteSpaceLintData(path, data):
    """Run basic whitespace checks on |data|.

  Args:
    path: The name of the file (for diagnostics).
    data: The file content to lint.

  Returns:
    True if everything passed.
  """
    ret = True

    # Make sure files all have a trailing newline.
    if not data.endswith('\n'):
        ret = False
        logging.warn('%s: file needs a trailing newline', path)

    # Disallow leading & trailing blank lines.
    if data.startswith('\n'):
        ret = False
        logging.warn('%s: delete leading blank lines', path)
    if data.endswith('\n\n'):
        ret = False
        logging.warn('%s: delete trailing blank lines', path)

    for i, line in enumerate(data.splitlines(), start=1):
        if line.rstrip() != line:
            ret = False
            logging.warn('%s:%i: trim trailing whitespace: %s', path, i, line)

    return ret
Example #14
0
 def replenish(self):
     """Replenish the number of active provisions to match goals."""
     while ((self.simultaneous == 0 or len(self.active) < self.simultaneous)
            and (self.total == 0 or self.started < self.total)):
         host_name = self.find_idle_dut()
         if host_name:
             build_name = self.find_build_for_dut(host_name)
             self.spawn(host_name, build_name)
         elif self.simultaneous:
             logging.warn('Insufficient DUTs to satisfy goal')
             return False
         else:
             return len(self.active) > 0
     return True
Example #15
0
def SafeList(ctx, url):
  """Get a GS listing with details enabled.

  Ignore most any error. This is because GS flake can trigger all sorts of
  random failures, and we don't want flake to interrupt a multi-day script run.
  It is generally safe to return [] since any files that would have been
  discovered will be presumed to not exist, and so ignored during the current
  cleanup pass.

  This function is also a convenient place to mock out results in unittests.
  """
  try:
    return ctx.List(url, details=True)
  except Exception as e:
    # We can fail for lots of repeated random reasons.
    logging.warn('List of "%s" failed, ignoring: "%s"', url, e)
    return []
Example #16
0
def SafeList(ctx, url):
    """Get a GS listing with details enabled.

  Ignore most any error. This is because GS flake can trigger all sorts of
  random failures, and we don't want flake to interrupt a multi-day script run.
  It is generally safe to return [] since any files that would have been
  discovered will be presumed to not exist, and so ignored during the current
  cleanup pass.

  This function is also a convenient place to mock out results in unittests.
  """
    try:
        return ctx.List(url, details=True)
    except Exception as e:
        # We can fail for lots of repeated random reasons.
        logging.warn('List of "%s" failed, ignoring: "%s"', url, e)
        return []
Example #17
0
def Expire(ctx, dryrun, url):
  """Given a url, move it to the backup buckets.

  Args:
    ctx: GS context.
    dryrun: Do we actually move the file?
    url: Address of file to move.
  """
  logging.info('Expiring: %s', url)
  # Move gs://foo/some/file -> gs://foo-backup/some/file
  parts = urlparse.urlparse(url)
  expired_parts = list(parts)
  expired_parts[1] = parts.netloc + '-backup'
  target_url = urlparse.urlunparse(expired_parts)
  if dryrun:
    logging.notice('gsutil mv %s %s', url, target_url)
  else:
    try:
      ctx.Move(url, target_url)
    except Exception as e:
      # We can fail for lots of repeated random reasons.
      logging.warn('Move of "%s" failed, ignoring: "%s"', url, e)
Example #18
0
def Expire(ctx, dryrun, url):
    """Given a url, move it to the backup buckets.

  Args:
    ctx: GS context.
    dryrun: Do we actually move the file?
    url: Address of file to move.
  """
    logging.info('Expiring: %s', url)
    # Move gs://foo/some/file -> gs://foo-backup/some/file
    parts = urllib.parse.urlparse(url)
    expired_parts = list(parts)
    expired_parts[1] = parts.netloc + '-backup'
    target_url = urllib.parse.urlunparse(expired_parts)
    if dryrun:
        logging.notice('gsutil mv %s %s', url, target_url)
    else:
        try:
            ctx.Move(url, target_url)
        except Exception as e:
            # We can fail for lots of repeated random reasons.
            logging.warn('Move of "%s" failed, ignoring: "%s"', url, e)
Example #19
0
def DepotToolsEnsureBootstrap(depot_tools_path):
    """Start cbuildbot in specified directory with all arguments.

  Args:
    buildroot: Directory to be passed to cbuildbot with --buildroot.
    depot_tools_path: Directory for depot_tools to be used by cbuildbot.
    argv: Command line options passed to cbuildbot_launch.

  Returns:
    Return code of cbuildbot as an integer.
  """
    ensure_bootstrap_script = os.path.join(depot_tools_path,
                                           'ensure_bootstrap')
    if os.path.exists(ensure_bootstrap_script):
        extra_env = {'PATH': PrependPath(depot_tools_path)}
        cros_build_lib.RunCommand([ensure_bootstrap_script],
                                  extra_env=extra_env,
                                  cwd=depot_tools_path)
    else:
        # This is normal when checking out branches older than this script.
        logging.warn('ensure_bootstrap not found, skipping: %s',
                     ensure_bootstrap_script)
Example #20
0
  def testFirmwareLocales(self):
    bmpblk_dir = os.path.join(
        os.environ.get('CROS_WORKON_SRCROOT'), 'src', 'platform', 'bmpblk')
    if not os.path.exists(bmpblk_dir):
      logging.warn('Skipping testFirmwareLocales, since %r is missing',
                   bmpblk_dir)
      return

    bmp_locale_dir = os.path.join(bmpblk_dir, 'strings', 'locale')
    for r in regions.BuildRegionsDict(include_all=True).values():
      for l in r.locales:
        paths = [os.path.join(bmp_locale_dir, l)]
        if '-' in l:
          paths.append(os.path.join(bmp_locale_dir, l.partition('-')[0]))
        if not any([os.path.exists(x) for x in paths]):
          if r.region_code in regions.REGIONS:
            self.fail(
                'For region %r, none of %r exists' % (r.region_code, paths))
          else:
            logging.warn('For region %r, none of %r exists; '
                         'just a warning since this region is not confirmed',
                         r.region_code, paths)
Example #21
0
def ChunkedBatchWrite(entities, client, batch_size=_BATCH_CHUNK_SIZE):
    """Write |entities| to datastore |client| in batches of size |batch_size|.

  Datastore has an entities-per-batch limit of 500. This utility function helps
  write a large number of entities to datastore by splitting the work into
  limited-size batch writes.

  Args:
    entities: iterator of datastore entities to write.
    client: datastore.Client instance.
    batch_size: (default: 500) Maximum number of entities per batch.
  """
    for chunk in iter_utils.SplitToChunks(entities, batch_size):
        entities = list(chunk)

        batch = client.batch()
        for entity in entities:
            batch.put(entity)
        try:
            batch.commit()
        except gcloud.exceptions.BadRequest:
            logging.warn('Unexportable entities:\n%s', entities)
            raise
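ChunkedBatchWrite relies on iter_utils.SplitToChunks, which is not shown here. As a rough illustration only (an assumed equivalent, not the real helper), such a chunking generator could look like this:

import itertools

def _split_to_chunks(iterable, chunk_size):
    # Yield successive chunks of at most chunk_size items from iterable.
    iterator = iter(iterable)
    while True:
        chunk = list(itertools.islice(iterator, chunk_size))
        if not chunk:
            return
        yield chunk

assert list(_split_to_chunks(range(7), 3)) == [[0, 1, 2], [3, 4, 5], [6]]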
Example #22
0
def IsBuildIdValid(bucket_url, build_branch, build_id, targets):
    """Checks that a specific build_id is valid.

  Looks for that build_id for all builds. Confirms that the subpath can
  be found and that the zip file is present in that subdirectory.

  Args:
    bucket_url: URL of Android build gs bucket
    build_branch: branch of Android builds
    build_id: A string. The Android build id number to check.
    targets: Dict from build key to (target build suffix, artifact file pattern)
        pair.

  Returns:
    Returns subpaths dictionary if build_id is valid.
    None if the build_id is not valid.
  """
    gs_context = gs.GSContext()
    subpaths_dict = {}
    for build, (target, _) in targets.iteritems():
        build_dir = '%s-%s' % (build_branch, target)
        build_id_path = os.path.join(bucket_url, build_dir, build_id)

        # Find name of subpath.
        try:
            subpaths = gs_context.List(build_id_path)
        except gs.GSNoSuchKey:
            logging.warn(
                'Directory [%s] does not contain any subpath, ignoring it.',
                build_id_path)
            return None
        if len(subpaths) > 1:
            logging.warn(
                'Directory [%s] contains more than one subpath, ignoring it.',
                build_id_path)
            return None

        subpath_dir = subpaths[0].url.rstrip('/')
        subpath_name = os.path.basename(subpath_dir)

        # Look for a zipfile ending in the build_id number.
        try:
            gs_context.List(subpath_dir)
        except gs.GSNoSuchKey:
            logging.warn(
                'Did not find a file for build id [%s] in directory [%s].',
                build_id, subpath_dir)
            return None

        # Record subpath for the build.
        subpaths_dict[build] = subpath_name

    # If we got here, it means we found an appropriate build for all platforms.
    return subpaths_dict
Example #23
0
def GetLatestBuild(bucket_url, build_branch, targets):
    """Searches the gs bucket for the latest green build.

  Args:
    bucket_url: URL of Android build gs bucket
    build_branch: branch of Android builds
    targets: Dict from build key to (target build suffix, artifact file pattern)
        pair.

  Returns:
    Tuple of (latest version string, subpaths dictionary)
    If no latest build can be found, returns None, None
  """
    gs_context = gs.GSContext()
    common_build_ids = None
    # Find builds for each target.
    for target, _ in targets.itervalues():
        build_dir = '-'.join((build_branch, target))
        base_path = os.path.join(bucket_url, build_dir)
        build_ids = []
        for gs_result in gs_context.List(base_path):
            # Remove trailing slashes and get the base name, which is the build_id.
            build_id = os.path.basename(gs_result.url.rstrip('/'))
            if not build_id.isdigit():
                logging.warn(
                    'Directory [%s] does not look like a valid build_id.',
                    gs_result.url)
                continue
            build_ids.append(build_id)

        # Update current list of builds.
        if common_build_ids is None:
            # First run, populate it with the first platform.
            common_build_ids = set(build_ids)
        else:
            # Already populated, find the ones that are common.
            common_build_ids.intersection_update(build_ids)

    if common_build_ids is None:
        logging.warn('Did not find a build_id common to all platforms.')
        return None, None

    # Otherwise, find the most recent one that is valid.
    for build_id in sorted(common_build_ids, key=int, reverse=True):
        subpaths = IsBuildIdValid(bucket_url, build_branch, build_id, targets)
        if subpaths:
            return build_id, subpaths

    # If not found, no build_id is valid.
    logging.warn('Did not find a build_id valid on all platforms.')
    return None, None
Example #24
0
def CreateCacheTarball(extensions, outputdir, identifier, tarball):
    """Cache |extensions| in |outputdir| and pack them in |tarball|."""

    crxdir = os.path.join(outputdir, 'crx')
    jsondir = os.path.join(outputdir, 'json', 'extensions')
    validationdir = os.path.join(outputdir, 'validation')

    osutils.SafeMakedirs(os.path.join(crxdir, 'extensions'))
    osutils.SafeMakedirs(jsondir)
    was_errors = False
    for ext in extensions:
        extension = extensions[ext]
        # It should not be in use at this moment.
        if 'managed_users' in extension:
            cros_build_lib.Die(
                'managed_users is deprecated and not supported. '
                'Please use user_type.')
        # In case we work with old type json, use default 'user_type'.
        # TODO: Update all external_extensions.json files and deprecate this.
        if 'user_type' not in extension:
            user_type = ['unmanaged']
            if extension.get('child_users', 'no') == 'yes':
                user_type.append('child')
            logging.warn(
                'user_type filter has to be set explicitly for %s, using '
                '%s by default.', ext, user_type)
            extension['user_type'] = user_type
        else:
            if 'child_users' in extension:
                cros_build_lib.Die(
                    'child_users is not supported when user_type is '
                    'set.')

        # Verify user type is well-formed.
        allowed_user_types = {
            'unmanaged', 'managed', 'child', 'supervised', 'guest'
        }
        if not extension['user_type']:
            cros_build_lib.Die('user_type is not set')
        ext_keys = set(extension['user_type'])
        unknown_keys = ext_keys - allowed_user_types
        if unknown_keys:
            cros_build_lib.Die('user_type %s is not allowed', unknown_keys)

        cache_crx = extension.get('cache_crx', 'yes')

        # Remove fields that shouldn't be in the output file.
        for key in ('cache_crx', 'child_users'):
            extension.pop(key, None)

        if cache_crx == 'yes':
            if not DownloadCrx(ext, extension, crxdir):
                was_errors = True
        elif cache_crx == 'no':
            pass
        else:
            cros_build_lib.Die('Unknown value for "cache_crx" %s for %s',
                               cache_crx, ext)

        json_file = os.path.join(jsondir, '%s.json' % ext)
        json.dump(extension,
                  open(json_file, 'w'),
                  sort_keys=True,
                  indent=2,
                  separators=(',', ': '))

    if was_errors:
        cros_build_lib.Die('FAIL to download some extensions')

    CreateValidationFiles(validationdir, crxdir, identifier)
    cros_build_lib.CreateTarball(tarball, outputdir)
    logging.info('Tarball created %s', tarball)
Example #25
0
def main(argv):
    """Load generator for a devserver."""
    parser = get_parser()
    options = parser.parse_args(argv)

    # Parse devserver.
    if options.server:
        if re.match(r'^https?://', options.server):
            server = options.server
        else:
            server = 'http://%s/' % options.server
        ds = dev_server.ImageServer(server)
    else:
        parser.print_usage()
        logging.error('Must specify devserver')
        sys.exit(1)

    # Parse config file and determine master list of duts and their board type,
    # filtering by board type if specified.
    duts = {}
    if options.config:
        with open(options.config, 'r') as f:
            config = json.load(f)
            boards = (options.boards.split(',')
                      if options.boards else config.keys())
            duts_specified = (set(options.duts.split(','))
                              if options.duts else None)
            for board in boards:
                duts.update({
                    dut: board
                    for dut in config[board]['duts']
                    if duts_specified is None or dut in duts_specified
                })
        logging.info('Config file %s: %d boards, %d duts', options.config,
                     len(boards), len(duts))
    else:
        parser.print_usage()
        logging.error('Must specify config file')
        sys.exit(1)

    if options.ping:
        logging.info('Performing ping tests')
        duts_alive = {}
        for dut, board in duts.items():
            if ping_dut(dut):
                duts_alive[dut] = board
            else:
                logging.error(
                    'Ignoring DUT %s (%s) for failing initial '
                    'ping check', dut, board)
        duts = duts_alive
        logging.info('After ping tests: %d boards, %d duts', len(boards),
                     len(duts))

    # Set up the test runner and stage all the builds.
    outputlog = open(options.outputlog, 'a') if options.outputlog else None
    runner = Runner(ds,
                    duts,
                    config,
                    simultaneous=options.simultaneous,
                    total=options.total,
                    outputlog=outputlog,
                    ping=options.ping,
                    blacklist_consecutive=options.blacklist_consecutive,
                    blacklist_success=options.blacklist_success,
                    blacklist_total=options.blacklist_total,
                    dryrun=options.dryrun)
    if options.stage:
        runner.stage_all()

    # Run all the provisions.
    with locking.FileLock(options.config, blocking=True).lock():
        completed = runner.loop()
    logging.info('%s in %s', 'Completed' if completed else 'Interrupted',
                 runner.elapsed())
    # Write all entries as JSON.
    entries = runner.get_completed_entries()
    if options.output:
        with open(options.output, 'w') as f:
            dump_entries_as_json(entries, f)
    else:
        dump_entries_as_json(entries, sys.stdout)
    logging.info(
        'Summary: %s',
        dict(
            collections.Counter(
                [e['status'] for e in entries if e['name'] != 'Runner'])))

    # List blacklisted DUTs.
    if runner.dut_blacklist:
        logging.warn('Blacklisted DUTs:')
        for host_name in runner.dut_blacklist:
            logging.warn('  %s', host_name)
Example #26
0
def _ShellLintFile(path, output_format, debug, gentoo_format=False):
    """Returns result of running lint checks on |path|.

  Args:
    path: The path to the script on which to run the linter.
    output_format: The format of the output that the linter should emit. See
                   |SHLINT_OUTPUT_FORMAT_MAP|.
    debug: Whether to print out the linter command.
    gentoo_format: Whether to treat this file as an ebuild style script.

  Returns:
    A CommandResult object.
  """
    # TODO: Try using `checkbashisms`.
    syntax_check = _LinterRunCommand(['bash', '-n', path], debug)
    if syntax_check.returncode != 0:
        return syntax_check

    # Try using shellcheck if it exists, with a preference towards finding it
    # inside the chroot. This is OK as it is statically linked.
    shellcheck = (osutils.Which('shellcheck',
                                path='/usr/bin',
                                root=os.path.join(constants.SOURCE_ROOT,
                                                  'chroot'))
                  or osutils.Which('shellcheck'))

    if not shellcheck:
        logging.notice('Install shellcheck for additional shell linting.')
        return syntax_check

    # Instruct shellcheck to run itself from the shell script's dir. Note that
    # 'SCRIPTDIR' is a special string that shellcheck rewrites to the dirname of
    # the given path.
    extra_checks = [
        'avoid-nullary-conditions',  # SC2244
        'check-unassigned-uppercase',  # Include uppercase in SC2154
        'require-variable-braces',  # SC2250
    ]
    if not gentoo_format:
        extra_checks.append('quote-safe-variables')  # SC2248

    cmd = [
        shellcheck, '--source-path=SCRIPTDIR',
        '--enable=%s' % ','.join(extra_checks)
    ]
    if output_format != 'default':
        cmd.extend(SHLINT_OUTPUT_FORMAT_MAP[output_format])
    cmd.append('-x')
    if gentoo_format:
        # ebuilds don't explicitly export variables or contain a shebang.
        cmd.append('--exclude=SC2148')
        # ebuilds always use bash.
        cmd.append('--shell=bash')
    cmd.append(path)

    lint_result = _LinterRunCommand(cmd, debug)

    # During testing, we don't want to fail the linter for shellcheck errors,
    # so override the return code.
    if lint_result.returncode != 0:
        bug_url = (
            'https://bugs.chromium.org/p/chromium/issues/entry?' +
            urllib.parse.urlencode({
                'template':
                'Defect report from Developer',
                'summary':
                'Bad shellcheck warnings for %s' % os.path.basename(path),
                'components':
                'Infra>Client>ChromeOS>Build,',
                'cc':
                '[email protected],[email protected]',
                'comment':
                'Shellcheck output from file:\n%s\n\n<paste output here>\n\n'
                "What is wrong with shellcheck's findings?\n" % path,
            }))
        logging.warn(
            'Shellcheck found problems. These will eventually become '
            'errors.  If the shellcheck findings are not useful, '
            'please file a bug at:\n%s', bug_url)
        lint_result.returncode = 0
    return lint_result
Example #27
0
    def UploadMetadata(self,
                       upload_queue=None,
                       filename=constants.METADATA_JSON,
                       export=False):
        """Create and upload JSON file of the builder run's metadata, and to cidb.

    This uses the existing metadata stored in the builder run. The default
    metadata.json file should only be uploaded once, at the end of the run,
    and considered immutable. During the build, intermediate metadata snapshots
    can be uploaded to other files, such as partial-metadata.json.

    This method also updates the metadata in the cidb database, if there is a
    valid cidb connection set up.

    Args:
      upload_queue: If specified then put the artifact file to upload on
        this queue.  If None then upload it directly now.
      filename: Name of file to dump metadata to.
                Defaults to constants.METADATA_JSON
      export: If true, constants.METADATA_TAGS will be exported to gcloud.

    Returns:
      Whether the upload was successful.
    """
        metadata_json = os.path.join(self.archive_path, filename)

        # Stages may run in parallel, so we have to do atomic updates on this.
        logging.info('Writing metadata to %s.', metadata_json)
        osutils.WriteFile(metadata_json,
                          self._run.attrs.metadata.GetJSON(),
                          atomic=True,
                          makedirs=True)

        if upload_queue is not None:
            logging.info('Adding metadata file %s to upload queue.',
                         metadata_json)
            upload_queue.put([filename])
        else:
            logging.info('Uploading metadata file %s now.', metadata_json)
            self.UploadArtifact(filename, archive=False)

        build_id, db = self._run.GetCIDBHandle()
        if db:
            logging.info(
                'Writing updated metadata to database for build_id %s.',
                build_id)
            db.UpdateMetadata(build_id, self._run.attrs.metadata)
            if export:
                d = self._run.attrs.metadata.GetDict()
                if constants.METADATA_TAGS in d:
                    c_file = topology.topology.get(
                        topology.DATASTORE_WRITER_CREDS_KEY)
                    if c_file:
                        with tempfile.NamedTemporaryFile() as f:
                            logging.info('Export tags to gcloud via %s.',
                                         f.name)
                            logging.debug('Exporting: %s' %
                                          d[constants.METADATA_TAGS])
                            osutils.WriteFile(f.name,
                                              json.dumps(
                                                  d[constants.METADATA_TAGS]),
                                              atomic=True,
                                              makedirs=True)
                            commands.ExportToGCloud(self._build_root,
                                                    c_file,
                                                    f.name,
                                                    caller=type(self).__name__)
                    else:
                        logging.warn(
                            'No datastore credential file found, Skipping Export'
                        )
                        return False
        else:
            logging.info('Skipping database update, no database or build_id.')
            return False
        return True
Example #28
0
def GenerateAlertsSummary(db,
                          builds=None,
                          logdog_client=None,
                          milo_client=None,
                          allow_experimental=False):
    """Generates the full set of alerts to send to Sheriff-o-Matic.

  Args:
    db: cidb.CIDBConnection object.
    builds: A list of (waterfall, builder_name, severity) tuples to summarize.
      Defaults to SOM_BUILDS[SOM_TREE].
    logdog_client: logdog.LogdogClient object.
    milo_client: milo.MiloClient object.
    allow_experimental: Boolean if non-important builds should be included.

  Returns:
    JSON-marshalled AlertsSummary object.
  """
    if not builds:
        builds = constants.SOM_BUILDS[constants.SOM_TREE]
    if not logdog_client:
        logdog_client = logdog.LogdogClient()
    if not milo_client:
        milo_client = milo.MiloClient()

    funcs = []
    now = datetime.datetime.utcnow()

    # Iterate over relevant masters.
    # build_tuple is either: waterfall, build_config, severity
    #  or: build_id, severity
    for build_tuple in builds:
        # Find the specified build.
        if len(build_tuple) == 2:
            # pylint: disable=unbalanced-tuple-unpacking
            build_id, severity = build_tuple
            # pylint: enable=unbalanced-tuple-unpacking
            master = db.GetBuildStatus(build_id)
            if master is None:
                logging.warn('Could not locate build id %s', build_id)
                continue
            wfall = master['waterfall']
            build_config = master['build_config']
        elif len(build_tuple) == 3:
            wfall, build_config, severity = build_tuple
            master = db.GetMostRecentBuild(wfall, build_config)
            if master is None:
                logging.warn('Could not locate build %s %s', wfall,
                             build_config)
                continue
        else:
            logging.error('Invalid build tuple: %s' % str(build_tuple))
            continue

        statuses = [master]
        stages = db.GetBuildStages(master['id'])
        exceptions = db.GetBuildsFailures([master['id']])
        messages = db.GetBuildMessages(master['id'])
        annotations = []
        logging.info(
            '%s %s (id %d): single/master build, %d stages, %d messages',
            wfall, build_config, master['id'], len(stages), len(messages))

        # Find any slave builds, and the individual slave stages.
        slave_statuses = db.GetSlaveStatuses(master['id'])
        if len(slave_statuses):
            statuses.extend(slave_statuses)
            slave_stages = db.GetSlaveStages(master['id'])
            stages.extend(slave_stages)
            exceptions.extend(db.GetSlaveFailures(master['id']))
            annotations.extend(
                db.GetAnnotationsForBuilds([master['id']
                                            ]).get(master['id'], []))
            logging.info('- %d slaves, %d slave stages, %d annotations',
                         len(slave_statuses), len(slave_stages),
                         len(annotations))

        # Look for failing and inflight (signifying timeouts) slave builds.
        for build in sorted(statuses, key=lambda s: s['builder_name']):
            funcs.append(
                lambda build_=build, stages_=stages, exceptions_=exceptions,
                messages_=messages, annotations_=annotations, siblings_=
                statuses, severity_=severity: GenerateBuildAlert(
                    build_,
                    stages_,
                    exceptions_,
                    messages_,
                    annotations_,
                    siblings_,
                    severity_,
                    now,
                    db,
                    logdog_client,
                    milo_client,
                    allow_experimental=allow_experimental))

    alerts = [
        alert for alert in parallel.RunParallelSteps(funcs, return_values=True)
        if alert
    ]

    revision_summaries = {}
    summary = som.AlertsSummary(alerts, revision_summaries, ToEpoch(now))

    return json.dumps(summary, cls=ObjectEncoder)
Example #29
0
def GenerateAlertStage(build, stage, exceptions, aborted, buildinfo,
                       logdog_client):
    """Generate alert details for a single build stage.

  Args:
    build: Dictionary of build details from CIDB.
    stage: Dictionary of stage details from CIDB.
    exceptions: A list of instances of failure_message_lib.StageFailure.
    aborted: Boolean indicating whether the build was aborted.
    buildinfo: BuildInfo build JSON file from MILO.
    logdog_client: logdog.LogdogClient object.

  Returns:
    som.CrosStageFailure object if stage requires alert.  None otherwise.
  """
    STAGE_IGNORE_STATUSES = frozenset([
        constants.BUILDER_STATUS_PASSED, constants.BUILDER_STATUS_PLANNED,
        constants.BUILDER_STATUS_SKIPPED
    ])
    ABORTED_IGNORE_STATUSES = frozenset([
        constants.BUILDER_STATUS_INFLIGHT, constants.BUILDER_STATUS_FORGIVEN,
        constants.BUILDER_STATUS_WAITING
    ])
    NO_LOG_RETRY_STATUSES = frozenset(
        [constants.BUILDER_STATUS_INFLIGHT, constants.BUILDER_STATUS_ABORTED])
    # IGNORE_EXCEPTIONS should be ignored if they're the only exception.
    IGNORE_EXCEPTIONS = frozenset(['ImportantBuilderFailedException'])
    # ABORTED_DISREGARD_EXCEPTIONS should cause any failures of aborted stages
    # to be entirely disregarded.
    ABORTED_DISREGARD_EXCEPTIONS = frozenset(['_ShutDownException'])
    if (stage['build_id'] != build['id']
            or stage['status'] in STAGE_IGNORE_STATUSES):
        return None
    if aborted and stage['status'] in ABORTED_IGNORE_STATUSES:
        return None

    logging.info('    stage %s (id %d): %s', stage['name'], stage['id'],
                 stage['status'])
    logs_links = []
    notes = []

    # Generate links to the logs of the stage and use them for classification.
    if buildinfo and stage['name'] in buildinfo['steps']:
        prefix = buildinfo['annotationStream']['prefix']
        annotation = buildinfo['steps'][stage['name']]
        AddLogsLink(logdog_client, 'stdout', buildinfo['project'], prefix,
                    annotation.get('stdoutStream'), logs_links)
        AddLogsLink(logdog_client, 'stderr', buildinfo['project'], prefix,
                    annotation.get('stderrStream'), logs_links)

        # Use the logs in an attempt to classify the failure.
        if (annotation.get('stdoutStream')
                and annotation['stdoutStream'].get('name')):
            path = '%s/+/%s' % (prefix, annotation['stdoutStream']['name'])
            try:
                # If either the build or stage is reporting as being inflight,
                # LogDog might still be waiting for logs, so don't wait unnecessarily
                # for them.
                retry = (build['status'] not in NO_LOG_RETRY_STATUSES
                         and stage['status'] not in NO_LOG_RETRY_STATUSES)
                logs = logdog_client.GetLines(buildinfo['project'],
                                              path,
                                              allow_retries=retry)
                classification = classifier.ClassifyFailure(
                    stage['name'], logs)
                for c in classification or []:
                    notes.append('Classification: %s' % (c))
            except Exception as e:
                logging.exception('Could not classify logs: %s', e)
                notes.append('Warning: unable to classify logs: %s' % (e))
    elif aborted:
        # Aborted build with no stage logs is not worth reporting on.
        return None
    else:
        notes.append('Warning: stage logs unavailable')

    # Copy the links from the buildbot build JSON.
    stage_links = []
    if buildinfo:
        if stage['status'] == constants.BUILDER_STATUS_FORGIVEN:
            # TODO: Include these links but hide them by default in frontend.
            pass
        elif stage['name'] in buildinfo['steps']:
            step = buildinfo['steps'][stage['name']]
            stage_links = [
                som.Link(l['label'], l['url'])
                for l in step.get('otherLinks', [])
            ]
        else:
            steps = [
                s for s in buildinfo['steps'].keys()
                if s is not None and not isinstance(s, tuple)
            ]
            logging.warn('Could not find stage %s in: %s', stage['name'],
                         ', '.join(steps))
    else:
        notes.append('Warning: stage details unavailable')

    # Limit the number of links that will be displayed for a single stage.
    # Let there be one extra since it doesn't make sense to have a line
    # saying there is one more.
    # TODO: Move this to frontend so they can be unhidden by clicking.
    if len(stage_links) > MAX_STAGE_LINKS + 1:
        # Insert at the beginning of the notes which come right after the links.
        notes.insert(
            0, '... and %d more URLs' % (len(stage_links) - MAX_STAGE_LINKS))
        del stage_links[MAX_STAGE_LINKS:]

    # Add all exceptions recorded in CIDB as notes.
    has_other_exceptions = False
    has_ignore_exception = False
    for e in exceptions:
        if e.build_stage_id == stage['id']:
            notes.append('%s: %s' % (e.exception_type, e.exception_message))
            if aborted and e.exception_type in ABORTED_DISREGARD_EXCEPTIONS:
                # Don't generate alert if the exception indicates it should be
                # entirely disregarded.
                return None
            elif e.exception_type in IGNORE_EXCEPTIONS:
                # Ignore this exception (and stage if there aren't other exceptions).
                has_ignore_exception = True
                continue
            has_other_exceptions = True

    # If there is an ignored exception and no other exceptions, treat this
    # stage as non-failed.
    if has_ignore_exception and not has_other_exceptions:
        return None

    # Add the stage to the alert.
    return som.CrosStageFailure(stage['name'],
                                MapCIDBToSOMStatus(stage['status']),
                                logs_links, stage_links, notes)
Example #30
0
    def _LoadTests(self):
        """Loads the tests to run from <overlay>/scripts/gce_tests.json.

    If the JSON file exists, loads the tests and the flags with which to create
    an instance for each test. The JSON file should contain a "tests" object,
    which is an array of objects, each of which has only two keys: "name" and
    "flags".

    "name" could be any valid Autotest test name, or a suite name, in the form
    of "suite:<suite_name>", e.g., "suite:gce-smoke".

    "flags" is a JSON object whose members must be valid proterties of the GCE
    Instance Resource, as specificed at:
    https://cloud.google.com/compute/docs/reference/latest/instances#resource.

    These flags will be used to create instances. Each flag must strictly follow
    the property schema as defined in the Instance Resource. Failure to do so
    will result in instance creation failures.

    Note that a dedicated instance will be created for every test object
    specified in scripts/gce_tests.json. So group test cases that require
    similar instance properties together as suites whenever possible.

    An example scripts/gce_tests.json may look like:
    {
      "tests": [
        {
          "name": "suite:gce-smoke",
          "flags": []
        },
        {
          "name": "suite:cloud-init",
          "flags": {
              "description": "Test instance",
              "metadata": {
                "items": [
                  {
                    "key": "fake_key",
                    "value": "fake_value"
                  }
                ]
              }
          }
        }
      ]
    }

    If the JSON file does not exist, the 'gce-smoke' suite will be used to
    verify the image.
    """
        # Defaults to run the gce-smoke suite if no custom tests are given.
        tests = [dict(name="suite:gce-smoke", flags=dict())]

        custom_tests = None
        try:
            custom_tests = portage_util.ReadOverlayFile("scripts/gce_tests.json", board=self.board)
        except portage_util.MissingOverlayException as e:
            logging.warn("Board overlay not found. Error: %r", e)

        if custom_tests is not None:
            if self.board not in constants.TRUSTED_BOARDS:
                logging.warn("Custom tests and flags are not allowed for this board " "(%s)!", self.board)
            else:
                # Read the list of tests.
                try:
                    json_file = json.loads(custom_tests)
                    tests = json_file.get("tests")
                except ValueError as e:
                    logging.warn(
                        "scripts/gce_tests.json contains invalid JSON content. "
                        "Default tests will be run and default flags will be "
                        "used to create instances. Error: %r",
                        e,
                    )
        self.tests = tests
Example #31
0
  def setupMockBuild(self, key, version, valid=True):
    """Helper to mock a build."""
    def _RaiseGSNoSuchKey(*_args, **_kwargs):
      raise gs.GSNoSuchKey('file does not exist')

    target = self.targets[key][0]
    src_url = self.makeSrcUrl(target, version)
    if valid:
      # Show source subpath directory.
      src_subdir = os.path.join(src_url, self.makeSubpath(target, version))
      self.gs_mock.AddCmdResult(['ls', '--', src_url], output=src_subdir)

      # Show files.
      mock_file_template_list = {
          'ARM': ['file-%(version)s.zip', 'adb', 'sepolicy.zip'],
          'X86': ['file-%(version)s.zip', 'file.zip.internal'],
          'X86_NDK_TRANSLATION': ['file_ndk_translation-%(version)s.zip'],
          'X86_INTERNAL': ['file.zip.internal', 'file-%(version)s.zip'],
          'X86_64': ['file-%(version)s.zip'],
          'X86_USERDEBUG': ['cheets_x86-file-%(version)s.zip', 'sepolicy.zip'],
          'X86_NDK_TRANSLATION_USERDEBUG': [
              'cheets_x86_ndk_translation-file-%(version)s.zip', 'sepolicy.zip'
          ],
          'X86_64_USERDEBUG': ['cheets_x86_64-file-%(version)s.zip'],
          'AOSP_X86_USERDEBUG': ['aosp_cheets_x86-file-%(version)s.zip'],
          'SDK_GOOGLE_X86_USERDEBUG':
              ['sdk_google_cheets_x86-file-%(version)s.zip'],
          'SDK_GOOGLE_X86_64_USERDEBUG':
              ['sdk_google_cheets_x86_64-file-%(version)s.zip'],
          'SDK_TOOLS': ['aapt', 'adb']
      }
      filelist = [template % {'version': version}
                  for template in mock_file_template_list[key]]
      src_filelist = [os.path.join(src_subdir, filename)
                      for filename in filelist]
      self.gs_mock.AddCmdResult(['ls', '--', src_subdir],
                                output='\n'.join(src_filelist))
      for src_file in src_filelist:
        self.gs_mock.AddCmdResult(['stat', '--', src_file],
                                  output=(self.STAT_OUTPUT) % src_url)

      # Show nothing in destination.
      dst_url = self.makeDstUrl(target, version)
      # Show files.
      mock_file_template_list = {
          'ARM': ['file-%(version)s.zip', 'adb', 'sepolicy.zip'],
          # Skip internal files.
          'X86': ['file-%(version)s.zip'],
          'X86_NDK_TRANSLATION': [
              'file_ndk_translation-%(version)s.zip'
          ],
          # Internal files only.
          'X86_INTERNAL': ['file.zip.internal'],
          'X86_64': ['file-%(version)s.zip'],
          'X86_USERDEBUG':
              ['cheets_x86_userdebug-file-%(version)s.zip', 'sepolicy.zip'],
          'X86_NDK_TRANSLATION_USERDEBUG': [
              'cheets_x86_ndk_translation_userdebug-file-%(version)s.zip',
              'sepolicy.zip'
          ],
          'X86_64_USERDEBUG': ['cheets_x86_64_userdebug-file-%(version)s.zip'],
          'AOSP_X86_USERDEBUG':
              ['cheets_aosp_x86_userdebug-file-%(version)s.zip'],
          'SDK_GOOGLE_X86_USERDEBUG':
              ['cheets_sdk_google_x86_userdebug-file-%(version)s.zip'],
          'SDK_GOOGLE_X86_64_USERDEBUG':
              ['cheets_sdk_google_x86_64_userdebug-file-%(version)s.zip'],
          'SDK_TOOLS': ['aapt', 'adb']
      }
      filelist = [template % {'version': version}
                  for template in mock_file_template_list[key]]
      dst_filelist = [os.path.join(dst_url, filename)
                      for filename in filelist]
      for dst_file in dst_filelist:
        self.gs_mock.AddCmdResult(['stat', '--', dst_file],
                                  side_effect=_RaiseGSNoSuchKey)
      logging.warn('mocking no %s', dst_url)

      # Allow copying of source to dest.
      for src_file, dst_file in itertools.izip(src_filelist, dst_filelist):
        self.gs_mock.AddCmdResult(['cp', '-v', '--', src_file, dst_file])
    else:
      self.gs_mock.AddCmdResult(['ls', '--', src_url],
                                side_effect=_RaiseGSNoSuchKey)
Example #32
0
  def _LoadTests(self):
    """Loads the tests to run from <overlay>/scripts/gce_tests.json.

    If the JSON file exists, loads the tests and the flags with which to create
    an instance for each test. The JSON file should contain a "tests" object,
    which is an array of objects, each of which has only two keys: "name" and
    "flags".

    "name" could be any valid Autotest test name, or a suite name, in the form
    of "suite:<suite_name>", e.g., "suite:gce-smoke".

    "flags" is a JSON object whose members must be valid proterties of the GCE
    Instance Resource, as specificed at:
    https://cloud.google.com/compute/docs/reference/latest/instances#resource.

    These flags will be used to create instances. Each flag must strictly follow
    the property schema as defined in the Instance Resource. Failure to do so
    will result in instance creation failures.

    Note that a dedicated instance will be created for every test object
    specified in scripts/gce_tests.json. So group test cases that require
    similar instance properties together as suites whenever possible.

    An example scripts/gce_tests.json may look like:
    {
      "tests": [
        {
          "name": "suite:gce-smoke",
          "flags": []
        },
        {
          "name": "suite:cloud-init",
          "flags": {
              "description": "Test instance",
              "metadata": {
                "items": [
                  {
                    "key": "fake_key",
                    "value": "fake_value"
                  }
                ]
              }
          }
        }
      ]
    }

    If the JSON file does not exist, the 'gce-smoke' suite will be used to
    verify the image.
    """
    # Defaults to run the gce-smoke suite if no custom tests are given.
    tests = [dict(name="suite:gce-smoke", flags=dict())]

    custom_tests = None
    try:
      custom_tests = portage_util.ReadOverlayFile(
          'scripts/gce_tests.json', board=self.board)
    except portage_util.MissingOverlayException as e:
      logging.warn('Board overlay not found. Error: %r', e)

    if custom_tests is not None:
      if self.board not in constants.TRUSTED_BOARDS:
        logging.warn('Custom tests and flags are not allowed for this board '
                     '(%s)!', self.board)
      else:
        # Read the list of tests.
        try:
          json_file = json.loads(custom_tests)
          tests = json_file.get('tests')
        except ValueError as e:
          logging.warn('scripts/gce_tests.json contains invalid JSON content. '
                       'Default tests will be run and default flags will be '
                       'used to create instances. Error: %r', e)
    self.tests = tests