Example #1
def main(args):
    tools.disable_buffering()
    parser = tools.OptionParserWithLogging(usage="%prog <options>", version=__version__, log_file=RUN_ISOLATED_LOG_FILE)

    data_group = optparse.OptionGroup(parser, "Data source")
    data_group.add_option("-s", "--isolated", metavar="FILE", help="File/url describing what to map or run")
    data_group.add_option("-H", "--hash", help="Hash of the .isolated to grab from the hash table")
    isolateserver.add_isolate_server_options(data_group, True)
    parser.add_option_group(data_group)

    cache_group = optparse.OptionGroup(parser, "Cache management")
    cache_group.add_option("--cache", default="cache", metavar="DIR", help="Cache directory, default=%default")
    cache_group.add_option(
        "--max-cache-size",
        type="int",
        metavar="NNN",
        default=20 * 1024 * 1024 * 1024,
        help="Trim if the cache gets larger than this value, default=%default",
    )
    cache_group.add_option(
        "--min-free-space",
        type="int",
        metavar="NNN",
        default=2 * 1024 * 1024 * 1024,
        help="Trim if disk free space becomes lower than this value, " "default=%default",
    )
    cache_group.add_option(
        "--max-items",
        type="int",
        metavar="NNN",
        default=100000,
        help="Trim if more than this number of items are in the cache " "default=%default",
    )
    parser.add_option_group(cache_group)

    auth.add_auth_options(parser)
    options, args = parser.parse_args(args)
    auth.process_auth_options(parser, options)
    isolateserver.process_isolate_server_options(data_group, options)

    if bool(options.isolated) == bool(options.hash):
        logging.debug("One and only one of --isolated or --hash is required.")
        parser.error("One and only one of --isolated or --hash is required.")

    options.cache = os.path.abspath(options.cache)
    policies = CachePolicies(options.max_cache_size, options.min_free_space, options.max_items)

    try:
        # |options.cache| path may not exist until DiskCache() instance is created.
        cache = DiskCache(options.cache, policies, isolateserver.get_hash_algo(options.namespace))
        remote = options.isolate_server or options.indir
        with isolateserver.get_storage(remote, options.namespace) as storage:
            # Hashing schemes used by |storage| and |cache| MUST match.
            assert storage.hash_algo == cache.hash_algo
            return run_tha_test(options.isolated or options.hash, storage, cache, args)
    except Exception as e:
        # Make sure any exception is logged.
        tools.report_error(e)
        logging.exception(e)
        return 1
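
A minimal invocation sketch for this entry point, assuming it lives in run_isolated.py and that isolateserver.add_isolate_server_options() registers an --isolate-server flag (the hash and URL below are placeholders):

import sys

# Placeholder digest and server URL; --isolate-server is assumed to be
# registered by isolateserver.add_isolate_server_options() above.
if __name__ == '__main__':
    sys.exit(main([
        '--hash', '0123456789abcdef0123456789abcdef01234567',
        '--isolate-server', 'https://isolate.example.com',
        '--cache', '/tmp/isolated_cache',
    ]))
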
Example #2
def upload_zip_bundle(isolate_server, bundle):
    """Uploads a zip package to isolate storage and returns raw fetch URL.

  Args:
    isolate_server: URL of an isolate server.
    bundle: instance of ZipPackage to upload.

  Returns:
    URL to get the file from on success.
    None on failure.
  """
    # The Swarming bot needs to be able to grab the file from the storage
    # using a raw HTTP GET. Use the 'default' namespace so that the raw data
    # returned to a bot is not zipped, since swarm_bot doesn't understand
    # compressed data yet. This namespace has nothing to do with the
    # |namespace| passed to run_isolated.py, which is used to store files for
    # the isolated task.
    logging.info('Zipping up and uploading files...')
    try:
        start_time = now()
        isolate_item = isolateserver.BufferItem(bundle.zip_into_buffer(),
                                                high_priority=True)
        with isolateserver.get_storage(isolate_server, 'default') as storage:
            uploaded = storage.upload_items([isolate_item])
            bundle_url = storage.get_fetch_url(isolate_item)
        elapsed = now() - start_time
    except (IOError, OSError) as exc:
        tools.report_error('Failed to upload the zip file: %s' % exc)
        return None
    if isolate_item in uploaded:
        logging.info('Upload complete, time elapsed: %f', elapsed)
    else:
        logging.info('Zip file already on server, time elapsed: %f', elapsed)
    return bundle_url
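
A hedged usage sketch for the function above; the ZipPackage constructor and add_file() signature are assumptions, since the class is not shown in these examples:

# Hypothetical caller; ZipPackage's API is an assumption here.
bundle = ZipPackage(root_dir)
bundle.add_file(os.path.join(root_dir, 'bot_main.py'), 'bot_main.py')
bundle_url = upload_zip_bundle('https://isolate.example.com', bundle)
if bundle_url is None:
    sys.exit(1)
logging.info('Bundle fetchable at %s', bundle_url)
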
Example #3
def main(args):
  dispatcher = subcommand.CommandDispatcher(__name__)
  try:
    return dispatcher.execute(OptionParserAuth(version=__version__), args)
  except Exception as e:
    tools.report_error(e)
    return 1
Example #4
def CMDtrigger(parser, args):
  """Triggers a Swarming task.

  Accepts either the hash (sha1) of a .isolated file already uploaded or the
  path to an .isolated file to archive, packages it if needed and sends a
  Swarming manifest file to the Swarming server.

  If an .isolated file is specified instead of a hash, it is first archived.
  """
  add_trigger_options(parser)
  options, args = parser.parse_args(args)
  process_trigger_options(parser, options, args)

  try:
    result, task_name = trigger(
        swarming=options.swarming,
        isolate_server=options.isolate_server or options.indir,
        namespace=options.namespace,
        file_hash_or_isolated=args[0],
        task_name=options.task_name,
        dimensions=options.dimensions,
        shards=options.shards,
        env=dict(options.env),
        working_dir=options.working_dir,
        verbose=options.verbose,
        profile=options.profile,
        priority=options.priority)
    if task_name != options.task_name and not result:
      print('Triggered task: %s' % task_name)
    return result
  except Failure as e:
    tools.report_error(e)
    return 1
Example #5
def main(args):
  dispatcher = subcommand.CommandDispatcher(__name__)
  try:
    return dispatcher.execute(OptionParserAuth(version=__version__), args)
  except Exception as e:
    tools.report_error(e)
    return 1
Example #6
def trigger_by_manifest(swarming, manifest):
    """Given a task manifest, triggers it for execution on swarming.

  Args:
    swarming: URL of a swarming service.
    manifest: instance of Manifest.

  Returns:
    tuple(Task id, priority) on success. tuple(None, None) on failure.
  """
    logging.info('Triggering: %s', manifest.task_name)
    manifest_text = manifest.to_json()
    result = net.url_read(swarming + '/test', data={'request': manifest_text})
    if not result:
        tools.report_error('Failed to trigger task %s' % manifest.task_name)
        return None, None
    try:
        data = json.loads(result)
    except (ValueError, TypeError) as e:
        msg = '\n'.join(('Failed to trigger task %s' % manifest.task_name,
                         'Manifest: %s' % manifest_text,
                         'Bad response: %s' % result, str(e)))
        tools.report_error(msg)
        return None, None
    if not data:
        return None, None
    return data['test_keys'][0]['test_key'], data['priority']
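
Since both failure paths return (None, None), callers can unpack the result directly; a short sketch (building the Manifest instance is elided):

# Hypothetical caller; assumes |manifest| was built elsewhere.
test_key, priority = trigger_by_manifest('https://swarming.example.com', manifest)
if test_key is None:
    sys.exit(1)
print('Task %s queued at priority %s' % (test_key, priority))
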
Example #7
def CMDcollect(parser, args):
    """Retrieves results of a Swarming task.

  The result can be in multiple parts if the execution was sharded. It can
  potentially have retries.
  """
    add_collect_options(parser)
    (options, args) = parser.parse_args(args)
    if not args:
        parser.error("Must specify one task name.")
    elif len(args) > 1:
        parser.error("Must specify only one task name.")

    try:
        return collect(
            options.swarming,
            args[0],
            options.timeout,
            options.decorate,
            options.print_status_updates,
            options.task_output_dir,
        )
    except Failure as e:
        tools.report_error(e)
        return 1
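
Under the CMD-prefixed subcommand convention these examples follow, the handler above would typically be reached from the command line; the script name is an assumption, and --timeout/--decorate are assumed to be registered by add_collect_options():

#   python swarming.py collect --timeout 3600 --decorate my-task-name
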
Example #8
def CMDtrigger(parser, args):
    """Triggers a Swarming task.

  Accepts either the hash (sha1) of a .isolated file already uploaded or the
  path to an .isolated file to archive, packages it if needed and sends a
  Swarming manifest file to the Swarming server.

  If an .isolated file is specified instead of a hash, it is first archived.

  Passes all extra arguments provided after '--' as additional command line
  arguments for an isolated command specified in *.isolate file.
  """
    add_trigger_options(parser)
    add_sharding_options(parser)
    args, isolated_cmd_args = extract_isolated_command_extra_args(args)
    parser.add_option(
        '--dump-json',
        metavar='FILE',
        help='Dump details about the triggered task(s) to this file as json')
    options, args = parser.parse_args(args)
    process_trigger_options(parser, options, args)

    try:
        tasks, task_name = trigger(swarming=options.swarming,
                                   isolate_server=options.isolate_server
                                   or options.indir,
                                   namespace=options.namespace,
                                   file_hash_or_isolated=args[0],
                                   task_name=options.task_name,
                                   extra_args=isolated_cmd_args,
                                   shards=options.shards,
                                   dimensions=options.dimensions,
                                   env=dict(options.env),
                                   deadline=options.deadline,
                                   verbose=options.verbose,
                                   profile=options.profile,
                                   priority=options.priority)
        if tasks:
            if task_name != options.task_name:
                print('Triggered task: %s' % task_name)
            if options.dump_json:
                data = {
                    'base_task_name': task_name,
                    'tasks': tasks,
                }
                tools.write_json(options.dump_json, data, True)
        return int(not tasks)
    except Failure as e:
        tools.report_error(e)
        return 1
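
The --dump-json file written above holds the 'base_task_name' and 'tasks' keys shown in the code; a consumer sketch, assuming 'tasks' is a name-keyed mapping (the file name is illustrative, and the shape of each entry comes from trigger() and is not shown here):

import json

# Illustrative file name; each value in 'tasks' is whatever trigger() returned.
with open('trigger.json') as f:
    data = json.load(f)
print('Base task: %s' % data['base_task_name'])
for name, task in sorted(data['tasks'].items()):
    print('  %s: %r' % (name, task))
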
Example #9
def isolated_to_hash(isolate_server, namespace, arg, algo, verbose):
    """Archives a .isolated file if needed.

  Returns the file hash to trigger and a bool specifying if it was a file (True)
  or a hash (False).
  """
    if arg.endswith(".isolated"):
        file_hash = archive(isolate_server, namespace, arg, algo, verbose)
        if not file_hash:
            tools.report_error("Archival failure %s" % arg)
            return None, True
        return file_hash, True
    elif isolateserver.is_valid_hash(arg, algo):
        return arg, False
    else:
        tools.report_error("Invalid hash %s" % arg)
        return None, False
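
A sketch of both accepted argument forms, assuming hashlib.sha1 as the algorithm (consistent with the sha1 digests mentioned in the CMDtrigger docstrings); the server URL and file name are placeholders:

import hashlib

# Path form: archives foo.isolated first; returns (file_hash, True) on success.
file_hash, is_file = isolated_to_hash(
    'https://isolate.example.com', 'default-gzip', 'foo.isolated',
    hashlib.sha1, False)

# Digest form: a valid hash is passed through; returns (arg, False).
file_hash, is_file = isolated_to_hash(
    'https://isolate.example.com', 'default-gzip',
    '0123456789abcdef0123456789abcdef01234567', hashlib.sha1, False)
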
Example #10
def isolated_to_hash(isolate_server, namespace, arg, algo, verbose):
    """Archives a .isolated file if needed.

  Returns the file hash to trigger and a bool specifying if it was a file (True)
  or a hash (False).
  """
    if arg.endswith('.isolated'):
        file_hash = archive(isolate_server, namespace, arg, algo, verbose)
        if not file_hash:
            tools.report_error('Archival failure %s' % arg)
            return None, True
        return file_hash, True
    elif isolateserver.is_valid_hash(arg, algo):
        return arg, False
    else:
        tools.report_error('Invalid hash %s' % arg)
        return None, False
Example #11
def zip_and_upload(manifest):
    """Zips up all the files necessary to run a manifest and uploads to Swarming
  master.
  """
    try:
        start_time = now()
        with manifest.storage:
            uploaded = manifest.storage.upload_items([manifest.isolate_item])
        elapsed = now() - start_time
    except (IOError, OSError) as exc:
        tools.report_error('Failed to upload the zip file: %s' % exc)
        return False

    if manifest.isolate_item in uploaded:
        logging.info('Upload complete, time elapsed: %f', elapsed)
    else:
        logging.info('Zip file already on server, time elapsed: %f', elapsed)
    return True
Example #12
def zip_and_upload(manifest):
    """Zips up all the files necessary to run a manifest and uploads to Swarming
  master.
  """
    try:
        start_time = now()
        with manifest.storage:
            uploaded = manifest.storage.upload_items([manifest.isolate_item])
        elapsed = now() - start_time
    except (IOError, OSError) as exc:
        tools.report_error("Failed to upload the zip file: %s" % exc)
        return False

    if manifest.isolate_item in uploaded:
        logging.info("Upload complete, time elapsed: %f", elapsed)
    else:
        logging.info("Zip file already on server, time elapsed: %f", elapsed)
    return True
Example #13
def CMDcollect(parser, args):
    """Retrieves results of a Swarming task.

  The result can be in multiple parts if the execution was sharded. It can
  potentially have retries.
  """
    add_collect_options(parser)
    (options, args) = parser.parse_args(args)
    if not args:
        parser.error('Must specify one task name.')
    elif len(args) > 1:
        parser.error('Must specify only one task name.')

    try:
        return collect(options.swarming, args[0], options.timeout,
                       options.decorate, options.print_status_updates,
                       options.task_output_dir)
    except Failure as e:
        tools.report_error(e)
        return 1
Example #14
def CMDtrigger(parser, args):
    """Triggers a Swarming task.

  Accepts either the hash (sha1) of a .isolated file already uploaded or the
  path to an .isolated file to archive, packages it if needed and sends a
  Swarming manifest file to the Swarming server.

  If an .isolated file is specified instead of a hash, it is first archived.

  Passes all extra arguments provided after '--' as additional command line
  arguments for an isolated command specified in *.isolate file.
  """
    add_trigger_options(parser)
    args, isolated_cmd_args = extract_isolated_command_extra_args(args)
    options, args = parser.parse_args(args)
    process_trigger_options(parser, options, args)

    try:
        result, task_name = trigger(
            swarming=options.swarming,
            isolate_server=options.isolate_server or options.indir,
            namespace=options.namespace,
            file_hash_or_isolated=args[0],
            task_name=options.task_name,
            extra_args=isolated_cmd_args,
            shards=options.shards,
            dimensions=options.dimensions,
            env=dict(options.env),
            working_dir=options.working_dir,
            deadline=options.deadline,
            verbose=options.verbose,
            profile=options.profile,
            priority=options.priority,
        )
        if task_name != options.task_name and not result:
            print ("Triggered task: %s" % task_name)
        return result
    except Failure as e:
        tools.report_error(e)
        return 1
Example #15
def CMDtrigger(parser, args):
    """Triggers a Swarming task.

  Accepts either the hash (sha1) of a .isolated file already uploaded or the
  path to an .isolated file to archive, packages it if needed and sends a
  Swarming manifest file to the Swarming server.

  If an .isolated file is specified instead of a hash, it is first archived.

  Passes all extra arguments provided after '--' as additional command line
  arguments for an isolated command specified in *.isolate file.
  """
    add_trigger_options(parser)
    args, isolated_cmd_args = extract_isolated_command_extra_args(args)
    options, args = parser.parse_args(args)
    process_trigger_options(parser, options, args)

    try:
        result, task_name = trigger(swarming=options.swarming,
                                    isolate_server=options.isolate_server
                                    or options.indir,
                                    namespace=options.namespace,
                                    file_hash_or_isolated=args[0],
                                    task_name=options.task_name,
                                    extra_args=isolated_cmd_args,
                                    shards=options.shards,
                                    dimensions=options.dimensions,
                                    env=dict(options.env),
                                    working_dir=options.working_dir,
                                    deadline=options.deadline,
                                    verbose=options.verbose,
                                    profile=options.profile,
                                    priority=options.priority)
        if task_name != options.task_name and not result:
            print('Triggered task: %s' % task_name)
        return result
    except Failure as e:
        tools.report_error(e)
        return 1
Example #16
def CMDrun(parser, args):
    """Triggers a task and wait for the results.

  Basically, does everything to run a command remotely.
  """
    add_trigger_options(parser)
    add_collect_options(parser)
    args, isolated_cmd_args = extract_isolated_command_extra_args(args)
    options, args = parser.parse_args(args)
    process_trigger_options(parser, options, args)

    try:
        result, task_name = trigger(
            swarming=options.swarming,
            isolate_server=options.isolate_server or options.indir,
            namespace=options.namespace,
            file_hash_or_isolated=args[0],
            task_name=options.task_name,
            extra_args=isolated_cmd_args,
            shards=options.shards,
            dimensions=options.dimensions,
            env=dict(options.env),
            working_dir=options.working_dir,
            deadline=options.deadline,
            verbose=options.verbose,
            profile=options.profile,
            priority=options.priority,
        )
    except Failure as e:
        tools.report_error("Failed to trigger %s(%s): %s" % (options.task_name, args[0], e.args[0]))
        return 1
    if result:
        tools.report_error("Failed to trigger the task.")
        return result
    if task_name != options.task_name:
        print ("Triggered task: %s" % task_name)
    try:
        return collect(
            options.swarming,
            task_name,
            options.timeout,
            options.decorate,
            options.print_status_updates,
            options.task_output_dir,
        )
    except Failure as e:
        tools.report_error(e)
        return 1
Example #17
def process_manifest(
    swarming, isolate_server, namespace, isolated_hash, task_name, shards,
    dimensions, env, working_dir, verbose, profile, priority, algo):
  """Processes the manifest file and send off the swarming task request."""
  try:
    manifest = Manifest(
        isolate_server=isolate_server,
        namespace=namespace,
        isolated_hash=isolated_hash,
        task_name=task_name,
        shards=shards,
        dimensions=dimensions,
        env=env,
        working_dir=working_dir,
        verbose=verbose,
        profile=profile,
        priority=priority,
        algo=algo)
  except ValueError as e:
    tools.report_error('Unable to process %s: %s' % (task_name, e))
    return 1

  chromium_setup(manifest)

  logging.info('Zipping up files...')
  if not zip_and_upload(manifest):
    return 1

  logging.info('Server: %s', swarming)
  logging.info('Task name: %s', task_name)
  trigger_url = swarming + '/test'
  manifest_text = manifest.to_json()
  result = net.url_read(trigger_url, data={'request': manifest_text})
  if not result:
    tools.report_error(
        'Failed to trigger task %s\n%s' % (task_name, trigger_url))
    return 1
  try:
    json.loads(result)
  except (ValueError, TypeError) as e:
    msg = '\n'.join((
        'Failed to trigger task %s' % task_name,
        'Manifest: %s' % manifest_text,
        'Bad response: %s' % result,
        str(e)))
    tools.report_error(msg)
    return 1
  return 0
Example #18
def process_manifest(swarming, isolate_server, namespace, isolated_hash,
                     task_name, extra_args, shards, dimensions, env,
                     working_dir, deadline, verbose, profile, priority):
    """Processes the manifest file and send off the swarming task request."""
    try:
        manifest = Manifest(isolate_server=isolate_server,
                            namespace=namespace,
                            isolated_hash=isolated_hash,
                            task_name=task_name,
                            extra_args=extra_args,
                            shards=shards,
                            dimensions=dimensions,
                            env=env,
                            working_dir=working_dir,
                            deadline=deadline,
                            verbose=verbose,
                            profile=profile,
                            priority=priority)
    except ValueError as e:
        tools.report_error('Unable to process %s: %s' % (task_name, e))
        return 1

    chromium_setup(manifest)

    logging.info('Zipping up files...')
    if not zip_and_upload(manifest):
        return 1

    logging.info('Server: %s', swarming)
    logging.info('Task name: %s', task_name)
    trigger_url = swarming + '/test'
    manifest_text = manifest.to_json()
    result = net.url_read(trigger_url, data={'request': manifest_text})
    if not result:
        tools.report_error('Failed to trigger task %s\n%s' %
                           (task_name, trigger_url))
        return 1
    try:
        json.loads(result)
    except (ValueError, TypeError) as e:
        msg = '\n'.join(('Failed to trigger task %s' % task_name,
                         'Manifest: %s' % manifest_text,
                         'Bad response: %s' % result, str(e)))
        tools.report_error(msg)
        return 1
    return 0
Example #19
def CMDrun(parser, args):
    """Triggers a task and wait for the results.

  Basically, does everything to run a command remotely.
  """
    add_trigger_options(parser)
    add_collect_options(parser)
    add_sharding_options(parser)
    args, isolated_cmd_args = extract_isolated_command_extra_args(args)
    options, args = parser.parse_args(args)
    process_trigger_options(parser, options, args)

    try:
        tasks, task_name = trigger(swarming=options.swarming,
                                   isolate_server=options.isolate_server
                                   or options.indir,
                                   namespace=options.namespace,
                                   file_hash_or_isolated=args[0],
                                   task_name=options.task_name,
                                   extra_args=isolated_cmd_args,
                                   shards=options.shards,
                                   dimensions=options.dimensions,
                                   env=dict(options.env),
                                   deadline=options.deadline,
                                   verbose=options.verbose,
                                   profile=options.profile,
                                   priority=options.priority)
    except Failure as e:
        tools.report_error('Failed to trigger %s(%s): %s' %
                           (options.task_name, args[0], e.args[0]))
        return 1
    if not tasks:
        tools.report_error('Failed to trigger the task.')
        return 1
    if task_name != options.task_name:
        print('Triggered task: %s' % task_name)
    try:
        # TODO(maruel): Use task_ids, it's much more efficient!
        return collect(options.swarming, task_name, options.shards,
                       options.timeout, options.decorate,
                       options.print_status_updates, options.task_output_dir)
    except Failure as e:
        tools.report_error(e)
        return 1
Example #20
def main(args):
  tools.disable_buffering()
  parser = tools.OptionParserWithLogging(
      usage='%prog <options>',
      version=__version__,
      log_file=RUN_ISOLATED_LOG_FILE)

  data_group = optparse.OptionGroup(parser, 'Data source')
  data_group.add_option(
      '-s', '--isolated',
      metavar='FILE',
      help='File/url describing what to map or run')
  data_group.add_option(
      '-H', '--hash',
      help='Hash of the .isolated to grab from the hash table')
  isolateserver.add_isolate_server_options(data_group, True)
  parser.add_option_group(data_group)

  cache_group = optparse.OptionGroup(parser, 'Cache management')
  cache_group.add_option(
      '--cache',
      default='cache',
      metavar='DIR',
      help='Cache directory, default=%default')
  cache_group.add_option(
      '--max-cache-size',
      type='int',
      metavar='NNN',
      default=20*1024*1024*1024,
      help='Trim if the cache gets larger than this value, default=%default')
  cache_group.add_option(
      '--min-free-space',
      type='int',
      metavar='NNN',
      default=2*1024*1024*1024,
      help='Trim if disk free space becomes lower than this value, '
           'default=%default')
  cache_group.add_option(
      '--max-items',
      type='int',
      metavar='NNN',
      default=100000,
      help='Trim if more than this number of items are in the cache, '
           'default=%default')
  parser.add_option_group(cache_group)

  auth.add_auth_options(parser)
  options, args = parser.parse_args(args)
  auth.process_auth_options(parser, options)
  isolateserver.process_isolate_server_options(data_group, options)

  if bool(options.isolated) == bool(options.hash):
    logging.debug('One and only one of --isolated or --hash is required.')
    parser.error('One and only one of --isolated or --hash is required.')

  options.cache = os.path.abspath(options.cache)
  policies = CachePolicies(
      options.max_cache_size, options.min_free_space, options.max_items)
  algo = isolateserver.get_hash_algo(options.namespace)

  try:
    # |options.cache| may not exist until DiskCache() instance is created.
    cache = DiskCache(options.cache, policies, algo)
    remote = options.isolate_server or options.indir
    with isolateserver.get_storage(remote, options.namespace) as storage:
      return run_tha_test(
          options.isolated or options.hash, storage, cache, algo, args)
  except Exception as e:
    # Make sure any exception is logged.
    tools.report_error(e)
    logging.exception(e)
    return 1
Example #21
def run_tha_test(isolated_hash, storage, cache, algo, extra_args):
  """Downloads the dependencies in the cache, hardlinks them into a temporary
  directory and runs the executable from there.

  A temporary directory is created to hold the output files. The content inside
  this directory will be uploaded back to |storage| packaged as a .isolated
  file.

  Arguments:
    isolated_hash: the sha-1 of the .isolated file that must be retrieved to
                   recreate the tree of files to run the target executable.
    storage: an isolateserver.Storage object to retrieve remote objects. This
             object has a reference to an isolateserver.StorageApi, which does
             the actual I/O.
    cache: an isolateserver.LocalCache to keep from retrieving the same objects
           constantly by caching the objects retrieved. Can be on-disk or
           in-memory.
    algo: a hashlib class to hash content. Usually hashlib.sha1.
    extra_args: optional arguments to add to the command stated in the .isolate
                file.
  """
  run_dir = make_temp_dir('run_tha_test', cache.cache_dir)
  out_dir = unicode(tempfile.mkdtemp(prefix='run_tha_test'))
  result = 0
  try:
    try:
      settings = isolateserver.fetch_isolated(
          isolated_hash=isolated_hash,
          storage=storage,
          cache=cache,
          algo=algo,
          outdir=run_dir,
          os_flavor=get_flavor(),
          require_command=True)
    except isolateserver.ConfigError as e:
      tools.report_error(e)
      result = 1
      return result

    change_tree_read_only(run_dir, settings.read_only)
    cwd = os.path.normpath(os.path.join(run_dir, settings.relative_cwd))
    command = settings.command + extra_args

    # subprocess.call doesn't consider 'cwd' when searching for executable.
    # Yet isolate can specify command relative to 'cwd'. Convert it to absolute
    # path if necessary.
    if not os.path.isabs(command[0]):
      command[0] = os.path.abspath(os.path.join(cwd, command[0]))
    command = process_command(command, out_dir)
    logging.info('Running %s, cwd=%s' % (command, cwd))

    # TODO(csharp): This should be specified somewhere else.
    # TODO(vadimsh): Pass it via 'env_vars' in manifest.
    # Add a rotating log file if one doesn't already exist.
    env = os.environ.copy()
    if MAIN_DIR:
      env.setdefault('RUN_TEST_CASES_LOG_FILE',
          os.path.join(MAIN_DIR, RUN_TEST_CASES_LOG))
    try:
      with tools.Profiler('RunTest'):
        result = subprocess.call(command, cwd=cwd, env=env)
    except OSError as e:
      tools.report_error('Failed to run %s; cwd=%s: %s' % (command, cwd, e))
      result = 1

    # Upload out_dir and generate a .isolated file out of this directory. It is
    # only done if files were written in the directory.
    if os.listdir(out_dir):
      with tools.Profiler('ArchiveOutput'):
        results = isolateserver.archive_files_to_storage(
            storage, algo, [out_dir], None)
      # TODO(maruel): Implement side-channel to publish this information.
      print('run_isolated output: %s' % results[0][0])

  finally:
    try:
      rmtree(out_dir)
    finally:
      try:
        rmtree(run_dir)
      except OSError:
        logging.warning('Leaking %s', run_dir)
        # Swallow the exception so it doesn't generate an infrastructure error.
        #
        # It usually happens on Windows when a child process is not properly
        # terminated, usually because of a test case starting child processes
        # that time out. This causes files to be locked and it becomes
        # impossible to delete them.
        #
        # Only report an infrastructure error if the test didn't fail. This is
        # because a swarming bot will likely not reboot. This situation will
        # cause accumulation of temporary hardlink trees.
        if not result:
          raise
  return result
Example #22
def run_tha_test(isolated_hash, storage, cache, extra_args):
  """Downloads the dependencies in the cache, hardlinks them into a temporary
  directory and runs the executable from there.

  A temporary directory is created to hold the output files. The content inside
  this directory will be uploaded back to |storage| packaged as a .isolated
  file.

  Arguments:
    isolated_hash: the sha-1 of the .isolated file that must be retrieved to
                   recreate the tree of files to run the target executable.
    storage: an isolateserver.Storage object to retrieve remote objects. This
             object has a reference to an isolateserver.StorageApi, which does
             the actual I/O.
    cache: an isolateserver.LocalCache to keep from retrieving the same objects
           constantly by caching the objects retrieved. Can be on-disk or
           in-memory.
    extra_args: optional arguments to add to the command stated in the .isolate
                file.
  """
  run_dir = make_temp_dir('run_tha_test', cache.cache_dir)
  out_dir = unicode(tempfile.mkdtemp(prefix='run_tha_test'))
  result = 0
  try:
    try:
      settings = isolateserver.fetch_isolated(
          isolated_hash=isolated_hash,
          storage=storage,
          cache=cache,
          outdir=run_dir,
          require_command=True)
    except isolateserver.ConfigError as e:
      tools.report_error(e)
      result = 1
      return result

    change_tree_read_only(run_dir, settings.read_only)
    cwd = os.path.normpath(os.path.join(run_dir, settings.relative_cwd))
    command = settings.command + extra_args

    # subprocess.call doesn't consider 'cwd' when searching for executable.
    # Yet isolate can specify command relative to 'cwd'. Convert it to absolute
    # path if necessary.
    if not os.path.isabs(command[0]):
      command[0] = os.path.abspath(os.path.join(cwd, command[0]))
    command = process_command(command, out_dir)
    logging.info('Running %s, cwd=%s' % (command, cwd))

    # TODO(csharp): This should be specified somewhere else.
    # TODO(vadimsh): Pass it via 'env_vars' in manifest.
    # Add a rotating log file if one doesn't already exist.
    env = os.environ.copy()
    if MAIN_DIR:
      env.setdefault('RUN_TEST_CASES_LOG_FILE',
          os.path.join(MAIN_DIR, RUN_TEST_CASES_LOG))
    try:
      with tools.Profiler('RunTest'):
        result = subprocess.call(command, cwd=cwd, env=env)
    except OSError as e:
      tools.report_error('Failed to run %s; cwd=%s: %s' % (command, cwd, e))
      result = 1

    # Upload out_dir and generate a .isolated file out of this directory. It is
    # only done if files were written in the directory.
    if os.listdir(out_dir):
      with tools.Profiler('ArchiveOutput'):
        results = isolateserver.archive_files_to_storage(
            storage, [out_dir], None)
      # TODO(maruel): Implement side-channel to publish this information.
      output_data = {
        'hash': results[0][0],
        'namespace': storage.namespace,
        'storage': storage.location,
      }
      sys.stdout.flush()
      sys.stderr.flush()
      print(
          '[run_isolated_out_hack]%s[/run_isolated_out_hack]' %
          tools.format_json(output_data, dense=True))

  finally:
    try:
      rmtree(out_dir)
    finally:
      try:
        rmtree(run_dir)
      except OSError:
        logging.warning('Leaking %s', run_dir)
        # Swallow the exception so it doesn't generate an infrastructure error.
        #
        # It usually happens on Windows when a child process is not properly
        # terminated, usually because of a test case starting child processes
        # that time out. This causes files to be locked and it becomes
        # impossible to delete them.
        #
        # Only report an infrastructure error if the test didn't fail. This is
        # because a swarming bot will likely not reboot. This situation will
        # cause accumulation of temporary hardlink trees.
        if not result:
          raise
  return result
Example #23
def main(args):
  tools.disable_buffering()
  parser = tools.OptionParserWithLogging(
      usage='%prog <options>',
      version=__version__,
      log_file=RUN_ISOLATED_LOG_FILE)

  data_group = optparse.OptionGroup(parser, 'Data source')
  data_group.add_option(
      '-s', '--isolated',
      metavar='FILE',
      help='File/url describing what to map or run')
  data_group.add_option(
      '-H', '--hash',
      help='Hash of the .isolated to grab from the hash table')
  isolateserver.add_isolate_server_options(data_group, True)
  parser.add_option_group(data_group)

  cache_group = optparse.OptionGroup(parser, 'Cache management')
  cache_group.add_option(
      '--cache',
      default='cache',
      metavar='DIR',
      help='Cache directory, default=%default')
  cache_group.add_option(
      '--max-cache-size',
      type='int',
      metavar='NNN',
      default=20*1024*1024*1024,
      help='Trim if the cache gets larger than this value, default=%default')
  cache_group.add_option(
      '--min-free-space',
      type='int',
      metavar='NNN',
      default=2*1024*1024*1024,
      help='Trim if disk free space becomes lower than this value, '
           'default=%default')
  cache_group.add_option(
      '--max-items',
      type='int',
      metavar='NNN',
      default=100000,
      help='Trim if more than this number of items are in the cache, '
           'default=%default')
  parser.add_option_group(cache_group)

  auth.add_auth_options(parser)
  options, args = parser.parse_args(args)
  auth.process_auth_options(parser, options)
  isolateserver.process_isolate_server_options(data_group, options)

  if bool(options.isolated) == bool(options.hash):
    logging.debug('One and only one of --isolated or --hash is required.')
    parser.error('One and only one of --isolated or --hash is required.')

  options.cache = os.path.abspath(options.cache)
  policies = CachePolicies(
      options.max_cache_size, options.min_free_space, options.max_items)

  try:
    # |options.cache| path may not exist until DiskCache() instance is created.
    cache = DiskCache(
        options.cache, policies, isolateserver.get_hash_algo(options.namespace))
    remote = options.isolate_server or options.indir
    with isolateserver.get_storage(remote, options.namespace) as storage:
      # Hashing schemes used by |storage| and |cache| MUST match.
      assert storage.hash_algo == cache.hash_algo
      return run_tha_test(
          options.isolated or options.hash, storage, cache, args)
  except Exception as e:
    # Make sure any exception is logged.
    tools.report_error(e)
    logging.exception(e)
    return 1
Example #24
def process_manifest(
    swarming,
    isolate_server,
    namespace,
    isolated_hash,
    task_name,
    extra_args,
    shards,
    dimensions,
    env,
    working_dir,
    deadline,
    verbose,
    profile,
    priority,
):
    """Processes the manifest file and send off the swarming task request."""
    try:
        manifest = Manifest(
            isolate_server=isolate_server,
            namespace=namespace,
            isolated_hash=isolated_hash,
            task_name=task_name,
            extra_args=extra_args,
            shards=shards,
            dimensions=dimensions,
            env=env,
            working_dir=working_dir,
            deadline=deadline,
            verbose=verbose,
            profile=profile,
            priority=priority,
        )
    except ValueError as e:
        tools.report_error("Unable to process %s: %s" % (task_name, e))
        return 1

    chromium_setup(manifest)

    logging.info("Zipping up files...")
    if not zip_and_upload(manifest):
        return 1

    logging.info("Server: %s", swarming)
    logging.info("Task name: %s", task_name)
    trigger_url = swarming + "/test"
    manifest_text = manifest.to_json()
    result = net.url_read(trigger_url, data={"request": manifest_text})
    if not result:
        tools.report_error("Failed to trigger task %s\n%s" % (task_name, trigger_url))
        return 1
    try:
        json.loads(result)
    except (ValueError, TypeError) as e:
        msg = "\n".join(
            (
                "Failed to trigger task %s" % task_name,
                "Manifest: %s" % manifest_text,
                "Bad response: %s" % result,
                str(e),
            )
        )
        tools.report_error(msg)
        return 1
    return 0
Example #25
def run_tha_test(isolated_hash, storage, cache, extra_args):
    """Downloads the dependencies in the cache, hardlinks them into a temporary
  directory and runs the executable from there.

  A temporary directory is created to hold the output files. The content inside
  this directory will be uploaded back to |storage| packaged as a .isolated
  file.

  Arguments:
    isolated_hash: the sha-1 of the .isolated file that must be retrieved to
                   recreate the tree of files to run the target executable.
    storage: an isolateserver.Storage object to retrieve remote objects. This
             object has a reference to an isolateserver.StorageApi, which does
             the actual I/O.
    cache: an isolateserver.LocalCache to keep from retrieving the same objects
           constantly by caching the objects retrieved. Can be on-disk or
           in-memory.
    extra_args: optional arguments to add to the command stated in the .isolate
                file.
  """
    run_dir = make_temp_dir("run_tha_test", cache.cache_dir)
    out_dir = unicode(make_temp_dir("isolated_out", cache.cache_dir))
    result = 0
    try:
        try:
            settings = isolateserver.fetch_isolated(
                isolated_hash=isolated_hash, storage=storage, cache=cache, outdir=run_dir, require_command=True
            )
        except isolateserver.ConfigError as e:
            tools.report_error(e)
            return 1

        change_tree_read_only(run_dir, settings.read_only)
        cwd = os.path.normpath(os.path.join(run_dir, settings.relative_cwd))
        command = settings.command + extra_args

        # subprocess.call doesn't consider 'cwd' when searching for executable.
        # Yet isolate can specify command relative to 'cwd'. Convert it to absolute
        # path if necessary.
        if not os.path.isabs(command[0]):
            command[0] = os.path.abspath(os.path.join(cwd, command[0]))
        command = process_command(command, out_dir)
        logging.info("Running %s, cwd=%s" % (command, cwd))

        # TODO(csharp): This should be specified somewhere else.
        # TODO(vadimsh): Pass it via 'env_vars' in manifest.
        # Add a rotating log file if one doesn't already exist.
        env = os.environ.copy()
        if MAIN_DIR:
            env.setdefault("RUN_TEST_CASES_LOG_FILE", os.path.join(MAIN_DIR, RUN_TEST_CASES_LOG))
        try:
            sys.stdout.flush()
            with tools.Profiler("RunTest"):
                result = subprocess.call(command, cwd=cwd, env=env)
                logging.info("Command finished with exit code %d (%s)", result, hex(0xFFFFFFFF & result))
        except OSError as e:
            tools.report_error("Failed to run %s; cwd=%s: %s" % (command, cwd, e))
            result = 1

    finally:
        try:
            try:
                rmtree(run_dir)
            except OSError:
                logging.warning("Leaking %s", run_dir)
                # Swallow the exception so it doesn't generate an infrastructure error.
                #
                # It usually happens on Windows when a child process is not properly
                # terminated, usually because of a test case starting child processes
                # that time out. This causes files to be locked and it becomes
                # impossible to delete them.
                #
                # Only report an infrastructure error if the test didn't fail. This is
                # because a swarming bot will likely not reboot. This situation will
                # cause accumulation of temporary hardlink trees.
                if not result:
                    raise

            # HACK(vadimsh): On Windows rmtree(run_dir) call above has
            # a synchronization effect: it finishes only when all task child processes
            # terminate (since a running process locks *.exe file). Examine out_dir
            # only after that call completes (since child processes may
            # write to out_dir too and we need to wait for them to finish).

            # Upload out_dir and generate a .isolated file out of this directory.
            # It is only done if files were written in the directory.
            if os.listdir(out_dir):
                with tools.Profiler("ArchiveOutput"):
                    results = isolateserver.archive_files_to_storage(storage, [out_dir], None)
                # TODO(maruel): Implement side-channel to publish this information.
                output_data = {"hash": results[0][0], "namespace": storage.namespace, "storage": storage.location}
                sys.stdout.flush()
                print("[run_isolated_out_hack]%s[/run_isolated_out_hack]" % tools.format_json(output_data, dense=True))

        finally:
            rmtree(out_dir)

    return result
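
The [run_isolated_out_hack] marker printed by run_tha_test() is a plain-stdout side channel (the TODO above notes that a real one is still missing); a consumer-side sketch that recovers the JSON payload from captured output:

import json
import re

_OUT_HACK_RE = re.compile(
    r'\[run_isolated_out_hack\](.*)\[/run_isolated_out_hack\]', re.DOTALL)

def extract_run_isolated_output(stdout_text):
    """Returns the dict with 'hash', 'namespace' and 'storage' keys printed
    by run_tha_test(), or None if the marker is absent."""
    match = _OUT_HACK_RE.search(stdout_text)
    return json.loads(match.group(1)) if match else None
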