def test_bootstrap_command_linux_stable(self, tempdir, get_params, isfile):
    gce.Authenticator.is_gce.return_value = True
    recipe_cmd = ['run_recipe.py', 'recipe_params...']

    tempdir.return_value = 'foo'
    get_params.return_value = ldbs.Params(
        project='myproject', cipd_tag='stable', api=self.stable_api,
        mastername='mastername', buildername='buildername', buildnumber=1337,
        logdog_only=False, generation=None)
    isfile.return_value = True

    streamserver_uri = 'unix:%s' % (os.path.join('foo', 'butler.sock'),)

    bs = ldbs.bootstrap(self.rt, self.opts, self.basedir, self.tdir,
                        self.properties, recipe_cmd)

    # Check CIPD installation.
    cipd_dir = os.path.join(self.basedir, '.recipe_cipd')
    cipd_bootstrap_v2.install_cipd_packages.assert_called_once_with(
        cipd_dir,
        cipd.CipdPackage(
            name='infra/tools/luci/logdog/butler/${platform}',
            version='stable'),
        cipd.CipdPackage(
            name='infra/tools/luci/logdog/annotee/${platform}',
            version='stable'),
    )

    # Check bootstrap command.
    self.assertEqual(
        bs.cmd,
        [os.path.join(cipd_dir, 'logdog_butler'),
            '-log-level', 'warning',
            '-project', 'myproject',
            '-prefix', 'bb/mastername/buildername/1337',
            '-coordinator-host', 'luci-logdog.appspot.com',
            '-output', 'logdog',
            '-tag', 'buildbot.master=mastername',
            '-tag', 'buildbot.builder=buildername',
            '-tag', 'buildbot.buildnumber=1337',
            '-tag', 'logdog.viewer_url=https://luci-milo.appspot.com/buildbot/'
                    'mastername/buildername/1337',
            '-service-account-json', ':gce',
            '-output-max-buffer-age', '30s',
            'run',
            '-stdout', 'tee=stdout',
            '-stderr', 'tee=stderr',
            '-streamserver-uri', streamserver_uri,
            '--',
            os.path.join(cipd_dir, 'logdog_annotee'),
                '-log-level', 'warning',
                '-name-base', 'recipes',
                '-print-summary',
                '-tee', 'annotations,text',
                '-json-args-path', self._tp('logdog_annotee_cmd.json'),
                '-result-path', self._tp('bootstrap_result.json'),
        ])

    self._assertAnnoteeCommand(recipe_cmd)
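
The assertions above pin both the stream prefix ('bb/mastername/buildername/1337') and the logdog.viewer_url tag. A minimal hedged sketch of how those two values could be derived from ldbs.Params; the helper name and the hard-coded Milo host are illustrative, not the real logdog_bootstrap internals.

def _example_prefix_and_viewer_url(params):
    # Sketch only: mirrors the values asserted in the test above.
    prefix = 'bb/%s/%s/%d' % (params.mastername, params.buildername,
                              params.buildnumber)
    viewer_url = ('https://luci-milo.appspot.com/buildbot/%s/%s/%d' %
                  (params.mastername, params.buildername, params.buildnumber))
    return prefix, viewer_url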
def all_cipd_packages():
    """Generator which yields all referenced CIPD packages."""
    package_name = 'infra/tools/cipd/${platform}'
    yield cipd.CipdPackage(name=package_name, version=DEFAULT_CIPD_VERSION)
    yield cipd.CipdPackage(name=package_name, version=STAGING_CIPD_VERSION)
    for packages in AUX_BINARY_PACKAGES.itervalues():
        for pkg in packages:
            yield pkg
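
A hedged usage sketch for a generator like this: materialize it once and pre-install every referenced package into a shared CIPD root. The cipd_root argument and the cipd_bootstrap_v2.install_cipd_packages call follow the pattern in the surrounding examples; the helper itself is illustrative.

def prefetch_all_cipd_packages(cipd_root):
    # Install every package yielded by all_cipd_packages() into one root.
    packages = list(all_cipd_packages())
    cipd_bootstrap_v2.install_cipd_packages(cipd_root, *packages)
    return packages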
Example #3
def all_cipd_packages():
    """Generator which yields all referenced CIPD packages."""
    # All CIPD packages are in top-level platform config.
    for pins in (_STABLE_CIPD_PINS, _CANARY_CIPD_PINS):
        # TODO(dnj): Remove me when everything runs on Kitchen.
        yield cipd.CipdPackage(name=_RECIPES_PY_CIPD_PACKAGE,
                               version=pins.recipes)
        yield cipd.CipdPackage(name=_KITCHEN_CIPD_PACKAGE,
                               version=pins.kitchen)
Example #4
    def test_cipd_install_failure_raises_bootstrap_error(self):
        ldbs._check_call.side_effect = subprocess.CalledProcessError(0, [], '')

        self.assertRaises(
            ldbs.BootstrapError,
            ldbs._install_cipd,
            self.basedir,
            cipd.CipdBinary(cipd.CipdPackage('infra/foo', 'v0'), 'foo'),
            cipd.CipdBinary(cipd.CipdPackage('infra/bar', 'v1'), 'baz'),
        )
def all_cipd_packages():
    """Generator which yields all referenced CIPD packages."""
    # All CIPD packages are in top-level platform config.
    pcfg = infra_platform.cascade_config(_PLATFORM_CONFIG, plat=())
    for name in (pcfg['butler'], pcfg['annotee']):
        for version in (_STABLE_CIPD_TAG, _CANARY_CIPD_TAG):
            yield cipd.CipdPackage(name=name, version=version)
Example #6
    def test_cipd_install(self):
        pkgs = ldbs._install_cipd(
            self.basedir,
            cipd.CipdBinary(cipd.CipdPackage('infra/foo', 'v0'), 'foo'),
            cipd.CipdBinary(cipd.CipdPackage('infra/bar', 'v1'), 'baz'),
        )
        self.assertEqual(pkgs, (self._bp('foo'), self._bp('baz')))

        ldbs._check_call.assert_called_once_with([
            sys.executable,
            os.path.join(env.Build, 'scripts', 'slave', 'cipd.py'),
            '--dest-directory',
            self.basedir,
            '--json-output',
            os.path.join(self.basedir, 'packages.json'),
            '-P',
            'infra/foo@v0',
            '-P',
            'infra/bar@v1',
        ])
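
Based on the command asserted above, a hedged sketch of what ldbs._install_cipd presumably does: build one cipd.py invocation with a -P name@version flag per requested binary, run it, and return each binary's installed path. The real implementation may differ; _check_call, env.Build and BootstrapError are the names used elsewhere in these examples.

def _install_cipd_sketch(path, *binaries):
    # Build the cipd.py command line mirrored by test_cipd_install above.
    cmd = [
        sys.executable,
        os.path.join(env.Build, 'scripts', 'slave', 'cipd.py'),
        '--dest-directory', path,
        '--json-output', os.path.join(path, 'packages.json'),
    ]
    for b in binaries:
        cmd += ['-P', '%s@%s' % (b.package.name, b.package.version)]
    try:
        _check_call(cmd)
    except subprocess.CalledProcessError as e:
        # Mirrors test_cipd_install_failure_raises_bootstrap_error above.
        raise BootstrapError('Failed to install CIPD packages: %s' % (e,))
    return tuple(os.path.join(path, b.relpath) for b in binaries)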
  def test_bootstrap_command_win_canary(self, tempdir, get_params,
                                        service_account, isfile):
    infra_platform.get.return_value = ('win', 'x86_64', 64)

    recipe_cmd = ['run_recipe.py', 'recipe_params...']

    tempdir.return_value = 'foo'
    get_params.return_value = ldbs.Params(
        project='myproject', cipd_tag='canary',
        api=self.latest_api, mastername='mastername',
        buildername='buildername', buildnumber=1337, logdog_only=True,
        generation=None)
    service_account.return_value = 'creds.json'
    isfile.return_value = True

    bs = ldbs.bootstrap(self.rt, self.opts, self.basedir, self.tdir,
                        self.properties, recipe_cmd)

    # Check CIPD installation.
    cipd_dir = os.path.join(self.basedir, '.recipe_cipd')
    cipd_bootstrap_v2.install_cipd_packages.assert_called_once_with(
        cipd_dir,
        cipd.CipdPackage(
            name='infra/tools/luci/logdog/butler/${platform}',
            version='canary'),
        cipd.CipdPackage(
            name='infra/tools/luci/logdog/annotee/${platform}',
            version='canary'),
    )

    # Check bootstrap command.
    self.assertEqual(
        bs.cmd,
        [os.path.join(cipd_dir, 'logdog_butler.exe'),
            '-log-level', 'warning',
            '-project', 'myproject',
            '-prefix', 'bb/mastername/buildername/1337',
            '-coordinator-host', 'luci-logdog.appspot.com',
            '-output', 'logdog',
            '-tag', 'buildbot.master=mastername',
            '-tag', 'buildbot.builder=buildername',
            '-tag', 'buildbot.buildnumber=1337',
            '-tag', 'logdog.viewer_url=https://luci-milo.appspot.com/buildbot/'
                    'mastername/buildername/1337',
            '-service-account-json', 'creds.json',
            '-output-max-buffer-age', '30s',
            '-io-keepalive-stderr', '5m',
            'run',
            '-stdout', 'tee=stdout',
            '-stderr', 'tee=stderr',
            '-streamserver-uri', 'net.pipe:LUCILogDogButler',
            '--',
            os.path.join(cipd_dir, 'logdog_annotee.exe'),
                '-log-level', 'warning',
                '-name-base', 'recipes',
                '-print-summary',
                '-tee', 'annotations',
                '-json-args-path', self._tp('logdog_annotee_cmd.json'),
                '-result-path', self._tp('bootstrap_result.json'),
        ])

    service_account.assert_called_once_with(
        self.opts, ldbs._PLATFORM_CONFIG[('win',)]['credential_path'])
    self._assertAnnoteeCommand(recipe_cmd)
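
The Linux test expects '-service-account-json', ':gce' (with gce.Authenticator.is_gce mocked to True), while this Windows test mocks the service-account helper to return 'creds.json' from the platform's credential_path. A hedged sketch of _get_service_account_json consistent with both; the precedence and the simplified signature are guesses.

def _get_service_account_json_sketch(opts, credential_path):
    # Sketch only; the real helper is also handed opts (see the call sites in
    # the bootstrap examples), which is elided here.
    if gce.Authenticator.is_gce():
        # ':gce' is the sentinel value the Linux test expects on GCE.
        return ':gce'
    if credential_path and os.path.isfile(credential_path):
        return credential_path
    # No credentials: the bootstrap simply omits -service-account-json
    # (see the cfg.service_account_path check in a later example).
    return None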
Example #8
def _exec_recipe(args, rt, stream, basedir, buildbot_build_dir):
    tempdir = rt.tempdir(basedir)
    LOGGER.info('Using temporary directory: [%s].', tempdir)

    build_data_dir = rt.tempdir(basedir)
    LOGGER.info('Using build data directory: [%s].', build_data_dir)

    # Construct our properties.
    properties = copy.copy(args.factory_properties)
    properties.update(args.build_properties)

    # Determine our master and builder names.
    mastername = properties.get('mastername')
    buildername = properties.get('buildername')

    # Determine if this build is an opt-in build.
    is_opt_in = get_is_opt_in(properties)

    # Determine our CIPD pins.
    #
    # If a property includes "remote_run_canary", we will explicitly use canary
    # pins. This can be done by manually submitting a build to the waterfall.
    is_canary = (_get_is_canary(mastername) or is_opt_in
                 or 'remote_run_canary' in properties or args.canary)
    pins = _STABLE_CIPD_PINS if not is_canary else _CANARY_CIPD_PINS

    # Determine if we're running Kitchen.
    #
    # If a property includes "remote_run_kitchen", we will explicitly run under
    # Kitchen. This can be done by manually submitting a build to the waterfall.
    is_kitchen = (_get_is_kitchen(mastername, buildername) or is_opt_in
                  or 'remote_run_kitchen' in properties)

    # Allow command-line "--kitchen" to override.
    if args.kitchen:
        pins = pins._replace(kitchen=args.kitchen)
        is_kitchen = True

    # Augment our input properties...
    properties['build_data_dir'] = build_data_dir
    properties['builder_id'] = 'master.%s:%s' % (mastername, buildername)

    if not is_kitchen:
        # path_config property defines what paths a build uses for checkout, git
        # cache, goma cache, etc.
        #
        # TODO(dnj or phajdan): Rename "kitchen" path config to "remote_run_legacy".
        # "kitchen" was never correct, and incorrectly implies that Kitchen is
        # somehow involved in this path config.
        properties['path_config'] = 'kitchen'
        properties['bot_id'] = properties['slavename']
    else:
        # If we're using Kitchen, our "path_config" must be empty or "kitchen".
        path_config = properties.pop('path_config', None)
        if path_config and path_config != 'kitchen':
            raise ValueError(
                "Users of 'remote_run.py' MUST specify either 'kitchen' "
                "or no 'path_config', not [%s]." % (path_config, ))

    LOGGER.info('Using properties: %r', properties)

    monitoring_utils.write_build_monitoring_event(build_data_dir, properties)

    # Ensure that the CIPD client is installed and available on PATH.
    from slave import cipd_bootstrap_v2
    cipd_bootstrap_v2.high_level_ensure_cipd_client(basedir, mastername)

    # "/b/c" as a cache directory.
    cache_dir = os.path.join(BUILDBOT_ROOT, 'c')

    # Cleanup data from old builds.
    _cleanup_old_layouts(is_kitchen, properties, buildbot_build_dir, cache_dir)

    # (Canary) Use Kitchen if configured.
    # TODO(dnj): Make this the only path once we move to Kitchen.
    if is_kitchen:
        return _remote_run_with_kitchen(args, stream, is_canary, pins.kitchen,
                                        properties, tempdir, basedir,
                                        cache_dir)

    ##
    # Classic Remote Run
    #
    # TODO(dnj): Delete this in favor of Kitchen.
    ##

    properties_file = os.path.join(tempdir, 'remote_run_properties.json')
    with open(properties_file, 'w') as f:
        json.dump(properties, f)

    cipd_path = os.path.join(basedir, '.remote_run_cipd')

    cipd_bootstrap_v2.install_cipd_packages(
        cipd_path, cipd.CipdPackage(_RECIPES_PY_CIPD_PACKAGE, pins.recipes))

    engine_flags = {
        'use_result_proto': True,
    }

    engine_args = []
    if engine_flags:
        engine_flags_path = os.path.join(tempdir, 'engine_flags.json')
        with open(engine_flags_path, 'w') as f:
            json.dump({'engine_flags': engine_flags}, f)

        engine_args = ['--operational-args-path', engine_flags_path]

    recipe_result_path = os.path.join(tempdir, 'recipe_result.json')
    recipe_cmd = [
        sys.executable,
        os.path.join(cipd_path, 'recipes.py'),
    ] + engine_args + [
        '--verbose',
        'remote',
        '--repository',
        args.repository,
        '--workdir',
        os.path.join(tempdir, 'rw'),
    ]
    if args.revision:
        recipe_cmd.extend(['--revision', args.revision])
    if args.use_gitiles:
        recipe_cmd.append('--use-gitiles')
    recipe_cmd.extend([
        '--',
    ] + (engine_args) + [
        '--verbose',
        'run',
        '--properties-file',
        properties_file,
        '--workdir',
        os.path.join(tempdir, 'w'),
        '--output-result-json',
        recipe_result_path,
        properties.get('recipe') or args.recipe,
    ])
    # If we bootstrap through logdog, the recipe command line gets written
    # to a temporary file and does not appear in the log.
    LOGGER.info('Recipe command line: %r', recipe_cmd)

    # Defaulting to a return code != 0 is for the benefit of buildbot, which
    # uses the return code to decide whether a step failed.
    recipe_return_code = 1
    try:
        # NOTE: this unconditional raise short-circuits the LogDog bootstrap
        # below; the NotBootstrapped handler then invokes `recipes.py` directly.
        raise logdog_bootstrap.NotBootstrapped()
        bs = logdog_bootstrap.bootstrap(rt, args, basedir, tempdir, properties,
                                        recipe_cmd)

        LOGGER.info('Bootstrapping through LogDog: %s', bs.cmd)
        bs.annotate(stream)
        _ = _call(bs.cmd)
        recipe_return_code = bs.get_result()
    except logdog_bootstrap.NotBootstrapped:
        LOGGER.info('Not using LogDog. Invoking `recipes.py` directly.')
        recipe_return_code = _call(recipe_cmd)

    # Try to open recipe result JSON. Any failure will result in an exception
    # and an infra failure.
    with open(recipe_result_path) as f:
        return_value = json.load(f)

    f = return_value.get('failure')
    if f is not None and not f.get('step_failure'):
        # The recipe engine used to return -1, which gets interpreted as 255
        # when used as a process exit code, since exit codes are a single byte.
        recipe_return_code = 255

    return recipe_return_code
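
The tail of _exec_recipe turns the recipe result JSON into a buildbot-friendly return code. A small hedged helper capturing just that mapping; the key names mirror the code above, everything else is illustrative.

def _return_code_from_recipe_result(recipe_result_path, recipe_return_code):
    # Any failure to open or parse the result file is an infra failure,
    # exactly as in _exec_recipe above.
    with open(recipe_result_path) as f:
        return_value = json.load(f)
    failure = return_value.get('failure')
    if failure is not None and not failure.get('step_failure'):
        # Non-step failures (e.g. engine exceptions) map to 255, since
        # process exit codes are a single byte.
        return 255
    return recipe_return_code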
Example #9
def _remote_run_with_kitchen(args, stream, _is_canary, kitchen_version,
                             properties, tempdir, basedir, cache_dir):
    # Write our build properties to a JSON file.
    properties_file = os.path.join(tempdir, 'remote_run_properties.json')
    with open(properties_file, 'w') as f:
        json.dump(properties, f)

    # Create our directory structure.
    recipe_temp_dir = os.path.join(tempdir, 't')
    os.makedirs(recipe_temp_dir)

    # Use CIPD to download Kitchen to a root within the temporary directory.
    cipd_root = os.path.join(basedir, '.remote_run_cipd')
    kitchen_pkg = cipd.CipdPackage(name=_KITCHEN_CIPD_PACKAGE,
                                   version=kitchen_version)

    from slave import cipd_bootstrap_v2
    cipd_bootstrap_v2.install_cipd_packages(cipd_root, kitchen_pkg)

    kitchen_bin = os.path.join(cipd_root,
                               'kitchen' + infra_platform.exe_suffix())

    kitchen_cmd = [
        kitchen_bin,
        '-log-level',
        ('debug' if LOGGER.isEnabledFor(logging.DEBUG) else 'info'),
    ]

    kitchen_result_path = os.path.join(tempdir, 'kitchen_result.json')
    kitchen_cmd += [
        'cook',
        '-mode',
        'buildbot',
        '-output-result-json',
        kitchen_result_path,
        '-properties-file',
        properties_file,
        '-recipe',
        args.recipe or properties.get('recipe'),
        '-repository',
        args.repository,
        '-cache-dir',
        cache_dir,
        '-temp-dir',
        recipe_temp_dir,
        '-checkout-dir',
        os.path.join(tempdir, 'rw'),
        '-workdir',
        os.path.join(tempdir, 'w'),
    ]

    # Add additional system Python paths. Ideally, none of these would be
    # required, since our remote checkout should be self-sufficient. Each of these
    # should be viewed as a hermetic breach.
    for python_path in [
            os.path.join(BUILD_ROOT, 'scripts'),
            os.path.join(BUILD_ROOT, 'site_config'),
    ]:
        kitchen_cmd += ['-python-path', python_path]

    # Master "remote_run" factory has been changed to pass "refs/heads/master" as
    # a default instead of "origin/master". However, this is a master-side change,
    # and requires a master restart. Rather than restarting all masters, we will
    # just pretend the change took effect here.
    #
    # No "-revision" means "latest", which is the same as "origin/master"'s
    # meaning.
    #
    # See: https://chromium-review.googlesource.com/c/446895/
    # See: crbug.com/696704
    #
    # TODO(dnj,nodir): Delete this once we're confident that all masters have been
    # restarted to take effect.
    if args.revision and (args.revision != 'origin/master'):
        kitchen_cmd += ['-revision', args.revision]

    # Using LogDog?
    try:
        # Load bootstrap configuration (may raise NotBootstrapped).
        cfg = logdog_bootstrap.get_config(args, properties)
        annotation_url = logdog_bootstrap.get_annotation_url(cfg)

        #    if cfg.logdog_only:
        #      kitchen_cmd += ['-logdog-only']

        kitchen_cmd += [
            '-logdog-annotation-url',
            annotation_url,
        ]

        # Add LogDog tags.
        if cfg.tags:
            for k, v in cfg.tags.iteritems():
                param = k
                if v is not None:
                    param += '=' + v
                kitchen_cmd += ['-logdog-tag', param]

        # (Debug) Use Kitchen output file if in debug mode.
        if args.logdog_debug_out_file:
            kitchen_cmd += [
                '-logdog-debug-out-file', args.logdog_debug_out_file
            ]

        logdog_bootstrap.annotate(cfg, stream)
    except logdog_bootstrap.NotBootstrapped as e:
        LOGGER.info('Not configured to use LogDog: %s', e)

    # Remove PYTHONPATH, since Kitchen will re-establish its own hermetic path.
    kitchen_env = os.environ.copy()
    kitchen_env.pop('PYTHONPATH', None)

    # Invoke Kitchen, capture its return code.
    return_code = _call(kitchen_cmd, env=kitchen_env)

    # Try to open kitchen result file. Any failure will result in an exception
    # and an infra failure.
    with open(kitchen_result_path) as f:
        kitchen_result = json.load(f)
        if isinstance(kitchen_result,
                      dict) and 'recipe_result' in kitchen_result:
            recipe_result = kitchen_result['recipe_result']  # always a dict
        else:
            # Legacy mode: kitchen result is recipe result
            # TODO(nodir): remove this code path

            # On success, it may be JSON "null", so use an empty dict.
            recipe_result = kitchen_result or {}

    # If the recipe failed, but not with a step failure, we assume it was an
    # exception.
    f = recipe_result.get('failure', {})
    if f.get('timeout') or f.get('step_data'):
        # Return an infra failure for these failure types.
        #
        # The recipe engine used to return -1, which gets interpreted as 255
        # when used as a process exit code, since exit codes are a single byte.
        return_code = 255

    return return_code
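
The LogDog section above expands a {key: value-or-None} tag dict into repeated '-logdog-tag' flags. The same expansion in isolation, as a hedged sketch (sorting is added here for determinism and is not in the original loop):

def _logdog_tag_flags(tags):
    # Expand {'k': 'v', 'flag': None} into
    # ['-logdog-tag', 'k=v', '-logdog-tag', 'flag'].
    flags = []
    for k, v in sorted(tags.iteritems()):
        param = k if v is None else '%s=%s' % (k, v)
        flags += ['-logdog-tag', param]
    return flags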
def bootstrap(rt, opts, basedir, tempdir, properties, cmd):
    """Executes the recipe engine, bootstrapping it through LogDog/Annotee.

  This method executes the recipe engine, bootstrapping it through
  LogDog/Annotee so its output and annotations are streamed to LogDog. The
  bootstrap is configured to tee the annotations through STDOUT/STDERR so they
  will still be sent to BuildBot.

  The overall setup here is:
  [annotated_run.py] => [logdog_butler] => [logdog_annotee] => [recipes.py]

  Args:
    rt (RobustTempdir): context for temporary directories.
    opts (argparse.Namespace): Command-line options.
    basedir (str): The base (non-temporary) recipe directory.
    tempdir (str): The path to the session temporary directory.
    properties (dict): Build properties.
    cmd (list): The recipe runner command list to bootstrap.

  Returns (BootstrapState): The populated bootstrap state.

  Raises:
    NotBootstrapped: if the recipe engine was not executed because the
        LogDog bootstrap requirements are not available.
    BootstrapError: if there was an error bootstrapping the recipe runner
        through LogDog.
  """
    # Load bootstrap configuration (may raise NotBootstrapped).
    cfg = get_config(opts, properties)

    # Determine LogDog prefix.
    LOGGER.debug('Using log stream prefix: [%s]', cfg.prefix)

    # Install our Butler/Annotee packages from CIPD.
    cipd_path = os.path.join(basedir, '.recipe_cipd')

    packages = (
        # Butler
        cipd.CipdPackage(name=cfg.plat.butler, version=cfg.params.cipd_tag),

        # Annotee
        cipd.CipdPackage(name=cfg.plat.annotee, version=cfg.params.cipd_tag),
    )
    try:
        cipd_bootstrap_v2.install_cipd_packages(cipd_path, *packages)
    except Exception:
        LOGGER.exception('Failed to install LogDog CIPD packages: %s',
                         packages)
        raise BootstrapError('Failed to install CIPD packages.')

    def cipd_bin(base):
        return os.path.join(cipd_path, base + infra_platform.exe_suffix())

    def var(title, v, dflt):
        v = v or dflt
        if not v:
            raise NotBootstrapped('No value for [%s]' % (title, ))
        return v

    butler = var('butler', opts.logdog_butler_path, cipd_bin('logdog_butler'))
    if not os.path.isfile(butler):
        raise NotBootstrapped('Invalid Butler path: %s' % (butler, ))

    annotee = var('annotee', opts.logdog_annotee_path,
                  cipd_bin('logdog_annotee'))
    if not os.path.isfile(annotee):
        raise NotBootstrapped('Invalid Annotee path: %s' % (annotee, ))

    # Determine LogDog verbosity.
    if opts.logdog_verbose == 0:
        log_level = 'warning'
    elif opts.logdog_verbose == 1:
        log_level = 'info'
    else:
        log_level = 'debug'

    # Generate our Butler stream server URI.
    streamserver_uri = _get_streamserver_uri(rt, cfg.plat.streamserver)

    # Path of the sentinel file used for bootstrap error detection.
    bootstrap_result_path = os.path.join(tempdir, 'bootstrap_result.json')

    # Dump the bootstrapped Annotee command to JSON for Annotee to load.
    #
    # Annotee can accept bootstrap parameters through either JSON or the
    # command line, but using JSON effectively steps around any command-line
    # length limits such as those experienced on Windows.
    cmd_json = os.path.join(tempdir, 'logdog_annotee_cmd.json')
    with open(cmd_json, 'w') as fd:
        json.dump(cmd, fd)

    # Butler Command, global options.
    butler_args = [
        butler,
        '-log-level',
        log_level,
        '-project',
        cfg.params.project,
        '-prefix',
        cfg.prefix,
        '-coordinator-host',
        cfg.host,
        '-output',
        _make_butler_output(opts, cfg),
    ]
    for k, v in cfg.tags.iteritems():
        if v:
            k = '%s=%s' % (k, v)
        butler_args += ['-tag', k]
    if cfg.service_account_path:
        butler_args += ['-service-account-json', cfg.service_account_path]
    if cfg.plat.max_buffer_age:
        butler_args += ['-output-max-buffer-age', cfg.plat.max_buffer_age]
    if cfg.logdog_only:
        butler_args += ['-io-keepalive-stderr', '5m']

    # Butler: subcommand run.
    butler_run_args = [
        '-stdout',
        'tee=stdout',
        '-stderr',
        'tee=stderr',
        '-streamserver-uri',
        streamserver_uri,
    ]

    # Annotee Command.
    annotee_args = [
        annotee,
        '-log-level',
        log_level,
        '-name-base',
        'recipes',
        '-print-summary',
        '-tee',
        ('annotations' if cfg.logdog_only else 'annotations,text'),
        '-json-args-path',
        cmd_json,
        '-result-path',
        bootstrap_result_path,
    ]

    # API transformation switch. Please prune as API versions become
    # unused.
    #
    # NOTE: Please update the above comment as new API versions and translation
    # functions are added.
    start_api = cur_api = max(_CIPD_TAG_API_MAP.itervalues())

    # Assert that we've hit the target "params.api".
    assert cur_api == cfg.params.api, 'Failed to transform API %s => %s' % (
        start_api, cfg.params.api)

    cmd = butler_args + ['run'] + butler_run_args + ['--'] + annotee_args
    return BootstrapState(cfg, cmd, bootstrap_result_path)
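
A minimal hedged sketch of how a caller drives the returned BootstrapState, mirroring the remote_run call sites shown in these examples (bs.annotate, _call and bs.get_result come from there); the fallback path is what those callers do when NotBootstrapped is raised.

def _run_with_optional_logdog(rt, opts, basedir, tempdir, properties,
                              recipe_cmd, stream):
    try:
        bs = bootstrap(rt, opts, basedir, tempdir, properties, recipe_cmd)
        # Annotate the buildbot stream, as in the _exec_recipe example.
        bs.annotate(stream)
        _call(bs.cmd)
        # The direct call's return value is discarded in favor of
        # bs.get_result(), matching the call sites above.
        return bs.get_result()
    except NotBootstrapped:
        # No LogDog: run the recipe command directly.
        return _call(recipe_cmd)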
Example #11
def bootstrap(rt, opts, basedir, tempdir, properties, cmd):
    """Executes the recipe engine, bootstrapping it through LogDog/Annotee.

  This method executes the recipe engine, bootstrapping it through
  LogDog/Annotee so its output and annotations are streamed to LogDog. The
  bootstrap is configured to tee the annotations through STDOUT/STDERR so they
  will still be sent to BuildBot.

  The overall setup here is:
  [annotated_run.py] => [logdog_butler] => [logdog_annotee] => [recipes.py]

  Args:
    rt (RobustTempdir): context for temporary directories.
    opts (argparse.Namespace): Command-line options.
    basedir (str): The base (non-temporary) recipe directory.
    tempdir (str): The path to the session temporary directory.
    properties (dict): Build properties.
    cmd (list): The recipe runner command list to bootstrap.

  Returns (BootstrapState): The populated bootstrap state.

  Raises:
    NotBootstrapped: if the recipe engine was not executed because the
        LogDog bootstrap requirements are not available.
    BootstrapError: if there was an error bootstrapping the recipe runner
        through LogDog.
  """
    # If we have LOGDOG_STREAM_PREFIX defined, we are already bootstrapped. Don't
    # start a new instance.
    #
    # LOGDOG_STREAM_PREFIX is set by the Butler when it bootstraps a process, so
    # it should be set for all child processes of the initial bootstrap.
    if os.environ.get('LOGDOG_STREAM_PREFIX', None) is not None:
        raise NotBootstrapped(
            'LOGDOG_STREAM_PREFIX in environment, refusing to nest bootstraps.'
        )

    # Load our bootstrap parameters based on our master/builder.
    params = _get_params(properties)

    # Get our platform configuration. This will fail if any fields are missing.
    plat = _get_platform()

    # Determine LogDog prefix.
    prefix = _build_prefix(params)
    LOGGER.debug('Using log stream prefix: [%s]', prefix)

    def var(title, v, dflt):
        v = v or dflt
        if not v:
            raise NotBootstrapped('No value for [%s]' % (title, ))
        return v

    # Install our Butler/Annotee packages from CIPD.
    cipd_path = os.path.join(basedir, '.recipe_cipd')
    butler, annotee = _install_cipd(
        cipd_path,
        # butler
        cipd.CipdBinary(
            package=cipd.CipdPackage(name=plat.butler,
                                     version=params.cipd_tag),
            relpath=plat.butler_relpath,
        ),

        # annotee
        cipd.CipdBinary(
            package=cipd.CipdPackage(name=plat.annotee,
                                     version=params.cipd_tag),
            relpath=plat.annotee_relpath,
        ),
    )

    butler = var('butler', opts.logdog_butler_path, butler)
    if not os.path.isfile(butler):
        raise NotBootstrapped('Invalid Butler path: %s' % (butler, ))

    annotee = var('annotee', opts.logdog_annotee_path, annotee)
    if not os.path.isfile(annotee):
        raise NotBootstrapped('Invalid Annotee path: %s' % (annotee, ))

    service_host = var('service host', opts.logdog_service_host,
                       plat.service_host)
    viewer_host = var('viewer host', opts.logdog_viewer_host, plat.viewer_host)

    # Determine LogDog verbosity.
    if opts.logdog_verbose == 0:
        log_level = 'warning'
    elif opts.logdog_verbose == 1:
        log_level = 'info'
    else:
        log_level = 'debug'

    service_account_json = _get_service_account_json(opts,
                                                     plat.credential_path)

    # Generate our Butler stream server URI.
    streamserver_uri = _get_streamserver_uri(rt, plat.streamserver)

    # Path of the sentinel file used for bootstrap error detection.
    bootstrap_result_path = os.path.join(tempdir, 'bootstrap_result.json')

    # Dump the bootstrapped Annotee command to JSON for Annotee to load.
    #
    # Annotee can accept bootstrap parameters through either JSON or the
    # command line, but using JSON effectively steps around any command-line
    # length limits such as those experienced on Windows.
    cmd_json = os.path.join(tempdir, 'logdog_annotee_cmd.json')
    with open(cmd_json, 'w') as fd:
        json.dump(cmd, fd)

    # Butler Command.
    cmd = [
        butler,
        '-log-level',
        log_level,
        '-project',
        params.project,
        '-prefix',
        prefix,
        '-output',
        'logdog,host="%s"' % (service_host, ),
    ]
    if service_account_json:
        cmd += ['-service-account-json', service_account_json]
    if plat.max_buffer_age:
        cmd += ['-output-max-buffer-age', plat.max_buffer_age]
    cmd += [
        'run',
        '-stdout',
        'tee=stdout',
        '-stderr',
        'tee=stderr',
        '-streamserver-uri',
        streamserver_uri,
        '--',
    ]

    # Annotee Command.
    cmd += [
        annotee,
        '-log-level',
        log_level,
        '-project',
        params.project,
        '-butler-stream-server',
        streamserver_uri,
        '-logdog-host',
        viewer_host,
        '-annotate',
        'tee',
        '-name-base',
        'recipes',
        '-print-summary',
        '-tee',
        '-json-args-path',
        cmd_json,
        '-result-path',
        bootstrap_result_path,
    ]
    return BootstrapState(cmd, bootstrap_result_path)
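
Both bootstrap variants ask _get_streamserver_uri for a platform-appropriate Butler stream server. The tests earlier expect 'unix:<tempdir>/butler.sock' on Linux and 'net.pipe:LUCILogDogButler' on Windows; below is a hedged sketch consistent with that. The real helper takes the RobustTempdir context and the platform's streamserver type, so this simplified signature is an assumption.

def _streamserver_uri_sketch(streamserver_type, socket_dir):
    # POSIX: a unix domain socket in a session temporary directory.
    if streamserver_type == 'unix':
        return 'unix:%s' % (os.path.join(socket_dir, 'butler.sock'),)
    # Windows: a named pipe.
    if streamserver_type == 'net.pipe':
        return 'net.pipe:LUCILogDogButler'
    raise NotBootstrapped(
        'Unsupported stream server type: %s' % (streamserver_type,))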
def main(argv):
    parser = argparse.ArgumentParser()
    parser.add_argument('--repository',
                        required=True,
                        help='URL of a git repository to fetch.')
    parser.add_argument('--revision', help='Git commit hash to check out.')
    parser.add_argument('--recipe',
                        required=True,
                        help='Name of the recipe to run')
    parser.add_argument('--build-properties-gz',
                        dest='build_properties',
                        type=chromium_utils.convert_gz_json_type,
                        default={},
                        help='Build properties in b64 gz JSON format')
    parser.add_argument('--factory-properties-gz',
                        dest='factory_properties',
                        type=chromium_utils.convert_gz_json_type,
                        default={},
                        help='factory properties in b64 gz JSON format')
    parser.add_argument('--leak',
                        action='store_true',
                        help='Refrain from cleaning up generated artifacts.')
    parser.add_argument('--verbose', action='store_true')

    group = parser.add_argument_group('LogDog Bootstrap')
    logdog_bootstrap.add_arguments(group)

    args = parser.parse_args(argv[1:])

    with robust_tempdir.RobustTempdir(prefix='rr', leak=args.leak) as rt:
        try:
            basedir = chromium_utils.FindUpward(os.getcwd(), 'b')
        except chromium_utils.PathNotFound as e:
            LOGGER.warn(e)
            # Use a base directory inside the system temporary directory - if
            # we used the slave one (cwd), the paths would get too long.
            # Recipes that need different paths or persistent directories
            # should set them up explicitly.
            basedir = tempfile.gettempdir()

        # Explicitly clean up possibly leaked temporary directories
        # from previous runs.
        rt.cleanup(basedir)

        tempdir = rt.tempdir(basedir)
        LOGGER.info('Using temporary directory: [%s].', tempdir)

        build_data_dir = rt.tempdir(basedir)
        LOGGER.info('Using build data directory: [%s].', build_data_dir)

        properties = copy.copy(args.factory_properties)
        properties.update(args.build_properties)
        properties['build_data_dir'] = build_data_dir
        LOGGER.info('Using properties: %r', properties)
        properties_file = os.path.join(tempdir, 'remote_run_properties.json')
        with open(properties_file, 'w') as f:
            json.dump(properties, f)

        monitoring_utils.write_build_monitoring_event(build_data_dir,
                                                      properties)

        # Make switching to remote_run easier: we do not use the buildbot
        # workdir, and it takes up disk space, leading to out-of-disk errors.
        buildbot_workdir = properties.get('workdir')
        if buildbot_workdir:
            try:
                if os.path.exists(buildbot_workdir):
                    buildbot_workdir = os.path.realpath(buildbot_workdir)
                    cwd = os.path.realpath(os.getcwd())
                    if cwd.startswith(buildbot_workdir):
                        buildbot_workdir = cwd

                    LOGGER.info('Cleaning up buildbot workdir %r',
                                buildbot_workdir)

                    # The buildbot workdir is usually the current working
                    # directory, so do not remove it, but delete all of its
                    # contents. Deleting the current working directory of a
                    # running process may cause confusing errors.
                    for p in (os.path.join(buildbot_workdir, x)
                              for x in os.listdir(buildbot_workdir)):
                        LOGGER.info('Deleting %r', p)
                        chromium_utils.RemovePath(p)
            except Exception as e:
                # It's preferred that we keep going rather than fail the build
                # on optional cleanup.
                LOGGER.exception('Buildbot workdir cleanup failed: %s', e)

        # Should we use a CIPD pin?
        mastername = properties.get('mastername')
        cipd_pin = None
        if mastername:
            cipd_pin = _CIPD_PINS.get(mastername)
        if not cipd_pin:
            cipd_pin = _CIPD_PINS[None]

        cipd_path = os.path.join(basedir, '.remote_run_cipd')
        _install_cipd_packages(cipd_path,
                               cipd.CipdPackage('infra/recipes-py', cipd_pin))

        recipe_result_path = os.path.join(tempdir, 'recipe_result.json')
        recipe_cmd = [
            sys.executable,
            os.path.join(cipd_path, 'recipes.py'),
            '--verbose',
            'remote',
            '--repository',
            args.repository,
            '--revision',
            args.revision,
            '--workdir',
            os.path.join(tempdir, 'rw'),
            '--',
            '--verbose',
            'run',
            '--properties-file',
            properties_file,
            '--workdir',
            os.path.join(tempdir, 'w'),
            '--output-result-json',
            recipe_result_path,
            args.recipe,
        ]
        # If we bootstrap through logdog, the recipe command line gets written
        # to a temporary file and does not appear in the log.
        LOGGER.info('Recipe command line: %r', recipe_cmd)
        recipe_return_code = None
        try:
            bs = logdog_bootstrap.bootstrap(rt, args, basedir, tempdir,
                                            properties, recipe_cmd)

            LOGGER.info('Bootstrapping through LogDog: %s', bs.cmd)
            _ = _call(bs.cmd)
            recipe_return_code = bs.get_result()
        except logdog_bootstrap.NotBootstrapped as e:
            LOGGER.info('Not bootstrapped: %s', e.message)
        except logdog_bootstrap.BootstrapError as e:
            LOGGER.warning('Could not bootstrap LogDog: %s', e.message)
        except Exception as e:
            LOGGER.exception('Exception while bootstrapping LogDog.')
        finally:
            if recipe_return_code is None:
                LOGGER.info(
                    'Not using LogDog. Invoking `recipes.py` directly.')
                recipe_return_code = _call(recipe_cmd)

            # Try to open recipe result JSON. Any failure will result in an exception
            # and an infra failure.
            with open(recipe_result_path) as f:
                json.load(f)
        return recipe_return_code
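
main() decodes its --build-properties-gz and --factory-properties-gz arguments with chromium_utils.convert_gz_json_type, described above as "b64 gz JSON format". That helper is not shown in these examples; here is a hedged sketch of what such a conversion presumably does (base64-decode, gunzip, parse JSON) - the real implementation may differ.

import base64
import json
import zlib

def convert_gz_json_sketch(value):
    # 16 + MAX_WBITS tells zlib to expect a gzip wrapper around the payload.
    raw = zlib.decompress(base64.b64decode(value), 16 + zlib.MAX_WBITS)
    return json.loads(raw)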
# maps mastername (e.g. chromium.infra) as seen in the buildbot 'mastername'
# property to STAGING or CANARY.
#
# STAGING will get the STAGING_CIPD_VERSION, and CANARY will get 'latest'.
# 'mastername' values not in this map will get DEFAULT_CIPD_VERSION.
MASTER_VERSION = {
    'chromium.infra': STAGING,
    'chromium.infra.cron': STAGING,
}

# Auxiliary binary packages to add to PATH during CIPD installation.
AUX_BINARY_PACKAGES = {
    # Default (production) packages.
    None: (
        cipd.CipdPackage(
            name='infra/tools/luci/vpython/${platform}',
            version='git_revision:00f5394a90dfa98e53e191d5a6b543bce70c295f'),
        cipd.CipdPackage(
            name='infra/tools/git/${platform}',
            version='git_revision:a78b5f3658c0578a017db48df97d20ac09822bcd'),
    ),
    STAGING: (
        cipd.CipdPackage(
            name='infra/tools/luci/vpython/${platform}',
            version='git_revision:00f5394a90dfa98e53e191d5a6b543bce70c295f'),
        cipd.CipdPackage(
            name='infra/tools/git/${platform}',
            version='git_revision:a78b5f3658c0578a017db48df97d20ac09822bcd'),
    ),
    CANARY: (
        cipd.CipdPackage(name='infra/tools/luci/vpython/${platform}',