def _parse_build_set(bs_string):
  """Parses a buildset string to GerritChange or GitilesCommit.

  A port of
  https://chromium.googlesource.com/infra/luci/luci-go/+/fe4e304639d11ca00537768f8bfbf20ffecf73e6/buildbucket/buildset.go#105

  Returns None if the string does not match either format.
  """
  assert isinstance(bs_string, basestring)
  parts = bs_string.split('/')
  # Reject any buildset with an empty path component.
  if '' in parts:
    return None
  count = len(parts)

  # "patch/gerrit/<host>/<change>/<patchset>"
  if count == 5 and parts[:2] == ['patch', 'gerrit']:
    return common_pb2.GerritChange(
        host=parts[2], change=int(parts[3]), patchset=int(parts[4]))

  # "commit/gitiles/<host>/<project...>/+/<sha1>"
  if count >= 5 and parts[:2] == ['commit', 'gitiles']:
    if parts[-2] != '+' or not is_sha1_hex(parts[-1]):
      return None
    return common_pb2.GitilesCommit(
        host=parts[2],
        project='/'.join(parts[3:-2]),  # everything between host and '+'
        id=parts[-1],
    )

  return None
def ci_build_message(
    self,
    project='project',
    bucket='ci',  # shortname.
    builder='builder',
    git_repo=None,
    git_ref='refs/heads/master',
    revision='2d72510e447ab60a9728aeea2362d8be2cbd7789',
    build_number=0,
    build_id=8945511751514863184,
    tags=None,
    status=None,
):
  """Returns a typical buildbucket CI build scheduled by luci-scheduler."""
  repo = git_repo or self._default_git_repo(project)
  host, repo_name = util.parse_gitiles_repo_url(repo)
  assert host and repo_name, 'invalid repo %s' % repo

  # The commit the build runs against, derived from the repo URL.
  commit = common_pb2.GitilesCommit(
      host=host,
      project=repo_name,
      ref=git_ref,
      id=revision,
  )
  msg = build_pb2.Build(
      id=build_id,
      number=build_number,
      tags=tags or [],
      builder=build_pb2.BuilderID(
          project=project,
          bucket=bucket,
          builder=builder,
      ),
      created_by='user:[email protected]',
      create_time=timestamp_pb2.Timestamp(seconds=1527292217),
      input=build_pb2.Build.Input(gitiles_commit=commit),
  )
  if status:
    msg.status = common_pb2.Status.Value(status)
  return msg
def ensure_checkout(self, gclient_config=None, suffix=None, patch=True,
                    update_presentation=True, patch_root=None,
                    with_branch_heads=False, with_tags=False,
                    no_fetch_tags=False, refs=None, patch_oauth2=None,
                    oauth2_json=None, use_site_config_creds=None,
                    clobber=False, root_solution_revision=None,
                    rietveld=None, issue=None, patchset=None,
                    gerrit_no_reset=False, gerrit_no_rebase_patch_ref=False,
                    disable_syntax_validation=False, patch_refs=None,
                    ignore_input_commit=False, set_output_commit=False,
                    step_test_data=None, **kwargs):
  """Runs bot_update to check out the configured gclient solutions.

  Assembles the bot_update command line from the gclient config, the
  buildbucket input commit and the requested revisions, runs it, and
  post-processes its JSON output (properties, presentation, output commit,
  patch-failure reporting and the 'checkout' path).

  Args:
    gclient_config: The gclient configuration to use when running bot_update.
      If omitted, the current gclient configuration is used.
    no_fetch_tags: When true, the root git repo being checked out will not
      fetch any tags referenced from the references being fetched. When a
      repo has many references, it can become a performance bottleneck, so
      avoid tags if the checkout will not need them present.
    disable_syntax_validation: (legacy) Disables syntax validation for DEPS.
      Needed as migration paths for recipes dealing with older revisions,
      such as bisect.
    ignore_input_commit: if True, ignore api.buildbucket.gitiles_commit.
      Exists for historical reasons. Please do not use.
    set_output_commit: if True, mark the checked out commit as the
      primary output commit of this build, i.e. call
      api.buildbucket.set_output_gitiles_commit.
      In case of multiple repos, the repo is the one specified in
      api.buildbucket.gitiles_commit or the first configured solution.
      When sorting builds by commit position, this commit will be used.
      Requires falsy ignore_input_commit.
    step_test_data: a null function that returns test bot_update.py output.
      Use test_api.output_json to generate test data.

  Returns:
    The bot_update step result; its parsed JSON output is available via
    step_result.json.output.

  Raises:
    StepFailure: re-raised from the bot_update step itself, and also raised
      via a synthetic 'Patch failure' step when bot_update reports that the
      patch could not be downloaded or applied.
  """
  # Deprecated parameters are still accepted (so old call sites keep
  # working syntactically) but any non-None value is rejected.
  assert use_site_config_creds is None, "use_site_config_creds is deprecated"
  assert rietveld is None, "rietveld is deprecated"
  assert issue is None, "issue is deprecated"
  assert patchset is None, "patchset is deprecated"
  assert patch_oauth2 is None, "patch_oauth2 is deprecated"
  assert oauth2_json is None, "oauth2_json is deprecated"
  assert not (ignore_input_commit and set_output_commit)
  refs = refs or []
  # We can re-use the gclient spec from the gclient module, since all the
  # data bot_update needs is already configured into the gclient spec.
  cfg = gclient_config or self.m.gclient.c
  assert cfg is not None, (
      'missing gclient_config or forgot api.gclient.set_config(...) before?')

  # Construct our bot_update command. This must be inclusive of
  # everything required for bot_update to know:
  patch_root = patch_root or self.m.gclient.get_gerrit_patch_root(
      gclient_config=cfg)

  # Allow patched project's revision if necessary.
  # This is important for projects which are checked out as DEPS of the
  # gclient solution.
  self.m.gclient.set_patch_repo_revision(cfg)

  reverse_rev_map = self.m.gclient.got_revision_reverse_mapping(cfg)

  flags = [
      # What do we want to check out (spec/root/rev/reverse_rev_map).
      ['--spec-path', self.m.raw_io.input(
          self.m.gclient.config_to_pythonish(cfg))],
      ['--patch_root', patch_root],
      ['--revision_mapping_file', self.m.json.input(reverse_rev_map)],
      ['--git-cache-dir', cfg.cache_dir],
      ['--cleanup-dir', self.m.path['cleanup'].join('bot_update')],

      # Hookups to JSON output back into recipes.
      ['--output_json', self.m.json.output()],
  ]

  # How to find the patch, if any.
  if patch:
    repo_url = self.m.tryserver.gerrit_change_repo_url
    fetch_ref = self.m.tryserver.gerrit_change_fetch_ref
    target_ref = self.m.tryserver.gerrit_change_target_ref
    if repo_url and fetch_ref:
      flags.append([
          '--patch_ref',
          '%s@%s:%s' % (repo_url, target_ref, fetch_ref),
      ])
    if patch_refs:
      flags.extend(
          ['--patch_ref', patch_ref]
          for patch_ref in patch_refs)

  # Compute requested revisions.
  revisions = {}
  for solution in cfg.solutions:
    if solution.revision:
      revisions[solution.name] = solution.revision

  # HACK: ensure_checkout API must be redesigned so that we don't pass such
  # parameters. Existing semantics is too opinionated.
  in_commit = self.m.buildbucket.gitiles_commit
  in_commit_rev = in_commit.id or in_commit.ref
  if not ignore_input_commit and in_commit_rev:
    # Note: this is not entirely correct. build.input.gitiles_commit
    # definition says "The Gitiles commit to run against.".
    # However, here we ignore it if the config specified a revision.
    # This is necessary because existing builders rely on this behavior,
    # e.g. they want to force refs/heads/master at the config level.
    in_commit_repo_path = self._get_commit_repo_path(in_commit, cfg)
    revisions[in_commit_repo_path] = (
        revisions.get(in_commit_repo_path) or in_commit_rev)
    parsed_solution_urls = set(
        self.m.gitiles.parse_repo_url(s.url) for s in cfg.solutions)
    # Only fetch the input ref when the input commit's repo is one of the
    # configured solutions.
    if (in_commit.id and in_commit.ref
        and (in_commit.host, in_commit.project) in parsed_solution_urls):
      refs = [in_commit.ref] + refs

  # Guarantee that first solution has a revision.
  # TODO(machenbach): We should explicitly pass HEAD for ALL solutions
  # that don't specify anything else.
  first_sol = cfg.solutions[0].name
  revisions[first_sol] = revisions.get(first_sol) or 'HEAD'

  if cfg.revisions:
    # Only update with non-empty values. Some recipe might otherwise
    # overwrite the HEAD default with an empty string.
    revisions.update(
        (k, v) for k, v in cfg.revisions.items() if v)

  if cfg.solutions and root_solution_revision:
    revisions[first_sol] = root_solution_revision
  # Allow for overrides required to bisect into rolls.
  revisions.update(self._deps_revision_overrides)

  # Compute command-line parameters for requested revisions.
  # Also collect all fixed revisions to simulate them in the json output.
  # Fixed revision are the explicit input revisions of bot_update.py, i.e.
  # every command line parameter "--revision name@value".
  fixed_revisions = {}
  for name, revision in sorted(revisions.items()):
    fixed_revision = self.m.gclient.resolve_revision(revision)
    if fixed_revision:
      fixed_revisions[name] = fixed_revision
      if fixed_revision.upper() == 'HEAD':
        # Sync to correct destination ref if HEAD was specified.
        fixed_revision = self._destination_ref(cfg, name)
      # If we're syncing to a ref, we want to make sure it exists before
      # trying to check it out.
      if (fixed_revision.startswith('refs/') and
          # TODO(crbug.com/874501): fetching additional refs is currently
          # only supported for the root solution. We should investigate
          # supporting it for other dependencies.
          cfg.solutions and
          cfg.solutions[0].name == name):
        # Handle the "ref:revision" syntax, e.g.
        # refs/branch-heads/4.2:deadbeef
        refs.append(fixed_revision.split(':')[0])
      flags.append(['--revision', '%s@%s' % (name, fixed_revision)])

  for ref in refs:
    assert not ref.startswith('refs/remotes/'), (
        'The "refs/remotes/*" syntax is not supported.\n'
        'The "remotes" syntax is dependent on the way the local repo is '
        'configured, and while there are defaults that can often be '
        'assumed, there is no guarantee the mapping will always be done in '
        'a particular way.')

  # Add extra fetch refspecs.
  for ref in refs:
    flags.append(['--refs', ref])

  # Filter out flags that are None.
  # NOTE: when a flag's value is None, BOTH the option name and the value
  # are dropped from the command line (the whole pair is skipped).
  cmd = [item for flag_set in flags
         for item in flag_set if flag_set[1] is not None]

  if clobber:
    cmd.append('--clobber')
  if with_branch_heads or cfg.with_branch_heads:
    cmd.append('--with_branch_heads')
  if with_tags or cfg.with_tags:
    cmd.append('--with_tags')
  if gerrit_no_reset:
    cmd.append('--gerrit_no_reset')
  if no_fetch_tags:
    cmd.append('--no_fetch_tags')
  if gerrit_no_rebase_patch_ref:
    cmd.append('--gerrit_no_rebase_patch_ref')
  if disable_syntax_validation or cfg.disable_syntax_validation:
    cmd.append('--disable-syntax-validation')

  # Inject Json output for testing.
  # NOTE(review): first_sln duplicates first_sol computed above.
  first_sln = cfg.solutions[0].name
  step_test_data = step_test_data or (lambda: self.test_api.output_json(
      patch_root, first_sln, reverse_rev_map, self._fail_patch,
      fixed_revisions=fixed_revisions))

  name = 'bot_update'
  if not patch:
    name += ' (without patch)'
  if suffix:
    name += ' - %s' % suffix

  # Ah hah! Now that everything is in place, lets run bot_update!
  step_result = None
  try:
    # 87 and 88 are the 'patch failure' codes for patch download and patch
    # apply, respectively. We don't actually use the error codes, and instead
    # rely on emitted json to determine cause of failure.
    step_result = self(
        name, cmd, step_test_data=step_test_data,
        ok_ret=(0, 87, 88), **kwargs)
  except self.m.step.StepFailure as f:
    # Keep the failed step's result so the finally block below can still
    # post-process its JSON output, then re-raise.
    step_result = f.result
    raise
  finally:
    if step_result and step_result.json.output:
      result = step_result.json.output
      self._last_returned_properties = result.get('properties', {})

      if update_presentation:
        # Set properties such as got_revision.
        for prop_name, prop_value in (
            self.last_returned_properties.items()):
          step_result.presentation.properties[prop_name] = prop_value
      # Add helpful step description in the step UI.
      if 'step_text' in result:
        step_text = result['step_text']
        step_result.presentation.step_text = step_text

      # Set output commit of the build.
      if set_output_commit:
        # As of April 2019, got_revision describes the output commit,
        # the same commit that Build.output.gitiles_commit describes.
        # In particular, users tend to set got_revision to make Milo display
        # it. Derive output commit from got_revision.
        out_commit = common_pb2.GitilesCommit(
            id=self._last_returned_properties['got_revision'],
        )

        out_solution = reverse_rev_map['got_revision']
        out_manifest = result['manifest'][out_solution]
        # Sanity check: the manifest must agree with got_revision.
        assert out_manifest['revision'] == out_commit.id, (
            out_manifest, out_commit.id)

        out_commit.host, out_commit.project = (
            self.m.gitiles.parse_repo_url(out_manifest['repository'])
        )

        # Determine the output ref.
        got_revision_cp = self._last_returned_properties.get('got_revision_cp')
        in_rev = revisions.get(out_solution)
        if got_revision_cp:
          # If commit position string is available, read the ref from there.
          out_commit.ref, out_commit.position = (
              self.m.commit_position.parse(got_revision_cp))
        elif in_rev.startswith('refs/'):
          # If we were asked to check out a specific ref, use it as output
          # ref.
          out_commit.ref = in_rev
        elif in_rev == 'HEAD':
          # bot_update.py interprets HEAD as refs/heads/master
          out_commit.ref = 'refs/heads/master'
        elif out_commit.id == in_commit.id and in_commit.ref:
          # Derive output ref from the input ref.
          out_commit.ref = in_commit.ref
        else:  # pragma: no cover
          assert False, (
              'Unsupposed case. '
              'Call buildbucket.set_output_gitiles_commit directly.'
          )
        self.m.buildbucket.set_output_gitiles_commit(out_commit)

      # Set the "checkout" path for the main solution.
      # This is used by the Chromium module to figure out where to look for
      # the checkout.
      # If there is a patch failure, emit another step that said things
      # failed.
      if result.get('patch_failure'):
        return_code = result.get('patch_apply_return_code')
        patch_body = result.get('failed_patch_body')
        try:
          if return_code == 3:
            # This is download failure, hence an infra failure.
            with self.m.context(infra_steps=True):
              self.m.python.failing_step(
                  'Patch failure', 'Git reported a download failure')
          else:
            # This is actual patch failure.
            self.m.tryserver.set_patch_failure_tryjob_result()
            self.m.cq.set_do_not_retry_build()
            self.m.python.failing_step(
                'Patch failure', 'See attached log. Try rebasing?')
        except self.m.step.StepFailure as e:
          if patch_body:
            e.result.presentation.logs['patch error'] = (
                patch_body.splitlines())
          raise e

      # bot_update actually just sets root to be the folder name of the
      # first solution.
      if (result.get('did_run')
          and 'checkout' not in self.m.path
          and 'root' in result):
        co_root = result['root']
        cwd = self.m.context.cwd or self.m.path['start_dir']
        self.m.path['checkout'] = cwd.join(*co_root.split(self.m.path.sep))

  return step_result
def RunSteps(api):
  """Recipe test scenario exercising the buildbucket module API."""
  build = api.buildbucket.build
  bucket = build.builder.bucket
  if bucket == 'try':
    assert build.builder.project == 'proj'
    assert build.builder.builder == 'try-builder'
    change = build.input.gerrit_changes[0]
    assert '-review' in change.host
    assert build.input.gitiles_commit.id == 'a' * 40
    assert build.input.gitiles_commit.project == change.project
  elif bucket == 'ci':
    assert build.builder.project == 'proj-internal'
    assert build.builder.builder == 'ci-builder'
    commit = build.input.gitiles_commit
    assert commit.host == 'chrome-internal.googlesource.com'
    assert commit.project == 'repo'
    assert len(build.tags) == 2
    assert (build.tags[0].key, build.tags[0].value) == ('user_agent', 'cq')
    assert (build.tags[1].key, build.tags[1].value) == ('user_agent', 'recipe')
  else:
    return

  # Note: this is not needed when running on LUCI. Buildbucket will use the
  # default account associated with the task.
  api.buildbucket.use_service_account_key('some-fake-key.json')

  linux_params = {
      'builder_name': 'linux_perf_bisect',
      'properties': {
          'bisect_config': {
              'bad_revision': '351054',
              'bug_id': 537649,
              'command': ('src/tools/perf/run_benchmark -v '
                          '--browser=release --output-format=chartjson '
                          '--also-run-disabled-tests speedometer'),
              'good_revision': '351045',
              'gs_bucket': 'chrome-perf',
              'max_time_minutes': '20',
              'metric': 'Total/Total',
              'recipe_tester_name': 'linux_perf_bisect',
              'repeat_count': '10',
              'test_type': 'perf'
          },
      }
  }
  # Same parameters, targeting the mac bisector instead.
  mac_params = dict(linux_params, builder_name='mac_perf_bisect')

  example_bucket = 'master.user.username'
  put_build_result = api.buildbucket.put([
      {
          'bucket': example_bucket,
          'parameters': linux_params,
          'tags': {'master': 'overriden.master.url',
                   'builder': 'overriden_builder'},
      },
      {
          'bucket': example_bucket,
          'parameters': mac_params,
          'tags': {'master': 'someother.master.url',
                   'builder': 'some_builder'},
      },
  ])

  new_job_id = put_build_result.stdout['builds'][0]['id']
  get_build_result = api.buildbucket.get_build(new_job_id)
  if get_build_result.stdout['build']['status'] == 'SCHEDULED':
    api.buildbucket.cancel_build(new_job_id)

  # Setting values for expectations coverage only.
  api.buildbucket.set_buildbucket_host('cr-buildbucket-test.appspot.com')
  api.buildbucket.set_output_gitiles_commit(
      common_pb2.GitilesCommit(
          host='chromium.googlesource.com',
          project='infra/infra',
          ref='refs/heads/master',
          id='a' * 40,
          position=42,
      ),
  )
  api.step('build_url', cmd=None).presentation.step_text = (
      api.buildbucket.build_url())
  api.step('builder_cache', cmd=None).presentation.step_text = str(
      api.buildbucket.builder_cache_path)