def RunSteps(api):
  """Exercises crrev conversions between commit positions and hashes.

  If 'commit_position' is present in the input properties, resolves it to a
  commit hash; if 'commit_hash' is present, resolves it to a commit position.
  A ValueError from either conversion is re-raised as a StepFailure so the
  build fails with a readable message instead of an uncaught exception.
  """
  # Membership tests go directly against the mapping; '.keys()' is redundant.
  if 'commit_position' in api.properties:
    cp = api.properties['commit_position']
    try:
      api.crrev.to_commit_hash(cp)
    except ValueError:
      raise recipe_api.StepFailure('Invalid commit position: %s' % cp)
  if 'commit_hash' in api.properties:
    sha = api.properties['commit_hash']
    try:
      api.crrev.to_commit_position(sha)
    except ValueError:
      raise recipe_api.StepFailure('Invalid commit hash: %s' % sha)
def get_footers(self, patch_text=None):
  """Retrieves machine-readable footers from the patch description.

  Footers are key/value tags embedded in commit messages; see the
  git-footers documentation for the format.

  Args:
    patch_text: the description to parse. When None, the description is
      fetched from Gerrit or Rietveld depending on the patch storage.
  """
  if patch_text is None:
    props = self.m.properties
    if self.is_gerrit_issue:
      patch_text = self.m.gerrit.get_change_description(
          props['patch_gerrit_url'], props['patch_issue'], props['patch_set'])
    elif self.can_apply_issue:
      issue_url = '%s/%s' % (props['rietveld'].rstrip('/'), props['issue'])
      patch_text = self.m.git_cl.get_description(
          patch_url=issue_url, codereview='rietveld').stdout
    else:  # pragma: no cover
      raise recipe_api.StepFailure('Unknown patch storage.')
  # Hand the raw description to git_footers.py and return its JSON output.
  parse_step = self.m.python(
      'parse description',
      self.package_repo_resource('git_footers.py'),
      args=['--json', self.m.json.output()],
      stdin=self.m.raw_io.input(data=patch_text))
  return parse_step.json.output
def RunSteps(api):
  """Exercises the commit_position module: hash/position resolution plus
  parsing and round-trip reconstruction of a commit position string."""
  props = api.properties

  # Resolve a commit position to a hash, if requested.
  if 'revision_to_resolve' in props:
    api.commit_position.chromium_hash_from_commit_position(
        props['revision_to_resolve'])
  # Resolve a hash to a commit position, if requested.
  if 'hash_to_resolve' in props:
    api.commit_position.chromium_commit_position_from_hash(
        props['hash_to_resolve'])

  commit_pos = props['cp']
  expected_revision = props.get('revision')
  expected_branch = props.get('branch')

  # Parse the branch component of a valid commit position.
  try:
    parsed_branch = api.commit_position.parse_branch(commit_pos)
  except ValueError:
    raise recipe_api.StepFailure(
        "Failed to parse branch from: %s" % (commit_pos,))
  api.step('test branch parse', ['/bin/echo', parsed_branch])
  assert parsed_branch == expected_branch, (
      "Invalid parsed branch: %s" % (parsed_branch,))

  # Parse the revision component.
  parsed_revision = api.commit_position.parse_revision(commit_pos)
  api.step('test revision parse', ['/bin/echo', parsed_revision])
  assert parsed_revision == expected_revision, (
      "Invalid parsed revision: %s" % (parsed_revision,))

  # Rebuilding from the parsed parts must give back the original input.
  rebuilt = api.commit_position.construct(parsed_branch, parsed_revision)
  api.step('test construction', ['/bin/echo', rebuilt])
  assert rebuilt == commit_pos, "Construction failed: %s" % (rebuilt,)
def run_tryjob(self, patches_raw, rietveld, issue, patchset, patch_project):
  """Runs recipe simulation tests for every project affected by a patch.

  Checks out the infra solution, computes which projects (and their
  downstream dependents) are affected by the given patch, runs each
  project's simulation tests, and fails the build if any project whose
  failure should block the CL fails.

  Args:
    patches_raw: raw patch specification, parsed via parse_patches.
    rietveld: Rietveld server URL for the patch.
    issue: codereview issue number.
    patchset: codereview patchset number.
    patch_project: the project the patch applies to.

  Raises:
    StepFailure: if one or more projects fail their simulation tests.
  """
  patches = parse_patches(
      self.m.python.failing_step, patches_raw, rietveld, issue, patchset,
      patch_project)
  root_dir = self.m.path['slave_build']

  # Needed to set up the infra checkout, for _python
  self.m.gclient.set_config('infra')
  self.m.gclient.c.solutions[0].revision = 'origin/master'
  self.m.gclient.checkout()
  self.m.gclient.runhooks()

  url_mapping = self.m.luci_config.get_projects()

  # TODO(martiniss): use luci-config smarter; get recipes.cfg directly, rather
  # than in two steps.
  # luci config project name to recipe config namedtuple
  recipe_configs = {}

  # List of all the projects we care about testing. luci-config names
  all_projects = set(p for p in url_mapping if p in PROJECTS_TO_TRY)

  recipe_configs = {p: self._get_project_config(p) for p in all_projects}

  deps, downstream_projects = get_deps_info(all_projects, recipe_configs)
  # Maps project name -> whether its failure should fail this build.
  should_fail_build_mapping = self.get_fail_build_info(
      downstream_projects, patches)

  projs_to_test, locations = self._checkout_projects(
      root_dir, url_mapping, deps, downstream_projects, patches)

  bad_projects = []
  for proj in projs_to_test:
    deps_locs = {dep: locations[dep] for dep in deps[proj]}

    try:
      result = self.simulation_test(
          proj, recipe_configs[proj], locations[proj], deps_locs)
    except recipe_api.StepFailure as f:
      # Record the failure but keep testing the remaining projects; only
      # projects marked as build-blocking fail the overall build.
      result = f.result
      if should_fail_build_mapping.get(proj, True):
        bad_projects.append(proj)
    finally:
      # Annotate the step (pass or fail) with a link to the continuous
      # builder for this project, if one is known.
      link = PROJECT_TO_CONTINUOUS_WATERFALL.get(proj)
      if link:
        result.presentation.links['reference builder'] = link
      else:
        result.presentation.links[
            'no reference builder; file a bug to get one?'] = (
                FILE_BUG_FOR_CONTINUOUS_LINK)

  if bad_projects:
    raise recipe_api.StepFailure(
        "One or more projects failed tests: %s" % (','.join(bad_projects)))
def run(self, root):
  """Runs web-component-tester over the given root directory.

  Fails immediately on non-Linux platforms, since the test is driven
  through xvfb-run (Linux-only).

  Args:
    root: directory passed to `wct test --root`.
  """
  if not self.m.platform.is_linux:
    raise recipe_api.StepFailure('WCT only runs on Linux.')

  packages_dir = self.m.path['start_dir'].join('packages')
  node_bin_dir = self.m.path['start_dir'].join('packages', 'bin')
  wct_binary = packages_dir.join(
      'node_modules', 'web-component-tester', 'bin', 'wct')

  # Prepend the bundled node binaries to PATH so WCT can find them.
  path_value = self.m.path.pathsep.join([str(node_bin_dir), '%(PATH)s'])
  with self.m.context(env={'PATH': path_value}):
    command = [
        'xvfb-run', '-a', wct_binary, 'test',
        '--root', root,
        '--verbose', '--simpleOutput',
        '--browsers', 'chrome',
    ]
    self.m.step('Run WCT', command)
def RunSteps(api, properties):
  """Builds, tags and uploads container images described by *.yaml manifests.

  Validates input properties, checks out the target project (committed code
  in CI mode, a pending CL in CL mode), discovers manifests, and runs
  `cloudbuildhelper build` for each. Failures are collected so a single bad
  manifest does not abort the rest of the batch.
  """
  try:
    _validate_props(properties)
  except ValueError as exc:
    raise recipe_api.InfraFailure('Bad input properties: %s' % exc)

  # Checkout either the committed code or a pending CL, depending on the
  # mode. This also calculates metadata (labels, tags) to apply to images
  # built from this code.
  mode = properties.mode
  if mode == PROPERTIES.MODE_CI:
    co, meta = _checkout_ci(api, properties.project)
  elif mode == PROPERTIES.MODE_CL:
    co, meta = _checkout_cl(api, properties.project)
  else:
    raise recipe_api.InfraFailure(
        '%s is not implemented yet' % PROPERTIES.Mode.Name(mode))
  co.gclient_runhooks()

  # Full paths to every *.yaml manifest we need to build.
  manifests = _discover_manifests(api, co.path, properties.manifests)
  if not manifests:  # pragma: no cover
    raise recipe_api.InfraFailure('Found no manifests to build')

  failed = []
  with co.go_env():
    # Use 'cloudbuildhelper' that comes with the infra checkout (it's in
    # PATH), to make sure builders use same version as developers.
    api.cloudbuildhelper.command = 'cloudbuildhelper'
    # Report the exact version we picked up from the infra checkout.
    api.cloudbuildhelper.report_version()

    # Build, tag and upload corresponding images.
    # TODO(vadimsh): Run this in parallel when it's possible.
    for manifest in manifests:
      try:
        api.cloudbuildhelper.build(
            manifest=manifest,
            canonical_tag=meta.canonical_tag,
            build_id=api.buildbucket.build_url(),
            infra=properties.infra,
            labels=meta.labels,
            tags=meta.tags,
        )
      except api.step.StepFailure:
        failed.append(api.path.basename(manifest))

  if failed:
    raise recipe_api.StepFailure('Failed to build: %s' % ', '.join(failed))
def get_footers(self, patch_text=None):
  """Retrieves footers from the patch description.

  Footers are machine readable tags embedded in commit messages. See
  git-footers documentation for more information.

  Args:
    patch_text: the description to parse. When None, it is fetched from
      Rietveld (the only patch storage supported here).

  Raises:
    StepFailure: if the patch is not stored in Rietveld.
  """
  if patch_text is None:
    if not self.can_apply_issue:  # pragma: no cover
      raise recipe_api.StepFailure(
          "Cannot get tags from gerrit yet.")
    # rstrip (not strip) so only the trailing slash is removed from the
    # server URL, consistent with the other get_footers implementation.
    patch = (self.m.properties['rietveld'].rstrip('/') + '/' +
             str(self.m.properties['issue']))
    patch_text = self.m.git_cl.get_description(
        patch=patch, codereview='rietveld').stdout
  # Hand the raw description to git_footers.py and return its JSON output.
  result = self.m.python(
      'parse description',
      self.package_repo_resource('git_footers.py'),
      args=['--json', self.m.json.output()],
      stdin=self.m.raw_io.input(data=patch_text))
  return result.json.output
def space_usage(self, path=None, warning_level=None, previous_result=None,
                can_fail_build=False, name=None, **kwargs):
  """Displays disk space usage.

  Does not support Windows yet, does not emit steps.
  Prints disk space usage in step log and step text.

  Args:
    path (str): path mapped to the disk. Defaults to [SLAVE_BUILD].
    warning_level (float): a value from 0 to 1.0. If usage reaches this
      level, mark the step as WARNING. Defaults to 0.9.
    previous_result (dict): previous result of space_usage call. If passed,
      delta is displayed in step text.
    can_fail_build (bool): if True, a failure to obtain disk info raises a
      StepFailure instead of being swallowed.
    name (str): step name. Defaults to "disk space usage".

  Returns:
    A dict with disk usage info or None if step fails. Dict keys:
      * capacity (float): disk capacity, in MiB.
      * used (float): disk usage, in MiB.
  """
  path = path or self.m.path['slave_build']
  name = name or 'disk space usage'
  warning_level = warning_level or 0.9
  kwargs.setdefault(
      'step_test_data',
      lambda: self.m.json.test_api.output_stream(
          self.test_api.space_usage_result()))

  if self.m.platform.is_win:
    # Not supported. Feel free to implement.
    return

  step = None
  try:
    # statvfs.py prints capacity/used in bytes as JSON on stdout.
    step = self.m.python(
        name,
        self.resource('statvfs.py'),
        stdout=self.m.json.output(),
        args=[path],
        **kwargs)
    # Convert bytes -> MiB.
    capacity_mb = step.stdout['capacity'] / 1024.0 / 1024.0
    used_mb = step.stdout['used'] / 1024.0 / 1024.0
    percent = used_mb / capacity_mb
    # Step text shows GiB for readability; the returned dict stays in MiB.
    step.presentation.step_text = '%.2f/%.2f GiB (%d%%) used' % (
        used_mb / 1024.0, capacity_mb / 1024.0, percent * 100)
    if percent >= warning_level:
      step.presentation.status = self.m.step.WARNING
    if previous_result:
      step.presentation.step_text += '. Delta: %+.2f MiB' % (
          used_mb - previous_result['used'])
    return {
        'capacity': capacity_mb,
        'used': used_mb,
    }
  except Exception as ex:
    # Do not fail entire build because of a disk space step failure.
    if step:
      step.presentation.logs['exception'] = ['%r' % ex]
      step.presentation.status = self.m.step.WARNING
    if can_fail_build:
      raise recipe_api.StepFailure('Could not get disk info: %s' % ex)
    return