def test_true_multiple(self):
    project = self.create_project()

    plan = self.create_plan(project, status=PlanStatus.active)
    self.create_option(item_id=plan.id, name='bazel.autogenerate', value='0')

    plan = self.create_plan(project, status=PlanStatus.active)
    self.create_option(item_id=plan.id, name='bazel.autogenerate', value='1')

    assert project_lib.contains_active_autogenerated_plan(project)
def test_true(self):
    project = self.create_project()
    plan = self.create_plan(project, status=PlanStatus.active)
    self.create_option(item_id=plan.id, name='bazel.autogenerate', value='1')

    assert project_lib.contains_active_autogenerated_plan(project)
def post(self, build_id):
    args = self.parser.parse_args()

    build = Build.query.options(
        joinedload('project', innerjoin=True),
        joinedload('author'),
        joinedload('source').joinedload('revision'),
    ).get(build_id)
    if build is None:
        return '', 404

    selective_testing_policy = SelectiveTestingPolicy.disabled
    build_message = None
    if args.selective_testing and project_lib.contains_active_autogenerated_plan(build.project):
        if not build.source.patch:
            selective_testing_policy, reasons = get_selective_testing_policy(
                build.project, build.source.revision_sha, None)
            if reasons:
                if selective_testing_policy is SelectiveTestingPolicy.disabled:
                    reasons = ["Selective testing was requested but not done because:"] + \
                        ['    ' + m for m in reasons]
                build_message = '\n'.join(reasons)
        else:
            # NOTE: for diff builds, it makes sense to just do selective testing,
            # since it will never become a parent build and will never be used to
            # calculate revision results.
            selective_testing_policy = SelectiveTestingPolicy.enabled

    collection_id = uuid.uuid4()
    new_build = create_build(
        project=build.project,
        collection_id=collection_id,
        label=build.label,
        target=build.target,
        message=build.message,
        author=build.author,
        source=build.source,
        cause=Cause.retry,
        selective_testing_policy=selective_testing_policy,
    )

    if build_message:
        message = BuildMessage(
            build=new_build,
            text=build_message,
        )
        db.session.add(message)
        db.session.commit()

    return '', 302, {'Location': '/api/0/builds/{0}/'.format(new_build.id.hex)}
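
# A minimal client-side sketch of exercising the retry handler above. The
# '/retry/' route suffix and the 'selective_testing' form field are assumptions
# drawn from the handler and the Location header it returns; the real route
# depends on how this resource is registered in the app's URL map.

import requests  # assumed available in the client environment


def retry_build(base_url, build_id, selective_testing=False):
    # POST triggers the retry; the handler answers 302 with the new build's URL
    resp = requests.post(
        '{0}/api/0/builds/{1}/retry/'.format(base_url, build_id),
        data={'selective_testing': '1' if selective_testing else '0'},
        allow_redirects=False,
    )
    resp.raise_for_status()  # a missing build comes back as a 404
    return resp.headers['Location']  # e.g. '/api/0/builds/<hex>/'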
def get(self, project_id):
    project = Project.get(project_id)
    if project is None:
        return '', 404

    last_build = Build.query.options(
        joinedload('author'),
        contains_eager('source'),
    ).join(
        Source, Build.source_id == Source.id,
    ).filter(
        Build.project == project,
        Build.status == Status.finished,
        *build_type.get_any_commit_build_filters()
    ).order_by(
        Build.date_created.desc(),
    ).first()

    if not last_build or last_build.result == Result.passed:
        last_passing_build = last_build
    else:
        last_passing_build = Build.query.options(
            joinedload('author'),
            contains_eager('source'),
        ).join(
            Source, Build.source_id == Source.id,
        ).filter(
            Build.project == project,
            Build.result == Result.passed,
            Build.status == Status.finished,
            *build_type.get_any_commit_build_filters()
        ).order_by(
            Build.date_created.desc(),
        ).first()

    options = dict(
        (o.name, o.value)
        for o in ProjectOption.query.filter(
            ProjectOption.project_id == project.id,
        )
    )
    for key, value in OPTION_DEFAULTS.iteritems():
        options.setdefault(key, value)

    data = self.serialize(project)
    data['lastBuild'] = last_build
    data['lastPassingBuild'] = last_passing_build
    data['repository'] = project.repository
    data['options'] = options
    data['stats'] = self._get_stats(project)
    data['containsActiveAutogeneratedPlan'] = project_lib.contains_active_autogenerated_plan(project)

    return self.respond(data)
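
# Illustrative response shape for the project details endpoint above. Field
# contents are produced by the registered serializers, so the values here are
# placeholders rather than real output:
#
# {
#     ...serialized project fields...,
#     "lastBuild": <build or null>,
#     "lastPassingBuild": <build or null>,
#     "repository": <repository>,
#     "options": {"<option name>": "<value>"},
#     "stats": <project stats>,
#     "containsActiveAutogeneratedPlan": <bool>
# }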
def post(self):
    """
    Create a new commit or diff build. The API roughly goes like this:

    1. Identify the project(s) to build for. This can be done by specifying
       ``project``, ``repository``, or ``repository[callsign]``. If a repository
       is specified somehow, then all projects for that repository are
       considered for building.

    2. Using the ``sha``, find the appropriate revision object. This may
       involve updating the repo.

    3. If ``patch`` is given, then apply the patch and mark this as a diff
       build. Otherwise, this is a commit build.

    4. If ``snapshot_id`` is given, verify that the snapshot can be used by
       all projects.

    5. If provided, apply project_whitelist, filtering out projects not in
       this whitelist.

    6. Based on the flag ``apply_project_files_trigger`` (see comment on the
       argument itself for default values), decide whether or not to filter out
       projects by file blacklist and whitelist.

    7. Attach metadata and create/ensure existence of a build for each project,
       depending on the flag ``ensure_only``.

    NOTE: In ensure-only mode, the collection_ids of the returned builds are
    not necessarily identical, as we give new builds new collection IDs and
    preserve the existing builds' collection IDs.

    NOTE: If ``patch`` is specified, ``sha`` is assumed to be the original
    base revision to apply the patch.

    Not relevant until we fix TODO: ``sha`` is **not** guaranteed to be the
    rev used to apply the patch. See ``find_green_parent_sha`` for the logic
    of identifying the correct revision.
    """
    args = self.parser.parse_args()

    if args.patch_file and args.ensure_only:
        return error("Ensure-only mode does not work with a diff build yet.",
                     problems=["patch", "ensure_only"])

    if not (args.project or args.repository or args['repository[phabricator.callsign]']):
        return error("Project or repository must be specified",
                     problems=["project", "repository",
                               "repository[phabricator.callsign]"])

    # read arguments
    if args.patch_data:
        try:
            patch_data = json.loads(args.patch_data)
        except Exception:
            return error("Invalid patch data (must be JSON dict)",
                         problems=["patch[data]"])

        if not isinstance(patch_data, dict):
            return error("Invalid patch data (must be JSON dict)",
                         problems=["patch[data]"])
    else:
        patch_data = None

    # 1. identify project(s)
    projects, repository = try_get_projects_and_repository(args)

    if not projects:
        return error("Unable to find project(s).")

    # read arguments
    label = args.label
    author = args.author
    message = args.message
    tag = args.tag
    snapshot_id = args.snapshot_id
    no_snapshot = args.no_snapshot

    cause = Cause[args.cause]

    if no_snapshot and snapshot_id:
        return error("Cannot specify snapshot with no_snapshot option")

    if not tag and args.patch_file:
        tag = 'patch'

    # 2. validate snapshot
    if snapshot_id:
        snapshot = Snapshot.query.get(snapshot_id)
        if not snapshot:
            return error("Unable to find snapshot.")
        if snapshot.status != SnapshotStatus.active:
            return error("Snapshot is in an invalid state: %s" % snapshot.status)
        for project in projects:
            plans = get_build_plans(project)
            for plan in plans:
                plan_options = plan.get_item_options()
                allow_snapshot = '1' == plan_options.get('snapshot.allow', '1') or plan.snapshot_plan
                if allow_snapshot and not SnapshotImage.get(plan, snapshot_id):
                    # We want to create a build using a specific snapshot but no image
                    # was found for this plan so fail.
                    return error("Snapshot cannot be applied to %s's %s" % (project.slug, plan.label))

    # 3. find revision
    try:
        revision = identify_revision(repository, args.sha)
    except MissingRevision:
        # if the default fails, we absolutely can't continue and the
        # client should send a valid revision
        return error("Unable to find commit %s in %s." % (args.sha, repository.url),
                     problems=['sha', 'repository'])

    # get default values for arguments
    if revision:
        if not author:
            author = revision.author
        if not label:
            label = revision.subject
        # only default the message if it's absolutely not set
        if message is None:
            message = revision.message
        sha = revision.sha
    else:
        sha = args.sha

    if not args.target:
        target = sha[:12]
    else:
        target = args.target[:128]

    if not label:
        if message:
            label = message.splitlines()[0]
        if not label:
            label = 'A homeless build'
    label = label[:128]

    # 4. Check for patch
    if args.patch_file:
        fp = StringIO()
        for line in args.patch_file:
            fp.write(line)
        patch_file = fp
    else:
        patch_file = None

    if patch_file:
        patch = Patch(
            repository=repository,
            parent_revision_sha=sha,
            diff=patch_file.getvalue(),
        )
        db.session.add(patch)
    else:
        patch = None

    project_options = ProjectOptionsHelper.get_options(projects, ['build.file-whitelist'])

    # mark as commit or diff build
    if not patch:
        is_commit_build = True
    else:
        is_commit_build = False

    apply_project_files_trigger = args.apply_project_files_trigger
    if apply_project_files_trigger is None:
        apply_project_files_trigger = args.apply_file_whitelist
    if apply_project_files_trigger is None:
        if is_commit_build:
            apply_project_files_trigger = False
        else:
            apply_project_files_trigger = True

    if apply_project_files_trigger:
        if patch:
            diff_parser = DiffParser(patch.diff)
            files_changed = diff_parser.get_changed_files()
        elif revision:
            try:
                files_changed = _get_revision_changed_files(repository, revision)
            except MissingRevision:
                return error("Unable to find commit %s in %s." % (args.sha, repository.url),
                             problems=['sha', 'repository'])
        else:
            # the only way that revision can be null is if this repo does not have a vcs backend
            logging.warning('Revision and patch are both None for sha %s. This is because the repo %s does not have a VCS backend.', sha, repository.url)
            files_changed = None
    else:
        # we won't be applying file whitelist, so there is no need to get the list of changed files.
        files_changed = None

    collection_id = uuid.uuid4()

    builds = []
    for project in projects:
        plan_list = get_build_plans(project)
        if not plan_list:
            logging.warning('No plans defined for project %s', project.slug)
            continue

        # 5. apply project whitelist as appropriate
        if args.project_whitelist is not None and project.slug not in args.project_whitelist:
            logging.info('Project %s is not in the supplied whitelist', project.slug)
            continue

        forced_sha = sha
        # TODO(dcramer): find_green_parent_sha needs to take branch
        # into account
        # if patch_file:
        #     forced_sha = find_green_parent_sha(
        #         project=project,
        #         sha=sha,
        #     )

        # 6. apply file whitelist as appropriate
        diff = None
        if patch is not None:
            diff = patch.diff
        if (
            apply_project_files_trigger and
            files_changed is not None and
            not files_changed_should_trigger_project(
                files_changed, project, project_options[project.id], sha, diff)
        ):
            logging.info('Changed files do not trigger build for project %s', project.slug)
            continue

        # 7. create/ensure build
        build_message = None
        selective_testing_policy = SelectiveTestingPolicy.disabled
        if args.selective_testing and project_lib.contains_active_autogenerated_plan(project):
            if is_commit_build:
                selective_testing_policy, reasons = get_selective_testing_policy(project, sha, diff)
                if reasons:
                    if selective_testing_policy is SelectiveTestingPolicy.disabled:
                        reasons = ["Selective testing was requested but not done because:"] + \
                            ['    ' + m for m in reasons]
                    build_message = '\n'.join(reasons)
            else:
                # NOTE: for diff builds, it makes sense to just do selective testing,
                # since it will never become a parent build and will never be used to
                # calculate revision results.
                selective_testing_policy = SelectiveTestingPolicy.enabled

        if args.ensure_only:
            potentials = list(Build.query.filter(
                Build.project_id == project.id,
                Build.source.has(revision_sha=sha, patch=patch),
            ).order_by(
                Build.date_created.desc()  # newest first
            ).limit(1))
            if len(potentials) == 0:
                builds.append(create_build(
                    project=project,
                    collection_id=collection_id,
                    sha=forced_sha,
                    target=target,
                    label=label,
                    message=message,
                    author=author,
                    patch=patch,
                    source_data=patch_data,
                    tag=tag,
                    cause=cause,
                    snapshot_id=snapshot_id,
                    no_snapshot=no_snapshot,
                    selective_testing_policy=selective_testing_policy,
                ))
            else:
                builds.append(potentials[0])
        else:
            builds.append(create_build(
                project=project,
                collection_id=collection_id,
                sha=forced_sha,
                target=target,
                label=label,
                message=message,
                author=author,
                patch=patch,
                source_data=patch_data,
                tag=tag,
                cause=cause,
                snapshot_id=snapshot_id,
                no_snapshot=no_snapshot,
                selective_testing_policy=selective_testing_policy,
            ))
        if build_message:
            message = BuildMessage(
                build=builds[-1],
                text=build_message,
            )
            db.session.add(message)
            db.session.commit()

    return self.respond(builds)
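
# The handler above consumes get_selective_testing_policy as returning a
# (SelectiveTestingPolicy, reasons) pair, where reasons is a list of
# human-readable strings; when the policy comes back disabled, the reasons are
# prefixed and attached to the new build as a BuildMessage. A hypothetical stub
# that satisfies just that contract (the real policy logic lives elsewhere):

def get_selective_testing_policy_stub(project, sha, diff):
    reasons = []
    if not project_lib.contains_active_autogenerated_plan(project):
        # hypothetical reason text; the real checks are more involved
        reasons.append('project %s has no active autogenerated plan' % project.slug)
        return SelectiveTestingPolicy.disabled, reasons
    return SelectiveTestingPolicy.enabled, reasons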
def test_false(self):
    project = self.create_project()
    assert not project_lib.contains_active_autogenerated_plan(project)
def test_false_inactive(self):
    project = self.create_project()
    plan = self.create_plan(project, status=PlanStatus.inactive)
    self.create_option(item_id=plan.id, name='bazel.autogenerate', value='1')

    assert not project_lib.contains_active_autogenerated_plan(project)
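
# A minimal sketch of what these tests exercise, inferred from the assertions:
# project_lib.contains_active_autogenerated_plan(project) should return True
# iff the project has at least one *active* plan whose 'bazel.autogenerate'
# option is '1'. The real implementation lives in project_lib and may query
# differently; the Plan and ItemOption model names here follow the test
# helpers and are assumptions.

def contains_active_autogenerated_plan_sketch(project):
    # only active plans count (see test_false_inactive above)
    plans = Plan.query.filter(
        Plan.project_id == project.id,
        Plan.status == PlanStatus.active,
    )
    for plan in plans:
        # an autogenerated plan is marked by its 'bazel.autogenerate' option
        option = ItemOption.query.filter(
            ItemOption.item_id == plan.id,
            ItemOption.name == 'bazel.autogenerate',
        ).first()
        if option is not None and option.value == '1':
            return True
    return False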
def post_impl(self):
    """
    Notify Changes of a newly created diff.

    Depending on system configuration, this may create 0 or more new builds,
    and the resulting response will be a list of those build objects.
    """
    # we manually check for arg presence here so we can send a more specific
    # error message to the user (rather than a plain 400)
    args = self.parser.parse_args()
    if not args.repository:
        # No need to postback a comment for this
        statsreporter.stats().incr("diffs_repository_not_found")
        return error("Repository not found")

    repository = args.repository

    projects = list(Project.query.options(
        subqueryload_all('plans'),
    ).filter(
        Project.status == ProjectStatus.active,
        Project.repository_id == repository.id,
    ))

    # no projects bound to repository
    if not projects:
        return self.respond([])

    options = dict(
        db.session.query(
            ProjectOption.project_id, ProjectOption.value
        ).filter(
            ProjectOption.project_id.in_([p.id for p in projects]),
            ProjectOption.name.in_([
                'phabricator.diff-trigger',
            ])
        )
    )

    # Filter out projects that aren't configured to run builds off of diffs
    # - Diff trigger disabled
    # - No build plans
    projects = [
        p for p in projects
        if options.get(p.id, '1') == '1' and get_build_plans(p)
    ]

    if not projects:
        return self.respond([])

    statsreporter.stats().incr('diffs_posted_from_phabricator')

    label = args.label[:128]
    author = args.author
    message = args.message
    sha = args.sha
    target = 'D%s' % args['phabricator.revisionID']

    try:
        identify_revision(repository, sha)
    except MissingRevision:
        # This may just be a broken request (which is why we respond with a 400) but
        # it also might indicate Phabricator and Changes being out of sync somehow,
        # so we err on the side of caution and log it as an error.
        logging.error("Diff %s was posted for an unknown revision (%s, %s)",
                      target, sha, repository.url)
        # We should postback since this can happen if a user diffs dependent revisions
        statsreporter.stats().incr("diffs_missing_base_revision")
        return self.postback_error(
            "Unable to find base revision {revision} in {repo} on Changes. Some possible reasons:\n"
            " - You may be working on multiple stacked diffs in your local repository.\n"
            "   {revision} only exists in your local copy. Changes thus cannot apply your patch\n"
            " - If you are sure that's not the case, it's possible you applied your patch to an extremely\n"
            "   recent revision which Changes hasn't picked up yet. Retry in a minute\n".format(
                revision=sha,
                repo=repository.url,
            ),
            target,
            problems=['sha', 'repository'])

    source_data = {
        'phabricator.buildTargetPHID': args['phabricator.buildTargetPHID'],
        'phabricator.diffID': args['phabricator.diffID'],
        'phabricator.revisionID': args['phabricator.revisionID'],
        'phabricator.revisionURL': args['phabricator.revisionURL'],
    }

    patch = Patch(
        repository=repository,
        parent_revision_sha=sha,
        diff=''.join(line.decode('utf-8') for line in args.patch_file),
    )
    db.session.add(patch)

    source = Source(
        patch=patch,
        repository=repository,
        revision_sha=sha,
        data=source_data,
    )
    db.session.add(source)

    phabricatordiff = try_create(PhabricatorDiff, {
        'diff_id': args['phabricator.diffID'],
        'revision_id': args['phabricator.revisionID'],
        'url': args['phabricator.revisionURL'],
        'source': source,
    })
    if phabricatordiff is None:
        logging.warning("Diff %s, Revision %s already exists",
                        args['phabricator.diffID'], args['phabricator.revisionID'])
        # No need to inform user about this explicitly
        statsreporter.stats().incr("diffs_already_exists")
        return error("Diff already exists within Changes")

    project_options = ProjectOptionsHelper.get_options(projects, ['build.file-whitelist'])
    diff_parser = DiffParser(patch.diff)
    files_changed = diff_parser.get_changed_files()

    collection_id = uuid.uuid4()
    builds = []
    for project in projects:
        plan_list = get_build_plans(project)
        # We already filtered out empty build plans
        assert plan_list, ('No plans defined for project {}'.format(project.slug))

        if not files_changed_should_trigger_project(
                files_changed, project, project_options[project.id], sha, diff=patch.diff):
            logging.info('No changed files matched project trigger for project %s',
                         project.slug)
            continue

        selective_testing_policy = SelectiveTestingPolicy.disabled
        if args.selective_testing and project_lib.contains_active_autogenerated_plan(project):
            selective_testing_policy = SelectiveTestingPolicy.enabled

        builds.append(create_build(
            project=project,
            collection_id=collection_id,
            sha=sha,
            target=target,
            label=label,
            message=message,
            author=author,
            patch=patch,
            tag="phabricator",
            selective_testing_policy=selective_testing_policy,
        ))

    # This is the counterpoint to the above 'diffs_posted_from_phabricator';
    # at this point we've successfully processed the diff, so comparing this
    # stat to the above should give us the phabricator diff failure rate.
    statsreporter.stats().incr('diffs_successfully_processed_from_phabricator')

    return self.respond(builds)
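
# The handler above relies on try_create(PhabricatorDiff, {...}) returning
# None when a row with the same unique keys already exists. A sketch of that
# create-unless-duplicate pattern, assuming a unique constraint on the model
# and SQLAlchemy's IntegrityError (the real helper lives in the db utilities):

from sqlalchemy.exc import IntegrityError


def try_create_sketch(model, where):
    instance = model(**where)
    try:
        # flush inside a savepoint so a duplicate doesn't poison the session
        with db.session.begin_nested():
            db.session.add(instance)
    except IntegrityError:
        # a concurrent or earlier insert already created this row
        return None
    return instance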