def post(self):
    """Create a new project bound to an existing repository.

    Requires a logged-in user with permission on the (possibly derived) slug.
    """
    user = get_current_user()
    if user is None:
        return error('Not logged in.', http_code=401)

    args = self.post_parser.parse_args()
    # Fall back to a slugified project name when no explicit slug was given.
    slug = str(args.slug or args.name.replace(' ', '-').lower())
    if not user_has_project_permission(user, slug):
        return error(
            'User does not have permission to create a project with slug {}.'.format(slug),
            http_code=403)

    existing = Project.query.filter(
        Project.slug == slug,
    ).first()
    if existing:
        return error('Project with slug {} already exists.'.format(slug),
                     http_code=400)

    repository = Repository.get(args.repository)
    if repository is None:
        return error('Repository with url {} does not exist.'.format(args.repository),
                     http_code=400)

    project = Project(
        name=args.name,
        slug=slug,
        repository=repository,
    )
    db.session.add(project)
    db.session.commit()
    return self.respond(project)
def get(self, project_id):
    """Return flaky-test chart data for a project on a given date.

    Accepts an optional ``date`` arg (YYYY-MM-DD); otherwise defaults to the
    most recent date for which the flaky-test cron has plausibly run.
    """
    project = Project.get(project_id)
    if not project:
        return error("Project not found", http_code=404)
    args = self.parser.parse_args()
    if args.date:
        try:
            query_date = datetime.strptime(args.date, '%Y-%m-%d').date()
        except ValueError:
            # Previously a bare `except:`, which also swallowed unrelated
            # failures (even KeyboardInterrupt); strptime signals a malformed
            # date string via ValueError, so catch only that.
            return error('Can\'t parse date "%s"' % (args.date))
    else:
        # This `7` is hard-coded to match the code in config.py which kicks
        # off the cron job 7 hours past midnight GMT (which corresponds to
        # midnight PST)
        delta = timedelta(days=2 if datetime.utcnow().hour < 7 else 1)
        query_date = datetime.utcnow().date() - delta
    data = {
        'date': str(query_date),
        'chartData': self.get_chart_data(project.id, query_date),
        'flakyTests': self.get_flaky_tests(project.id, query_date)
    }
    return self.respond(data)
def post(self, node_id):
    """Toggle a Jenkins node's offline status on its master.

    Without ``toggle`` in the args this degrades to a plain GET.
    """
    args = self.get_parser.parse_args()
    if not args.toggle:
        return self.get(node_id)
    node = Node.query.get(node_id)
    if node is None:
        return error('Node not found.', ['node_id'], 404)
    if not node.label:
        return error('Node does not contain a label.', ['node_id'], 404)
    user = get_current_user()
    if user is None:
        return error('User is not logged in.', ['user'], 401)
    master = self.get_master(node_id)
    if not master:
        return error('Node master not found.', ['node_id'], 404)
    toggle_url = '%s/toggleOffline' % (self.get_jenkins_url(master, node.label))
    timestamp = datetime.utcnow()
    data = {
        'offlineMessage': '[changes] Disabled by %s at %s' % (user.email, timestamp)
    }
    # timeout added for consistency with the other toggle endpoints in this
    # codebase; without one, a hung Jenkins master would block this request
    # indefinitely.
    response = requests.Session().post(toggle_url, data=data, timeout=10)
    if response.status_code != 200:
        logging.warning('Unable to toggle offline status (%s)' % (toggle_url))
    return self.respond_status(node, master)
def post(self):
    """Create a project for a known repository; 403 unless the current user
    may create a project under the requested slug."""
    user = get_current_user()
    if user is None:
        return error('Not logged in.', http_code=401)

    args = self.post_parser.parse_args()
    # Derive a slug from the display name if one wasn't supplied explicitly.
    slug = str(args.slug or args.name.replace(' ', '-').lower())
    if not user_has_project_permission(user, slug):
        return error(
            'User does not have permission to create a project with slug {}.'
            .format(slug),
            http_code=403)

    duplicate = Project.query.filter(Project.slug == slug).first()
    if duplicate:
        return error('Project with slug {} already exists.'.format(slug),
                     http_code=400)

    repo = Repository.get(args.repository)
    if repo is None:
        return error(
            'Repository with url {} does not exist.'.format(args.repository),
            http_code=400)

    new_project = Project(
        name=args.name,
        slug=slug,
        repository=repo,
    )
    db.session.add(new_project)
    db.session.commit()
    return self.respond(new_project)
def post(self, node_id):
    """Flip a Jenkins node between online and offline, recording who did it."""
    args = self.get_parser.parse_args()
    if not args.toggle:
        # No toggle requested; behave exactly like the read-only endpoint.
        return self.get(node_id)

    node = Node.query.get(node_id)
    if node is None:
        return error('Node not found.', ['node_id'], 404)
    if not node.label:
        return error('Node does not contain a label.', ['node_id'], 404)

    user = get_current_user()
    if user is None:
        return error('User is not logged in.', ['user'], 401)

    build_master = self.get_master(node_id)
    if not build_master:
        return error('Node master not found.', ['node_id'], 404)

    url = '%s/toggleOffline' % (self.get_jenkins_url(build_master, node.label))
    now = datetime.utcnow()
    payload = {
        'offlineMessage': '[changes] Disabled by %s at %s' % (user.email, now)
    }
    resp = requests.Session().post(url, data=payload, timeout=10)
    if resp.status_code != 200:
        logging.warning('Unable to toggle offline status (%s)' % (url))
    return self.respond_status(node, build_master)
def post(self, step_id):
    """Update an existing build step (implementation, data, order, options)."""
    step = Step.query.get(step_id)
    if step is None:
        return error("step not found", http_code=404)

    args = self.parser.parse_args()
    if args.implementation is not None:
        step.implementation = args.implementation

    if args.data is not None:
        try:
            data = json.loads(args.data)
        except ValueError as err:
            return error("invalid JSON: %s" % err)
        if not isinstance(data, dict):
            return error("data must be a JSON mapping")
        impl_cls = step.get_implementation(load=False)
        if impl_cls is None:
            return error("unable to load build step implementation")
        try:
            # XXX(dcramer): It's important that we deepcopy data so any
            # mutations within the BuildStep don't propagate into the db
            impl_cls(**deepcopy(data))
        except Exception as exc:
            return error(
                "unable to create build step mapping provided data: %s" % exc)
        step.data = data

    if args.order is not None:
        step.order = args.order

    step.date_modified = datetime.utcnow()
    db.session.add(step)

    # Touching the step also bumps the parent plan's modification time.
    plan = step.plan
    plan.date_modified = step.date_modified
    db.session.add(plan)

    for option_name in STEP_OPTIONS.keys():
        option_value = args.get(option_name)
        if option_value is None:
            continue
        create_or_update(ItemOption, where={
            'item_id': step.id,
            'name': option_name,
        }, values={
            'value': option_value,
        })

    db.session.commit()
    return self.respond(step)
def get(self, project_id):
    """Paginated commit listing for a project, with associated builds."""
    project = Project.get(project_id)
    if not project:
        return error('project not found', http_code=404)

    args = self.get_parser.parse_args()

    # we want to only return commits in the repo that are within the
    # project's whitelist
    paths = self.get_whitelisted_paths(project) if not args.every_commit else None

    repo = project.repository
    offset = (args.page - 1) * args.per_page
    limit = args.per_page + 1  # +1 to tell if there are more revs to get

    vcs = repo.get_vcs()
    if vcs:
        try:
            commits = self.get_commits_from_vcs(
                repo, vcs, offset, limit, paths, args.parent, args.branch)
        except ValueError as err:
            return error(err.message)
    else:
        if args.parent or args.branch:
            param = 'Branches' if args.branch else 'Parents'
            return error(
                '{0} not supported for projects with no repository.'.
                format(param),
                http_code=422)
        # TODO: right now this fallback returns every commit for projects
        # with whitelisted paths. At the very least, we need to tell the
        # frontend about this (perhaps using a response header)
        commits = self.get_commits_from_db(repo, offset, limit)

    page_links = self.make_links(
        current_page=args.page,
        has_next_page=len(commits) > args.per_page,
    )
    # we fetched one extra commit so that we'd know whether to create a
    # next link. Delete it
    commits = commits[:args.per_page]

    builds_map = {}
    if commits:
        builds_map = self.get_builds_for_commits(commits, project,
                                                 args.all_builds)

    results = []
    for commit in commits:
        if args.all_builds:
            commit['builds'] = builds_map.get(commit['id'], [])
        else:
            commit['build'] = builds_map.get(commit['id'])
        results.append(commit)

    return self.respond(results, serialize=False, links=page_links)
def get(self, project_id):
    """List commits for a project, annotated with their builds (paginated)."""
    project = Project.get(project_id)
    if not project:
        return error('project not found', http_code=404)

    args = self.get_parser.parse_args()

    # we want to only return commits in the repo that are within the
    # project's whitelist
    whitelisted = None
    if not args.every_commit:
        whitelisted = self.get_whitelisted_paths(project)

    repository = project.repository
    skip = (args.page - 1) * args.per_page
    fetch_count = args.per_page + 1  # +1 to tell if there are more revs to get

    vcs = repository.get_vcs()
    if vcs:
        try:
            commits = self.get_commits_from_vcs(
                repository, vcs, skip, fetch_count, whitelisted,
                args.parent, args.branch)
        except ValueError as err:
            return error(err.message)
    else:
        if args.parent or args.branch:
            param = 'Branches' if args.branch else 'Parents'
            return error(
                '{0} not supported for projects with no repository.'.format(param),
                http_code=422)
        # TODO: right now this fallback returns every commit for projects
        # with whitelisted paths. At the very least, we need to tell the
        # frontend about this (perhaps using a response header)
        commits = self.get_commits_from_db(repository, skip, fetch_count)

    links = self.make_links(
        current_page=args.page,
        has_next_page=len(commits) > args.per_page,
    )
    # we fetched one extra commit so that we'd know whether to create a
    # next link. Delete it
    commits = commits[:args.per_page]

    builds_map = {}
    if commits:
        builds_map = self.get_builds_for_commits(
            commits, project, args.all_builds)

    annotated = []
    for entry in commits:
        if args.all_builds:
            entry['builds'] = builds_map.get(entry['id'], [])
        else:
            entry['build'] = builds_map.get(entry['id'])
        annotated.append(entry)

    return self.respond(annotated, serialize=False, links=links)
def post(self, plan_id):
    """Create a new build step under a plan; returns 201 with the new step."""
    plan = Plan.query.get(plan_id)
    if plan is None:
        return error("plan not found", http_code=404)

    args = self.parser.parse_args()

    step = Step(
        plan=plan,
        order=args.order,
        implementation=args.implementation,
    )

    try:
        data = json.loads(args.data)
    except ValueError as err:
        db.session.rollback()
        return error("invalid JSON: %s" % err)
    if not isinstance(data, dict):
        db.session.rollback()
        return error("data must be a JSON mapping")

    impl_cls = step.get_implementation(load=False)
    if impl_cls is None:
        db.session.rollback()
        return error("unable to load build step implementation")

    try:
        # XXX(dcramer): It's important that we deepcopy data so any
        # mutations within the BuildStep don't propagate into the db
        impl_cls(**deepcopy(data))
    except Exception as exc:
        db.session.rollback()
        return error("unable to create build step provided data: %s" % exc)

    step.data = data
    step.order = args.order
    db.session.add(step)

    plan.date_modified = step.date_modified
    db.session.add(plan)

    for opt_name in STEP_OPTIONS.keys():
        opt_value = args.get(opt_name)
        if opt_value is None:
            continue
        create_or_update(ItemOption, where={
            'item_id': step.id,
            'name': opt_name,
        }, values={
            'value': opt_value,
        })

    # NOTE(review): unlike the step-update endpoint, no explicit
    # db.session.commit() happens here — presumably the session is committed
    # elsewhere (e.g. request teardown). TODO confirm.
    return self.serialize(step), 201
def post(self, step_id):
    """Modify a build step in place and return the serialized result."""
    step = Step.query.get(step_id)
    if step is None:
        return error("step not found", http_code=404)

    args = self.parser.parse_args()

    if args.implementation is not None:
        step.implementation = args.implementation

    if args.data is not None:
        try:
            parsed = json.loads(args.data)
        except ValueError as err:
            return error("invalid JSON: %s" % err)
        if not isinstance(parsed, dict):
            return error("data must be a JSON mapping")

        impl_cls = step.get_implementation(load=False)
        if impl_cls is None:
            return error("unable to load build step implementation")

        try:
            # XXX(dcramer): It's important that we deepcopy data so any
            # mutations within the BuildStep don't propagate into the db
            impl_cls(**deepcopy(parsed))
        except Exception as exc:
            return error("unable to create build step mapping provided data: %s" % exc)

        step.data = parsed

    if args.order is not None:
        step.order = args.order

    step.date_modified = datetime.utcnow()
    db.session.add(step)

    # Keep the owning plan's timestamp in sync with the edited step.
    owning_plan = step.plan
    owning_plan.date_modified = step.date_modified
    db.session.add(owning_plan)

    for key in STEP_OPTIONS.keys():
        val = args.get(key)
        if val is None:
            continue
        create_or_update(ItemOption, where={
            'item_id': step.id,
            'name': key,
        }, values={
            'value': val,
        })

    db.session.commit()
    return self.serialize(step), 200
def wrapped(self, *args, **kwargs):
    """Guard the wrapped endpoint: require login and project permission."""
    user = get_current_user()
    if user is None:
        return error('Not logged in', http_code=401)
    try:
        slug = get_project_slug(self, *args, **kwargs)
    except ResourceNotFound as exc:
        return error('{}'.format(exc), http_code=404)
    if not user_has_project_permission(user, slug):
        return error('User does not have access to this project.',
                     http_code=403)
    return method(self, *args, **kwargs)
def post(self, node_id):
    """Toggle a node's availability, handling both Jenkins and Mesos nodes."""
    args = self.get_parser.parse_args()
    if not args.toggle:
        return self.get(node_id)

    node = Node.query.get(node_id)
    if node is None:
        return error('Node not found.', ['node_id'], 404)
    if not node.label:
        return error('Node does not contain a label.', ['node_id'], 404)

    user = get_current_user()
    if user is None:
        return error('User is not logged in.', ['user'], 401)

    jenkins_master = self.get_jenkins_master(node_id)
    if not jenkins_master:
        # We are most likely dealing with a Mesos slave here
        hostname = node.label.strip()
        mesos_master = mesos_lib.get_mesos_master()
        if not mesos_lib.is_active_slave(mesos_master, hostname):
            return error('Node is currently not active on Mesos master', 400)
        try:
            mesos_lib.toggle_node_maintenance_status(mesos_master, hostname)
        except Exception as err:
            return error('Unable to toggle offline status of node %s: %s'
                         % (hostname, err), http_code=500)
        return self.respond_mesos_status(node, mesos_master)

    url = '%s/toggleOffline' % (self.get_jenkins_url(jenkins_master, node.label))
    now = datetime.utcnow()
    payload = {
        'offlineMessage': '[changes] Disabled by %s at %s' % (user.email, now)
    }
    resp = requests.Session().post(url, data=payload, timeout=10)
    if resp.status_code != 200:
        logging.warning('Unable to toggle offline status (%s)' % (url))
    return self.respond_jenkins_status(node, jenkins_master)
def get(self):
    """List builds for a source, identified either by source id or by a
    (revision_sha, repo_id) pair; optionally filtered by tag."""
    args = self.get_parser.parse_args()

    # this can take either a source id or a revision/repo id. For the
    # latter, only non-patch sources are looked at
    source_id = args.source_id
    revision_sha = args.revision_sha
    repo_id = args.repo_id

    if source_id:
        source = Source.query.filter(
            Source.id == source_id,
        ).first()
    elif revision_sha and repo_id:
        source = Source.query.filter(
            Source.revision_sha == revision_sha,
            Source.repository_id == repo_id,
            Source.patch_id == None  # NOQA
        ).first()
    else:
        return error('invalid args')

    if source is None:
        return error("source not found", http_code=404)

    filters = [Build.source_id == source.id]
    if args.tag:
        # Avoid empty tags, which historically are meant to mean "no tag" restriction.
        tags = filter(bool, args.tag)
        if tags:
            filters.append(or_(*[Build.tags.any(t) for t in tags]))

    builds = self.serialize(list(
        Build.query.options(
            joinedload('author')
        ).filter(
            *filters
        ).order_by(Build.date_created.desc())
    ))

    build_ids = [b['id'] for b in builds]
    if len(builds) > 0:
        jobs = self.serialize(list(Job.query.filter(
            Job.build_id.in_(build_ids)
        )))
        # Attach each build's jobs inline so the client gets one payload.
        for b in builds:
            b['jobs'] = [j for j in jobs if j['build']['id'] == b['id']]

    return self.paginate(builds, serialize=False)
def get(self, project_id):
    """Return test-group data for a specific build, or for the latest
    passing non-patch build when no build_id is given."""
    project = Project.get(project_id)
    if not project:
        return '', 404

    args = self.parser.parse_args()

    if args.build_id:
        build = Build.query.get(args.build_id)
        if not build:
            return error("Build not found", http_code=404)
    else:
        build = Build.query.join(
            Source, Source.id == Build.source_id,
        ).filter(
            Source.patch_id == None,  # NOQA
            Build.project_id == project.id,
            Build.result == Result.passed,
            Build.status == Status.finished,
        ).order_by(
            Build.date_created.desc(),
        ).limit(1).first()
        if not build:
            # Nothing ever passed; nothing to report.
            return self.respond({})

    return self.respond(_generate_testgroup_data(build, project.id,
                                                 args.parent))
def post(self, project_id):
    """Set project options; detects and reports snapshot downgrades."""
    project = self._get_project(project_id)
    if project is None:
        return error("Project not found", http_code=404)

    args = self.parser.parse_args()
    for name, value in args.iteritems():
        if value is None:
            continue

        # If we're rolling back a snapshot, take note.
        if name == 'snapshot.current':
            current = Snapshot.get_current(project.id)
            if current:
                # If value is empty, we're deactivating a snapshot without replacing it,
                # and that's a downgrade too.
                replacement = value and Snapshot.query.get(value)
                if not replacement or replacement.date_created < current.date_created:
                    _report_snapshot_downgrade(project)

        create_or_update(ProjectOption, where={
            'project': project,
            'name': name,
        }, values={
            'value': value,
        })

    return self.respond({})
def get(self, source_id):
    """Return a source with its diff and (optionally) coverage overlays."""
    source = Source.query.filter(
        Source.id == source_id,
    ).first()
    if source is None:
        return error("Source not found", http_code=404)

    args = self.get_parser.parse_args()
    context = self.serialize(source)

    diff = source.generate_diff()
    if diff:
        files = self._get_files_from_raw_diff(diff)
        if args.coverage:
            # Only merge coverage for files that actually appear in the diff.
            coverage = merged_coverage_data(
                c for c in get_coverage_by_source_id(source_id)
                if c.filename in files)
            coverage_for_added_lines = self._filter_coverage_for_added_lines(
                diff, coverage)
        tails_info = dict(source.data)
    else:
        coverage = None
        coverage_for_added_lines = None
        tails_info = None

    context['diff'] = diff
    if args.coverage:
        context['coverage'] = coverage
        context['coverageForAddedLines'] = coverage_for_added_lines
    context['tailsInfo'] = tails_info

    return self.respond(context)
def post(self):
    """Delegate to post_impl, converting any failure into a 500 response."""
    try:
        return self.post_impl()
    except Exception as exc:
        # catch everything so that we can tell phabricator
        logging.exception("Error creating builds")
        return error("Error creating builds (%s): %s"
                     % (type(exc).__name__, exc.message),
                     http_code=500)
def post(self, diff_id):
    """
    Ask Changes to restart all builds for this diff. The response will be
    the list of all builds.
    """
    diff = self._get_diff_by_id(diff_id)
    if not diff:
        return error("Diff with ID %s does not exist." % (diff_id, ))

    diff_parser = DiffParser(diff.source.patch.diff)
    files_changed = diff_parser.get_changed_files()
    try:
        projects = self._get_projects_for_diff(diff, files_changed)
    except InvalidDiffError:
        return error('Patch does not apply')
    except ProjectConfigError:
        return error('Project config is not in a valid format.')

    collection_id = uuid.uuid4()
    existing_builds = self._get_builds_for_diff(diff)
    restarted = []
    for project in projects:
        project_builds = [b for b in existing_builds
                          if b.project_id == project.id]
        if not project_builds:
            logging.warning('Project with id %s does not have a build.',
                            project.id)
            continue

        latest = max(project_builds, key=lambda b: b.number)
        # Only retry completed, failed builds.
        if latest.status is not Status.finished:
            continue
        if latest.result is Result.passed:
            continue

        restarted.append(create_build(
            project=project,
            collection_id=collection_id,
            label=latest.label,
            target=latest.target,
            message=latest.message,
            author=latest.author,
            source=diff.source,
            cause=Cause.retry,
            selective_testing_policy=latest.selective_testing_policy,
        ))
    return self.respond(restarted)
def get(self, build_id, target_id):
    """Paginated messages for a Bazel target, oldest first."""
    target = BazelTarget.query.get(target_id)
    if not target:
        return error('target not found', http_code=404)
    messages = BazelTargetMessage.query.filter(
        BazelTargetMessage.target_id == target.id,
    ).order_by(BazelTargetMessage.date_created.asc())
    return self.paginate(messages)
def get(self, build_id):
    """Paginated messages attached to a build, oldest first."""
    build = Build.query.get(build_id)
    if not build:
        return error('build not found', http_code=404)
    messages = BuildMessage.query.filter(
        BuildMessage.build_id == build.id,
    ).order_by(BuildMessage.date_created.asc())
    return self.paginate(messages)
def get(self, build_id, target_id):
    """Return this target's messages in chronological order (paginated)."""
    target = BazelTarget.query.get(target_id)
    if not target:
        return error('target not found', http_code=404)
    q = BazelTargetMessage.query.filter(
        BazelTargetMessage.target_id == target.id,
    )
    return self.paginate(q.order_by(BazelTargetMessage.date_created.asc()))
def get(self, project_id, test_hash):
    """Return general details for a test, identified by its name hash."""
    project = Project.get(project_id)
    if not project:
        return error("Project not found", http_code=404)

    # use the most recent test run to find basic details
    test = TestCase.query.filter(
        TestCase.project_id == project.id,
        TestCase.name_sha == test_hash,
    ).order_by(TestCase.date_created.desc()).limit(1).first()
    if not test:
        return error("Test not found", http_code=404)

    context = self.serialize(test, {
        TestCase: GeneralizedTestCase(),
    })
    return self.respond(context)
def postback_error(self, msg, target, problems=None, http_code=400):
    """Return an error AND postback a comment to phabricator"""
    if target:
        comment = (
            'An error occurred somewhere between Phabricator and Changes:\n%s\n'
            'Please contact %s with any questions {icon times, color=red}'
            % (msg, current_app.config['SUPPORT_CONTACT']))
        post_comment(target, comment)
    return error(msg, problems=problems, http_code=http_code)
def get(self, build_id):
    """List a build's messages, sorted by creation time (paginated)."""
    build = Build.query.get(build_id)
    if not build:
        return error('build not found', http_code=404)
    q = BuildMessage.query.filter(
        BuildMessage.build_id == build.id,
    )
    return self.paginate(q.order_by(BuildMessage.date_created.asc()))
def get(self, node_hostname):
    """Look up a node by its label/hostname and include its clusters."""
    node = Node.query.filter(Node.label == node_hostname).first()
    if node is None:
        return error("Node not found", http_code=404)
    context = self.serialize(node)
    context['clusters'] = self.serialize(list(node.clusters))
    return self.respond(context, serialize=False)
def post(self, step_id):
    """Record a heartbeat for a jobstep; 410 if it was already aborted."""
    jobstep = JobStep.query.get(step_id)
    if jobstep is None:
        return error("Not found", http_code=404)

    # NOTE(josiah): we think this is okay as is, but it might be better to
    # report infra_failure the same way as aborted.
    if jobstep.result == Result.aborted:
        return error("Aborted", http_code=410)

    args = self.post_parser.parse_args()
    jobstep.last_heartbeat = args.date or datetime.utcnow()
    db.session.add(jobstep)
    db.session.commit()
    return self.respond(jobstep)
def postback_error(self, msg, target, problems=None, http_code=400):
    """Return an error AND postback a comment to phabricator"""
    if target:
        post_comment(
            target,
            'An error occurred somewhere between Phabricator and Changes:\n%s\n'
            'Please contact %s with any questions {icon times, color=red}'
            % (msg, current_app.config['SUPPORT_CONTACT']))
    return error(msg, problems=problems, http_code=http_code)
def post(self, diff_id):
    """
    Ask Changes to restart all builds for this diff. The response will be
    the list of all builds.
    """
    diff = self._get_diff_by_id(diff_id)
    if not diff:
        return error("Diff with ID %s does not exist." % (diff_id,))

    parser = DiffParser(diff.source.patch.diff)
    changed = parser.get_changed_files()
    try:
        projects = self._get_projects_for_diff(diff, changed)
    except InvalidDiffError:
        return error('Patch does not apply')
    except ProjectConfigError:
        return error('Project config is not in a valid format.')

    collection_id = uuid.uuid4()
    builds = self._get_builds_for_diff(diff)

    new_builds = []
    for project in projects:
        candidates = [b for b in builds if b.project_id == project.id]
        if not candidates:
            logging.warning('Project with id %s does not have a build.',
                            project.id)
            continue

        newest = max(candidates, key=lambda b: b.number)
        # Skip in-flight builds and builds that already passed.
        if newest.status is not Status.finished:
            continue
        if newest.result is Result.passed:
            continue

        new_builds.append(create_build(
            project=project,
            collection_id=collection_id,
            label=newest.label,
            target=newest.target,
            message=newest.message,
            author=newest.author,
            source=diff.source,
            cause=Cause.retry,
            selective_testing_policy=newest.selective_testing_policy,
        ))
    return self.respond(new_builds)
def post(self, node_id):
    """Toggle node availability; routes to Mesos maintenance mode when the
    node has no Jenkins master."""
    args = self.get_parser.parse_args()
    if not args.toggle:
        return self.get(node_id)

    node = Node.query.get(node_id)
    if node is None:
        return error('Node not found.', ['node_id'], 404)
    if not node.label:
        return error('Node does not contain a label.', ['node_id'], 404)

    user = get_current_user()
    if user is None:
        return error('User is not logged in.', ['user'], 401)

    jenkins_master = self.get_jenkins_master(node_id)
    if not jenkins_master:
        # We are most likely dealing with a Mesos slave here
        node_hostname = node.label.strip()
        mesos_master = mesos_lib.get_mesos_master()
        if not mesos_lib.is_active_slave(mesos_master, node_hostname):
            return error('Node is currently not active on Mesos master', 400)
        try:
            mesos_lib.toggle_node_maintenance_status(mesos_master,
                                                     node_hostname)
        except Exception as err:
            return error('Unable to toggle offline status of node %s: %s'
                         % (node_hostname, err), http_code=500)
        return self.respond_mesos_status(node, mesos_master)

    toggle_url = '%s/toggleOffline' % (
        self.get_jenkins_url(jenkins_master, node.label))
    stamp = datetime.utcnow()
    body = {
        'offlineMessage': '[changes] Disabled by %s at %s' % (user.email, stamp)
    }
    result = requests.Session().post(toggle_url, data=body, timeout=10)
    if result.status_code != 200:
        logging.warning('Unable to toggle offline status (%s)' % (toggle_url))
    return self.respond_jenkins_status(node, jenkins_master)
def post(self):
    """Allocate the next pending jobstep under a global redis lock.

    Returns a one-element list with the allocation context on success, an
    empty list when nothing is pending, or a 503 error when the lock is
    contended or allocation fails.
    """
    try:
        # Serialize allocation across workers; nowait makes contention an
        # immediate UnableToGetLock instead of a blocking wait.
        with redis.lock('jobstep:allocate', nowait=True):
            to_allocate = self.find_next_jobstep()
            # Should 204, but flask/werkzeug throws StopIteration (bug!) for tests
            if to_allocate is None:
                return self.respond([])
            to_allocate.status = Status.allocated
            db.session.add(to_allocate)
            db.session.flush()
    except redis.UnableToGetLock:
        return error('Another allocation is in progress', http_code=503)
    try:
        jobplan, buildstep = JobPlan.get_build_step_for_job(to_allocate.job_id)
        assert jobplan and buildstep
        context = self.serialize(to_allocate)
        context['project'] = self.serialize(to_allocate.project)
        # Resource defaults when the jobstep doesn't specify its own:
        # 4 cpus, 8 GiB memory.
        context['resources'] = {
            'cpus': to_allocate.data.get('cpus', 4),
            'mem': to_allocate.data.get('mem', 8 * 1024),
        }
        context['cmd'] = buildstep.get_allocation_command(to_allocate)
        return self.respond([context])
    except Exception:
        # Allocation failed after the step was marked allocated; abort it so
        # it doesn't linger in the allocated state.
        to_allocate.status = Status.finished
        to_allocate.result = Result.aborted
        db.session.add(to_allocate)
        db.session.flush()
        logging.exception(
            'Exception occurred while allocating job step for project %s',
            to_allocate.project.slug)
        return error('Internal error while attempting allocation', http_code=503)
def get(self, plan_id):
    """Return a plan's options, filling in defaults for unset keys."""
    plan = Plan.query.get(plan_id)
    if plan is None:
        return error("Plan not found", http_code=404)

    options = {
        o.name: o.value
        for o in ItemOption.query.filter(ItemOption.item_id == plan.id)
    }
    for key, value in OPTION_DEFAULTS.iteritems():
        options.setdefault(key, value)

    return self.respond(options)
def post(self):
    """This endpoint kicks off a sync_repo task asynchronously for the given
    repository url.
    """
    args = self.parser.parse_args()
    if args.repository is None:
        # this is None when the url is not recognized
        return error('Repository url is not recognized.',
                     problems=['repository'])
    # TODO should we worry about DoS? Maybe only start the task if it's not
    # already running?
    sync_repo.delay(repo_id=args.repository.id.hex, continuous=False)
    return ''
def get(self, author_id):
    """Return an author's recent non-patch revisions, each annotated with its
    builds. ``author_id`` may be 'me' for the logged-in user.
    """
    if author_id == 'me' and not get_current_user():
        return error('Must be logged in to ask about yourself', http_code=401)
    authors = Author.find(author_id, get_current_user())
    if not authors:
        return self.respond([])
    args = self.get_parser.parse_args()

    # serialize everything when fetching so that we batch any needed data
    # fetching. we'll still rearrange things later

    # grab recent revisions by author (for any repository/project, which
    # means we can't use vcs commands)
    sources = self.serialize(
        list(
            Source.query.options(joinedload('revision')).
            join(Revision, Source.revision_sha == Revision.sha).filter(
                # Repository matching not required for correctness, but
                # enables an index hit that improves performance significantly.
                Revision.repository_id == Source.repository_id,
                Revision.author_id.in_([a.id for a in authors]),
                Source.patch_id.is_(None),
            ).order_by(Revision.date_committed.desc(), ).limit(
                args.num_revs)))
    if not sources:
        return self.respond(sources)

    # grab builds for those revisions
    commit_builds_list = self.serialize(
        list(
            Build.query.options(
                joinedload('project'),
                joinedload('author'),
            ).filter(Build.source_id.in_([s['id'] for s in sources]), ).order_by(
                Build.date_created.desc(), Build.date_started.desc())))

    # move builds into sources
    builds_map = defaultdict(list)
    for build in commit_builds_list:
        builds_map[build['source']['id']].append(build)
    for source in sources:
        source['builds'] = builds_map[source['id']]

    return self.respond(sources, serialize=False)
def get(self, plan_id):
    """Fetch stored options for a plan, backfilled with OPTION_DEFAULTS."""
    plan = Plan.query.get(plan_id)
    if plan is None:
        return error("Plan not found", http_code=404)

    stored = ItemOption.query.filter(
        ItemOption.item_id == plan.id,
    )
    options = dict((opt.name, opt.value) for opt in stored)
    for default_key, default_value in OPTION_DEFAULTS.iteritems():
        options.setdefault(default_key, default_value)

    return self.respond(options)
def get(self, author_id):
    """Return the author's open Phabricator diffs, newest-modified first,
    each annotated with its Changes builds.
    """
    if author_id == 'me' and not get_current_user():
        return error('Must be logged in to ask about yourself', http_code=401)
    authors = Author.find(author_id, get_current_user())
    if not authors:
        return self.respond([])
    try:
        # Resolve the author's Phabricator PHID via their primary email,
        # then fetch their open revisions.
        author_email = authors[0].email
        request = PhabricatorClient()
        request.connect()
        user_info = request.call('user.query', {'emails': [author_email]})
        if not user_info:
            return 'phabricator: %s not found' % author_email, 404
        author_phid = user_info[0]["phid"]
        diff_info = request.call('differential.query', {
            'authors': [author_phid],
            'status': "status-open"
        })
        # Sort most-recently-modified first.
        diff_info.sort(key=lambda k: -1 * int(k['dateModified']))
    except requests.exceptions.ConnectionError:
        return 'Unable to connect to Phabricator', 503
    if not diff_info:
        # No diffs, no point in trying to find builds.
        return self.respond([])
    # Join Changes builds onto the Phabricator diffs via the shared source.
    rows = list(
        db.session.query(PhabricatorDiff, Build).join(
            Build, Build.source_id == PhabricatorDiff.source_id).filter(
                PhabricatorDiff.revision_id.in_(
                    [d['id'] for d in diff_info])))
    serialized_builds = zip(self.serialize([row.Build for row in rows]),
                            [row.PhabricatorDiff for row in rows])
    builds_map = defaultdict(list)
    for build, phabricator_diff in serialized_builds:
        builds_map[str(phabricator_diff.revision_id)].append(build)
    for d in diff_info:
        d['builds'] = builds_map[str(d['id'])]
    return self.respond(diff_info)
def post(self):
    """
    Given a list of jobstep ids, returns the ids of those that should be
    aborted. This is a POST only because we're sending large-ish amounts
    of data--no state is changed by this call.
    """
    args = json.loads(request.data)
    try:
        jobstep_ids = args['jobstep_ids']
    except KeyError:
        return error('Missing jobstep_ids attribute')

    # Validate every id up front so a single bad value fails the request.
    for jobstep_id in jobstep_ids:
        try:
            UUID(jobstep_id)
        except ValueError:
            err = "Invalid jobstep id sent to jobstep_needs_abort: %s"
            logging.warning(err, jobstep_id, exc_info=True)
            return error(err % jobstep_id)

    if len(jobstep_ids) == 0:
        return self.respond({'needs_abort': []})

    with statsreporter.stats().timer('jobstep_needs_abort'):
        finished = db.session.query(
            JobStep.id, JobStep.result, JobStep.data,
        ).filter(
            JobStep.status == Status.finished,
            JobStep.id.in_(jobstep_ids),
        ).all()

        needs_abort = [
            step_id for (step_id, result, data) in finished
            if result == Result.aborted or data.get('timed_out')
        ]
        return self.respond({'needs_abort': needs_abort})
def get(self, author_id):
    """Paginated builds by an author, newest first ('me' = current user)."""
    if author_id == 'me' and not get_current_user():
        return error('Must be logged in to ask about yourself', http_code=401)
    authors = Author.find(author_id, get_current_user())
    if not authors:
        return self.respond([])

    author_ids = [a.id for a in authors]
    builds = Build.query.options(
        joinedload('project'),
        joinedload('author'),
        joinedload('source').joinedload('revision'),
    ).filter(
        Build.author_id.in_(author_ids)
    ).order_by(Build.date_created.desc(), Build.date_started.desc())
    return self.paginate(builds)
def get(self, author_id):
    """List an author's builds in reverse chronological order (paginated)."""
    if author_id == 'me' and not get_current_user():
        return error('Must be logged in to ask about yourself', http_code=401)

    matched_authors = Author.find(author_id, get_current_user())
    if not matched_authors:
        return self.respond([])

    queryset = Build.query.options(
        joinedload('project'),
        joinedload('author'),
        joinedload('source').joinedload('revision'),
    ).filter(
        Build.author_id.in_([author.id for author in matched_authors])
    ).order_by(
        Build.date_created.desc(),
        Build.date_started.desc(),
    )
    return self.paginate(queryset)
def get(self):
    """Return builds attached to a specific Phabricator revision+diff pair."""
    revision_id = request.args.get('revision_id')
    diff_id = request.args.get('diff_id')
    if not revision_id or not diff_id:
        return error('missing revision or diff id')

    # grab builds
    rows = list(db.session.query(
        Build, PhabricatorDiff
    ).join(
        PhabricatorDiff, Build.source_id == PhabricatorDiff.source_id,
    ).filter(
        PhabricatorDiff.revision_id == revision_id,
        PhabricatorDiff.diff_id == diff_id,
    ))
    return self.respond([row.Build for row in rows])
def get(self):
    """Look up builds by Phabricator revision and diff id."""
    revision_id = request.args.get('revision_id')
    diff_id = request.args.get('diff_id')
    if not revision_id or not diff_id:
        return error('missing revision or diff id')

    # grab builds
    query = db.session.query(Build, PhabricatorDiff).join(
        PhabricatorDiff,
        Build.source_id == PhabricatorDiff.source_id,
    ).filter(
        PhabricatorDiff.revision_id == revision_id,
        PhabricatorDiff.diff_id == diff_id,
    )
    return self.respond([row.Build for row in query])
def respond_status(self, node, master):
    """Respond with the node's Jenkins offline status.

    Returns ``{'offline': bool}`` when the Jenkins master reports a
    ``temporarilyOffline`` flag for the node, otherwise an empty dict.
    """
    if node is None:
        return error('Node not found.', ['node_id'], 404)

    context = {}
    # If this is not a Jenkins node, we don't have master and return an empty dict.
    if master and node.label:
        info_url = '%s/api/json' % (self.get_jenkins_url(master, node.label))
        node_info = None
        try:
            # Bounded timeout so a hung Jenkins master cannot stall this request;
            # any network/HTTP failure degrades to the empty-context response
            # instead of an unhandled exception (matches the sibling handlers).
            response = requests.Session().get(info_url, timeout=10)
            response.raise_for_status()
            node_info = json.loads(response.text)
        except Exception:
            logging.warning('Unable to get node info (%s)', info_url, exc_info=True)
        if node_info and 'temporarilyOffline' in node_info:
            context['offline'] = node_info['temporarilyOffline']
    return self.respond(context, serialize=False)
def post(self):
    """Update the current user's options from the posted form fields."""
    user = get_current_user()
    if user is None:
        return error("User not found", http_code=404)

    args = self.post_parser.parse_args()
    for option_name, option_value in args.iteritems():
        # Unsupplied fields parse as None; leave those options untouched.
        if option_value is not None:
            create_or_update(ItemOption, where={
                'item_id': user.id,
                'name': option_name,
            }, values={
                'value': option_value,
            })
    return self.respond({})
def post(self, plan_id):
    """Update the options attached to the given build plan."""
    plan = Plan.query.get(plan_id)
    if plan is None:
        return error("Plan not found", http_code=404)

    args = self.post_parser.parse_args()
    for option_name, option_value in args.iteritems():
        # Skip fields the client did not supply.
        if option_value is None:
            continue
        create_or_update(
            ItemOption,
            where={
                'item_id': plan.id,
                'name': option_name,
            },
            values={
                'value': option_value,
            },
        )
    return self.respond({})
def post(self, build_id):
    """ Set tags associated with a build. """
    args = self.post_parser.parse_args()
    # Reject the request if any single tag exceeds 16 characters.
    if args.tags and any(len(tag) > 16 for tag in args.tags):
        return error("Tags must be 16 characters or less.")

    build = Build.query.get(build_id)
    # if the build is not findable in db after we just fetched
    # it to put on the page, there's something wrong.
    if build is None:
        return self.respond({}, status_code=404)

    build.tags = args.tags
    db.session.add(build)
    db.session.commit()
    return self.respond({})
def post(self, build_id):
    """ Set tags associated with a build. """
    args = self.post_parser.parse_args()
    tags = args.tags
    # Every tag must be at most 16 characters long.
    if tags and not all(len(t) <= 16 for t in tags):
        return error('Tags must be 16 characters or less.')

    build = Build.query.get(build_id)
    # if the build is not findable in db after we just fetched
    # it to put on the page, there's something wrong.
    if build is None:
        return self.respond({}, status_code=404)

    build.tags = tags
    db.session.add(build)
    db.session.commit()
    return self.respond({})
def respond_jenkins_status(self, node, master):
    """Respond with the node's Jenkins offline status.

    Returns ``{'offline': bool}`` when the Jenkins master reports a
    ``temporarilyOffline`` flag for the node, otherwise an empty dict.
    """
    if node is None:
        return error('Node not found.', ['node_id'], 404)

    context = {}
    # If this is not a Jenkins node, we don't have master and return an empty dict.
    if master and node.label:
        info_url = '%s/api/json' % (self.get_jenkins_url(master, node.label))
        node_info = None
        try:
            response = requests.Session().get(info_url, timeout=10)
            response.raise_for_status()
            node_info = json.loads(response.text)
        # was a bare `except:`, which also swallows SystemExit/KeyboardInterrupt;
        # Exception still covers all network/HTTP/JSON failures here.
        except Exception:
            logging.warning('Unable to get node info (%s)', info_url, exc_info=True)
        if node_info and 'temporarilyOffline' in node_info:
            context['offline'] = node_info['temporarilyOffline']
    return self.respond(context, serialize=False)
def respond_status(self, node, master):
    """Respond with the node's Jenkins offline status.

    Returns ``{'offline': bool}`` when the Jenkins master reports a
    ``temporarilyOffline`` flag for the node, otherwise an empty dict.
    """
    if node is None:
        return error('Node not found.', ['node_id'], 404)

    context = {}
    # If this is not a Jenkins node, we don't have master and return an empty dict.
    if master and node.label:
        info_url = '%s/api/json' % (self.get_jenkins_url(master, node.label))
        node_info = None
        try:
            response = requests.Session().get(info_url, timeout=10)
            response.raise_for_status()
            node_info = json.loads(response.text)
        # was a bare `except:`, which also swallows SystemExit/KeyboardInterrupt;
        # Exception still covers all network/HTTP/JSON failures here.
        except Exception:
            logging.warning('Unable to get node info (%s)', info_url, exc_info=True)
        if node_info and 'temporarilyOffline' in node_info:
            context['offline'] = node_info['temporarilyOffline']
    return self.respond(context, serialize=False)
def get(self, project_id):
    """Return a page of commits for a project, each annotated with its build.

    Commits come from the VCS log when a backend is available, otherwise
    from the Revision table. Each serialized commit gets a 'build' key with
    the most relevant non-snapshot build for that revision (or None).
    """
    project = Project.get(project_id)
    if not project:
        return '', 404

    args = self.get_parser.parse_args()

    repo = project.repository
    vcs = repo.get_vcs()

    # Fetch one extra row beyond the page size to detect a next page.
    offset = (args.page - 1) * args.per_page
    limit = args.per_page + 1

    if vcs:
        try:
            vcs_log = list(vcs.log(
                offset=offset,
                limit=limit,
                parent=args.parent,
                branch=args.branch,
            ))
        except ValueError as err:
            return error(err.message)

        if vcs_log:
            # Pull any Revision rows we already have for these shas so their
            # richer serialization can be used instead of the raw VCS commit.
            revisions_qs = list(Revision.query.options(
                joinedload('author'),
            ).filter(
                Revision.repository_id == repo.id,
                Revision.sha.in_(c.id for c in vcs_log)
            ))

            # sha -> serialized revision, pairing each row with its serialization
            revisions_map = dict(
                (c.sha, d)
                for c, d in itertools.izip(revisions_qs, self.serialize(revisions_qs))
            )

            commits = []
            for commit in vcs_log:
                # Prefer the stored Revision serialization when present.
                if commit.id in revisions_map:
                    result = revisions_map[commit.id]
                else:
                    result = self.serialize(commit)
                commits.append(result)
        else:
            commits = []
    elif args.parent or args.branch:
        # These filters require a VCS backend to interpret.
        param = 'Branches' if args.branch else 'Parents'
        return error(
            '{0} not supported for projects with no repository.'.format(param),
            http_code=422)
    else:
        # No VCS backend: fall back to the Revision table, newest first.
        commits = self.serialize(list(
            Revision.query.options(
                joinedload('author'),
            ).filter(
                Revision.repository_id == repo.id,
            ).order_by(Revision.date_created.desc())[offset:offset + limit]
        ))

    page_links = self.make_links(
        current_page=args.page,
        # The extra fetched row (if any) signals that a next page exists.
        has_next_page=len(commits) > args.per_page,
    )

    # Trim the probe row back to the requested page size.
    commits = commits[:args.per_page]

    if commits:
        # Load the builds for these revisions (excluding snapshot builds),
        # ordered oldest-first so the newest build wins in the map below.
        builds_qs = list(Build.query.options(
            joinedload('author'),
            contains_eager('source'),
        ).join(
            Source, Source.id == Build.source_id,
        ).filter(
            Build.source_id == Source.id,
            Build.project_id == project.id,
            Build.status.in_([Status.finished, Status.in_progress, Status.queued]),
            Build.cause != Cause.snapshot,
            Source.repository_id == project.repository_id,
            Source.revision_sha.in_(c['id'] for c in commits),
            Source.patch == None,  # NOQA
        ).order_by(Build.date_created.asc()))

        # revision sha -> serialized build; later (newer) builds overwrite earlier ones
        builds_map = dict(
            (b.source.revision_sha, d)
            for b, d in itertools.izip(builds_qs, self.serialize(builds_qs))
        )
    else:
        builds_map = {}

    results = []
    for result in commits:
        # Attach the chosen build (or None) to each serialized commit.
        result['build'] = builds_map.get(result['id'])
        results.append(result)

    return self.respond(results, serialize=False, links=page_links)
def post(self):
    """
    Create a new commit or diff build. The API roughly goes like this:

    1. Identify the project(s) to build for. This can be done by specifying
       ``project``, ``repository``, or ``repository[callsign]``. If a
       repository is specified somehow, then all projects for that repository
       are considered for building.
    2. Using the ``sha``, find the appropriate revision object. This may
       involve updating the repo.
    3. If ``patch`` is given, then apply the patch and mark this as a diff
       build. Otherwise, this is a commit build.
    4. If ``snapshot_id`` is given, verify that the snapshot can be used by
       all projects.
    5. If provided, apply project_whitelist, filtering out projects not in
       this whitelist.
    6. Based on the flag ``apply_project_files_trigger`` (see comment on the
       argument itself for default values), decide whether or not to filter
       out projects by file blacklist and whitelist.
    7. Attach metadata and create/ensure existence of a build for each
       project, depending on the flag ``ensure_only``.

    NOTE: In ensure-only mode, the collection_ids of the returned builds are
    not necessarily identical, as we give new builds new collection IDs and
    preserve the existing builds' collection IDs.

    NOTE: If ``patch`` is specified ``sha`` is assumed to be the original
    base revision to apply the patch.

    Not relevant until we fix TODO: ``sha`` is **not** guaranteed to be the
    rev used to apply the patch. See ``find_green_parent_sha`` for the logic
    of identifying the correct revision.
    """
    args = self.parser.parse_args()

    if args.patch_file and args.ensure_only:
        return error("Ensure-only mode does not work with a diff build yet.",
                     problems=["patch", "ensure_only"])

    if not (args.project or args.repository or args['repository[phabricator.callsign]']):
        return error("Project or repository must be specified",
                     problems=["project", "repository", "repository[phabricator.callsign]"])

    # read arguments
    if args.patch_data:
        try:
            patch_data = json.loads(args.patch_data)
        except Exception:
            return error("Invalid patch data (must be JSON dict)",
                         problems=["patch[data]"])

        if not isinstance(patch_data, dict):
            return error("Invalid patch data (must be JSON dict)",
                         problems=["patch[data]"])
    else:
        patch_data = None

    # 1. identify project(s)
    projects, repository = try_get_projects_and_repository(args)

    if not projects:
        return error("Unable to find project(s).")

    # read arguments
    label = args.label
    author = args.author
    message = args.message
    tag = args.tag
    snapshot_id = args.snapshot_id
    no_snapshot = args.no_snapshot
    cause = Cause[args.cause]

    if no_snapshot and snapshot_id:
        return error("Cannot specify snapshot with no_snapshot option")

    # Diff builds are implicitly tagged 'patch' unless the caller set a tag.
    if not tag and args.patch_file:
        tag = 'patch'

    # 2. validate snapshot
    if snapshot_id:
        snapshot = Snapshot.query.get(snapshot_id)
        if not snapshot:
            return error("Unable to find snapshot.")
        if snapshot.status != SnapshotStatus.active:
            return error("Snapshot is in an invalid state: %s" % snapshot.status)
        # Every snapshot-enabled plan of every candidate project must have an
        # image built from this snapshot, or the request is rejected.
        for project in projects:
            plans = get_build_plans(project)
            for plan in plans:
                plan_options = plan.get_item_options()
                allow_snapshot = '1' == plan_options.get('snapshot.allow', '1') or plan.snapshot_plan
                if allow_snapshot and not SnapshotImage.get(plan, snapshot_id):
                    # We want to create a build using a specific snapshot but no image
                    # was found for this plan so fail.
                    return error("Snapshot cannot be applied to %s's %s" % (project.slug, plan.label))

    # 3. find revision
    try:
        revision = identify_revision(repository, args.sha)
    except MissingRevision:
        # if the default fails, we absolutely can't continue and the
        # client should send a valid revision
        return error("Unable to find commit %s in %s." % (args.sha, repository.url),
                     problems=['sha', 'repository'])

    # get default values for arguments
    if revision:
        if not author:
            author = revision.author
        if not label:
            label = revision.subject
        # only default the message if its absolutely not set
        if message is None:
            message = revision.message
        sha = revision.sha
    else:
        sha = args.sha

    if not args.target:
        target = sha[:12]
    else:
        target = args.target[:128]

    if not label:
        if message:
            label = message.splitlines()[0]
        if not label:
            label = 'A homeless build'
    label = label[:128]

    # 4. Check for patch
    if args.patch_file:
        # Buffer the uploaded patch file into memory.
        fp = StringIO()
        for line in args.patch_file:
            fp.write(line)
        patch_file = fp
    else:
        patch_file = None

    if patch_file:
        patch = Patch(
            repository=repository,
            parent_revision_sha=sha,
            diff=patch_file.getvalue(),
        )
        db.session.add(patch)
    else:
        patch = None

    project_options = ProjectOptionsHelper.get_options(projects, ['build.file-whitelist'])

    # mark as commit or diff build
    if not patch:
        is_commit_build = True
    else:
        is_commit_build = False

    # Resolve the file-trigger flag: explicit arg wins, then the legacy
    # apply_file_whitelist arg, then a default based on build type.
    apply_project_files_trigger = args.apply_project_files_trigger
    if apply_project_files_trigger is None:
        apply_project_files_trigger = args.apply_file_whitelist
    if apply_project_files_trigger is None:
        if is_commit_build:
            apply_project_files_trigger = False
        else:
            apply_project_files_trigger = True

    if apply_project_files_trigger:
        if patch:
            diff_parser = DiffParser(patch.diff)
            files_changed = diff_parser.get_changed_files()
        elif revision:
            try:
                files_changed = _get_revision_changed_files(repository, revision)
            except MissingRevision:
                return error("Unable to find commit %s in %s." % (args.sha, repository.url),
                             problems=['sha', 'repository'])
        else:
            # the only way that revision can be null is if this repo does not have a vcs backend
            logging.warning('Revision and patch are both None for sha %s. This is because the repo %s does not have a VCS backend.', sha, repository.url)
            files_changed = None
    else:
        # we won't be applying file whitelist, so there is no need to get the list of changed files.
        files_changed = None

    collection_id = uuid.uuid4()

    builds = []
    for project in projects:
        plan_list = get_build_plans(project)
        if not plan_list:
            logging.warning('No plans defined for project %s', project.slug)
            continue
        # 5. apply project whitelist as appropriate
        if args.project_whitelist is not None and project.slug not in args.project_whitelist:
            logging.info('Project %s is not in the supplied whitelist', project.slug)
            continue
        forced_sha = sha
        # TODO(dcramer): find_green_parent_sha needs to take branch
        # into account
        # if patch_file:
        #     forced_sha = find_green_parent_sha(
        #         project=project,
        #         sha=sha,
        #     )

        # 6. apply file whitelist as appropriate
        diff = None
        if patch is not None:
            diff = patch.diff
        if (
            apply_project_files_trigger and
            files_changed is not None and
            not files_changed_should_trigger_project(
                files_changed, project, project_options[project.id], sha, diff)
        ):
            logging.info('Changed files do not trigger build for project %s', project.slug)
            continue
        # 7. create/ensure build
        build_message = None
        selective_testing_policy = SelectiveTestingPolicy.disabled
        if args.selective_testing and project_lib.contains_active_autogenerated_plan(project):
            if is_commit_build:
                selective_testing_policy, reasons = get_selective_testing_policy(project, sha, diff)
                if reasons:
                    if selective_testing_policy is SelectiveTestingPolicy.disabled:
                        reasons = ["Selective testing was requested but not done because:"] + [' ' + m for m in reasons]
                    build_message = '\n'.join(reasons)
            else:
                # NOTE: for diff builds, it makes sense to just do selective testing,
                # since it will never become a parent build and will never be used to
                # calculate revision results.
                selective_testing_policy = SelectiveTestingPolicy.enabled
        if args.ensure_only:
            # Reuse the most recent existing build for this source, if any.
            potentials = list(Build.query.filter(
                Build.project_id == project.id,
                Build.source.has(revision_sha=sha, patch=patch),
            ).order_by(
                Build.date_created.desc()  # newest first
            ).limit(1))
            if len(potentials) == 0:
                builds.append(create_build(
                    project=project,
                    collection_id=collection_id,
                    sha=forced_sha,
                    target=target,
                    label=label,
                    message=message,
                    author=author,
                    patch=patch,
                    source_data=patch_data,
                    tag=tag,
                    cause=cause,
                    snapshot_id=snapshot_id,
                    no_snapshot=no_snapshot,
                    selective_testing_policy=selective_testing_policy,
                ))
            else:
                builds.append(potentials[0])
        else:
            builds.append(create_build(
                project=project,
                collection_id=collection_id,
                sha=forced_sha,
                target=target,
                label=label,
                message=message,
                author=author,
                patch=patch,
                source_data=patch_data,
                tag=tag,
                cause=cause,
                snapshot_id=snapshot_id,
                no_snapshot=no_snapshot,
                selective_testing_policy=selective_testing_policy,
            ))

        if build_message:
            # Attach the selective-testing explanation to the build just created.
            message = BuildMessage(
                build=builds[-1],
                text=build_message,
            )
            db.session.add(message)
            db.session.commit()

    return self.respond(builds)