def get_rb_client(self):
    """Return (and cache) an authenticated Review Board root resource.

    Connection options are read from ``.reviewboardrc`` in the current
    directory.  Side effects: sets ``self.repository``, ``self.branch`` and
    ``self.target_groups`` from the config file, and caches the root
    resource on ``self.rb_client`` so repeated calls reuse the session.
    """
    if not self.rb_client:
        import ast
        import getpass

        options = {}
        with open(".reviewboardrc") as reviewboardrc:
            for line in reviewboardrc:
                # Skip comments and blank lines.
                if line.startswith("#"):
                    continue
                if not line.strip():
                    continue
                # Values are quoted Python literals; literal_eval parses
                # them without eval()'s arbitrary-code-execution risk.
                # split("=", 1) tolerates '=' inside the value.
                k, v = line.strip().split("=", 1)
                options[k.strip()] = ast.literal_eval(v.strip())

        rbclient = RBClient(options['REVIEWBOARD_URL'])
        self.repository = options['REPOSITORY']
        self.branch = options.get('BRANCH') or options.get('TRACKING_BRANCH')
        # .get() returns None when absent -- same as the old has_key() dance.
        self.target_groups = options.get('TARGET_GROUPS')

        if not rbclient.get_root().get_session()['authenticated']:
            # NOTE(review): prompt lines reconstructed from redacted source;
            # confirm against the original.
            username = (self.opt.reviewboard_username or
                        raw_input("Enter review board Username: "))
            password = getpass.getpass("Enter password: ")
            rbclient.login(username, password)

        # Bug fix: the already-authenticated path used to return early
        # without populating self.rb_client, defeating the cache.
        self.rb_client = rbclient.get_root()
    return self.rb_client
def rb_client(self):
    """Return an authenticated Review Board root resource.

    Options come from ``.reviewboardrc`` when present; the server URL
    falls back to https://reviews.apache.org/.  The repository, branch and
    target-group options are attached to the returned root resource.
    """
    import ast
    import getpass

    options = {}
    if os.path.exists(".reviewboardrc"):
        with open(".reviewboardrc") as reviewboardrc:
            for line in reviewboardrc:
                # Skip comments and blank lines.
                if line.startswith("#"):
                    continue
                if not line.strip():
                    continue
                # literal_eval parses the quoted option values without
                # eval()'s arbitrary-code-execution risk; split("=", 1)
                # tolerates '=' inside the value.
                k, v = line.strip().split("=", 1)
                options[k.strip()] = ast.literal_eval(v.strip())

    rbclient = RBClient(
        options.get('REVIEWBOARD_URL') or 'https://reviews.apache.org/',
        RetryingSyncTransport)

    if not rbclient.get_root().get_session()['authenticated']:
        # Prefer a username given on the command line, otherwise prompt.
        # NOTE(review): prompt lines reconstructed from redacted source;
        # confirm against the original.
        if self.opt.reviewboard_username and self.opt.reviewboard_username[0]:
            username = self.opt.reviewboard_username[0]
        else:
            username = raw_input("Enter review board Username: ")
        password = getpass.getpass("Enter password for %s: " % username)
        rbclient.login(username, password)

    root = rbclient.get_root()
    root.repository = options.get('REPOSITORY') or None
    root.branch = options.get('BRANCH') or options.get('TRACKING_BRANCH')
    # .get() returns None when absent -- same as the old has_key() dance.
    root.target_groups = options.get('TARGET_GROUPS')
    return root
def rb_client(self):
    """Return an authenticated Review Board root resource.

    Options come from ``.reviewboardrc`` when present; the server URL
    falls back to https://reviews.apache.org/.  The repository, branch and
    target-group options are attached to the returned root resource.
    """
    import ast
    import getpass

    options = {}
    if os.path.exists(".reviewboardrc"):
        with open(".reviewboardrc") as reviewboardrc:
            for line in reviewboardrc:
                # Skip comments and blank lines.
                if line.startswith("#"):
                    continue
                if not line.strip():
                    continue
                # literal_eval parses the quoted option values without
                # eval()'s arbitrary-code-execution risk; split("=", 1)
                # tolerates '=' inside the value.
                k, v = line.strip().split("=", 1)
                options[k.strip()] = ast.literal_eval(v.strip())

    rbclient = RBClient(
        options.get('REVIEWBOARD_URL') or 'https://reviews.apache.org/',
        RetryingSyncTransport)

    if not rbclient.get_root().get_session()['authenticated']:
        # Prefer a username given on the command line, otherwise prompt.
        # NOTE(review): prompt lines reconstructed from redacted source;
        # confirm against the original.
        if self.opt.reviewboard_username and self.opt.reviewboard_username[0]:
            username = self.opt.reviewboard_username[0]
        else:
            username = raw_input("Enter review board Username: ")
        password = getpass.getpass("Enter password for %s: " % username)
        rbclient.login(username, password)

    root = rbclient.get_root()
    root.repository = options.get('REPOSITORY') or None
    root.branch = options.get('BRANCH') or options.get('TRACKING_BRANCH')
    # .get() returns None when absent -- same as the old has_key() dance.
    root.target_groups = options.get('TARGET_GROUPS')
    return root
def get_api(server_url, **kwargs):
    """Build an RBClient and fetch its root resource for hook code.

    Hooks should call this instead of constructing their own client, so
    connection failures surface uniformly as :py:class:`HookError`.

    Args:
        server_url (unicode):
            The server URL to retrieve.

        **kwargs (dict):
            Additional keyword arguments forwarded to the
            :py:class:`~rbtools.api.client.RBClient` constructor (see
            :py:meth:`SyncTransport.__init__()
            <rbtools.api.transport.sync.SyncTransport.__init__>` for the
            accepted arguments).

    Returns:
        tuple:
        A 2-tuple of the :py:class:`~rbtools.api.client.RBClient` and the
        :py:class:`<root resource> rbtools.api.resource.Resource`.

    Raises:
        HookError:
            The server could not be reached or returned an API error.
    """
    client = RBClient(server_url, **kwargs)

    try:
        root = client.get_root()
    except ServerInterfaceError as e:
        raise HookError('Could not reach the Review Board server at %s: %s'
                        % (server_url, e))
    except APIError as e:
        raise HookError('Unexpected API Error: %s' % e)

    return client, root
def process_review_requests(client, channel, nick, review_ids):
    """Processes a list of review request ids using a shared client.

    Reports progress/failures to ``channel`` via ``client.msg``; failed
    review ids are collected and summarized at the end.
    """
    logger.info("Starting codereview of: {0}".format(review_ids))
    api = RBClient(settings.CODEREVIEW_REVIEWBOARD_API_URL,
                   username=settings.CODEREVIEW_REVIEWBOARD_API_USERNAME,
                   password=settings.CODEREVIEW_REVIEWBOARD_API_PASSWORD)
    try:
        api_root = api.get_root()
    except Exception:
        # Bug fix: was a bare `except:`, which also swallowed SystemExit
        # and KeyboardInterrupt.  Still best-effort: log and notify.
        logger.exception("Cannot access reviewboard")
        client.msg(
            channel,
            "I can't complete your review {0} because I can't access reviewboard".format(nick)
        )
        return

    errors = []
    for review_id in review_ids:
        try:
            do_review(client, channel, nick, api_root, review_id)
        except Exception:
            # Keep going on individual failures; report them at the end.
            logger.exception("Cannot codereview cr{0}".format(review_id))
            errors.append(review_id)

    if errors:
        cr_list = ', '.join('cr{0}'.format(rid) for rid in errors)
        client.msg(channel, 'Codereview complete {0}, but I was unable to review: {1}'.format(nick, cr_list))
    else:
        client.msg(channel, 'Codereview complete {0}'.format(nick))
def main(url, group_names, days_old=7, dry_run=False):
    """Send (or, with dry_run, print) a reminder email for stale reviews.

    Finds pending review requests targeted at ``group_names`` on the
    Review Board at ``url`` that have not been updated in ``days_old``
    days, builds an HTML report, and addresses it to the submitters.
    Returns False when nothing matched, True otherwise.
    """
    # RBUSER/RBPASS are validated up front even though the authenticated
    # client call below is commented out -- presumably kept for when auth
    # is re-enabled; TODO confirm.
    try:
        user = os.environ['RBUSER']
    except KeyError:
        raise SystemExit("please set RBUSER environment variable for reviewboard user")
    try:
        passwd = os.environ['RBPASS']
    except KeyError:
        raise SystemExit("please set RBPASS environment variable for reviewboard password")
    #client = RBClient(url, user, passwd)
    client = RBClient(url)
    root = client.get_root()
    if not root:
        raise SystemExit("Error - could not get RBClient root.")
    # Fail fast if any requested group does not exist on the server.
    for g_name in group_names:
        o = get_group_id_by_name(root, g_name, dry_run=dry_run)
        if not o:
            raise SystemExit("ERROR: no group '%s' found." % g_name)
        logger.debug("Found group '%s' id=%d" % (g_name, o))
    reviews = get_reviews_for_groups(root, group_names, dry_run=dry_run)
    old_reviews = filter_reviews_older_than(root, reviews, days_old,
                                            dry_run=dry_run)
    logger.info("found %d reviews for target groups and last updated %d or more days ago" % (len(old_reviews), days_old))
    if len(old_reviews) < 1:
        logger.info("Found no reviews matching criteria, exiting")
        return False
    users = get_submitters_for_reviews(old_reviews)
    logger.debug("got user information for %d users" % len(users))
    recipients = []
    for u in users:
        recipients.append("{u.fullname} <{u.email}>".format(u=users[u]))
    table = generate_report_html_table(old_reviews, url)
    # Assemble the HTML body of the reminder mail.
    body = "<h1>ReviewBoard reminder</h1>\n"
    body += """<p>You're receiving this message because you have one or more pending code reviews on <a href="{url}">{url}</a> targeted at the '{group_names}' group(s) that have not been updated in over {days_old} days and have not been submitted. At your convenience, please evaluate these reviews and close/submit any that have been merged or discarded. 
Thank You.</p>\n""".format(url=url, days_old=days_old, group_names=", ".join(group_names))
    body += table
    body += "\n<br />\n"
    host = node()
    # NOTE: `user` is rebound here, shadowing the RBUSER value read above.
    user = getuser()
    body += """
<p><em>generated by <a href=\"https://github.com/jantman/misc-scripts/blob/master/reviewboard_reminder_mail.py">reviewboard_reminder_mail.py</a> running on {host} as {user} at {ds}</em></p>
""".format(host=host, user=user, ds=datetime.datetime.now().isoformat())
    if dry_run:
        print("Message to send:\n##############################\n{msg}\n#################################\n".format(msg=body))
        print("Would send to:\n {to}".format(to=", ".join(recipients)))
    else:
        # Mail delivery was never implemented; only dry-run works.
        raise SystemExit("Oops - never actually implemented the mail sending...")
    return True
def ship_it(self, rrid, username, password):
    """Create and publish a ship-it review on review request ``rrid``."""
    # TODO: this code is lifted from the reviewboard mach commands. If we
    #       need to make more reviewboard api calls in these tests, we
    #       should make a function in the base class to get a RBClient.
    class NoCacheTransport(SyncTransport):
        """API transport with disabled caching."""
        def enable_cache(self):
            pass

    # RBClient persists login cookies in $HOME/.rbtools-cookies.  To switch
    # users easily between calls we drop that file and reauthenticate on
    # every request; removal is best-effort.
    cookie_path = os.path.join(os.environ.get('HOME'), '.rbtools-cookies')
    try:
        os.remove(cookie_path)
    except Exception:
        pass

    api_root = RBClient(self.rburl,
                        username=username,
                        password=password,
                        transport_cls=NoCacheTransport).get_root()
    api_root.get_reviews(review_request_id=rrid).create(public=True,
                                                        ship_it=True)
def __init__(self, configuration, *args):
    '''
    Helper to build an RBTools api root
    used by MozReview below
    '''
    url, api_key, username = self.requires(configuration,
                                           'url', 'api_key', 'username')

    # Authenticate client
    client = RBClient(url, save_cookies=False, allow_caching=False)
    login_resource = client.get_path(
        'extensions/mozreview.extension.MozReviewExtension/'
        'bugzilla-api-key-logins/')
    login_resource.create(username=username, api_key=api_key)
    self.api = client.get_root()

    # Restrict reporting to the supported analyzers only.
    supported = (CLANG_TIDY, CLANG_FORMAT, MOZLINT)
    requested = configuration.get('analyzers', [CLANG_TIDY])
    self.analyzers = [a for a in requested if a in supported]
    assert len(self.analyzers) > 0, \
        'No valid analyzers for mozreview'

    self.publish_success = configuration.get('publish_success', False)
    assert isinstance(self.publish_success, bool)

    logger.info('Mozreview report enabled',
                url=url,
                username=username,
                analyzers=self.analyzers)
def get_root():
    """Log in to the configured Review Board server and return its API root."""
    rb_settings = get_config()['reviewboard']
    client = RBClient(rb_settings['server'])
    client.login(rb_settings['user'], rb_settings['password'])
    return client.get_root()
def main(): ''' Posts a review to the review with specified id indicating if the build passed or failed ''' parser = argparse.ArgumentParser() parser.add_argument("--cfg", required=True, help="Configuration file") parser.add_argument("--reviewid", required=True, help="Review id to post to") parser.add_argument("--buildurl", required=True, help="The jenkins build url") parser.add_argument("--buildstate", required=True, choices=["SUCCESS", "UNSTABLE", "FAILURE"], help="Indicates if build succeeded (1) or failed (0)") args = parser.parse_args() reviewid = args.reviewid buildurl = args.buildurl buildstate = args.buildstate config = ConfigParser.ConfigParser() try: config.read(args.cfg) client = RBClient(config.get('jrbb', 'reviewboard_server'), username=config.get('jrbb', 'reviewboard_user'), api_token=config.get('jrbb', 'reviewboard_apitoken')) except ConfigParser.NoSectionError: print "Configuration file " + args.cfg + " not found or missing items" exit(1) root = client.get_root() # Get the revision number of the latest diff from the review # pylint: disable=no-member reviewreq = root.get_review_request(review_request_id=reviewid) reviews = reviewreq.get_reviews() if buildstate == 'SUCCESS': msg = 'Successfully built changes. See ' + buildurl else: msg = 'Opps! I could not build these changes. See ' + buildurl reviews.create(body_bottom=msg, public=True) print "Posted to review " + reviewid + " build state=" + buildstate + \ ". Build url=" + buildurl
def build_api_root(url, username, api_key):
    '''
    Helper to build an RBTools api root
    used by BatchReview
    '''
    logger.info('Authenticate on Mozreview', url=url, username=username)
    rb = RBClient(url, save_cookies=False, allow_caching=False)
    # Log in through the MozReview extension's Bugzilla API-key endpoint.
    rb.get_path(
        'extensions/mozreview.extension.MozReviewExtension/'
        'bugzilla-api-key-logins/'
    ).create(username=username, api_key=api_key)
    return rb.get_root()
def get_api(server_url, username, password):
    """Returns an RBClient instance and the associated root resource.

    Hooks should use this method to gain access to the API, instead of
    instantiating their own client.

    Raises:
        HookError: The Review Board server could not be reached.
    """
    api_client = RBClient(server_url, username=username, password=password)
    try:
        api_root = api_client.get_root()
    except ServerInterfaceError as e:
        raise HookError('Could not reach the Review Board server at %s: %s'
                        % (server_url, e))

    # Bug fix: the original fell off the end of the function and implicitly
    # returned None, so callers never received the client/root pair.
    return api_client, api_root
def run(self):
    # Worker loop: consumes review-request ids forever from the shared
    # module-level queue `q`; a fresh API session is opened per item.
    while True:
        id = q.get()  # NOTE: `id` shadows the builtin
        client = RBClient(reviewboard_url, api_token=api_token)
        root = client.get_root()
        review = root.get_review_request(review_request_id=id)
        # Capped at 200 reviews; older ship-its beyond that are not seen.
        comments = review.get_reviews(max_results=200)
        for i, c in enumerate(comments):
            # Skip the final (most recent) review -- presumably the one
            # whose ship-it should be kept; TODO confirm intent.
            if i == len(comments) - 1:
                break
            # Revoke stale ship-its previously granted by the Jenkins bot.
            if c.links.user.title == jenkins_uname and c.ship_it:
                c.update(ship_it=False)
def main(): ''' Fetches the latest patch diff from the review given the review id passed as a parameter and writes it an output file. ''' parser = argparse.ArgumentParser() parser.add_argument("--reviewid", required=True, help="review id to post to") parser.add_argument("--cfg", required=True, help="Configuration file") parser.add_argument("--out", required=True, help="Output file location (e.g. patch.diff)") args = parser.parse_args() reviewid = args.reviewid config = ConfigParser.ConfigParser() try: config.read(args.cfg) client = RBClient(config.get('jrbb', 'reviewboard_server'), username=config.get('jrbb', 'reviewboard_user'), api_token=config.get('jrbb', 'reviewboard_apitoken')) except ConfigParser.NoSectionError: print "Configuration file " + args.cfg + " not found or missing items" exit(1) root = client.get_root() # Get the revision number of the latest diff from the review # pylint: disable=no-member reviewrequest = root.get_review_request(review_request_id=reviewid) diffrevision = reviewrequest.get_latest_diff().revision print "Latest diff revision for review", reviewid, "is", diffrevision diff = root.get_diff(review_request_id=reviewid, diff_revision=diffrevision) patch = diff.get_patch() print "Retrieved the following patch file" print "-------------------------------------------------------------------" print patch.data print "-------------------------------------------------------------------" outfile = open(args.out, "w") print >> outfile, patch.data outfile.close() print "Patch written to " + args.out
def ProcessReviewRequest(payload, tool_settings):
    """Execute an automated review on a review request.

    The tool to run is selected from the celery routing key; failures are
    reported back to the server via _update_tool_execution.  Returns False
    on any failure.
    """
    # The first dotted component of the routing key names the tool entry
    # point to execute.
    routing_key = ProcessReviewRequest.request.delivery_info['routing_key']
    route_parts = routing_key.partition('.')
    tool_ep = route_parts[0]
    logger.info("Request to execute review tool '%s' for %s" % (tool_ep,
                                                                payload['url']))
    try:
        logger.info("Initializing RB API")
        api_client = RBClient(payload['url'],
                              cookie_file=COOKIE_FILE,
                              agent=AGENT,
                              session=payload['session'])
        api_root = api_client.get_root()
    # NOTE(review): bare except also swallows SystemExit/KeyboardInterrupt.
    except:
        logger.error("Could not contact RB server at '%s'" % payload['url'])
        return False
    logger.info("Loading requested tool '%s'" % tool_ep)
    # Resolve the tool via its 'reviewbot.tools' entry point; there must be
    # exactly one match.
    tools = []
    for ep in pkg_resources.iter_entry_points(group='reviewbot.tools',
                                              name=tool_ep):
        tools.append(ep.load())
    if len(tools) > 1:
        _update_tool_execution(api_root, payload['request'], status=FAILED,
                               msg="Tool '%s' is ambiguous" % tool_ep)
        return False
    elif len(tools) == 0:
        _update_tool_execution(api_root, payload['request'], status=FAILED,
                               msg="Tool '%s' not found" % tool_ep)
        return False
    tool = tools[0]
    try:
        logger.info("Initializing review")
        review = Review(api_root, payload['request'],
                        payload['review_settings'])
    # Python 2 exception syntax.
    except Exception, e:
        _update_tool_execution(api_root, payload['request'], status=FAILED,
                               msg="Error initializing review: %s" % str(e))
        return False
def get_open_reviews(args): # get open reviews to a specified user, group, etc. args['status'] = 'pending' args['max_results'] = MAX_RESULTS client = RBClient(RB_URL) root = client.get_root() if not root: print "Error - could not get RBClient root." return False req = root.get_review_requests(**args) print "\n\nGot %d pending/unsubmitted reviews" % req.total_results for review in req: print "%d - %s - %s" % (review.id, review.get_submitter().username, review.summary)
def shipit(use_merge):
    """Land the current review branch onto master and close its review.

    Fetches, rebases (or merges when use_merge is true) onto origin/master,
    runs scripts/test.sh if present, merges the branch to master, pushes,
    deletes the branch locally and remotely, then marks the associated
    Review Board request as submitted.
    """
    subprocess.check_call(["git", "fetch"])
    if use_merge:
        subprocess.check_call(["git", "merge", "origin/master"])
    else:
        subprocess.check_call(["git", "rebase", "origin/master"])
    # run spec
    try:
        if os.path.exists("scripts/test.sh"):
            subprocess.check_call(["scripts/test.sh"])
    except subprocess.CalledProcessError:
        # Abort the landing when the test script fails.
        exit(1)
    branch = current_branch()
    # The review request id is stored in branch config by the review flow.
    rid = get_branch_info("rid")
    if not rid:
        print "You don't have a review branch."
        exit(1)
    else:
        rid = int(rid)
    # git checkout master
    subprocess.check_call(["git", "checkout", "master"])
    # git pull origin master
    subprocess.check_call(["git", "pull", "origin", "master"])
    # git merge current_branch
    subprocess.check_call(["git", "merge", branch])
    # git push origin master
    subprocess.check_call(["git", "push", "origin", "master"])
    # git push origin :current_branch (deletes the remote branch)
    subprocess.check_call(["git", "push", "origin", ":" + branch])
    # git branch -D current_branch
    subprocess.check_call(["git", "branch", "-D", branch])
    # close reviewboard request
    from rbtools.api.client import RBClient
    client = RBClient('http://reviewboard.nodeswork.com')
    root = client.get_root()
    request = root.get_review_request(review_request_id=rid)
    request.update(status="submitted")
def run(old_value, new_value, ref):
    """Create and publish a Review Board request for a pushed commit range.

    Intended as a git hook: ``old_value``/``new_value`` are the before and
    after commit SHAs and ``ref`` is the updated ref name.  The request is
    submitted on behalf of the mapped author with the mapped reviewer.
    """
    diff = call_cmd("git diff %s..%s"%(old_value, new_value))
    info(diff)
    ci_range = "%s..%s"%(old_value, new_value)
    # get author name
    cmd = "git log --format=%cn -1 " + new_value
    author = call_cmd(cmd).strip()
    # Normalize the git committer name to a Review Board account name.
    if author in AUTHOR_MAP:
        author = AUTHOR_MAP[author]
    reviewer = REVIEWER_MAP[author]
    # get summary desc
    cmd = "git log --format=%s " + ci_range
    logs = call_cmd(cmd)
    summary = logs.split(os.linesep)[0]
    cmd = "git log --pretty=fuller " + ci_range
    desc = call_cmd(cmd)
    # Double quotes break the request payload downstream; replace with '@'.
    summary = summary.replace("\"", "@")
    desc = desc.replace("\"", "@")
    repo_branch = ref.split("/")[-1]
    # Create the review request (submitted as the author via admin creds).
    client = RBClient(rbcfg["rbserver"], username=rbcfg["rbadmin"],
                      password=rbcfg["rbadminpw"])
    root = client.get_root()
    request_data = {
        "repository" : rbcfg["rbrepo"],
        "submit_as" : author,
    }
    r = root.get_review_requests().create(**request_data)
    # Validate the diff server-side before uploading it for real.
    vl = root.get_diff_validation()
    basedir = "/"
    #info("------------------"+diff)
    vl.validate_diff(rbcfg["rbrepo"], diff, base_dir=basedir)
    r.get_diffs().upload_diff(diff, base_dir=basedir)
    # Fill in the draft and publish it in one update.
    draft = r.get_draft()
    update_data = {
        "branch" : repo_branch,
        "summary" : summary,
        "description" : desc,
        "target_people" : reviewer,
        "public" : True,
    }
    ret = draft.update(**update_data)
    info("repo:<%s> rev:<%s> rid:<%s>"%(rbcfg["rbserver"], ci_range, r.id))
def ProcessReviewRequest(payload, tool_settings):
    """Execute an automated review on a review request.

    The tool to run is selected from the celery routing key; failures are
    reported back to the server via _update_tool_execution.  Returns False
    on any failure.
    """
    # The first dotted component of the routing key names the tool entry
    # point to execute.
    routing_key = ProcessReviewRequest.request.delivery_info['routing_key']
    route_parts = routing_key.partition('.')
    tool_ep = route_parts[0]
    logger.info(
        "Request to execute review tool '%s' for %s" % (
            tool_ep, payload['url']))
    try:
        logger.info("Initializing RB API")
        api_client = RBClient(
            payload['url'],
            cookie_file=COOKIE_FILE,
            agent=AGENT,
            session=payload['session'])
        api_root = api_client.get_root()
    # NOTE(review): bare except also swallows SystemExit/KeyboardInterrupt.
    except:
        logger.error("Could not contact RB server at '%s'" % payload['url'])
        return False
    logger.info("Loading requested tool '%s'" % tool_ep)
    # Resolve the tool via its 'reviewbot.tools' entry point; there must be
    # exactly one match.
    tools = []
    for ep in pkg_resources.iter_entry_points(group='reviewbot.tools',
                                              name=tool_ep):
        tools.append(ep.load())
    if len(tools) > 1:
        _update_tool_execution(api_root, payload['request'], status=FAILED,
                               msg="Tool '%s' is ambiguous" % tool_ep)
        return False
    elif len(tools) == 0:
        _update_tool_execution(api_root, payload['request'], status=FAILED,
                               msg="Tool '%s' not found" % tool_ep)
        return False
    tool = tools[0]
    try:
        logger.info("Initializing review")
        review = Review(api_root, payload['request'],
                        payload['review_settings'])
    # Python 2 exception syntax.
    except Exception, e:
        _update_tool_execution(api_root, payload['request'], status=FAILED,
                               msg="Error initializing review: %s" % str(e))
        return False
def update_tools_list(panel, payload):
    """Update the RB server with installed tools.

    This will detect the installed analysis tool plugins
    and inform Review Board of them.
    """
    logging.info("Request to refresh installed tools from '%s'"
                 % payload['url'])
    logging.info("Iterating Tools")
    # Collect a descriptor dict for every tool plugin whose runtime
    # dependencies are available on this worker.
    tools = []
    for ep in pkg_resources.iter_entry_points(group='reviewbot.tools'):
        entry_point = ep.name
        tool_class = ep.load()
        tool = tool_class()
        logging.info("Tool: %s" % entry_point)
        if tool.check_dependencies():
            tools.append({
                'name': tool_class.name,
                'entry_point': entry_point,
                'version': tool_class.version,
                'description': tool_class.description,
                'tool_options': json.dumps(tool_class.options),
            })
        else:
            logging.warning("%s dependency check failed." % ep.name)
    logging.info("Done iterating Tools")
    # The payload POSTed to the server is a JSON string, not a list.
    tools = json.dumps(tools)
    hostname = panel.hostname
    try:
        api_client = RBClient(
            payload['url'],
            cookie_file=COOKIE_FILE,
            agent=AGENT,
            session=payload['session'])
        api_root = api_client.get_root()
    # Python 2 exception syntax.
    except Exception, e:
        logging.error("Could not reach RB server: %s" % str(e))
        return {'error': 'Could not reach RB server.'}
def get_open_reviews(args): """ get open reviews to a specified user, group, etc. """ args['status'] = 'pending' if 'max_results' not in args: args['max_results'] = 100 client = RBClient(RB_URL) root = client.get_root() if not root: print "Error - could not get RBClient root." return False req = root.get_review_requests(**args) ret = {'total': req.total_results, 'reviews': []} for review in req: ret['reviews'].append("(%s) %s <%s/r/%d/>" % (review.get_submitter().username, review.summary, RB_URL, review.id)) return ret
def get_rb_client(self):
    """Build an RBClient from ``.reviewboardrc`` and ensure it is logged in.

    Side effects: sets ``self.repository``, ``self.branch`` and
    ``self.target_groups`` from the config file.  Returns the RBClient.
    """
    import ast
    import getpass

    options = {}
    with open(".reviewboardrc") as reviewboardrc:
        for line in reviewboardrc:
            # Robustness fix: tolerate comments and blank lines, which
            # previously crashed the unconditional split("=").
            stripped = line.strip()
            if not stripped or stripped.startswith("#"):
                continue
            # literal_eval parses the quoted config values without the
            # arbitrary-code-execution risk of eval(); split("=", 1)
            # tolerates '=' inside the value.
            k, v = stripped.split("=", 1)
            options[k.strip()] = ast.literal_eval(v.strip())

    rbclient = RBClient(options['REVIEWBOARD_URL'])
    self.repository = options['REPOSITORY']
    self.branch = options['BRANCH']
    # .get() returns None when absent -- same as the old has_key() dance.
    self.target_groups = options.get('TARGET_GROUPS')

    if rbclient.get_root().get_session()['authenticated']:
        return rbclient

    # NOTE(review): prompt lines reconstructed from redacted source;
    # confirm against the original.
    username = raw_input("Enter review board Username: ")
    password = getpass.getpass("Enter password: ")
    rbclient.login(username, password)
    return rbclient
def process_review_requests(client, channel, nick, review_ids):
    """Processes a list of review request ids using a shared client.

    Reports progress/failures to ``channel`` via ``client.msg``; failed
    review ids are collected and summarized at the end.
    """
    logger.info("Starting codereview of: {0}".format(review_ids))
    api = RBClient(settings.CODEREVIEW_REVIEWBOARD_API_URL,
                   username=settings.CODEREVIEW_REVIEWBOARD_API_USERNAME,
                   password=settings.CODEREVIEW_REVIEWBOARD_API_PASSWORD)
    try:
        api_root = api.get_root()
    except Exception:
        # Bug fix: was a bare `except:`, which also swallowed SystemExit
        # and KeyboardInterrupt.  Still best-effort: log and notify.
        logger.exception("Cannot access reviewboard")
        client.msg(
            channel,
            "I can't complete your review {0} because I can't access reviewboard"
            .format(nick))
        return

    errors = []
    for review_id in review_ids:
        try:
            do_review(client, channel, nick, api_root, review_id)
        except Exception:
            # Keep going on individual failures; report them at the end.
            logger.exception("Cannot codereview cr{0}".format(review_id))
            errors.append(review_id)

    if errors:
        cr_list = ', '.join('cr{0}'.format(rid) for rid in errors)
        client.msg(
            channel,
            'Codereview complete {0}, but I was unable to review: {1}'.format(
                nick, cr_list))
    else:
        client.msg(channel, 'Codereview complete {0}'.format(nick))
def get_open_reviews(args):
    """ get open reviews to a specified user, group, etc. """
    # Only open requests; default the result cap when the caller set none.
    args['status'] = 'pending'
    args.setdefault('max_results', 100)

    client = RBClient(REVIEWBOARD_URL)
    # Authenticate only when credentials are configured.
    if REVIEWBOARD_USERNAME and REVIEWBOARD_PASSWORD:
        client.login(REVIEWBOARD_USERNAME, REVIEWBOARD_PASSWORD)

    root = client.get_root()
    if not root:
        logger.error(u'Could not get RBClient root')
        return None

    try:
        req = root.get_review_requests(**args)
    except APIError:
        logger.exception(u'Error querying API')
        return None

    review_fmt = u"[{user}] {summary} ({url}/r/{id})"
    return {
        'total': req.total_results,
        'reviews': [review_fmt.format(user=r.get_submitter().username,
                                      summary=r.summary,
                                      url=REVIEWBOARD_URL,
                                      id=r.id)
                    for r in req],
    }
def update_tools_list(panel, payload):
    """Report this worker's installed analysis tools to Review Board.

    Every ``reviewbot.tools`` entry point is loaded; tools whose
    dependency check passes are serialized and POSTed to the Review Board
    server named in ``payload``.

    Args:
        panel (celery.worker.control.Panel):
            The worker control panel.

        payload (dict):
            The payload as assembled by the extension.

    Returns:
        dict:
        A status dictionary describing success or failure.
    """
    logger.info('Request to refresh installed tools from "%s"',
                payload['url'])
    logger.info('Iterating Tools')

    tool_specs = []

    for entry in pkg_resources.iter_entry_points(group='reviewbot.tools'):
        cls = entry.load()
        logger.info('Tool: %s', entry.name)

        if not cls().check_dependencies():
            logger.warning('%s dependency check failed.', entry.name)
            continue

        tool_specs.append({
            'name': cls.name,
            'entry_point': entry.name,
            'version': cls.version,
            'description': cls.description,
            'tool_options': json.dumps(cls.options),
            'timeout': cls.timeout,
            'working_directory_required': cls.working_directory_required,
        })

    logger.info('Done iterating Tools')

    try:
        api_root = RBClient(payload['url'],
                            cookie_file=COOKIE_FILE,
                            agent=AGENT,
                            session=payload['session']).get_root()
    except Exception as e:
        logger.exception('Could not reach RB server: %s', e)
        return {
            'status': 'error',
            'error': 'Could not reach Review Board server: %s' % e,
        }

    try:
        _get_extension_resource(api_root).get_tools().create(
            hostname=panel.hostname,
            tools=json.dumps(tool_specs))
    except Exception as e:
        logger.exception('Problem POSTing tools: %s', e)
        return {
            'status': 'error',
            'error': 'Problem uploading tools: %s' % e,
        }

    return {
        'status': 'ok',
        'tools': tool_specs,
    }
class MozReviewBot(object):
    """Base class for bots that react to MozReview commit messages.

    Pulls commit notifications from a Pulse queue, wraps each commit in a
    BatchReview and hands it to process_commit(), which subclasses
    override.  Configuration is resolved from __init__ arguments, then the
    optional config file, then environment variables.
    """

    def __init__(self, config_path=None, reviewboard_url=None,
                 reviewboard_user=None, reviewboard_password=None,
                 pulse_host=None, pulse_port=None, pulse_userid=None,
                 pulse_password=None, exchange=None, queue=None,
                 routing_key=None, pulse_timeout=None, pulse_ssl=False,
                 repo_root=None, logger=None):
        if logger is None:
            self.logger = logging.getLogger('mozreviewbot')
        else:
            self.logger = logger

        # We use options passed into __init__ preferentially. If any of these
        # are not specified, we next check the configuration file, if any.
        # Finally, we use environment variables.
        if config_path and not os.path.isfile(config_path):
            # ConfigParser doesn't seem to throw if it is unable to find the
            # config file so we'll explicitly check that it exists.
            self.logger.error('could not locate config file: %s' % (
                config_path))
            config_path = None

        if config_path:
            try:
                config = ConfigParser()
                config.read(config_path)
                reviewboard_url = (reviewboard_url
                                   or config.get('reviewboard', 'url'))
                reviewboard_user = (reviewboard_user
                                    or config.get('reviewboard', 'user'))
                reviewboard_password = (reviewboard_password or config.get(
                    'reviewboard', 'password'))
                pulse_host = pulse_host or config.get('pulse', 'host')
                pulse_port = pulse_port or config.get('pulse', 'port')
                pulse_userid = pulse_userid or config.get('pulse', 'userid')
                pulse_password = pulse_password or config.get(
                    'pulse', 'password')
                exchange = exchange or config.get('pulse', 'exchange')
                queue = queue or config.get('pulse', 'queue')
                routing_key = routing_key or config.get('pulse',
                                                        'routing_key')
                pulse_timeout = pulse_timeout or config.get('pulse',
                                                            'timeout')
                if pulse_ssl is None:
                    pulse_ssl = config.get('pulse', 'ssl')
            except NoSectionError as e:
                self.logger.error('configuration file missing section: %s' %
                                  e.section)
            try:
                repo_root = repo_root or config.get('hg', 'repo_root')
            except (NoOptionError, NoSectionError):
                # Subclasses do not need to define repo root if they do not
                # plan on using the hg functionality.
                pass

            # keep config around in case any subclasses would like to extract
            # options from it.
            self.config = config
        else:
            self.config = None

        # Environment-variable fallbacks (lowest priority).
        reviewboard_url = reviewboard_url or os.environ.get('REVIEWBOARD_URL')
        pulse_host = pulse_host or os.environ.get('PULSE_HOST')
        pulse_port = pulse_port or os.environ.get('PULSE_PORT')

        self.rbclient = RBClient(reviewboard_url,
                                 username=reviewboard_user,
                                 password=reviewboard_password)
        self.api_root = self.rbclient.get_root()

        self.conn = Connection(hostname=pulse_host, port=pulse_port,
                               userid=pulse_userid, password=pulse_password,
                               ssl=pulse_ssl)
        self.exchange = Exchange(exchange, type='topic', durable=True)
        self.queue = Queue(name=queue, exchange=self.exchange, durable=True,
                           routing_key=routing_key, exclusive=False,
                           auto_delete=False)

        self.pulse_timeout = float(pulse_timeout)
        self.repo_root = repo_root

        # Locate the `hg` executable on PATH; stays None when not found.
        self.hg = None
        for DIR in os.environ['PATH'].split(os.pathsep):
            p = os.path.join(DIR, 'hg')
            if os.path.exists(p):
                self.hg = p

    def _get_available_messages(self):
        """Drain the Pulse queue and return [(body, message), ...] pairs."""
        messages = []

        def onmessage(body, message):
            messages.append((body, message))

        consumer = self.conn.Consumer([self.queue], callbacks=[onmessage],
                                      auto_declare=True)
        with consumer:
            try:
                self.conn.drain_events(timeout=self.pulse_timeout)
            except socket.timeout:
                # Timing out just means no messages were available.
                pass

        return messages

    def _run_hg(self, hg_args):
        """Run `hg` with the given arguments and return its stdout."""
        # TODO: Use hgtool.
        args = [self.hg] + hg_args
        env = dict(os.environ)
        env['HGENCODING'] = 'utf-8'

        # NOTE(review): this handle is never closed -- leaks one fd per call.
        null = open(os.devnull, 'w')

        # Execute at / to prevent Mercurial's path traversal logic from
        # kicking in and picking up unwanted config files.
        return subprocess.check_output(args,
                                       stdin=null,
                                       stderr=null,
                                       env=env,
                                       cwd='/')

    def ensure_hg_repo_exists(self, landing_repo_url, repo_url,
                              pull_rev=None):
        """Clone/pull the repo into a local cache dir; return its path.

        The cache directory name is the SHA-1 of the (landing) repo URL.
        """
        # TODO: Use the root changeset in each repository as an identifier.
        #       This will enable "forks" to share the same local clone.
        #       The "share" extension now has support for this.
        #       Read hg help -e share for details about "pooled storage."
        #       We should probably deploy that.
        url = landing_repo_url or repo_url

        sha1 = hashlib.sha1(url).hexdigest()
        repo_path = os.path.join(self.repo_root, sha1)

        if not os.path.exists(repo_path):
            args = ['clone', url, repo_path]
            self.logger.debug('cloning %s' % url)
            self._run_hg(args)
            self.logger.debug('finished cloning %s' % url)

        args = ['-R', repo_path, 'pull', repo_url]

        if pull_rev:
            args.extend(['-r', pull_rev])

        self.logger.debug('pulling %s' % repo_url)
        self._run_hg(args)
        self.logger.debug('finished pulling %s' % repo_url)

        return repo_path

    def hg_commit_changes(self, repo_path, node, diff_context=None):
        """Obtain information about what changed in a Mercurial commit.

        The return value is a tuple of:

          (set(adds), set(dels), set(mods), None, diff)

        The first 4 items list what files changed in the changeset. The
        last item is a unified diff of the changeset.

        File copies are currently not returned. ``None`` is being used as
        a placeholder until support is needed.
        """
        # Random UUIDs as delimiters so file names cannot collide with them.
        part_delim = str(uuid.uuid4())
        item_delim = str(uuid.uuid4())

        parts = [
            '{join(file_adds, "%s")}' % item_delim,
            '{join(file_dels, "%s")}' % item_delim,
            '{join(file_mods, "%s")}' % item_delim,
            '{join(file_copies, "%s")}' % item_delim,
        ]

        template = part_delim.join(parts)

        self._run_hg(['-R', repo_path, 'up', '-C', node])
        res = self._run_hg(
            ['-R', repo_path, 'log', '-r', node, '-T', template])

        diff_args = ['-R', repo_path, 'diff', '-c', node]
        if diff_context is not None:
            diff_args.extend(['-U', str(diff_context)])
        diff = self._run_hg(diff_args)

        adds, dels, mods, copies = res.split(part_delim)
        adds = set(f for f in adds.split(item_delim) if f)
        dels = set(f for f in dels.split(item_delim) if f)
        mods = set(f for f in mods.split(item_delim) if f)
        # TODO parse the copies.

        return adds, dels, mods, None, diff

    def strip_nonpublic_changesets(self, repo_path):
        """Strip non-public changesets from a repository.

        Pulling changesets over and over results in many heads in a
        repository. This makes Mercurial slow. So, we prune non-public
        changesets/heads to keep repositories fast.
        """
        self._run_hg([
            '-R', repo_path,
            '--config', 'extensions.strip=',
            'strip', '--no-backup', '-r', 'not public()'
        ])

    def get_commit_files(self, commit):
        """Fetches a list of files that were changed by this commit."""
        rrid = commit['review_request_id']
        diff_revision = commit['diffset_revision']

        # Page through the files resource until all entries are collected.
        start = 0
        files = []
        while True:
            result = self.api_root.get_files(review_request_id=rrid,
                                             diff_revision=diff_revision,
                                             start=start)
            files.extend(result)
            start += result.num_items
            if result.num_items == 0 or start >= result.total_results:
                break
        return files

    def handle_available_messages(self):
        """Process all queued Pulse messages, acking each unconditionally."""
        for body, message in self._get_available_messages():
            payload = body['payload']
            repo_url = payload['repository_url']
            landing_repo_url = payload['landing_repository_url']
            commits = payload['commits']
            # TODO: should we allow process commits to signal that we should
            #       skip acknowledging the message?
            try:
                for commit in commits:
                    rrid = commit['review_request_id']
                    diff_revision = commit['diffset_revision']

                    review = BatchReview(self.api_root, rrid, diff_revision)
                    self.process_commit(review, landing_repo_url, repo_url,
                                        commit)
            finally:
                # This prevents the queue from growing indefinitely but
                # prevents us from fixing whatever caused the exception
                # and restarting the bot to handle the message.
                message.ack()

    def listen_forever(self):
        """Block forever, repeatedly draining and handling Pulse messages."""
        while True:
            self.handle_available_messages()

    def process_commit(self, review, repo_url, commits):
        # NOTE(review): handle_available_messages() invokes this hook with
        # FOUR arguments (review, landing_repo_url, repo_url, commit), but
        # this default accepts only three -- subclasses must override it
        # with the four-argument signature or a TypeError is raised.
        # TODO: confirm and fix the base signature.
        pass
"ERROR: You must specify a reviewboard server URL (-u|--url) to use" ) sys.exit(2) if not options.repo: print("ERROR: You must specify a repo (-r|--repo) to find reviews for") sys.exit(2) if not options.branch: print( "ERROR: You must specify a branch (-b|--branch) to find reviews for" ) sys.exit(2) client = RBClient(options.url, username=RB_USER, password=RB_PASSWORD) root = client.get_root() if not root: print("Error - could not get RBClient root.") sys.exit(1) repo = get_repository_id_by_name(root, options.repo, verbose=VERBOSE) if repo is None: print("ERROR: Could not find ReviewBoard repository with name '%s'" % options.repo) sys.exit(3) reviews = get_reviews_for_branch(root, repo, options.branch, verbose=VERBOSE) if len(reviews) == 0:
class RepositoryMatchTests(kgb.SpyAgency, RBTestBase): """Unit tests for remote repository matching.""" payloads = { 'http://localhost:8080/api/': { 'mimetype': 'application/vnd.reviewboard.org.root+json', 'rsp': { 'uri_templates': {}, 'links': { 'self': { 'href': 'http://localhost:8080/api/', 'method': 'GET', }, 'repositories': { 'href': 'http://localhost:8080/api/repositories/', 'method': 'GET', }, }, 'stat': 'ok', }, }, (_MATCH_URL_BASE + '&path=git%40example.com%3Atest.git'): { 'mimetype': 'application/vnd.reviewboard.org.repositories+json', 'rsp': { 'repositories': [_REPO1], 'links': {}, 'total_results': 1, 'stat': 'ok', }, }, (_MATCH_URL_BASE + '&path=git%40example.com%3Atest2.git'): { 'mimetype': 'application/vnd.reviewboard.org.repositories+json', 'rsp': { 'repositories': [_REPO2], 'links': {}, 'total_results': 1, 'stat': 'ok', }, }, (_MATCH_URL_BASE + '&path=http%3A%2F%2Fexample.com%2Ftest3.git'): { 'mimetype': 'application/vnd.reviewboard.org.repositories+json', 'rsp': { 'repositories': [_REPO1, _REPO3], 'links': {}, 'total_results': 2, 'stat': 'ok', }, }, (_MATCH_URL_BASE + '&path=git%40example.com%3Atest4.git'): { 'mimetype': 'application/vnd.reviewboard.org.repositories+json', 'rsp': { 'repositories': [], 'links': {}, 'total_results': 0, 'stat': 'ok', }, }, (_MATCH_URL_BASE): { 'mimetype': 'application/vnd.reviewboard.org.repositories+json', 'rsp': { 'repositories': [ _REPO1, _REPO2, ], 'links': {}, 'total_results': 2, 'stat': 'ok', }, }, } def setUp(self): super(RepositoryMatchTests, self).setUp() @self.spy_for(urlopen) def _urlopen(url, **kwargs): url = url.get_full_url() try: payload = self.payloads[url] except KeyError: print('Test requested unexpected URL "%s"' % url) return MockResponse( 404, {}, json.dumps({ 'rsp': { 'stat': 'fail', 'err': { 'code': 100, 'msg': 'Object does not exist', }, }, })) return MockResponse(200, { 'Content-Type': payload['mimetype'], }, json.dumps(payload['rsp'])) self.api_client = 
RBClient('http://*****:*****@example.com:test.git') self.assertEqual(repository.id, 1) def test_find_matching_server_repository_with_mirror_path_match(self): """Testing get_repository_resource with mirror path match""" repository, info = get_repository_resource( self.root_resource, repository_paths='[email protected]:test2.git') self.assertEqual(repository.id, 2) def test_find_matching_server_repository_with_multiple_matches(self): """Testing get_repository_resource with multiple matching paths""" repository, info = get_repository_resource( self.root_resource, repository_paths='http://example.com/test3.git') self.assertEqual(repository.id, 1) def test_find_matching_server_repository_no_match(self): """Testing get_repository_resource with no match""" repository, info = get_repository_resource( self.root_resource, repository_paths='[email protected]:test4.git') self.assertIsNone(repository) self.assertIsNone(info)
class SVNRepositoryInfoTests(SpyAgency, SCMClientTests):
    """Unit tests for rbtools.clients.svn.SVNRepositoryInfo."""

    # Canned HTTP responses, keyed by request URL.  setUp() installs a fake
    # urlopen that serves these instead of hitting a real server.
    payloads = {
        'http://localhost:8080/api/': {
            'mimetype': 'application/vnd.reviewboard.org.root+json',
            'rsp': {
                'uri_templates': {},
                'links': {
                    'self': {
                        'href': 'http://localhost:8080/api/',
                        'method': 'GET',
                    },
                    'repositories': {
                        'href': 'http://localhost:8080/api/repositories/',
                        'method': 'GET',
                    },
                },
                'stat': 'ok',
            },
        },
        'http://localhost:8080/api/repositories/?tool=Subversion': {
            'mimetype': 'application/vnd.reviewboard.org.repositories+json',
            'rsp': {
                'repositories': [
                    {
                        # This one doesn't have a mirror_path, to emulate
                        # Review Board 1.6.
                        'id': 1,
                        'name': 'SVN Repo 1',
                        'path': 'https://svn1.example.com/',
                        'links': {
                            'info': {
                                'href': ('https://localhost:8080/api/'
                                         'repositories/1/info/'),
                                'method': 'GET',
                            },
                        },
                    },
                    {
                        'id': 2,
                        'name': 'SVN Repo 2',
                        'path': 'https://svn2.example.com/',
                        'mirror_path': 'svn+ssh://svn2.example.com/',
                        'links': {
                            'info': {
                                'href': ('https://localhost:8080/api/'
                                         'repositories/2/info/'),
                                'method': 'GET',
                            },
                        },
                    },
                ],
                # Paged listing: a "next" link leads to page 2 below.
                'links': {
                    'next': {
                        'href': ('http://localhost:8080/api/repositories/'
                                 '?tool=Subversion&page=2'),
                        'method': 'GET',
                    },
                },
                'total_results': 3,
                'stat': 'ok',
            },
        },
        'http://localhost:8080/api/repositories/?tool=Subversion&page=2': {
            'mimetype': 'application/vnd.reviewboard.org.repositories+json',
            'rsp': {
                'repositories': [
                    {
                        'id': 3,
                        'name': 'SVN Repo 3',
                        'path': 'https://svn3.example.com/',
                        'mirror_path': 'svn+ssh://svn3.example.com/',
                        'links': {
                            'info': {
                                'href': ('https://localhost:8080/api/'
                                         'repositories/3/info/'),
                                'method': 'GET',
                            },
                        },
                    },
                ],
                'total_results': 3,
                'stat': 'ok',
            },
        },
        'https://localhost:8080/api/repositories/1/info/': {
            'mimetype': 'application/vnd.reviewboard.org.repository-info+json',
            'rsp': {
                'info': {
                    'uuid': 'UUID-1',
                    'url': 'https://svn1.example.com/',
                    'root_url': 'https://svn1.example.com/',
                },
                'stat': 'ok',
            },
        },
        'https://localhost:8080/api/repositories/2/info/': {
            'mimetype': 'application/vnd.reviewboard.org.repository-info+json',
            'rsp': {
                'info': {
                    'uuid': 'UUID-2',
                    'url': 'https://svn2.example.com/',
                    'root_url': 'https://svn2.example.com/',
                },
                'stat': 'ok',
            },
        },
        'https://localhost:8080/api/repositories/3/info/': {
            'mimetype': 'application/vnd.reviewboard.org.repository-info+json',
            'rsp': {
                'info': {
                    'uuid': 'UUID-3',
                    'url': 'https://svn3.example.com/',
                    'root_url': 'https://svn3.example.com/',
                },
                'stat': 'ok',
            },
        },
    }

    def setUp(self):
        super(SVNRepositoryInfoTests, self).setUp()

        def _urlopen(url, **kwargs):
            # Serve the canned payload for the requested URL, or a REST
            # "Object does not exist" error for anything unexpected.
            url = url.get_full_url()

            try:
                payload = self.payloads[url]
            except KeyError:
                return MockResponse(404, {}, json.dumps({
                    'rsp': {
                        'stat': 'fail',
                        'err': {
                            'code': 100,
                            'msg': 'Object does not exist',
                        },
                    },
                }))

            return MockResponse(
                200,
                {
                    'Content-Type': payload['mimetype'],
                },
                json.dumps(payload['rsp']))

        self.spy_on(urlopen, call_fake=_urlopen)

        self.api_client = RBClient('http://localhost:8080/')
        self.root_resource = self.api_client.get_root()

    def test_find_server_repository_info_with_path_match(self):
        """Testing SVNRepositoryInfo.find_server_repository_info with path
        matching
        """
        info = SVNRepositoryInfo('https://svn1.example.com/', '/', '')

        repo_info = info.find_server_repository_info(self.root_resource)
        self.assertEqual(repo_info, info)
        self.assertEqual(repo_info.repository_id, 1)

    def test_find_server_repository_info_with_mirror_path_match(self):
        """Testing SVNRepositoryInfo.find_server_repository_info with
        mirror_path matching
        """
        info = SVNRepositoryInfo('svn+ssh://svn2.example.com/', '/', '')

        repo_info = info.find_server_repository_info(self.root_resource)
        self.assertEqual(repo_info, info)
        self.assertEqual(repo_info.repository_id, 2)

    def test_find_server_repository_info_with_uuid_match(self):
        """Testing SVNRepositoryInfo.find_server_repository_info with UUID
        matching
        """
        # No path/mirror_path matches here; a fresh repo info object is
        # returned for the UUID match, hence assertNotEqual below.
        info = SVNRepositoryInfo('svn+ssh://blargle/', '/', 'UUID-3')

        repo_info = info.find_server_repository_info(self.root_resource)
        self.assertNotEqual(repo_info, info)
        self.assertEqual(repo_info.repository_id, 3)

    def test_relative_paths(self):
        """Testing SVNRepositoryInfo._get_relative_path"""
        info = SVNRepositoryInfo('http://svn.example.com/svn/', '/', '')
        self.assertEqual(info._get_relative_path('/foo', '/bar'), None)
        self.assertEqual(info._get_relative_path('/', '/trunk/myproject'),
                         None)
        self.assertEqual(info._get_relative_path('/trunk/myproject', '/'),
                         '/trunk/myproject')
        self.assertEqual(
            info._get_relative_path('/trunk/myproject', ''),
            '/trunk/myproject')
        self.assertEqual(
            info._get_relative_path('/trunk/myproject', '/trunk'),
            '/myproject')
        self.assertEqual(
            info._get_relative_path('/trunk/myproject', '/trunk/myproject'),
            '/')
class SVNRepositoryMatchTests(kgb.SpyAgency, SCMClientTestCase):
    """Unit tests for rbtools.clients.svn.SVNRepositoryInfo."""

    # Canned HTTP responses, keyed by request URL.  setUp() installs a fake
    # urlopen that serves these.  URL keys are assembled from the module's
    # _MATCH_URL_* constants.
    payloads = {
        'http://localhost:8080/api/': {
            'mimetype': 'application/vnd.reviewboard.org.root+json',
            'rsp': {
                'uri_templates': {},
                'links': {
                    'self': {
                        'href': 'http://localhost:8080/api/',
                        'method': 'GET',
                    },
                    'repositories': {
                        'href': 'http://localhost:8080/api/repositories/',
                        'method': 'GET',
                    },
                },
                'stat': 'ok',
            },
        },
        (_MATCH_URL_BASE + '?' + _MATCH_URL_FIELDS +
         '&only-links=info&path=https%3A%2F%2Fsvn1.example.com%2F&' +
         _MATCH_URL_TOOL): {
            'mimetype': 'application/vnd.reviewboard.org.repositories+json',
            'rsp': {
                'repositories': [
                    {
                        # This one doesn't have a mirror_path, to emulate
                        # Review Board 1.6.
                        'id': 1,
                        'name': 'SVN Repo 1',
                        'path': 'https://svn1.example.com/',
                        'links': {
                            'info': {
                                'href': ('http://localhost:8080/api/'
                                         'repositories/1/info/'),
                                'method': 'GET',
                            },
                        },
                    },
                ],
                'links': {},
                'total_results': 1,
                'stat': 'ok',
            },
        },
        (_MATCH_URL_BASE + '?' + _MATCH_URL_FIELDS +
         '&only-links=info&path=svn%2Bssh%3A%2F%2Fsvn2.example.com%2F&' +
         _MATCH_URL_TOOL): {
            'mimetype': 'application/vnd.reviewboard.org.repositories+json',
            'rsp': {
                'repositories': [
                    {
                        'id': 2,
                        'name': 'SVN Repo 2',
                        'path': 'https://svn2.example.com/',
                        'mirror_path': 'svn+ssh://svn2.example.com/',
                        'links': {
                            'info': {
                                # NOTE(review): this href points at
                                # repository 1's info endpoint, while the
                                # id-2 entry in the paged listing below uses
                                # 2/info/.  Looks like a copy-paste slip;
                                # harmless to these tests, but confirm.
                                'href': ('http://localhost:8080/api/'
                                         'repositories/1/info/'),
                                'method': 'GET',
                            },
                        },
                    },
                ],
                'links': {},
                'total_results': 1,
                'stat': 'ok',
            },
        },
        (_MATCH_URL_BASE + '?' + _MATCH_URL_FIELDS +
         '&only-links=info&path=svn%2Bssh%3A%2F%2Fblargle%2F&' +
         _MATCH_URL_TOOL): {
            'mimetype': 'application/vnd.reviewboard.org.repositories+json',
            'rsp': {
                'repositories': [],
                'links': {},
                'total_results': 0,
                'stat': 'ok',
            },
        },
        (_MATCH_URL_BASE + '?' + _MATCH_URL_FIELDS +
         '&only-links=&path=svn%2Bssh%3A%2F%2Fblargle%2F&' +
         _MATCH_URL_TOOL): {
            'mimetype': 'application/vnd.reviewboard.org.repositories+json',
            'rsp': {
                'repositories': [],
                'links': {},
                'total_results': 0,
                'stat': 'ok',
            },
        },
        (_MATCH_URL_BASE + '?' + _MATCH_URL_FIELDS + '&only-links=info&' +
         _MATCH_URL_TOOL): {
            'mimetype': 'application/vnd.reviewboard.org.repositories+json',
            'rsp': {
                'repositories': [
                    {
                        # This one doesn't have a mirror_path, to emulate
                        # Review Board 1.6.
                        'id': 1,
                        'name': 'SVN Repo 1',
                        'path': 'https://svn1.example.com/',
                        'links': {
                            'info': {
                                'href': ('http://localhost:8080/api/'
                                         'repositories/1/info/'),
                                'method': 'GET',
                            },
                        },
                    },
                    {
                        'id': 2,
                        'name': 'SVN Repo 2',
                        'path': 'https://svn2.example.com/',
                        'mirror_path': 'svn+ssh://svn2.example.com/',
                        'links': {
                            'info': {
                                'href': ('http://localhost:8080/api/'
                                         'repositories/2/info/'),
                                'method': 'GET',
                            },
                        },
                    },
                ],
                # Paged listing: a "next" link leads to page 2 below.
                'links': {
                    'next': {
                        'href': ('http://localhost:8080/api/repositories/?'
                                 'only-links=info&tool=Subversion&'
                                 'only-fields=id%2Cname%2Cmirror_path%2Cpath&'
                                 'page=2'),
                        'method': 'GET',
                    },
                },
                'total_results': 3,
                'stat': 'ok',
            },
        },
        (_MATCH_URL_BASE + '?' + _MATCH_URL_FIELDS +
         '&only-links=info&page=2&' + _MATCH_URL_TOOL): {
            'mimetype': 'application/vnd.reviewboard.org.repositories+json',
            'rsp': {
                'repositories': [
                    {
                        'id': 3,
                        'name': 'SVN Repo 3',
                        'path': 'https://svn3.example.com/',
                        'mirror_path': 'svn+ssh://svn3.example.com/',
                        'links': {
                            'info': {
                                'href': ('http://localhost:8080/api/'
                                         'repositories/3/info/'),
                                'method': 'GET',
                            },
                        },
                    },
                ],
                'total_results': 3,
                'stat': 'ok',
            },
        },
        (_MATCH_URL_BASE + '1/info/'): {
            'mimetype': 'application/vnd.reviewboard.org.repository-info+json',
            'rsp': {
                'info': {
                    'uuid': 'UUID-1',
                    'url': 'https://svn1.example.com/',
                    'root_url': 'https://svn1.example.com/',
                },
                'stat': 'ok',
            },
        },
        (_MATCH_URL_BASE + '2/info/'): {
            'mimetype': 'application/vnd.reviewboard.org.repository-info+json',
            'rsp': {
                'info': {
                    'uuid': 'UUID-2',
                    'url': 'https://svn2.example.com/',
                    'root_url': 'https://svn2.example.com/',
                },
                'stat': 'ok',
            },
        },
        (_MATCH_URL_BASE + '3/info/'): {
            'mimetype': 'application/vnd.reviewboard.org.repository-info+json',
            'rsp': {
                'info': {
                    'uuid': 'UUID-3',
                    'url': 'https://svn3.example.com/',
                    'root_url': 'https://svn3.example.com/',
                },
                'stat': 'ok',
            },
        },
    }

    def setUp(self):
        super(SVNRepositoryMatchTests, self).setUp()

        @self.spy_for(urlopen)
        def _urlopen(url, **kwargs):
            # Serve the canned payload for the requested URL, or a REST
            # "Object does not exist" error for anything unexpected.
            url = url.get_full_url()

            try:
                payload = self.payloads[url]
            except KeyError:
                print('Test requested unexpected URL "%s"' % url)

                return MockResponse(
                    404,
                    {},
                    json.dumps({
                        'rsp': {
                            'stat': 'fail',
                            'err': {
                                'code': 100,
                                'msg': 'Object does not exist',
                            },
                        },
                    }))

            return MockResponse(200, {
                'Content-Type': payload['mimetype'],
            }, json.dumps(payload['rsp']))

        self.api_client = RBClient('http://localhost:8080/')
        self.root_resource = self.api_client.get_root()

    def test_find_matching_server_repository_with_path_match(self):
        """Testing SVNClient.find_matching_server_repository with path
        match
        """
        url = 'https://svn1.example.com/'
        self.options.repository_url = url
        client = SVNClient(options=self.options)

        repository, info = get_repository_resource(self.root_resource,
                                                   tool=client,
                                                   repository_paths=url)
        self.assertEqual(repository.id, 1)

    def test_find_matching_server_repository_with_mirror_path_match(self):
        """Testing SVNClient.find_matching_server_repository with mirror_path
        match
        """
        url = 'svn+ssh://svn2.example.com/'
        self.options.repository_url = url
        client = SVNClient(options=self.options)

        repository, info = get_repository_resource(self.root_resource,
                                                   tool=client,
                                                   repository_paths=url)
        self.assertEqual(repository.id, 2)

    def test_find_matching_server_repository_with_uuid_match(self):
        """Testing SVNClient.find_matching_server_repository with UUID
        match
        """
        url = 'svn+ssh://blargle/'
        self.options.repository_url = url
        client = SVNClient(options=self.options)

        # Neither path nor mirror_path matches, so the lookup falls back to
        # matching the repository UUID reported by ``svn info``.
        self.spy_on(client.svn_info, op=kgb.SpyOpReturn({
            'Repository UUID': 'UUID-3',
        }))

        repository, info = get_repository_resource(self.root_resource,
                                                   tool=client,
                                                   repository_paths=url)
        self.assertEqual(repository.id, 3)

    def test_relative_paths(self):
        """Testing SVNRepositoryInfo._get_relative_path"""
        info = SVNRepositoryInfo('http://svn.example.com/svn/', '/', '')
        self.assertEqual(info._get_relative_path('/foo', '/bar'), None)
        self.assertEqual(info._get_relative_path('/', '/trunk/myproject'),
                         None)
        self.assertEqual(info._get_relative_path('/trunk/myproject', '/'),
                         '/trunk/myproject')
        self.assertEqual(info._get_relative_path('/trunk/myproject', ''),
                         '/trunk/myproject')
        self.assertEqual(info._get_relative_path('/trunk/myproject', '/trunk'),
                         '/myproject')
        self.assertEqual(
            info._get_relative_path('/trunk/myproject', '/trunk/myproject'),
            '/')
def RunTool(server_url, session, review_request_id, diff_revision,
            status_update_id, review_settings, tool_options):
    """Run one automated-review tool against a review request.

    The tool to run is taken from the Celery routing key (everything before
    the first ``.``).  Progress and results are reported through a status
    update on the review request; failures are logged and surfaced on that
    status update as ``internal error.``.

    Args:
        server_url (unicode):
            The URL of the Review Board server.

        session (unicode):
            The encoded session identifier.

        review_request_id (int):
            The ID of the review request being reviewed (the API
            "display_id" field).

        diff_revision (int):
            The ID of the diff revision being reviewed.

        status_update_id (int):
            The ID of the status update for this invocation of the tool.

        review_settings (dict):
            Settings for how the review should be created.

        tool_options (dict):
            The tool-specific settings.

    Returns:
        bool:
        Whether the task completed successfully.
    """
    routing_key = RunTool.request.delivery_info["routing_key"]
    tool_name = routing_key.partition(".")[0]
    log_detail = "(server=%s, review_request_id=%s, diff_revision=%s)" % (
        server_url, review_request_id, diff_revision)

    logger.info('Running tool "%s" %s', tool_name, log_detail)

    # Connect to the Review Board server first; nothing can be reported
    # without an API session.
    try:
        logger.info("Initializing RB API %s", log_detail)
        api_client = RBClient(server_url,
                              cookie_file=COOKIE_FILE,
                              agent=AGENT,
                              session=session)
        api_root = api_client.get_root()
    except Exception as e:
        logger.error("Could not contact Review Board server: %s %s",
                     e, log_detail)
        return False

    # Resolve the tool implementation from the registered entry points.
    logger.info('Loading requested tool "%s" %s', tool_name, log_detail)
    tools = [
        entrypoint.load()
        for entrypoint in pkg_resources.iter_entry_points(
            group="reviewbot.tools", name=tool_name)
    ]

    if len(tools) == 0:
        logger.error('Tool "%s" not found %s', tool_name, log_detail)
        return False
    elif len(tools) > 1:
        logger.error('Tool "%s" is ambiguous (found %s) %s',
                     tool_name,
                     ", ".join(tool.name for tool in tools),
                     log_detail)
        return False
    else:
        tool = tools[0]

    try:
        logger.info("Creating status update %s", log_detail)
        status_update = api_root.get_status_update(
            review_request_id=review_request_id,
            status_update_id=status_update_id)
    except Exception as e:
        logger.exception("Unable to create status update: %s %s",
                         e, log_detail)
        return False

    # From here on, failures are also surfaced on the status update.
    try:
        logger.info("Initializing review %s", log_detail)
        review = Review(api_root, review_request_id, diff_revision,
                        review_settings)
        status_update.update(description="running...")
    except Exception as e:
        logger.exception("Failed to initialize review: %s %s", e, log_detail)
        status_update.update(state=ERROR, description="internal error.")
        return False

    try:
        logger.info('Initializing tool "%s %s" %s',
                    tool.name, tool.version, log_detail)
        t = tool()
    except Exception as e:
        logger.exception('Error initializing tool "%s": %s %s',
                         tool.name, e, log_detail)
        status_update.update(state=ERROR, description="internal error.")
        return False

    try:
        logger.info('Executing tool "%s" %s', tool.name, log_detail)
        t.execute(review, settings=tool_options)
        logger.info('Tool "%s" completed successfully %s',
                    tool.name, log_detail)
    except Exception as e:
        logger.exception('Error executing tool "%s": %s %s',
                         tool.name, e, log_detail)
        status_update.update(state=ERROR, description="internal error.")
        return False

    try:
        if len(review.comments) == 0:
            # A clean run with no comments counts as a pass; nothing to
            # publish.
            status_update.update(state=DONE_SUCCESS, description="passed.")
        else:
            logger.info("Publishing review %s", log_detail)
            review_id = review.publish().id
            status_update.update(state=DONE_FAILURE,
                                 description="failed.",
                                 review_id=review_id)
    except Exception as e:
        logger.exception("Error when publishing review: %s %s", e, log_detail)
        status_update.update(state=ERROR, description="internal error.")
        return False

    logger.info("Review completed successfully %s", log_detail)
    return True
def update_tools_list(panel, payload):
    """Update the list of installed tools.

    This will detect the installed analysis tool plugins and inform
    Review Board of them.

    Args:
        panel (celery.worker.control.Panel):
            The worker control panel.

        payload (dict):
            The payload as assembled by the extension.

    Returns:
        dict:
        ``{'status': 'ok', 'tools': [...]}`` on success, or
        ``{'status': 'error', 'error': ...}`` on failure.  (The old
        docstring claimed ``bool``, which did not match the code.)
    """
    logger.info('Request to refresh installed tools from "%s"',
                payload["url"])

    logger.info("Iterating Tools")
    tools = []

    for ep in pkg_resources.iter_entry_points(group="reviewbot.tools"):
        entry_point = ep.name
        tool_class = ep.load()
        tool = tool_class()

        # Lazy %-style arguments: the message is only formatted when the
        # log level is enabled (previously formatted eagerly with "%").
        logger.info("Tool: %s", entry_point)

        if tool.check_dependencies():
            tools.append({
                "name": tool_class.name,
                "entry_point": entry_point,
                "version": tool_class.version,
                "description": tool_class.description,
                "tool_options": json.dumps(tool_class.options),
                "timeout": tool_class.timeout,
            })
        else:
            # Tools with unmet dependencies are reported but not registered.
            logger.warning("%s dependency check failed.", ep.name)

    logger.info("Done iterating Tools")

    hostname = panel.hostname

    try:
        api_client = RBClient(payload["url"],
                              cookie_file=COOKIE_FILE,
                              agent=AGENT,
                              session=payload["session"])
        api_root = api_client.get_root()
    except Exception as e:
        logger.exception("Could not reach RB server: %s", e)
        return {
            "status": "error",
            "error": "Could not reach Review Board server: %s" % e,
        }

    try:
        api_tools = _get_extension_resource(api_root).get_tools()
        api_tools.create(hostname=hostname, tools=json.dumps(tools))
    except Exception as e:
        logger.exception("Problem POSTing tools: %s", e)
        return {
            "status": "error",
            "error": "Problem uploading tools: %s" % e,
        }

    return {"status": "ok", "tools": tools}
def getrbapi(repo, o):
    """Return the Review Board API root for the configured server.

    The server URL is read from the ``reviewboard.url`` setting in the
    Mercurial configuration; credentials come from the ``bzusername`` and
    ``bzpassword`` keys of ``o``.

    Args:
        repo: The Mercurial repository (provides ``repo.ui`` for config).
        o (dict): Options containing ``bzusername`` and ``bzpassword``.

    Returns:
        The RBClient root API resource.

    Raises:
        ValueError: If no ``reviewboard.url`` is configured.  (Previously
            an unset URL crashed with an opaque ``AttributeError`` from
            ``None.rstrip('/')``.)
    """
    url = repo.ui.config('reviewboard', 'url', None)

    if url is None:
        raise ValueError('no reviewboard.url is set in the Mercurial '
                         'configuration')

    from rbtools.api.client import RBClient

    c = RBClient(url.rstrip('/'), username=o['bzusername'],
                 password=o['bzpassword'])
    return c.get_root()
class AnyRB(object):
    """Thin wrapper around the Review Board API for per-user repositories."""

    def __init__(self):
        # API entry point; credentials come from the Django settings module.
        self.client = RBClient(settings.RB_API_URL,
                               username=settings.RB_API_USERNAME,
                               password=settings.RB_API_PASSWORD)

    def get_repository(self, user):
        """Return the repository named after the user, or None."""
        username = user.username
        root = self.client.get_root()

        for candidate in root.get_repositories():
            if candidate.fields["name"] == username:
                return candidate

        return None

    def create_repository(self, user):
        """Create a private Subversion repository for the user, then return it."""
        root = self.client.get_root()
        allowed_users = ",".join((user.username, settings.RB_API_USERNAME))

        root.get_repositories().create(
            name=user.username,
            path=get_svn_uri(user),
            tool="Subversion",
            public=False,
            access_users=allowed_users,
            access_groups=settings.RB_API_DEFAULT_REVIEW_GROUP
        )

        return self.get_repository(user)

    def submit_review(self, user, diff_content, path="", summary="",
                      description="", review_group_name=None, review_id=None):
        """Create a review request for the diff, or update it when
        review_id is given.  Returns the review request's ID.
        """
        descriptions = []

        if isinstance(summary, unicode):
            summary = summary.encode("utf-8")

        root = self.client.get_root()

        repository = self.get_repository(user)
        if repository is None:
            repository = self.create_repository(user)

        if review_id:
            # Updating: keep the previous description as the first section.
            review_request = root.get_review_request(
                review_request_id=review_id)
            descriptions.append(review_request.description.encode("utf-8"))
        else:
            review_request = root.get_review_requests().create(
                repository=repository.id, submit_as=user.username)

        try:
            review_request.get_diffs().upload_diff(diff_content, base_dir="/")
        except Exception:
            # Best effort: record the failure in the description instead of
            # aborting the whole submission.
            descriptions.append(u"WARNING: Diff has not been uploaded. Probably it contains non-ASCII filenames. Non-ASCII filenames are not supported.")

        descriptions.append(
            "=== Added on {0} ===\n".format(datetime.datetime.now()))
        descriptions.append(description)

        draft = review_request.get_or_create_draft()
        description = "\n".join(descriptions)
        draft.update(description=description, summary=summary)
        review_request.update(status="pending")

        if review_group_name:
            draft.update(target_groups=review_group_name)

        return review_request.id

    def get_review_url(self, request, review_id):
        """Build the absolute URL of a review request behind the /rb prefix."""
        scheme = "https://" if request.is_secure() else "http://"
        return "{0}{1}/rb/r/{2}".format(scheme, request.get_host(), review_id)
def main(url, group_names, days_old=7, dry_run=False): """ do something """ try: user = os.environ['RBUSER'] except KeyError: raise SystemExit("please set RBUSER environment variable for reviewboard user") try: passwd = os.environ['RBPASS'] except KeyError: raise SystemExit("please set RBPASS environment variable for reviewboard password") client = RBClient(url, username=user, password=passwd) root = client.get_root() if not root: raise SystemExit("Error - could not get RBClient root.") if days_old == 0: newer_than = datetime.date(2010, 01, 01) print "days_old is %d getting ALL reviews since %s....script will take ~30m to complete" %(newer_than, days_old) else: newer_than = datetime.datetime.now() - datetime.timedelta(days_old) print "Retrieving reviews since time-added-from - %s" %(newer_than.strftime("%Y-%m-%d")) all_review_req = root.get_review_requests(ship_it_count_gte=2, status="all", max_results=200, time_added_from=newer_than.strftime("%Y-%m-%d")) peep_stats = {} rev_counter = 0 returned_counter = 0 print "Need to retreive %d reviews in 200 review chunks." % (all_review_req.total_results) logger.info("All review request - %s" % (all_review_req)) peep_stats, returned_counter = calculate_total_reviews(all_review_req, peep_stats) rev_counter += returned_counter try: while all_review_req.get_next(): print "Retrieving next chunk of 200.... %s" % (all_review_req.links.next.href) all_review_req = all_review_req.get_next(ship_it_count_gte=2, status="all", time_added_from=newer_than.strftime("%Y-%m-%d")) peep_stats, returned_counter = calculate_total_reviews(all_review_req, peep_stats) rev_counter += returned_counter except StopIteration: print "No more reviews to get. 
I got %d" % (rev_counter) for g_name in group_names: g = root.get_group(group_name=g_name.strip(), displayname=True) group_req_count = 0 group_rev_count = 0 logger.info("Group %s" % (g)) logger.info("Group %s - Users %s" % (g_name, g.get_users())) print "<<<%s" % (g_name) for u in sorted(g.get_users(), key=lambda users: user[1]): review_request_count = root.get_review_requests(from_user=u.username, status="all", time_added_from=newer_than.strftime("%Y-%m-%d")).total_results if u.username not in peep_stats: peep_stats[u.username] = 0 review_given_count = peep_stats[u.username] group_req_count += review_request_count group_rev_count += review_given_count impact = review_request_count + review_given_count impact2 = review_given_count - review_request_count print "%s posted %d review requests and reviewed %d <<< CMGt Impact - %d" % (u.fullname, review_request_count, peep_stats[u.username], impact) logger.info("Impact - %d + %d = %d" % (review_request_count, review_given_count, impact)) logger.info("Impact2 - %d - %d = %d" % (review_given_count, review_request_count, impact2)) if impact > 0 and impact2 >= 1: print "%s is a contributor - (%d, %d)" % (u.fullname, impact, impact2) elif impact <= 0 and impact2 <= 0: print "%s did not impact the code workload - (%d, %d)" % (u.fullname, impact, impact2) else: print "%s adds to workload - (%d, %d)" % (u.fullname, impact, impact2) print "TEAM %s impact: req %d, rev %d" % (g_name, group_req_count, group_rev_count) print "<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<" print "All Reviewers:" print ">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>" for person in sorted(peep_stats, key=lambda key: peep_stats[key], reverse=True): print "%s has reviewed %d cr" % (person, peep_stats[person])
def RunTool(server_url='', session='', username='', review_request_id=-1,
            diff_revision=-1, status_update_id=-1, review_settings={},
            tool_options={}, repository_name='', base_commit_id='',
            *args, **kwargs):
    """Execute an automated review on a review request.

    The tool to run is taken from the Celery routing key (everything before
    the first ``.``).  Results are reported through a status update; any
    temporary files are cleaned up on the way out.

    NOTE(review): the ``{}`` defaults for ``review_settings`` and
    ``tool_options`` are shared across calls; safe only as long as callees
    never mutate them -- confirm.

    Args:
        server_url (unicode):
            The URL of the Review Board server.

        session (unicode):
            The encoded session identifier.

        username (unicode):
            The name of the user who owns the ``session``.

        review_request_id (int):
            The ID of the review request being reviewed (the API
            "display_id" field).

        diff_revision (int):
            The ID of the diff revision being reviewed.

        status_update_id (int):
            The ID of the status update for this invocation of the tool.

        review_settings (dict):
            Settings for how the review should be created.

        tool_options (dict):
            The tool-specific settings.

        repository_name (unicode):
            The name of the repository to clone to run the tool, if the
            tool requires full working directory access.

        base_commit_id (unicode):
            The ID of the commit that the patch should be applied to.

        args (tuple):
            Any additional positional arguments (perhaps used by a newer
            version of the Review Bot extension).

        kwargs (dict):
            Any additional keyword arguments (perhaps used by a newer
            version of the Review Bot extension).

    Returns:
        bool:
        Whether the task completed successfully.
    """
    try:
        routing_key = RunTool.request.delivery_info['routing_key']
        tool_name = routing_key.partition('.')[0]
        log_detail = ('(server=%s, review_request_id=%s, diff_revision=%s)'
                      % (server_url, review_request_id, diff_revision))

        logger.info('Running tool "%s" %s', tool_name, log_detail)

        # Connect to the Review Board server first; nothing can be
        # reported without an API session.
        try:
            logger.info('Initializing RB API %s', log_detail)
            api_client = RBClient(server_url,
                                  cookie_file=COOKIE_FILE,
                                  agent=AGENT,
                                  session=session)
            api_root = api_client.get_root()
        except Exception as e:
            logger.error('Could not contact Review Board server: %s %s',
                         e, log_detail)
            return False

        # Resolve the tool implementation from registered entry points.
        logger.info('Loading requested tool "%s" %s', tool_name, log_detail)
        tools = [
            entrypoint.load()
            for entrypoint in pkg_resources.iter_entry_points(
                group='reviewbot.tools', name=tool_name)
        ]

        if len(tools) == 0:
            logger.error('Tool "%s" not found %s', tool_name, log_detail)
            return False
        elif len(tools) > 1:
            logger.error('Tool "%s" is ambiguous (found %s) %s',
                         tool_name,
                         ', '.join(tool.name for tool in tools),
                         log_detail)
            return False
        else:
            tool = tools[0]

        repository = None

        try:
            logger.info('Creating status update %s', log_detail)
            status_update = api_root.get_status_update(
                review_request_id=review_request_id,
                status_update_id=status_update_id)
        except Exception as e:
            logger.exception('Unable to create status update: %s %s',
                             e, log_detail)
            return False

        if tool.working_directory_required:
            # Full-checkout tools need a parent commit and a locally
            # configured repository to work against.
            if not base_commit_id:
                logger.error('Working directory is required but the diffset '
                             'has no base_commit_id %s', log_detail)
                status_update.update(
                    state=ERROR,
                    description='Diff does not include parent commit '
                                'information.')
                return False

            try:
                repository = repositories[repository_name]
            except KeyError:
                logger.error('Unable to find configured repository "%s" %s',
                             repository_name, log_detail)
                return False

        try:
            logger.info('Initializing review %s', log_detail)
            review = Review(api_root, review_request_id, diff_revision,
                            review_settings)
            status_update.update(description='running...')
        except Exception as e:
            logger.exception('Failed to initialize review: %s %s',
                             e, log_detail)
            status_update.update(state=ERROR, description='internal error.')
            return False

        try:
            logger.info('Initializing tool "%s %s" %s',
                        tool.name, tool.version, log_detail)
            t = tool()
        except Exception as e:
            logger.exception('Error initializing tool "%s": %s %s',
                             tool.name, e, log_detail)
            status_update.update(state=ERROR, description='internal error.')
            return False

        try:
            logger.info('Executing tool "%s" %s', tool.name, log_detail)
            t.execute(review,
                      settings=tool_options,
                      repository=repository,
                      base_commit_id=base_commit_id)
            logger.info('Tool "%s" completed successfully %s',
                        tool.name, log_detail)
        except Exception as e:
            logger.exception('Error executing tool "%s": %s %s',
                             tool.name, e, log_detail)
            status_update.update(state=ERROR, description='internal error.')
            return False

        if t.output:
            # Attach the tool's console output to the status update.
            file_attachments = \
                api_root.get_user_file_attachments(username=username)
            attachment = \
                file_attachments.upload_attachment('tool-output', t.output)
            status_update.update(url=attachment.absolute_url,
                                 url_text='Tool console output')

        try:
            if not review.has_comments:
                # A clean run with no comments counts as a pass.
                status_update.update(state=DONE_SUCCESS,
                                     description='passed.')
            else:
                logger.info('Publishing review %s', log_detail)
                review_id = review.publish().id
                status_update.update(state=DONE_FAILURE,
                                     description='failed.',
                                     review_id=review_id)
        except Exception as e:
            logger.exception('Error when publishing review: %s %s',
                             e, log_detail)
            status_update.update(state=ERROR, description='internal error.')
            return False

        logger.info('Review completed successfully %s', log_detail)
        return True
    finally:
        cleanup_tempfiles()
class AnyRB(object):
    """Pushes a student's submitted files to Review Board as a review request.

    The event's attached text files are converted into a unified diff
    against /dev/null and uploaded to a per-issue Git repository on the
    Review Board server configured in settings.
    """

    def __init__(self, event):
        # Review Board API entry point; credentials come from settings.
        self.client = RBClient(settings.RB_API_URL,
                               username=settings.RB_API_USERNAME,
                               password=settings.RB_API_PASSWORD)
        self.event = event

    def upload_review(self):
        """Build a diff from the event's files and upload it.

        Returns the review request ID, or None when there is nothing to
        upload or the review request could not be found/created.
        """
        if len(self.event.file_set.all()) == 0:
            return None
        files_diff = []
        empty = True
        import magic
        with unpack_files(self.event.file_set.all()) as files:
            for f in files:
                # Only text files can be diffed; sniff the MIME type from
                # the first 1KB.
                mime_type = magic.from_buffer(f.file.read(1024), mime=True)
                if mime_type[:4] != 'text':
                    continue
                empty = False
                file_content = []
                for line in f.file:
                    # Submissions may be UTF-8 or (legacy) CP1251 encoded.
                    try:
                        file_content.append(line.decode('utf-8'))
                    except:
                        file_content.append(line.decode('cp1251'))
                from difflib import unified_diff
                fname = f.filename()
                from_name = u'a/{0}'.format(fname)
                to_name = u'b/{0}'.format(fname)
                diff = [(u'diff --git {0} {1}'.format(from_name, to_name))]
                # New files are diffed against /dev/null.
                from_name = u'/dev/null'
                diff_content = unified_diff('', file_content,
                                            fromfile=from_name,
                                            tofile=to_name.encode('utf-8'))
                for line in diff_content:
                    line = line.strip()
                    # difflib may yield str or unicode here; normalize to
                    # unicode before joining.
                    if isinstance(line, str):
                        diff.append(line.decode('utf-8'))
                    else:
                        diff.append(line)
                files_diff.append(u'\n'.join(diff))
        files_diff = u'\n'.join(files_diff)
        if empty:
            return None
        review_request = self.get_review_request()
        if review_request is None:
            review_request = self.create_review_request()
        if review_request is None:
            return review_request
        logger.info("Diff to upload: >>>%s<<<", files_diff)
        review_request.get_diffs().upload_diff(files_diff.encode('utf-8'))
        draft = review_request.get_or_create_draft()
        issue = self.event.issue
        task_title = issue.task.get_title(issue.student.get_profile().language)
        summary = u'[{0}][{1}] {2}'.format(
            issue.student.get_full_name(),
            issue.task.course.get_user_group(issue.student),
            task_title)
        # Markdown description linking back to the course, student and
        # issue discussion pages on the current site.
        description_template = \
            _(u'zadacha') + ': "{0}", ' + \
            _(u'kurs') + ': [{1}]({2}{3})\n' + \
            _(u'student') + ': [{4}]({2}{5})\n' + '[' + \
            _(u'obsuzhdenie_zadachi') + ']({2}{6})'
        description = description_template.format(
            task_title,
            issue.task.course,
            Site.objects.get_current().domain,
            issue.task.course.get_absolute_url(),
            issue.student.get_full_name(),
            issue.student.get_absolute_url(),
            issue.get_absolute_url()
        )
        draft = draft.update(summary=summary,
                             description=description,
                             description_text_type='markdown',
                             target=settings.RB_API_USERNAME,
                             public=True,
                             )
        return review_request.id

    # def get_or_create_review_request(self):
    #     root = self.client.get_root()
    #     review_request = None
    #     try:
    #         review_id = self.event.issue.get_byname('review_id')
    #         review_request = root.get_review_request(review_request_id=review_id)
    #     except (AttributeError, ValueError):
    #         course_id = self.event.issue.task.course.id
    #         repository_name = str(self.event.issue.id)
    #         os.symlink(settings.RB_SYMLINK_DIR,os.path.join(settings.RB_SYMLINK_DIR, repository_name))
    #         repository = root.get_repositories().create(
    #             name=repository_name,
    #             path=settings.RB_SYMLINK_DIR+repository_name+'/.git',
    #             tool='Git',
    #             public=False)
    #         root.get_repository(repository_id=repository.id).update(grant_type='add',
    #                                                                 grant_entity='user',
    #                                                                 grant_name=self.event.author)
    #         root.get_repository(repository_id=repository.id).update(grant_type='add',
    #                                                                 grant_entity='group',
    #                                                                 grant_name='teachers')
    #         root.get_repository(repository_id=repository.id).update(grant_type='add',
    #                                                                 grant_entity='group',
    #                                                                 grant_name='teachers_{0}'.format(course_id))
    #         review_request = root.get_review_requests().create(repository=repository.id)
    #         self.event.issue.set_byname('review_id', review_request.id, self.event.author)
    #     return review_request

    def get_review_request(self):
        """Return the existing review request for this issue, or None."""
        root = self.client.get_root()
        review_request = None
        try:
            review_id = self.event.issue.get_byname('review_id')
        except (AttributeError, ValueError):
            logger.info("Issue '%s' has not review_id.", self.event.issue.id)
            return None
        try:
            review_request = root.get_review_request(review_request_id=review_id)
            return review_request
        except Exception as e:
            logger.info("Issue '%s' has not RB review_request. Exception: '%s'.",
                        self.event.issue.id, e)
            return None

    def create_review_request(self):
        """Create the per-issue repository and review request.

        Returns the new review request, or None on failure.
        """
        root = self.client.get_root()
        review_request = None
        course_id = self.event.issue.task.course.id
        repository_name = str(self.event.issue.id)
        repository_path = os.path.join(settings.RB_SYMLINK_DIR, repository_name)
        # The per-issue repository path is a symlink into the shared dir.
        if not os.path.exists(repository_path):
            os.symlink(settings.RB_SYMLINK_DIR, repository_path)
        try:
            repository = None
            try:
                repository = root.get_repositories().create(
                    name=repository_name,
                    path=os.path.join(repository_path, '.git'),
                    tool='Git',
                    public=False)
            except Exception as e:
                # Creation can fail when the repository already exists;
                # fall back to looking it up by name.
                logger.warning("Cant create repository '%s', trying to find it", repository_name)
                repository = self.get_repository(repository_name)
                if repository is None:
                    raise Exception("Cant find repository '%s', trying to find it", repository_name)
            # Grant access to the student and the course's teachers group.
            root.get_repository(repository_id=repository.id).update(grant_type='add',
                                                                    grant_entity='user',
                                                                    grant_name=self.event.issue.student)
            root.get_repository(repository_id=repository.id).update(grant_type='add',
                                                                    grant_entity='group',
                                                                    grant_name='teachers_{0}'.format(course_id))
            review_request = root.get_review_requests().create(repository=repository.id)
            self.event.issue.set_byname('review_id', review_request.id, self.event.author)
        except Exception as e:
            logger.exception("Exception while creating review_request. Exception: '%s'. Issue: '%s'", e, self.event.issue.id)
            return None
        return review_request

    def get_repository(self, name):
        """Find a repository by name, following pagination; None if absent."""
        root = self.client.get_root()
        repositories = root.get_repositories()
        try:
            while True:
                for repo in repositories:
                    if repo.name == name:
                        return repo
                # get_next() raises StopIteration on the last page.
                repositories = repositories.get_next()
        except StopIteration:
            return None
        return None
class SVNRepositoryInfoTests(SpyAgency, SCMClientTests):
    """Unit tests for rbtools.clients.svn.SVNRepositoryInfo."""

    # Canned HTTP responses keyed by full request URL. setUp() patches
    # urlopen so every API request is answered from this table instead of
    # hitting a real server.
    #
    # NOTE(review): the repository "info" links use an https:// scheme while
    # the root/listing URLs use http:// — presumably intentional test data,
    # but worth confirming.
    payloads = {
        'http://localhost:8080/api/': {
            'mimetype': 'application/vnd.reviewboard.org.root+json',
            'rsp': {
                'uri_templates': {},
                'links': {
                    'self': {
                        'href': 'http://localhost:8080/api/',
                        'method': 'GET',
                    },
                    'repositories': {
                        'href': 'http://localhost:8080/api/repositories/',
                        'method': 'GET',
                    },
                },
                'stat': 'ok',
            },
        },
        'http://localhost:8080/api/repositories/?tool=Subversion': {
            'mimetype': 'application/vnd.reviewboard.org.repositories+json',
            'rsp': {
                'repositories': [
                    {
                        # This one doesn't have a mirror_path, to emulate
                        # Review Board 1.6.
                        'id': 1,
                        'name': 'SVN Repo 1',
                        'path': 'https://svn1.example.com/',
                        'links': {
                            'info': {
                                'href': ('https://localhost:8080/api/'
                                         'repositories/1/info/'),
                                'method': 'GET',
                            },
                        },
                    },
                    {
                        'id': 2,
                        'name': 'SVN Repo 2',
                        'path': 'https://svn2.example.com/',
                        'mirror_path': 'svn+ssh://svn2.example.com/',
                        'links': {
                            'info': {
                                'href': ('https://localhost:8080/api/'
                                         'repositories/2/info/'),
                                'method': 'GET',
                            },
                        },
                    },
                ],
                # A 'next' link so the pagination path through the
                # repository list is exercised as well.
                'links': {
                    'next': {
                        'href': ('http://localhost:8080/api/repositories/'
                                 '?tool=Subversion&page=2'),
                        'method': 'GET',
                    },
                },
                'total_results': 3,
                'stat': 'ok',
            },
        },
        'http://localhost:8080/api/repositories/?tool=Subversion&page=2': {
            'mimetype': 'application/vnd.reviewboard.org.repositories+json',
            'rsp': {
                'repositories': [
                    {
                        'id': 3,
                        'name': 'SVN Repo 3',
                        'path': 'https://svn3.example.com/',
                        'mirror_path': 'svn+ssh://svn3.example.com/',
                        'links': {
                            'info': {
                                'href': ('https://localhost:8080/api/'
                                         'repositories/3/info/'),
                                'method': 'GET',
                            },
                        },
                    },
                ],
                'total_results': 3,
                'stat': 'ok',
            },
        },
        'https://localhost:8080/api/repositories/1/info/': {
            'mimetype': 'application/vnd.reviewboard.org.repository-info+json',
            'rsp': {
                'info': {
                    'uuid': 'UUID-1',
                    'url': 'https://svn1.example.com/',
                    'root_url': 'https://svn1.example.com/',
                },
                'stat': 'ok',
            },
        },
        'https://localhost:8080/api/repositories/2/info/': {
            'mimetype': 'application/vnd.reviewboard.org.repository-info+json',
            'rsp': {
                'info': {
                    'uuid': 'UUID-2',
                    'url': 'https://svn2.example.com/',
                    'root_url': 'https://svn2.example.com/',
                },
                'stat': 'ok',
            },
        },
        'https://localhost:8080/api/repositories/3/info/': {
            'mimetype': 'application/vnd.reviewboard.org.repository-info+json',
            'rsp': {
                'info': {
                    'uuid': 'UUID-3',
                    'url': 'https://svn3.example.com/',
                    'root_url': 'https://svn3.example.com/',
                },
                'stat': 'ok',
            },
        },
    }

    def setUp(self):
        """Set up a fake API client backed by the canned payloads above."""
        super(SVNRepositoryInfoTests, self).setUp()

        def _urlopen(url, **kwargs):
            # Answer every request from self.payloads; unknown URLs get a
            # Review Board style 404 "Object does not exist" error payload.
            url = url.get_full_url()

            try:
                payload = self.payloads[url]
            except KeyError:
                return MockResponse(
                    404,
                    {},
                    json.dumps({
                        'rsp': {
                            'stat': 'fail',
                            'err': {
                                'code': 100,
                                'msg': 'Object does not exist',
                            },
                        },
                    }))

            return MockResponse(
                200,
                {
                    'Content-Type': payload['mimetype'],
                },
                json.dumps(payload['rsp']))

        # Intercept all HTTP traffic from the API client.
        self.spy_on(urlopen, call_fake=_urlopen)

        self.api_client = RBClient('http://localhost:8080/')
        self.root_resource = self.api_client.get_root()

    def test_find_server_repository_info_with_path_match(self):
        """Testing SVNRepositoryInfo.find_server_repository_info with path
        matching
        """
        info = SVNRepositoryInfo('https://svn1.example.com/', '/', '')

        repo_info = info.find_server_repository_info(self.root_resource)
        self.assertEqual(repo_info, info)
        self.assertEqual(repo_info.repository_id, 1)

    def test_find_server_repository_info_with_mirror_path_match(self):
        """Testing SVNRepositoryInfo.find_server_repository_info with
        mirror_path matching
        """
        info = SVNRepositoryInfo('svn+ssh://svn2.example.com/', '/', '')

        repo_info = info.find_server_repository_info(self.root_resource)
        self.assertEqual(repo_info, info)
        self.assertEqual(repo_info.repository_id, 2)

    def test_find_server_repository_info_with_uuid_match(self):
        """Testing SVNRepositoryInfo.find_server_repository_info with UUID
        matching
        """
        info = SVNRepositoryInfo('svn+ssh://blargle/', '/', 'UUID-3')

        # A UUID match produces a *new* repository info object rather than
        # reusing the local one, hence assertNotEqual here.
        repo_info = info.find_server_repository_info(self.root_resource)
        self.assertNotEqual(repo_info, info)
        self.assertEqual(repo_info.repository_id, 3)

    def test_relative_paths(self):
        """Testing SVNRepositoryInfo._get_relative_path"""
        info = SVNRepositoryInfo('http://svn.example.com/svn/', '/', '')
        self.assertEqual(info._get_relative_path('/foo', '/bar'), None)
        self.assertEqual(info._get_relative_path('/', '/trunk/myproject'),
                         None)
        self.assertEqual(info._get_relative_path('/trunk/myproject', '/'),
                         '/trunk/myproject')
        self.assertEqual(info._get_relative_path('/trunk/myproject', ''),
                         '/trunk/myproject')
        self.assertEqual(
            info._get_relative_path('/trunk/myproject', '/trunk'),
            '/myproject')
        self.assertEqual(
            info._get_relative_path('/trunk/myproject', '/trunk/myproject'),
            '/')
class Command(object):
    """Base class for rb commands.

    This class will handle retrieving the configuration, and parsing
    command line options.

    ``description`` is a string containing a short description of the
    command which is suitable for display in usage text.

    ``usage`` is a list of usage strings each showing a use case. These
    should not include the main rbt command or the command name; they
    will be added automatically.

    ``args`` is a string containing the usage text for what arguments the
    command takes.

    ``option_list`` is a list of command line options for the command.
    Each list entry should be an option created using the
    optparse.make_option function.
    """

    # Subclasses override these class attributes to describe themselves.
    name = ""
    author = ""
    description = ""
    args = ""
    option_list = []

    def __init__(self):
        # Per-command logger, namespaced under 'rb.'.
        self.log = logging.getLogger('rb.%s' % self.name)

    def create_parser(self, config):
        """Create and return the ``OptionParser`` which will be used to
        parse the arguments to this command.
        """
        option_list = [
            opt.make_option(config) for opt in self.option_list
        ]

        return OptionParser(prog=RB_MAIN,
                            usage=self.usage(),
                            option_list=option_list,
                            add_help_option=False)

    def usage(self):
        """Return a usage string for the command."""
        usage = '%%prog %s [options] %s' % (self.name, self.args)

        if self.description:
            return '%s\n\n%s' % (usage, self.description)
        else:
            return usage

    def run_from_argv(self, argv):
        """Execute the command using the provided arguments.

        The options and commandline arguments will be parsed from
        ``argv`` and the commands ``main`` method will be called.
        """
        self.config = load_config()
        parser = self.create_parser(self.config)
        options, args = parser.parse_args(argv[2:])
        self.options = options

        # Check that the proper number of arguments have been provided.
        # main()'s positional parameter count (minus self) gives the
        # minimum; a *args parameter lifts the maximum entirely.
        argspec = inspect.getargspec(self.main)
        minargs = len(argspec[0]) - 1
        maxargs = minargs

        if argspec[1] is not None:
            maxargs = None

        if len(args) < minargs or (maxargs is not None and
                                   len(args) > maxargs):
            # NOTE(review): parser.error() already exits with status 2, so
            # the sys.exit(1) below is unreachable — confirm intent.
            parser.error("Invalid number of arguments provided")
            sys.exit(1)

        self.main(*args)

    def get_cookie(self):
        """Return a cookie file that is read-only."""
        # If we end up creating a cookie file, make sure it's only
        # readable by the user.
        os.umask(0077)

        # Generate a path to the cookie file.
        return os.path.join(get_home_path(), ".post-review-cookies.txt")

    def initialize_scm_tool(self):
        """Initialize the SCM tool for the current working directory."""
        repository_info, tool = scan_usable_client(self.options)
        tool.user_config = self.config
        tool.configs = [self.config]
        tool.check_options()
        return repository_info, tool

    def setup_tool(self, tool, api_root=None):
        """Performs extra initialization on the tool.

        If api_root is not provided we'll assume we want to
        initialize the tool using only local information
        """
        tool.capabilities = self.get_capabilities(api_root)

    def get_server_url(self, repository_info, tool):
        """Returns the Review Board server url."""
        if self.options.server:
            server_url = self.options.server
        else:
            # Fall back to whatever the SCM tool can discover (e.g. from
            # repository properties or config files).
            server_url = tool.scan_for_server(repository_info)

        if not server_url:
            print ("Unable to find a Review Board server "
                   "for this source code tree.")
            sys.exit(1)

        return server_url

    def credentials_prompt(self, realm, uri, *args, **kwargs):
        """Prompt the user for credentials using the command line.

        This will prompt the user, and then return the provided
        username and password. This is used as a callback in the API
        when the user requires authorization.
        """
        if getattr(self.options, 'diff_filename', None) == '-':
            die('HTTP authentication is required, but cannot be '
                'used with --diff-filename=-')

        print "==> HTTP Authentication Required"
        print 'Enter authorization information for "%s" at %s' % \
            (realm, urlparse(uri)[1])

        # getpass will write its prompt to stderr but raw_input
        # writes to stdout. See bug 2831.
        # NOTE(review): the line below appears to have been mangled by a
        # secret-redaction tool ('******'); the original presumably read
        # the username via raw_input and the password via getpass. Restore
        # from upstream history before shipping.
        sys.stderr.write('Username: '******'Password: ')

        return username, password

    def get_root(self, server_url):
        """Returns the root resource of an RBClient."""
        cookie_file = self.get_cookie()
        self.rb_api = RBClient(server_url,
                               cookie_file=cookie_file,
                               username=self.options.username,
                               password=self.options.password,
                               auth_callback=self.credentials_prompt)
        root = None

        try:
            root = self.rb_api.get_root()
        except ServerInterfaceError, e:
            die("Could not reach the review board server at %s" % server_url)
        except APIError, e:
            die("Error: %s" % e)
        # NOTE(review): no explicit `return root` is visible here — callers
        # appear to rely on die() terminating on failure; confirm the tail
        # of this method was not lost in formatting.
def update_tools_list(panel, payload):
    """Update the list of installed tools.

    This will detect the installed analysis tool plugins and inform
    Review Board of them.

    Args:
        panel (celery.worker.control.Panel):
            The worker control panel.

        payload (dict):
            The payload as assembled by the extension.

    Returns:
        bool:
        Whether the task completed successfully.
    """
    logger.info('Request to refresh installed tools from "%s"',
                payload['url'])

    logger.info('Iterating Tools')
    tools = []

    # Collect every registered tool plugin whose dependencies are
    # satisfied on this worker.
    for entry in pkg_resources.iter_entry_points(group='reviewbot.tools'):
        entry_point = entry.name
        tool_class = entry.load()

        logger.info('Tool: %s' % entry_point)

        if not tool_class().check_dependencies():
            logger.warning('%s dependency check failed.', entry.name)
            continue

        tools.append({
            'name': tool_class.name,
            'entry_point': entry_point,
            'version': tool_class.version,
            'description': tool_class.description,
            'tool_options': json.dumps(tool_class.options),
            'timeout': tool_class.timeout,
            'working_directory_required':
                tool_class.working_directory_required,
        })

    logger.info('Done iterating Tools')

    hostname = panel.hostname

    # Establish an API session against the Review Board server.
    try:
        api_client = RBClient(payload['url'],
                              cookie_file=COOKIE_FILE,
                              agent=AGENT,
                              session=payload['session'])
        api_root = api_client.get_root()
    except Exception as e:
        logger.exception('Could not reach RB server: %s', e)
        return {
            'status': 'error',
            'error': 'Could not reach Review Board server: %s' % e,
        }

    # Report this worker's tool list to the extension's tools resource.
    try:
        extension_tools = _get_extension_resource(api_root).get_tools()
        extension_tools.create(hostname=hostname,
                               tools=json.dumps(tools))
    except Exception as e:
        logger.exception('Problem POSTing tools: %s', e)
        return {
            'status': 'error',
            'error': 'Problem uploading tools: %s' % e,
        }

    return {
        'status': 'ok',
        'tools': tools,
    }
def run(): look_args = " -r %s %s"%(rev, repo) # 是否为感兴趣的分支 cmd = "svnlook dirs-changed %s"%look_args changed_dir = call_cmd(cmd).split(os.linesep)[0].strip() # 取第一行即可 repo_branch = _process_branch(changed_dir) if not repo_branch: exit() # 是否有代码文件 interested = False cmd = "svnlook changed %s"%look_args files = call_cmd(cmd) for f in files.split(os.linesep): if f.strip().endswith(rbcfg["filter_suffixs"]): interested = True break if not interested: exit() # 提取rbt post信息 cmd = "svnlook author %s"%look_args author = call_cmd(cmd).strip() if author in AUTHOR_MAP: author = AUTHOR_MAP[author] reviewer = REVIEWER_MAP[author] cmd = "svnlook log %s"%look_args log = call_cmd(cmd) summary = desc = log.strip().replace(os.linesep, "&").replace("\"", "@") summary = "rev:%s-[%s]"%(rev, summary) cmd = "svnlook diff %s"%look_args diff = call_cmd(cmd) diff = _process_diff(diff) #info("\n"+diff) # 创建review_request client = RBClient(rbcfg["rbserver"], username=rbcfg["rbadmin"], password=rbcfg["rbadminpw"]) root = client.get_root() request_data = { "repository" : rbcfg["rbrepo"], #"commit_id" : rev, "submit_as" : author, } r = root.get_review_requests().create(**request_data) vl = root.get_diff_validation() basedir = "/" #info("------------------"+diff) vl.validate_diff(rbcfg["rbrepo"], diff, base_dir=basedir) r.get_diffs().upload_diff(diff, base_dir=basedir) draft = r.get_draft() update_data = { "branch" : repo_branch, "summary" : summary, "description" : desc, "target_people" : reviewer, "public" : True, } ret = draft.update(**update_data) info("repo:<%s> rev:<%s> rid:<%s>"%(rbcfg["rbserver"], rev, r.id))
def RunTool(server_url='',
            session='',
            username='',
            review_request_id=-1,
            diff_revision=-1,
            status_update_id=-1,
            review_settings={},
            tool_options={},
            repository_name='',
            base_commit_id='',
            *args,
            **kwargs):
    """Execute an automated review on a review request.

    Args:
        server_url (unicode):
            The URL of the Review Board server.

        session (unicode):
            The encoded session identifier.

        username (unicode):
            The name of the user who owns the ``session``.

        review_request_id (int):
            The ID of the review request being reviewed (ID for use in
            the API, which is the "display_id" field).

        diff_revision (int):
            The ID of the diff revision being reviewed.

        status_update_id (int):
            The ID of the status update for this invocation of the tool.

        review_settings (dict):
            Settings for how the review should be created.

        tool_options (dict):
            The tool-specific settings.

        repository_name (unicode):
            The name of the repository to clone to run the tool, if the
            tool requires full working directory access.

        base_commit_id (unicode):
            The ID of the commit that the patch should be applied to.

        args (tuple):
            Any additional positional arguments (perhaps used by a newer
            version of the Review Bot extension).

        kwargs (dict):
            Any additional keyword arguments (perhaps used by a newer
            version of the Review Bot extension).

    Returns:
        bool:
        Whether the task completed successfully.
    """
    try:
        # The tool to run is encoded as the first segment of the Celery
        # routing key ("<tool>.<rest>").
        routing_key = RunTool.request.delivery_info['routing_key']
        route_parts = routing_key.partition('.')
        tool_name = route_parts[0]

        # Common suffix appended to every log line for correlation.
        log_detail = ('(server=%s, review_request_id=%s, diff_revision=%s)'
                      % (server_url, review_request_id, diff_revision))

        logger.info('Running tool "%s" %s', tool_name, log_detail)

        try:
            logger.info('Initializing RB API %s', log_detail)
            api_client = RBClient(server_url,
                                  cookie_file=COOKIE_FILE,
                                  agent=AGENT,
                                  session=session)
            api_root = api_client.get_root()
        except Exception as e:
            logger.error('Could not contact Review Board server: %s %s',
                         e, log_detail)
            return False

        logger.info('Loading requested tool "%s" %s', tool_name, log_detail)

        # Resolve the tool class via its entry point; exactly one match
        # is expected.
        tools = [
            entrypoint.load()
            for entrypoint in pkg_resources.iter_entry_points(
                group='reviewbot.tools', name=tool_name)
        ]

        if len(tools) == 0:
            logger.error('Tool "%s" not found %s', tool_name, log_detail)
            return False
        elif len(tools) > 1:
            logger.error('Tool "%s" is ambiguous (found %s) %s',
                         tool_name,
                         ', '.join(tool.name for tool in tools),
                         log_detail)
            return False
        else:
            tool = tools[0]

        repository = None

        try:
            logger.info('Creating status update %s', log_detail)
            status_update = api_root.get_status_update(
                review_request_id=review_request_id,
                status_update_id=status_update_id)
        except Exception as e:
            logger.exception('Unable to create status update: %s %s',
                             e, log_detail)
            return False

        if tool.working_directory_required:
            # Tools that need a checkout must know which commit to base
            # it on and which configured repository to clone.
            if not base_commit_id:
                logger.error(
                    'Working directory is required but the diffset '
                    'has no base_commit_id %s', log_detail)
                status_update.update(
                    state=ERROR,
                    description='Diff does not include parent commit '
                                'information.')
                return False

            try:
                repository = repositories[repository_name]
            except KeyError:
                logger.error('Unable to find configured repository "%s" %s',
                             repository_name, log_detail)
                return False

        try:
            logger.info('Initializing review %s', log_detail)
            review = Review(api_root, review_request_id, diff_revision,
                            review_settings)
            status_update.update(description='running...')
        except Exception as e:
            logger.exception('Failed to initialize review: %s %s',
                             e, log_detail)
            status_update.update(state=ERROR,
                                 description='internal error.')
            return False

        try:
            logger.info('Initializing tool "%s %s" %s',
                        tool.name, tool.version, log_detail)
            t = tool()
        except Exception as e:
            logger.exception('Error initializing tool "%s": %s %s',
                             tool.name, e, log_detail)
            status_update.update(state=ERROR,
                                 description='internal error.')
            return False

        try:
            logger.info('Executing tool "%s" %s', tool.name, log_detail)
            t.execute(review,
                      settings=tool_options,
                      repository=repository,
                      base_commit_id=base_commit_id)
            logger.info('Tool "%s" completed successfully %s',
                        tool.name, log_detail)
        except Exception as e:
            logger.exception('Error executing tool "%s": %s %s',
                             tool.name, e, log_detail)
            status_update.update(state=ERROR,
                                 description='internal error.')
            return False

        # Attach any console output the tool produced to the status
        # update so it is reachable from the review request page.
        if t.output:
            file_attachments = \
                api_root.get_user_file_attachments(username=username)
            attachment = \
                file_attachments.upload_attachment('tool-output', t.output)
            status_update.update(url=attachment.absolute_url,
                                 url_text='Tool console output')

        try:
            if not review.has_comments:
                # Nothing to report: mark the check as passed without
                # publishing an empty review.
                status_update.update(state=DONE_SUCCESS,
                                     description='passed.')
            else:
                logger.info('Publishing review %s', log_detail)
                review_id = review.publish().id
                status_update.update(state=DONE_FAILURE,
                                     description='failed.',
                                     review_id=review_id)
        except Exception as e:
            logger.exception('Error when publishing review: %s %s',
                             e, log_detail)
            status_update.update(state=ERROR,
                                 description='internal error.')
            return False

        logger.info('Review completed successfully %s', log_detail)
        return True
    finally:
        # Always clear out scratch files, whatever path we exited on.
        cleanup_tempfiles()
def post_reviews(url, repoid, identifier, commits, username=None,
                 password=None, userid=None, cookie=None):
    """Post a set of commits to Review Board.

    Repository hooks can use this function to post a set of pushed commits
    to Review Board. Each commit will become its own review request.
    Additionally, a review request with a diff encompassing all the commits
    will be created; This "squashed" review request will represent the push
    for the provided ``identifier``.

    The ``identifier`` is a unique string which represents a series of
    pushed commit sets. This identifier is used to update review requests
    with a new set of diffs from a new push. Generally this identifier will
    represent some unit of work, such as a bug.

    The ``commits`` argument takes the following form::

        {
            'squashed': {
                'diff': <squashed-diff-string>,
            },
            'individual': [
                {
                    'id': <commit-id>,
                    'precursors': [<previous changeset>],
                    'message': <commit-message>,
                    'diff': <diff>,
                    'parent_diff': <diff-from-base-to-commit>,
                },
                ...
            ]
        }

    State is tracked in extra_data on the squashed review request:

    * ``p2rb.commits`` — the published commit -> review-request-id mapping
      (also kept on the draft for the pending push).
    * ``p2rb.unpublished_rids`` — review requests created for individual
      commits but never published; they may be reused for *any* commit on
      a later push.
    * ``p2rb.discard_on_publish_rids`` — previously published review
      requests that are now obsolete and must be closed "discarded" when
      the squashed review request is published.

    When publishing the squashed review request: drafts for commits still
    in the push are published; review requests that were never used, or
    are obsolete, are closed "discarded"; both rid lists are reset to
    ``'[]'``. When discarding the draft of a published squashed review
    request: never-used requests are closed "discarded", the draft itself
    is deleted, and both rid lists are reset. Discarding an unpublished
    squashed review request is always a close "discarded"
    (TODO Bug 1047465).

    Returns a 3-tuple of (squashed review request id as a str, mapping of
    commit node -> review request id, mapping of review request id ->
    review request API object).
    """
    rbc = None

    if userid and cookie:
        # TODO: This is bugzilla specific code that really shouldn't be
        # inside of this file. The whole bugzilla cookie resource is a hack
        # anyways though so we'll deal with this for now.
        rbc = RBClient(url)
        login_resource = rbc.get_path(
            'extensions/rbbz.extension.BugzillaExtension/'
            'bugzilla-cookie-logins/')
        login_resource.create(login_id=userid, login_cookie=cookie)
    else:
        rbc = RBClient(url, username=username, password=password)

    api_root = rbc.get_root()

    # This assumes that we pushed to the repository/URL that Review Board
    # is configured to use. This assumption may not always hold.
    repo = api_root.get_repository(repository_id=repoid)
    repo_url = repo.path

    # Retrieve the squashed review request or create it.
    previous_commits = []
    squashed_rr = None
    rrs = api_root.get_review_requests(commit_id=identifier,
                                       repository=repoid)
    if rrs.total_results > 0:
        squashed_rr = rrs[0]
    else:
        # A review request for that identifier doesn't exist - this
        # is the first push to this identifier and we'll need to create
        # it from scratch.
        squashed_rr = rrs.create(**{
            "extra_data.p2rb": "True",
            "extra_data.p2rb.is_squashed": "True",
            "extra_data.p2rb.identifier": identifier,
            "extra_data.p2rb.discard_on_publish_rids": '[]',
            "extra_data.p2rb.unpublished_rids": '[]',
            "commit_id": identifier,
            "repository": repoid,
        })

    squashed_rr.get_diffs().upload_diff(commits["squashed"]["diff"])

    def update_review_request(rid, commit):
        # Refresh an existing review request's draft and diff from a
        # commit in the current push.
        rr = api_root.get_review_request(review_request_id=rid)
        draft = rr.get_or_create_draft(**{
            "summary": commit['message'].splitlines()[0],
            "description": commit['message'],
            "extra_data.p2rb.commit_id": commit['id'],
        })
        rr.get_diffs().upload_diff(commit['diff'],
                                   parent_diff=commit['parent_diff'])
        return rr

    # TODO: We need to take into account the commits data from the squashed
    # review request's draft. This data represents the mapping from commit
    # to rid in the event that we would have published. We're overwriting
    # this data. This will only come into play if we start trusting the
    # server instead of the client when matching review request ids.
    # Bug 1047516
    previous_commits = get_previous_commits(squashed_rr)

    # A mapping from previously pushed node, which has not been processed
    # yet, to the review request id associated with that node.
    remaining_nodes = dict((t[0], t[1])
                           for i, t in enumerate(previous_commits))

    # A list of review request ids that should be discarded when
    # publishing. Adding to this list will mark a review request as
    # to-be-discarded when the squashed draft is published on Review Board.
    discard_on_publish_rids = rid_list_to_str(json.loads(
        squashed_rr.extra_data['p2rb.discard_on_publish_rids']))

    # A list of review request ids that have been created for individual
    # commits but have not been published. If this list contains an item,
    # it should be re-used for individual commits instead of creating a
    # brand new review request.
    unpublished_rids = rid_list_to_str(json.loads(
        squashed_rr.extra_data['p2rb.unpublished_rids']))

    # Set of review request ids which have not been matched to a commit
    # from the current push. We use a list to represent this set because
    # if any entries are left over we need to process them in order.
    # This list includes currently published rids that were part of the
    # previous push and rids which have been used for drafts on this
    # reviewid but have not been published.
    unclaimed_rids = [t[1] for t in previous_commits]
    for rid in (discard_on_publish_rids + unpublished_rids):
        if rid not in unclaimed_rids:
            unclaimed_rids.append(rid)

    # Previously pushed nodes which have been processed and had their
    # review request updated or did not require updating.
    processed_nodes = set()

    node_to_rid = {}

    # A mapping from review request id to the corresponding review request
    # API object.
    review_requests = {}

    # Do a pass and find all commits that map cleanly to old review
    # requests.
    for commit in commits['individual']:
        node = commit['id']

        if node not in remaining_nodes:
            continue

        # If the commit appears in an old review request, by definition of
        # commits deriving from content, the commit has not changed and
        # there is nothing to update. Update our accounting and move on.
        rid = remaining_nodes[node]
        del remaining_nodes[node]
        unclaimed_rids.remove(rid)
        processed_nodes.add(node)
        node_to_rid[node] = rid

        rr = api_root.get_review_request(review_request_id=rid)
        review_requests[rid] = rr

        try:
            discard_on_publish_rids.remove(rid)
        except ValueError:
            pass

    # Find commits that map to a previous version.
    for commit in commits['individual']:
        node = commit['id']
        if node in processed_nodes:
            continue

        # The client may have sent obsolescence data saying which commit
        # this commit has derived from. Use that data (if available) to try
        # to find a mapping to an old review request.
        for precursor in commit['precursors']:
            rid = remaining_nodes.get(precursor)
            if not rid:
                continue

            del remaining_nodes[precursor]
            unclaimed_rids.remove(rid)

            rr = update_review_request(rid, commit)
            processed_nodes.add(node)
            node_to_rid[node] = rid
            review_requests[rid] = rr

            try:
                discard_on_publish_rids.remove(rid)
            except ValueError:
                pass

            break

    # Now do a pass over the commits that didn't map cleanly.
    for commit in commits['individual']:
        node = commit['id']
        if node in processed_nodes:
            continue

        # We haven't seen this commit before *and* our mapping above didn't
        # do anything useful with it.

        # This is where things could get complicated. We could involve
        # heuristic based matching (comparing commit messages, changed
        # files, etc). We may do that in the future.

        # For now, match the commit up against the next one in the index.
        # The unclaimed rids list contains review requests which were
        # created when previously updating this review identifier, but not
        # published. If we have more commits than were previously published
        # we'll start reusing these private review requests before creating
        # new ones.
        if unclaimed_rids:
            assumed_old_rid = unclaimed_rids[0]
            unclaimed_rids.pop(0)
            rr = update_review_request(assumed_old_rid, commit)
            processed_nodes.add(commit['id'])
            node_to_rid[node] = assumed_old_rid
            review_requests[assumed_old_rid] = rr

            try:
                discard_on_publish_rids.remove(assumed_old_rid)
            except ValueError:
                pass

            continue

        # There are no more unclaimed review request IDs. This means we
        # have more commits than before. Create new review requests as
        # appropriate.
        rr = rrs.create(**{
            'extra_data.p2rb': 'True',
            'extra_data.p2rb.is_squashed': 'False',
            'extra_data.p2rb.identifier': identifier,
            'extra_data.p2rb.commit_id': commit['id'],
            'repository': repoid,
        })
        rr.get_diffs().upload_diff(commit['diff'],
                                   parent_diff=commit['parent_diff'])
        draft = rr.get_or_create_draft(
            summary=commit['message'].splitlines()[0],
            description=commit['message'])
        processed_nodes.add(commit['id'])

        # Normalize all review request identifiers to strings.
        assert isinstance(rr.id, int)
        rid = str(rr.id)
        node_to_rid[node] = rid
        review_requests[rid] = rr
        unpublished_rids.append(rid)

    # At this point every incoming commit has been accounted for.
    # If there are any remaining review requests, they must belong to
    # deleted commits. (Or, we made a mistake and updated the wrong review
    # request)
    for rid in unclaimed_rids:
        rr = api_root.get_review_request(review_request_id=rid)

        if rr.public and rid not in discard_on_publish_rids:
            # This review request has already been published so we'll need
            # to discard it when we publish the squashed review request.
            discard_on_publish_rids.append(rid)
        elif not rr.public and rid not in unpublished_rids:
            # We've never published this review request so it may be
            # reused in the future for *any* commit. Keep track of it.
            unpublished_rids.append(rid)
        else:
            # This means we've already marked the review request properly
            # in a previous push, so do nothing.
            pass

    # Build the human-readable summary for the squashed review request:
    # one "/r/<rid> - <first line>" entry per commit plus pull
    # instructions.
    squashed_description = []
    for commit in commits['individual']:
        squashed_description.append('/r/%s - %s' % (
            node_to_rid[commit['id']],
            commit['message'].splitlines()[0]))

    squashed_description.extend(['', 'Pull down '])
    if len(commits['individual']) == 1:
        squashed_description[-1] += 'this commit:'
    else:
        squashed_description[-1] += 'these commits:'

    squashed_description.extend([
        '',
        'hg pull -r %s %s' % (commits['individual'][-1]['id'], repo_url),
    ])

    commit_list = []
    for commit in commits['individual']:
        node = commit['id']
        commit_list.append([node, node_to_rid[node]])

    commit_list_json = json.dumps(commit_list)
    depends = ','.join(str(i) for i in sorted(node_to_rid.values()))

    squashed_draft = squashed_rr.get_or_create_draft(**{
        'summary': identifier,
        'description': '%s\n' % '\n'.join(squashed_description),
        'depends_on': depends,
        'extra_data.p2rb.commits': commit_list_json,
    })

    # Persist the updated bookkeeping lists back to the squashed review
    # request.
    squashed_rr.update(**{
        'extra_data.p2rb.discard_on_publish_rids': json.dumps(
            discard_on_publish_rids),
        'extra_data.p2rb.unpublished_rids': json.dumps(
            unpublished_rids),
    })

    review_requests[str(squashed_rr.id)] = squashed_rr

    return str(squashed_rr.id), node_to_rid, review_requests
class RBInterface():
    """Thin caching wrapper around the Review Board web API.

    Resolves resources via the server's published URI templates and
    memoizes file/diff lookups per URL. Methods decorated with
    @authentication_wrapper trigger a login flow on auth failures
    (decorator defined elsewhere in this module).
    """

    def __init__(self, url):
        self.client = RBClient(url)
        self.root = self.client.get_root()

        # Parse the server version ("X.Y ..."); some endpoints (patched/
        # original file contents, diff data) only exist on 3.0+.
        try:
            self._version = float(
                self.root.rsp['product']['version'].split()[0])
        except:
            self._version = 0.0

        # URI templates advertised by the server root resource.
        self._templates = self.root.rsp['uri_templates']

        # Per-URL caches: file resources, parsed filediff data, and
        # whole-diff resources, respectively.
        self._files = {}
        self._file_data = {}
        self._simplefile_data = {}

    @authentication_wrapper
    def get_review_requests(self, current_line):
        """Return the review request list starting at offset current_line."""
        return self.root.get_review_requests(start=current_line)

    @authentication_wrapper
    def get_review_request(self, review_request_id):
        """Fetch a single review request via the URI template."""
        review_request_template = self._templates['review_request']
        url = review_request_template.format(
            review_request_id=review_request_id)
        return self.client.get_url(url)

    @authentication_wrapper
    def get_file_src(self, review_request_id, diff_revision, filediff_id):
        """Return the source path of the given filediff, or None if absent."""
        url = self._templates['diff'].format(
            review_request_id=review_request_id,
            diff_revision=diff_revision)

        # Fetch and cache the whole diff resource once per URL.
        if url not in self._simplefile_data:
            diff_obj = self.client.get_url(url)
            self._simplefile_data[url] = diff_obj

        diff_obj = self._simplefile_data[url]
        for filesimple in diff_obj.get_files():
            if filesimple['id'] == filediff_id:
                return filesimple['source_file']

    @authentication_wrapper
    def get_file_dst(self, review_request_id, diff_revision, filediff_id):
        """Return the destination path of the given filediff, or None."""
        url = self._templates['diff'].format(
            review_request_id=review_request_id,
            diff_revision=diff_revision)

        if url not in self._simplefile_data:
            diff_obj = self.client.get_url(url)
            self._simplefile_data[url] = diff_obj

        diff_obj = self._simplefile_data[url]
        for filesimple in diff_obj.get_files():
            if filesimple['id'] == filediff_id:
                return filesimple['dest_file']

    @authentication_wrapper
    def get_file(self, review_request_id, diff_revision, filediff_id):
        """Fetch (and cache) the file resource for a filediff."""
        url = self._templates['file'].format(
            review_request_id=review_request_id,
            diff_revision=diff_revision,
            filediff_id=filediff_id)

        if url in self._files:
            return self._files[url]

        file_obj = self.client.get_url(url)
        self._files[url] = file_obj
        return self._files[url]

    def get_dst_lines(self, review_request_id, diff_revision, filediff_id):
        """Return the patched (destination) file contents as a line list."""
        if self._version >= 3.0:
            # 3.0+ servers can hand us the patched file directly.
            file_obj = self.get_file(review_request_id, diff_revision,
                                     filediff_id)
            return file_obj.get_patched_file()['data'].splitlines()

        # Older servers: start from the file fetched out of Perforce and
        # overlay the changed lines extracted from the diff chunks.
        dest_file = self.get_file_dst(review_request_id, diff_revision,
                                      filediff_id)
        updates = self.get_filediff_data(review_request_id, diff_revision,
                                         filediff_id)
        dst_updates = updates['dst_updates']
        dst_lines = get_p4_file(dest_file)
        for lineno, linevalue in dst_updates.iteritems():
            dst_lines[lineno] = linevalue
        return dst_lines

    def get_src_lines(self, review_request_id, diff_revision, filediff_id):
        """Return the original (source) file contents, or None on failure."""
        try:
            if self._version >= 3.0:
                file_obj = self.get_file(review_request_id, diff_revision,
                                         filediff_id)
                return file_obj.get_original_file()['data'].splitlines()

            source_file = self.get_file_src(review_request_id, diff_revision,
                                            filediff_id)
            return get_p4_file(source_file)
        except:
            return None

    @authentication_wrapper
    def get_filediff_data(self, review_request_id, diff_revision,
                          filediff_id):
        """Build (and cache) line-position maps from the filediff chunks.

        Returns a dict with 'source_global_pos' and 'dest_global_pos'
        (0-based source/dest line -> 0-based global diff row) and
        'dst_updates' (0-based dest line -> unescaped new line text).
        """
        url = self._templates['file'].format(
            review_request_id=review_request_id,
            diff_revision=diff_revision,
            filediff_id=filediff_id)

        if url in self._file_data:
            return self._file_data[url]

        file_obj = self.get_file(review_request_id, diff_revision,
                                 filediff_id)

        # chunks are collected differently
        if self._version >= 3.0:
            chunks = file_obj.get_diff_data()['chunks']
        else:
            chunks = file_obj['chunks']

        source_line_global_pos = {}
        dest_line_global_pos = {}
        dst_updates = {}
        for chunk in chunks:
            for line in chunk['lines']:
                # Rows without a source/dest line number raise here and
                # are intentionally skipped.
                try:
                    source_line_global_pos[int(line[1]) - 1] = int(line[0]) - 1
                    dest_line_global_pos[int(line[4]) - 1] = int(line[0]) - 1
                    dst_updates[line[1] - 1] = _html_parser.unescape(line[5])
                except:
                    pass

        self._file_data[url] = {
            'source_global_pos': source_line_global_pos,
            'dest_global_pos': dest_line_global_pos,
            'dst_updates': dst_updates
        }
        return self._file_data[url]

    @authentication_wrapper
    def make_review(self, review_request_id):
        """Return the existing review draft, creating one if needed."""
        review_request = self.get_review_request(review_request_id)
        try:
            return review_request.get_reviews().get_review_draft()
        except:
            return review_request.get_reviews().create()

    @authentication_wrapper
    def make_comment(self, review_request_id, first_line, text, filediff_id,
                     num_lines):
        """Attach a diff comment to the current review draft."""
        request = self.make_review(review_request_id)
        request.get_diff_comments().create(first_line=first_line,
                                           text=text,
                                           filediff_id=filediff_id,
                                           num_lines=num_lines)

    def login(self, user, password):
        """Authenticate the underlying API client."""
        self.client.login(user, password)
class MozReviewBot(object):
    """Base class for bots that react to MozReview commit messages on Pulse.

    Configuration is resolved in priority order: explicit ``__init__``
    arguments, then the optional config file, then environment variables.
    Subclasses override :meth:`process_commit` to do their actual work.
    """

    def __init__(self, config_path=None, reviewboard_url=None,
                 reviewboard_user=None, reviewboard_password=None,
                 pulse_host=None, pulse_port=None, pulse_userid=None,
                 pulse_password=None, exchange=None, queue=None,
                 routing_key=None, pulse_timeout=None, pulse_ssl=False,
                 repo_root=None, logger=None):
        if logger is None:
            self.logger = logging.getLogger('mozreviewbot')
        else:
            self.logger = logger

        # We use options passed into __init__ preferentially. If any of these
        # are not specified, we next check the configuration file, if any.
        # Finally, we use environment variables.
        if config_path and not os.path.isfile(config_path):
            # ConfigParser doesn't seem to throw if it is unable to find the
            # config file so we'll explicitly check that it exists.
            self.logger.error('could not locate config file: %s' % (
                config_path))
            config_path = None

        if config_path:
            try:
                config = ConfigParser()
                config.read(config_path)
                reviewboard_url = (reviewboard_url
                                   or config.get('reviewboard', 'url'))
                reviewboard_user = (reviewboard_user
                                    or config.get('reviewboard', 'user'))
                reviewboard_password = (reviewboard_password
                                        or config.get('reviewboard',
                                                      'password'))
                pulse_host = pulse_host or config.get('pulse', 'host')
                pulse_port = pulse_port or config.get('pulse', 'port')
                pulse_userid = pulse_userid or config.get('pulse', 'userid')
                pulse_password = pulse_password or config.get('pulse',
                                                              'password')
                exchange = exchange or config.get('pulse', 'exchange')
                queue = queue or config.get('pulse', 'queue')
                routing_key = routing_key or config.get('pulse',
                                                        'routing_key')
                pulse_timeout = pulse_timeout or config.get('pulse',
                                                            'timeout')
                # NOTE(review): pulse_ssl defaults to False, not None, so this
                # branch only fires when a caller explicitly passes None.
                if pulse_ssl is None:
                    pulse_ssl = config.get('pulse', 'ssl')
            except NoSectionError as e:
                self.logger.error('configuration file missing section: %s' %
                                  e.section)

            try:
                repo_root = repo_root or config.get('hg', 'repo_root')
            except (NoOptionError, NoSectionError):
                # Subclasses do not need to define repo root if they do not
                # plan on using the hg functionality.
                pass

            # keep config around in case any subclasses would like to extract
            # options from it.
            self.config = config
        else:
            self.config = None

        reviewboard_url = reviewboard_url or os.environ.get('REVIEWBOARD_URL')
        pulse_host = pulse_host or os.environ.get('PULSE_HOST')
        pulse_port = pulse_port or os.environ.get('PULSE_PORT')

        self.rbclient = RBClient(reviewboard_url,
                                 username=reviewboard_user,
                                 password=reviewboard_password)
        self.api_root = self.rbclient.get_root()

        self.conn = Connection(hostname=pulse_host, port=pulse_port,
                               userid=pulse_userid, password=pulse_password,
                               ssl=pulse_ssl)

        self.exchange = Exchange(exchange, type='topic', durable=True)
        self.queue = Queue(name=queue, exchange=self.exchange, durable=True,
                           routing_key=routing_key, exclusive=False,
                           auto_delete=False)

        self.pulse_timeout = float(pulse_timeout)
        self.repo_root = repo_root

        # Locate the hg executable on PATH.
        self.hg = None
        for path_dir in os.environ['PATH'].split(os.pathsep):
            candidate = os.path.join(path_dir, 'hg')
            if os.path.exists(candidate):
                self.hg = candidate
                # BUG FIX: stop at the first match so PATH ordering is
                # honoured; previously the loop continued and silently picked
                # the *last* hg on PATH.
                break

    def _get_available_messages(self):
        """Drain and return currently-queued Pulse messages as (body, message)
        tuples, waiting at most self.pulse_timeout seconds."""
        messages = []

        def onmessage(body, message):
            messages.append((body, message))

        consumer = self.conn.Consumer([self.queue], callbacks=[onmessage],
                                      auto_declare=True)
        with consumer:
            try:
                self.conn.drain_events(timeout=self.pulse_timeout)
            except socket.timeout:
                # No messages within the timeout window; return what we have.
                pass
        return messages

    def _run_hg(self, hg_args):
        """Run hg with the given argument list and return its stdout."""
        # TODO: Use hgtool.
        args = [self.hg] + hg_args
        env = dict(os.environ)
        env['HGENCODING'] = 'utf-8'
        null = open(os.devnull, 'w')
        # Execute at / to prevent Mercurial's path traversal logic from
        # kicking in and picking up unwanted config files.
        return subprocess.check_output(args, stdin=null, stderr=null,
                                       env=env, cwd='/')

    def ensure_hg_repo_exists(self, landing_repo_url, repo_url, pull_rev=None):
        """Clone (if needed) and pull a repository under self.repo_root.

        The local clone is keyed by the SHA-1 of the landing repo URL (or the
        review repo URL when no landing repo is given).  Returns the local
        repository path.
        """
        # TODO: Use the root changeset in each repository as an identifier.
        # This will enable "forks" to share the same local clone.
        # The "share" extension now has support for this.
        # Read hg help -e share for details about "pooled storage."
        # We should probably deploy that.
        url = landing_repo_url or repo_url
        sha1 = hashlib.sha1(url).hexdigest()
        repo_path = os.path.join(self.repo_root, sha1)

        if not os.path.exists(repo_path):
            args = ['clone', url, repo_path]
            self.logger.debug('cloning %s' % url)
            self._run_hg(args)
            self.logger.debug('finished cloning %s' % url)

        args = ['-R', repo_path, 'pull', repo_url]
        if pull_rev:
            args.extend(['-r', pull_rev])
        self.logger.debug('pulling %s' % repo_url)
        self._run_hg(args)
        self.logger.debug('finished pulling %s' % repo_url)

        return repo_path

    def hg_commit_changes(self, repo_path, node, diff_context=None):
        """Obtain information about what changed in a Mercurial commit.

        The return value is a tuple of:

          (set(adds), set(dels), set(mods), None, diff)

        The first 4 items list what files changed in the changeset. The last
        item is a unified diff of the changeset.

        File copies are currently not returned. ``None`` is being used as a
        placeholder until support is needed.
        """
        # UUID delimiters keep filenames containing odd characters from
        # colliding with our separators.
        part_delim = str(uuid.uuid4())
        item_delim = str(uuid.uuid4())

        parts = [
            '{join(file_adds, "%s")}' % item_delim,
            '{join(file_dels, "%s")}' % item_delim,
            '{join(file_mods, "%s")}' % item_delim,
            '{join(file_copies, "%s")}' % item_delim,
        ]
        template = part_delim.join(parts)

        self._run_hg(['-R', repo_path, 'up', '-C', node])
        res = self._run_hg(['-R', repo_path, 'log', '-r', node,
                            '-T', template])

        diff_args = ['-R', repo_path, 'diff', '-c', node]
        if diff_context is not None:
            diff_args.extend(['-U', str(diff_context)])
        diff = self._run_hg(diff_args)

        adds, dels, mods, copies = res.split(part_delim)
        adds = set(f for f in adds.split(item_delim) if f)
        dels = set(f for f in dels.split(item_delim) if f)
        mods = set(f for f in mods.split(item_delim) if f)
        # TODO parse the copies.

        return adds, dels, mods, None, diff

    def strip_nonpublic_changesets(self, repo_path):
        """Strip non-public changesets from a repository.

        Pulling changesets over and over results in many heads in a
        repository. This makes Mercurial slow. So, we prune non-public
        changesets/heads to keep repositories fast.
        """
        self._run_hg(['-R', repo_path, '--config', 'extensions.strip=',
                      'strip', '--no-backup', '-r', 'not public()'])

    def get_commit_files(self, commit):
        """Fetches a list of files that were changed by this commit."""
        rrid = commit['review_request_id']
        diff_revision = commit['diffset_revision']

        start = 0
        files = []
        while True:
            result = self.api_root.get_files(review_request_id=rrid,
                                             diff_revision=diff_revision,
                                             start=start)
            files.extend(result)
            start += result.num_items
            if result.num_items == 0 or start >= result.total_results:
                break
        return files

    def handle_available_messages(self):
        """Process every queued Pulse message, acknowledging each one."""
        for body, message in self._get_available_messages():
            payload = body['payload']
            repo_url = payload['repository_url']
            landing_repo_url = payload['landing_repository_url']
            commits = payload['commits']
            # TODO: should we allow process commits to signal that we should
            # skip acknowledging the message?
            try:
                for commit in commits:
                    rrid = commit['review_request_id']
                    diff_revision = commit['diffset_revision']

                    review = BatchReview(self.api_root, rrid, diff_revision)
                    self.process_commit(review, landing_repo_url, repo_url,
                                        commit)
            finally:
                # This prevents the queue from growing indefinitely but
                # prevents us from fixing whatever caused the exception
                # and restarting the bot to handle the message.
                message.ack()

    def listen_forever(self):
        """Block forever, processing Pulse messages as they arrive."""
        while True:
            self.handle_available_messages()

    def process_commit(self, review, landing_repo_url, repo_url, commit):
        """Subclass hook invoked once per commit in a Pulse message.

        BUG FIX: the signature now matches the call site in
        handle_available_messages, which passes
        (review, landing_repo_url, repo_url, commit); the previous
        3-parameter form raised TypeError as soon as a message was processed.
        """
        pass
#!/usr/bin/python from rbtools.api.client import RBClient client = RBClient('http://10.200.2.68/', username='******', password='******') root = client.get_root() repos = root.get_repositories() if repos.num_items < 1: raise Exception('No valid repositories.') #print(repos.num_items) repository = repos[12].id #No.13 repository is tnt #create request to a repository for index in range(1): review_request = root.get_review_requests().create(repository=repository) draft = review_request.get_or_create_draft() draft = draft.update(summary=review_request.id, target_people='baojiawei,tujinpeng', description='nothing', public=True) print review_request.id review = review_request.get_reviews().create() review.update(body_top=review_request.id, public=True, ship_it=True) #requests = root.get_review_requests(ship_it_count=0,to_users='wangfei') #print (requests) client2 = RBClient('http://10.200.2.68/', username='******',
def get_api_root(user, password): client = RBClient('http://localhost/', username=user, password=password) root = client.get_root() return root
class AnyRB(object):
    """Bridge between a course 'event' (with attached files) and Review Board:
    builds a synthetic git diff from the event's text files and publishes it
    as a review request."""

    def __init__(self, event):
        # Authenticated RBTools client configured from Django settings.
        self.client = RBClient(settings.RB_API_URL,
                               username=settings.RB_API_USERNAME,
                               password=settings.RB_API_PASSWORD)
        self.event = event

    def upload_review(self):
        """Build a unified diff from the event's text attachments and upload
        it to a (possibly freshly created) review request.

        Returns the review request id, or None when there are no attachments,
        none of them are text, or the request could not be created.
        """
        if len(self.event.file_set.all()) == 0:
            return None
        files_diff = []
        empty = True
        import magic
        for f in self.event.file_set.all():
            # Sniff the MIME type from the first 1 KiB; skip non-text files.
            mime_type = magic.from_buffer(f.file.read(1024), mime=True)
            if mime_type[:4] != 'text':
                continue
            empty = False
            file_content = []
            for line in f.file:
                # Decode as UTF-8, falling back to cp1251 for legacy uploads.
                try:
                    file_content.append(line.decode('utf-8'))
                except:
                    file_content.append(line.decode('cp1251'))
            from difflib import unified_diff
            fname = f.filename()
            from_name = u'a/{0}'.format(fname)
            to_name = u'b/{0}'.format(fname)
            # Emit a git-style header, then diff against an empty "old" file
            # so the whole attachment shows up as added lines.
            diff = [(u'diff --git {0} {1}'.format(from_name, to_name))]
            from_name = u'/dev/null'
            diff_content = unified_diff('', file_content,
                                        fromfile=from_name,
                                        tofile=to_name.encode('utf-8'))
            for line in diff_content:
                line = line.strip()
                # unified_diff may yield both str and unicode here; normalize
                # everything to unicode before joining.
                if isinstance(line, str):
                    diff.append(line.decode('utf-8'))
                else:
                    diff.append(line)
            files_diff.append(u'\n'.join(diff))
        files_diff = u'\n'.join(files_diff)
        if empty:
            return None
        review_request = self.get_review_request()
        if review_request is None:
            review_request = self.create_review_request()
        if review_request is None:
            return review_request
        review_request.get_diffs().upload_diff(files_diff.encode('utf-8'))
        draft = review_request.get_or_create_draft()
        issue = self.event.issue
        summary = u'[{0}][{1}] {2}'.format(
            issue.student.get_full_name(),
            issue.task.course.get_user_group(issue.student),
            issue.task.title)
        # Markdown description linking back to the course, student and issue
        # pages on the current site (user-facing text is Russian by design).
        description_template = u'Задача: "{0}", ' + \
                               u'курс: [{1}](http://{2}{3})\n' + \
                               u'Студент: [{4}](http://{2}{5})\n' + \
                               u'[Обсуждение задачи](http://{2}{6})'
        description = description_template.format(
            issue.task.title,
            issue.task.course,
            Site.objects.get_current().domain,
            issue.task.course.get_absolute_url(),
            issue.student.get_full_name(),
            issue.student.get_absolute_url(),
            issue.get_absolute_url()
        )
        draft = draft.update(summary=summary,
                             description=description,
                             description_text_type='markdown',
                             target=settings.RB_API_USERNAME,
                             public=True,
                             )
        return review_request.id

    def get_review_request(self):
        """Look up the review request recorded on the issue.

        Returns None when the issue has no stored review_id or the request no
        longer exists on the Review Board server.
        """
        root = self.client.get_root()
        review_request = None
        try:
            review_id = self.event.issue.get_byname('review_id')
        except (AttributeError, ValueError):
            logger.info("Issue '%s' has not review_id.", self.event.issue.id)
            return None
        try:
            review_request = root.get_review_request(
                review_request_id=review_id)
            return review_request
        except Exception as e:
            logger.info("Issue '%s' has not RB review_request. "
                        "Exception: '%s'.", self.event.issue.id, e)
            return None

    def create_review_request(self):
        """Create a git repository entry and review request for the issue.

        Grants access to the student and the course's teachers group, records
        the new request id on the issue, and returns the request (or None on
        any failure, which is logged).
        """
        root = self.client.get_root()
        review_request = None
        course_id = self.event.issue.task.course.id
        repository_name = str(self.event.issue.id)
        repository_path = os.path.join(settings.RB_SYMLINK_DIR,
                                       repository_name)
        # Repository content is exposed through a symlink into the shared dir.
        os.symlink(settings.RB_SYMLINK_DIR, repository_path)
        try:
            repository = root.get_repositories().create(
                name=repository_name,
                path=os.path.join(repository_path, '.git'),
                tool='Git',
                public=False)
            # Grant access: the student personally, plus the course teachers.
            root.get_repository(repository_id=repository.id).update(
                grant_type='add',
                grant_entity='user',
                grant_name=self.event.issue.student)
            root.get_repository(repository_id=repository.id).update(
                grant_type='add',
                grant_entity='group',
                grant_name='teachers_{0}'.format(course_id))
            review_request = root.get_review_requests().create(
                repository=repository.id)
            # Remember the request id on the issue for later lookups.
            self.event.issue.set_byname('review_id', review_request.id,
                                        self.event.author)
        except Exception as e:
            logger.exception("Exception while creating review_request. "
                             "Exception: '%s'. Issue: '%s'",
                             e, self.event.issue.id)
            return None
        return review_request
def main(url, group_names, days_old=7, dry_run=False):
    """ do something """
    # Credentials must come from the environment; fail fast with a clear
    # message when either variable is missing.
    try:
        user = os.environ['RBUSER']
    except KeyError:
        raise SystemExit(
            "please set RBUSER environment variable for reviewboard user")
    try:
        passwd = os.environ['RBPASS']
    except KeyError:
        raise SystemExit(
            "please set RBPASS environment variable for reviewboard password")

    # NOTE(review): the authenticated client is commented out -- the script
    # currently connects anonymously, leaving user/passwd unused for login.
    #client = RBClient(url, user, passwd)
    client = RBClient(url)
    root = client.get_root()
    if not root:
        raise SystemExit("Error - could not get RBClient root.")

    # Validate every requested group name up front before querying reviews.
    for g_name in group_names:
        o = get_group_id_by_name(root, g_name, dry_run=dry_run)
        if not o:
            raise SystemExit("ERROR: no group '%s' found." % g_name)
        logger.debug("Found group '%s' id=%d" % (g_name, o))

    # Collect open reviews for the groups, then keep only stale ones.
    reviews = get_reviews_for_groups(root, group_names, dry_run=dry_run)
    old_reviews = filter_reviews_older_than(root, reviews, days_old,
                                            dry_run=dry_run)
    logger.info(
        "found %d reviews for target groups and last updated %d or more days ago"
        % (len(old_reviews), days_old))
    if len(old_reviews) < 1:
        logger.info("Found no reviews matching criteria, exiting")
        return False

    # The submitters of the stale reviews are the reminder recipients.
    users = get_submitters_for_reviews(old_reviews)
    logger.debug("got user information for %d users" % len(users))
    recipients = []
    for u in users:
        recipients.append("{u.fullname} <{u.email}>".format(u=users[u]))

    # Assemble the HTML reminder message.
    table = generate_report_html_table(old_reviews, url)
    body = "<h1>ReviewBoard reminder</h1>\n"
    body += """<p>You're receiving this message because you have one or more pending code reviews on <a href="{url}">{url}</a> targeted at the '{group_names}' group(s) that have not been updated in over {days_old} days and have not been submitted. At your convenience, please evaluate these reviews and close/submit any that have been merged or discarded.
Thank You.</p>\n""".format(url=url,
                           days_old=days_old,
                           group_names=", ".join(group_names))
    body += table
    body += "\n<br />\n"
    host = node()
    # NOTE(review): this re-binds 'user' (previously the RBUSER value) to the
    # local login name for the footer -- confirm that is intentional.
    user = getuser()
    body += """
<p><em>generated by <a href=\"https://github.com/jantman/misc-scripts/blob/master/reviewboard_reminder_mail.py">reviewboard_reminder_mail.py</a> running on {host} as {user} at {ds}</em></p>
""".format(host=host, user=user, ds=datetime.datetime.now().isoformat())

    if dry_run:
        print(
            "Message to send:\n##############################\n{msg}\n#################################\n"
            .format(msg=body))
        print("Would send to:\n {to}".format(to=", ".join(recipients)))
    else:
        # Sending was never implemented; only dry runs produce output.
        raise SystemExit(
            "Oops - never actually implemented the mail sending...")
    return True
VERBOSE = True if not options.url: print("ERROR: You must specify a reviewboard server URL (-u|--url) to use") sys.exit(2) if not options.repo: print("ERROR: You must specify a repo (-r|--repo) to find reviews for") sys.exit(2) if not options.branch: print("ERROR: You must specify a branch (-b|--branch) to find reviews for") sys.exit(2) client = RBClient(options.url, username=RB_USER, password=RB_PASSWORD) root = client.get_root() if not root: print("Error - could not get RBClient root.") sys.exit(1) repo = get_repository_id_by_name(root, options.repo, verbose=VERBOSE) if repo is None: print("ERROR: Could not find ReviewBoard repository with name '%s'" % options.repo) sys.exit(3) reviews = get_reviews_for_branch(root, repo, options.branch, verbose=VERBOSE) if len(reviews) == 0: print("ERROR: No open reviews found for branch %s in repo %s" % (options.branch, repo)) sys.exit(4) if len(reviews) > 1: print("ERROR: Multiple open reviews found for branch %s in repo %s" % (repo, options.branch))