Example #1
 def search(self, query, options):
     """ Perform Bugzilla search """
     query["query_format"] = "advanced"
     log.debug("Search query:")
     log.debug(pretty(query))
     # Fetch bug info
     try:
         result = self.server.query(query)
     except xmlrpclib.Fault as error:
         # Ignore non-existent users (this is necessary for users with
         # several email aliases to allow them to use --merge/--total)
         if "not a valid username" in unicode(error):
             log.debug(error)
             return []
         # Otherwise suggest to bake bugzilla cookies
         log.error("An error encountered, while searching for bugs.")
         log.debug(error)
         raise ReportError("Have you prepared your cookies by 'bugzilla login'?")
     log.debug("Search result:")
     log.debug(pretty(result))
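     # Index bugs by id so the history and comments can be joined below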
     bugs = dict((bug.id, bug) for bug in result)
     # Fetch bug history
     log.debug("Fetching bug history")
     result = self.server._proxy.Bug.history({"ids": bugs.keys()})
     log.debug(pretty(result))
     history = dict((bug["id"], bug["history"]) for bug in result["bugs"])
     # Fetch bug comments
     log.debug("Fetching bug comments")
     result = self.server._proxy.Bug.comments({"ids": bugs.keys()})
     log.debug(pretty(result))
     comments = dict((int(bug), data["comments"]) for bug, data in result["bugs"].items())
     # Create bug objects
     return [
         self.parent.bug(bugs[id], history[id], comments[id],
                         parent=self.parent)
         for id in bugs]
Example #2
 def search(query, parent, options):
     """ Perform Trac search """
     # Extend the default max number of tickets to be fetched
     query = "{0}&max={1}".format(query, MAX_TICKETS)
     log.debug("Search query: {0}".format(query))
     try:
         result = parent.proxy.ticket.query(query)
     except xmlrpclib.Fault as error:
         log.error("An error encountered, while searching for tickets.")
         raise ReportError(error)
     except xmlrpclib.ProtocolError as error:
         log.debug(error)
         log.error("Trac url: {0}".format(parent.url))
         raise ReportError(
             "Unable to contact Trac server. Is the url above correct?")
     log.debug("Search result: {0}".format(result))
     # Fetch tickets and their history using multicall
     multicall = xmlrpclib.MultiCall(parent.proxy)
     for ticket_id in sorted(result):
         multicall.ticket.get(ticket_id)
         multicall.ticket.changeLog(ticket_id)
     log.debug(u"Fetching trac tickets and their history")
     result = list(multicall())
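     # Calls were queued in get/changeLog pairs, so results alternate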
     tickets = result[::2]
     changelogs = result[1::2]
     # Print debugging info
     for ticket, changelog in zip(tickets, changelogs):
         log.debug("Fetched ticket #{0}".format(ticket[0]))
         log.debug(pretty(ticket))
         log.debug("Changelog:")
         log.debug(pretty(changelog))
     # Return the list of ticket objects
     return [
         Trac(ticket, changelog, parent=parent, options=options)
         for ticket, changelog in zip(tickets, changelogs)]
Example #3
    def __init__(self, config=None, path=None):
        """
        Read the config file

        Parse config from given string (config) or file (path).
        If no config or path is given, default to "~/.did/config",
        which can be overridden by the ``DID_DIR`` environment variable.
        """
        # Read the config only once (unless explicitly provided)
        if self.parser is not None and config is None and path is None:
            return
        Config.parser = ConfigParser.SafeConfigParser()
        # If config provided as string, parse it directly
        if config is not None:
            log.info("Inspecting config file from string")
            log.debug(utils.pretty(config))
            self.parser.readfp(StringIO.StringIO(config))
            return
        # Check the environment for config file override
        # (unless path is explicitly provided)
        if path is None:
            path = Config.path()
        # Parse the config from file
        try:
            log.info("Inspecting config file '{0}'.".format(path))
            self.parser.readfp(codecs.open(path, "r", "utf8"))
        except IOError as error:
            log.debug(error)
            Config.parser = None
            raise ConfigFileError(
                "Unable to read the config file '{0}'.".format(path))
Example #4
    def search(query, stats, expand=None):
        """ Perform page/comment search for given stats instance """
        log.debug("Search query: {0}".format(query))
        content = []

        # Fetch data from the server in batches of MAX_RESULTS issues
        for batch in range(MAX_BATCHES):
            response = stats.parent.session.get(
                "{0}/rest/api/content/search?{1}".format(
                    stats.parent.url,
                    urllib.parse.urlencode({
                        "cql": query,
                        "limit": MAX_RESULTS,
                        "expand": expand,
                        "start": batch * MAX_RESULTS
                    })))
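            # Each batch advances 'start' by MAX_RESULTS over the same CQL query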
            data = response.json()
            log.debug("Batch {0} result: {1} fetched".format(
                batch, listed(data["results"], "object")))
            log.data(pretty(data))
            content.extend(data["results"])
            # If there is no next page, we're done
            if data['_links'].get('next') is None:
                break
        return content
Example #5
    def user_link_history(self, created_before=None, created_after=None,
                          limit=100, **kwargs):
        """  Bit.ly API - user_link_history wrapper"""
        """ Bit.ly link

        Link History Keys
        -----------------

            [u'aggregate_link', u'archived', u'campaign_ids',
             u'client_id', u'created_at', u'keyword_link',
             u'link', u'long_url', u'modified_at',
             u'private', u'tags', u'title', u'user_ts']
        """
        # bit.ly API doesn't seem to accept anything other than ints
        limit = int(limit)
        created_after = int(created_after)
        created_before = int(created_before)
        hist = self.api.user_link_history(
            limit=limit, created_before=created_before,
            created_after=created_after)

        # FIXME: if we have more than 100 objects we need to PAGINATE
        record = "{0} - {1}"
        links = []
        for r in hist:
            link = r.get('keyword_link') or r['link']
            title = r['title'] or '<< NO TITLE >>'
            links.append(record.format(link, title))
        log.debug("First 3 Links fetched:")
        log.debug(pretty(hist[0:3], indent=4))
        return links
Example #6
 def search(query, stats):
     """ Perform issue search for given stats instance """
     log.debug("Search query: {0}".format(query))
     issues = []
     # Fetch data from the server in batches of MAX_RESULTS issues
     for batch in range(MAX_BATCHES):
         result = stats.parent.session.open(
             "{0}/rest/api/latest/search?{1}".format(
                 stats.parent.url,
                 urllib.urlencode({
                     "jql": query,
                     "fields": "summary,comment",
                     "maxResults": MAX_RESULTS,
                     "startAt": batch * MAX_RESULTS
                 })))
         data = json.loads(result.read())
         log.debug("Batch {0} result: {1} fetched".format(
             batch, listed(data["issues"], "issue")))
         log.data(pretty(data))
         issues.extend(data["issues"])
         # If all issues fetched, we're done
         if len(issues) >= data["total"]:
             break
     # Return the list of issue objects
     return [Issue(issue, prefix=stats.parent.prefix) for issue in issues]
Example #7
    def get_actions(self, filters, since=None, before=None, limit=1000):
        """
        Example of data structure:
        https://api.trello.com/1/members/ben/actions?limit=2
        """
        if limit > 1000:
            raise NotImplementedError(
                "Fetching more than 1000 items is not implemented")
        resp = self.stats.session.open("{0}/members/{1}/actions?{2}".format(
            self.stats.url, self.username,
            urllib.parse.urlencode({
                "key": self.key,
                "token": self.token,
                "filter": filters,
                "limit": limit,
                "since": str(since),
                "before": str(before)
            })))

        actions = json.loads(resp.read())
        log.data(pretty(actions))
        actions = [
            act for act in actions
            if act['data']['board']['id'] in self.board_ids
        ]
        return actions
Example #8
    def fetch(self):
        log.info(u"Searching for patches added to changes by {0}".format(self.user))
        reviewer = self.user.login
        self.stats = []
        tickets = GerritUnit.fetch(
            self, "owner:{0}+is:closed&q=owner:{0}+is:open".format(reviewer), "", limit_since=True
        )
        for tck in tickets:
            log.debug("ticket = {0}".format(tck))
            try:
                changes = self.repo.get_changelog(tck)
            except IOError:
                log.debug("Failing to retrieve details for {0}".format(tck.change_id))
                continue

            owner = changes["owner"]["email"]

            log.debug("changes.messages = {0}".format(pretty(changes["messages"])))
            cmnts_by_user = []
            for chg in changes["messages"]:
                # TODO This is a very bad algorithm for recognising
                # patch sets added by the owner of the change, but
                # I don’t know how to find a list of all revisions for
                # the particular change.
                if (
                    "author" in chg
                    and owner == chg["author"]["email"]
                    and self.get_gerrit_date(chg["date"][:10]) >= self.since_date
                    and "uploaded patch" in chg["message"].lower()
                ):
                    cmnts_by_user.append(chg)
            if len(cmnts_by_user) > 0:
                self.stats.append(Change(tck.ticket, changelog=changes, prefix=self.prefix))
        log.debug(u"self.stats = {0}".format(self.stats))
Example #9
    def get_actions(self, filters, since=None, before=None, limit=1000):
        """
        Example of data structure:
        https://api.trello.com/1/members/ben/actions?limit=2
        """
        if limit > 1000:
            raise NotImplementedError(
                "Fetching more than 1000 items is not implemented")
        resp = self.stats.session.open(
            "{0}/members/{1}/actions?{2}".format(
                self.stats.url, self.username, urllib.urlencode({
                    "key": self.key,
                    "token": self.token,
                    "filter": filters,
                    "limit": limit,
                    "since": str(since),
                    "before": str(before)})))

        actions = json.loads(resp.read())
        log.data(pretty(actions))
        actions = [
            act for act in actions
            if act['data']['board']['id'] in self.board_ids]
        return actions
Example #10
    def search(self, query):
        """ Perform GitHub query """
        result = []
        url = self.url + "/" + query + f"&per_page={PER_PAGE}"

        while True:
            # Fetch the query
            log.debug(f"GitHub query: {url}")
            try:
                response = requests.get(url, headers=self.headers)
                log.debug(f"Response headers:\n{response.headers}")
            except requests.exceptions.RequestException as error:
                log.debug(error)
                raise ReportError(f"GitHub search on {self.url} failed.")

            # Parse fetched json data
            try:
                data = json.loads(response.text)["items"]
                result.extend(data)
            except (json.JSONDecodeError, KeyError) as error:
                log.debug(error)
                raise ReportError(f"GitHub JSON failed: {response.text}.")

            # Update url to the next page, break if no next page provided
            if 'next' in response.links:
                url = response.links['next']['url']
            else:
                break

        log.debug("Result: {0} fetched".format(listed(len(result), "item")))
        log.data(pretty(result))
        return result
Example #11
 def search(query, stats):
     """ Perform issue search for given stats instance """
     log.debug("Search query: {0}".format(query))
     issues = []
     # Fetch data from the server in batches of MAX_RESULTS issues
     for batch in range(MAX_BATCHES):
         response = stats.parent.session.get(
             "{0}/rest/api/latest/search?{1}".format(
                 stats.parent.url, urllib.parse.urlencode({
                     "jql": query,
                     "fields": "summary,comment",
                     "maxResults": MAX_RESULTS,
                     "startAt": batch * MAX_RESULTS})))
         data = response.json()
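         # Jira reports failures as an 'errorMessages' list in the response body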
         if not response.ok:
             try:
                 error = " ".join(data["errorMessages"])
             except KeyError:
                 error = "unknown"
             raise ReportError(
                 f"Failed to fetch jira issues for query '{query}'. "
                 f"The reason was '{response.reason}' "
                 f"and the error was '{error}'.")
         log.debug("Batch {0} result: {1} fetched".format(
             batch, listed(data["issues"], "issue")))
         log.data(pretty(data))
         issues.extend(data["issues"])
         # If all issues fetched, we're done
         if len(issues) >= data["total"]:
             break
     # Return the list of issue objects
     return [Issue(issue, prefix=stats.parent.prefix) for issue in issues]
Example #12
 def fetch(self):
     log.info(u"Searching for changes reviewed by {0}".format(self.user))
     # Collect ALL changes opened (and perhaps now closed) after the
     # given date and collect all reviews from them ... then limit by
     # the actual reviewer (not reviewer:<login>, because that doesn't
     # mean the person actually did a review, only that they have the
     # right to do so).
     self.stats = []
     reviewer = self.user.login
     tickets = GerritUnit.fetch(
         self,
         "reviewer:{0}+is:closed&q=reviewer:{0}+is:open".format(reviewer),
         "", limit_since=True)
     for tck in tickets:
         log.debug("ticket = {0}".format(tck))
         try:
             changes = self.repo.get_changelog(tck)
         except IOError:
             log.debug("Failing to retrieve details for {0}".format(tck.change_id))
             continue
         log.debug("changes.messages = {0}".format(pretty(changes["messages"])))
         cmnts_by_user = []
         for chg in changes["messages"]:
             if "author" in chg and reviewer in chg["author"]["email"]:
                 comment_date = self.get_gerrit_date(chg["date"][:10])
                 if comment_date >= self.since_date:
                     cmnts_by_user.append(chg)
         if len(cmnts_by_user) > 0:
             self.stats.append(Change(tck.ticket, changelog=changes, prefix=self.prefix))
     log.debug(u"self.stats = {0}".format(self.stats))
Example #13
    def get(self, path):
        """ Perform a GET request with GSSAPI authentication """
        # Generate token
        service_name = gssapi.Name('HTTP@{0}'.format(self.url.netloc),
                                   gssapi.NameType.hostbased_service)
        ctx = gssapi.SecurityContext(usage="initiate", name=service_name)
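        # The first step() yields the initial token for the Negotiate header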
        data = b64encode(ctx.step()).decode()

        # Make the connection
        connection = httplib.HTTPSConnection(self.url.netloc, 443)
        log.debug("GET {0}".format(path))
        connection.putrequest("GET", path)
        connection.putheader("Authorization", "Negotiate {0}".format(data))
        connection.putheader("Referer", self.url_string)
        connection.endheaders()

        # Perform the request, convert response into lines
        response = connection.getresponse()
        if response.status != 200:
            raise ReportError(
                "Failed to fetch tickets: {0}".format(response.status))
        lines = response.read().decode("utf8").strip().split("\n")[1:]
        log.debug("Tickets fetched:")
        log.debug(pretty(lines))
        return lines
Example #14
    def get(self, path):
        """ Perform a GET request with Kerberos authentication """
        # Prepare the Kerberos ticket granting ticket
        _, ctx = kerberos.authGSSClientInit(
            'HTTP@{0}'.format(self.url.netloc))
        kerberos.authGSSClientStep(ctx, "")
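        # authGSSClientResponse returns the base64-encoded token for the header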
        tgt = kerberos.authGSSClientResponse(ctx)

        # Make the connection
        connection = httplib.HTTPSConnection(self.url.netloc, 443)
        log.debug("GET {0}".format(path))
        connection.putrequest("GET", path)
        connection.putheader("Authorization", "Negotiate {0}".format(tgt))
        connection.putheader("Referer", self.url_string)
        connection.endheaders()

        # Perform the request, convert response into lines
        response = connection.getresponse()
        if response.status != 200:
            raise ReportError(
                "Failed to fetch tickets: {0}".format(response.status))
        lines = response.read().decode("utf8").strip().split("\n")[1:]
        log.debug("Tickets fetched:")
        log.debug(pretty(lines))
        return lines
Example #15
 def search(self, query, options):
     """ Perform Bugzilla search """
     query["query_format"] = "advanced"
     query["limit"] = "0"
     log.debug("Search query:")
     log.debug(pretty(query))
     # Fetch bug info
     try:
         result = self.server.query(query)
     except xmlrpc.client.Fault as error:
         # Ignore non-existent users (this is necessary for users with
         # several email aliases to allow them to use --merge/--total)
         if "not a valid username" in str(error):
             log.debug(error)
             return []
         # Otherwise suggest to bake bugzilla cookies
         log.error("An error encountered, while searching for bugs.")
         log.debug(error)
         raise ReportError(
             "Have you baked cookies using the 'bugzilla login' command?")
     log.debug("Search result:")
     log.debug(pretty(result))
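     # Index bugs by id so the history and comments can be joined below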
     bugs = dict((bug.id, bug) for bug in result)
     # Fetch bug history
     log.debug("Fetching bug history")
     result = self.server._proxy.Bug.history({'ids': list(bugs.keys())})
     log.debug(pretty(result))
     history = dict((bug["id"], bug["history"]) for bug in result["bugs"])
     # Fetch bug comments
     log.debug("Fetching bug comments")
     result = self.server._proxy.Bug.comments({'ids': list(bugs.keys())})
     log.debug(pretty(result))
     comments = dict((int(bug), data["comments"])
                     for bug, data in list(result["bugs"].items()))
     # Create bug objects
     return [
         self.parent.bug(bugs[id],
                         history[id],
                         comments[id],
                         parent=self.parent) for id in bugs
     ]
Example #16
 def _fetch_activities(self):
     """ Get organization activity, handle pagination """
     activities = []
     # Prepare url of the first page
     url = '{0}/organizations/{1}/activity/'.format(
         self.url, self.organization)
     while url:
         # Fetch one page of activities
         try:
             log.debug('Fetching activity data: {0}'.format(url))
             response = requests.get(url, headers=self.headers)
             if not response.ok:
                 log.error(response.text)
                 raise ReportError('Failed to fetch Sentry activities.')
             data = response.json()
             log.data("Response headers:\n{0}".format(
                 pretty(response.headers)))
             log.debug("Fetched {0}.".format(listed(len(data), 'activity')))
             log.data(pretty(data))
             for activity in [Activity(item) for item in data]:
                 # We've reached the last page, older records not relevant
                 if activity.created < self.stats.options.since.date:
                     return activities
                 # Store only relevant activities (created before the until date)
                 if activity.created < self.stats.options.until.date:
                     log.details("Activity: {0}".format(activity))
                     activities.append(activity)
         except requests.RequestException as error:
             log.debug(error)
             raise ReportError(
                 'Failed to fetch Sentry activities from {0}'.format(url))
         # Check for possible next page
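         # The NEXT_PAGE regex pulls the next page url out of the Link header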
         try:
             url = NEXT_PAGE.search(response.headers['Link']).groups()[0]
         except AttributeError:
             url = None
     return activities
Example #17
 def search(query, parent, options):
     """ Perform Trac search """
     # Extend the default max number of tickets to be fetched
     query = "{0}&max={1}".format(query, MAX_TICKETS)
     log.debug("Search query: {0}".format(query))
     try:
         result = parent.proxy.ticket.query(query)
     except xmlrpc.client.Fault as error:
         log.error("An error encountered, while searching for tickets.")
         raise ReportError(error)
     except xmlrpc.client.ProtocolError as error:
         log.debug(error)
         log.error("Trac url: {0}".format(parent.url))
         raise ReportError(
             "Unable to contact Trac server. Is the url above correct?")
     log.debug("Search result: {0}".format(result))
     # Fetch tickets and their history using multicall
     multicall = xmlrpc.client.MultiCall(parent.proxy)
     for ticket_id in sorted(result):
         multicall.ticket.get(ticket_id)
         multicall.ticket.changeLog(ticket_id)
     log.debug("Fetching trac tickets and their history")
     result = list(multicall())
     tickets = result[::2]
     changelogs = result[1::2]
     # Print debugging info
     for ticket, changelog in zip(tickets, changelogs):
         log.debug("Fetched ticket #{0}".format(ticket[0]))
         log.debug(pretty(ticket))
         log.debug("Changelog:")
         log.debug(pretty(changelog))
     # Return the list of ticket objects
     return [
         Trac(ticket, changelog, parent=parent, options=options)
         for ticket, changelog in zip(tickets, changelogs)
     ]
Example #18
 def search(self, user, since, until, target_type, action_name):
     """ Perform GitLab query """
     if not self.user:
         self.user = self.get_user(user)
     if not self.events:
         self.events = self.user_events(self.user['id'], since)
     result = []
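     # Keep only events matching the requested type, action and date range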
     for event in self.events:
         created_at = dateutil.parser.parse(event['created_at']).date()
         if (event['target_type'] == target_type and
                 event['action_name'] == action_name and
                 since.date <= created_at and until.date >= created_at):
             result.append(event)
     log.debug("Result: {0} fetched".format(listed(len(result), "item")))
     log.data(pretty(result))
     return result
Example #19
 def search(self, query):
     """ Perform GitHub query """
     url = self.url + "/" + query
     log.debug("GitHub query: {0}".format(url))
     try:
         request = urllib.request.Request(url, headers=self.headers)
         response = urllib.request.urlopen(request)
         log.debug("Response headers:\n{0}".format(
             str(response.info()).strip()))
     except urllib.error.URLError as error:
         log.debug(error)
         raise ReportError("GitHub search on {0} failed.".format(self.url))
     result = json.loads(response.read())["items"]
     log.debug("Result: {0} fetched".format(listed(len(result), "item")))
     log.data(pretty(result))
     return result
Example #20
 def search(self, query):
     """ Perform GitHub query """
     url = self.url + "/" + query
     log.debug("GitHub query: {0}".format(url))
     try:
         request = urllib2.Request(url, headers=self.headers)
         response = urllib2.urlopen(request)
         log.debug("Response headers:\n{0}".format(
             unicode(response.info()).strip()))
     except urllib2.URLError as error:
         log.debug(error)
         raise ReportError(
             "GitHub search on {0} failed.".format(self.url))
     result = json.loads(response.read())["items"]
     log.debug("Result: {0} fetched".format(listed(len(result), "item")))
     log.data(pretty(result))
     return result
Example #21
    def fetch(self):
        log.info(u"Searching for patches added to changes by {0}".format(
            self.user))
        reviewer = self.user.login
        self.stats = []
        tickets = GerritUnit.fetch(
            self, 'owner:{0}+is:closed&q=owner:{0}+is:open'.format(
                reviewer),
            '')
        for tck in tickets:
            log.debug("ticket = {0}".format(tck))
            try:
                changes = self.repo.get_changelog(tck)
            except IOError:
                log.debug('Failed to retrieve details for {0}'.format(
                    tck.change_id))
                continue

            owner = changes['owner']['email']

            log.debug("changes.messages = {0}".format(
                pretty(changes['messages'])))
            cmnts_by_user = []
            for chg in changes['messages']:
                # TODO This is a very bad algorithm for recognising
                # patch sets added by the owner of the change, but
                # I don’t know how to find a list of all revisions for
                # the particular change.
                if 'author' not in chg:
                    continue
                if 'email' not in chg['author']:
                    continue
                comment_date = self.get_gerrit_date(chg['date'][:10])
                if (owner == chg['author']['email'] and
                        '_revision_number' in chg and
                        chg['_revision_number'] > 1 and
                        comment_date >= self.since_date and
                        'uploaded patch' in chg['message'].lower()):
                    cmnts_by_user.append(chg)
            if len(cmnts_by_user) > 0:
                self.stats.append(
                    Change(tck.ticket, changelog=changes,
                           prefix=self.prefix))
        log.debug(u"self.stats = {0}".format(self.stats))
Example #22
    def commits(self, user, options):
        """ List commits for given user. """
        # Prepare the command
        command = "git log --all --author={0}".format(user.login).split()
        command.append("--format=format:%h - %s")
        command.append("--since='{0} 00:00:00'".format(options.since))
        command.append("--until='{0} 00:00:00'".format(options.until))
        if options.verbose:
            command.append("--name-only")
        log.info("Checking commits in {0}".format(self.path))
        log.details(pretty(command))

        # Get the commit messages
        try:
            process = subprocess.Popen(command,
                                       cwd=self.path,
                                       encoding='utf-8',
                                       stdout=subprocess.PIPE,
                                       stderr=subprocess.PIPE)
        except OSError as error:
            log.debug(error)
            raise did.base.ReportError(
                "Unable to access git repo '{0}'".format(self.path))
        output, errors = process.communicate()
        log.debug("git log output:")
        log.debug(output)
        if process.returncode == 0:
            if not output:
                return []
            else:
                if not options.verbose:
                    return output.split("\n")
                commits = []
                for commit in output.split("\n\n"):
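                    # Line one is the summary, line two the first file path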
                    summary = commit.split("\n")[0]
                    directory = re.sub("/[^/]+$", "", commit.split("\n")[1])
                    commits.append("{0}\n{1}* {2}".format(
                        summary, 8 * " ", directory))
                return commits
        else:
            log.debug(errors.strip())
            log.warn("Unable to check commits in '{0}'".format(self.path))
            return []
Example #23
 def search(self, query):
     """ Perform Bodhi query """
     result = []
     current_page = 1
     original_query = query
     while current_page:
         log.debug("Bodhi query: {0}".format(query))
         client = BodhiClient(self.url)
         data = client.send_request(query, verb='GET')
         objects = data['updates']
         log.debug("Result: {0} fetched".format(listed(
             len(objects), "item")))
         log.data(pretty(data))
         result.extend(objects)
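         # Request the following page until the last one is reached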
         if current_page < data['pages']:
             current_page = current_page + 1
             query = f"{original_query}&page={current_page}"
         else:
             current_page = None
     return result
Example #24
    def commits(self, user, options):
        """ List commits for given user. """
        # Prepare the command
        command = "git log --all --author={0}".format(user.login).split()
        command.append("--format=format:%h - %s")
        command.append("--since='{0} 00:00:00'".format(options.since))
        command.append("--until='{0} 00:00:00'".format(options.until))
        if options.verbose:
            command.append("--name-only")
        log.info(u"Checking commits in {0}".format(self.path))
        log.debug(pretty(command))

        # Get the commit messages
        try:
            process = subprocess.Popen(
                command, cwd=self.path,
                stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        except OSError as error:
            log.debug(error)
            raise ReportError(
                "Unable to access git repo '{0}'".format(self.path))
        output, errors = process.communicate()
        log.debug("git log output:")
        log.debug(output)
        if process.returncode == 0:
            if not output:
                return []
            else:
                if not options.verbose:
                    return unicode(output, "utf8").split("\n")
                commits = []
                for commit in unicode(output, "utf8").split("\n\n"):
                    summary = commit.split("\n")[0]
                    directory = re.sub("/[^/]+$", "", commit.split("\n")[1])
                    commits.append("{0}\n{1}* {2}".format(
                        summary, 8 * " ", directory))
                return commits
        else:
            log.error(errors.strip())
            raise ReportError(
                "Unable to check commits in '{0}'".format(self.path))
Example #25
 def fetch(self):
     log.info(u"Searching for changes reviewed by {0}".format(self.user))
     # Collect ALL changes opened (and perhaps now closed) after the
     # given date and collect all reviews from them ... then limit by
     # the actual reviewer (not reviewer:<login>, because that doesn't
     # mean the person actually did a review, only that they have the
     # right to do so).
     self.stats = []
     reviewer = self.user.login
     tickets = GerritUnit.fetch(
         self,
         'reviewer:{0}+is:closed&q=reviewer:{0}+is:open'.format(
             self.user.login),
         '',
         limit_since=True)
     for tck in tickets:
         log.debug("ticket = {0}".format(tck))
         try:
             changes = self.repo.get_changelog(tck)
         except IOError:
             log.debug('Failed to retrieve details for {0}'.format(
                 tck.change_id))
             continue
         log.debug("changes.messages = {0}".format(
             pretty(changes['messages'])))
         cmnts_by_user = []
         for chg in changes['messages']:
             if 'author' not in chg:
                 continue
             if 'email' not in chg['author']:
                 continue
             if reviewer in chg['author']['email']:
                 comment_date = self.get_gerrit_date(chg['date'][:10])
                 if comment_date >= self.since_date:
                     cmnts_by_user.append(chg)
         if len(cmnts_by_user) > 0:
             self.stats.append(
                 Change(tck.ticket, changelog=changes, prefix=self.prefix))
     log.debug(u"self.stats = {0}".format(self.stats))
Example #26
 def search(self, query, pagination, result_field):
     """ Perform Pagure query """
     result = []
     url = "/".join((self.url, query))
     while url:
         log.debug("Pagure query: {0}".format(url))
         try:
             response = requests.get(url, headers=self.headers)
             log.data("Response headers:\n{0}".format(response.headers))
         except requests.RequestException as error:
             log.error(error)
             raise ReportError("Pagure search {0} failed.".format(self.url))
         data = response.json()
         objects = data[result_field]
         log.debug("Result: {0} fetched".format(
             listed(len(objects), "item")))
         log.data(pretty(data))
         # FIXME later: Work around https://pagure.io/pagure/issue/4057
         if not objects:
             break
         result.extend(objects)
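         # Follow the 'next' pagination link; None ends the loop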
         url = data[pagination]['next']
     return result
Example #27
 def _get_gitlab_api_json(self, endpoint):
     log.debug("Query: {0}".format(endpoint))
     result = self._get_gitlab_api(endpoint).json()
     log.data(pretty(result))
     return result