Code Example #1
File: cli.py  Project: mfrodl/did
def main(arguments=None):
    """
    Parse options, gather stats and show the results

    Takes an optional parameter ``arguments`` which can be either
    a command line string or a list of options. This is very useful
    for testing purposes. Function returns a tuple of the form::

        ([user_stats], team_stats)

    with the list of all gathered stats objects.
    """
    try:
        # Parse options, initialize gathered stats
        options, header = Options().parse(arguments)
        gathered_stats = []

        # Check for user email addresses (command line or config)
        emails = options.emails or did.base.Config().email
        emails = utils.split(emails, separator=re.compile(r"\s*,\s*"))
        users = [did.base.User(email=email) for email in emails]

        # Print header and prepare team stats object for data merging
        utils.eprint(header)
        team_stats = UserStats(options=options)
        if options.merge:
            utils.header("Total Report")
            utils.item("Users: {0}".format(len(users)), options=options)

        # Check individual user stats
        for user in users:
            if options.merge:
                utils.item(user, 1, options=options)
            else:
                utils.header(user)
            user_stats = UserStats(user=user, options=options)
            user_stats.check()
            team_stats.merge(user_stats)
            gathered_stats.append(user_stats)

        # Display merged team report
        if options.merge or options.total:
            if options.total:
                utils.header("Total Report")
            team_stats.show()

        # Return all gathered stats objects
        return gathered_stats, team_stats

    except did.base.ConfigFileError as error:
        utils.info(
            "Create at least a minimum config file {0}:\n{1}".format(
                did.base.Config.path(), did.base.Config.example().strip()
            )
        )
        raise

    except kerberos.GSSError as error:
        log.debug(error)
        raise did.base.ConfigError("Kerberos authentication failed. Try kinit.")
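
A minimal usage sketch, assuming did is installed, main() is importable from did.cli, and a config file with an email address exists; the arguments parameter may be passed as a string or a list instead of being read from the command line:

# Usage sketch (assumes ~/.did/config defines at least an email address)
from did.cli import main

gathered_stats, team_stats = main("last week --brief")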
Code Example #2
File: git.py  Project: psss/did
 def __init__(self, option, name=None, parent=None, user=None):
     name = "Work on {0}".format(option)
     StatsGroup.__init__(self, option, name, parent, user)
     for repo, path in did.base.Config().section(option):
         if path.endswith('/*'):
             try:
                 directories = os.listdir(path[:-1])
             except OSError as error:
                 log.error(error)
                 raise did.base.ConfigError(
                     "Invalid path in the [{0}] section".format(option))
             for repo_dir in sorted(directories):
                 repo_path = path.replace('*', repo_dir)
                 # Check directories only
                 if not os.path.isdir(repo_path):
                     continue
                 # Silently ignore non-git directories
                 if not os.path.exists(os.path.join(repo_path, ".git")):
                     log.debug("Skipping non-git directory '{0}'.".format(
                         repo_path))
                     continue
                 self.stats.append(GitCommits(
                     option="{0}-{1}".format(repo, repo_dir),
                     parent=self, path=repo_path,
                     name="Work on {0}/{1}".format(repo, repo_dir)))
         else:
             self.stats.append(GitCommits(
                 option=option + "-" + repo, parent=self, path=path,
                 name="Work on {0}".format(repo)))
Code Example #3
File: bitly.py  Project: psss/did
    def user_link_history(self, created_before=None, created_after=None,
                          limit=100, **kwargs):
        """  Bit.ly API - user_link_history wrapper"""
        """ Bit.ly link

        Link History Keys
        -----------------

            [u'aggregate_link', u'archived', u'campaign_ids',
             u'client_id', u'created_at', u'keyword_link',
             u'link', u'long_url', u'modified_at',
             u'private', u'tags', u'title', u'user_ts']
        """
        # bit.ly API doesn't seem to like anything other than int's
        limit = int(limit)
        created_after = int(created_after)
        created_before = int(created_before)
        hist = self.api.user_link_history(
            limit=limit, created_before=created_before,
            created_after=created_after)

        # FIXME: if we have more than 100 objects we need to PAGINATE
        record = "{0} - {1}"
        links = []
        for r in hist:
            link = r.get('keyword_link') or r['link']
            title = r['title'] or '<< NO TITLE >>'
            links.append(record.format(link, title))
        log.debug("First 3 Links fetched:")
        log.debug(pretty(hist[0:3], indent=4))
        return links
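
Since the wrapper casts its limits with int(), callers are expected to supply Unix epoch timestamps; a small self-contained sketch of preparing such values (variable names here are illustrative):

# Prepare integer epoch timestamps for created_after / created_before
import time
from datetime import datetime, timedelta

until = datetime.now()
since = until - timedelta(days=7)
created_after = int(time.mktime(since.timetuple()))
created_before = int(time.mktime(until.timetuple()))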
Code Example #4
File: base.py  Project: rhatlapa/did
 def email(self):
     """ User email(s) """
     try:
         return self.parser.get("general", "email")
     except (NoOptionError, NoSectionError) as error:
         log.debug(error)
         return []
Code Example #5
File: __init__.py  Project: AloisMahdal/did
def load():
    """ Check available plugins and attempt to import them """
    # Code is based on beaker-client's command.py script
    plugins = []
    for filename in os.listdir(PLUGINS_PATH):
        if not filename.endswith(".py") or filename.startswith("_"):
            continue
        if not os.path.isfile(os.path.join(PLUGINS_PATH, filename)):
            continue
        plugin = filename[:-3]
        if plugin in FAILED_PLUGINS:
            # Skip loading plugins that already failed before
            continue
        try:
            __import__(PLUGINS.__name__, {}, {}, [plugin])
            plugins.append(plugin)
            log.debug("Successfully imported {0} plugin".format(plugin))
        except (ImportError, SyntaxError) as error:
            # Give a warning only when the plugin is configured
            message = "Failed to import {0} plugin ({1})".format(plugin, error)
            if Config().sections(kind=plugin):
                log.warn(message)
            else:
                log.debug(message)
            FAILED_PLUGINS.append(plugin)
    return plugins
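
For reference, the same import step could be expressed with importlib, which returns the submodule itself rather than the parent package (a sketch only; PLUGINS and plugin are the names already used in the function above):

# Equivalent of the __import__ call above, using importlib
import importlib

importlib.import_module("{0}.{1}".format(PLUGINS.__name__, plugin))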
Code Example #6
File: base.py  Project: barraq/did
    def __init__(self, config=None, path=None):
        """
        Read the config file

        Parse the config from the given string (config) or file (path).
        If no config or path is given, default to "~/.did/config", which
        can be overridden by the ``DID_DIR`` environment variable.
        """
        # Read the config only once (unless explicitly provided)
        if self.parser is not None and config is None and path is None:
            return
        Config.parser = ConfigParser.SafeConfigParser()
        # If config provided as string, parse it directly
        if config is not None:
            log.info("Inspecting config file from string")
            log.debug(utils.pretty(config))
            self.parser.readfp(StringIO.StringIO(config))
            return
        # Check the environment for config file override
        # (unless path is explicitly provided)
        if path is None:
            path = Config.path()
        # Parse the config from file
        try:
            log.info("Inspecting config file '{0}'.".format(path))
            self.parser.readfp(codecs.open(path, "r", "utf8"))
        except IOError as error:
            log.debug(error)
            Config.parser = None
            raise ConfigFileError(
                "Unable to read the config file '{0}'.".format(path))
Code Example #7
File: cli.py  Project: happz/did
    def __init__(self, arguments=None):
        """ Prepare the parser. """
        self.parser = argparse.ArgumentParser(
            usage="did [this|last] [week|month|quarter|year] [opts]")
        self.arguments = arguments
        self.opt = self.arg = None

        # Enable debugging output (even before options are parsed)
        if "--debug" in sys.argv:
            log.setLevel(utils.LOG_DEBUG)

        # Time & user selection
        group = self.parser.add_argument_group("Select")
        group.add_argument(
            "--email", dest="emails", default=[], action="append",
            help="User email address(es)")
        group.add_argument(
            "--since",
            help="Start date in the YYYY-MM-DD format")
        group.add_argument(
            "--until",
            help="End date in the YYYY-MM-DD format")

        # Create sample stats and include all stats objects options
        log.debug("Loading Sample Stats group to build Options")
        self.sample_stats = UserStats()
        self.sample_stats.add_option(self.parser)

        # Formatting options
        group = self.parser.add_argument_group("Format")
        group.add_argument(
            "--format", default="text",
            help="Output style, possible values: text (default) or wiki")
        group.add_argument(
            "--width", default=did.base.MAX_WIDTH, type=int,
            help="Maximum width of the report output (default: %(default)s)")
        group.add_argument(
            "--brief", action="store_true",
            help="Show brief summary only, do not list individual items")
        group.add_argument(
            "--verbose", action="store_true",
            help="Include more details (like modified git directories)")

        # Other options
        group = self.parser.add_argument_group("Utils")
        group.add_argument(
            "--config",
            metavar="FILE",
            help="Use alternate configuration file (default: 'config')")
        group.add_argument(
            "--total", action="store_true",
            help="Append total stats after listing individual users")
        group.add_argument(
            "--merge", action="store_true",
            help="Merge stats of all users into a single report")
        group.add_argument(
            "--debug", action="store_true",
            help="Turn on debugging output, do not catch exceptions")
Code Example #8
File: google.py  Project: AloisMahdal/did
 def __init__(self, option, name=None, parent=None):
     super(GoogleStatsBase, self).__init__(
         option=option, name=name, parent=parent)
     try:
         self.since = self.options.since.datetime.isoformat() + "Z"
         self.until = self.options.until.datetime.isoformat() + "Z"
     except AttributeError:
         log.debug("Failed to initialize time range, skipping")
     self._events = None
Code Example #9
File: rt.py  Project: AloisMahdal/did
    def search(self, query):
        """ Perform request tracker search """
        # Prepare the path
        log.debug("Query: {0}".format(query))
        path = self.url.path + '?Format=__id__+__Subject__'
        path += "&Order=ASC&OrderBy=id&Query=" + urllib.quote(query)

        # Get the tickets
        lines = self.get(path)
        log.info(u"Fetched tickets: {0}".format(len(lines)))
        return [self.parent.ticket(line, self.parent) for line in lines]
Code Example #10
File: sentry.py  Project: AloisMahdal/did
 def filter_data(self):
     stats = []
     log.debug("Query: Date range {0} - {1}".format(
         str(self.options.since.date), str(self.options.until.date)))
     for activity in self.sentry.get_data():
         date = self.get_date(activity)
         if (date >= str(self.options.since.date) and
                 date <= str(self.options.until.date) and
                 activity['type'] != "set_regression"):
             stats.append(activity)
     return stats
Code Example #11
File: cli.py  Project: rhatlapa/did
    def __init__(self, arguments=None):
        """ Prepare the parser. """
        self.parser = optparse.OptionParser(
            usage="did [last] [week|month|quarter|year] [opts]")
        self.arguments = arguments
        self.opt = self.arg = None

        # Enable debugging output (even before options are parsed)
        if "--debug" in sys.argv:
            utils.Logging("did").set(utils.LOG_DEBUG)

        # Time & user selection
        group = optparse.OptionGroup(self.parser, "Selection")
        group.add_option(
            "--email", dest="emails", default=[], action="append",
            help="User email address(es)")
        group.add_option(
            "--since",
            help="Start date in the YYYY-MM-DD format")
        group.add_option(
            "--until",
            help="End date in the YYYY-MM-DD format")
        self.parser.add_option_group(group)

        # Create sample stats and include all stats objects options
        log.debug("Loading Sample Stats group to build Options")
        self.sample_stats = UserStats()
        self.sample_stats.add_option(self.parser)

        # Display mode
        group = optparse.OptionGroup(self.parser, "Display mode")
        group.add_option(
            "--format", default="text",
            help="Output style, possible values: text (default) or wiki")
        group.add_option(
            "--width", default=did.base.Config().width, type="int",
            help="Maximum width of the report output (default: %default)")
        group.add_option(
            "--brief", action="store_true",
            help="Show brief summary only, do not list individual items")
        group.add_option(
            "--verbose", action="store_true",
            help="Include more details (like modified git directories)")
        group.add_option(
            "--total", action="store_true",
            help="Append total stats after listing individual users")
        group.add_option(
            "--merge", action="store_true",
            help="Merge stats of all users into a single report")
        group.add_option(
            "--debug", action="store_true",
            help="Turn on debugging output, do not catch exceptions")
        self.parser.add_option_group(group)
Code Example #12
File: base.py  Project: barraq/did
 def email(self):
     """ User email(s) """
     try:
         return self.parser.get("general", "email")
     except NoSectionError as error:
         log.debug(error)
         raise ConfigFileError(
             "No general section found in the config file.")
     except NoOptionError as error:
         log.debug(error)
         raise ConfigFileError(
             "No email address defined in the config file.")
Code Example #13
File: cli.py  Project: rhatlapa/did
    def parse(self, arguments=None):
        """ Parse the options. """
        # Split arguments if given as string and run the parser
        if arguments is not None:
            self.arguments = arguments
        if (self.arguments is not None
                and isinstance(self.arguments, basestring)):
            self.arguments = self.arguments.split()
        # Otherwise properly decode command line arguments
        if self.arguments is None:
            self.arguments = [arg.decode("utf-8") for arg in sys.argv[1:]]
        (opt, arg) = self.parser.parse_args(self.arguments)
        self.opt = opt
        self.arg = arg
        self.check()

        # Enable --all if no particular stat or group selected
        opt.all = not any([
            getattr(opt, stat.dest) or getattr(opt, group.dest)
            for group in self.sample_stats.stats
            for stat in group.stats])

        # Detect email addresses and split them on comma
        if not opt.emails:
            opt.emails = did.base.Config().email
        opt.emails = utils.split(opt.emails, separator=re.compile(r"\s*,\s*"))
        if not opt.emails:
            raise ConfigError("No email given. Use --email or create config.")

        # Time period handling
        if opt.since is None and opt.until is None:
            opt.since, opt.until, period = did.base.Date.period(arg)
        else:
            opt.since = did.base.Date(opt.since or "1993-01-01")
            opt.until = did.base.Date(opt.until or "today")
            # Make the 'until' limit inclusive
            opt.until.date += delta(days=1)
            period = "given date range"

        # Validate the date range
        if not opt.since.date < opt.until.date:
            raise RuntimeError(
                "Invalid date range ({0} to {1})".format(
                    opt.since, opt.until.date - delta(days=1)))
        print(u"Status report for {0} ({1} to {2}).".format(
            period, opt.since, opt.until.date - delta(days=1)))

        # Finito
        log.debug("Gathered options:")
        log.debug('options = {0}'.format(opt))
        return opt
Code Example #14
File: gerrit.py  Project: rhatlapa/did
 def fetch(self):
     log.info(u"Searching for changes reviewed by {0}".format(self.user))
     # Collect ALL changes opened (and perhaps now closed) after
     # given date and collect all reviews from them ... then limit by
     # actual reviewer (not reviewer:<login> because that doesn’t
     # mean the person actually did a review, only that they have
     # the right to do so).
     self.stats = []
     reviewer = self.user.login
     tickets = GerritUnit.fetch(
         self, "reviewer:{0}+is:closed&q=reviewer:{0}+is:open".format(self.user.login), "", limit_since=True
     )
     for tck in tickets:
         log.debug("ticket = {0}".format(tck))
         try:
             changes = self.repo.get_changelog(tck)
         except IOError:
             log.debug("Failing to retrieve details for {0}".format(tck.change_id))
             continue
         log.debug("changes.messages = {0}".format(pretty(changes["messages"])))
         cmnts_by_user = []
         for chg in changes["messages"]:
             if "author" in chg and reviewer in chg["author"]["email"]:
                 comment_date = self.get_gerrit_date(chg["date"][:10])
                 if comment_date >= self.since_date:
                     cmnts_by_user.append(chg)
         if len(cmnts_by_user) > 0:
             self.stats.append(Change(tck.ticket, changelog=changes, prefix=self.prefix))
     log.debug(u"self.stats = {0}".format(self.stats))
Code Example #15
File: gerrit.py  Project: rhatlapa/did
    def fetch(self):
        log.info(u"Searching for patches added to changes by {0}".format(self.user))
        reviewer = self.user.login
        self.stats = []
        tickets = GerritUnit.fetch(
            self, "owner:{0}+is:closed&q=owner:{0}+is:open".format(reviewer), "", limit_since=True
        )
        for tck in tickets:
            log.debug("ticket = {0}".format(tck))
            try:
                changes = self.repo.get_changelog(tck)
            except IOError:
                log.debug("Failing to retrieve details for {0}".format(tck.change_id))
                continue

            owner = changes["owner"]["email"]

            log.debug("changes.messages = {0}".format(pretty(changes["messages"])))
            cmnts_by_user = []
            for chg in changes["messages"]:
                # TODO This is a very bad algorithm for recognising
                # patch sets added by the owner of the change, but
                # I don’t know how to find a list of all revisions for
                # the particular change.
                if (
                    "author" in chg
                    and owner == chg["author"]["email"]
                    and self.get_gerrit_date(chg["date"][:10]) >= self.since_date
                    and "uploaded patch" in chg["message"].lower()
                ):
                    cmnts_by_user.append(chg)
            if len(cmnts_by_user) > 0:
                self.stats.append(Change(tck.ticket, changelog=changes, prefix=self.prefix))
        log.debug(u"self.stats = {0}".format(self.stats))
Code Example #16
File: sentry.py  Project: AloisMahdal/did
    def get_data(self):
        """ Get organization activity in JSON representation """
        url = self.url + "organizations/" + self.organization + "/activity/"
        headers = {'Authorization': 'Bearer {0}'.format(self.token)}
        request = urllib2.Request(url, None, headers)
        log.debug("Getting activity data from server.")
        try:
            response = urllib2.urlopen(request)
        except urllib2.URLError as e:
            log.error("An error encountered while getting data from server.")
            log.debug(e)
            raise ReportError("Could not get data. {0}.".format(str(e)))

        return json.load(response)
Code Example #17
File: jira.py  Project: tvieira/did
 def session(self):
     """ Initialize the session """
     if self._session is None:
         # http://stackoverflow.com/questions/8811269/
         # http://www.techchorus.net/using-cookie-jar-urllib2
         cookie = cookielib.CookieJar()
         self._session = urllib2.build_opener(
             urllib2.HTTPSHandler(debuglevel=0),
             urllib2.HTTPRedirectHandler,
             urllib2.HTTPCookieProcessor(cookie),
             urllib2_kerberos.HTTPKerberosAuthHandler)
         log.debug(u"Connecting to {0}".format(self.sso_url))
         self._session.open(self.sso_url)
     return self._session
Code Example #18
File: base.py  Project: pabelanger/did
 def __init__(self, date=None):
     """ Parse the date string """
     if isinstance(date, datetime.date):
         self.date = date
     elif date is None or date.lower() == "today":
         self.date = TODAY
     elif date.lower() == "yesterday":
         self.date = TODAY - delta(days=1)
     else:
         try:
             self.date = datetime.date(*[int(i) for i in date.split("-")])
         except StandardError as error:
             log.debug(error)
             raise OptionError("Invalid date format: '{0}', use YYYY-MM-DD.".format(date))
     self.datetime = datetime.datetime(self.date.year, self.date.month, self.date.day, 0, 0, 0)
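
The constructor accepts a datetime.date object, None, the keywords "today" or "yesterday", or a YYYY-MM-DD string; a brief usage sketch, assuming the class is importable as did.base.Date:

# Usage sketch of the Date wrapper
from did.base import Date

print(Date("2015-09-03").date)   # 2015-09-03
print(Date("yesterday").date)    # today minus one day
print(Date().date)               # defaults to today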
Code Example #19
File: gitlab.py  Project: psss/did
 def search(self, user, since, until, target_type, action_name):
     """ Perform GitLab query """
     if not self.user:
         self.user = self.get_user(user)
     if not self.events:
         self.events = self.user_events(self.user['id'], since, until)
     result = []
     for event in self.events:
         created_at = dateutil.parser.parse(event['created_at']).date()
         if (event['target_type'] == target_type and
                 event['action_name'] == action_name and
                 since.date <= created_at and until.date >= created_at):
             result.append(event)
     log.debug("Result: {0} fetched".format(listed(len(result), "item")))
     return result
Code Example #20
File: stats.py  Project: pabelanger/did
 def __init__(
         self, option, name=None, parent=None, user=None, options=None):
     """ Set the name, indent level and initialize data.  """
     self.option = option.replace(" ", "-")
     self.dest = self.option.replace("-", "_")
     self._name = name
     self.parent = parent
     self.stats = []
     # Save user and options (get it directly or from parent)
     self.options = options or getattr(self.parent, 'options', None)
     if user is None and self.parent is not None:
         self.user = self.parent.user
     else:
         self.user = user
     log.debug(
         'Loading {0} Stats instance for {1}'.format(option, self.user))
Code Example #21
File: stats.py  Project: rhatlapa/did
 def __init__(self, user=None, options=None):
     """ Initialize stats objects. """
     super(UserStats, self).__init__(
         option="all", user=user, options=options)
     self.stats = []
     try:
         import did.plugins
         for section, statsgroup in did.plugins.detect():
             self.stats.append(statsgroup(option=section, parent=self))
     except did.base.ConfigError as error:
         # Missing config file is OK if building options (--help).
          # Otherwise raise the exception to suggest the config example.
         if options is None:
             log.debug(error)
         else:
             raise
Code Example #22
File: gerrit.py  Project: rhatlapa/did
    def get_query_result(self, url):
        log.debug("url = {0}".format(url))
        res = self.opener.open(url)
        if res.getcode() != 200:
            raise IOError("Cannot retrieve list of changes ({0})".format(res.getcode()))

        # see https://code.google.com/p/gerrit/issues/detail?id=2006
        # for explanation of skipping first four characters
        json_str = res.read()[4:].strip()
        try:
            data = json.loads(json_str)
        except ValueError:
            log.exception("Cannot parse JSON data:\n%s", json_str)
            raise
        res.close()

        return data
Code Example #23
File: gerrit.py  Project: pabelanger/did
    def search(self, query):
        full_url = self.join_URL_frags(self.baseurl, '/changes/?q=' + query)
        log.debug('full_url = {0}'.format(full_url))
        tickets = []

        # Get tickets
        tickets = self.get_query_result(full_url)

        # When using multiple queries at once, we get a list of lists,
        # so we need to merge them
        if '&' in query:
            tmplist = []
            for sublist in tickets:
                tmplist.extend(sublist)
            tickets = tmplist[:]

        return tickets
Code Example #24
File: gerrit.py  Project: rhatlapa/did
    def __init__(self, option, name=None, user=None, parent=None, options=None):
        StatsGroup.__init__(self, option, name, parent, user, options)
        self.config = dict(Config().section(option))
        if "url" not in self.config:
            raise IOError("No gerrit URL set in the [{0}] section".format(option))
        self.repo_url = self.config["url"]
        log.debug("repo_url = {0}".format(self.repo_url))

        if "prefix" not in self.config:
            raise ReportError("No prefix set in the [{0}] section".format(option))

        self.stats = [
            AbandonedChanges(option=option + "-abandoned", parent=self),
            MergedChanges(option=option + "-merged", parent=self),
            SubmitedChanges(option=option + "-submitted", parent=self),
            PublishedDrafts(option=option + "-drafts", parent=self),
            AddedPatches(option=option + "-added-patches", parent=self),
            ReviewedChanges(option=option + "-reviewed", parent=self),
        ]
Code Example #25
File: cli.py  Project: mfrodl/did
    def parse(self, arguments=None):
        """ Parse the options. """
        # Split arguments if given as string and run the parser
        if arguments is not None:
            self.arguments = arguments
        if self.arguments is not None and isinstance(self.arguments, basestring):
            self.arguments = self.arguments.split()
        # Otherwise properly decode command line arguments
        if self.arguments is None:
            self.arguments = [arg.decode("utf-8") for arg in sys.argv[1:]]
        opt, arg = self.parser.parse_known_args(self.arguments)
        self.opt = opt
        self.arg = arg
        self.check()

        # Enable --all if no particular stat or group selected
        opt.all = not any(
            [
                getattr(opt, stat.dest) or getattr(opt, group.dest)
                for group in self.sample_stats.stats
                for stat in group.stats
            ]
        )

        # Time period handling
        if opt.since is None and opt.until is None:
            opt.since, opt.until, period = did.base.Date.period(arg)
        else:
            opt.since = did.base.Date(opt.since or "1993-01-01")
            opt.until = did.base.Date(opt.until or "today")
            # Make the 'until' limit inclusive
            opt.until.date += delta(days=1)
            period = "given date range"

        # Validate the date range
        if not opt.since.date < opt.until.date:
            raise RuntimeError("Invalid date range ({0} to {1})".format(opt.since, opt.until.date - delta(days=1)))
        header = "Status report for {0} ({1} to {2}).".format(period, opt.since, opt.until.date - delta(days=1))

        # Finito
        log.debug("Gathered options:")
        log.debug("options = {0}".format(opt))
        return opt, header
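
A minimal usage sketch, assuming did is installed with a valid config; parse() accepts the same string or list form of arguments as main() in Example #1 and returns both the parsed options and the report header:

# Usage sketch of the option parser
from did.cli import Options

options, header = Options().parse("last month --brief")
print(header)   # e.g. "Status report for last month (... to ...)."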
Code Example #26
File: git.py  Project: happz/did
    def commits(self, user, options):
        """ List commits for given user. """
        # Prepare the command
        command = "git log --all --author={0}".format(user.login).split()
        command.append("--format=format:%h - %s")
        command.append("--since='{0} 00:00:00'".format(options.since))
        command.append("--until='{0} 00:00:00'".format(options.until))
        if options.verbose:
            command.append("--name-only")
        log.info(u"Checking commits in {0}".format(self.path))
        log.debug(pretty(command))

        # Get the commit messages
        try:
            process = subprocess.Popen(
                command, cwd=self.path,
                stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        except OSError as error:
            log.debug(error)
            raise ReportError(
                "Unable to access git repo '{0}'".format(self.path))
        output, errors = process.communicate()
        log.debug("git log output:")
        log.debug(output)
        if process.returncode == 0:
            if not output:
                return []
            else:
                if not options.verbose:
                    return unicode(output, "utf8").split("\n")
                commits = []
                for commit in unicode(output, "utf8").split("\n\n"):
                    summary = commit.split("\n")[0]
                    directory = re.sub("/[^/]+$", "", commit.split("\n")[1])
                    commits.append("{0}\n{1}* {2}".format(
                        summary, 8 * " ", directory))
                return commits
        else:
            log.debug(errors.strip())
            log.warn("Unable to check commits in '{0}'".format(self.path))
            return []
Code Example #27
File: gerrit.py  Project: psss/did
    def __init__(self, option, name=None, parent=None, user=None):
        StatsGroup.__init__(self, option, name, parent, user)
        self.config = dict(Config().section(option))
        if 'url' not in self.config:
            raise IOError(
                'No gerrit URL set in the [{0}] section'.format(option))
        self.repo_url = self.config['url']
        log.debug('repo_url = {0}'.format(self.repo_url))

        if "prefix" not in self.config:
            raise ReportError(
                "No prefix set in the [{0}] section".format(option))

        self.stats = [
            AbandonedChanges(option=option + '-abandoned', parent=self),
            MergedChanges(option=option + '-merged', parent=self),
            SubmitedChanges(option=option + '-submitted', parent=self),
            WIPChanges(option=option + '-wip', parent=self),
            #AddedPatches(option=option + '-added-patches', parent=self),
            ReviewedChanges(option=option + '-reviewed', parent=self),
            ]
Code Example #28
File: rt.py  Project: AloisMahdal/did
    def get(self, path):
        """ Perform a GET request with Kerberos authentication """
        # Prepare the Kerberos ticket granting ticket
        _, ctx = kerberos.authGSSClientInit(
            'HTTP@{0}'.format(self.url.netloc))
        kerberos.authGSSClientStep(ctx, "")
        tgt = kerberos.authGSSClientResponse(ctx)

        # Make the connection
        connection = httplib.HTTPSConnection(self.url.netloc, 443)
        log.debug("GET {0}".format(path))
        connection.putrequest("GET", path)
        connection.putheader("Authorization", "Negotiate {0}".format(tgt))
        connection.putheader("Referer", self.url_string)
        connection.endheaders()

        # Perform the request, convert response into lines
        response = connection.getresponse()
        if response.status != 200:
            raise ReportError(
                "Failed to fetch tickets: {0}".format(response.status))
        lines = response.read().decode("utf8").strip().split("\n")[1:]
        log.debug("Tickets fetched:")
        log.debug(pretty(lines))
        return lines
Code Example #29
File: rt.py  Project: psss/did
    def get(self, path):
        """ Perform a GET request with GSSAPI authentication """
        # Generate token
        service_name = gssapi.Name('HTTP@{0}'.format(self.url.netloc),
                                   gssapi.NameType.hostbased_service)
        ctx = gssapi.SecurityContext(usage="initiate", name=service_name)
        data = b64encode(ctx.step()).decode()

        # Make the connection
        connection = httplib.HTTPSConnection(self.url.netloc, 443)
        log.debug("GET {0}".format(path))
        connection.putrequest("GET", path)
        connection.putheader("Authorization", "Negotiate {0}".format(data))
        connection.putheader("Referer", self.url_string)
        connection.endheaders()

        # Perform the request, convert response into lines
        response = connection.getresponse()
        if response.status != 200:
            raise ReportError(
                "Failed to fetch tickets: {0}".format(response.status))
        lines = response.read().decode("utf8").strip().split("\n")[1:]
        log.debug("Tickets fetched:")
        log.debug(pretty(lines))
        return lines
Code Example #30
File: jira.py  Project: AloisMahdal/did
 def search(query, stats):
     """ Perform issue search for given stats instance """
     log.debug("Search query: {0}".format(query))
     issues = []
     # Fetch data from the server in batches of MAX_RESULTS issues
     for batch in range(MAX_BATCHES):
         result = stats.parent.session.open(
             "{0}/rest/api/latest/search?{1}".format(
                 stats.parent.url, urllib.urlencode({
                     "jql": query,
                     "fields": "summary,comment",
                     "maxResults": MAX_RESULTS,
                     "startAt": batch * MAX_RESULTS})))
         data = json.loads(result.read())
         log.debug("Batch {0} result: {1} fetched".format(
             batch, listed(data["issues"], "issue")))
         log.data(pretty(data))
         issues.extend(data["issues"])
         # If all issues fetched, we're done
         if len(issues) >= data["total"]:
             break
     # Return the list of issue objects
     return [Issue(issue, prefix=stats.parent.prefix) for issue in issues]
Code Example #31
File: gerrit.py  Project: psss/did
    def fetch(self):
        log.info("Searching for patches added to changes by {0}".format(
            self.user))
        reviewer = self.user.login
        self.stats = []
        tickets = GerritUnit.fetch(
            self, 'owner:{0}+is:closed&q=owner:{0}+is:open'.format(
                reviewer),
            '')
        for tck in tickets:
            log.debug("ticket = {0}".format(tck))
            try:
                changes = self.repo.get_changelog(tck)
            except IOError:
                log.debug('Failing to retrieve details for {0}'.format(
                    tck.change_id))
                continue

            owner = changes['owner']['email']

            log.debug("changes.messages = {0}".format(
                pretty(changes['messages'])))
            cmnts_by_user = []
            for chg in changes['messages']:
                # TODO This is a very bad algorithm for recognising
                # patch sets added by the owner of the change, but
                # I don’t know how to find a list of all revisions for
                # the particular change.
                if 'author' not in chg:
                    continue
                if 'email' not in chg['author']:
                    continue
                comment_date = self.get_gerrit_date(chg['date'][:10])
                if (owner == chg['author']['email'] and
                        '_revision_number' in chg and
                        chg['_revision_number'] > 1 and
                        comment_date >= self.since_date and
                        'uploaded patch' in chg['message'].lower()):
                    cmnts_by_user.append(chg)
            if len(cmnts_by_user) > 0:
                self.stats.append(
                    Change(tck.ticket, changelog=changes,
                           prefix=self.prefix))
        log.debug("self.stats = {0}".format(self.stats))
Code Example #32
    def commits(self, user, options):
        """ List commits for given user. """
        # Prepare the command
        command = "git log --all --author={0}".format(user.login).split()
        command.append("--format=format:%h - %s")
        command.append("--since='{0} 00:00:00'".format(options.since))
        command.append("--until='{0} 00:00:00'".format(options.until))
        if options.verbose:
            command.append("--name-only")
        log.info("Checking commits in {0}".format(self.path))
        log.details(pretty(command))

        # Get the commit messages
        try:
            process = subprocess.Popen(command,
                                       cwd=self.path,
                                       encoding='utf-8',
                                       stdout=subprocess.PIPE,
                                       stderr=subprocess.PIPE)
        except OSError as error:
            log.debug(error)
            raise did.base.ReportError(
                "Unable to access git repo '{0}'".format(self.path))
        output, errors = process.communicate()
        log.debug("git log output:")
        log.debug(output)
        if process.returncode == 0:
            if not output:
                return []
            else:
                if not options.verbose:
                    return output.split("\n")
                commits = []
                for commit in output.split("\n\n"):
                    summary = commit.split("\n")[0]
                    directory = re.sub("/[^/]+$", "", commit.split("\n")[1])
                    commits.append("{0}\n{1}* {2}".format(
                        summary, 8 * " ", directory))
                return commits
        else:
            log.debug(errors.strip())
            log.warn("Unable to check commits in '{0}'".format(self.path))
            return []
Code Example #33
File: zammad.py  Project: psss/did
 def search(self, query):
     """ Perform Zammad query """
     url = self.url + "/" + query
     log.debug("Zammad query: {0}".format(url))
     try:
         request = urllib.request.Request(url, headers=self.headers)
         response = urllib.request.urlopen(request)
         log.debug("Response headers:\n{0}".format(
             str(response.info()).strip()))
     except urllib.error.URLError as error:
         log.debug(error)
         raise ReportError("Zammad search on {0} failed.".format(self.url))
     result = json.loads(response.read())["assets"]
     try:
         result = result["Ticket"]
     except KeyError:
         result = dict()
     log.debug("Result: {0} fetched".format(listed(len(result), "item")))
     log.data(pretty(result))
     return result
Code Example #34
 def fetch(self):
     log.info(u"Searching for changes reviewed by {0}".format(self.user))
     # Collect ALL changes opened (and perhaps now closed) after
     # given date and collect all reviews from them ... then limit by
     # actual reviewer (not reviewer:<login> because that doesn’t
     # mean the person actually did a review, only that they have
     # the right to do so).
     self.stats = []
     reviewer = self.user.login
     tickets = GerritUnit.fetch(
         self,
         'reviewer:{0}+is:closed&q=reviewer:{0}+is:open'.format(
             self.user.login),
         '',
         limit_since=True)
     for tck in tickets:
         log.debug("ticket = {0}".format(tck))
         try:
             changes = self.repo.get_changelog(tck)
         except IOError:
             log.debug('Failing to retrieve details for {0}'.format(
                 tck.change_id))
             continue
         log.debug("changes.messages = {0}".format(
             pretty(changes['messages'])))
         cmnts_by_user = []
         for chg in changes['messages']:
             if 'author' not in chg:
                 continue
             if 'email' not in chg['author']:
                 continue
             if reviewer in chg['author']['email']:
                 comment_date = self.get_gerrit_date(chg['date'][:10])
                 if comment_date >= self.since_date:
                     cmnts_by_user.append(chg)
         if len(cmnts_by_user) > 0:
             self.stats.append(
                 Change(tck.ticket, changelog=changes, prefix=self.prefix))
     log.debug(u"self.stats = {0}".format(self.stats))
Code Example #35
 def _fetch_activities(self):
     """ Get organization activity, handle pagination """
     activities = []
     # Prepare url of the first page
     url = '{0}/organizations/{1}/activity/'.format(
         self.url, self.organization)
     while url:
         # Fetch one page of activities
         try:
             log.debug('Fetching activity data: {0}'.format(url))
             response = requests.get(url, headers=self.headers)
             if not response.ok:
                 log.error(response.text)
                 raise ReportError('Failed to fetch Sentry activities.')
             data = response.json()
             log.data("Response headers:\n{0}".format(
                 pretty(response.headers)))
             log.debug("Fetched {0}.".format(listed(len(data), 'activity')))
             log.data(pretty(data))
             for activity in [Activity(item) for item in data]:
                 # We've reached the last page, older records not relevant
                 if activity.created < self.stats.options.since.date:
                     return activities
                  # Store only relevant activities (before the until date)
                 if activity.created < self.stats.options.until.date:
                     log.details("Activity: {0}".format(activity))
                     activities.append(activity)
         except requests.RequestException as error:
             log.debug(error)
             raise ReportError(
                 'Failed to fetch Sentry activities from {0}'.format(url))
         # Check for possible next page
         try:
             url = NEXT_PAGE.search(response.headers['Link']).groups()[0]
         except AttributeError:
             url = None
     return activities
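
Incidentally, requests already parses the Link header into response.links, so within the loop above the NEXT_PAGE regular expression could arguably be replaced by a dictionary lookup; this is only a sketch, since Sentry's next link also carries a results cursor attribute that may need checking:

# Possible alternative to the NEXT_PAGE regex, using the parsed Link header
url = response.links.get("next", {}).get("url")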
Code Example #36
 def _get_gitlab_api_json(self, endpoint):
     log.debug("Query: {0}".format(endpoint))
     result = self._get_gitlab_api(endpoint).json()
     log.data(pretty(result))
     return result
Code Example #37
 def fetch(self):
     log.info(u"Searching for drafts published by {0}".format(self.user))
     self.stats = GerritUnit.fetch(self, 'is:draft', limit_since=True)
     log.debug(u"self.stats = {0}".format(self.stats))
Code Example #38
 def search(query, parent, options):
     """ Perform Trac search """
     # Extend the default max number of tickets to be fetched
     query = "{0}&max={1}".format(query, MAX_TICKETS)
     log.debug("Search query: {0}".format(query))
     try:
         result = parent.proxy.ticket.query(query)
     except xmlrpc.client.Fault as error:
         log.error("An error encountered, while searching for tickets.")
         raise ReportError(error)
     except xmlrpc.client.ProtocolError as error:
         log.debug(error)
         log.error("Trac url: {0}".format(parent.url))
         raise ReportError(
             "Unable to contact Trac server. Is the url above correct?")
     log.debug("Search result: {0}".format(result))
     # Fetch tickets and their history using multicall
     multicall = xmlrpc.client.MultiCall(parent.proxy)
     for ticket_id in sorted(result):
         multicall.ticket.get(ticket_id)
         multicall.ticket.changeLog(ticket_id)
     log.debug("Fetching trac tickets and their history")
     result = list(multicall())
     tickets = result[::2]
     changelogs = result[1::2]
     # Print debugging info
     for ticket, changelog in zip(tickets, changelogs):
         log.debug("Fetched ticket #{0}".format(ticket[0]))
         log.debug(pretty(ticket))
         log.debug("Changelog:")
         log.debug(pretty(changelog))
     # Return the list of ticket objects
     return [
         Trac(ticket, changelg, parent=parent, options=options)
         for ticket, changelg in zip(tickets, changelogs)
     ]
Code Example #39
    def __init__(self, arguments=None):
        """ Prepare the parser. """
        self.parser = argparse.ArgumentParser(usage=USAGE)
        self._prepare_arguments(arguments)
        self.opt = self.arg = None

        # Enable debugging output (even before options are parsed)
        if "--debug" in self.arguments:
            log.setLevel(utils.LOG_DEBUG)
        # Use a simple test config if smoke test requested
        if "--test" in self.arguments:
            did.base.Config(did.base.TEST_CONFIG)

        # Get the default output width from the config (if available)
        try:
            width = did.base.Config().width
        except did.base.ConfigFileError:
            width = did.base.MAX_WIDTH

        # Time & user selection
        group = self.parser.add_argument_group("Select")
        group.add_argument(
            "--email", dest="emails", default=[], action="append",
            help="User email address(es)")
        group.add_argument(
            "--since",
            help="Start date in the YYYY-MM-DD format")
        group.add_argument(
            "--until",
            help="End date in the YYYY-MM-DD format")

        # Create sample stats and include all stats objects options
        log.debug("Loading Sample Stats group to build Options")
        self.sample_stats = UserStats()
        self.sample_stats.add_option(self.parser)
        log.info("Default command line: did {0}".format(" ".join(
            [f'--{stat.option}' for stat in self.sample_stats.stats])))

        # Formatting options
        group = self.parser.add_argument_group("Format")
        group.add_argument(
            "--format", default="text",
            help="Output style, possible values: text (default) or wiki")
        group.add_argument(
            "--width", default=width, type=int,
            help="Maximum width of the report output (default: %(default)s)")
        group.add_argument(
            "--brief", action="store_true",
            help="Show brief summary only, do not list individual items")
        group.add_argument(
            "--verbose", action="store_true",
            help="Include more details (like modified git directories)")

        # Other options
        group = self.parser.add_argument_group("Utils")
        group.add_argument(
            "--config",
            metavar="FILE",
            help="Use alternate configuration file (default: 'config')")
        group.add_argument(
            "--total", action="store_true",
            help="Append total stats after listing individual users")
        group.add_argument(
            "--merge", action="store_true",
            help="Merge stats of all users into a single report")
        group.add_argument(
            "--debug", action="store_true",
            help="Turn on debugging output, do not catch exceptions")
        group.add_argument(
            "--test", action="store_true",
            help="Run a simple smoke test against the github server")
Code Example #40
File: cli.py  Project: thrix/did
    def __init__(self, arguments=None):
        """ Prepare the parser. """
        self.parser = argparse.ArgumentParser(
            usage="did [this|last] [week|month|quarter|year] [options]")
        self.arguments = arguments
        self.opt = self.arg = None

        # Enable debugging output (even before options are parsed)
        if "--debug" in sys.argv:
            log.setLevel(utils.LOG_DEBUG)

        # Get the default output width from the config (if available)
        try:
            width = did.base.Config().width
        except did.base.ConfigFileError:
            width = did.base.MAX_WIDTH

        # Time & user selection
        group = self.parser.add_argument_group("Select")
        group.add_argument("--email",
                           dest="emails",
                           default=[],
                           action="append",
                           help="User email address(es)")
        group.add_argument("--since",
                           help="Start date in the YYYY-MM-DD format")
        group.add_argument("--until", help="End date in the YYYY-MM-DD format")

        # Create sample stats and include all stats objects options
        log.debug("Loading Sample Stats group to build Options")
        self.sample_stats = UserStats()
        self.sample_stats.add_option(self.parser)

        # Formatting options
        group = self.parser.add_argument_group("Format")
        group.add_argument(
            "--format",
            default="text",
            help="Output style, possible values: text (default) or wiki")
        group.add_argument(
            "--width",
            default=width,
            type=int,
            help="Maximum width of the report output (default: %(default)s)")
        group.add_argument(
            "--brief",
            action="store_true",
            help="Show brief summary only, do not list individual items")
        group.add_argument(
            "--verbose",
            action="store_true",
            help="Include more details (like modified git directories)")

        # Other options
        group = self.parser.add_argument_group("Utils")
        group.add_argument(
            "--config",
            metavar="FILE",
            help="Use alternate configuration file (default: 'config')")
        group.add_argument(
            "--total",
            action="store_true",
            help="Append total stats after listing individual users")
        group.add_argument(
            "--merge",
            action="store_true",
            help="Merge stats of all users into a single report")
        group.add_argument(
            "--debug",
            action="store_true",
            help="Turn on debugging output, do not catch exceptions")
Code Example #41
File: bugzilla.py  Project: qiankehan/did
 def search(self, query, options):
     """ Perform Bugzilla search """
     query["query_format"] = "advanced"
     log.debug("Search query:")
     log.debug(pretty(query))
     # Fetch bug info
     try:
         result = self.server.query(query)
     except xmlrpclib.Fault as error:
         # Ignore non-existent users (this is necessary for users with
          # several email aliases to allow them to use --merge/--total)
         if "not a valid username" in unicode(error):
             log.debug(error)
             return []
         # Otherwise suggest to bake bugzilla cookies
         log.error("An error encountered, while searching for bugs.")
         log.debug(error)
         raise ReportError(
             "Have you baked cookies using the 'bugzilla login' command?")
     log.debug("Search result:")
     log.debug(pretty(result))
     bugs = dict((bug.id, bug) for bug in result)
     # Fetch bug history
     log.debug("Fetching bug history")
     result = self.server._proxy.Bug.history({'ids': bugs.keys()})
     log.debug(pretty(result))
     history = dict((bug["id"], bug["history"]) for bug in result["bugs"])
     # Fetch bug comments
     log.debug("Fetching bug comments")
     result = self.server._proxy.Bug.comments({'ids': bugs.keys()})
     log.debug(pretty(result))
     comments = dict(
         (int(bug), data["comments"])
         for bug, data in result["bugs"].items())
     # Create bug objects
     return [
         self.parent.bug(
             bugs[id], history[id], comments[id], parent=self.parent)
         for id in bugs]
Code Example #42
 def get_changelog(self, chg):
     messages_url = self.join_URL_frags(
         self.baseurl, '/changes/{0}/detail'.format(chg.change_id))
     changelog = self.get_query_result(messages_url)
     log.debug("changelog = {0}".format(changelog))
     return changelog
Code Example #43
 def fetch(self):
     log.info("Searching for changes merged by {0}".format(self.user))
     self.stats = GerritUnit.fetch(self, 'status:merged')
     log.debug("self.stats = {0}".format(self.stats))
Code Example #44
    def fetch(self,
              query_string="",
              common_query_options=None,
              limit_since=False):
        """
        Backend for the actual gerrit query.

        query_string:
            basic query terms, e.g., 'status:abandoned'
        common_query_options:
            [optional] rest of the query string; if omitted, the default
            one is used (limit by the current user and since option);
            if empty, nothing will be added to query_string
        limit_since:
            [optional] Boolean (defaults to False); post-process the results
            to eliminate items created before the since option.
        """
        work_list = []
        log.info("Searching for changes by {0}".format(self.user))
        log.debug('query_string = {0}, common_query_options = {1}'.format(
            query_string, common_query_options))

        self.since_date = self.get_gerrit_date(self.options.since)

        if common_query_options is None:
            # Calculate age from self.options.since
            #
            # Amount of time that has expired since the change was last
            # updated with a review comment or new patch set.
            #
            # Meaning that the time since we last changed the review is
            # GREATER than the given age.
            # For age SMALLER we need -age:<time>

            common_query_options = '+owner:{0}'.format(self.user.login)
            if not limit_since:
                age = (TODAY - self.since_date).days
                common_query_options += '+-age:{0}d'.format(age)

        common_query_options += '+since:{0}+until:{1}'.format(
            self.get_gerrit_date(self.options.since),
            self.get_gerrit_date(self.options.until))

        if isinstance(common_query_options, str) and \
                len(common_query_options) > 0:
            query_string += common_query_options

        log.debug('query_string = {0}'.format(query_string))
        log.debug('self.prefix = {0}'.format(self.prefix))
        log.debug('[fetch] self.base_url = {0}'.format(self.base_url))
        work_list = self.repo.search(query_string)

        if limit_since:
            tmplist = []
            log.debug('Limiting by since option')
            self.stats = []
            for chg in work_list:
                log.debug('chg = {0}'.format(chg))
                chg_created = self.get_gerrit_date(chg['created'][:10])
                log.debug('chg_created = {0}'.format(chg_created))
                if chg_created >= self.since_date:
                    tmplist.append(chg)
            work_list = tmplist[:]
        log.debug("work_list = {0}".format(work_list))

        # Return the list of tick_data objects
        return [Change(ticket, prefix=self.prefix) for ticket in work_list]
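
For illustration, this is how the simpler stats classes shown in Examples #43, #45, and #46 invoke this backend from their own fetch() methods, passing only the basic query terms:

# Simple status query (see Example #43): default owner/age options are appended
self.stats = GerritUnit.fetch(self, 'status:merged')
# WIP query (see Example #45): results are also post-filtered by the since date
self.stats = GerritUnit.fetch(self, 'status:open is:wip', limit_since=True)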
Code Example #45
 def fetch(self):
     log.info(u"Searching for WIP changes opened by {0}".format(self.user))
     self.stats = GerritUnit.fetch(self,
                                   'status:open is:wip',
                                   limit_since=True)
     log.debug(u"self.stats = {0}".format(self.stats))
Code Example #46
 def fetch(self):
     log.info(u"Searching for changes abandoned by {0}".format(self.user))
     self.stats = GerritUnit.fetch(self, 'status:abandoned')
     log.debug(u"self.stats = {0}".format(self.stats))
Code Example #47
    def search(self, query):
        """ Perform GitHub query """
        result = []
        url = self.url + "/" + query + f"&per_page={PER_PAGE}"

        while True:
            # Fetch the query
            log.debug(f"GitHub query: {url}")
            try:
                response = requests.get(url, headers=self.headers)
                log.debug(f"Response headers:\n{response.headers}")
            except requests.exceptions.RequestException as error:
                log.debug(error)
                raise ReportError(f"GitHub search on {self.url} failed.")

            # Check if credentials are valid
            log.debug(f"GitHub status code: {response.status_code}")
            if response.status_code == 401:
                raise ReportError("Defined token is not valid. "
                                  "Either update it or remove it.")

            # Parse fetched json data
            try:
                data = json.loads(response.text)["items"]
                result.extend(data)
            except json.JSONDecodeError as error:
                log.debug(error)
                raise ReportError(f"GitHub JSON failed: {response.text}.")

            # Update url to the next page, break if no next page provided
            if 'next' in response.links:
                url = response.links['next']['url']
            else:
                break

        log.debug("Result: {0} fetched".format(listed(len(result), "item")))
        log.data(pretty(result))
        return result
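
The pagination above relies on the Link header that requests exposes through response.links; a self-contained sketch of the same loop against the public GitHub search API (illustrative query, unauthenticated and therefore subject to strict rate limits):

# Generic Link-header pagination, mirroring the loop above (illustrative query)
import requests

url = "https://api.github.com/search/issues?q=author:octocat+is:issue&per_page=100"
items = []
while url:
    response = requests.get(url, timeout=30)
    response.raise_for_status()
    items.extend(response.json().get("items", []))
    # requests parses the Link header; stop when there is no next page
    url = response.links.get("next", {}).get("url")
print("Fetched {0} items".format(len(items)))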