Example #1
def test_drop_piece(self):
    board = np.zeros((6, 7))
    row = 0
    col = 0
    piece = 1
    ob = Repo()
    ob.drop_piece(board, row, col, piece)
    self.assertEqual(board[0][0], 1)
Example #2
def main(argv):
    parser = argparse.ArgumentParser(
        description="Count unique contributors over time.")
    parser.add_argument(
        "--type",
        choices=["external", "internal"],
        default="external",
        help="What kind of pull requests should be counted [%(default)s]")
    parser.add_argument(
        "--window",
        metavar="DAYS",
        type=int,
        default=90,
        help="Count contributors over this large a window [%(default)d]")
    parser.add_argument("--start",
                        type=date_arg,
                        help="Date to start collecting, format is flexible: "
                        "20141225, Dec/25/2014, 2014-12-25, etc")
    args = parser.parse_args(argv[1:])

    if args.start is None:
        args.start = make_timezone_aware(datetime.datetime(2013, 6, 5))

    if args.type == "external":
        interesting = lambda issue: issue.intext == "external"
    elif args.type == "internal":
        interesting = lambda issue: issue.intext == "internal"

    repos = [r.name for r in Repo.from_yaml() if r.track_pulls]
    for when, num_authors in unique_authors(repos, args.window, interesting):
        if when < args.start:
            continue
        print("{0:%Y-%m-%d}\t{1}".format(when, num_authors))
Example #3
def main(argv):
    parser = argparse.ArgumentParser(description="Summarize pull requests by organization.")
    parser.add_argument("--since", metavar="DAYS", type=int,
        help="Only consider pull requests closed in the past DAYS days"
    )
    parser.add_argument("--start", type=date_arg,
        help="Date to start collecting, format is flexible: "
        "20141225, Dec/25/2014, 2014-12-25, etc"
    )
    parser.add_argument("--end", type=date_arg,
        help="Date to end collecting, format is flexible: "
        "25/Dec/2014, 12/25/2014, 2014-12-25, etc"
    )

    args = parser.parse_args(argv[1:])

    since = None
    if args.since:
        since = date.today() - timedelta(days=args.since)
    if args.start:
        if since is not None:
            raise Exception("Can't use --since and --start")
        since = args.start

    repos = [ r for r in Repo.from_yaml() if r.track_pulls ]

    by_org = collections.defaultdict(list)

    for repo in repos:
        for pull in get_pulls(repo.name, state="closed", pull_details="list", org=True, since=since):
            # We only want external pull requests.
            if pull['intext'] != "external":
                continue
            # We only want merged pull requests.
            if pull['combinedstate'] != "merged":
                continue

            if args.end is not None:
                # We don't want to count things merged after our end date.
                merged = dateutil.parser.parse(pull['pull.merged_at'])
                if merged >= args.end:
                    continue

            by_org[pull['org']].append(pull)

    keys = sorted(by_org, key=lambda k: len(by_org[k]), reverse=True)
    for key in keys:
        print("{}: {}".format(key, len(by_org[key])))

    fmt = "{number:5d} {user.login:>17s} {title}"

    for i, pull in enumerate(by_org['other']):
        if i == 0:
            print("\n'Other' pull requests:")
        print(pull.format(fmt))

    for i, pull in enumerate(by_org['unsigned']):
        if i == 0:
            print("\nUnsigned authors:")
        print(pull.format(fmt))
Example #4
def get_all_repos(date_bucket_fn,
                  start,
                  by_size=False,
                  lines=False,
                  closed=False):
    repos = [r for r in Repo.from_yaml() if r.track_pulls]

    dimensions = []
    if closed:
        dimensions.append(["opened", "merged", "closed"])
    else:
        dimensions.append(["opened", "merged"])
    dimensions.append(["internal", "external"])
    if by_size:
        dimensions.append(["small", "large"])

    keys = [" ".join(prod) for prod in itertools.product(*dimensions)]
    bucket_blank = dict.fromkeys(keys, 0)

    buckets = collections.defaultdict(lambda: dict(bucket_blank))
    for repo in repos:
        get_bucket_data(buckets,
                        repo.name,
                        date_bucket_fn,
                        start=start,
                        by_size=by_size,
                        lines=lines,
                        closed=closed)

    print("timespan\t" + "\t".join(keys))
    for time_period in sorted(buckets.keys()):
        data = buckets[time_period]
        print("{}\t{}".format(time_period,
                              "\t".join(str(data[k]) for k in keys)))
Example #5
def get_all_repos(date_bucket_fn):
    repos = [ r for r in Repo.from_yaml() if r.track_pulls ]

    def bucket_blank():
        return {
            "opened": {
                "internal": 0,
                "external": 0,
            },
            "merged": {
                "internal": 0,
                "external": 0,
            },
        }

    buckets = collections.defaultdict(bucket_blank)
    for repo in repos:
        get_bucket_data(buckets, repo.name, date_bucket_fn)

    print("qrtr\topened internal\tmerged internal\topened external\tmerged external")
    for q in sorted(buckets.keys()):
        data = buckets[q]
        print("{}\t{}\t{}\t{}\t{}".format(q,
            data["opened"]["internal"],
            data["merged"]["internal"],
            data["opened"]["external"],
            data["merged"]["external"],
        ))
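
The factory argument matters here: collections.defaultdict calls bucket_blank once per missing key, so each quarter gets its own fresh counters rather than aliasing one shared dict. A standalone sketch:

import collections

def bucket_blank():
    return {"opened": {"internal": 0, "external": 0},
            "merged": {"internal": 0, "external": 0}}

buckets = collections.defaultdict(bucket_blank)
buckets["2014.1"]["opened"]["internal"] += 1
buckets["2014.2"]["opened"]["internal"] += 1
print(buckets["2014.1"]["opened"]["internal"])  # 1 -- buckets are independent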
Example #6
def main(argv):
    parser = argparse.ArgumentParser(description="Count unique contributors over time.")
    parser.add_argument(
        "--type", choices=["external", "internal"], default="external",
        help="What kind of pull requests should be counted [%(default)s]"
    )
    parser.add_argument(
        "--window", metavar="DAYS", type=int, default=90,
        help="Count contributors over this large a window [%(default)d]"
    )
    parser.add_argument(
        "--start", type=date_arg,
        help="Date to start collecting, format is flexible: "
        "20141225, Dec/25/2014, 2014-12-25, etc"
    )
    args = parser.parse_args(argv[1:])

    if args.start is None:
        args.start = make_timezone_aware(datetime.datetime(2013, 6, 5))

    if args.type == "external":
        interesting = lambda issue: issue.intext == "external"
    elif args.type == "internal":
        interesting = lambda issue: issue.intext == "internal"

    repos = [ r.name for r in Repo.from_yaml() if r.track_pulls ]
    for when, num_authors in unique_authors(repos, args.window, interesting):
        if when < args.start:
            continue
        print("{0:%Y-%m-%d}\t{1}".format(when, num_authors))
Example #7
    def __init__(self, odooenv, name):
        """ Busca el cliente en la estructura de directorios, pero si no lo
            encuentra pide un directorio donde esta el repo que lo contiene
        """
        # parent is always an OdooEnv object
        self._parent = odooenv
        self._name = name

        # when running a test, read from the data directory
        if name[0:5] == 'test_':
            path = os.path.dirname(os.path.abspath(__file__))
            path = path.replace('scripts', 'data')
            manifest = self.get_manifest(path)
        else:
            manifest = self.get_manifest(BASE_DIR)
        if not manifest:
            msg.inf('Cannot find client {} on this host. Please provide the '
                    'path to the repo where it is, or hit Enter to exit.'
                    '\n'.format(self._name))

            path = raw_input('path = ')
            manifest = self.get_manifest(path)
            if not manifest:
                msg.err('Cannot find client {} on this host'.format(name))

            msg.inf('Client found!')
            msg.inf('Name {}\nversion {}\n'.format(manifest.get('name'),
                                                   manifest.get('version')))

        # Check that everything is in order
        if not manifest.get('docker'):
            msg.err('No images in manifest {}'.format(self.name))

        if not manifest.get('repos'):
            msg.err('No repos in manifest {}'.format(self.name))

        self._port = manifest.get('port')
        if not self._port:
            msg.err('No port in manifest {}'.format(self.name))

        self._version = manifest.get('version')[0:3]
        if not self._version:
            msg.err('No version tag in manifest {}'.format(self.name))

        # Create images and repos
        self._repos = []
        for rep in manifest.get('repos'):
            self._repos.append(Repo(rep))

        self._images = []
        for img in manifest.get('docker'):
            self._images.append(Image(img))

        # TODO: duplicated code
        # get first word of name in lowercase
        name = manifest.get('name').lower()
        if not self._name == name.split()[0]:
            msg.err('You intend to install client {} but in manifest, '
                    'the name is {}'.format(self._name, manifest.get('name')))
Example #8
def test_pick_best_move(self):
    board = np.zeros((6, 7))
    board[0][0] = 2
    board[0][1] = 2
    board[0][2] = 2
    AI = 2
    ob = Service(Repo())
    self.assertEqual(ob.pick_best_move(board, AI), 3)
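
Presumably piece 2 (the AI) already holds columns 0-2 of the bottom row here, so column 3 completes four in a row; that would be why pick_best_move returns 3.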
Example #9
def test_is_terminal_node(self):
    board = np.zeros((6, 7))
    board[0][0] = 1
    board[0][1] = 1
    board[0][2] = 1
    board[0][3] = 1
    ob = Service(Repo())
    self.assertEqual(ob.is_terminal_node(board), True)
Example #10
def test_score_position(self):
    board = np.zeros((6, 7))
    board[0][0] = 1
    board[0][1] = 1
    board[0][2] = 1
    board[0][3] = 1
    ob = Service(Repo())
    self.assertEqual(ob.score_position(board, 1), 110)
Example #11
def test_winning_move(self):
    board = np.zeros((6, 7))
    board[0][0] = 1
    board[0][1] = 1
    board[0][2] = 1
    board[0][3] = 1
    ob = Service(Repo())
    self.assertEqual(ob.winning_move(board, 1), True)
Example #12
def test_minimax(self):
    board = np.zeros((6, 7))
    board[0][0] = 2
    board[0][1] = 2
    board[0][2] = 2
    board[0][3] = 2
    ob = Service(Repo())
    self.assertEqual(ob.minimax(board, 0, -math.inf, math.inf, True),
                     (None, 100000000000000))
Example #13
def find_git_repos(dirpath, excludes, depth, name_for_parent=1):
    """Return list of directories containing a `.git` file or directory

    Results matching globbing patterns in `excludes` will be ignored.

    `depth` is how many directories deep to search (2 is the minimum in
    most situations).

    `name_for_parent` is which level of the directory hierarchy to name
    the repo after relative to `.git` (1=immediate parent, 2=grandparent)

    """
    start = time()

    cmd = ['find', '-L', dirpath, '-maxdepth', str(depth), '-name', '.git']

    output = subprocess.check_output(cmd)
    output = [
        os.path.dirname(s.strip()) for s in decode(output).split('\n')
        if s.strip()
    ]

    results = []
    for filepath in output:
        ignore = False
        for pattern in excludes:
            if fnmatch(filepath, pattern):
                ignore = True
                break

        if ignore:
            continue

        # Work out name for repo
        if name_for_parent < 2:  # ignore 0, it's pointless
            name = os.path.basename(filepath)
        else:
            components = filepath.rstrip('/').split('/')
            if name_for_parent >= len(components):
                log.warning(
                    u'%s : `name_for_parent` is %d, but '
                    u'only %d levels in file tree', filepath, name_for_parent,
                    len(components))
                name = os.path.basename(filepath)
            else:
                name = components[-(name_for_parent)]

        results.append(Repo(name, filepath))

    log.debug(u'%d repo(s) found in `%s` in %0.2fs', len(results), dirpath,
              time() - start)

    for r in results:
        log.debug('    %r', r)

    return results
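
A hypothetical invocation; the path and glob patterns below are illustrative, not from the source:

repos = find_git_repos(
    os.path.expanduser('~/code'),  # tree to scan (hypothetical)
    excludes=['*/vendor/*'],       # fnmatch-style globs to skip (hypothetical)
    depth=3,                       # becomes find's -maxdepth 3
    name_for_parent=1,             # name repos after .git's parent directory
)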
Example #14
def show_pulls(labels=None, show_comments=False, state="open", since=None,
               org=False, intext=None, merged=False):
    """
    `labels`: Filters PRs by labels (all are shown if None is specified)
    `show_comments`: shows the last 5 comments on each PR, if True
    `state`: Filter PRs by this state (either 'open' or 'closed')
    `since`: a datetime representing the earliest time from which to pull information.
             All PRs regardless of time are shown if None is specified.
    `org`: If True, sorts by PR author affiliation
    `intext`: specify 'int' (internal) or 'ext' (external) pull request
    `merged`: If True and state="closed", shows only PRs that were merged.
    """
    num = 0
    adds = 0
    deletes = 0
    repos = [ r for r in Repo.from_yaml() if r.track_pulls ]
    for repo in repos:
        issues = get_pulls(repo.name, labels, state, since, org=org or intext, pull_details="all")

        category = None
        for issue in issues:
            issue.repo = repo.nick
            if intext is not None:
                if issue.intext != intext:
                    continue
            if state == 'closed' and merged and issue.combinedstate != 'merged':
                # If we're filtering on closed PRs, and only want those that are merged,
                # skip ones that were closed without merge.
                continue
            if state == 'closed' and since:
                # If this PR was closed prior to the last `since` interval of days, continue on
                # (it may have been *updated* - that is, referenced or commented on - more recently,
                #  but we just want to see what's been merged or closed in the past "since" days)
                if issue.closed_at < since:
                    continue

            if org and issue.org != category:
                # new category! print category header
                category = issue.org
                print("-- {category} ----".format(category=category))

            print(fformat(ISSUE_FMT, issue))
            num += 1
            adds += issue.additions
            deletes += issue.deletions

            if show_comments:
                comments = get_comments(issue)
                last_five_comments = reversed(more_itertools.take(5, comments))
                for comment in last_five_comments:
                    print(fformat(COMMENT_FMT, comment))

    print()
    print("{num} pull requests; {adds}+ {deletes}-".format(num=num, adds=adds, deletes=deletes))
Example #15
def get_all_repos(date_bucket_fn, start, lines=False, internal=False):
    repos = [r for r in Repo.from_yaml() if r.track_pulls]

    dimensions = [["merged", "closed", "unresolved", "opened"]]
    if internal:
        dimensions.append(["internal"])
    else:
        dimensions.append(["external"])

    keys = [" ".join(prod) for prod in itertools.product(*dimensions)]
    bucket_blank = dict.fromkeys(keys, 0)

    buckets = collections.defaultdict(lambda: dict(bucket_blank))
    for repo in repos:
        get_bucket_data(buckets, repo.name, date_bucket_fn, start=start, lines=lines, internal=internal)

    print_repo_output(keys, buckets)
Example #16
def get_all_repos(date_bucket_fn, start, by_size=False, lines=False, closed=False):
    repos = [r for r in Repo.from_yaml() if r.track_pulls]

    dimensions = []
    if closed:
        dimensions.append(["opened", "merged", "closed"])
    else:
        dimensions.append(["opened", "merged"])
    dimensions.append(["internal", "external"])
    if by_size:
        dimensions.append(["small", "large"])

    keys = [" ".join(prod) for prod in itertools.product(*dimensions)]
    bucket_blank = dict.fromkeys(keys, 0)

    buckets = collections.defaultdict(lambda: dict(bucket_blank))
    for repo in repos:
        get_bucket_data(buckets, repo.name, date_bucket_fn, start=start, by_size=by_size, lines=lines, closed=closed)

    print_repo_output(keys, buckets)
Example #17
def main(argv):

    parser = argparse.ArgumentParser(description="Collect info about people commenting on pull requests")
    parser.add_argument("--since", metavar="DAYS", type=int, default=14,
        help="Include comments in this many days [%(default)d]"
    )
    parser.add_argument("--debug", action="store_true",
        help="Break down by organization"
    )
    args = parser.parse_args(argv[1:])

    since = None
    if args.since:
        since = datetime.now() - timedelta(days=args.since)

    internal = get_internal()
    repos = [ r for r in Repo.from_yaml() if r.track_pulls ]
    for repo in repos:
        get_comment_data(repo.name, since=since, internal=internal.get)

    return 0
Example #18
def get_all_repos(date_bucket_fn, start, lines=False, internal=False):
    repos = [r for r in Repo.from_yaml() if r.track_pulls]

    dimensions = [["merged", "closed", "unresolved", "opened"]]
    if internal:
        dimensions.append(["internal"])
    else:
        dimensions.append(["external"])

    keys = [" ".join(prod) for prod in itertools.product(*dimensions)]
    bucket_blank = dict.fromkeys(keys, 0)

    buckets = collections.defaultdict(lambda: dict(bucket_blank))
    for repo in repos:
        get_bucket_data(buckets,
                        repo.name,
                        date_bucket_fn,
                        start=start,
                        lines=lines,
                        internal=internal)

    print_repo_output(keys, buckets)
Example #19
def get_all_repos(date_bucket_fn, start, by_size=False, lines=False, closed=False):
    repos = [r for r in Repo.from_yaml() if r.track_pulls]

    dimensions = []
    if closed:
        dimensions.append(["opened", "merged", "closed"])
    else:
        dimensions.append(["opened", "merged"])
    dimensions.append(["internal", "external"])
    if by_size:
        dimensions.append(["small", "large"])

    keys = [" ".join(prod) for prod in itertools.product(*dimensions)]
    bucket_blank = dict.fromkeys(keys, 0)

    buckets = collections.defaultdict(lambda: dict(bucket_blank))
    for repo in repos:
        get_bucket_data(buckets, repo.name, date_bucket_fn, start=start, by_size=by_size, lines=lines, closed=closed)

    print("timespan\t" + "\t".join(keys))
    for time_period in sorted(buckets.keys()):
        data = buckets[time_period]
        print("{}\t{}".format(time_period, "\t".join(str(data[k]) for k in keys)))
Example #20
def main(argv):
    parser = argparse.ArgumentParser(description="Summarize pull requests by organization.")
    parser.add_argument(
        "--since", metavar="DAYS", type=int,
        help="Only consider pull requests closed in the past DAYS days"
    )
    parser.add_argument(
        "--start", type=date_arg,
        help="Date to start collecting, format is flexible: "
        "20141225, Dec/25/2014, 2014-12-25, etc"
    )
    parser.add_argument(
        "--end", type=date_arg,
        help="Date to end collecting, format is flexible: "
        "25/Dec/2014, 12/25/2014, 2014-12-25, etc"
    )
    parser.add_argument(
        "--short", action="store_true",
        help="Only show the short summary"
    )

    args = parser.parse_args(argv[1:])

    since = None
    if args.since:
        since = make_timezone_aware(datetime.today() - timedelta(days=args.since))
    if args.start:
        if since is not None:
            raise Exception("Can't use --since and --start")
        since = args.start

    repos = [ r for r in Repo.from_yaml() if r.track_pulls ]

    by_org = collections.defaultdict(list)

    for repo in repos:
        for pull in get_pulls(repo.name, state="closed", pull_details="list", org=True, since=since):
            # We only want external pull requests.
            if pull.intext != "external":
                continue
            # We only want merged pull requests.
            if pull.combinedstate != "merged":
                continue
            # Pull requests can be recently modified even if they were merged
            # long ago, so only take things merged since our since date.
            merged = make_timezone_aware(pull.merged_at)
            if merged < since:
                continue

            if args.end is not None:
                # We don't want to count things merged after our end date.
                if merged >= args.end:
                    continue

            pull.repo = repo.nick
            by_org[pull.org].append(pull)

    keys = sorted(by_org, key=lambda k: len(by_org[k]), reverse=True)
    for key in keys:
        print("{}: {}".format(key, len(by_org[key])))

    fmt = "{pull.repo:4s} {pull.number:5d} {pull.user_login:>17s} {pull.title}"

    if args.short:
        if 'unsigned' in keys:
            print("\n-- {} -------".format('unsigned'))
            for pull in by_org['unsigned']:
                print(fmt.format(pull=pull))
    else:
        for key in keys:
            print("\n-- {} -------".format(key))
            for pull in by_org[key]:
                print(fmt.format(pull=pull))
Example #21
def test_is_valid_location(self):
    board = np.zeros((6, 7))
    board[0][0] = 1
    col = 0
    ob = Service(Repo())
    self.assertEqual(ob.is_valid_location(board, col), True)
Example #22
def get_wall_data(pretty=False):
    """Returns a JSON string of aging data for the wall display."""
    repos = [ r for r in Repo.from_yaml() if r.track_pulls ]
    wall_data = WallMaker().show_wall(repos)
    return json.dumps(wall_data, indent=4 if pretty else None)
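
The indent switch is what toggles pretty-printing: json.dumps emits a single compact line when indent is None. A standalone sketch:

import json

data = {"open_prs": 3, "oldest_days": 12}
print(json.dumps(data, indent=None))  # one compact line
print(json.dumps(data, indent=4))     # one key per line, indented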
Example #23
def find_git_repos(dirpath, excludes, depth, uid, gids, name_for_parent=1):
    """Return list of directories containing a `.git` file or directory.

    Results matching globbing patterns in `excludes` will be ignored.

    `depth` is how many directories deep to search (2 is the minimum in
    most situations).

    `name_for_parent` is which level of the directory hierarchy to name
    the repo after relative to `.git` (1=immediate parent, 2=grandparent)

    """
    def _group(args, primary, operator=None):
        """Pair each arg with primary, then join pairs with operator."""
        out = ['(']
        for i, arg in enumerate(args):
            if operator and i > 0:
                out.append(operator)

            out += [primary, arg]

        return out + [')']

    start = time()

    cmd = ['find', '-L', dirpath, '-maxdepth', str(depth)]
    # excludes converted to `find` arguments
    if excludes:
        cmd += _group(excludes, '-name', '-o') + ['-prune', '-o']

    # ignore unreadable directories
    # https://unix.stackexchange.com/a/257058
    cmd.append('(')
    # ignore user-owned that we can't open
    cmd += ['-uid', str(uid), '(', '-perm', '-u=rx', '-o', '-prune', ')']

    # ignore group-owned that we can't open
    cmd += ['-o'] + _group([str(n) for n in gids], '-gid', '-o')
    cmd += ['(', '-perm', '-g=rx', '-o', '-prune', ')']

    # ignore other-owned that we can't open
    cmd += ['-o', '(', '-perm', '-o=rx', '-o', '-prune', ')']
    # close "unreadable" group
    cmd.append(')')

    cmd += ['-name', '.git', '-print']
    cmd = [utf8ify(s) for s in cmd]
    try:
        output = subprocess.check_output(cmd)
    except Exception as err:
        log.exception('failed: %r', err)
        raise err

    output = [
        os.path.dirname(s.strip()) for s in decode(output).split('\n')
        if s.strip()
    ]

    results = []
    for filepath in output:
        ignore = False
        for pattern in excludes:
            if fnmatch(filepath, pattern):
                ignore = True
                break

        if ignore:
            continue

        # Work out name for repo
        if name_for_parent < 2:  # ignore 0, it's pointless
            name = os.path.basename(filepath)
        else:
            components = filepath.rstrip('/').split('/')
            if name_for_parent >= len(components):
                log.warning(
                    '%s : `name_for_parent` is %d, but '
                    'only %d levels in file tree', filepath, name_for_parent,
                    len(components))
                name = os.path.basename(filepath)
            else:
                name = components[-(name_for_parent)]

        results.append(Repo(name, filepath))

    log.debug('%d repo(s) found in `%s` in %0.2fs', len(results), dirpath,
              time() - start)

    for r in results:
        log.debug('    %r', r)

    return results
Example #24
def get_wall_data(pretty=False):
    """Returns a JSON string of aging data for the wall display."""
    repos = [r for r in Repo.from_yaml() if r.track_pulls]
    wall_data = WallMaker().show_wall(repos)
    return json.dumps(wall_data, indent=4 if pretty else None)
Example #25
def get_all_external_pulls():
    repos = [ r.name for r in Repo.from_yaml() if r.track_pulls ]
    for repo in repos:
        for pull in get_external_pulls(repo):
            yield pull
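
A hypothetical consumer; the generator yields pull requests lazily, one repo at a time, assuming the user_login attribute seen in the other examples:

from collections import Counter

authors = Counter(pull.user_login for pull in get_all_external_pulls())
print(authors.most_common(10))  # ten most prolific external authors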
Example #26
def find_git_repos(dirpath,
                   excludes,
                   depth,
                   user_id,
                   group_ids,
                   name_for_parent=1):
    """Return list of directories containing a `.git` file or directory.

    Results matching globbing patterns in `excludes` will be ignored.

    `depth` is how many directories deep to search (2 is the minimum in
    most situations).

    `name_for_parent` is which level of the directory hierarchy to name
    the repo after relative to `.git` (1=immediate parent, 2=grandparent)

    """
    start = time()

    # assemble excludes for find
    excludes_for_find = []
    # add excludes from config
    if excludes:
        excludes_for_find += ['('] + \
            ' -o '.join(['-name ' + exclude for exclude in excludes]).split(' ') + \
            [')', '-prune', '-o']
    # tell it to silently ignore folders it can't open
    excludes_for_find.append('(')
    # ignore user-owned that we can't open
    excludes_for_find += [
        '-uid',
        str(user_id), '(', '-perm', '-u=rx', '-o', '-prune', ')'
    ]
    excludes_for_find.append('-o')
    # ignore group-owned that we can't open
    excludes_for_find += ['('] + \
        ' -o '.join(['-gid ' + str(gid) for gid in group_ids]).split(' ') + \
        [')', '(', '-perm', '-g=rx', '-o', '-prune', ')']
    excludes_for_find.append('-o')
    # ignore other-owned that we can't open
    excludes_for_find += ['(', '-perm', '-o=rx', '-o', '-prune', ')']
    excludes_for_find.append(')')

    cmd = ['find', '-L', dirpath,
           '-maxdepth', str(depth)] + \
           excludes_for_find + \
           ['-name', '.git',
           '-print']

    output = subprocess.check_output(cmd)
    output = [
        os.path.dirname(s.strip()) for s in decode(output).split('\n')
        if s.strip()
    ]

    results = []
    for filepath in output:
        ignore = False
        for pattern in excludes:
            if fnmatch(filepath, pattern):
                ignore = True
                break

        if ignore:
            continue

        # Work out name for repo
        if name_for_parent < 2:  # ignore 0, it's pointless
            name = os.path.basename(filepath)
        else:
            components = filepath.rstrip('/').split('/')
            if name_for_parent >= len(components):
                log.warning(
                    u'%s : `name_for_parent` is %d, but '
                    u'only %d levels in file tree', filepath, name_for_parent,
                    len(components))
                name = os.path.basename(filepath)
            else:
                name = components[-(name_for_parent)]

        results.append(Repo(name, filepath))

    log.debug(u'%d repo(s) found in `%s` in %0.2fs', len(results), dirpath,
              time() - start)

    for r in results:
        log.debug('    %r', r)

    return results
Example #27
def main(argv):
    global DEBUG

    parser = argparse.ArgumentParser(description="Summarize pull requests.")
    parser.add_argument(
        "--since",
        metavar="DAYS",
        type=int,
        default=14,
        help=
        "For closed issues, only include issues updated in the past DAYS days [%(default)d]"
    )
    parser.add_argument("--human",
                        action="store_true",
                        help="Human-readable output")
    parser.add_argument("--org",
                        action="store_true",
                        help="Break down by organization")
    parser.add_argument("--debug",
                        action="store_true",
                        help="Show debugging info")
    args = parser.parse_args(argv[1:])

    DEBUG = args.debug

    since = None
    if args.since:
        since = date.today() - timedelta(days=args.since)

    if args.org:
        categories = sorted(get_all_orgs())

        def cat_filter(cat, pr):
            return pr.org == cat
    else:
        categories = ["all"]

        def cat_filter(cat, pr):
            return True

    durations = {
        "open": {
            "internal": [],
            "external": [],
        },
        "closed": {
            "internal": [],
            "external": [],
        }
    }

    repos = [r for r in Repo.from_yaml() if r.track_pulls]
    for repo in repos:
        get_duration_data(durations, repo.name, since)

    for linenum, cat in enumerate(categories):
        ss_friendly = []
        for position in ("external", "internal"):
            for state in ("open", "closed"):
                seconds = [
                    p.duration.total_seconds()
                    for p in durations[state][position] if cat_filter(cat, p)
                ]
                if seconds:
                    median_seconds = int(statistics.median(seconds))
                    median_duration = timedelta(seconds=median_seconds)
                else:
                    median_seconds = -1
                    median_duration = "no data"
                population = "all"
                if state == "closed" and since:
                    population = "since {date}".format(date=since)
                if args.human:
                    print(
                        "median {position} {state} ({population}): {duration}".
                        format(position=position,
                               state=state,
                               population=population,
                               duration=median_duration))
                else:
                    ss_friendly += [len(seconds), median_seconds]

        if ss_friendly:
            if linenum == 0:
                print(
                    "cat\twhen\trepos\teopen\teopenage\teclosed\teclosedage\tiopen\tiopenage\ticlosed\ticlosedage"
                )
            ss_data = "\t".join(str(x) for x in ss_friendly)
            print("{}\t{:%m/%d/%Y}\t{}\t{}".format(cat, date.today(),
                                                   len(repos), ss_data))
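
The median step above reduces each duration to seconds and converts the result back; a standalone sketch:

import statistics
from datetime import timedelta

durations = [timedelta(hours=5), timedelta(days=2), timedelta(days=10)]
median_seconds = int(statistics.median(d.total_seconds() for d in durations))
print(timedelta(seconds=median_seconds))  # 2 days, 0:00:00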
Example #28
def test_get_valid_locations(self):
    board = np.zeros((6, 7))
    board[0][0] = 2
    ob = Service(Repo())
    self.assertEqual(ob.get_valid_locations(board), [0, 1, 2, 3, 4, 5, 6])
Example #29
def test_get_next_open_row(self):
    board = np.zeros((6, 7))
    board[0][0] = 1
    ob = Service(Repo())
    r = ob.get_next_open_row(board, 0)
    self.assertEqual(r, 1)
Example #30
from repos import Repo
from service import Service
from ui import UI
from valid import Valid

v = Valid()
r = Repo()
s = Service(r)
ui = UI(s)
ui.run()
Example #31
#!/usr/bin/env python
"""List the webhooks in all the repos."""

from __future__ import print_function

import pprint

from helpers import paginated_get
from repos import Repo

repos = Repo.from_yaml()
repo_names = sorted(repo.name for repo in repos)
for repo_name in repo_names:
    print("\n-- {} ---------------------".format(repo_name))
    url = "https://api.github.com/repos/{name}/hooks".format(name=repo_name)
    for r in paginated_get(url):
        print("{r[name]}".format(r=r))
        for k, v in sorted(r['config'].items()):
            print("  {k}: {v}".format(k=k, v=v))
Example #32
def show_pulls(labels=None,
               show_comments=False,
               state="open",
               since=None,
               org=False,
               intext=None,
               merged=False):
    """
    `labels`: Filters PRs by labels (all are shown if None is specified)
    `show_comments`: shows the last 5 comments on each PR, if True
    `state`: Filter PRs by this state (either 'open' or 'closed')
    `since`: a datetime representing the earliest time from which to pull information.
             All PRs regardless of time are shown if None is specified.
    `org`: If True, sorts by PR author affiliation
    `intext`: specify 'int' (internal) or 'ext' (external) pull request
    `merged`: If True and state="closed", shows only PRs that were merged.
    """
    num = 0
    adds = 0
    deletes = 0
    repos = [r for r in Repo.from_yaml() if r.track_pulls]
    for repo in repos:
        issues = get_pulls(repo.name,
                           labels,
                           state,
                           since,
                           org=org or intext,
                           pull_details="all")

        category = None
        for issue in issues:
            issue.repo = repo.nick
            if intext is not None:
                if issue.intext != intext:
                    continue
            if state == 'closed' and merged and issue.combinedstate != 'merged':
                # If we're filtering on closed PRs, and only want those that are merged,
                # skip ones that were closed without merge.
                continue
            if state == 'closed' and since:
                # If this PR was closed prior to the last `since` interval of days, continue on
                # (it may have been *updated* - that is, referenced or commented on - more recently,
                #  but we just want to see what's been merged or closed in the past "since" days)
                if issue.closed_at < since:
                    continue

            if org and issue.org != category:
                # new category! print category header
                category = issue.org
                print("-- {category} ----".format(category=category))

            print(fformat(ISSUE_FMT, issue))
            num += 1
            adds += issue.additions
            deletes += issue.deletions

            if show_comments:
                comments = get_comments(issue)
                last_five_comments = reversed(more_itertools.take(5, comments))
                for comment in last_five_comments:
                    print(fformat(COMMENT_FMT, comment))

    print()
    print("{num} pull requests; {adds}+ {deletes}-".format(num=num,
                                                           adds=adds,
                                                           deletes=deletes))
Example #33
def main(argv):
    global DEBUG

    parser = argparse.ArgumentParser(description="Summarize pull requests.")
    parser.add_argument("--since", metavar="DAYS", type=int, default=14,
        help="For closed issues, only include issues updated in the past DAYS days [%(default)d]"
    )
    parser.add_argument("--human", action="store_true",
        help="Human-readable output"
    )
    parser.add_argument("--org", action="store_true",
        help="Break down by organization"
    )
    parser.add_argument("--debug", action="store_true",
        help="Show debugging info"
    )
    args = parser.parse_args(argv[1:])

    DEBUG = args.debug

    since = None
    if args.since:
        since = date.today() - timedelta(days=args.since)

    if args.org:
        categories = sorted(get_all_orgs())
        def cat_filter(cat, pr):
            return pr['org'] == cat
    else:
        categories = ["all"]
        def cat_filter(cat, pr):
            return True

    durations = {
        "open": {
            "internal": [],
            "external": [],
        },
        "closed": {
            "internal": [],
            "external": [],
        }
    }

    repos = [ r for r in Repo.from_yaml() if r.track_pulls ]
    for repo in repos:
        get_duration_data(durations, repo.name, since)

    for linenum, cat in enumerate(categories):
        ss_friendly = []
        for position in ("external", "internal"):
            for state in ("open", "closed"):
                seconds = [p['duration'].total_seconds() for p in durations[state][position] if cat_filter(cat, p)]
                if seconds:
                    median_seconds = int(statistics.median(seconds))
                    median_duration = timedelta(seconds=median_seconds)
                else:
                    median_seconds = -1
                    median_duration = "no data"
                population = "all"
                if state == "closed" and since:
                    population = "since {date}".format(date=since)
                if args.human:
                    print("median {position} {state} ({population}): {duration}".format(
                        position=position, state=state, population=population,
                        duration=median_duration
                    ))
                else:
                    ss_friendly += [len(seconds), median_seconds]

        if ss_friendly:
            if linenum == 0:
                print("cat\twhen\trepos\teopen\teopenage\teclosed\teclosedage\tiopen\tiopenage\ticlosed\ticlosedage")
            ss_data = "\t".join(str(x) for x in ss_friendly)
            print("{}\t{:%m/%d/%Y}\t{}\t{}".format(cat, date.today(), len(repos), ss_data))
Example #34
def show_pulls(labels=None, show_comments=False, state="open", since=None,
               org=False, intext=None, merged=False):
    """
    `labels`: Filters PRs by labels (all are shown if None is specified)
    `show_comments`: shows the last 5 comments on each PR, if True
    `state`: Filter PRs by this state (either 'open' or 'closed')
    `since`: a datetime representing the earliest time from which to pull information.
             All PRs regardless of time are shown if None is specified.
    `org`: If True, sorts by PR author affiliation
    `intext`: specify 'int' (internal) or 'ext' (external) pull request
    `merged`: If True and state="closed", shows only PRs that were merged.
    """
    num = 0
    adds = 0
    deletes = 0
    repos = [ r for r in Repo.from_yaml() if r.track_pulls ]
    for repo in repos:
        issues = get_pulls(repo.name, labels, state, since, org=org or intext, pull_details="all")

        category = None
        for issue in issues:
            issue["repo"] = repo.nick
            if intext is not None:
                if issue["intext"] != intext:
                    continue
            if state == 'closed' and merged and issue['combinedstate'] != 'merged':
                # If we're filtering on closed PRs, and only want those that are merged,
                # skip ones that were closed without merge.
                continue
            if state == 'closed' and since:
                # If this PR was closed prior to the last `since` interval of days, continue on
                # (it may have been *updated* - that is, referenced or commented on - more recently,
                #  but we just want to see what's been merged or closed in the past "since" days)
                closed_at = dateutil.parser.parse(issue["closed_at"][:-1])  # Remove TZ information
                if closed_at < since:
                    continue

            if org and issue.get("org") != category:
                # new category! print category header
                category = issue["org"]
                print("-- {category} ----".format(category=category))

            if 0:
                import pprint
                pprint.pprint(issue.obj)
            print(issue.format(ISSUE_FMT))
            num += 1
            adds += issue['pull']['additions']
            deletes += issue['pull']['deletions']

            if show_comments:
                comments_url = URLObject(issue['comments_url'])
                comments_url = comments_url.set_query_param("sort", "created")
                comments_url = comments_url.set_query_param("direction", "desc")
                comments = paginated_get(comments_url)
                last_five_comments = reversed(more_itertools.take(5, comments))
                for comment in last_five_comments:
                    print(comment.format(COMMENT_FMT))

    print()
    print("{num} pull requests; {adds}+ {deletes}-".format(num=num, adds=adds, deletes=deletes))

def main():
    repos = [ r.name for r in Repo.from_yaml() if r.track_pulls ]
    for when, num_authors in unique_authors(repos):
        print("{0:%Y-%m-%d}\t{1}".format(when, num_authors))