Example #1
def validate_config(config):
    if not config.has_section('general'):
        die("No [general] section found.")

    twiggy.quickSetup(name2level(config.get('general', 'log.level')),
                      config.get('general', 'log.file'))

    if not config.has_option('general', 'targets'):
        die("No targets= item in [general] found.")

    targets = config.get('general', 'targets')
    targets = [t.strip() for t in targets.split(",")]

    for target in targets:
        if target not in config.sections():
            die("No [%s] section found." % target)

    for option in ['bitly.api_user', 'bitly.api_key']:
        if not config.has_option('general', option):
            log.name('config').warning(
                "URLs will not be shortened with bit.ly")

    # Validate each target one by one.
    for target in targets:
        service = config.get(target, 'service')
        if not service:
            die("No 'service' in [%s]" % target)

        if service not in SERVICES:
            die("'%s' in [%s] is not a valid service." % (service, target))

        # Call the service-specific validator
        SERVICES[service].validate_config(config, target)
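
A minimal sketch (not from the original source) of an in-memory config that the validator above would accept, assuming SERVICES contains a 'github' entry:

try:
    import configparser                      # Python 3
except ImportError:
    import ConfigParser as configparser      # Python 2

config = configparser.RawConfigParser()
config.add_section('general')
config.set('general', 'targets', 'my_github')
config.set('general', 'log.level', 'DEBUG')
config.set('general', 'log.file', '/tmp/bugwarrior.log')
config.add_section('my_github')
config.set('my_github', 'service', 'github')

validate_config(config)  # dies with a message if anything required is missing
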
Example #2
def validate_config(config):
    if not config.has_section("general"):
        die("No [general] section found.")

    twiggy.quickSetup(name2level(config.get("general", "log.level")), config.get("general", "log.file"))

    if not config.has_option("general", "targets"):
        die("No targets= item in [general] found.")

    targets = config.get("general", "targets")
    targets = [t.strip() for t in targets.split(",")]

    for target in targets:
        if target not in config.sections():
            die("No [%s] section found." % target)

    for option in ["bitly.api_user", "bitly.api_key"]:
        if not config.has_option("general", option):
            log.name("config").warning("URLs will not be shortened with bit.ly")

    # Validate each target one by one.
    for target in targets:
        service = config.get(target, "service")
        if not service:
            die("No 'service' in [%s]" % target)

        if service not in SERVICES:
            die("'%s' in [%s] is not a valid service." % (service, target))

        # Call the service-specific validator
        SERVICES[service].validate_config(config, target)
Example #3
    def issues(self):
        if self.tag:
            url = self.base_url + "/api/0/projects?tags=" + self.tag
            response = requests.get(url)
            if not bool(response):
                raise IOError('Failed to talk to %r %r' % (url, response))

            all_repos = [r['name'] for r in response.json()['projects']]
        else:
            all_repos = [self.repo]

        repos = filter(self.filter_repos, all_repos)

        issues = []
        for repo in repos:
            issues.extend(self.get_issues(repo, ('issues', 'issues')))
            issues.extend(self.get_issues(repo, ('pull-requests', 'requests')))

        log.name(self.target).debug(" Found {0} issues.", len(issues))
        issues = filter(self.include, issues)
        log.name(self.target).debug(" Pruned down to {0} issues.", len(issues))

        for repo, issue in issues:
            # Stuff this value into the upstream dict for:
            # https://pagure.com/ralphbean/bugwarrior/issues/159
            issue['repo'] = repo

            issue_obj = self.get_issue_for_record(issue)
            extra = {
                'project': repo,
                'type': 'pull_request' if 'branch' in issue else 'issue',
                'annotations': self.annotations(issue, issue_obj)
            }
            issue_obj.update_extra(extra)
            yield issue_obj
Example #4
def pull(dry_run, flavor):
    """ Pull down tasks from forges and add them to your taskwarrior tasks.

    Relies on configuration in bugwarriorrc
    """
    twiggy.quickSetup()
    try:
        main_section = _get_section_name(flavor)

        # Load our config file
        config = load_config(main_section)

        tw_config = TaskWarriorBase.load_config(get_taskrc_path(config, main_section))
        lockfile_path = os.path.join(os.path.expanduser(tw_config["data"]["location"]), "bugwarrior.lockfile")

        lockfile = PIDLockFile(lockfile_path)
        lockfile.acquire(timeout=10)
        try:
            # Get all the issues.  This can take a while.
            issue_generator = aggregate_issues(config, main_section)

            # Stuff them in the taskwarrior db as necessary
            synchronize(issue_generator, config, main_section, dry_run)
        finally:
            lockfile.release()
    except LockTimeout:
        log.name("command").critical(
            "Your taskrc repository is currently locked. "
            "Remove the file at %s if you are sure no other "
            "bugwarrior processes are currently running." % (lockfile_path)
        )
    except:
        log.name("command").trace("error").critical("oh noes")
Example #5
def _aggregate_issues(conf, main_section, target, queue, service_name):
    """ This worker function is separated out from the main
    :func:`aggregate_issues` func only so that we can use multiprocessing
    on it for speed reasons.
    """

    start = time.time()

    try:
        service = SERVICES[service_name](conf, main_section, target)
        issue_count = 0
        for issue in service.issues():
            queue.put(issue)
            issue_count += 1
    except Exception as e:
        log.name(target).trace('error').critical(
            "Worker for [%s] failed: %s" % (target, e)
        )
        queue.put(
            (SERVICE_FINISHED_ERROR, (target, e))
        )
    else:
        queue.put(
            (SERVICE_FINISHED_OK, (target, issue_count, ))
        )
    finally:
        duration = time.time() - start
        log.name(target).info("Done with [%s] in %fs" % (target, duration))
Example #6
def aggregate_issues(conf):
    """ Return all issues from every target.

    Takes a config object.
    """
    log.name('bugwarrior').info("Starting to aggregate remote issues.")

    # Create and call service objects for every target in the config
    targets = [t.strip() for t in conf.get('general', 'targets').split(',')]

    # This multiprocessing stuff is kind of experimental.
    use_multiprocessing = conf.has_option('general', 'multiprocessing') and \
        asbool(conf.get('general', 'multiprocessing'))

    if use_multiprocessing:
        log.name('bugwarrior').info("Spawning %i workers." % len(targets))
        pool = multiprocessing.Pool(processes=len(targets))
        map_function = pool.map
    else:
        log.name('bugwarrior').info("Processing targets in serial.")
        map_function = map

    issues_by_target = map_function(
        _aggregate_issues,
        zip([conf] * len(targets), targets)
    )
    log.name('bugwarrior').info("Done aggregating remote issues.")

    if WORKER_FAILURE in issues_by_target:
        log.name('bugwarrior').critical("A worker failed.  Aborting.")
        raise RuntimeError('Worker failure')

    return sum(issues_by_target, [])
Example #7
    def issues(self):
        data = self.activecollab.get_my_tasks()
        label_data = self.activecollab.get_assignment_labels()
        labels = dict()
        for item in label_data:
            labels[item['id']] = re.sub(r'\W+', '_', item['name'])
        task_count = 0
        issues = []
        for key, record in data.iteritems():
            for task_id, task in record['assignments'].iteritems():
                task_count = task_count + 1
                # Add tasks
                if task['assignee_id'] == self.user_id:
                    task['label'] = labels.get(task['label_id'])
                    issues.append(task)
                if 'subtasks' in task:
                    for subtask_id, subtask in task['subtasks'].iteritems():
                        # Add subtasks
                        task_count = task_count + 1
                        if subtask['assignee_id'] == self.user_id:
                            # Add some data from the parent task
                            subtask['label'] = labels.get(subtask['label_id'])
                            subtask['project_id'] = task['project_id']
                            subtask['project'] = task['project']
                            subtask['task_id'] = task['task_id']
                            subtask['milestone'] = task['milestone']
                            issues.append(subtask)
        log.name(self.target).debug(" Found {0} total", task_count)
        log.name(self.target).debug(" Pruned down to {0}", len(issues))
        for issue in issues:
            issue_obj = self.get_issue_for_record(issue)
            extra = {'annotations': self.annotations(issue, issue_obj)}
            issue_obj.update_extra(extra)
            yield issue_obj
Example #8
    def issues(self):
        user = self.config.get(self.target, 'bitbucket.username')
        response = self.get_data('/users/' + user + '/')
        repos = [
            repo.get('slug') for repo in response.get('repositories')
            if repo.get('has_issues')
        ]

        issues = sum([self.pull(user + "/" + repo) for repo in repos], [])
        log.name(self.target).debug(" Found {0} total.", len(issues))

        closed = ['resolved', 'duplicate', 'wontfix', 'invalid']
        not_resolved = lambda tup: tup[1]['status'] not in closed
        issues = filter(not_resolved, issues)
        issues = filter(self.include, issues)
        log.name(self.target).debug(" Pruned down to {0}", len(issues))

        for tag, issue in issues:
            issue_obj = self.get_issue_for_record(issue)
            extras = {
                'project': tag.split('/')[1],
                'url': self.BASE_URL + '/'.join(
                    issue['resource_uri'].split('/')[3:]
                ).replace('issues', 'issue'),
                'annotations': self.get_annotations(tag, issue, issue_obj)
            }
            issue_obj.update_extra(extras)
            yield issue_obj
Example #9
    def issues(self):
        base_url = "https://" + self.config.get(self.target, 'trac.base_uri')
        tickets = self.trac.query_tickets('status!=closed&max=0')
        tickets = map(self.trac.get_ticket, tickets)
        issues = [(self.target, ticket[3]) for ticket in tickets]
        log.name(self.target).debug(" Found {0} total.", len(issues))

        # Build a url for each issue
        for i in range(len(issues)):
            issues[i][1]['url'] = "%s/ticket/%i" % (base_url, tickets[i][0])
            issues[i][1]['number'] = tickets[i][0]

        issues = filter(self.include, issues)
        log.name(self.target).debug(" Pruned down to {0}", len(issues))

        return [dict(
            description=self.description(
                issue['summary'], issue['url'],
                issue['number'], cls="issue"),
            project=tag,
            priority=self.priorities.get(
                issue['priority'],
                self.default_priority,
            ),
            **self.annotations(tag, issue)
        ) for tag, issue in issues]
Example #10
    def __init__(self, *args, **kw):
        super(BugzillaService, self).__init__(*args, **kw)
        self.base_uri = self.config_get('base_uri')
        self.username = self.config_get('username')
        self.ignore_cc = self.config_get_default('ignore_cc',
                                                 default=False,
                                                 to_type=lambda x: x == "True")
        self.query_url = self.config_get_default('query_url', default=None)
        self.include_needinfos = self.config_get_default(
            'include_needinfos', False, to_type=lambda x: x == "True")
        self.open_statuses = self.config_get_default(
            'open_statuses', _open_statuses, to_type=lambda x: x.split(','))
        log.name(self.target).debug(" filtering on statuses: {0}",
                                    self.open_statuses)

        # So more modern bugzilla's require that we specify
        # query_format=advanced along with the xmlrpc request.
        # https://bugzilla.redhat.com/show_bug.cgi?id=825370
        # ...but older bugzilla's don't know anything about that argument.
        # Here we make it possible for the user to specify whether they want
        # to pass that argument or not.
        self.advanced = asbool(self.config_get_default('advanced', 'no'))

        self.password = self.config_get_password('password', self.username)

        url = 'https://%s/xmlrpc.cgi' % self.base_uri
        self.bz = bugzilla.Bugzilla(url=url)
        self.bz.login(self.username, self.password)
Example #11
    def issues(self):
        base_url = "https://" + self.config.get(self.target, 'trac.base_uri')
        tickets = self.trac.query_tickets('status!=closed&max=0')
        tickets = map(self.trac.get_ticket, tickets)
        issues = [(self.target, ticket[3]) for ticket in tickets]
        log.name(self.target).debug(" Found {0} total.", len(issues))

        # Build a url for each issue
        for i in range(len(issues)):
            issues[i][1]['url'] = "%s/ticket/%i" % (base_url, tickets[i][0])
            issues[i][1]['number'] = tickets[i][0]

        issues = filter(self.include, issues)
        log.name(self.target).debug(" Pruned down to {0}", len(issues))

        return [
            dict(description=self.description(issue['summary'],
                                              issue['url'],
                                              issue['number'],
                                              cls="issue"),
                 project=tag,
                 priority=self.priorities.get(
                     issue['priority'],
                     self.default_priority,
                 ),
                 **self.annotations(tag, issue)) for tag, issue in issues
        ]
Example #12
    def issues(self):
        user = self.config.get(self.target, 'github.username')

        all_repos = githubutils.get_repos(username=user, auth=self.auth)
        assert (type(all_repos) == list)
        repos = filter(self.filter_repos, all_repos)

        issues = {}
        if self.involved_issues:
            issues.update(self.get_involved_issues(user))
        else:
            for repo in repos:
                issues.update(
                    self.get_owned_repo_issues(user + "/" + repo['name']))
        issues.update(self.get_directly_assigned_issues())
        log.name(self.target).debug(" Found {0} issues.", len(issues))
        issues = filter(self.include, issues.values())
        log.name(self.target).debug(" Pruned down to {0} issues.", len(issues))

        for tag, issue in issues:
            # Stuff this value into the upstream dict for:
            # https://github.com/ralphbean/bugwarrior/issues/159
            issue['repo'] = tag

            issue_obj = self.get_issue_for_record(issue)
            extra = {
                'project': tag.split('/')[1],
                'type': 'pull_request' if 'pull_request' in issue else 'issue',
                'annotations': self.annotations(tag, issue, issue_obj)
            }
            issue_obj.update_extra(extra)
            yield issue_obj
Example #13
    def __init__(self, *args, **kw):
        super(BugzillaService, self).__init__(*args, **kw)
        self.base_uri = self.config_get('base_uri')
        self.username = self.config_get('username')
        self.password = self.config_get('password')
        self.ignore_cc = self.config_get_default('ignore_cc', default=False,
                                                 to_type=lambda x: x == "True")
        self.query_url = self.config_get_default('query_url', default=None)
        self.include_needinfos = self.config_get_default(
            'include_needinfos', False, to_type=lambda x: x == "True")
        self.open_statuses = self.config_get_default(
            'open_statuses', _open_statuses, to_type=lambda x: x.split(','))
        log.name(self.target).debug(" filtering on statuses: {0}", self.open_statuses)

        # So more modern bugzilla's require that we specify
        # query_format=advanced along with the xmlrpc request.
        # https://bugzilla.redhat.com/show_bug.cgi?id=825370
        # ...but older bugzilla's don't know anything about that argument.
        # Here we make it possible for the user to specify whether they want
        # to pass that argument or not.
        self.advanced = asbool(self.config_get_default('advanced', 'no'))

        if not self.password or self.password.startswith("@oracle:"):
            self.password = get_service_password(
                self.get_keyring_service(self.config, self.target),
                self.username, oracle=self.password,
                interactive=self.config.interactive
            )

        url = 'https://%s/xmlrpc.cgi' % self.base_uri
        self.bz = bugzilla.Bugzilla(url=url)
        self.bz.login(self.username, self.password)
Example #14
    def issues(self):
        user = self.config.get(self.target, 'github.username')

        all_repos = githubutils.get_repos(username=user, auth=self.auth)
        assert(type(all_repos) == list)
        repos = filter(self.filter_repos, all_repos)

        issues = {}
        if self.involved_issues:
            issues.update(
                self.get_involved_issues(user)
            )
        else:
            for repo in repos:
                issues.update(
                    self.get_owned_repo_issues(user + "/" + repo['name'])
                )
        issues.update(self.get_directly_assigned_issues())
        log.name(self.target).debug(" Found {0} issues.", len(issues))
        issues = filter(self.include, issues.values())
        log.name(self.target).debug(" Pruned down to {0} issues.", len(issues))

        for tag, issue in issues:
            # Stuff this value into the upstream dict for:
            # https://github.com/ralphbean/bugwarrior/issues/159
            issue['repo'] = tag

            issue_obj = self.get_issue_for_record(issue)
            extra = {
                'project': tag.split('/')[1],
                'type': 'pull_request' if 'pull_request' in issue else 'issue',
                'annotations': self.annotations(tag, issue, issue_obj)
            }
            issue_obj.update_extra(extra)
            yield issue_obj
Example #15
def _aggregate_issues(conf, main_section, target, queue, service_name):
    """ This worker function is separated out from the main
    :func:`aggregate_issues` func only so that we can use multiprocessing
    on it for speed reasons.
    """

    start = time.time()

    try:
        service = get_service(service_name)(conf, main_section, target)
        issue_count = 0
        for issue in service.issues():
            queue.put(issue)
            issue_count += 1
    except Exception as e:
        log.name(target).trace('error').critical("Worker for [%s] failed: %s" %
                                                 (target, e))
        queue.put((SERVICE_FINISHED_ERROR, (target, e)))
    else:
        queue.put((SERVICE_FINISHED_OK, (
            target,
            issue_count,
        )))
    finally:
        duration = time.time() - start
        log.name(target).info("Done with [%s] in %fs" % (target, duration))
Example #16
    def issues(self):
        if self.tag:
            url = self.base_url + "/api/0/projects?tags=" + self.tag
            response = requests.get(url)
            if not bool(response):
                raise IOError('Failed to talk to %r %r' % (url, response))

            all_repos = [r['name'] for r in response.json()['projects']]
        else:
            all_repos = [self.repo]

        repos = filter(self.filter_repos, all_repos)

        issues = []
        for repo in repos:
            issues.extend(self.get_issues(repo, ('issues', 'issues')))
            issues.extend(self.get_issues(repo, ('pull-requests', 'requests')))

        log.name(self.target).debug(" Found {0} issues.", len(issues))
        issues = filter(self.include, issues)
        log.name(self.target).debug(" Pruned down to {0} issues.", len(issues))

        for repo, issue in issues:
            # Stuff this value into the upstream dict for:
            # https://pagure.com/ralphbean/bugwarrior/issues/159
            issue['repo'] = repo

            issue_obj = self.get_issue_for_record(issue)
            extra = {
                'project': repo,
                'type': 'pull_request' if 'branch' in issue else 'issue',
                'annotations': self.annotations(issue, issue_obj)
            }
            issue_obj.update_extra(extra)
            yield issue_obj
Example #17
def aggregate_issues(conf):
    """ Return all issues from every target.

    Takes a config object.
    """
    log.name('bugwarrior').info("Starting to aggregate remote issues.")

    # Create and call service objects for every target in the config
    targets = [t.strip() for t in conf.get('general', 'targets').split(',')]

    # This multiprocessing stuff is kind of experimental.
    use_multiprocessing = conf.has_option('general', 'multiprocessing') and \
        asbool(conf.get('general', 'multiprocessing'))

    if use_multiprocessing:
        log.name('bugwarrior').info("Spawning %i workers." % len(targets))
        pool = multiprocessing.Pool(processes=len(targets))
        map_function = pool.map
    else:
        log.name('bugwarrior').info("Processing targets in serial.")
        map_function = map

    issues_by_target = map_function(_aggregate_issues,
                                    zip([conf] * len(targets), targets))
    log.name('bugwarrior').info("Done aggregating remote issues.")
    if WORKER_FAILURE in issues_by_target:
        log.name('bugwarrior').critical("A worker failed.  Aborting.")
        raise RuntimeError('Worker failure')
    return sum(issues_by_target, [])
Example #18
def pull():
    """ Pull down tasks from forges and add them to your taskwarrior tasks.

    Relies on configuration in ~/.bugwarriorrc
    """
    twiggy.quickSetup()
    try:
        # Load our config file
        config = load_config()

        tw_config = TaskWarriorBase.load_config(get_taskrc_path(config))
        lockfile_path = os.path.join(
            os.path.expanduser(tw_config['data']['location']),
            'bugwarrior.lockfile')

        lockfile = PIDLockFile(lockfile_path)
        lockfile.acquire(timeout=10)
        try:
            # Get all the issues.  This can take a while.
            issue_generator = aggregate_issues(config)

            # Stuff them in the taskwarrior db as necessary
            synchronize(issue_generator, config)
        finally:
            lockfile.release()
    except LockTimeout:
        log.name('command').critical(
            'Your taskrc repository is currently locked. '
            'Remove the file at %s if you are sure no other '
            'bugwarrior processes are currently running.' % (lockfile_path))
    except:
        log.name('command').trace('error').critical('oh noes')
Example #19
    def issues(self):
        user = self.config.get(self.target, 'bitbucket.username')
        response = self.get_data('/users/' + user + '/')
        repos = [
            repo.get('slug') for repo in response.get('repositories')
            if repo.get('has_issues')
        ]

        issues = sum([self.pull(user + "/" + repo) for repo in repos], [])
        log.name(self.target).debug(" Found {0} total.", len(issues))

        closed = ['resolved', 'duplicate', 'wontfix', 'invalid']
        not_resolved = lambda tup: tup[1]['status'] not in closed
        issues = filter(not_resolved, issues)
        issues = filter(self.include, issues)
        log.name(self.target).debug(" Pruned down to {0}", len(issues))

        for tag, issue in issues:
            issue_obj = self.get_issue_for_record(issue)
            extras = {
                'project':
                tag.split('/')[1],
                'url':
                self.BASE_URL +
                '/'.join(issue['resource_uri'].split('/')[3:]).replace(
                    'issues', 'issue'),
                'annotations':
                self.get_annotations(tag, issue, issue_obj)
            }
            issue_obj.update_extra(extras)
            yield issue_obj
Example #20
def getNumber(ndigits):
    min = 1
    max = (ndigits - 2)**2
    n = valid(ndigits, random.randint(min, max))
    while n is False:
        n = valid(ndigits, random.randint(min, max))
        l.name("getNumber").fields(number=n).info("getting numbers")
    return n
Example #21
    def issues(self):
        email = self.config.get(self.target, 'bugzilla.username')
        # TODO -- doing something with blockedby would be nice.

        query = dict(
            column_list=self.column_list,
            bug_status=self.not_closed_statuses,
            email1=email,
            emailreporter1=1,
            emailcc1=1,
            emailassigned_to1=1,
            emailqa_contact1=1,
            emailtype1="substring",
        )

        if self.advanced:
            # Required for new bugzilla
            # https://bugzilla.redhat.com/show_bug.cgi?id=825370
            query['query_format'] = 'advanced'

        bugs = self.bz.query(query)

        # Convert to dicts
        bugs = [
            dict(
                ((col, getattr(bug, col)) for col in self.column_list)
            ) for bug in bugs
        ]

        issues = [(self.target, bug) for bug in bugs]
        log.name(self.target).debug(" Found {0} total.", len(issues))

        # Build a url for each issue
        base_url = "https://%s/show_bug.cgi?id=" % \
            self.config.get(self.target, 'bugzilla.base_uri')
        for i in range(len(issues)):
            issues[i][1]['url'] = base_url + str(issues[i][1]['id'])
            issues[i][1]['component'] = \
                issues[i][1]['component'].lower().replace(' ', '-')

        # XXX - Note that we don't use the .include() method like all the other
        # IssueService child classes.  That's because the bugzilla xmlrpc API
        # can already do a lot of the filtering we want for us.

        #issues = filter(self.include, issues)
        #log.name(self.target).debug(" Pruned down to {0}", len(issues))

        return [dict(
            description=self.description(
                issue['summary'], issue['url'],
                issue['id'], cls="issue"),
            project=issue['component'],
            priority=self.priorities.get(
                issue['priority'],
                self.default_priority,
            ),
            **self.annotations(tag, issue)
        ) for tag, issue in issues]
Example #22
def merge_left(field, local_task, remote_issue, hamming=False):
    """ Merge array field from the remote_issue into local_task

    * Local 'left' entries are preserved without modification
    * Remote 'left' are appended to task if not present in local.

    :param `field`: Task field to merge.
    :param `local_task`: `taskw.task.Task` object into which to merge
        remote changes.
    :param `remote_issue`: `dict` instance from which to merge into
        local task.
    :param `hamming`: (default `False`) If `True`, compare entries by
        truncating to maximum length, and comparing hamming distances.
        Useful generally only for annotations.

    """

    # Ensure that empty defaults are present
    local_field = local_task.get(field, [])
    remote_field = remote_issue.get(field, [])

    # We need to make sure an array exists for this field because
    # we will be appending to it in a moment.
    if field not in local_task:
        local_task[field] = []

    # If a remote does not appear in local, add it to the local task
    new_count = 0
    for remote in remote_field:
        found = False
        for local in local_field:
            if (
                # For annotations, they don't have to match *exactly*.
                (
                    hamming
                    and get_annotation_hamming_distance(remote, local) == 0
                )
                # But for everything else, they should.
                or (
                    remote == local
                )
            ):
                found = True
                break
        if not found:
            log.name('db').debug(
                "%s not found in %r" % (remote, local_field)
            )
            local_task[field].append(remote)
            new_count += 1
    if new_count > 0:
        log.name('db').debug(
            'Added %s new values to %s (total: %s)' % (
                new_count,
                field,
                len(local_task[field]),
            )
        )
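
An illustrative call, using plain dicts in place of a taskw.task.Task (merge_left only needs dict-style access); with hamming left at its default, entries are compared by equality:

local_task = {'tags': ['bug']}
remote_issue = {'tags': ['bug', 'upstream']}

merge_left('tags', local_task, remote_issue)
# local_task['tags'] is now ['bug', 'upstream']: the existing local entry
# was preserved, and only the missing remote entry was appended.
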
Example #23
    def issues(self):
        email = self.config.get(self.target, 'bugzilla.username')
        # TODO -- doing something with blockedby would be nice.

        query = dict(
            column_list=self.column_list,
            bug_status=self.not_closed_statuses,
            email1=email,
            emailreporter1=1,
            emailcc1=1,
            emailassigned_to1=1,
            emailqa_contact1=1,
            emailtype1="substring",
        )

        if self.advanced:
            # Required for new bugzilla
            # https://bugzilla.redhat.com/show_bug.cgi?id=825370
            query['query_format'] = 'advanced'

        bugs = self.bz.query(query)

        # Convert to dicts
        bugs = [
            dict(((col, getattr(bug, col)) for col in self.column_list))
            for bug in bugs
        ]

        issues = [(self.target, bug) for bug in bugs]
        log.name(self.target).debug(" Found {0} total.", len(issues))

        # Build a url for each issue
        base_url = "https://%s/show_bug.cgi?id=" % \
            self.config.get(self.target, 'bugzilla.base_uri')
        for i in range(len(issues)):
            issues[i][1]['url'] = base_url + str(issues[i][1]['id'])
            issues[i][1]['component'] = \
                issues[i][1]['component'].lower().replace(' ', '-')

        # XXX - Note that we don't use the .include() method like all the other
        # IssueService child classes.  That's because the bugzilla xmlrpc API
        # can already do a lot of the filtering we want for us.

        #issues = filter(self.include, issues)
        #log.name(self.target).debug(" Pruned down to {0}", len(issues))

        return [
            dict(description=self.description(issue['summary'],
                                              issue['url'],
                                              issue['id'],
                                              cls="issue"),
                 project=issue['component'],
                 priority=self.priorities.get(
                     issue['priority'],
                     self.default_priority,
                 ),
                 **self.annotations(tag, issue)) for tag, issue in issues
        ]
Example #24
def getData(cases):
    global DATA

    i = 0
    f = open(outfile, "a+")
    for ncase in range(cases):
        rs = (ncase + 1, DATA[ncase], process(DATA[ncase]))
        l.name("getData").fields(ncase=rs[0], data=rs[1], generated=rs[2]).info("ordered")
        f.write("Case #%d: %s \n" % (rs[0], rs[2]))
    f.close()
Example #25
def run_hooks(conf, name):
    if conf.has_option('hooks', name):
        pre_import = [t.strip() for t in conf.get('hooks', name).split(',')]
        if pre_import is not None:
            for hook in pre_import:
                exit_code = subprocess.call(hook, shell=True)
                if exit_code != 0:
                    msg = 'Non-zero exit code %d on hook %s' % (exit_code,
                                                                hook)
                    log.name('hooks:%s' % name).error(msg)
                    raise RuntimeError(msg)
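
Illustrative only: run_hooks() reads a comma-separated command list from the [hooks] section and shells each one out, so a config like the following fires both commands (the commands here are placeholders):

import subprocess                            # used by run_hooks above
try:
    import configparser                      # Python 3
except ImportError:
    import ConfigParser as configparser      # Python 2

conf = configparser.RawConfigParser()
conf.add_section('hooks')
conf.set('hooks', 'pre_import', 'echo first, echo second')
run_hooks(conf, 'pre_import')  # raises RuntimeError on any non-zero exit
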
Example #26
    def get_involved_issues(self, user):
        """ Grab all 'interesting' issues """
        issues = {}
        for issue in githubutils.get_involved_issues(user, auth=self.auth):
            url = issue['html_url']
            tag = re.match('.*github\\.com/(.*)/(issues|pull)/[^/]*$', url)
            if tag is None:
                log.name(self.target).critical(" Unrecognized issue URL: {0}.", url)
                continue
            issues[url] = (tag.group(1), issue)
        return issues
Example #27
def getData(cases, lines):
    global DATA

    i = 0
    f = open(outfile, "a+")
    for ncase in range(cases):
        rs = (ncase + 1, DATA[ncase], process(DATA[ncase]))
        l.name("getData").fields(ncase=rs[0], data=rs[1],
                                 switches=rs[2]).info("switches found")
        f.write("Case #%d: %d\n" % (rs[0], rs[2]))
    f.close()
Example #28
    def issues(self):
        issues = self.client.find_issues(self.user_id)
        log.name(self.target).debug(" Found {0} total.", len(issues))

        return [dict(
            description=self.description(
                issue["subject"],
                self.get_issue_url(issue),
                issue["id"], cls="issue"),
            project=self.get_project_name(issue),
            priority=self.default_priority,
        ) for issue in issues]
Example #29
    def issues(self):
        issues = self.client.get_task_list()
        log.name(self.target).debug(" Remote has {0} total issues.",
                                    len(issues))

        # Filter out closed tasks.
        issues = filter(lambda i: i["status"] == 1, issues)
        log.name(self.target).debug(" Remote has {0} active issues.",
                                    len(issues))

        for issue in issues:
            yield self.get_issue_for_record(issue)
Example #30
    def issues(self):
        issues = self.client.get_task_list()
        log.name(self.target).debug(
            " Remote has {0} total issues.", len(issues))

        # Filter out closed tasks.
        issues = filter(lambda i: i["status"] == 1, issues)
        log.name(self.target).debug(
            " Remote has {0} active issues.", len(issues))

        for issue in issues:
            yield self.get_issue_for_record(issue)
Example #31
    def get_involved_issues(self, user):
        """ Grab all 'interesting' issues """
        issues = {}
        for issue in githubutils.get_involved_issues(user, auth=self.auth):
            url = issue['html_url']
            tag = re.match('.*github\\.com/(.*)/(issues|pull)/[^/]*$', url)
            if tag is None:
                log.name(self.target).critical(" Unrecognized issue URL: {0}.",
                                               url)
                continue
            issues[url] = (tag.group(1), issue)
        return issues
Example #32
def getData(cases):
    global DATA

    i = 0
    f = open(outfile, "a+")
    for ncase in range(cases):
        rs = (ncase + 1, DATA[ncase], process(DATA[ncase]))
        l.name("getData").fields(ncase=rs[0], data=rs[1],
                                 generated=len(rs[2])).info("jamcoins found")
        f.write("Case #%d:\n" % (rs[0]))
        f.write("\n".join(rs[2]) + "\n")
    f.close()
Example #33
def pull():
    try:
        # Load our config file
        config = load_config()

        # Get all the issues.  This can take a while.
        issues = aggregate_issues(config)

        # Stuff them in the taskwarrior db as necessary
        synchronize(issues, config)
    except:
        log.name('command').trace('error').critical('oh noes')
Example #34
    def issues(self):
        cases = self.jira.search_issues(self.query, maxResults=-1)

        jira_version = 5  # Default version number
        if self.config.has_option(self.target, 'jira.version'):
            jira_version = self.config.getint(self.target, 'jira.version')
        if jira_version == 4:
            # Convert for older jira versions that don't support the new API
            cases = [self.__convert_for_jira4(case) for case in cases]

        log.name(self.target).debug(" Found {0} total.", len(cases))
        return [self.__issue(case, jira_version) for case in cases]
Example #35
    def issues(self):
        issues = self.client.get_actual_tasks()
        log.name(self.target).debug(" Found {0} total.", len(issues))

        return [dict(
            description=self.description(
                self.get_issue_title(issue),
                self.get_issue_url(issue),
                self.get_issue_id(issue),
                cls="issue"),
            project=self.project_name,
            priority=self.default_priority,
        ) for issue in issues]
Example #36
    def __init__(self, config, target, shorten):
        self.config = config
        self.target = target
        self.shorten = shorten
        if config.has_option('general', 'description_length'):
            self.desc_len = self.config.getint('general', 'description_length')
        else:
            self.desc_len = 35
        if config.has_option('general', 'annotation_length'):
            self.anno_len = self.config.getint('general', 'annotation_length')
        else:
            self.anno_len = 45
        log.name(target).info("Working on [{0}]", self.target)
Example #37
    def __init__(self, config, target, shorten):
        self.config = config
        self.target = target
        self.shorten = shorten
        if config.has_option('general', 'description_length'):
            self.desc_len = self.config.getint('general', 'description_length')
        else:
            self.desc_len = 35
        if config.has_option('general', 'annotation_length'):
            self.anno_len = self.config.getint('general', 'annotation_length')
        else:
            self.anno_len = 45
        log.name(target).info("Working on [{0}]", self.target)
Example #38
    def issues(self):
        issues = self.client.get_actual_tasks()
        log.name(self.target).debug(" Found {0} total.", len(issues))

        return [dict(
            description=self.description(
                self.get_issue_title(issue),
                self.get_issue_url(issue),
                self.get_issue_id(issue),
                cls="issue"),
            project=self.project_name,
            priority=self.default_priority,
        ) for issue in issues]
Example #39
def process(case):

    ndigits = case[0]
    qty = case[1]
    result = []
    for n in range(qty):
        l.name("process").fields(n=n).info("searching jamcoin...")
        jamcoin = getNumber(ndigits)
        while jamcoin in result:
            jamcoin = getNumber(ndigits)
        result.append(jamcoin)
        l.name("process").fields(jamcoin=jamcoin).info("jamcoin found")

    return result
Example #40
def run_hooks(conf, name):
    if conf.has_option('hooks', name):
        pre_import = [
            t.strip() for t in conf.get('hooks', name).split(',')
        ]
        if pre_import is not None:
            for hook in pre_import:
                exit_code = subprocess.call(hook, shell=True)
                if exit_code != 0:
                    msg = 'Non-zero exit code %d on hook %s' % (
                        exit_code, hook
                    )
                    log.name('hooks:%s' % name).error(msg)
                    raise RuntimeError(msg)
Example #41
def merge_left(field, local_task, remote_issue, hamming=False):
    """ Merge array field from the remote_issue into local_task

    * Local 'left' entries are preserved without modification
    * Remote 'left' are appended to task if not present in local.

    :param `field`: Task field to merge.
    :param `local_task`: `taskw.task.Task` object into which to merge
        remote changes.
    :param `remote_issue`: `dict` instance from which to merge into
        local task.
    :param `hamming`: (default `False`) If `True`, compare entries by
        truncating to maximum length, and comparing hamming distances.
        Useful generally only for annotations.

    """

    # Ensure that empty defaults are present
    local_field = local_task.get(field, [])
    remote_field = remote_issue.get(field, [])

    # We need to make sure an array exists for this field because
    # we will be appending to it in a moment.
    if field not in local_task:
        local_task[field] = []

    # If a remote does not appear in local, add it to the local task
    new_count = 0
    for remote in remote_field:
        found = False
        for local in local_field:
            if (
                    # For annotations, they don't have to match *exactly*.
                (hamming
                 and get_annotation_hamming_distance(remote, local) == 0)
                    # But for everything else, they should.
                    or (remote == local)):
                found = True
                break
        if not found:
            log.name('db').debug("%s not found in %r" % (remote, local_field))
            local_task[field].append(remote)
            new_count += 1
    if new_count > 0:
        log.name('db').debug('Added %s new values to %s (total: %s)' % (
            new_count,
            field,
            len(local_task[field]),
        ))
Example #42
    def test_format(self):
        logger = log.name("spam")
        list_output = ListOutput(format=LoggingBridgeFormat())
        messages = list_output.messages
        add_emitters(("spam", DEBUG, None, list_output))
        logger.error("eggs")
        self.failUnlessEqual(messages[0], ('|eggs\n', ERROR, 'spam'))
Example #43
    def __init__(self, raw_email, debug=False):
        '''
            Setup the base options of the copy/convert setup
        '''
        self.raw_email = raw_email
        self.log_processing = StringIO()
        self.log_content = StringIO()
        self.tree(self.raw_email)

        twiggy_out = outputs.StreamOutput(formats.shell_format, stream=self.log_processing)
        emitters['*'] = filters.Emitter(levels.DEBUG, True, twiggy_out)

        self.log_name = log.name('files')

        self.cur_attachment = None

        self.debug = debug
        if self.debug:
            if not os.path.exists('debug_logs'):
                os.makedirs('debug_logs')
            self.log_debug_err = os.path.join('debug_logs', 'debug_stderr.log')
            self.log_debug_out = os.path.join('debug_logs', 'debug_stdout.log')
        else:
            self.log_debug_err = os.devnull
            self.log_debug_out = os.devnull
Example #44
    def test_sanity(self):
        from twiggy import add_emitters, log
        from twiggy.logging_compat import LoggingBridgeOutput, DEBUG
        logger = log.name("decoy")
        add_emitters(("decoy", DEBUG, None, LoggingBridgeOutput()))
        logger.error("spam")
        logger.notice("eggs")
Example #45
    def __init__(self, root_src, root_dst, debug=False):
        """Initialized with path to source and dest directories."""
        self.src_root_dir = root_src
        self.dst_root_dir = root_dst
        self.log_root_dir = os.path.join(self.dst_root_dir, 'logs')
        self._safe_rmtree(self.log_root_dir)
        self._safe_mkdir(self.log_root_dir)
        self.log_processing = os.path.join(self.log_root_dir, 'processing.log')
        self.log_content = os.path.join(self.log_root_dir, 'content.log')
        self.tree(self.src_root_dir)

        quick_setup(file=self.log_processing)
        self.log_name = log.name('files')
        self.resources_path = os.path.join(
            os.path.abspath(os.path.dirname(__file__)), 'data')
        os.environ["PATH"] += os.pathsep + self.resources_path

        self.cur_file = None

        self.debug = debug
        if self.debug:
            self.log_debug_err = os.path.join(self.log_root_dir,
                                              'debug_stderr.log')
            self.log_debug_out = os.path.join(self.log_root_dir,
                                              'debug_stdout.log')
        else:
            self.log_debug_err = os.devnull
            self.log_debug_out = os.devnull
Example #46
    def __init__(self, root_src, root_dst, debug=False):
        '''
            Setup the base options of the copy/convert setup
        '''
        self.src_root_dir = root_src
        self.dst_root_dir = root_dst
        self.log_root_dir = os.path.join(self.dst_root_dir, 'logs')
        self._safe_rmtree(self.log_root_dir)
        self._safe_mkdir(self.log_root_dir)
        self.log_processing = os.path.join(self.log_root_dir, 'processing.log')

        quick_setup(file=self.log_processing)
        self.log_name = log.name('files')
        self.resources_path = os.path.join(os.path.abspath(os.path.dirname(__file__)), 'data')
        os.environ["PATH"] += os.pathsep + self.resources_path

        self.cur_file = None

        self.debug = debug
        if self.debug:
            self.log_debug_err = os.path.join(self.log_root_dir, 'debug_stderr.log')
            self.log_debug_out = os.path.join(self.log_root_dir, 'debug_stdout.log')
        else:
            self.log_debug_err = os.devnull
            self.log_debug_out = os.devnull
Example #47
    def issues(self):
        tmpl = 'https://{host}/api/v3/projects'
        all_repos = self._fetch(tmpl)
        repos = filter(self.filter_repos, all_repos)

        repo_map = {}
        issues = {}
        for repo in repos:
            rid = repo['id']
            repo_map[rid] = repo
            issues.update(
                self.get_repo_issues(rid)
            )
        log.name(self.target).debug(" Found {0} issues.", len(issues))
        issues = filter(self.include, issues.values())
        log.name(self.target).debug(" Pruned down to {0} issues.", len(issues))

        for rid, issue in issues:
            repo = repo_map[rid]
            issue['repo'] = repo['path']

            issue_obj = self.get_issue_for_record(issue)
            issue_url = '%s/issues/%d' % (repo['web_url'], issue['iid'])
            extra = {
                'issue_url': issue_url,
                'project': repo['path'],
                'type': 'issue',
                'annotations': self.annotations(repo, issue_url, 'issues', issue, issue_obj)
            }
            issue_obj.update_extra(extra)
            yield issue_obj

        if not self.filter_merge_requests:
            merge_requests = {}
            for repo in repos:
                rid = repo['id']
                merge_requests.update(
                    self.get_repo_merge_requests(rid)
                )
            log.name(self.target).debug(" Found {0} merge requests.", len(merge_requests))
            merge_requests = filter(self.include, merge_requests.values())
            log.name(self.target).debug(" Pruned down to {0} merge requests.", len(merge_requests))

            for rid, issue in merge_requests:
                repo = repo_map[rid]
                issue['repo'] = repo['path']

                issue_obj = self.get_issue_for_record(issue)
                issue_url = '%s/merge_requests/%d' % (repo['web_url'], issue['iid'])
                extra = {
                    'issue_url': issue_url,
                    'project': repo['path'],
                    'type': 'merge_request',
                    'annotations': self.annotations(repo, issue_url, 'merge_requests', issue, issue_obj)
                }
                issue_obj.update_extra(extra)
                yield issue_obj
Example #48
    def __init__(self, config, main_section, target):
        self.config = config
        self.main_section = main_section
        self.target = target

        self.desc_len = 35
        if config.has_option(self.main_section, 'description_length'):
            self.desc_len = self.config.getint(self.main_section, 'description_length')

        self.anno_len = 45
        if config.has_option(self.main_section, 'annotation_length'):
            self.anno_len = self.config.getint(self.main_section, 'annotation_length')

        self.inline_links = True
        if config.has_option(self.main_section, 'inline_links'):
            self.inline_links = asbool(config.get(self.main_section, 'inline_links'))

        self.annotation_links = not self.inline_links
        if config.has_option(self.main_section, 'annotation_links'):
            self.annotation_links = asbool(
                config.get(self.main_section, 'annotation_links')
            )

        self.annotation_comments = True
        if config.has_option(self.main_section, 'annotation_comments'):
            self.annotation_comments = asbool(
                config.get(self.main_section, 'annotation_comments')
            )

        self.shorten = False
        if config.has_option(self.main_section, 'shorten'):
            self.shorten = asbool(config.get(self.main_section, 'shorten'))

        self.add_tags = []
        if config.has_option(self.target, 'add_tags'):
            for raw_option in self.config.get(
                self.target, 'add_tags'
            ).split(','):
                option = raw_option.strip(' +;')
                if option:
                    self.add_tags.append(option)

        self.default_priority = 'M'
        if config.has_option(self.target, 'default_priority'):
            self.default_priority = config.get(self.target, 'default_priority')

        log.name(target).info("Working on [{0}]", self.target)
Example #49
    def __init__(self, config, main_section, target):
        self.config = config
        self.main_section = main_section
        self.target = target

        self.desc_len = 35
        if config.has_option(self.main_section, 'description_length'):
            self.desc_len = self.config.getint(self.main_section,
                                               'description_length')

        self.anno_len = 45
        if config.has_option(self.main_section, 'annotation_length'):
            self.anno_len = self.config.getint(self.main_section,
                                               'annotation_length')

        self.inline_links = True
        if config.has_option(self.main_section, 'inline_links'):
            self.inline_links = asbool(
                config.get(self.main_section, 'inline_links'))

        self.annotation_links = not self.inline_links
        if config.has_option(self.main_section, 'annotation_links'):
            self.annotation_links = asbool(
                config.get(self.main_section, 'annotation_links'))

        self.annotation_comments = True
        if config.has_option(self.main_section, 'annotation_comments'):
            self.annotation_comments = asbool(
                config.get(self.main_section, 'annotation_comments'))

        self.shorten = False
        if config.has_option(self.main_section, 'shorten'):
            self.shorten = asbool(config.get(self.main_section, 'shorten'))

        self.add_tags = []
        if config.has_option(self.target, 'add_tags'):
            for raw_option in self.config.get(self.target,
                                              'add_tags').split(','):
                option = raw_option.strip(' +;')
                if option:
                    self.add_tags.append(option)

        self.default_priority = 'M'
        if config.has_option(self.target, 'default_priority'):
            self.default_priority = config.get(self.target, 'default_priority')

        log.name(target).info("Working on [{0}]", self.target)
Example #50
def aggregate_issues(conf, main_section):
    """ Return all issues from every target. """
    log.name('bugwarrior').info("Starting to aggregate remote issues.")

    # Create and call service objects for every target in the config
    targets = [t.strip() for t in conf.get(main_section, 'targets').split(',')]

    queue = multiprocessing.Queue()

    log.name('bugwarrior').info("Spawning %i workers." % len(targets))
    processes = []

    if (
        conf.has_option(main_section, 'development')
        and asbool(conf.get(main_section, 'development'))
    ):
        for target in targets:
            _aggregate_issues(
                conf,
                main_section,
                target,
                queue,
                conf.get(target, 'service')
            )
    else:
        for target in targets:
            proc = multiprocessing.Process(
                target=_aggregate_issues,
                args=(conf, main_section, target, queue, conf.get(target, 'service'))
            )
            proc.start()
            processes.append(proc)

            # Sleep for 1 second here to try and avoid a race condition where
            # all N workers start up and ask the gpg-agent process for
            # information at the same time.  This causes gpg-agent to fumble
            # and tell some of our workers some incomplete things.
            time.sleep(1)

    currently_running = len(targets)
    while currently_running > 0:
        issue = queue.get(True)
        if isinstance(issue, tuple):
            completion_type, args = issue
            if completion_type == SERVICE_FINISHED_ERROR:
                target, e = args
                log.name('bugwarrior').info("Terminating workers")
                for process in processes:
                    process.terminate()
                raise RuntimeError(
                    "critical error in target '{}'".format(target))
            currently_running -= 1
            continue
        yield issue

    log.name('bugwarrior').info("Done aggregating remote issues.")
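
Because this version of aggregate_issues() is a generator, callers stream issues as the workers produce them; a hypothetical caller (consume_issue is a placeholder):

for issue in aggregate_issues(config, 'general'):
    consume_issue(issue)
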
Example #51
    def __init__(self, hcls, name='Hook', echo=False, baseclass=Hooked, *args):
        self.logger = log.name(name)
        self.echo = echo
        self.log('Created')
        self.args = args
        if issubclass(hcls, baseclass):
            self.log(str(hcls))
            self.hcls = self.filter(hcls.get_all())
Example #52
def main():

    ## Twiggy logger setup
    twiggy_setup()
    log.name("main").info("-------------------- START --------------------")

    op = create_option_parser()
    args = op.parse_args()

    df = "%Y/%m/%d-%H:%M:%S"

    start = dt.strptime(args.start, df) if args.start else None
    end = dt.strptime(args.end, df) if args.end else None

    messages = get_data(args.db_name, start, end)
    export_data(args.file_name, messages)

    log.name("main").info("-------------------- STOP --------------------")
Example #53
def main():

    ## Twiggy logger setup
    twiggy_setup()
    log.name('main').debug('-------------------- START --------------------')

    op = create_option_parser()
    args = op.parse_args()

    url = args.url
    df = args.date_format
    regex = args.regex
    db_name = args.db_name
    mpp = args.messages_per_page

    c = Collector(url=url, mpp=mpp, regex=regex, df=df, db=db_name)
    c.process()
    log.name('main').debug('-------------------- STOP --------------------')
Example #54
    def issues(self):
        user = self.config.get(self.target, 'github.username')

        all_repos = githubutils.get_repos(username=user, auth=self.auth)
        assert(type(all_repos) == list)
        repos = filter(self.filter_repos_for_issues, all_repos)

        issues = {}
        for repo in repos:
            issues.update(
                self.get_owned_repo_issues(user + "/" + repo['name'])
            )
        issues.update(self.get_directly_assigned_issues())
        log.name(self.target).debug(" Found {0} issues.", len(issues))
        issues = filter(self.include, issues.values())
        log.name(self.target).debug(" Pruned down to {0} issues.", len(issues))

        # Next, get all the pull requests (and don't prune by default)
        repos = filter(self.filter_repos_for_prs, all_repos)
        requests = sum([self._reqs(user + "/" + r['name']) for r in repos], [])
        log.name(self.target).debug(" Found {0} pull requests.", len(requests))
        if self.filter_pull_requests:
            requests = filter(self.include, requests)
            log.name(self.target).debug(
                " Pruned down to {0} pull requests.",
                len(requests)
            )

        # For pull requests, github lists an 'issue' and a 'pull request' with
        # the same id and the same URL.  So, if we find any pull requests,
        # let's strip those out of the "issues" list so that we don't have
        # unnecessary duplicates.
        request_urls = [r[1]['html_url'] for r in requests]
        issues = [i for i in issues if not i[1]['html_url'] in request_urls]

        for tag, issue in issues:
            issue_obj = self.get_issue_for_record(issue)
            extra = {
                'project': tag.split('/')[1],
                'type': 'issue',
                'annotations': self.annotations(tag, issue, issue_obj)
            }
            issue_obj.update_extra(extra)
            yield issue_obj

        for tag, request in requests:
            issue_obj = self.get_issue_for_record(request)
            extra = {
                'project': tag.split('/')[1],
                'type': 'pull_request',
                'annotations': self.annotations(tag, request, issue_obj)
            }
            issue_obj.update_extra(extra)
            yield issue_obj
Example #55
    def issues(self):
        email = self.username
        # TODO -- doing something with blockedby would be nice.

        query = dict(
            column_list=self.COLUMN_LIST,
            bug_status=self.OPEN_STATUSES,
            email1=email,
            emailreporter1=1,
            emailassigned_to1=1,
            emailqa_contact1=1,
            emailtype1="substring",
        )

        if not self.ignore_cc:
            query['emailcc1'] = 1

        if self.advanced:
            # Required for new bugzilla
            # https://bugzilla.redhat.com/show_bug.cgi?id=825370
            query['query_format'] = 'advanced'

        bugs = self.bz.query(query)
        # Convert to dicts
        bugs = [
            dict(
                ((col, _get_bug_attr(bug, col)) for col in self.COLUMN_LIST)
            ) for bug in bugs
        ]

        issues = [(self.target, bug) for bug in bugs]
        log.name(self.target).debug(" Found {0} total.", len(issues))

        # Build a url for each issue
        base_url = "https://%s/show_bug.cgi?id=" % (self.base_uri)
        for tag, issue in issues:
            issue_obj = self.get_issue_for_record(issue)
            extra = {
                'url': base_url + six.text_type(issue['id']),
                'annotations': self.annotations(tag, issue, issue_obj),
            }
            issue_obj.update_extra(extra)
            yield issue_obj