def issues(self):
    """Fetch, prune and format bitbucket issues for the configured user.

    Returns a list of taskwarrior-ready dicts with ``description``,
    ``project`` and ``priority`` keys.
    """
    user = self.config.get(self.target, 'username')
    url = self.base_api + '/users/' + user + '/'
    # Fix: the original leaked the http handle; close it even if the
    # read or the json decode raises.
    f = urllib2.urlopen(url)
    try:
        response = json.loads(f.read())
    finally:
        f.close()

    repos = [repo.get('slug') for repo in response.get('repositories')]
    issues = sum([self.pull(user + "/" + repo) for repo in repos], [])
    log.debug(" Found {0} total.", len(issues))

    # Build a browsable url for each issue from its API resource uri.
    for tag, issue in issues:
        issue['url'] = self.base_url + "/".join(
            issue['resource_uri'].split('/')[3:]
        ).replace('issues', 'issue')

    issues = filter(self.include, issues)
    log.debug(" Pruned down to {0}", len(issues))

    return [{
        "description": self.description(
            issue['title'], issue['url'], issue['local_id'],
            cls="issue",
        ),
        # tag is "user/repo"; the repo part becomes the project name.
        "project": tag.split('/')[1],
        "priority": self.priorities.get(issue['priority'], 'M'),
    } for tag, issue in issues]
def issues(self):
    """Collect tasks/tickets from every configured project and format them.

    Iterates ``self.projects`` (a list of {project_id: project_name}
    dicts), gathers issues via the client, and returns taskwarrior-ready
    dicts, copying the optional ``due`` field when present.
    """
    start = time.time()

    issues = []
    # Loop through each project.
    # @todo Implement threading here.
    for project in self.projects:
        for project_id, project_name in project.iteritems():
            log.debug(" Getting tasks for #" + project_id +
                      " " + project_name + '"')
            issues += self.client.find_issues(
                self.user_id, project_id, project_name)

    log.debug(" Found {0} total.", len(issues))
    # Fix: api_count/task_count are module-level counters that are only
    # *read* here, so the previous `global api_count` declaration was
    # unnecessary (and task_count had none, inconsistently).
    log.debug(" {0} API calls", api_count)
    log.debug(" {0} tasks and tickets analyzed", task_count)
    # Consistency: use the same lazy-argument log style as the other calls
    # instead of eager %-interpolation.
    log.debug(" Elapsed Time: {0}", time.time() - start)

    formatted_issues = []
    for issue in issues:
        formatted_issue = dict(
            description=self.description(
                issue["description"],
                issue["project_id"],
                issue["ticket_id"],
                issue["type"],
            ),
            project=self.get_project_name(issue),
            priority=issue["priority"],
            **self.annotations(issue)
        )
        # Due dates are optional on the remote side.
        if "due" in issue:
            formatted_issue["due"] = issue["due"]
        formatted_issues.append(formatted_issue)
    return formatted_issues
def issues(self):
    """Query trac for open tickets and convert them to taskwarrior dicts."""
    base_url = "https://" + self.config.get(self.target, 'trac.base_uri')
    tickets = self.trac.query_tickets('status!=closed&max=0')
    tickets = map(self.trac.get_ticket, tickets)
    issues = [(self.target, ticket[3]) for ticket in tickets]
    log.debug(" Found {0} total.", len(issues))

    # Attach a browsable url and the ticket number to each issue dict;
    # each issue is the attribute-dict at index 3 of its ticket tuple,
    # and the ticket id sits at index 0.
    for (tag, issue), ticket in zip(issues, tickets):
        issue['url'] = "%s/ticket/%i" % (base_url, ticket[0])
        issue['number'] = ticket[0]

    issues = filter(self.include, issues)
    log.debug(" Pruned down to {0}", len(issues))

    return [dict(
        description=self.description(
            issue['summary'], issue['url'], issue['number'],
            cls="issue",
        ),
        project=tag,
        priority=self.priorities.get(
            issue['priority'], self.default_priority,
        ),
        **self.annotations(tag, issue)
    ) for tag, issue in issues]
def issues(self):
    """Return formatted issues and pull requests for the configured user."""
    user = self.config.get(self.target, 'username')
    all_repos = self.ghc.repos.list(user)

    # First get and prune all the real issues.
    def has_issues(repo):
        return repo.has_issues  # and repo.open_issues > 0

    repos = filter(has_issues, all_repos)
    issues = sum([self._issues(user + "/" + r.name) for r in repos], [])
    log.debug(" Found {0} total.", len(issues))
    issues = filter(self.include, issues)
    log.debug(" Pruned down to {0}", len(issues))

    # Next, get all the pull requests (and don't prune).
    def has_requests(repo):
        return repo.forks > 1

    repos = filter(has_requests, all_repos)
    requests = sum([self._reqs(user + "/" + r.name) for r in repos], [])

    formatted = [{
        "description": self.description(
            issue.title, issue.html_url, issue.number, cls="issue"
        ),
        "project": tag.split('/')[1],
    } for tag, issue in issues]
    formatted += [{
        "description": self.description(
            request.title, request.html_url, request.number,
            cls="pull_request"
        ),
        "project": tag.split('/')[1],
    } for tag, request in requests]
    return formatted
def issues(self):
    """Query bugzilla over xmlrpc and return formatted open bugs."""
    email = self.config.get(self.target, 'bugzilla.username')
    # TODO -- doing something with blockedby would be nice.
    query = dict(
        column_list=self.column_list,
        bug_status=self.not_closed_statuses,
        email1=email,
        emailreporter1=1,
        emailcc1=1,
        emailassigned_to1=1,
        emailqa_contact1=1,
        emailtype1="substring",

        # Required for new bugzilla
        # https://bugzilla.redhat.com/show_bug.cgi?id=825370
        query_format='advanced',
    )
    bugs = self.bz.query(query)

    # Convert the returned bug objects into plain dicts keyed on the
    # columns we asked for.
    bugs = [
        dict((col, getattr(bug, col)) for col in self.column_list)
        for bug in bugs
    ]
    issues = [(self.target, bug) for bug in bugs]
    log.debug(" Found {0} total.", len(issues))

    # Build a url for each issue and normalize the component name.
    base_url = "https://%s/show_bug.cgi?id=" % \
        self.config.get(self.target, 'bugzilla.base_uri')
    for tag, issue in issues:
        issue['url'] = base_url + str(issue['id'])
        issue['component'] = issue['component'].lower().replace(' ', '-')

    # XXX - Note that we don't use the .include() method like all the other
    # IssueService child classes.  That's because the bugzilla xmlrpc API
    # can already do a lot of the filtering we want for us.
    #issues = filter(self.include, issues)
    #log.debug(" Pruned down to {0}", len(issues))

    return [{
        "description": self.description(
            issue['summary'], issue['url'], issue['id'],
            cls="issue",
        ),
        "project": issue['component'],
        "priority": self.priorities.get(
            issue['priority'], self.default_priority,
        ),
    } for tag, issue in issues]
def find_issues(self, user_id=None, project_id=None, project_name=None):
    """
    Approach:

    1. Get user ID from .bugwarriorrc file
    2. Get list of tickets from /user-tasks for a given project
    3. For each ticket/task returned from #2, get ticket/task info and
       check if logged-in user is primary (look at `is_owner` and
       `user_id`)

    Returns a list of plain dicts describing the tasks/tickets assigned
    to the configured user for this project.
    """
    user_tasks_data = self.call_api(
        "/projects/" + str(project_id) + "/user-tasks")

    # task_count is a module-level counter that we increment, so the
    # global declaration is required here.
    global task_count

    assigned_tasks = []
    try:
        for task in user_tasks_data:
            task_count += 1
            assigned_task = dict()
            if task[u'type'] == 'Ticket':
                # Load Ticket data
                # @todo Implement threading here.
                ticket_data = self.call_api(
                    "/projects/" + str(task[u'project_id']) +
                    "/tickets/" + str(task[u'ticket_id']))
                # Only keep tickets where the logged-in user is primary.
                for assignee in ticket_data[u'assignees']:
                    if (assignee[u'is_owner'] is True) and \
                            (assignee[u'user_id'] == int(self.user_id)):
                        assigned_task['permalink'] = ticket_data[u'permalink']
                        assigned_task['ticket_id'] = ticket_data[u'ticket_id']
                        assigned_task['project_id'] = ticket_data[u'project_id']
                        assigned_task['project'] = project_name
                        assigned_task['description'] = ticket_data[u'name']
                        assigned_task['type'] = "ticket"
                        assigned_task['created_on'] = ticket_data[u'created_on']
                        assigned_task['created_by_id'] = \
                            ticket_data[u'created_by_id']
            elif task[u'type'] == 'Task':
                # Load Task data; tasks have no separate ticket id.
                assigned_task['permalink'] = task[u'permalink']
                assigned_task['project'] = project_name
                assigned_task['description'] = task[u'body']
                assigned_task['project_id'] = task[u'project_id']
                assigned_task['ticket_id'] = ""
                assigned_task['type'] = "task"
                assigned_task['created_on'] = task[u'created_on']
                assigned_task['created_by_id'] = task[u'created_by_id']

            if assigned_task:
                log.debug(" Adding '" + assigned_task['description'] +
                          "' to task list.")
                assigned_tasks.append(assigned_task)
    except Exception:
        # Fix: narrowed from a bare `except:` which would also swallow
        # SystemExit/KeyboardInterrupt.  We still deliberately treat any
        # API/shape error as "no tasks for this project" (best-effort).
        log.debug('No user tasks loaded for "%s".' % project_name)

    return assigned_tasks
def issues(self):
    """Fetch all issues assigned to the user and format them."""
    found = self.client.find_issues(self.user_id)
    log.debug(" Found {0} total.", len(found))

    formatted = []
    for issue in found:
        formatted.append(dict(
            description=self.description(
                issue["subject"],
                self.get_issue_url(issue),
                issue["id"],
                cls="issue",
            ),
            project=self.get_project_name(issue),
            priority=self.default_priority,
        ))
    return formatted
def issues(self):
    """Fetch the current tasks and convert them to taskwarrior dicts."""
    tasks = self.client.get_actual_tasks()
    log.debug(" Found {0} total.", len(tasks))

    formatted = []
    for task in tasks:
        formatted.append(dict(
            description=self.description(
                self.get_issue_title(task),
                self.get_issue_url(task),
                self.get_issue_id(task),
                cls="issue",
            ),
            project=self.project_name,
            priority=self.default_priority,
        ))
    return formatted
def create_term_relationships(db, mapping):
    """
    Creates the relationships between terms in our graph database that
    are present in our ontology.

    ``mapping`` maps each term to a dict with a ``node_id`` and a
    ``relationships`` iterable of (relationship_type, parent_id,
    parent_name) triples; ``db`` exposes nodes via ``db.node[id]``.
    """
    for term in mapping:
        # First check if we have a valid (set) of relationships for this
        # term; skip terms with none via a guard clause.
        relationships = mapping[term]['relationships']
        if not relationships:
            continue
        child_node = db.node[mapping[term]['node_id']]
        # Fix: the loop variable was previously named `type`, shadowing
        # the builtin.
        for (rel_type, parent_id, parent_name) in relationships:
            parent_node = db.node[mapping[parent_id]['node_id']]
            child_node.relationships.create(rel_type, parent_node)
            log.debug("RELATIONSHIP: parent %s --> child %s"
                      % (parent_name, child_node.get('name')))
def issues(self):
    """Run the configured JQL query and format the matching JIRA issues."""
    cases = self.jira.search_issues(self.query, maxResults=-1)
    log.debug(" Found {0} total.", len(cases))

    results = []
    for case in cases:
        # JIRA keys look like "PROJ-123": project key left of the last
        # dash, issue number to its right.
        key_parts = case.key.rsplit('-', 1)
        results.append(dict(
            description=self.description(
                title=case.fields.summary,
                url=self.url + '/browse/' + case.key,
                number=key_parts[1],
                cls="issue",
            ),
            project=key_parts[0],
            priority=self.priorities.get(
                get_priority(case.fields.priority),
                self.default_priority,
            ),
            **self.annotations(case.key)
        ))
    return results
def issues(self):
    """Return the active (status == 1) remote tasks as taskwarrior dicts."""
    tasks = self.client.get_task_list()
    log.debug(" Remote has {0} total issues.", len(tasks))
    if not tasks:
        return []

    # Filter out closed tasks.
    tasks = [task for task in tasks if task["status"] == 1]
    log.debug(" Remote has {0} active issues.", len(tasks))

    return [dict(
        description=self.description(
            task["title"],
            self.get_issue_url(task),
            task["id"],
            cls="issue",
        ),
        project=self.project_name,
        priority=self.get_priority(task),
    ) for task in tasks]
def issues(self):
    """Return formatted github issues plus pull requests for a user."""
    user = self.config.get(self.target, 'username')
    all_repos = githubutils.get_repos(username=user, auth=self.auth)

    # First get and prune all the real issues: only repos with the issue
    # tracker enabled and at least one open issue.
    repos = [
        repo for repo in all_repos
        if repo['has_issues'] and repo['open_issues_count'] > 0
    ]
    issues = sum([self._issues(user + "/" + repo['name'])
                  for repo in repos], [])
    log.debug(" Found {0} total.", len(issues))
    issues = filter(self.include, issues)
    log.debug(" Pruned down to {0}", len(issues))

    # Next, get all the pull requests (and don't prune).
    forked = [repo for repo in all_repos if repo['forks'] > 1]
    requests = sum([self._reqs(user + "/" + repo['name'])
                    for repo in forked], [])

    formatted_issues = [dict(
        description=self.description(
            issue['title'], issue['html_url'], issue['number'],
            cls="issue"
        ),
        project=tag.split('/')[1],
        priority=self.default_priority,
        **self.annotations(tag, issue)
    ) for tag, issue in issues]

    formatted_requests = [{
        "description": self.description(
            request['title'], request['html_url'], request['number'],
            cls="pull_request"
        ),
        "project": tag.split('/')[1],
        "priority": self.default_priority,
    } for tag, request in requests]

    return formatted_issues + formatted_requests
def issues(self):
    """Fetch open bitbucket issues for every repo with the tracker enabled.

    Prunes resolved/duplicate/wontfix/invalid issues and anything
    excluded by ``self.include`` before formatting.
    """
    user = self.config.get(self.target, 'username')
    url = self.base_api + '/users/' + user + '/'
    # Fix: the original leaked the http handle; close it even if the
    # read or the json decode raises.
    f = urllib2.urlopen(url)
    try:
        response = json.loads(f.read())
    finally:
        f.close()

    repos = [
        repo.get('slug') for repo in response.get('repositories')
        if repo.get('has_issues')
    ]

    issues = sum([self.pull(user + "/" + repo) for repo in repos], [])
    log.debug(" Found {0} total.", len(issues))

    # Build a browsable url for each issue from its API resource uri.
    for tag, issue in issues:
        issue['url'] = self.base_url + "/".join(
            issue['resource_uri'].split('/')[3:]
        ).replace('issues', 'issue')

    # Drop issues in any terminal state.
    closed = ['resolved', 'duplicate', 'wontfix', 'invalid']
    not_resolved = lambda tup: tup[1]['status'] not in closed
    issues = filter(not_resolved, issues)
    issues = filter(self.include, issues)
    log.debug(" Pruned down to {0}", len(issues))

    return [dict(
        description=self.description(
            issue['title'], issue['url'], issue['local_id'],
            cls="issue",
        ),
        project=tag.split('/')[1],
        priority=self.priorities.get(
            issue['priority'], self.default_priority,
        ),
        **self.annotations(tag, issue)
    ) for tag, issue in issues]
def issues(self):
    """Return formatted github issues and pull requests for a user."""
    user = self.config.get(self.target, 'username')
    all_repos = self.gh.repos.list(user=user).all()

    # First get and prune all the real issues: only repos with the issue
    # tracker enabled and at least one open issue.
    def has_issues(repo):
        return repo.has_issues and repo.open_issues > 0

    repos = filter(has_issues, all_repos)
    issues = sum([self._issues(user + "/" + r.name) for r in repos], [])
    log.debug(" Found {0} total.", len(issues))
    issues = filter(self.include, issues)
    log.debug(" Pruned down to {0}", len(issues))

    # Next, get all the pull requests (and don't prune).
    def has_requests(repo):
        return repo.forks > 1

    repos = filter(has_requests, all_repos)
    requests = sum([self._reqs(user + "/" + r.name) for r in repos], [])

    formatted_issues = [dict(
        description=self.description(
            issue.title, issue.html_url, issue.number, cls="issue"
        ),
        project=tag.split('/')[1],
        priority=self.default_priority,
        **self.annotations(tag, issue)
    ) for tag, issue in issues]

    formatted_requests = [{
        "description": self.description(
            request.title, request.html_url, request.number,
            cls="pull_request"
        ),
        "project": tag.split('/')[1],
        "priority": self.default_priority,
    } for tag, request in requests]

    return formatted_issues + formatted_requests
def issues(self):
    """Paginate through all of a user's repos, then collect issues and PRs.

    Pages the repo list until the API returns an empty page, then
    formats both open issues and pull requests as taskwarrior dicts.
    """
    user = self.config.get(self.target, 'username')

    all_repos, page = [], 1
    # Fix: this call previously passed a stray `page` argument with no
    # matching placeholder in the format string.
    log.debug(" Getting ready to get list of all repos.")
    while True:
        log.debug(" Getting {0}th page of repos.", page)
        new_repos = self.ghc.repos.list(user, page)
        if not new_repos:
            break
        all_repos += new_repos
        page += 1

    # First get and prune all the real issues.
    has_issues = lambda repo: repo.has_issues  # and repo.open_issues > 0
    repos = filter(has_issues, all_repos)
    issues = sum([self._issues(user + "/" + r.name) for r in repos], [])
    log.debug(" Found {0} total.", len(issues))
    issues = filter(self.include, issues)
    log.debug(" Pruned down to {0}", len(issues))

    # Next, get all the pull requests (and don't prune).
    has_requests = lambda repo: repo.forks > 1
    repos = filter(has_requests, all_repos)
    requests = sum([self._reqs(user + "/" + r.name) for r in repos], [])

    return [dict(
        description=self.description(
            issue.title, issue.html_url, issue.number, cls="issue"
        ),
        project=tag.split('/')[1],
        priority=self.default_priority,
        **self.annotations(tag, issue)
    ) for tag, issue in issues] + [{
        "description": self.description(
            request.title, request.html_url, request.number,
            cls="pull_request"
        ),
        "project": tag.split('/')[1],
        "priority": self.default_priority,
    } for tag, request in requests]