def _get_gc_issues(gc_proj):
    """Get the Google Code issues XML for the given project.
    
    <http://code.google.com/p/support/wiki/IssueTrackerAPI>
    """
    http = _get_http()
    max_results = 1000
    url = ("https://code.google.com/feeds/issues/p/%s/" 
        "issues/full?max-results=%d" % (gc_proj, max_results))
    response, content = http.request(url)
    if response["status"] not in ("200", "304"):
        raise RuntimeError("error GET'ing %s: %s" % (url, response["status"]))
    
    feed = ET.fromstring(content)
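    # ElementTree addresses namespaced tags in Clark notation: "{uri}localname".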
    ns = '{http://www.w3.org/2005/Atom}'
    ns_issues = '{http://schemas.google.com/projecthosting/issues/2009}'
    
    issues = []
    for entry in feed.findall(ns+"entry"):
        alt_link = [link for link in entry.findall(ns+"link") if link.get("rel") == "alternate"][0]
        issue = {
            "title": html_unescape(entry.findtext(ns+"title")),
            "published": entry.findtext(ns+"published"),
            "updated": entry.findtext(ns+"updated"),
            "content": html_unescape(entry.findtext(ns+"content")),
            "id": entry.findtext(ns_issues+"id"),
            "url": alt_link.get("href"),
            "stars": entry.findtext(ns_issues+"stars"),
            "state": entry.findtext(ns_issues+"state"),
            "status": entry.findtext(ns_issues+"status"),
            "labels": [label.text for label in entry.findall(ns_issues+"label")],
            "author": {
                "name": entry.find(ns+"author").findtext(ns+"name"),
                "uri": entry.find(ns+"author").findtext(ns+"uri"),
            },
            #TODO: closedDate if exists
            #    <issues:closedDate>2007-11-09T05:15:25.000Z</issues:closedDate>
        }
        #pprint(issue)
        owner = entry.find(ns_issues+"owner")
        if owner is not None:
            issue["owner"] = {
                "username": entry.find(ns_issues+"owner").findtext(ns_issues+"username"),
                "uri": entry.find(ns_issues+"owner").findtext(ns_issues+"uri"),
            }
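        # Atom timestamps look like 2007-11-09T05:15:25.000Z; strptime
        # matches the fractional ".000" and trailing "Z" literally.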
        issue["published_datetime"] = datetime.datetime.strptime(
            issue["published"], "%Y-%m-%dT%H:%M:%S.000Z")
        issue["updated_datetime"] = datetime.datetime.strptime(
            issue["updated"], "%Y-%m-%dT%H:%M:%S.000Z")

        # Only care about open issues.
        if issue['state'] == 'open':
            issues.append(issue)
    #pprint(issues)
    
    if len(issues) == max_results:
        raise RuntimeError("This project might have more than %d issues and "
            "this script isn't equipped to deal with that. Aborting."
            % max_results)
    return issues
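
A minimal sketch of the module-level helpers this example assumes but does not show (imports, _get_http, and html_unescape live elsewhere in the original module; the stand-ins below are assumptions, not the original code):

import datetime
import xml.etree.ElementTree as ET

import httplib2

try:
    from html import unescape as html_unescape  # Python 3.4+
except ImportError:
    from HTMLParser import HTMLParser           # Python 2
    html_unescape = HTMLParser().unescape

def _get_http():
    # Plain httplib2 client; the original may layer caching or auth on top.
    return httplib2.Http()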
Example #2
def _get_gc_issues(gc_proj):
    """Get the Google Code issues XML for the given project.

    <http://code.google.com/p/support/wiki/IssueTrackerAPI>
    """
    http = _get_http()
    max_results = 1000
    url = ("https://code.google.com/feeds/issues/p/%s/"
        "issues/full?max-results=%d" % (gc_proj, max_results))
    response, content = http.request(url)
    if response["status"] not in ("200", "304"):
        raise RuntimeError("error GET'ing %s: %s" % (url, response["status"]))

    feed = ET.fromstring(content)
    ns = '{http://www.w3.org/2005/Atom}'
    ns_issues = '{http://schemas.google.com/projecthosting/issues/2009}'

    issues = []
    for entry in feed.findall(ns+"entry"):
        alt_link = [link for link in entry.findall(ns+"link") if link.get("rel") == "alternate"][0]
        issue = {
            "title": html_unescape(entry.findtext(ns+"title")),
            "published": entry.findtext(ns+"published"),
            "updated": entry.findtext(ns+"updated"),
            "content": html_unescape(entry.findtext(ns+"content")),
            "id": entry.findtext(ns_issues+"id"),
            "url": alt_link.get("href"),
            "stars": entry.findtext(ns_issues+"stars"),
            "state": entry.findtext(ns_issues+"state"),
            "status": entry.findtext(ns_issues+"status"),
            "labels": [label.text for label in entry.findall(ns_issues+"label")],
            "author": {
                "name": entry.find(ns+"author").findtext(ns+"name"),
                "uri": entry.find(ns+"author").findtext(ns+"uri"),
            },
            #TODO: closedDate if exists
            #    <issues:closedDate>2007-11-09T05:15:25.000Z</issues:closedDate>
        }
        #pprint(issue)
        owner = entry.find(ns_issues+"owner")
        if owner is not None:
            issue["owner"] = {
                "username": entry.find(ns_issues+"owner").findtext(ns_issues+"username"),
                "uri": entry.find(ns_issues+"owner").findtext(ns_issues+"uri"),
            }
        issue["published_datetime"] = datetime.datetime.strptime(
            issue["published"], "%Y-%m-%dT%H:%M:%S.000Z")
        issue["updated_datetime"] = datetime.datetime.strptime(
            issue["updated"], "%Y-%m-%dT%H:%M:%S.000Z")

        # Keep every issue regardless of state.
        issues.append(issue)
    #pprint(issues)

    if len(issues) == max_results:
        raise RuntimeError("This project might have more than %d issues and "
            "this script isn't equipped to deal with that. Aborting."
            % max_results)
    return issues
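
A hypothetical driver showing how either variant might be called (the project name is illustrative, not from the original):

if __name__ == "__main__":
    # Print a one-line summary per issue; "chromium" is just an example slug.
    for issue in _get_gc_issues("chromium"):
        print("#%s %s %s" % (issue["id"], issue["status"], issue["title"]))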
Example #3
def read_ebnf(url, strict=True):
    LOG.debug('reading ebnf from %r', url)
    content = get_page(url, cache_nexists=not strict)
    if strict or content:
        m = r_pre.search(content)
        assert m, (content, url)
        return (url.rsplit('/', 1)[-1].rsplit('.', 1)[0],
                html_unescape(m.group(1)))
    else:
        return None
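
read_ebnf leans on module globals the excerpt omits (LOG, r_pre, get_page); a minimal sketch of what they might look like, offered purely as assumptions:

import re
import logging

try:
    from urllib.request import urlopen  # Python 3
except ImportError:
    from urllib2 import urlopen         # Python 2

LOG = logging.getLogger(__name__)

# Assumed: the EBNF grammar sits inside the page's first <pre> block.
r_pre = re.compile(r'<pre[^>]*>(.*?)</pre>', re.DOTALL)

def get_page(url, cache_nexists=False):
    # Hypothetical fetcher; with cache_nexists=True a missing page
    # yields '' instead of raising, matching the non-strict path above.
    try:
        return urlopen(url).read().decode('utf-8')
    except IOError:
        if cache_nexists:
            return ''
        raise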