def get_incoming_version(package, timeout, http_proxy=None, arch='i386'):
    try:
        page = open_url(INCOMING_URL, http_proxy, timeout)
    except NoNetwork:
        return None
    except urllib2.HTTPError, x:
        print >> sys.stderr, "Warning:", x
        return None
Example #2
def get_newqueue_available(package, timeout, dists=None, http_proxy=None, arch='i386'):
    if dists is None:
        dists = ('unstable (new queue)', )
    try:
        page = open_url(NEWQUEUE_URL, http_proxy, timeout)
    except NoNetwork:
        return {}
    except urllib2.HTTPError, x:
        print >> sys.stderr, "Warning:", x
        return {}
Example #5
def parse_mbox_report(number, url, http_proxy, timeout, followups=False):
    page = open_url(url, http_proxy, timeout)
    if not page:
        return None

    # Make this seekable
    wholefile = cStringIO.StringIO(page.read())

    try:
        page.fp._sock.recv = None
    except:
        pass
    page.close()

    mbox = mailbox.UnixMailbox(wholefile, msgfactory)
    title = ''

    output = []
    for message in mbox:
        if not message:
            # skip empty or unparsable messages
            continue

        subject = message.get('Subject')
        if not title:
            title = subject

        date = message.get('Date')
        fromhdr = message.get('From')

        body = entry = ''
        for part in message.walk():
            if part.get_content_type() == 'text/plain' and not body:
                body = part.get_payload(None, True)

        if fromhdr:
            entry += 'From: %s%s' % (fromhdr, os.linesep)

        if subject and subject != title:
            entry += 'Subject: %s%s' % (subject, os.linesep)

        if date:
            entry += 'Date: %s%s' % (date, os.linesep)

        if entry:
            entry += os.linesep

        entry += body.rstrip('\n') + os.linesep

        output.append(entry)

    if not output:
        return None

    title = "#%d: %s" % (number, title)
    return (title, output)
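A minimal usage sketch for parse_mbox_report, assuming it is called from the same module (so open_url, msgfactory and the other helpers are in scope); the bug number and mbox URL are placeholders, not a real report:

# Hypothetical call: fetch a bug's mbox and print each formatted entry.
result = parse_mbox_report(123456,
                           'https://bugs.debian.org/cgi-bin/bugreport.cgi?mbox=yes;bug=123456',
                           http_proxy=None, timeout=60)
if result:
    title, entries = result
    print(title)
    for entry in entries:
        print(entry)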
Example #6
def get_versions_available(package, timeout, dists=None, http_proxy=None, arch='i386'):
    if not dists:
        dists = ('oldstable', 'stable', 'testing', 'unstable', 'experimental')

    try:
        page = open_url(RMADISON_URL % package)
    except NoNetwork:
        return {}
    except urllib2.HTTPError, x:
        print >> sys.stderr, "Warning:", x
        return {}
Example #8
def get_versions_available(package, timeout, dists=None, http_proxy=None, arch='i386'):
    if not dists:
        dists = ('stable', 'testing', 'unstable')

    try:
        page = open_url(PACKAGES_URL % package, http_proxy, timeout)
    except NoNetwork:
        return {}
    except urllib2.HTTPError, x:
        print >> sys.stderr, "Warning:", x
        return {}
Example #9
def get_reports(package, timeout, system='debian', mirrors=None, version=None,
                http_proxy='', archived=False, source=False):
    if isinstance(package, basestring):
        if SYSTEMS[system].get('cgiroot'):
            try:
                result = get_cgi_reports(package, timeout, system, http_proxy, archived,
                                     source, version=version)
            except:
                raise NoNetwork
            if result: return result

        url = package_url(system, package, mirrors, source)
        try:
            page = open_url(url, http_proxy, timeout)
        except:
            raise NoNetwork
        if not page:
            return (0, None, None)

        #content = page.read()
        #if 'Maintainer' not in content:
        #    return (0, None, None)

        parser = BTSParser()
        for line in page:
            parser.feed(line)
        parser.close()
        try:
            page.fp._sock.recv = None
        except:
            pass
        page.close()

        return parser.bugcount, parser.title, parser.hierarchy

    # A list of bug numbers
    this_hierarchy = []
    package = [int(x) for x in package]
    package.sort()
    for bug in package:
        result = get_report(bug, timeout, system, mirrors, http_proxy, archived)
        if result:
            title, body = result
            this_hierarchy.append(title)
            #print title

    title = "Multiple bug reports"
    bugcount = len(this_hierarchy)
    hierarchy = [('Reports', this_hierarchy)]

    return bugcount, title, hierarchy
Example #10
def check_built(src_package, timeout, arch=None, http_proxy=None):
    if not arch:
        arch = archname()

    try:
        page = open_url(BUILDD_URL % (arch, src_package), http_proxy, timeout)
    except NoNetwork:
        return {}
    if not page:
        return {}

    parser = BuilddParser()
    parser.feed(page.read())
    parser.close()
    page.close()

    return parser.found_succeeded
Example #11
def get_versions_available(package, timeout, dists=None, http_proxy=None, arch='i386'):
    if not dists:
        dists = ('oldstable', 'stable', 'testing', 'unstable', 'experimental')

    arch = utils.get_arch()

    url = RMADISON_URL % package
    url += '&s=' + ','.join(dists)
    # select only those lines that refers to source pkg
    # or to binary packages available on the current arch
    url += '&a=source,all,' + arch
    try:
        page = open_url(url)
    except NoNetwork:
        return {}
    except urllib2.HTTPError, x:
        print >> sys.stderr, "Warning:", x
        return {}
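The snippet above stops after the error handling; the remainder of the function parses the rmadison output in page into a mapping from distribution to version. A hedged usage sketch, assuming that dict return shape and using an arbitrary package name:

# Hypothetical call; 'reportbug' is only an example package.
versions = get_versions_available('reportbug', timeout=60)
for dist, version in versions.items():
    print('%s: %s' % (dist, version))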
Example #12
def get_cgi_reports(package, timeout, system='debian', http_proxy='',
                    archived=False, source=False, version=None):
    try:
        page = open_url(cgi_package_url(system, package, archived, source,
                                    version=version), http_proxy, timeout)
    except:
        raise NoNetwork

    if not page:
        return (0, None, None)

    #content = page.read()
    #if 'Maintainer' not in content:
    #    return (0, None, None)

    parser = BTSParser(cgi=True)
    for line in page:
        try:
            line = line.decode('utf-8') # BTS pages are encoded in utf-8
        except UnicodeDecodeError:
            # page has a bad char
            line = line.decode('utf-8', 'replace')
        parser.feed(line)
    parser.close()
    try:
        page.fp._sock.recv = None
    except:
        pass
    page.close()

    # Reorganize hierarchy to put recently-fixed bugs at top
    parser.reorganize()

    # Morph @ 2008-08-15; due to BTS output format changes
    try:
        parser.hierarchy.remove(('Select bugs', []))
    except:
        pass

    data = (parser.bugcount, parser.title, parser.hierarchy)
    del parser

    return data
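A sketch of consuming the (bugcount, title, hierarchy) triple returned by get_cgi_reports; the package name is arbitrary, and the section/bug-list shape of hierarchy is assumed from the BTSParser usage above:

# Hypothetical call for a single package on the default (Debian) BTS.
count, title, hierarchy = get_cgi_reports('reportbug', timeout=60)
print('%s (%d)' % (title, count))
for section, bugs in hierarchy or []:
    print('%s: %d entries' % (section, len(bugs)))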
Example #13
def check_built(src_package, timeout, arch=None, http_proxy=None):
    """Return True if built in the past, False otherwise (even error)"""
    if not arch:
        arch = utils.get_arch()

    try:
        page = open_url(BUILDD_URL % (arch, src_package), http_proxy, timeout)
    except NoNetwork:
        return False

    if not page:
        return False

    parser = BuilddParser()
    parser.feed(page.read())
    parser.close()
    page.close()

    return parser.found_succeeded
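Usage is a simple boolean check; the source package name below is only an example:

# Hypothetical check: has the 'hello' source package ever built on this arch?
if check_built('hello', timeout=60):
    print('built at least once on this architecture')
else:
    print('no successful build found (or the lookup failed)')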
Example #16
def parse_html_report(number, url, http_proxy, timeout, followups=False, cgi=True):
    page = open_url(url, http_proxy, timeout)
    if not page:
        return None

    parser = BTSParser(cgi=cgi, followups=followups)
    for line in page:
        parser.feed(line)
    parser.close()

    try:
        page.fp._sock.recv = None
    except:
        pass
    page.close()

    items = parser.preblock
    title = "#%d: %s" % (number, parser.title)

    if not followups:
        items = [items]

    output = []
    for stuff in items:
        parts = stuff.split('\n\n')
        match = re.search('^Date: (.*)$', parts[0], re.M | re.I)
        date_submitted = ''
        if match:
            date_submitted = 'Date: %s\n' % match.group(1)

        stuff = ('\n\n'.join(parts[1:])).rstrip()
        if not stuff:
            continue

        item = date_submitted+stuff+os.linesep
        output.append(item)

    if not output:
        return None

    return (title, output)
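parse_html_report returns the same (title, entries) shape as parse_mbox_report; a minimal sketch with a placeholder bug number and URL (normally the caller builds the URL from the BTS CGI base):

# Hypothetical call against the Debian BTS CGI report page.
result = parse_html_report(123456,
                           'https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=123456',
                           http_proxy=None, timeout=60, followups=True)
if result:
    title, entries = result
    print('%s -- %d message(s)' % (title, len(entries)))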
Example #19
def launch_mbox_reader(cmd, url, http_proxy, timeout):
    """Runs the command specified by cmd passing the mbox file
    downloaded from url as a parameter. If cmd is None or fails, then
    fallback to mail program."""
    mbox = open_url(url, http_proxy, timeout)
    if mbox is None:
        return
    (fd, fname) = TempFile()
    try:
        for line in mbox:
            fd.write(line)
        fd.close()
        if cmd is not None:
            try:
                cmd = cmd % fname
            except TypeError:
                cmd = "%s %s" % (cmd, fname)
            error = os.system(cmd)
            if not error:
                return
        #fallback
        os.system('mail -f ' + fname)
    finally:
        os.unlink(fname)
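A sketch of invoking launch_mbox_reader with a pager-style command; '%s' in cmd is substituted with the temporary mbox path, and both the command and URL here are examples only:

# Hypothetical invocation: open the downloaded mbox in mutt, falling back to mail.
launch_mbox_reader('mutt -f %s',
                   'https://bugs.debian.org/cgi-bin/bugreport.cgi?mbox=yes;bug=123456',
                   http_proxy=None, timeout=60)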
Example #21
def get_reports(package, timeout, system='debian', mirrors=None, version=None,
                http_proxy='', archived=False, source=False):

    if system == 'debian':
        if isinstance(package, basestring):
            if source:
                pkg_filter = 'src'
            else:
                pkg_filter = 'package'
            bugs = debianbts.get_bugs(pkg_filter, package)
        else:
            bugs = map(int, package)

        # retrieve bugs and generate the hierarchy
        stats = debianbts.get_status(bugs)

        d = defaultdict(list)
        for s in stats:
            # We now return debianbts.Bugreport objects, containing all the info
            # for a bug, so UIs can extract them as needed
            d[s.severity].append(s)

        # keep the bugs ordered per severity
        # XXX: shouldn't it be something UI-related?
        #
        # The hierarchy is a list of tuples:
        #     (description of the severity, list of bugs for that severity)
        hier = []
        for sev in SEVLIST:
            if sev in d:
                hier.append(('Bugs with severity %s' % sev, d[sev]))

        return (len(bugs), 'Bug reports for %s' % package, hier)

    # XXX: is the code below used at all now? can we remove it?
    if isinstance(package, basestring):
        if SYSTEMS[system].get('cgiroot'):
            try:
                result = get_cgi_reports(package, timeout, system, http_proxy, archived,
                                     source, version=version)
            except:
                raise NoNetwork
            if result: return result

        url = package_url(system, package, mirrors, source)
        try:
            page = open_url(url, http_proxy, timeout)
        except:
            raise NoNetwork
        if not page:
            return (0, None, None)

        #content = page.read()
        #if 'Maintainer' not in content:
        #    return (0, None, None)

        parser = BTSParser()
        for line in page:
            parser.feed(line)
        parser.close()
        try:
            page.fp._sock.recv = None
        except:
            pass
        page.close()

        return parser.bugcount, parser.title, parser.hierarchy

    # A list of bug numbers
    this_hierarchy = []
    package = [int(x) for x in package]
    package.sort()
    for bug in package:
        result = get_report(bug, timeout, system, mirrors, http_proxy, archived)
        if result:
            title, body = result
            this_hierarchy.append(title)
            #print title

    title = "Multiple bug reports"
    bugcount = len(this_hierarchy)
    hierarchy = [('Reports', this_hierarchy)]

    return bugcount, title, hierarchy
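A sketch of walking the hierarchy returned by the Debian branch above; it assumes python-debianbts is installed and that the Bugreport objects expose the usual bug_num and subject attributes:

# Hypothetical call on the 'debian' system path; each hierarchy entry is a
# (severity description, list of debianbts.Bugreport objects) pair.
count, title, hierarchy = get_reports('reportbug', timeout=60)
print('%s (%d)' % (title, count))
for heading, bugreports in hierarchy:
    print(heading)
    for bug in bugreports:
        print('  #%d: %s' % (bug.bug_num, bug.subject))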