def unrep_with_dbd_issues():
    """
    Find unreproducible packages whose diffoscope output is missing or broken.

    Returns a 3-tuple:
      without_dbd:         (pkg, version, suite, arch) with no diffoscope file
      bad_dbd:             (pkg, version, suite, arch) whose file is not HTML
      sources_without_dbd: set of package names from either list
    """
    log.info('running unrep_with_dbd_issues check...')
    without_dbd = []
    bad_dbd = []
    sources_without_dbd = set()
    query = '''SELECT s.name, r.version, s.suite, s.architecture
               FROM sources AS s JOIN results AS r ON r.package_id=s.id
               WHERE r.status='unreproducible'
               ORDER BY s.name ASC, s.suite DESC, s.architecture ASC'''
    results = query_db(query)
    for pkg, version, suite, arch in results:
        eversion = strip_epoch(version)
        dbd = DBD_PATH + '/' + suite + '/' + arch + '/' + pkg + '_' + \
            eversion + '.diffoscope.html'
        if not os.access(dbd, os.R_OK):
            without_dbd.append((pkg, version, suite, arch))
            sources_without_dbd.add(pkg)
            log.warning(suite + '/' + arch + '/' + pkg + ' (' + version +
                        ') is '
                        'unreproducible without diffoscope file.')
        else:
            log.debug(dbd + ' found.')
            # previously the file handle was opened without a context
            # manager and never explicitly closed
            with open(dbd, 'br') as f:
                data = f.read(3)
            # a real diffoscope html page starts with markup; anything
            # without a '<' in the first bytes is considered bogus
            if b'<' not in data:
                bad_dbd.append((pkg, version, suite, arch))
                log.warning(suite + '/' + arch + '/' + pkg + ' (' + version +
                            ') has '
                            'diffoscope output, but it does not seem to '
                            'be an HTML page.')
                sources_without_dbd.add(pkg)
    return without_dbd, bad_dbd, sources_without_dbd
示例#2
0
def db_update():
    """
    Update the database schema.
    Get a list of queries to perform from schema_updates.
    The need for an update is detected by checking the biggest value in the
    rb_schema table against the biggest value in the schema_updates dictionary.

    Returns True when updates were applied, False when already up to date;
    exits the process if the database is newer than the known updates.
    """
    current = query_db('SELECT MAX(version) FROM rb_schema')[0][0]
    if not current:
        log.warning('This is probably a new database, there are no ' +
                    'previous updates noted')
        current = 0
    last = max(schema_updates.keys())
    if current == last:
        return False
    if current > last:
        # fixed typo: was print_critiacal_message, which would raise a
        # NameError exactly when this error path is taken
        print_critical_message('The active database schema is higher than' +
                               '  the last update available.\nPlease check!')
        sys.exit(1)
    log.info('Found schema updates.')
    for update in range(current + 1, last + 1):
        log.info('Applying database update #' + str(update) + '. Queries:')
        startTime = datetime.now()
        for query in schema_updates[update]:
            log.info('\t' + query)
            query_db(query)
        log.info(
            str(len(schema_updates[update])) + ' queries executed in ' +
            str(datetime.now() - startTime))
    return True
def alien_rbpkg():
    """Return package-page files under RB_PKG_PATH with no matching source row."""
    log.info('running alien_rbpkg check...')
    query = '''SELECT s.name
               FROM sources AS s
               WHERE s.name='{pkg}' AND s.suite='{suite}'
               AND s.architecture='{arch}'
               ORDER BY s.name ASC, s.suite DESC, s.architecture ASC'''
    bad_files = []
    for root, dirs, files in os.walk(RB_PKG_PATH):
        if not files:
            continue
        # Derive "suite" and "arch" from the directory layout.
        if os.path.split(root)[1] == 'diffoscope-results':
            # pages under RB_PKG_PATH/{{suite}}/{{arch}}/diffoscope-results
            suite, arch = os.path.split(root)[0].rsplit('/', 2)[1:]
        else:
            # pages directly under RB_PKG_PATH/{{suite}}/{{arch}}/
            suite, arch = root.rsplit('/', 2)[1:]
        for page in files:
            pkg = page.rsplit('.', 1)[0]
            if query_db(query.format(pkg=pkg, suite=suite, arch=arch)):
                continue
            path = '/'.join([root, page])
            bad_files.append(path)
            log.warning(path + ' should not be there')
    return bad_files
def pbuilder_dep_fail():
    """List FTBFS packages whose build log shows a pbuilder dependency failure."""
    log.info('running pbuilder_dep_fail check...')
    bad_pkgs = []
    # we only care about these failures in the !unstable !experimental suites
    # as they happen all the time in there, as packages are buggy
    # and specific versions also come and go
    query = '''SELECT s.name, r.version, s.suite, s.architecture
               FROM sources AS s JOIN results AS r ON r.package_id=s.id
               WHERE r.status = 'FTBFS'
               AND s.suite NOT IN ('unstable', 'experimental')
               ORDER BY s.name ASC, s.suite DESC, s.architecture ASC'''
    # compile once instead of re-scanning the pattern per line
    needle = re.compile(b'E: pbuilder-satisfydepends failed.')
    for pkg, version, suite, arch in query_db(query):
        rbuild = RBUILD_PATH + '/' + suite + '/' + arch + '/' + pkg + '_' + \
            strip_epoch(version) + '.rbuild.log'
        if not os.access(rbuild, os.R_OK):
            continue
        log.debug('\tlooking at ' + rbuild)
        with open(rbuild, "br") as fd:
            for line in fd:
                if needle.search(line):
                    bad_pkgs.append((pkg, version, suite, arch))
                    log.warning(suite + '/' + arch + '/' + pkg + ' (' +
                                version +
                                ') failed to satisfy its dependencies.')
    return bad_pkgs
示例#5
0
def gather_meta_stats(suite, arch, pkgset_name):
    """
    Collect per-status statistics for a meta package set in suite/arch.

    Returns a dict with lists ('good', 'bad', 'ugly', 'rest'), their counts,
    the (non-zero) grand total and percentages — or {} when the package set
    file is missing or empty.
    """
    pkgset_file = os.path.join(PKGSET_DEF_PATH, 'meta_pkgsets-' + suite,
                               pkgset_name + '.pkgset')

    try:
        with open(pkgset_file) as f:
            pkgset_list = [s.strip() for s in f.readlines()]
    except FileNotFoundError:
        log.warning('No meta package set information exists at ' + pkgset_file)
        return {}

    if not pkgset_list:
        log.warning('No packages listed for package set: ' + pkgset_name)
        return {}

    package_where = "s.name in ('" + "', '".join(pkgset_list) + "')"
    root_query = """
        SELECT s.name
        FROM results AS r
        JOIN sources AS s ON r.package_id=s.id
        WHERE s.suite='{suite}'
        AND s.architecture='{arch}'
        AND date(r.build_date)<='{date}'
        AND {package_where}
    """.format(suite=suite,
               arch=arch,
               date=YESTERDAY,
               package_where=package_where)

    stats = {}
    good = query_db(root_query + "AND r.status = 'reproducible' " +
                    "ORDER BY s.name;")
    stats['good'] = [t[0] for t in good]
    stats['count_good'] = len(stats['good'])

    # note the space after the status literal: without it the concatenated
    # SQL read "...'FTBR'ORDER BY..." and was invalid
    bad = query_db(root_query + "AND r.status = 'FTBR' " +
                   "ORDER BY r.build_date;")
    stats['bad'] = [t[0] for t in bad]
    stats['count_bad'] = len(stats['bad'])

    ugly = query_db(root_query + "AND r.status = 'FTBFS' " +
                    "ORDER BY r.build_date;")
    stats['ugly'] = [t[0] for t in ugly]
    stats['count_ugly'] = len(stats['ugly'])

    rest = query_db(root_query + "AND (r.status != 'FTBFS' AND " +
                    "r.status != 'FTBR' AND " +
                    "r.status != 'reproducible') ORDER BY r.build_date;")
    stats['rest'] = [t[0] for t in rest]
    stats['count_rest'] = len(stats['rest'])

    stats['count_all'] = (stats['count_good'] + stats['count_bad'] +
                          stats['count_ugly'] + stats['count_rest'])
    # avoid division by zero in the percentages below
    stats['count_all'] = stats['count_all'] if stats['count_all'] else 1
    stats['percent_good'] = percent(stats['count_good'], stats['count_all'])
    stats['percent_bad'] = percent(stats['count_bad'], stats['count_all'])
    stats['percent_ugly'] = percent(stats['count_ugly'], stats['count_all'])
    stats['percent_rest'] = percent(stats['count_rest'], stats['count_all'])
    return stats
def alien_log(directory=None):
    """
    Walk a log directory (or RBUILD_PATH, LOGS_PATH and DIFFS_PATH when
    called with no argument) and find files whose version does not match
    the one currently recorded in the database.  Stale files older than a
    day are removed; newer ones are only reported, since they may belong
    to a diffoscope run in progress.  Returns the reported file paths.
    """
    if directory is None:
        bad_files = []
        for path in RBUILD_PATH, LOGS_PATH, DIFFS_PATH:
            bad_files.extend(alien_log(path))
        return bad_files
    log.info('running alien_log check over ' + directory + '...')
    query = '''SELECT r.version
               FROM sources AS s JOIN results AS r ON r.package_id=s.id
               WHERE r.status != '' AND s.name='{pkg}' AND s.suite='{suite}'
               AND s.architecture='{arch}'
               ORDER BY s.name ASC, s.suite DESC, s.architecture ASC'''
    bad_files = []
    for root, dirs, files in os.walk(directory):
        if not files:
            continue
        # directory layout is .../{suite}/{arch}/
        suite, arch = root.rsplit('/', 2)[1:]
        for file in files:
            # different files have different name patterns and need
            # different numbers of extension components stripped
            if file.endswith('.diff.gz'):
                rsplit_level = 2
            elif file.endswith('.gz'):
                rsplit_level = 3
            else:
                rsplit_level = 2
            fullpath = '/'.join([root, file])
            try:
                pkg, version = file.rsplit('.', rsplit_level)[0].rsplit('_', 1)
            except ValueError:
                log.critical(
                    bcolors.FAIL + fullpath +
                    ' does not seem to be a file that should be there' +
                    bcolors.ENDC)
                continue
            try:
                rversion = query_db(
                    query.format(pkg=pkg, suite=suite, arch=arch))[0][0]
            except IndexError:  # that package is not known (or not yet tested)
                rversion = ''  # continue towards the "bad file" path
            if strip_epoch(rversion) != version:
                try:
                    if os.path.getmtime(fullpath) < time.time() - 86400:
                        os.remove(fullpath)
                        # fixed duplicated "and and" in this message
                        log.warning(
                            fullpath +
                            ' should not be there and was older than a day so it was removed.'
                        )
                    else:
                        bad_files.append(fullpath)
                        log.info(
                            fullpath +
                            ' should not be there, but is also less than 24h old and will probably soon be gone. Probably diffoscope is running on that package right now.'
                        )
                except FileNotFoundError:
                    pass  # that bad file is already gone.
    return bad_files
示例#7
0
def fill_issue_in_note(issue):
    """Render the html snippet (optional url + description) for one issue."""
    details = issues[issue]
    parts = []
    if 'url' in details:
        parts.append(note_issue_html_url.substitute(url=details['url']))
    if 'description' in details:
        desc = details['description'].replace('\n', '<br />')
        parts.append(note_issue_html_desc.substitute(description=desc))
    else:
        log.warning("The issue " + issue + " misses a description")
    return note_issue_html.substitute(issue=issue, issue_info=''.join(parts))
def alien_history():
    """Remove files in HISTORY_PATH not matching any known source package."""
    log.info('running alien_history check...')
    known = {row[0] for row in query_db('SELECT DISTINCT name FROM sources')}
    bad_files = []
    for entry in sorted(os.listdir(HISTORY_PATH)):
        full_path = os.path.join(HISTORY_PATH, entry)
        if os.path.isdir(full_path):
            continue
        if entry.rsplit('.', 1)[0] in known:
            continue
        bad_files.append(full_path)
        os.remove(full_path)
        log.warning('%s should not be there so it has been removed.', full_path)
    return bad_files
示例#9
0
def purge_old_issues(issues):
    """
    Delete issue pages under ISSUES_PATH whose identifier is no longer
    present in the given issues mapping.
    """
    for root, dirs, files in os.walk(ISSUES_PATH):
        if not files:
            continue
        for file in files:
            # str.rsplit never raises and always returns at least one
            # element, so the previous try/except ValueError around this
            # line was unreachable dead code and has been removed.
            issue = file.rsplit('_', 1)[0]
            if issue not in issues:
                log.warning('removing ' + '/'.join([root, file]) + '...')
                os.remove('/'.join([root, file]))
示例#10
0
def gen_html_note(package, note):
    """
    Given a note as input (as a dict:
    {"package_name": {"version": "0.0.0", "comments": "blablabla",
     "bugs": [111, 222], "issues": ["issue1", "issue2"]}}
    ) it returns the html body
    """
    infos = ''
    # check for issues:
    if 'issues' in note:
        tmp = ''
        for issue in note['issues']:
            tmp += fill_issue_in_note(issue)
            issues_count.setdefault(issue, []).append(note['package'])
        infos += note_issues_html.substitute(issues=tmp)
    # check for bugs:
    if 'bugs' in note:
        bugurls = ''
        for bug in note['bugs']:
            try:
                bug_title = ': "%s"' % bugs[package][bug]['title']
            except KeyError:
                bug_title = ''
            bugurls += '<a href="https://bugs.debian.org/' + str(bug) + \
                       '" target="_parent">' + str(bug) + '</a>' + \
                       get_trailing_bug_icon(bug, bugs, package) + \
                       bug_title + '<br />'
        infos += note_bugs_html.substitute(bugs=bugurls)
    # check for comments:
    if 'comments' in note:
        comment = note['comments']
        comment = url2html.sub(r'<a href="\1">\1</a>', comment)
        comment = comment.replace('\n', '<br />')
        infos += note_comments_html.substitute(comments=comment)
    # resolve the version first, then render exactly once (previously the
    # whole render call was duplicated in both the try and except branches)
    try:
        version = str(note['version'])
    except KeyError:
        log.warning('You should really include a version in the ' +
                    str(note['package']) + ' note')
        version = 'N/A'
    return renderer.render(notes_body_template, {
        'version': version,
        'infos': infos,
        'notesgit_description': NOTESGIT_DESCRIPTION
    })
示例#11
0
def db_create_tables():
    """
    Check whether all tables are present, and create them if not.
    The check is done against sqlite_master, a reserved sqlite table
    containing all database metadata.
    """
    changed = False
    for table in db_schema:
        if table_exists(table['name']):
            continue
        log.warning(table['name'] + ' does not exists. Creating...')
        for query in table['query']:
            # collapse the query onto one line for readable logging
            log.info('\t' + re.sub(' +', ' ', query.replace('\n', ' ')))
            query_db(query)
            changed = True
    return changed
def lack_rbuild():
    """Report packages that were built but whose rbuild log is missing."""
    log.info('running lack_rbuild check...')
    bad_pkgs = []
    query = '''SELECT s.name, r.version, s.suite, s.architecture
               FROM sources AS s JOIN results AS r ON r.package_id=s.id
               WHERE r.status NOT IN ('blacklisted', '')
               ORDER BY s.name ASC, s.suite DESC, s.architecture ASC'''
    for pkg, version, suite, arch in query_db(query):
        logname = '{}_{}.rbuild.log.gz'.format(pkg, strip_epoch(version))
        rbuild = os.path.join(RBUILD_PATH, suite, arch) + '/' + logname
        if os.access(rbuild, os.R_OK):
            continue
        bad_pkgs.append((pkg, version, suite, arch))
        log.warning(suite + '/' + arch + '/' + pkg + ' (' + version +
                    ') has been '
                    'built, but a buildlog is missing.')
    return bad_pkgs
def alien_buildinfo():
    """
    Walk BUILDINFO_PATH and find .buildinfo files whose version does not
    match the one recorded in the database for a tested package.  Stale
    files older than a day are removed; newer ones are only reported.
    Returns the reported file paths.
    """
    log.info('running alien_buildinfo check...')
    query = '''SELECT r.version
               FROM sources AS s JOIN results AS r ON r.package_id=s.id
               WHERE r.status != '' AND s.name='{pkg}' AND s.suite='{suite}'
               AND s.architecture='{arch}'
               AND r.status IN ('reproducible', 'unreproducible')
               ORDER BY s.name ASC, s.suite DESC, s.architecture ASC'''
    bad_files = []
    for root, dirs, files in os.walk(BUILDINFO_PATH):
        if not files:
            continue
        # directory layout is .../{suite}/{arch}/
        suite, arch = root.rsplit('/', 2)[1:]
        for file in files:
            fullpath = '/'.join([root, file])
            try:
                pkg, version = file.rsplit('.', 1)[0].split('_')[:2]
            except ValueError:
                log.critical(
                    bcolors.FAIL + fullpath +
                    ' does not seem to be a file that should be there' +
                    bcolors.ENDC)
                continue
            try:
                rversion = query_db(
                    query.format(pkg=pkg, suite=suite, arch=arch))[0][0]
            except IndexError:  # that package is not known (or not yet tested)
                rversion = ''  # continue towards the "bad file" path
            if strip_epoch(rversion) != version:
                try:
                    if os.path.getmtime(fullpath) < time.time() - 86400:
                        os.remove(fullpath)
                        # fixed duplicated "and and" in this message
                        log.warning(
                            fullpath +
                            ' should not be there and was older than a day so it was removed.'
                        )
                    else:
                        bad_files.append(fullpath)
                        log.info(
                            fullpath +
                            ' should not be there, but is also less than 24h old and will probably soon be gone.'
                        )
                except FileNotFoundError:
                    pass  # that bad file is already gone.
    return bad_files
def not_unrep_with_dbd_file():
    """Report diffoscope html files for packages not marked unreproducible."""
    log.info('running not_unrep_with_dbd_file check...')
    bad_pkgs = []
    query = '''SELECT s.name, r.version, s.suite, s.architecture
               FROM sources AS s JOIN results AS r ON r.package_id=s.id
               WHERE r.status != 'unreproducible'
               ORDER BY s.name ASC, s.suite DESC, s.architecture ASC'''
    for pkg, version, suite, arch in query_db(query):
        page = pkg + '_' + strip_epoch(version) + '.diffoscope.html'
        dbd = '/'.join([DBD_PATH, suite, arch, page])
        if not os.access(dbd, os.R_OK):
            continue
        bad_pkgs.append((pkg, version, suite, arch))
        log.warning(dbd + ' exists but ' + suite + '/' + arch + '/' + pkg +
                    ' (' + version + ')'
                    ' is not unreproducible.')
    return bad_pkgs
def lack_buildinfo():
    """Report successfully built packages missing their .buildinfo file."""
    log.info('running lack_buildinfo check...')
    bad_pkgs = []
    query = '''SELECT s.name, r.version, s.suite, s.architecture
               FROM sources AS s JOIN results AS r ON r.package_id=s.id
               WHERE r.status NOT IN
                ('blacklisted', 'not for us', 'FTBFS', 'depwait', '404', '')
               ORDER BY s.name ASC, s.suite DESC, s.architecture ASC'''
    for pkg, version, suite, arch in query_db(query):
        name = pkg + '_' + strip_epoch(version) + '_' + arch + '.buildinfo'
        buildinfo = '/'.join([BUILDINFO_PATH, suite, arch, name])
        if os.access(buildinfo, os.R_OK):
            continue
        bad_pkgs.append((pkg, version, suite, arch))
        log.warning(suite + '/' + arch + '/' + pkg + ' (' + version +
                    ') has been '
                    'successfully built, but a .buildinfo is missing')
    return bad_pkgs
def not_unrep_with_dbd_file():
    """
    Report packages that are not FTBR yet still have diffoscope output
    (html, txt or json) lying around on disk.
    """
    log.info('running not_unrep_with_dbd_file check...')
    bad_pkgs = []
    query = '''SELECT s.name, r.version, s.suite, s.architecture
               FROM sources AS s JOIN results AS r ON r.package_id=s.id
               WHERE r.status != 'FTBR'
               ORDER BY s.name ASC, s.suite DESC, s.architecture ASC'''
    results = query_db(query)
    for pkg, version, suite, arch in results:
        eversion = strip_epoch(version)
        for prefix, extension in (
            (DBD_PATH, 'html'),
            (DBDTXT_PATH, 'txt.gz'),
            (DBDJSON_PATH, 'json.gz'),
        ):
            # the extension already carries '.gz' where appropriate; the
            # previous format string appended an extra '.gz' to every name
            # (yielding '.diffoscope.html.gz', '.diffoscope.txt.gz.gz', ...)
            # so no existing file could ever match
            filename = '{}/{}/{}/{}_{}.diffoscope.{}'.format(
                prefix, suite, arch, pkg, eversion, extension)
            if not os.access(filename, os.R_OK):
                continue
            bad_pkgs.append((pkg, version, suite, arch))
            log.warning(filename + ' exists but ' + suite + '/' + arch + '/' + pkg + ' (' + version + ')'
                        ' is not FTBR.')
    return bad_pkgs
示例#17
0
def db_update():
    """
    Update the database schema.
    Get a list of queries to perform from schema_updates.
    The need for an update is detected by checking the biggest value in the
    rb_schema table against the biggest value in the schema_updates dictionary.
    """
    current = query_db('SELECT MAX(version) FROM rb_schema')[0][0]
    if not current:
        log.warning('This is probably a new database, there are no ' +
                    'previous updates noted')
        current = 0
    last = max(schema_updates.keys())
    if current == last:
        return False
    if current > last:
        print_critical_message('The active database schema is higher than' +
                               '  the last update available.\nPlease check!')
        sys.exit(1)
    log.info('Found schema updates.')
    session = sessionmaker(bind=DB_ENGINE, autocommit=True)()
    # apply every missing update, each one in its own transaction
    for update in range(current + 1, last + 1):
        log.info('Applying database update #' + str(update) + '. Queries:')
        began = datetime.now()
        with session.begin():
            for query in schema_updates[update]:
                log.info('\t' + query)
                session.execute(query)
            # record the newly-reached schema version
            session.execute(
                "INSERT INTO rb_schema (version, date) "
                "VALUES (:ver, CURRENT_TIMESTAMP)", {'ver': update})
        log.info(
            str(len(schema_updates[update])) + ' queries executed in ' +
            str(datetime.now() - began))
    return True
示例#18
0
def load_notes():
    """
    Load the notes YAML file and drop entries for packages never tested.

    format:
    { 'package_name': {'version': '0.0', 'comments'<etc>}, 'package_name':{} }
    """
    with open(NOTES) as fd:
        # yaml.load without an explicit Loader can construct arbitrary
        # python objects; safe_load only builds plain data structures,
        # which is all the notes file needs.
        possible_notes = yaml.safe_load(fd)
    log.debug("notes loaded. There are " + str(len(possible_notes)) +
                  " package listed")
    notes = copy.copy(possible_notes)
    for package in possible_notes:   # check if every package listed on the notes
        try:                         # actually have been tested
            query = "SELECT s.name " + \
                    "FROM results AS r JOIN sources AS s ON r.package_id=s.id " + \
                    "WHERE s.name='{pkg}' AND r.status != ''"
            query = query.format(pkg=package)
            query_db(query)[0]  # just discard this result, we only care of its success
        except IndexError:
            log.warning("This query produces no results: " + query)
            log.warning("This means there is no tested package with the name " + package + ".")
            del notes[package]
    log.debug("notes checked. There are " + str(len(notes)) +
                  " package listed")
    return notes
def rest(scheduling_args, requester, local, suite, arch):
    "Actually schedule a package for a single suite on a single arch."

    # Shorter names
    reason = scheduling_args.message
    # NOTE(review): issue/status/built_after/built_before are bound here but
    # never used in this function — presumably consumed by a caller-side
    # filter; confirm before removing.
    issue = scheduling_args.issue
    status = scheduling_args.status
    built_after = scheduling_args.after
    built_before = scheduling_args.before
    packages = scheduling_args.packages
    artifacts = scheduling_args.keep_artifacts
    notify = scheduling_args.notify
    notify_on_start = scheduling_args.notify_on_start
    dry_run = scheduling_args.dry_run

    log.info("Scheduling packages in %s/%s", arch, suite)

    ids = []    # sources.id values to (re)schedule
    pkgs = []   # matching package names, kept parallel to ids

    query1 = """SELECT id FROM sources WHERE name='{pkg}' AND suite='{suite}'
                AND architecture='{arch}'"""
    query2 = """SELECT p.date_build_started
                FROM sources AS s JOIN schedule as p ON p.package_id=s.id
                WHERE p.package_id='{id}'"""
    for pkg in set(packages):
        # test whether the package actually exists
        result = query_db(query1.format(pkg=pkg, suite=suite, arch=arch))
        # tests whether the package is already building
        try:
            result2 = query_db(query2.format(id=result[0][0]))
        except IndexError:
            # query1 returned no rows: unknown package in this suite/arch
            log.error('%sThe package %s is not available in %s/%s%s',
                      bcolors.FAIL, pkg, suite, arch, bcolors.ENDC)
            continue
        try:
            # date_build_started is falsy -> scheduled but not building yet
            if not result2[0][0]:
                ids.append(result[0][0])
                pkgs.append(pkg)
            else:
                log.warning(bcolors.WARN + 'The package ' + pkg + ' is ' +
                            'already building, not scheduling it.' +
                            bcolors.ENDC)
        except IndexError:
            # it's not in the schedule
            ids.append(result[0][0])
            pkgs.append(pkg)

    def compose_irc_message():
        "One-shot closure to limit scope of the following local variables."
        # truncate very long package lists in the announcement
        blablabla = '✂…' if len(' '.join(pkgs)) > 257 else ''
        packages_txt = str(len(ids)) + ' packages ' if len(pkgs) > 1 else ''
        trailing = ' - artifacts will be preserved' if artifacts else ''
        trailing += ' - with irc notification' if notify else ''
        trailing += ' - notify on start too' if notify_on_start else ''

        message = requester + ' scheduled ' + packages_txt + \
            'in ' + suite + '/' + arch
        if reason:
            message += ', reason: \'' + reason + '\''
        message += ': ' + ' '.join(pkgs)[0:256] + blablabla + trailing
        return message

    info_msg = compose_irc_message()
    del compose_irc_message

    # these packages are manually scheduled, so should have high priority,
    # so schedule them in the past, so they are picked earlier :)
    # the current date is subtracted twice, so it sorts before early scheduling
    # schedule on the full hour so we can recognize them easily
    epoch = int(time.time())
    now = datetime.now()
    days = int(now.strftime('%j')) * 2
    hours = int(now.strftime('%H')) * 2
    minutes = int(now.strftime('%M'))
    time_delta = timedelta(days=days, hours=hours, minutes=minutes)
    date = (now - time_delta).strftime('%Y-%m-%d %H:%M')
    log.debug('date_scheduled = ' + date + ' time_delta = ' + str(time_delta))

    # a single person can't schedule more than 500 packages in the same day; this
    # is actually easy to bypass, but let's give some trust to the Debian people
    query = """SELECT count(*) FROM manual_scheduler
               WHERE requester = '{}' AND date_request > '{}'"""
    try:
        amount = int(
            query_db(query.format(requester, int(time.time() - 86400)))[0][0])
    except IndexError:
        amount = 0
    log.debug(requester + ' already scheduled ' + str(amount) +
              ' packages today')
    if amount + len(ids) > 500 and not local:
        log.error(
            bcolors.FAIL + 'You have exceeded the maximum number of manual ' +
            'reschedulings allowed for a day. Please ask in ' +
            '#debian-reproducible if you need to schedule more packages.' +
            bcolors.ENDC)
        sys.exit(1)

    # do the actual scheduling
    add_to_schedule = []   # rows to INSERT into schedule
    update_schedule = []   # rows to UPDATE in schedule
    save_schedule = []     # audit rows for manual_scheduler
    artifacts_value = 1 if artifacts else 0
    # notify levels: 0 = none, 1 = on completion, 2 = also on start
    if notify_on_start:
        do_notify = 2
    elif notify or artifacts:
        do_notify = 1
    else:
        do_notify = 0

    schedule_table = db_table('schedule')
    # map package_id -> schedule row id for packages already scheduled;
    # only built when ids is non-empty, which is safe because the loop
    # below does not run otherwise
    if ids:
        existing_pkg_ids = dict(
            query_db(
                sql.select([
                    schedule_table.c.package_id,
                    schedule_table.c.id,
                ]).where(schedule_table.c.package_id.in_(ids))))

    for id in ids:
        if id in existing_pkg_ids:
            update_schedule.append({
                'update_id': existing_pkg_ids[id],
                'package_id': id,
                'date_scheduled': date,
                'save_artifacts': artifacts_value,
                'notify': str(do_notify),
                'scheduler': requester,
            })
        else:
            add_to_schedule.append({
                'package_id': id,
                'date_scheduled': date,
                'save_artifacts': artifacts_value,
                'notify': str(do_notify),
                'scheduler': requester,
            })

        # always record the manual request for the daily quota check above
        save_schedule.append({
            'package_id': id,
            'requester': requester,
            'date_request': epoch,
        })

    log.debug('Packages about to be scheduled: ' + str(add_to_schedule) +
              str(update_schedule))

    update_schedule_query = schedule_table.update().\
                            where(schedule_table.c.id == sql.bindparam('update_id'))
    insert_schedule_query = schedule_table.insert()
    insert_manual_query = db_table('manual_scheduler').insert()

    # run all three statements in one transaction so a failure rolls back
    # the whole scheduling request
    if not dry_run:
        transaction = conn_db.begin()
        if add_to_schedule:
            conn_db.execute(insert_schedule_query, add_to_schedule)
        if update_schedule:
            conn_db.execute(update_schedule_query, update_schedule)
        if save_schedule:
            conn_db.execute(insert_manual_query, save_schedule)
        transaction.commit()
    else:
        log.info('Ran with --dry-run, scheduled nothing')

    log.info(bcolors.GOOD + info_msg + bcolors.ENDC)
    # announce on IRC unless this is the jenkins maintenance job itself
    # or nothing was actually scheduled
    if not (local
            and requester == "jenkins maintenance job") and len(ids) != 0:
        if not dry_run:
            # Always announce on -changes
            irc_msg(info_msg, 'debian-reproducible-changes')
            # Announce some messages on main channel
            if notify_on_start or artifacts:
                irc_msg(info_msg)
示例#20
0
def build_leading_text_section(section, rows, suite, arch):
    """
    Build the leading html paragraph (icon plus summary text) for one
    section of a package-list page.

    section: dict describing the section; 'icon_link', 'icon_status',
             'text', 'timespan' and 'query2' are all optional keys.
    rows:    the rows already fetched for this section (only len() is used).
    Returns the assembled html string.
    """
    html = '<p>\n' + tab
    total = len(rows)
    # total number of tested packages in this suite/arch, used as the
    # denominator for the percentages below
    count_total = int(
        query_db(queries['count_total'].params({
            'suite': suite,
            'arch': arch
        }))[0][0])
    try:
        percent = round(((total / count_total) * 100), 1)
    except ZeroDivisionError:
        log.error('Looks like there are either no tested package or no ' +
                  'packages available at all. Maybe it\'s a new database?')
        percent = 0.0
    try:
        html += '<a href="' + section['icon_link'] + '" target="_parent">'
        no_icon_link = False
    except KeyError:
        no_icon_link = True  # to avoid closing the </a> tag below
    if section.get('icon_status'):
        html += '<img src="/static/' + section['icon_status']
        html += '" alt="reproducible icon" />'
    if not no_icon_link:
        html += '</a>'
    html += '\n' + tab
    if section.get('text') and section.get('timespan'):
        # timespan sections re-derive the percentage from a second query
        count = len(
            query_db(queries[section['query2']].params({
                'suite': suite,
                'arch': arch
            })))
        percent = round(((count / count_total) * 100), 1)
        timespan = section['timespan']
        timespan_date = timespan_date_map[timespan]
        timespan_count = int(
            query_db(queries['count_timespan'].params({
                'suite':
                suite,
                'arch':
                arch,
                'timespan_date':
                timespan_date
            }))[0][0])
        try:
            # NOTE(review): numerator here is `total` (len(rows)), not the
            # `count` computed just above — looks intentional but worth
            # confirming against the template's expectations.
            timespan_percent = round(((total / timespan_count) * 100), 1)
        except ZeroDivisionError:
            log.error('Looks like there are either no tested package or no ' +
                      'packages available at all. Maybe it\'s a new database?')
            timespan_percent = 0

        html += section['text'].substitute(tot=total,
                                           percent=percent,
                                           timespan_percent=timespan_percent,
                                           timespan_count=timespan_count,
                                           count_total=count_total,
                                           count=count,
                                           suite=suite,
                                           arch=arch)
    elif section.get('text'):
        html += section['text'].substitute(tot=total,
                                           percent=percent,
                                           suite=suite,
                                           arch=arch)
    else:
        log.warning('There is no text for this section')
    html += '\n</p>\n'
    return html
示例#21
0
def gen_html_issue(issue, suite):
    """
    Given a issue as input (as a dict:
    {"issue_identifier": {"description": "blablabla", "url": "blabla"}}
    ) it returns the html body
    """
    # links to the issue in other suites
    suite_links = ''
    for i in SUITES:
        if suite_links != '':
            suite_links += ' / '
        if i != suite:
            suite_links += '<a href="' + REPRODUCIBLE_URL + ISSUES_URI + '/' + i + '/' + issue + '_issue.html">' + i + '</a>'
        else:
            # the current suite is shown as plain emphasized text, not a link
            suite_links += '<em>' + i + '</em>'
    # check for url:
    if 'url' in issues[issue]:
        url = issue_html_url.substitute(url=issues[issue]['url'])
    else:
        url = ''
    # add affected packages:
    affected = ''

    results = db_table('results')
    sources = db_table('sources')
    # packages in suite/arch with a given status, restricted below to the
    # packages this issue is noted against
    sql = select(
        [sources.c.name]
    ).select_from(
        results.join(sources)
    ).where(
        and_(
            sources.c.suite == bindparam('suite'),
            sources.c.architecture == bindparam('arch'),
            results.c.status == bindparam('status'),
        )
    ).order_by(
        sources.c.name
    )
    try:
        # NOTE(review): only amd64 is inspected here — confirm this is
        # intentional rather than a leftover.
        arch = 'amd64'
        for status in Status:
            # each Status member's .value appears to carry name/icon/
            # spokenstatus attributes used below — TODO confirm
            status = status.value
            pkgs = query_db(sql.where(sources.c.name.in_(issues_count[issue]))\
                .params({'suite': suite, 'arch': arch, 'status': status.name}))
            pkgs = [p[0] for p in pkgs]
            if not pkgs:
                continue
            affected += tab*4 + '<p>\n'
            affected += tab*5 + '<img src="/static/{}"'.format(status.icon)
            affected += ' alt="' + status.name + ' icon" />\n'
            affected += tab*5 + str(len(pkgs)) + ' ' + status.spokenstatus
            affected += ' packages in ' + suite + '/' + arch +':\n'
            affected += tab*5 + '<code>\n'
            pkgs_popcon = issues_popcon_annotate(pkgs)
            try:
                # sort packages with open bugs last (False < True)
                for pkg, popc_num, is_popular in sorted(pkgs_popcon, key=lambda x: x[0] in bugs):
                    affected += tab*6 + Package(pkg).html_link(suite, arch, bugs, popc_num, is_popular)
            except ValueError:
                pass
            affected += tab*5 + '</code>\n'
            affected += tab*4 + '</p>\n'
    except KeyError:    # The note is not listed in any package, that is
        affected = '<i>None</i>'
    # check for description:
    try:
        desc = issues[issue]['description']
    except KeyError:
        log.warning('You should really include a description in the ' +
              issue + ' issue')
        desc = 'N/A'
    # linkify URLs and preserve line breaks in the html rendering
    desc = url2html.sub(r'<a href="\1">\1</a>', desc)
    desc = desc.replace('\n', '<br />')
    return issue_html.substitute(issue=issue, urls=url, description=desc,
                                   affected_pkgs=affected,
                                   suite=suite, suite_links=suite_links,
                                   notesgit_description=NOTESGIT_DESCRIPTION)