Code example #1
File: utils.py Project: carlosduclos/dak
def parse_args(Options):
    """ Handle -a, -c and -s arguments; returns them as SQL constraints """
    # XXX: This should go away and everything which calls it be converted
    #      to use SQLA properly.  For now, we'll just fix it not to use
    #      the old Pg interface though
    session = DBConn().session()
    # Process suite
    if Options["Suite"]:
        suite_ids_list = []
        for suitename in split_args(Options["Suite"]):
            suite = get_suite(suitename, session=session)
            if not suite or suite.suite_id is None:
                warn("suite '%s' not recognised." % (suite and suite.suite_name or suitename))
            else:
                suite_ids_list.append(suite.suite_id)
        if suite_ids_list:
            con_suites = "AND su.id IN (%s)" % ", ".join([ str(i) for i in suite_ids_list ])
        else:
            fubar("No valid suite given.")
    else:
        con_suites = ""

    # Process component
    if Options["Component"]:
        component_ids_list = []
        for componentname in split_args(Options["Component"]):
            component = get_component(componentname, session=session)
            if component is None:
                warn("component '%s' not recognised." % (componentname))
            else:
                component_ids_list.append(component.component_id)
        if component_ids_list:
            con_components = "AND c.id IN (%s)" % ", ".join([ str(i) for i in component_ids_list ])
        else:
            fubar("No valid component given.")
    else:
        con_components = ""

    # Process architecture
    con_architectures = ""
    check_source = 0
    if Options["Architecture"]:
        arch_ids_list = []
        for archname in split_args(Options["Architecture"]):
            if archname == "source":
                check_source = 1
            else:
                arch = get_architecture(archname, session=session)
                if arch is None:
                    warn("architecture '%s' not recognised." % (archname))
                else:
                    arch_ids_list.append(arch.arch_id)
        if arch_ids_list:
            con_architectures = "AND a.id IN (%s)" % ", ".join([ str(i) for i in arch_ids_list ])
        else:
            if not check_source:
                fubar("No valid architecture given.")
    else:
        check_source = 1

    return (con_suites, con_architectures, con_components, check_source)
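
The three con_* strings returned above are WHERE-clause fragments that a caller splices into its own SQL. Below is a minimal sketch of such a caller, assuming the same dak environment (DBConn, parse_args); the base query and its table aliases su, a and c are made up for illustration and are not part of the example above.

# Hypothetical caller: the base query and its aliases (su, a, c) are assumed
# here purely to show where the returned fragments slot in; check_source is
# ignored in this sketch.
session = DBConn().session()
(con_suites, con_architectures, con_components, check_source) = parse_args(Options)

q = session.execute("""
SELECT b.package, su.suite_name, a.arch_string, c.name
  FROM binaries b
  JOIN bin_associations ba ON b.id = ba.bin
  JOIN suite su ON ba.suite = su.id
  JOIN architecture a ON b.architecture = a.id
  JOIN files_archive_map fam ON b.file = fam.file_id
  JOIN component c ON fam.component_id = c.id
  WHERE true %s %s %s
""" % (con_suites, con_architectures, con_components))
for row in q:
    print row
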
Code example #2
File: utils.py Project: carlosduclos/dak
def check_reverse_depends(removals, suite, arches=None, session=None, cruft=False, quiet=False):
    dbsuite = get_suite(suite, session)
    overridesuite = dbsuite
    if dbsuite.overridesuite is not None:
        overridesuite = get_suite(dbsuite.overridesuite, session)
    dep_problem = 0
    p2c = {}
    all_broken = defaultdict(lambda: defaultdict(set))
    if arches:
        all_arches = set(arches)
    else:
        all_arches = set(x.arch_string for x in get_suite_architectures(suite))
    all_arches -= set(["source", "all"])
    removal_set = set(removals)
    metakey_d = get_or_set_metadatakey("Depends", session)
    metakey_p = get_or_set_metadatakey("Provides", session)
    params = {
        'suite_id':     dbsuite.suite_id,
        'metakey_d_id': metakey_d.key_id,
        'metakey_p_id': metakey_p.key_id,
    }
    for architecture in all_arches | set(['all']):
        deps = {}
        sources = {}
        virtual_packages = {}
        params['arch_id'] = get_architecture(architecture, session).arch_id

        statement = '''
            SELECT b.package, s.source, c.name as component,
                (SELECT bmd.value FROM binaries_metadata bmd WHERE bmd.bin_id = b.id AND bmd.key_id = :metakey_d_id) AS depends,
                (SELECT bmp.value FROM binaries_metadata bmp WHERE bmp.bin_id = b.id AND bmp.key_id = :metakey_p_id) AS provides
                FROM binaries b
                JOIN bin_associations ba ON b.id = ba.bin AND ba.suite = :suite_id
                JOIN source s ON b.source = s.id
                JOIN files_archive_map af ON b.file = af.file_id
                JOIN component c ON af.component_id = c.id
                WHERE b.architecture = :arch_id'''
        query = session.query('package', 'source', 'component', 'depends', 'provides'). \
            from_statement(statement).params(params)
        for package, source, component, depends, provides in query:
            sources[package] = source
            p2c[package] = component
            if depends is not None:
                deps[package] = depends
            # Maintain a counter for each virtual package.  If a
            # Provides: exists, set the counter to 0 and count all
            # provides by a package not in the list for removal.
            # If the counter stays 0 at the end, we know that only
            # the to-be-removed packages provided this virtual
            # package.
            if provides is not None:
                for virtual_pkg in provides.split(","):
                    virtual_pkg = virtual_pkg.strip()
                    if virtual_pkg == package: continue
                    if virtual_pkg not in virtual_packages:
                        virtual_packages[virtual_pkg] = 0
                    if package not in removals:
                        virtual_packages[virtual_pkg] += 1

        # If a virtual package is only provided by the to-be-removed
        # packages, treat the virtual package as to-be-removed too.
        removal_set.update(virtual_pkg for virtual_pkg in virtual_packages if not virtual_packages[virtual_pkg])

        # Check binary dependencies (Depends)
        for package in deps:
            if package in removals: continue
            try:
                parsed_dep = apt_pkg.parse_depends(deps[package])
            except ValueError as e:
                print "Error for package %s: %s" % (package, e)
                parsed_dep = []
            for dep in parsed_dep:
                # Check for partial breakage.  If a package has an ORed
                # dependency, there is only a dependency problem if all
                # packages in the ORed depends will be removed.
                unsat = 0
                for dep_package, _, _ in dep:
                    if dep_package in removals:
                        unsat += 1
                if unsat == len(dep):
                    component = p2c[package]
                    source = sources[package]
                    if component != "main":
                        source = "%s/%s" % (source, component)
                    all_broken[source][package].add(architecture)
                    dep_problem = 1

    if all_broken and not quiet:
        if cruft:
            print "  - broken Depends:"
        else:
            print "# Broken Depends:"
        for source, bindict in sorted(all_broken.items()):
            lines = []
            for binary, arches in sorted(bindict.items()):
                if arches == all_arches or 'all' in arches:
                    lines.append(binary)
                else:
                    lines.append('%s [%s]' % (binary, ' '.join(sorted(arches))))
            if cruft:
                print '    %s: %s' % (source, lines[0])
            else:
                print '%s: %s' % (source, lines[0])
            for line in lines[1:]:
                if cruft:
                    print '    ' + ' ' * (len(source) + 2) + line
                else:
                    print ' ' * (len(source) + 2) + line
        if not cruft:
            print

    # Check source dependencies (Build-Depends and Build-Depends-Indep)
    all_broken = defaultdict(set)
    metakey_bd = get_or_set_metadatakey("Build-Depends", session)
    metakey_bdi = get_or_set_metadatakey("Build-Depends-Indep", session)
    params = {
        'suite_id':    dbsuite.suite_id,
        'metakey_ids': (metakey_bd.key_id, metakey_bdi.key_id),
    }
    statement = '''
        SELECT s.source, string_agg(sm.value, ', ') as build_dep
           FROM source s
           JOIN source_metadata sm ON s.id = sm.src_id
           WHERE s.id in
               (SELECT source FROM src_associations
                   WHERE suite = :suite_id)
               AND sm.key_id in :metakey_ids
           GROUP BY s.id, s.source'''
    query = session.query('source', 'build_dep').from_statement(statement). \
        params(params)
    for source, build_dep in query:
        if source in removals: continue
        parsed_dep = []
        if build_dep is not None:
            # Remove [arch] information since we want to see breakage on all arches
            build_dep = re_build_dep_arch.sub("", build_dep)
            try:
                parsed_dep = apt_pkg.parse_src_depends(build_dep)
            except ValueError as e:
                print "Error for source %s: %s" % (source, e)
        for dep in parsed_dep:
            unsat = 0
            for dep_package, _, _ in dep:
                if dep_package in removals:
                    unsat += 1
            if unsat == len(dep):
                component, = session.query(Component.component_name) \
                    .join(Component.overrides) \
                    .filter(Override.suite == overridesuite) \
                    .filter(Override.package == re.sub('/(contrib|non-free)$', '', source)) \
                    .join(Override.overridetype).filter(OverrideType.overridetype == 'dsc') \
                    .first()
                key = source
                if component != "main":
                    key = "%s/%s" % (source, component)
                all_broken[key].add(pp_deps(dep))
                dep_problem = 1

    if all_broken and not quiet:
        if cruft:
            print "  - broken Build-Depends:"
        else:
            print "# Broken Build-Depends:"
        for source, bdeps in sorted(all_broken.items()):
            bdeps = sorted(bdeps)
            if cruft:
                print '    %s: %s' % (source, bdeps[0])
            else:
                print '%s: %s' % (source, bdeps[0])
            for bdep in bdeps[1:]:
                if cruft:
                    print '    ' + ' ' * (len(source) + 2) + bdep
                else:
                    print ' ' * (len(source) + 2) + bdep
        if not cruft:
            print

    return dep_problem
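
The partial-breakage rule above only reports a problem when every alternative of an ORed dependency is scheduled for removal. A standalone illustration of that rule with apt_pkg and made-up package names (not part of dak itself):

import apt_pkg

removals = set(["libfoo1", "libfoo1-compat"])

# parse_depends() returns one list per OR-group, each entry being a
# (name, version, operator) tuple.
for dep in apt_pkg.parse_depends("libfoo1 | libfoo1-compat, libbar2"):
    unsat = sum(1 for name, _, _ in dep if name in removals)
    if unsat == len(dep):
        # every alternative in this OR-group is being removed
        print "would break: %s" % " | ".join(name for name, _, _ in dep)
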
Code example #3
File: lists.py Project: pombreda/tanglu-dak
def getBinaries(suite, component, architecture, type, session, timestamp = None):
    '''
    Calculates the binaries of the given type ('deb' or 'udeb') in the given
    suite, component and architecture, optionally limited to binaries newer
    than timestamp.  Returns a generator that yields a tuple of binary id and
    full pathname to the (u)deb file.  See function writeBinaryList() in
    dak/generate_filelist.py for an example that uses this function.
    '''
    extra_cond = ""
    if timestamp:
        extra_cond = "AND extract(epoch from ba.created) > %d" % timestamp
    query = """
CREATE TEMP TABLE b_candidates (
    id integer,
    source integer,
    file integer,
    architecture integer);

INSERT INTO b_candidates (id, source, file, architecture)
    SELECT b.id, b.source, b.file, b.architecture
        FROM binaries b
        JOIN bin_associations ba ON b.id = ba.bin
        WHERE b.type = :type AND ba.suite = :suite AND
            b.architecture IN (:arch_all, :architecture) %s;

CREATE TEMP TABLE gf_candidates (
    id integer,
    filename text,
    path text,
    architecture integer,
    src integer,
    source text);

INSERT INTO gf_candidates (id, filename, path, architecture, src, source)
    SELECT bc.id, c.name || '/' || f.filename, archive.path || 'pool/' , bc.architecture, bc.source as src, s.source
        FROM b_candidates bc
        JOIN source s ON bc.source = s.id
        JOIN files f ON bc.file = f.id
        JOIN files_archive_map fam ON f.id = fam.file_id
        JOIN component c ON fam.component_id = c.id
        JOIN archive ON fam.archive_id = archive.id
        JOIN suite ON suite.archive_id = archive.id

        WHERE c.id = :component AND suite.id = :suite;

WITH arch_any AS

    (SELECT id, path, filename FROM gf_candidates
        WHERE architecture <> :arch_all),

     arch_all_with_any AS
    (SELECT id, path, filename FROM gf_candidates
        WHERE architecture = :arch_all AND
              src IN (SELECT src FROM gf_candidates WHERE architecture <> :arch_all)),

     arch_all_without_any AS
    (SELECT id, path, filename FROM gf_candidates
        WHERE architecture = :arch_all AND
              source NOT IN (SELECT DISTINCT source FROM gf_candidates WHERE architecture <> :arch_all)),

     filelist AS
    (SELECT * FROM arch_any
    UNION
    SELECT * FROM arch_all_with_any
    UNION
    SELECT * FROM arch_all_without_any)

    SELECT * FROM filelist ORDER BY filename
    """ % extra_cond
    args = { 'suite': suite.suite_id,
             'component': component.component_id,
             'architecture': architecture.arch_id,
             'arch_all': get_architecture('all', session).arch_id,
             'type': type }
    return fetch(query, args, session)
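
As the docstring says, the caller iterates over the returned generator of (id, pathname) tuples. A hedged sketch of such a caller, loosely modelled on what writeBinaryList() in dak/generate_filelist.py does; the output filename is made up, and suite, component, architecture and session are assumed to be the same kinds of objects getBinaries() itself expects.

# Hypothetical consumer of getBinaries(); per the docstring, each yielded
# tuple is (binary id, full pathname to the (u)deb file).
with open("filelist_%s_%s_%s" % (suite.suite_name,
                                 component.component_name,
                                 architecture.arch_string), "w") as fh:
    for binary_id, pathname in getBinaries(suite, component, architecture,
                                           "deb", session):
        fh.write(pathname + "\n")
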
Code example #4
def check_reverse_depends(removals, suite, arches=None, session=None, cruft=False, quiet=False, include_arch_all=True):
    dbsuite = get_suite(suite, session)
    overridesuite = dbsuite
    if dbsuite.overridesuite is not None:
        overridesuite = get_suite(dbsuite.overridesuite, session)
    dep_problem = 0
    p2c = {}
    all_broken = defaultdict(lambda: defaultdict(set))
    if arches:
        all_arches = set(arches)
    else:
        all_arches = set(x.arch_string for x in get_suite_architectures(suite))
    all_arches -= set(["source", "all"])
    removal_set = set(removals)
    metakey_d = get_or_set_metadatakey("Depends", session)
    metakey_p = get_or_set_metadatakey("Provides", session)
    params = {
        'suite_id':     dbsuite.suite_id,
        'metakey_d_id': metakey_d.key_id,
        'metakey_p_id': metakey_p.key_id,
    }
    if include_arch_all:
        rdep_architectures = all_arches | set(['all'])
    else:
        rdep_architectures = all_arches
    for architecture in rdep_architectures:
        deps = {}
        sources = {}
        virtual_packages = {}
        try:
            params['arch_id'] = get_architecture(architecture, session).arch_id
        except AttributeError:
            continue

        statement = sql.text('''
            SELECT b.package, s.source, c.name as component,
                (SELECT bmd.value FROM binaries_metadata bmd WHERE bmd.bin_id = b.id AND bmd.key_id = :metakey_d_id) AS depends,
                (SELECT bmp.value FROM binaries_metadata bmp WHERE bmp.bin_id = b.id AND bmp.key_id = :metakey_p_id) AS provides
                FROM binaries b
                JOIN bin_associations ba ON b.id = ba.bin AND ba.suite = :suite_id
                JOIN source s ON b.source = s.id
                JOIN files_archive_map af ON b.file = af.file_id
                JOIN component c ON af.component_id = c.id
                WHERE b.architecture = :arch_id''')
        query = session.query('package', 'source', 'component', 'depends', 'provides'). \
            from_statement(statement).params(params)
        for package, source, component, depends, provides in query:
            sources[package] = source
            p2c[package] = component
            if depends is not None:
                deps[package] = depends
            # Maintain a counter for each virtual package.  If a
            # Provides: exists, set the counter to 0 and count all
            # provides by a package not in the list for removal.
            # If the counter stays 0 at the end, we know that only
            # the to-be-removed packages provided this virtual
            # package.
            if provides is not None:
                for virtual_pkg in provides.split(","):
                    virtual_pkg = virtual_pkg.strip()
                    if virtual_pkg == package: continue
                    if virtual_pkg not in virtual_packages:
                        virtual_packages[virtual_pkg] = 0
                    if package not in removals:
                        virtual_packages[virtual_pkg] += 1

        # If a virtual package is only provided by the to-be-removed
        # packages, treat the virtual package as to-be-removed too.
        removal_set.update(virtual_pkg for virtual_pkg in virtual_packages if not virtual_packages[virtual_pkg])

        # Check binary dependencies (Depends)
        for package in deps:
            if package in removals: continue
            try:
                parsed_dep = apt_pkg.parse_depends(deps[package])
            except ValueError as e:
                print "Error for package %s: %s" % (package, e)
                parsed_dep = []
            for dep in parsed_dep:
                # Check for partial breakage.  If a package has an ORed
                # dependency, there is only a dependency problem if all
                # packages in the ORed depends will be removed.
                unsat = 0
                for dep_package, _, _ in dep:
                    if dep_package in removals:
                        unsat += 1
                if unsat == len(dep):
                    component = p2c[package]
                    source = sources[package]
                    if component != "main":
                        source = "%s/%s" % (source, component)
                    all_broken[source][package].add(architecture)
                    dep_problem = 1

    if all_broken and not quiet:
        if cruft:
            print "  - broken Depends:"
        else:
            print "# Broken Depends:"
        for source, bindict in sorted(all_broken.items()):
            lines = []
            for binary, arches in sorted(bindict.items()):
                if arches == all_arches or 'all' in arches:
                    lines.append(binary)
                else:
                    lines.append('%s [%s]' % (binary, ' '.join(sorted(arches))))
            if cruft:
                print '    %s: %s' % (source, lines[0])
            else:
                print '%s: %s' % (source, lines[0])
            for line in lines[1:]:
                if cruft:
                    print '    ' + ' ' * (len(source) + 2) + line
                else:
                    print ' ' * (len(source) + 2) + line
        if not cruft:
            print

    # Check source dependencies (Build-Depends and Build-Depends-Indep)
    all_broken = defaultdict(set)
    metakey_bd = get_or_set_metadatakey("Build-Depends", session)
    metakey_bdi = get_or_set_metadatakey("Build-Depends-Indep", session)
    if include_arch_all:
        metakey_ids = (metakey_bd.key_id, metakey_bdi.key_id)
    else:
        metakey_ids = (metakey_bd.key_id,)

    params = {
        'suite_id':    dbsuite.suite_id,
        'metakey_ids': metakey_ids,
    }
    statement = sql.text('''
        SELECT s.source, string_agg(sm.value, ', ') as build_dep
           FROM source s
           JOIN source_metadata sm ON s.id = sm.src_id
           WHERE s.id in
               (SELECT src FROM newest_src_association
                   WHERE suite = :suite_id)
               AND sm.key_id in :metakey_ids
           GROUP BY s.id, s.source''')
    query = session.query('source', 'build_dep').from_statement(statement). \
        params(params)
    for source, build_dep in query:
        if source in removals: continue
        parsed_dep = []
        if build_dep is not None:
            # Remove [arch] information since we want to see breakage on all arches
            build_dep = re_build_dep_arch.sub("", build_dep)
            try:
                parsed_dep = apt_pkg.parse_src_depends(build_dep)
            except ValueError as e:
                print "Error for source %s: %s" % (source, e)
        for dep in parsed_dep:
            unsat = 0
            for dep_package, _, _ in dep:
                if dep_package in removals:
                    unsat += 1
            if unsat == len(dep):
                component, = session.query(Component.component_name) \
                    .join(Component.overrides) \
                    .filter(Override.suite == overridesuite) \
                    .filter(Override.package == re.sub('/(contrib|non-free)$', '', source)) \
                    .join(Override.overridetype).filter(OverrideType.overridetype == 'dsc') \
                    .first()
                key = source
                if component != "main":
                    key = "%s/%s" % (source, component)
                all_broken[key].add(pp_deps(dep))
                dep_problem = 1

    if all_broken and not quiet:
        if cruft:
            print "  - broken Build-Depends:"
        else:
            print "# Broken Build-Depends:"
        for source, bdeps in sorted(all_broken.items()):
            bdeps = sorted(bdeps)
            if cruft:
                print '    %s: %s' % (source, bdeps[0])
            else:
                print '%s: %s' % (source, bdeps[0])
            for bdep in bdeps[1:]:
                if cruft:
                    print '    ' + ' ' * (len(source) + 2) + bdep
                else:
                    print ' ' * (len(source) + 2) + bdep
        if not cruft:
            print

    return dep_problem
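
A hedged sketch of invoking this newer variant (which adds the include_arch_all parameter) from a removal check; the suite name and the removal list are made-up inputs, and the usual dak session setup is assumed.

# Hypothetical call site: check whether removing the listed binaries from
# 'unstable' would leave broken reverse (build-)dependencies behind.
session = DBConn().session()
removals = ["libfoo1", "libfoo-dev"]
if check_reverse_depends(removals, "unstable", session=session,
                         include_arch_all=False):
    print "Removal would leave broken (Build-)Depends behind."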