def compose_sql_difference_type():
    """Produce SQL to compute a difference's `DistroSeriesDifferenceType`.

    Works with the parent_source_version and source_version fields as
    produced by the SQL from `compose_sql_find_differences`.

    :return: SQL query, as a string.
    """
    parameters = {
        'unique_to_derived_series':
        quote(DistroSeriesDifferenceType.UNIQUE_TO_DERIVED_SERIES),
        'missing_from_derived_series':
        quote(DistroSeriesDifferenceType.MISSING_FROM_DERIVED_SERIES),
        'different_versions':
        quote(DistroSeriesDifferenceType.DIFFERENT_VERSIONS),
    }
    return """
        CASE
            WHEN parent_source_version IS NULL THEN
                %(unique_to_derived_series)s
            WHEN source_version IS NULL THEN
                %(missing_from_derived_series)s
            ELSE %(different_versions)s
        END
        """ % parameters
Example #2
def needs_refresh(con, table, columns):
    '''Return true if the index needs to be rebuilt.

    We know this by looking in our cache to see what the previous
    definitions were, and by checking the --force command-line argument.
    '''
    current_columns = repr(sorted(columns))

    existing = execute(
        con, "SELECT columns FROM FtiCache WHERE tablename=%(table)s",
        results=True, args=vars()
        )
    if len(existing) == 0:
        log.debug("No fticache for %(table)s" % vars())
        sexecute(con, """
            INSERT INTO FtiCache (tablename, columns) VALUES (%s, %s)
            """ % (quote(table), quote(current_columns)))
        return True

    if not options.force:
        previous_columns = existing[0][0]
        if current_columns == previous_columns:
            log.debug("FtiCache for %(table)s still valid" % vars())
            return False
        log.debug("Cache out of date - %s != %s" % (
            current_columns, previous_columns
            ))
    sexecute(con, """
        UPDATE FtiCache SET columns = %s
        WHERE tablename = %s
        """ % (quote(current_columns), quote(table)))

    return True
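A hedged sketch of how this gate is typically used by the indexing script; `ALL_FTI` and `fti` are assumed names for the table/column configuration and the rebuild helper:

# Hypothetical driver loop (names assumed): rebuild the full-text
# index only when the column definitions changed or --force was given.
for table, columns in ALL_FTI:
    if needs_refresh(con, table, columns):
        log.info("Rebuilding full text index on %s", table)
        fti(con, table, columns)
    else:
        log.debug("No rebuild needed for %s", table)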
def compose_sql_find_latest_source_package_releases(distroseries):
    """Produce SQL that gets the last-published `SourcePackageRelease`s.

    Within `distroseries`, looks for the `SourcePackageRelease`
    belonging to each respective `SourcePackageName`'s respective latest
    `SourcePackagePublishingHistory`.

    For each of those, it produces a tuple consisting of:
     * `SourcePackageName` id: sourcepackagename
     * `SourcePackageRelease` id: sourcepackagerelease
     * Source package version: version.

    :return: SQL query, as a string.
    """
    parameters = {
        'active_status': quote(active_publishing_status),
        'distroseries': quote(distroseries),
        'main_archive': quote(distroseries.distribution.main_archive),
        'release_pocket': quote(PackagePublishingPocket.RELEASE),
    }
    return """
        SELECT DISTINCT ON (SPR.sourcepackagename)
            SPR.sourcepackagename,
            SPR.id As sourcepackagerelease,
            SPR.version
        FROM SourcePackagePublishingHistory AS SPPH
        JOIN SourcePackageRelease AS SPR ON SPR.id = SPPH.sourcepackagerelease
        WHERE
            SPPH.distroseries = %(distroseries)s AND
            SPPH.archive = %(main_archive)s AND
            SPPH.pocket = %(release_pocket)s AND
            SPPH.status IN %(active_status)s
        ORDER BY SPR.sourcepackagename, SPPH.id DESC
        """ % parameters
Example #4
def needs_refresh(con, table, columns):
    '''Return true if the index needs to be rebuilt.

    We know this by looking in our cache to see what the previous
    definitions were, and by checking the --force command-line argument.
    '''
    current_columns = repr(sorted(columns))

    existing = execute(
        con,
        "SELECT columns FROM FtiCache WHERE tablename=%(table)s",
        results=True,
        args=vars())
    if len(existing) == 0:
        log.debug("No fticache for %(table)s" % vars())
        sexecute(
            con, """
            INSERT INTO FtiCache (tablename, columns) VALUES (%s, %s)
            """ % (quote(table), quote(current_columns)))
        return True

    if not options.force:
        previous_columns = existing[0][0]
        if current_columns == previous_columns:
            log.debug("FtiCache for %(table)s still valid" % vars())
            return False
        log.debug("Cache out of date - %s != %s" %
                  (current_columns, previous_columns))
    sexecute(
        con, """
        UPDATE FtiCache SET columns = %s
        WHERE tablename = %s
        """ % (quote(current_columns), quote(table)))

    return True
def compose_sql_find_latest_source_package_releases(distroseries):
    """Produce SQL that gets the last-published `SourcePackageRelease`s.

    Within `distroseries`, looks for the `SourcePackageRelease`
    belonging to each respective `SourcePackageName`'s respective latest
    `SourcePackagePublishingHistory`.

    For each of those, it produces a tuple consisting of:
     * `SourcePackageName` id: sourcepackagename
     * `SourcePackageRelease` id: sourcepackagerelease
     * Source package version: version.

    :return: SQL query, as a string.
    """
    parameters = {
        'active_status': quote(active_publishing_status),
        'distroseries': quote(distroseries),
        'main_archive': quote(distroseries.distribution.main_archive),
        'release_pocket': quote(PackagePublishingPocket.RELEASE),
    }
    return """
        SELECT DISTINCT ON (SPR.sourcepackagename)
            SPR.sourcepackagename,
            SPR.id As sourcepackagerelease,
            SPR.version
        FROM SourcePackagePublishingHistory AS SPPH
        JOIN SourcePackageRelease AS SPR ON SPR.id = SPPH.sourcepackagerelease
        WHERE
            SPPH.distroseries = %(distroseries)s AND
            SPPH.archive = %(main_archive)s AND
            SPPH.pocket = %(release_pocket)s AND
            SPPH.status IN %(active_status)s
        ORDER BY SPR.sourcepackagename, SPPH.id DESC
        """ % parameters
def compose_sql_difference_type():
    """Produce SQL to compute a difference's `DistroSeriesDifferenceType`.

    Works with the parent_source_version and source_version fields as
    produced by the SQL from `compose_sql_find_differences`.

    :return: SQL query, as a string.
    """
    parameters = {
        'unique_to_derived_series': quote(
            DistroSeriesDifferenceType.UNIQUE_TO_DERIVED_SERIES),
        'missing_from_derived_series': quote(
            DistroSeriesDifferenceType.MISSING_FROM_DERIVED_SERIES),
        'different_versions': quote(
            DistroSeriesDifferenceType.DIFFERENT_VERSIONS),
    }
    return """
        CASE
            WHEN parent_source_version IS NULL THEN
                %(unique_to_derived_series)s
            WHEN source_version IS NULL THEN
                %(missing_from_derived_series)s
            ELSE %(different_versions)s
        END
        """ % parameters
def compose_sql_populate_distroseriesdiff(derived_series, parent_series,
                                          temp_table):
    """Create `DistroSeriesDifference` rows based on found differences.

    Uses field values that describe the difference, as produced by the
    SQL from `compose_sql_find_differences`:
     * sourcepackagename
     * source_version
     * parent_source_version

    Existing `DistroSeriesDifference` rows are not affected.

    :param derived_series: The derived `DistroSeries`.
    :param parent_series: The parent `DistroSeries`.
    :param temp_table: The name of a table to select the input fields
        from.
    :return: SQL query, as a string.
    """
    parameters = {
        'derived_series': quote(derived_series),
        'parent_series': quote(parent_series),
        'difference_type_expression': compose_sql_difference_type(),
        'needs_attention': quote(
            DistroSeriesDifferenceStatus.NEEDS_ATTENTION),
        'temp_table': quote_identifier(temp_table),
    }
    return """
        INSERT INTO DistroSeriesDifference (
            derived_series,
            parent_series,
            source_package_name,
            status,
            difference_type,
            source_version,
            parent_source_version)
        SELECT
            %(derived_series)s,
            %(parent_series)s,
            sourcepackagename,
            %(needs_attention)s,
            %(difference_type_expression)s,
            source_version,
            parent_source_version
        FROM %(temp_table)s
        WHERE sourcepackagename NOT IN (
            SELECT source_package_name
            FROM DistroSeriesDifference
            WHERE derived_series = %(derived_series)s)
        """ % parameters
    def searchByDisplayname(self, displayname, searchfor=None):
        """See ISignedCodeOfConductSet."""
        clauseTables = ['Person']

        # XXX: cprov 2005-02-27:
        # FTI presents problems when querying by incomplete names,
        # and I'm not sure whether the best solution here is to use
        # a trivial ILIKE query. Opinion required on review.

        # glue Person and SignedCoC table
        query = 'SignedCodeOfConduct.owner = Person.id'

        # XXX cprov 2005-03-02:
        # I'm not sure if this is the correct way to query ALL
        # entries. If it is, shouldn't it be part of the FTI
        # queries?

        # The name should work like a filter: if you don't enter
        # anything, you get everything.
        if displayname:
            query += ' AND Person.fti @@ ftq(%s)' % quote(displayname)

        # Apply the search directive, if any.
        if searchfor == 'activeonly':
            query += ' AND SignedCodeOfConduct.active = true'

        elif searchfor == 'inactiveonly':
            query += ' AND SignedCodeOfConduct.active = false'

        return SignedCodeOfConduct.select(
            query, clauseTables=clauseTables,
            orderBy='SignedCodeOfConduct.active')
    def _setPolicy(self, read_only=True):
        """Set the database session's default transaction read-only policy.

        :param read_only: True for read-only policy, False for read-write
            policy.
        """
        self.store.execute("SET %s TO %s" % (self.db_switch, quote(read_only)))
Example #10
    def searchByDisplayname(self, displayname, searchfor=None):
        """See ISignedCodeOfConductSet."""
        clauseTables = ['Person']

        # XXX: cprov 2005-02-27:
        # FTI presents problems when querying by incomplete names,
        # and I'm not sure whether the best solution here is to use
        # a trivial ILIKE query. Opinion required on review.

        # glue Person and SignedCoC table
        query = 'SignedCodeOfConduct.owner = Person.id'

        # XXX cprov 2005-03-02:
        # I'm not sure if this is the correct way to query ALL
        # entries. If it is, shouldn't it be part of the FTI
        # queries?

        # The name should work like a filter: if you don't enter
        # anything, you get everything.
        if displayname:
            query += ' AND Person.fti @@ ftq(%s)' % quote(displayname)

        # Apply the search directive, if any.
        if searchfor == 'activeonly':
            query += ' AND SignedCodeOfConduct.active = true'

        elif searchfor == 'inactiveonly':
            query += ' AND SignedCodeOfConduct.active = false'

        return SignedCodeOfConduct.select(query,
                                          clauseTables=clauseTables,
                                          orderBy='SignedCodeOfConduct.active')
Example #11
def generateResetSequencesSQL(cur):
    """Return SQL that will reset table sequences to match the data in them.
    """
    stmt = []
    for schema, sequence, table, column in listSequences(cur):
        if table is None or column is None:
            continue
        sql = "SELECT max(%s) FROM %s" % (
                quoteIdentifier(column), quoteIdentifier(table)
                )
        cur.execute(sql)
        last_value = cur.fetchone()[0]
        if last_value is None:
            last_value = 1
            flag = 'false'
        else:
            flag = 'true'
        sql = "setval(%s, %d, %s)" % (
                quote('%s.%s' % (schema, sequence)), int(last_value), flag
                )
        stmt.append(sql)
    if stmt:
        stmt = 'SELECT ' + ', '.join(stmt)
        return stmt
    else:
        return ''
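A hedged example of one generated fragment, assuming a sequence public.person_id_seq backing a person.id column whose maximum value is 42:

fragment = "setval(%s, %d, %s)" % ("'public.person_id_seq'", 42, 'true')
assert fragment == "setval('public.person_id_seq', 42, true)"
# The full statement then reads:
#   SELECT setval('public.person_id_seq', 42, true), ...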
def fetch_team_participation_info(log):
    """Fetch people, teams, memberships and participations."""
    slurp = partial(
        execute_long_query, ISlaveStore(TeamParticipation), log, 10000)

    people = dict(
        slurp(
            "SELECT id, name FROM Person"
            " WHERE teamowner IS NULL"
            "   AND merged IS NULL"))
    teams = dict(
        slurp(
            "SELECT id, name FROM Person"
            " WHERE teamowner IS NOT NULL"
            "   AND merged IS NULL"))
    team_memberships = defaultdict(set)
    results = slurp(
        "SELECT team, person FROM TeamMembership"
        " WHERE status in %s" % quote(ACTIVE_STATES))
    for (team, person) in results:
        team_memberships[team].add(person)
    team_participations = defaultdict(set)
    results = slurp(
        "SELECT team, person FROM TeamParticipation")
    for (team, person) in results:
        team_participations[team].add(person)

    # Don't hold any locks.
    transaction.commit()

    return people, teams, team_memberships, team_participations
    def _setPolicy(self, read_only=True):
        """Set the database session's default transaction read-only policy.

        :param read_only: True for read-only policy, False for read-write
            policy.
        """
        self.store.execute("SET %s TO %s" % (self.db_switch, quote(read_only)))
def fetch_team_participation_info(log):
    """Fetch people, teams, memberships and participations."""
    slurp = partial(execute_long_query, ISlaveStore(TeamParticipation), log,
                    10000)

    people = dict(
        slurp("SELECT id, name FROM Person"
              " WHERE teamowner IS NULL"
              "   AND merged IS NULL"))
    teams = dict(
        slurp("SELECT id, name FROM Person"
              " WHERE teamowner IS NOT NULL"
              "   AND merged IS NULL"))
    team_memberships = defaultdict(set)
    results = slurp("SELECT team, person FROM TeamMembership"
                    " WHERE status in %s" % quote(ACTIVE_STATES))
    for (team, person) in results:
        team_memberships[team].add(person)
    team_participations = defaultdict(set)
    results = slurp("SELECT team, person FROM TeamParticipation")
    for (team, person) in results:
        team_participations[team].add(person)

    # Don't hold any locks.
    transaction.commit()

    return people, teams, team_memberships, team_participations
Example #15
    def preloadVisibleStackedOnBranches(branches, user=None):
        """Preload the chains of stacked on branches related to the given list
        of branches. Only the branches visible for the given user are
        preloaded/returned.

        """
        if len(branches) == 0:
            return
        store = IStore(Branch)
        result = store.execute("""
            WITH RECURSIVE stacked_on_branches_ids AS (
                SELECT column1 as id FROM (VALUES %s) AS temp
                UNION
                SELECT DISTINCT branch.stacked_on
                FROM stacked_on_branches_ids, Branch AS branch
                WHERE
                    branch.id = stacked_on_branches_ids.id AND
                    branch.stacked_on IS NOT NULL
            )
            SELECT id from stacked_on_branches_ids
            """ % ', '.join(
            ["(%s)" % quote(id) for id in map(attrgetter('id'), branches)]))
        branch_ids = [res[0] for res in result.get_all()]
        # Not really sure this is useful: if a given branch is visible to a
        # user, then presumably the whole chain of branches it is stacked
        # on is visible to that user as well.
        expressions = [Branch.id.is_in(branch_ids)]
        if user is None:
            collection = AnonymousBranchCollection(
                branch_filter_expressions=expressions)
        else:
            collection = VisibleBranchCollection(
                user=user, branch_filter_expressions=expressions)
        return list(collection.getBranches())
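What the interpolated VALUES list looks like for branch ids 1, 2 and 3, assuming `quote` renders integers unchanged:

ids = [1, 2, 3]
values = ', '.join('(%s)' % branch_id for branch_id in ids)
assert values == '(1), (2), (3)'
# giving: ... FROM (VALUES (1), (2), (3)) AS temp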
    def preloadVisibleStackedOnBranches(branches, user=None):
        """Preload the chains of stacked on branches related to the given list
        of branches. Only the branches visible for the given user are
        preloaded/returned.

        """
        if len(branches) == 0:
            return
        store = IStore(Branch)
        result = store.execute("""
            WITH RECURSIVE stacked_on_branches_ids AS (
                SELECT column1 as id FROM (VALUES %s) AS temp
                UNION
                SELECT DISTINCT branch.stacked_on
                FROM stacked_on_branches_ids, Branch AS branch
                WHERE
                    branch.id = stacked_on_branches_ids.id AND
                    branch.stacked_on IS NOT NULL
            )
            SELECT id from stacked_on_branches_ids
            """ % ', '.join(
                ["(%s)" % quote(id)
                 for id in map(attrgetter('id'), branches)]))
        branch_ids = [res[0] for res in result.get_all()]
        # Not really sure this is useful: if a given branch is visible to a
        # user, then presumably the whole chain of branches it is stacked
        # on is visible to that user as well.
        expressions = [Branch.id.is_in(branch_ids)]
        if user is None:
            collection = AnonymousBranchCollection(
                branch_filter_expressions=expressions)
        else:
            collection = VisibleBranchCollection(
                user=user, branch_filter_expressions=expressions)
        return list(collection.getBranches())
def compose_sql_populate_distroseriesdiff(derived_series, parent_series,
                                          temp_table):
    """Create `DistroSeriesDifference` rows based on found differences.

    Uses field values that describe the difference, as produced by the
    SQL from `compose_sql_find_differences`:
     * sourcepackagename
     * source_version
     * parent_source_version

    Existing `DistroSeriesDifference` rows are not affected.

    :param derived_series: The derived `DistroSeries`.
    :param parent_series: The parent `DistroSeries`.
    :param temp_table: The name of a table to select the input fields
        from.
    :return: SQL query, as a string.
    """
    parameters = {
        'derived_series': quote(derived_series),
        'parent_series': quote(parent_series),
        'difference_type_expression': compose_sql_difference_type(),
        'needs_attention': quote(DistroSeriesDifferenceStatus.NEEDS_ATTENTION),
        'temp_table': quote_identifier(temp_table),
    }
    return """
        INSERT INTO DistroSeriesDifference (
            derived_series,
            parent_series,
            source_package_name,
            status,
            difference_type,
            source_version,
            parent_source_version)
        SELECT
            %(derived_series)s,
            %(parent_series)s,
            sourcepackagename,
            %(needs_attention)s,
            %(difference_type_expression)s,
            source_version,
            parent_source_version
        FROM %(temp_table)s
        WHERE sourcepackagename NOT IN (
            SELECT source_package_name
            FROM DistroSeriesDifference
            WHERE derived_series = %(derived_series)s)
        """ % parameters
Example #18
    def checkBin(self, binarypackagedata, distroarchinfo):
        """Returns a binarypackage -- if it exists."""
        try:
            binaryname = BinaryPackageName.byName(binarypackagedata.package)
        except SQLObjectNotFound:
            # If the binary package's name doesn't exist, don't even
            # bother looking for a binary package.
            return None

        version = binarypackagedata.version
        architecture = binarypackagedata.architecture

        clauseTables = [
            "BinaryPackageRelease", "DistroSeries", "BinaryPackageBuild",
            "DistroArchSeries"
        ]
        distroseries = distroarchinfo['distroarchseries'].distroseries

        # When looking for binaries, we need to remember that they are
        # shared between distribution releases, so match on the
        # distribution and the architecture tag of the distroarchseries
        # they were built for
        query = (
            "BinaryPackageRelease.binarypackagename=%s AND "
            "BinaryPackageRelease.version=%s AND "
            "BinaryPackageRelease.build = BinaryPackageBuild.id AND "
            "BinaryPackageBuild.distro_arch_series = DistroArchSeries.id AND "
            "DistroArchSeries.distroseries = DistroSeries.id AND "
            "DistroSeries.distribution = %d" %
            (binaryname.id, quote(version), distroseries.distribution.id))

        if architecture != "all":
            query += ("AND DistroArchSeries.architecturetag = %s" %
                      quote(architecture))

        try:
            bpr = BinaryPackageRelease.selectOne(query,
                                                 clauseTables=clauseTables)
        except SQLObjectMoreThanOneResultError:
            # XXX kiko 2005-10-27: Untested
            raise MultiplePackageReleaseError(
                "Found more than one "
                "entry for %s (%s) for %s in %s" %
                (binaryname.name, version, architecture,
                 distroseries.distribution.name))
        return bpr
Example #19
    def checkBin(self, binarypackagedata, distroarchinfo):
        """Returns a binarypackage -- if it exists."""
        try:
            binaryname = BinaryPackageName.byName(binarypackagedata.package)
        except SQLObjectNotFound:
            # If the binary package's name doesn't exist, don't even
            # bother looking for a binary package.
            return None

        version = binarypackagedata.version
        architecture = binarypackagedata.architecture

        clauseTables = ["BinaryPackageRelease", "DistroSeries",
                        "BinaryPackageBuild", "DistroArchSeries"]
        distroseries = distroarchinfo['distroarchseries'].distroseries

        # When looking for binaries, we need to remember that they are
        # shared between distribution releases, so match on the
        # distribution and the architecture tag of the distroarchseries
        # they were built for
        query = (
            "BinaryPackageRelease.binarypackagename=%s AND "
            "BinaryPackageRelease.version=%s AND "
            "BinaryPackageRelease.build = BinaryPackageBuild.id AND "
            "BinaryPackageBuild.distro_arch_series = DistroArchSeries.id AND "
            "DistroArchSeries.distroseries = DistroSeries.id AND "
            "DistroSeries.distribution = %d" %
            (binaryname.id, quote(version), distroseries.distribution.id))

        if architecture != "all":
            query += ("AND DistroArchSeries.architecturetag = %s" %
                      quote(architecture))

        try:
            bpr = BinaryPackageRelease.selectOne(
                query, clauseTables=clauseTables)
        except SQLObjectMoreThanOneResultError:
            # XXX kiko 2005-10-27: Untested
            raise MultiplePackageReleaseError("Found more than one "
                    "entry for %s (%s) for %s in %s" %
                    (binaryname.name, version, architecture,
                     distroseries.distribution.name))
        return bpr
Example #20
    def ensureBuild(self, binary, srcpkg, distroarchinfo, archtag):
        """Ensure a build record."""
        distroarchseries = distroarchinfo['distroarchseries']
        distribution = distroarchseries.distroseries.distribution
        clauseTables = [
            "BinaryPackageBuild",
            "DistroArchSeries",
            "DistroSeries",
        ]

        # XXX kiko 2006-02-03:
        # This method doesn't work for real bin-only NMUs that are
        # new versions of packages that were picked up by Gina before.
        # The reason for that is that these bin-only NMUs' corresponding
        # source package release will already have been built at least
        # once, and the two checks below will of course blow up when
        # doing it the second time.

        query = ("BinaryPackageBuild.source_package_release = %d AND "
                 "BinaryPackageBuild.distro_arch_series = "
                 "    DistroArchSeries.id AND "
                 "DistroArchSeries.distroseries = DistroSeries.id AND "
                 "DistroSeries.distribution = %d" %
                 (srcpkg.id, distribution.id))

        if archtag != "all":
            query += ("AND DistroArchSeries.architecturetag = %s" %
                      quote(archtag))

        try:
            build = BinaryPackageBuild.selectOne(query, clauseTables)
        except SQLObjectMoreThanOneResultError:
            # XXX kiko 2005-10-27: Untested.
            raise MultipleBuildError("More than one build was found "
                                     "for package %s (%s)" %
                                     (binary.package, binary.version))

        if build:
            for bpr in build.binarypackages:
                if bpr.binarypackagename.name == binary.package:
                    # XXX kiko 2005-10-27: Untested.
                    raise MultipleBuildError(
                        "Build %d was already found "
                        "for package %s (%s)" %
                        (build.id, binary.package, binary.version))
        else:
            processor = distroarchinfo['processor']
            build = getUtility(IBinaryPackageBuildSet).new(
                processor=processor,
                distro_arch_series=distroarchseries,
                status=BuildStatus.FULLYBUILT,
                source_package_release=srcpkg,
                pocket=self.pocket,
                archive=distroarchseries.main_archive)
        return build
Example #21
    def getTipRevisionsForBranches(self, branches):
        """See `IRevisionSet`."""
        # If there are no branch_ids, then return None.
        branch_ids = [branch.id for branch in branches]
        if not branch_ids:
            return None
        return Revision.select("""
            Branch.id in %s AND
            Revision.revision_id = Branch.last_scanned_id
            """ % quote(branch_ids),
            clauseTables=['Branch'], prejoins=['revision_author'])
Example #22
    def test_CacheSuggestivePOTemplates(self):
        switch_dbuser('testadmin')
        template = self.factory.makePOTemplate()
        self.runDaily()

        count, = IMasterStore(CommercialSubscription).execute("""
            SELECT count(*)
            FROM SuggestivePOTemplate
            WHERE potemplate = %s
            """ % sqlbase.quote(template.id)).get_one()

        self.assertEqual(1, count)
Example #23
    def byMsgid(cls, key):
        """Return a POMsgID object for the given msgid."""

        # We can't search directly on msgid, because this database column
        # contains values too large to index. Instead we search on its
        # hash, which *is* indexed.
        r = POMsgID.selectOne('sha1(msgid) = sha1(%s)' % quote(key))
        if r is None:
            # To be 100% compatible with the alternateID behaviour, we should
            # raise SQLObjectNotFound instead of KeyError
            raise SQLObjectNotFound(key)
        return r
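The lookup works because the table carries a functional index over the hash rather than over the oversized msgid column itself. A hedged sketch of that kind of index; the index name is an assumption, and sha1() is a SQL function installed in the Launchpad database, not a PostgreSQL built-in:

# PostgreSQL DDL sketch (index name assumed).
CREATE_MSGID_HASH_INDEX = (
    'CREATE INDEX pomsgid_msgid_sha1_idx ON POMsgID (sha1(msgid))')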
Example #24
    def byMsgid(cls, key):
        """Return a POMsgID object for the given msgid."""

        # We can't search directly on msgid, because this database column
        # contains values too large to index. Instead we search on its
        # hash, which *is* indexed.
        r = POMsgID.selectOne('sha1(msgid) = sha1(%s)' % quote(key))
        if r is None:
            # To be 100% compatible with the alternateID behaviour, we should
            # raise SQLObjectNotFound instead of KeyError
            raise SQLObjectNotFound(key)
        return r
Example #25
    def ensureBuild(self, binary, srcpkg, distroarchinfo, archtag):
        """Ensure a build record."""
        distroarchseries = distroarchinfo['distroarchseries']
        distribution = distroarchseries.distroseries.distribution
        clauseTables = [
            "BinaryPackageBuild",
            "DistroArchSeries",
            "DistroSeries",
            ]

        # XXX kiko 2006-02-03:
        # This method doesn't work for real bin-only NMUs that are
        # new versions of packages that were picked up by Gina before.
        # The reason for that is that these bin-only NMUs' corresponding
        # source package release will already have been built at least
        # once, and the two checks below will of course blow up when
        # doing it the second time.

        query = ("BinaryPackageBuild.source_package_release = %d AND "
                 "BinaryPackageBuild.distro_arch_series = "
                 "    DistroArchSeries.id AND "
                 "DistroArchSeries.distroseries = DistroSeries.id AND "
                 "DistroSeries.distribution = %d"
                 % (srcpkg.id, distribution.id))

        if archtag != "all":
            query += ("AND DistroArchSeries.architecturetag = %s"
                      % quote(archtag))

        try:
            build = BinaryPackageBuild.selectOne(query, clauseTables)
        except SQLObjectMoreThanOneResultError:
            # XXX kiko 2005-10-27: Untested.
            raise MultipleBuildError("More than one build was found "
                "for package %s (%s)" % (binary.package, binary.version))

        if build:
            for bpr in build.binarypackages:
                if bpr.binarypackagename.name == binary.package:
                    # XXX kiko 2005-10-27: Untested.
                    raise MultipleBuildError("Build %d was already found "
                        "for package %s (%s)" %
                        (build.id, binary.package, binary.version))
        else:
            processor = distroarchinfo['processor']
            build = getUtility(IBinaryPackageBuildSet).new(
                        processor=processor,
                        distro_arch_series=distroarchseries,
                        status=BuildStatus.FULLYBUILT,
                        source_package_release=srcpkg,
                        pocket=self.pocket,
                        archive=distroarchseries.main_archive)
        return build
    def __call__(self, batch_size):
        """See `ITunableLoop`.

        Loop body: pour rows with ids up to "next" over to to_table."""
        batch_size = int(batch_size)

        # Figure out what id lies exactly batch_size rows ahead.
        self.cur.execute("""
            SELECT id
            FROM %s
            WHERE id >= %s
            ORDER BY id
            OFFSET %s
            LIMIT 1
            """ % (self.from_table, quote(self.lowest_id), quote(batch_size)))
        end_id = self.cur.fetchone()

        if end_id is not None:
            next = end_id[0]
        else:
            next = self.highest_id

        next += 1

        self.prepareBatch(
            self.from_table, self.to_table, batch_size, self.lowest_id, next)

        self.logger.debug("pouring %s: %d rows (%d-%d)" % (
            self.from_table, batch_size, self.lowest_id, next))

        self.cur.execute("INSERT INTO %s (SELECT * FROM %s WHERE id < %d)"
                         % (self.to_table, self.from_table, next))

        self.cur.execute("DELETE FROM %s WHERE id < %d"
                         % (self.from_table, next))

        self.lowest_id = next
        self._commit()
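The OFFSET/LIMIT probe followed by moving every row below the found id is an id-batched pouring idiom. A self-contained miniature of a single iteration, using sqlite3 purely for illustration:

import sqlite3

con = sqlite3.connect(':memory:')
con.execute('CREATE TABLE src (id INTEGER PRIMARY KEY)')
con.executemany('INSERT INTO src VALUES (?)',
                [(i,) for i in range(1, 11)])
con.execute('CREATE TABLE dst (id INTEGER PRIMARY KEY)')

lowest_id, highest_id, batch_size = 1, 10, 4
# Probe for the id exactly batch_size rows ahead of lowest_id.
row = con.execute(
    'SELECT id FROM src WHERE id >= ? ORDER BY id LIMIT 1 OFFSET ?',
    (lowest_id, batch_size)).fetchone()
next_id = (row[0] if row else highest_id) + 1
con.execute('INSERT INTO dst SELECT * FROM src WHERE id < ?', (next_id,))
con.execute('DELETE FROM src WHERE id < ?', (next_id,))
assert [r[0] for r in con.execute('SELECT id FROM dst')] == [1, 2, 3, 4, 5]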
    def __call__(self, batch_size):
        """See `ITunableLoop`.

        Loop body: pour rows with ids up to "next" over to to_table."""
        batch_size = int(batch_size)

        # Figure out what id lies exactly batch_size rows ahead.
        self.cur.execute("""
            SELECT id
            FROM %s
            WHERE id >= %s
            ORDER BY id
            OFFSET %s
            LIMIT 1
            """ % (self.from_table, quote(self.lowest_id), quote(batch_size)))
        end_id = self.cur.fetchone()

        if end_id is not None:
            next = end_id[0]
        else:
            next = self.highest_id

        next += 1

        self.prepareBatch(self.from_table, self.to_table, batch_size,
                          self.lowest_id, next)

        self.logger.debug("pouring %s: %d rows (%d-%d)" %
                          (self.from_table, batch_size, self.lowest_id, next))

        self.cur.execute("INSERT INTO %s (SELECT * FROM %s WHERE id < %d)" %
                         (self.to_table, self.from_table, next))

        self.cur.execute("DELETE FROM %s WHERE id < %d" %
                         (self.from_table, next))

        self.lowest_id = next
        self._commit()
Example #28
    def selectDifferenceType(self, parent_version=None, derived_version=None):
        """Execute the SQL expression to compute `DistroSeriesDifferenceType`.

        :param parent_version: The parent series' last released version
            of a package, if any.
        :param derived_version: The derived series' last released
            version of the same package, if any.
        :return: A numeric `DistroSeriesDifferenceType` value.
        """
        query = """
            SELECT %s FROM (
                SELECT %s AS source_version, %s AS parent_source_version
            ) AS input""" % (
            compose_sql_difference_type(),
            quote(derived_version),
            quote(parent_version),
        )
        cur = cursor()
        cur.execute(query)
        result = cur.fetchall()
        self.assertEqual(1, len(result))
        self.assertEqual(1, len(result[0]))
        return result[0][0]
    def selectDifferenceType(self, parent_version=None, derived_version=None):
        """Execute the SQL expression to compute `DistroSeriesDifferenceType`.

        :param parent_version: The parent series' last released version
            of a package, if any.
        :param derived_version: The derived series' last released
            version of the same package, if any.
        :return: A numeric `DistroSeriesDifferenceType` value.
        """
        query = """
            SELECT %s FROM (
                SELECT %s AS source_version, %s AS parent_source_version
            ) AS input""" % (
            compose_sql_difference_type(),
            quote(derived_version),
            quote(parent_version),
            )
        cur = cursor()
        cur.execute(query)
        result = cur.fetchall()
        self.assertEqual(1, len(result))
        self.assertEqual(1, len(result[0]))
        return result[0][0]
Example #30
    def _getBaseQueryAndClauseTablesForQueryingSprints(self):
        """Return the base SQL query and the clauseTables to be used when
        querying sprints related to this object.

        Subclasses must overwrite this method if it doesn't suit them.
        """
        query = """
            Specification.%s = %s
            AND Specification.id = SprintSpecification.specification
            AND SprintSpecification.sprint = Sprint.id
            AND SprintSpecification.status = %s
            """ % (self._table, self.id,
                   quote(SprintSpecificationStatus.ACCEPTED))
        return query, ['Specification', 'SprintSpecification']
Example #31
    def _getBaseQueryAndClauseTablesForQueryingSprints(self):
        """Return the base SQL query and the clauseTables to be used when
        querying sprints related to this object.

        Subclasses must overwrite this method if it doesn't suit them.
        """
        query = """
            Specification.%s = %s
            AND Specification.id = SprintSpecification.specification
            AND SprintSpecification.sprint = Sprint.id
            AND SprintSpecification.status = %s
            """ % (self._table, self.id,
                   quote(SprintSpecificationStatus.ACCEPTED))
        return query, ['Specification', 'SprintSpecification']
    def getSharedEquivalent(self):
        """See `ITranslationMessage`."""
        clauses = [
            'potemplate IS NULL',
            'potmsgset = %s' % sqlvalues(self.potmsgset),
            'language = %s' % sqlvalues(self.language),
            ]

        for form in range(TranslationConstants.MAX_PLURAL_FORMS):
            msgstr_name = 'msgstr%d' % form
            msgstr = getattr(self, 'msgstr%dID' % form)
            if msgstr is None:
                form_clause = "%s IS NULL" % msgstr_name
            else:
                form_clause = "%s = %s" % (msgstr_name, quote(msgstr))
            clauses.append(form_clause)

        where_clause = SQL(' AND '.join(clauses))
        return Store.of(self).find(TranslationMessage, where_clause).one()
Example #33
def have_table(cur, table):
    """Is there a table of the given name?

    Returns boolean answer.

    >>> have_table(cur, 'thistabledoesnotexist_i_hope')
    False
    >>> cur.execute("CREATE TEMP TABLE atesttable (x integer)")
    >>> have_table(cur, 'atesttable')
    True
    >>> drop_tables(cur, 'atesttable')
    >>> have_table(cur, 'atesttable')
    False
    """
    cur.execute('''
        SELECT count(*) > 0
        FROM pg_tables
        WHERE tablename=%s
    ''' % str(quote(table)))
    return (cur.fetchall()[0][0] != 0)
Example #34
def have_table(cur, table):
    """Is there a table of the given name?

    Returns boolean answer.

    >>> have_table(cur, 'thistabledoesnotexist_i_hope')
    False
    >>> cur.execute("CREATE TEMP TABLE atesttable (x integer)")
    >>> have_table(cur, 'atesttable')
    True
    >>> drop_tables(cur, 'atesttable')
    >>> have_table(cur, 'atesttable')
    False
    """
    cur.execute('''
        SELECT count(*) > 0
        FROM pg_tables
        WHERE tablename=%s
    ''' % str(quote(table)))
    return (cur.fetchall()[0][0] != 0)
    def getVoteSummariesForProposals(proposals):
        """See `IBranchMergeProposalGetter`."""
        if len(proposals) == 0:
            return {}
        ids = quote([proposal.id for proposal in proposals])
        store = Store.of(proposals[0])
        # First get the count of comments.
        query = """
            SELECT bmp.id, count(crm.*)
            FROM BranchMergeProposal bmp, CodeReviewMessage crm,
                 Message m, MessageChunk mc
            WHERE bmp.id IN %s
              AND bmp.id = crm.branch_merge_proposal
              AND crm.message = m.id
              AND mc.message = m.id
              AND mc.content is not NULL
            GROUP BY bmp.id
            """ % ids
        comment_counts = dict(store.execute(query))
        # Now get the vote counts.
        query = """
            SELECT bmp.id, crm.vote, count(crv.*)
            FROM BranchMergeProposal bmp, CodeReviewVote crv,
                 CodeReviewMessage crm
            WHERE bmp.id IN %s
              AND bmp.id = crv.branch_merge_proposal
              AND crv.vote_message = crm.id
            GROUP BY bmp.id, crm.vote
            """ % ids
        vote_counts = {}
        for proposal_id, vote_value, count in store.execute(query):
            vote = CodeReviewVote.items[vote_value]
            vote_counts.setdefault(proposal_id, {})[vote] = count
        # Now assemble the resulting dict.
        result = {}
        for proposal in proposals:
            summary = result.setdefault(proposal, {})
            summary['comment_count'] = (
                comment_counts.get(proposal.id, 0))
            summary.update(vote_counts.get(proposal.id, {}))
        return result
Example #36
    def getVoteSummariesForProposals(proposals):
        """See `IBranchMergeProposalGetter`."""
        if len(proposals) == 0:
            return {}
        ids = quote([proposal.id for proposal in proposals])
        store = Store.of(proposals[0])
        # First get the count of comments.
        query = """
            SELECT bmp.id, count(crm.*)
            FROM BranchMergeProposal bmp, CodeReviewMessage crm,
                 Message m, MessageChunk mc
            WHERE bmp.id IN %s
              AND bmp.id = crm.branch_merge_proposal
              AND crm.message = m.id
              AND mc.message = m.id
              AND mc.content is not NULL
            GROUP BY bmp.id
            """ % ids
        comment_counts = dict(store.execute(query))
        # Now get the vote counts.
        query = """
            SELECT bmp.id, crm.vote, count(crv.*)
            FROM BranchMergeProposal bmp, CodeReviewVote crv,
                 CodeReviewMessage crm
            WHERE bmp.id IN %s
              AND bmp.id = crv.branch_merge_proposal
              AND crv.vote_message = crm.id
            GROUP BY bmp.id, crm.vote
            """ % ids
        vote_counts = {}
        for proposal_id, vote_value, count in store.execute(query):
            vote = CodeReviewVote.items[vote_value]
            vote_counts.setdefault(proposal_id, {})[vote] = count
        # Now assemble the resulting dict.
        result = {}
        for proposal in proposals:
            summary = result.setdefault(proposal, {})
            summary['comment_count'] = (comment_counts.get(proposal.id, 0))
            summary.update(vote_counts.get(proposal.id, {}))
        return result
Example #37
    def getConstraints(self):
        """Return the constraints to use by this search."""
        constraints = []

        if self.search_text:
            constraints.append('FAQ.fti @@ ftq(%s)' % quote(self.search_text))

        if self.owner:
            constraints.append('FAQ.owner = %s' % sqlvalues(self.owner))

        if self.product:
            constraints.append('FAQ.product = %s' % sqlvalues(self.product))

        if self.distribution:
            constraints.append(
                'FAQ.distribution = %s' % sqlvalues(self.distribution))

        if self.project:
            constraints.append(
                'FAQ.product = Product.id AND Product.project = %s' % (
                    sqlvalues(self.project)))

        return '\n AND '.join(constraints)
Example #38
    def getConstraints(self):
        """Return the constraints to use by this search."""
        constraints = []

        if self.search_text:
            constraints.append('FAQ.fti @@ ftq(%s)' % quote(self.search_text))

        if self.owner:
            constraints.append('FAQ.owner = %s' % sqlvalues(self.owner))

        if self.product:
            constraints.append('FAQ.product = %s' % sqlvalues(self.product))

        if self.distribution:
            constraints.append(
                'FAQ.distribution = %s' % sqlvalues(self.distribution))

        if self.projectgroup:
            constraints.append(
                'FAQ.product = Product.id AND Product.project = %s' % (
                    sqlvalues(self.projectgroup)))

        return '\n AND '.join(constraints)
Example #39
def generateResetSequencesSQL(cur):
    """Return SQL that will reset table sequences to match the data in them.
    """
    stmt = []
    for schema, sequence, table, column in listSequences(cur):
        if table is None or column is None:
            continue
        sql = "SELECT max(%s) FROM %s" % (quoteIdentifier(column),
                                          quoteIdentifier(table))
        cur.execute(sql)
        last_value = cur.fetchone()[0]
        if last_value is None:
            last_value = 1
            flag = 'false'
        else:
            flag = 'true'
        sql = "setval(%s, %d, %s)" % (quote(
            '%s.%s' % (schema, sequence)), int(last_value), flag)
        stmt.append(sql)
    if stmt:
        stmt = 'SELECT ' + ', '.join(stmt)
        return stmt
    else:
        return ''
    def _fetchDBRows(self, simulate_timeout=False):
        msgstr_joins = [
            "LEFT OUTER JOIN POTranslation AS pt%d "
            "ON pt%d.id = TranslationMessage.msgstr%d" % (form, form, form)
            for form in xrange(TranslationConstants.MAX_PLURAL_FORMS)]

        translations = [
            "pt%d.translation AS translation%d" % (form, form)
            for form in xrange(TranslationConstants.MAX_PLURAL_FORMS)]

        substitutions = {
            'translation_columns': ', '.join(translations),
            'translation_joins': '\n'.join(msgstr_joins),
            'language': quote(self.pofile.language),
            'potemplate': quote(self.pofile.potemplate),
            'flag': self._getFlagName(),
        }

        sql = """
            SELECT
                POMsgId.msgid AS msgid,
                POMsgID_Plural.msgid AS msgid_plural,
                context,
                date_reviewed,
                %(translation_columns)s
            FROM POTMsgSet
            JOIN TranslationTemplateItem ON
                TranslationTemplateItem.potmsgset = POTMsgSet.id AND
                TranslationTemplateItem.potemplate = %(potemplate)s
            JOIN TranslationMessage ON
                POTMsgSet.id=TranslationMessage.potmsgset AND (
                    TranslationMessage.potemplate = %(potemplate)s OR
                    TranslationMessage.potemplate IS NULL) AND
                TranslationMessage.language = %(language)s
            %(translation_joins)s
            JOIN POMsgID ON
                POMsgID.id = POTMsgSet.msgid_singular
            LEFT OUTER JOIN POMsgID AS POMsgID_Plural ON
                POMsgID_Plural.id = POTMsgSet.msgid_plural
            WHERE
                %(flag)s IS TRUE
            ORDER BY
                TranslationTemplateItem.sequence,
                TranslationMessage.potemplate NULLS LAST
          """ % substitutions

        cur = cursor()
        try:
            # XXX JeroenVermeulen 2010-11-24 bug=680802: We set a
            # timeout to work around bug 408718, but the query is
            # simpler now.  See if we still need this.

            # We have to commit what we've got so far or we'll lose
            # it when we hit TimeoutError.
            transaction.commit()

            if simulate_timeout:
                # This is used in tests.
                timeout = '1ms'
                query = "SELECT pg_sleep(2)"
            else:
                timeout = 1000 * int(config.poimport.statement_timeout)
                query = sql
            cur.execute("SET statement_timeout to %s" % quote(timeout))
            cur.execute(query)
        except TimeoutError:
            # XXX JeroenVermeulen 2010-11-24 bug=680802: Log this so we
            # know whether it still happens.
            transaction.abort()
            return

        rows = cur.fetchall()

        assert TranslationConstants.MAX_PLURAL_FORMS == 6, (
            "Change this code to support %d plural forms"
            % TranslationConstants.MAX_PLURAL_FORMS)
        for row in rows:
            msgid, msgid_plural, context, date = row[:4]
            # The last part of the row is msgstr0 .. msgstr5. Store them
            # in a dict indexed by the number of the plural form.
            msgstrs = dict(enumerate(row[4:]))

            key = (msgid, msgid_plural, context)
            if key in self.current_messages:
                message = self.current_messages[key]
            else:
                message = TranslationMessageData()
                self.current_messages[key] = message

                message.context = context
                message.msgid_singular = msgid
                message.msgid_plural = msgid_plural

            for plural in xrange(TranslationConstants.MAX_PLURAL_FORMS):
                msgstr = msgstrs.get(plural, None)
                if (msgstr is not None and
                    ((len(message.translations) > plural and
                      message.translations[plural] is None) or
                     (len(message.translations) <= plural))):
                    message.addTranslation(plural, msgstr)
Example #41
    def getByEmail(self, email):
        """See `IEmailAddressSet`."""
        return EmailAddress.selectOne("lower(email) = %s" %
                                      quote(email.strip().lower()))
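Normalising both sides with lower() gives a case-insensitive match; a hedged sketch of the functional index that keeps such a query indexable (the index name is an assumption):

# PostgreSQL DDL sketch (index name assumed).
CREATE_LOWER_EMAIL_INDEX = (
    'CREATE UNIQUE INDEX emailaddress__lower_email__key'
    ' ON EmailAddress (lower(email))')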
Example #42
def _slow_nl_phrase_search(terms, table, constraints,
                           extra_constraints_tables):
    """Return the tsearch2 query that should be use to do a phrase search.

    This function implement an algorithm similar to the one used by MySQL
    natural language search (as documented at
    http://dev.mysql.com/doc/refman/5.0/en/fulltext-search.html).

    It eliminates stop words from the phrase and normalize each terms
    according to the full text indexation rules (lowercasing and stemming).

    Each term that is present in more than 50% of the candidate rows is also
    eliminated from the query. That term eliminatation is only done when there
    are 5 candidate rows or more.

    The remaining terms are then ORed together. One should use the rank() or
    rank_cd() function to order the results from running that query. This will
    make rows that use more of the terms and for which the terms are found
    closer in the text at the top of the list, while still returning rows that
    use only some of the terms.

    :terms: Some candidate search terms.

    :table: This should be the SQLBase class representing the base type.

    :constraints: Additional SQL clause that limits the rows to a
    subset of the table.

    :extra_constraints_tables: A list of additional table names that are
    needed by the constraints clause.

    Caveat: The model class must define a 'fti' column which is then used
    for full text searching.
    """
    total = table.select(
        constraints, clauseTables=extra_constraints_tables).count()
    term_candidates = terms
    if total < 5:
        return '|'.join(term_candidates)

    # Build the query to get all the counts. We get all the counts in
    # one query, using COUNT(CASE ...), since issuing separate queries
    # with COUNT(*) is a lot slower.
    count_template = (
        'COUNT(CASE WHEN %(table)s.fti @@ ftq(%(term)s)'
        ' THEN TRUE ELSE null END)')
    select_counts = [
        count_template % {'table': table.__storm_table__, 'term': quote(term)}
        for term in term_candidates
        ]
    select_tables = [table.__storm_table__]
    if extra_constraints_tables is not None:
        select_tables.extend(extra_constraints_tables)
    count_query = "SELECT %s FROM %s" % (
        ', '.join(select_counts), ', '.join(select_tables))
    if constraints != '':
        count_query += " WHERE %s" % constraints
    cur = cursor()
    cur.execute(count_query)
    counts = cur.fetchone()

    # Remove words that are too common.
    terms = [
        term for count, term in zip(counts, term_candidates)
        if float(count) / total < 0.5
        ]
    return '|'.join(terms)
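A worked example of the elimination rule with assumed match counts: out of total = 8 candidate rows, only terms that match fewer than half the rows survive.

total = 8
counts = {'ubuntu': 7, 'crash': 3, 'kernel': 2}  # assumed match counts
kept = [term for term in sorted(counts)
        if float(counts[term]) / total < 0.5]
# 'ubuntu' (7/8 rows) is dropped as too common.
assert '|'.join(kept) == 'crash|kernel'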
Example #43
 def getByEmail(self, email):
     """See `IEmailAddressSet`."""
     return EmailAddress.selectOne(
         "lower(email) = %s" % quote(email.strip().lower()))
def copy_active_translations(child, transaction, logger):
    """Furnish untranslated child `DistroSeries` with previous series's
    translations.

    This method uses `MultiTableCopy` to copy data.

    Translation data for the new series ("child") is first copied into holding
    tables called e.g. "temp_POTemplate_holding_ubuntu_feisty" and processed
    there.  Then, near the end of the procedure, the contents of these holding
    tables are all poured back into the original tables.

    If this procedure fails, it may leave holding tables behind.  This was
    done deliberately to leave some forensics information for failures, and
    also to allow admins to see what data has and has not been copied.

    If a holding table left behind by an abortive run has a column called
    new_id at the end, it contains unfinished data and may as well be dropped.
    If it does not have that column, the holding table was already in the
    process of being poured back into its source table.  In that case the
    sensible thing to do is probably to continue pouring it.
    """
    previous_series = child.previous_series
    if previous_series is None:
        # We don't have a previous series from where we could copy
        # translations.
        return

    translation_tables = ['potemplate', 'translationtemplateitem', 'pofile']

    full_name = "%s_%s" % (child.distribution.name, child.name)
    copier = MultiTableCopy(full_name, translation_tables, logger=logger)

    # Incremental copy of updates is no longer supported
    assert not child.has_translation_templates, (
           "The child series must not yet have any translation templates.")

    logger.info(
        "Populating blank distroseries %s with translations from %s." %
        (child.name, previous_series.name))

    # 1. Extraction phase--for every table involved (called a "source table"
    # in MultiTableCopy parlance), we create a "holding table."  We fill that
    # with all rows from the source table that we want to copy from the
    # previous series.  We make some changes to the copied rows, such as
    # making them belong to ourselves instead of our previous series.
    #
    # The first phase does not modify any tables that other clients may want
    # to use, avoiding locking problems.
    #
    # 2. Pouring phase.  From each holding table we pour all rows back into
    # the matching source table, deleting them from the holding table as we
    # go.  The holding table is dropped once empty.
    #
    # The second phase is "batched," moving only a small number of rows at a
    # time, then performing an intermediate commit.  This avoids holding too
    # many locks for too long and disrupting regular database service.

    # Clean up any remains from a previous run.  If we got here, that means
    # that any such remains are unsalvagable.
    copier.dropHoldingTables()

    # Copy relevant POTemplates from existing series into a holding table,
    # complete with their original id fields.
    where = 'distroseries = %s AND iscurrent' % quote(previous_series)
    copier.extract('potemplate', [], where)

    # Now that we have the data "in private," where nobody else can see it,
    # we're free to play with it.  No risk of locking other processes out of
    # the database.
    # Change series identifiers in the holding table to point to the new
    # series (right now they all bear the previous series's id) and set
    # creation dates to the current transaction time.
    cursor().execute('''
        UPDATE %s
        SET
            distroseries = %s,
            datecreated =
                timezone('UTC'::text,
                    ('now'::text)::timestamp(6) with time zone)
    ''' % (copier.getHoldingTableName('potemplate'), quote(child)))

    # Copy each TranslationTemplateItem whose template we copied, and let
    # MultiTableCopy replace each potemplate reference with a reference to
    # our copy of the original POTMsgSet's potemplate.
    copier.extract('translationtemplateitem', ['potemplate'], 'sequence > 0')

    # Copy POFiles, making them refer to the child's copied POTemplates.
    copier.extract(
        'pofile', ['potemplate'],
        batch_pouring_callback=omit_redundant_pofiles)

    # Finally, pour the holding tables back into the originals.
    copier.pour(transaction)
Example #45
    def _compute_packageset_delta(self, origin):
        """Given a source/target archive find obsolete or missing packages.

        This means finding out which packages in a given source archive are
        fresher or new with respect to a target archive.
        """
        store = IStore(BinaryPackagePublishingHistory)
        # The query below will find all packages in the source archive that
        # are fresher than their counterparts in the target archive.
        find_newer_packages = """
            UPDATE tmp_merge_copy_data mcd SET
                s_sspph = secsrc.id,
                s_sourcepackagerelease = spr.id,
                s_version = spr.version,
                obsoleted = True,
                s_status = secsrc.status,
                s_component = secsrc.component,
                s_section = secsrc.section
            FROM
                SourcePackagePublishingHistory secsrc,
                SourcePackageRelease spr,
                SourcePackageName spn
            WHERE
                secsrc.archive = %s AND secsrc.status IN (%s, %s) AND
                secsrc.distroseries = %s AND secsrc.pocket = %s AND
                secsrc.sourcepackagerelease = spr.id AND
                spr.sourcepackagename = spn.id AND
                spn.name = mcd.sourcepackagename AND
                spr.version > mcd.t_version
        """ % sqlvalues(origin.archive, PackagePublishingStatus.PENDING,
                        PackagePublishingStatus.PUBLISHED, origin.distroseries,
                        origin.pocket)

        if origin.component is not None:
            find_newer_packages += (" AND secsrc.component = %s" %
                                    quote(origin.component))
        store.execute(find_newer_packages)

        # Now find the packages that exist in the source archive but *not* in
        # the target archive.
        find_origin_only_packages = """
            INSERT INTO tmp_merge_copy_data (
                s_sspph, s_sourcepackagerelease, sourcepackagename,
                sourcepackagename_id, s_version, missing, s_status,
                s_component, s_section)
            SELECT
                secsrc.id AS s_sspph,
                secsrc.sourcepackagerelease AS s_sourcepackagerelease,
                spn.name AS sourcepackagename,
                spn.id AS sourcepackagename_id,
                spr.version AS s_version,
                True AS missing,
                secsrc.status AS s_status,
                secsrc.component AS s_component,
                secsrc.section AS s_section
            FROM SourcePackagePublishingHistory secsrc
            JOIN SourcePackageRelease AS spr ON
                spr.id = secsrc.sourcepackagerelease
            JOIN SourcePackageName AS spn ON
                spn.id = spr.sourcepackagename
            WHERE
                secsrc.archive = %s AND
                secsrc.status IN (%s, %s) AND
                secsrc.distroseries = %s AND
                secsrc.pocket = %s AND
                spn.name NOT IN (
                    SELECT sourcepackagename FROM tmp_merge_copy_data)
        """ % sqlvalues(origin.archive, PackagePublishingStatus.PENDING,
                        PackagePublishingStatus.PUBLISHED, origin.distroseries,
                        origin.pocket)

        if origin.component is not None:
            find_origin_only_packages += (" AND secsrc.component = %s" %
                                          quote(origin.component))
        store.execute(find_origin_only_packages)
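
# A hedged sketch of how the delta computed above might be consumed: once
# _init_packageset_delta() and _compute_packageset_delta() have both run,
# tmp_merge_copy_data flags each package as obsoleted (the target has an
# older version) or missing (the target lacks it entirely).  `store` is
# assumed to be the same Storm store used above.
result = store.execute("""
    SELECT sourcepackagename, s_version, t_version, obsoleted, missing
    FROM tmp_merge_copy_data
    WHERE obsoleted OR missing
    ORDER BY sourcepackagename
""")
for name, s_version, t_version, obsoleted, missing in result.get_all():
    action = "update" if obsoleted else "copy"
    print("%s: %s source=%s target=%s" % (name, action, s_version, t_version))
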
Example #46
def _slow_nl_phrase_search(terms, table, constraints,
                           extra_constraints_tables):
    """Return the tsearch2 query that should be use to do a phrase search.

    This function implement an algorithm similar to the one used by MySQL
    natural language search (as documented at
    http://dev.mysql.com/doc/refman/5.0/en/fulltext-search.html).

    It eliminates stop words from the phrase and normalize each terms
    according to the full text indexation rules (lowercasing and stemming).

    Each term that is present in more than 50% of the candidate rows is also
    eliminated from the query. That term eliminatation is only done when there
    are 5 candidate rows or more.

    The remaining terms are then ORed together. One should use the rank() or
    rank_cd() function to order the results from running that query. This will
    make rows that use more of the terms and for which the terms are found
    closer in the text at the top of the list, while still returning rows that
    use only some of the terms.

    :terms: Some candidate search terms.

    :table: This should be the SQLBase class representing the base type.

    :constraints: Additional SQL clause that limits the rows to a
    subset of the table.

    :extra_constraints_tables: A list of additional table names that are
    needed by the constraints clause.

    Caveat: The model class must define a 'fti' column which is then used
    for full text searching.
    """
    total = table.select(constraints,
                         clauseTables=extra_constraints_tables).count()
    term_candidates = terms
    if total < 5:
        return '|'.join(term_candidates)

    # Build the query to get all the counts. We get all the counts in
    # one query, using COUNT(CASE ...), since issuing separate queries
    # with COUNT(*) is a lot slower.
    count_template = ('COUNT(CASE WHEN %(table)s.fti @@ ftq(%(term)s)'
                      ' THEN TRUE ELSE null END)')
    select_counts = [
        count_template % {
            'table': table.__storm_table__,
            'term': quote(term)
        } for term in term_candidates
    ]
    select_tables = [table.__storm_table__]
    if extra_constraints_tables is not None:
        select_tables.extend(extra_constraints_tables)
    count_query = "SELECT %s FROM %s" % (', '.join(select_counts),
                                         ', '.join(select_tables))
    if constraints != '':
        count_query += " WHERE %s" % constraints
    cur = cursor()
    cur.execute(count_query)
    counts = cur.fetchone()

    # Remove words that are too common.
    terms = [
        term for count, term in zip(counts, term_candidates)
        if float(count) / total < 0.5
    ]
    return '|'.join(terms)
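
# A small worked example of the term-elimination rule implemented above:
# with 10 candidate rows, a term matching 6 of them (60%) is dropped, while
# rarer terms are kept and ORed together.
total = 10
counts = [6, 2, 3]  # per-term match counts, as returned by the COUNT query
term_candidates = ['firefox', 'crash', 'startup']
terms = [
    term for count, term in zip(counts, term_candidates)
    if float(count) / total < 0.5
]
assert '|'.join(terms) == 'crash|startup'
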
Example #47
def copy_active_translations(child, transaction, logger):
    """Furnish untranslated child `DistroSeries` with previous series's
    translations.

    This method uses `MultiTableCopy` to copy data.

    Translation data for the new series ("child") is first copied into holding
    tables called e.g. "temp_POTemplate_holding_ubuntu_feisty" and processed
    there.  Then, near the end of the procedure, the contents of these holding
    tables are all poured back into the original tables.

    If this procedure fails, it may leave holding tables behind.  This was
    done deliberately to leave some forensics information for failures, and
    also to allow admins to see what data has and has not been copied.

    If a holding table left behind by an abortive run has a column called
    new_id at the end, it contains unfinished data and may as well be dropped.
    If it does not have that column, the holding table was already in the
    process of being poured back into its source table.  In that case the
    sensible thing to do is probably to continue pouring it.
    """
    previous_series = child.previous_series
    if previous_series is None:
        # We don't have a previous series from where we could copy
        # translations.
        return

    translation_tables = ['potemplate', 'translationtemplateitem', 'pofile']

    full_name = "%s_%s" % (child.distribution.name, child.name)
    copier = MultiTableCopy(full_name, translation_tables, logger=logger)

    # Incremental copy of updates is no longer supported
    assert not child.has_translation_templates, (
        "The child series must not yet have any translation templates.")

    logger.info("Populating blank distroseries %s with translations from %s." %
                (child.name, previous_series.name))

    # 1. Extraction phase--for every table involved (called a "source table"
    # in MultiTableCopy parlance), we create a "holding table."  We fill that
    # with all rows from the source table that we want to copy from the
    # previous series.  We make some changes to the copied rows, such as
    # making them belong to ourselves instead of our previous series.
    #
    # The first phase does not modify any tables that other clients may want
    # to use, avoiding locking problems.
    #
    # 2. Pouring phase.  From each holding table we pour all rows back into
    # the matching source table, deleting them from the holding table as we
    # go.  The holding table is dropped once empty.
    #
    # The second phase is "batched," moving only a small number of rows at a
    # time, then performing an intermediate commit.  This avoids holding too
    # many locks for too long and disrupting regular database service.

    # Clean up any remains from a previous run.  If we got here, that means
    # that any such remains are unsalvageable.
    copier.dropHoldingTables()

    # Copy relevant POTemplates from existing series into a holding table,
    # complete with their original id fields.
    where = 'distroseries = %s AND iscurrent' % quote(previous_series)
    copier.extract('potemplate', [], where)

    # Now that we have the data "in private," where nobody else can see it,
    # we're free to play with it.  No risk of locking other processes out of
    # the database.
    # Change series identifiers in the holding table to point to the new
    # series (right now they all bear the previous series's id) and set
    # creation dates to the current transaction time.
    cursor().execute('''
        UPDATE %s
        SET
            distroseries = %s,
            datecreated =
                timezone('UTC'::text,
                    ('now'::text)::timestamp(6) with time zone)
    ''' % (copier.getHoldingTableName('potemplate'), quote(child)))

    # Copy each TranslationTemplateItem whose template we copied, and let
    # MultiTableCopy replace each potemplate reference with a reference to
    # our copy of the original POTMsgSet's potemplate.
    copier.extract('translationtemplateitem', ['potemplate'], 'sequence > 0')

    # Copy POFiles, making them refer to the child's copied POTemplates.
    copier.extract('pofile', ['potemplate'],
                   batch_pouring_callback=omit_redundant_pofiles)

    # Finally, pour the holding tables back into the originals.
    copier.pour(transaction)
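
# The docstring above describes how to triage a holding table left behind
# by a failed run: a trailing new_id column marks unfinished, droppable
# data.  A hedged sketch of that check via information_schema, assuming
# `cur` is an open cursor and the table was created with an unquoted
# (hence lowercased) name:
def holding_table_is_droppable(cur, holding_table):
    """Return True if the leftover holding table still has new_id."""
    cur.execute("""
        SELECT count(*) FROM information_schema.columns
        WHERE table_name = %s AND column_name = 'new_id'
        """, (holding_table.lower(),))
    return cur.fetchone()[0] > 0
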
Example #48
def _calculate_tag_query(conditions, tags):
    """Determine tag-related conditions and assemble a query.

    :param conditions: the other conditions that constrain the query.
    :param tags: the list of tags that the bug has.
    """
    # These are tables and joins we will want.  We leave out the tag join
    # because that needs to be added conditionally.
    tables = [
        StructuralSubscription,
        Join(BugSubscriptionFilter, BugSubscriptionFilter.structural_subscription_id == StructuralSubscription.id),
        LeftJoin(BugSubscriptionFilterStatus, BugSubscriptionFilterStatus.filter_id == BugSubscriptionFilter.id),
        LeftJoin(
            BugSubscriptionFilterImportance, BugSubscriptionFilterImportance.filter_id == BugSubscriptionFilter.id
        ),
        LeftJoin(
            BugSubscriptionFilterInformationType,
            BugSubscriptionFilterInformationType.filter_id == BugSubscriptionFilter.id,
        ),
    ]
    tag_join = LeftJoin(BugSubscriptionFilterTag, BugSubscriptionFilterTag.filter_id == BugSubscriptionFilter.id)
    # If the bug has no tags, this is relatively easy. Otherwise, not so
    # much.
    if len(tags) == 0:
        # The bug has no tags.  We should leave out filters that
        # require any generic non-empty set of tags
        # (BugSubscriptionFilter.include_any_tags), which we do with
        # the conditions.
        conditions.append(Not(BugSubscriptionFilter.include_any_tags))
        tables.append(tag_join)
        return Select(
            BugSubscriptionFilter.id,
            tables=tables,
            where=And(*conditions),
            # We have to make sure that the filter does not require
            # any *specific* tags. We do that with a GROUP BY on the
            # filters, and then a HAVING clause that aggregates the
            # BugSubscriptionFilterTags that are set to "include" the
            # tag.  (If it is not an include, that is an exclude, and a
            # bug without tags will not have a particular tag, so we can
            # ignore those in this case.)  This requires a CASE
            # statement within the COUNT.
            group_by=(BugSubscriptionFilter.id,),
            having=Count(SQL("CASE WHEN BugSubscriptionFilterTag.include " "THEN BugSubscriptionFilterTag.tag END"))
            == 0,
        )
    else:
        # The bug has some tags.  This will require a bit of fancy
        # footwork. First, though, we will simply want to leave out
        # filters that should only match bugs without tags.
        conditions.append(Not(BugSubscriptionFilter.exclude_any_tags))
        # We're going to have to do a union with another query.  One
        # query will handle filters that are marked to include *any*
        # of the filter's selected tags, and the other query will
        # handle filters that include *all* of the filter's selected
        # tags (as determined by BugSubscriptionFilter.find_all_tags).
        # Every aspect of the unioned queries' WHERE clauses *other
        # than tags* will need to be the same, and so we perform that
        # separately, first.  When Storm supports the WITH statement
        # (bug 729134), we can consider folding this back into a single
        # query.
        candidates = list(IStore(BugSubscriptionFilter).using(*tables).find(BugSubscriptionFilter.id, *conditions))
        if not candidates:
            return None
        # As mentioned, in this first SELECT we handle filters that
        # match any of the filter's tags.  This can be a relatively
        # straightforward query--we just need a bit more added to
        # our WHERE clause, and we don't need a GROUP BY/HAVING.
        first_select = Select(
            BugSubscriptionFilter.id,
            tables=[BugSubscriptionFilter, tag_join],
            where=And(
                Or(  # We want filters that proclaim they simply want any tags.
                    BugSubscriptionFilter.include_any_tags,
                    # Also include filters that match any tag...
                    And(
                        Not(BugSubscriptionFilter.find_all_tags),
                        Or(  # ...with a positive match...
                            And(BugSubscriptionFilterTag.include, In(BugSubscriptionFilterTag.tag, tags)),
                            # ...or with a negative match...
                            And(Not(BugSubscriptionFilterTag.include), Not(In(BugSubscriptionFilterTag.tag, tags))),
                            # ...or if the filter does not specify any tags.
                            BugSubscriptionFilterTag.tag == None,
                        ),
                    ),
                ),
                In(BugSubscriptionFilter.id, candidates),
            ),
        )
        # We have our first clause.  Now we start on the second one:
        # handling filters that match *all* tags.
        # This second query will have a HAVING clause, which is where some
        # tricky bits happen. We first make a SQL snippet that
        # represents the tags on this bug.  It is straightforward
        # except for one subtle hack: the addition of the empty
        # space in the array.  This is because we are going to be
        # aggregating the tags on the filters using ARRAY_AGG, which
        # includes NULLs (unlike most other aggregators).  That
        # is an issue here because we use CASE statements to divide
        # up the set of tags that are supposed to be included and
        # supposed to be excluded.  This means that if we aggregate
        # "CASE WHEN BugSubscriptionFilterTag.include THEN
        # BugSubscriptionFilterTag.tag END" then that array will
        # include NULL.  SQL treats NULLs as unknowns that can never
        # be matched, so the array of ['foo', 'bar', NULL] does not
        # contain the array of ['foo', NULL] ("SELECT
        # ARRAY['foo','bar',NULL]::TEXT[] @>
        # ARRAY['foo',NULL]::TEXT[];" is false).  Therefore, so we
        # can make the HAVING statement we want to make without
        # defining a custom Postgres aggregator, we use a single
        # space as, effectively, NULL.  This is safe because a
        # single space is not an acceptable tag.  Again, the
        # clearest alternative is defining a custom Postgres aggregator.
        tags_array = "ARRAY[%s,' ']::TEXT[]" % ",".join(quote(tag) for tag in tags)
        # Now let's build the select itself.
        second_select = Select(
            BugSubscriptionFilter.id,
            tables=[BugSubscriptionFilter, tag_join],
            # Our WHERE clause is straightforward. We are simply
            # focusing on BugSubscriptionFilter.find_all_tags, when the
            # first SELECT did not consider it.
            where=And(BugSubscriptionFilter.find_all_tags, In(BugSubscriptionFilter.id, candidates)),
            # The GROUP BY collects the filters together.
            group_by=(BugSubscriptionFilter.id,),
            having=And(
                # The list of tags should be a superset of the filter tags to
                # be included.
                ArrayContains(
                    SQL(tags_array),
                    # This next line gives us an array of the tags that the
                    # filter wants to include.  Notice that it includes the
                    # empty string when the condition does not match, per the
                    # discussion above.
                    ArrayAgg(
                        SQL(
                            "CASE WHEN BugSubscriptionFilterTag.include "
                            "THEN BugSubscriptionFilterTag.tag "
                            "ELSE ' '::TEXT END"
                        )
                    ),
                ),
                # The list of tags should also not intersect with the
                # tags that the filter wants to exclude.
                Not(
                    ArrayIntersects(
                        SQL(tags_array),
                        # This next line gives us an array of the tags
                        # that the filter wants to exclude.  We do not bother
                        # with the empty string, and therefore allow NULLs
                        # into the array, because in this case we are
                        # determining whether the sets intersect, not if the
                        # first set subsumes the second.
                        ArrayAgg(
                            SQL(
                                "CASE WHEN "
                                "NOT BugSubscriptionFilterTag.include "
                                "THEN BugSubscriptionFilterTag.tag END"
                            )
                        ),
                    )
                ),
            ),
        )
        # Everything is ready.  Return the union.
        return Union(first_select, second_select)
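
# The NULL subtlety described in the comments above can be checked directly
# in Postgres; a hedged sketch using any DB-API cursor `cur`:
cur.execute(
    "SELECT ARRAY['foo','bar',NULL]::TEXT[] @> ARRAY['foo',NULL]::TEXT[]")
assert cur.fetchone()[0] is False  # NULL never matches, containment fails

# Substituting a single space for NULL, as the CASE ... ELSE ' ' branch
# above does, restores the expected superset behaviour:
cur.execute(
    "SELECT ARRAY['foo','bar',' ']::TEXT[] @> ARRAY['foo',' ']::TEXT[]")
assert cur.fetchone()[0] is True
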
Example #49
    def addRequest(self, person, potemplates=None, pofiles=None,
            format=TranslationFileFormat.PO):
        """See `IPOExportRequestSet`."""
        if potemplates is None:
            potemplates = []
        elif IPOTemplate.providedBy(potemplates):
            # Allow single POTemplate as well as list of POTemplates
            potemplates = [potemplates]
        if pofiles is None:
            pofiles = []

        if not (potemplates or pofiles):
            raise AssertionError(
                "Can't add a request with no PO templates and no PO files.")

        potemplate_ids = ", ".join(
            [quote(template) for template in potemplates])
        # A null pofile stands for the template itself.  We represent it in
        # SQL as -1, because that's how it's indexed in the request table.
        pofile_ids = ", ".join([quote(pofile) for pofile in pofiles] + ["-1"])

        query_params = {
            'person': quote(person),
            'format': quote(format),
            'templates': potemplate_ids,
            'pofiles': pofile_ids,
            }

        store = IMasterStore(POExportRequest)

        if potemplates:
            # Create requests for all these templates, insofar as the same
            # user doesn't already have requests pending for them in the same
            # format.
            store.execute("""
                INSERT INTO POExportRequest(person, potemplate, format)
                SELECT %(person)s, template.id, %(format)s
                FROM POTemplate AS template
                LEFT JOIN POExportRequest AS existing ON
                    existing.person = %(person)s AND
                    existing.potemplate = template.id AND
                    existing.pofile IS NULL AND
                    existing.format = %(format)s
                WHERE
                    template.id IN (%(templates)s) AND
                    existing.id IS NULL
            """ % query_params)

        if pofiles:
            # Create requests for all these translations, insofar as the same
            # user doesn't already have identical requests pending.
            store.execute("""
                INSERT INTO POExportRequest(
                    person, potemplate, pofile, format)
                SELECT %(person)s, template.id, pofile.id, %(format)s
                FROM POFile
                JOIN POTemplate AS template ON template.id = POFile.potemplate
                LEFT JOIN POExportRequest AS existing ON
                    existing.person = %(person)s AND
                    existing.pofile = POFile.id AND
                    existing.format = %(format)s
                WHERE
                    POFile.id IN (%(pofiles)s) AND
                    existing.id IS NULL
                """ % query_params)
Example #50
    def addRequest(self,
                   person,
                   potemplates=None,
                   pofiles=None,
                   format=TranslationFileFormat.PO):
        """See `IPOExportRequestSet`."""
        if potemplates is None:
            potemplates = []
        elif IPOTemplate.providedBy(potemplates):
            # Allow single POTemplate as well as list of POTemplates
            potemplates = [potemplates]
        if pofiles is None:
            pofiles = []

        if not (potemplates or pofiles):
            raise AssertionError(
                "Can't add a request with no PO templates and no PO files.")

        potemplate_ids = ", ".join(
            [quote(template) for template in potemplates])
        # A null pofile stands for the template itself.  We represent it in
        # SQL as -1, because that's how it's indexed in the request table.
        pofile_ids = ", ".join([quote(pofile) for pofile in pofiles] + ["-1"])

        query_params = {
            'person': quote(person),
            'format': quote(format),
            'templates': potemplate_ids,
            'pofiles': pofile_ids,
        }

        store = IMasterStore(POExportRequest)

        if potemplates:
            # Create requests for all these templates, insofar as the same
            # user doesn't already have requests pending for them in the same
            # format.
            store.execute("""
                INSERT INTO POExportRequest(person, potemplate, format)
                SELECT %(person)s, template.id, %(format)s
                FROM POTemplate AS template
                LEFT JOIN POExportRequest AS existing ON
                    existing.person = %(person)s AND
                    existing.potemplate = template.id AND
                    existing.pofile IS NULL AND
                    existing.format = %(format)s
                WHERE
                    template.id IN (%(templates)s) AND
                    existing.id IS NULL
            """ % query_params)

        if pofiles:
            # Create requests for all these translations, insofar as the same
            # user doesn't already have identical requests pending.
            store.execute("""
                INSERT INTO POExportRequest(
                    person, potemplate, pofile, format)
                SELECT %(person)s, template.id, pofile.id, %(format)s
                FROM POFile
                JOIN POTemplate AS template ON template.id = POFile.potemplate
                LEFT JOIN POExportRequest AS existing ON
                    existing.person = %(person)s AND
                    existing.pofile = POFile.id AND
                    existing.format = %(format)s
                WHERE
                    POFile.id IN (%(pofiles)s) AND
                    existing.id IS NULL
                """ % query_params)
Example #51
    def _fetchDBRows(self, simulate_timeout=False):
        msgstr_joins = [
            "LEFT OUTER JOIN POTranslation AS pt%d "
            "ON pt%d.id = TranslationMessage.msgstr%d" % (form, form, form)
            for form in xrange(TranslationConstants.MAX_PLURAL_FORMS)
        ]

        translations = [
            "pt%d.translation AS translation%d" % (form, form)
            for form in xrange(TranslationConstants.MAX_PLURAL_FORMS)
        ]

        substitutions = {
            'translation_columns': ', '.join(translations),
            'translation_joins': '\n'.join(msgstr_joins),
            'language': quote(self.pofile.language),
            'potemplate': quote(self.pofile.potemplate),
            'flag': self._getFlagName(),
        }

        sql = """
            SELECT
                POMsgId.msgid AS msgid,
                POMsgID_Plural.msgid AS msgid_plural,
                context,
                date_reviewed,
                %(translation_columns)s
            FROM POTMsgSet
            JOIN TranslationTemplateItem ON
                TranslationTemplateItem.potmsgset = POTMsgSet.id AND
                TranslationTemplateItem.potemplate = %(potemplate)s
            JOIN TranslationMessage ON
                POTMsgSet.id=TranslationMessage.potmsgset AND (
                    TranslationMessage.potemplate = %(potemplate)s OR
                    TranslationMessage.potemplate IS NULL) AND
                TranslationMessage.language = %(language)s
            %(translation_joins)s
            JOIN POMsgID ON
                POMsgID.id = POTMsgSet.msgid_singular
            LEFT OUTER JOIN POMsgID AS POMsgID_Plural ON
                POMsgID_Plural.id = POTMsgSet.msgid_plural
            WHERE
                %(flag)s IS TRUE
            ORDER BY
                TranslationTemplateItem.sequence,
                TranslationMessage.potemplate NULLS LAST
          """ % substitutions

        cur = cursor()
        try:
            # XXX JeroenVermeulen 2010-11-24 bug=680802: We set a
            # timeout to work around bug 408718, but the query is
            # simpler now.  See if we still need this.

            # We have to commit what we've got so far or we'll lose
            # it when we hit TimeoutError.
            transaction.commit()

            if simulate_timeout:
                # This is used in tests.
                timeout = '1ms'
                query = "SELECT pg_sleep(2)"
            else:
                timeout = 1000 * int(config.poimport.statement_timeout)
                query = sql
            cur.execute("SET statement_timeout to %s" % quote(timeout))
            cur.execute(query)
        except TimeoutError:
            # XXX JeroenVermeulen 2010-11-24 bug=680802: Log this so we
            # know whether it still happens.
            transaction.abort()
            return

        rows = cur.fetchall()

        assert TranslationConstants.MAX_PLURAL_FORMS == 6, (
            "Change this code to support %d plural forms" %
            TranslationConstants.MAX_PLURAL_FORMS)
        for row in rows:
            msgid, msgid_plural, context, date = row[:4]
            # The last part of the row is msgstr0 .. msgstr5. Store them
            # in a dict indexed by the number of the plural form.
            msgstrs = dict(enumerate(row[4:]))

            key = (msgid, msgid_plural, context)
            if key in self.current_messages:
                message = self.current_messages[key]
            else:
                message = TranslationMessageData()
                self.current_messages[key] = message

                message.context = context
                message.msgid_singular = msgid
                message.msgid_plural = msgid_plural

            for plural in xrange(TranslationConstants.MAX_PLURAL_FORMS):
                msgstr = msgstrs.get(plural, None)
                if (msgstr is not None
                        and ((len(message.translations) > plural
                              and message.translations[plural] is None) or
                             (len(message.translations) <= plural))):
                    message.addTranslation(plural, msgstr)
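
# A hedged sketch of the statement_timeout guard used above, assuming a
# psycopg2-style `connection`/`cur` pair.  Postgres reads a bare number as
# milliseconds; a query that exceeds the limit raises a query-cancelled
# error, which the code above surfaces as TimeoutError.
cur.execute("SET statement_timeout TO %s" % quote(1000))  # one second
try:
    cur.execute("SELECT pg_sleep(2)")  # sleeps past the timeout
except Exception:
    # psycopg2 raises QueryCanceledError here; roll back so the
    # connection is reusable, mirroring the transaction.abort() above.
    connection.rollback()
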
Example #52
    def _init_packageset_delta(self, destination):
        """Set up a temp table with data about target archive packages.

        This is a first step in finding out which packages in a given source
        archive are fresher or new with respect to a target archive.

        Merge copying of packages is one of the use cases that requires such a
        package set diff capability.

        In order to find fresher or new packages we first set up a temporary
        table that lists what packages exist in the target archive
        (additionally considering the distroseries, pocket and component).
        """
        store = IStore(BinaryPackagePublishingHistory)
        # Use a temporary table to hold the data needed for the package set
        # delta computation. This will prevent multiple, parallel delta
        # calculations from interfering with each other.
        store.execute("""
            CREATE TEMP TABLE tmp_merge_copy_data (
                -- Source archive package data, only set for packages that
                -- will be copied.
                s_sspph integer,
                s_sourcepackagerelease integer,
                s_version debversion,
                s_status integer,
                s_component integer,
                s_section integer,
                -- Target archive package data, set for all published or
                -- pending packages.
                t_sspph integer,
                t_sourcepackagerelease integer,
                t_version debversion,
                -- Whether a target package became obsolete due to a more
                -- recent source package.
                obsoleted boolean DEFAULT false NOT NULL,
                missing boolean DEFAULT false NOT NULL,
                sourcepackagename text NOT NULL,
                sourcepackagename_id integer NOT NULL
            );
            CREATE INDEX source_name_index
            ON tmp_merge_copy_data USING btree (sourcepackagename);
        """)
        # Populate the temporary table with package data from the target
        # archive considering the distroseries, pocket and component.
        pop_query = """
            INSERT INTO tmp_merge_copy_data (
                t_sspph, t_sourcepackagerelease, sourcepackagename,
                sourcepackagename_id, t_version)
            SELECT
                secsrc.id AS t_sspph,
                secsrc.sourcepackagerelease AS t_sourcepackagerelease,
                spn.name AS sourcepackagename,
                spn.id AS sourcepackagename_id,
                spr.version AS t_version
            FROM SourcePackagePublishingHistory secsrc
            JOIN SourcePackageRelease AS spr ON
                spr.id = secsrc.sourcepackagerelease
            JOIN SourcePackageName AS spn ON
                spn.id = spr.sourcepackagename
            WHERE
                secsrc.archive = %s AND
                secsrc.status IN (%s, %s) AND
                secsrc.distroseries = %s AND
                secsrc.pocket = %s
        """ % sqlvalues(destination.archive, PackagePublishingStatus.PENDING,
                        PackagePublishingStatus.PUBLISHED,
                        destination.distroseries, destination.pocket)

        if destination.component is not None:
            pop_query += (" AND secsrc.component = %s" %
                          quote(destination.component))
        store.execute(pop_query)
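
# CREATE TEMP TABLE gives each database session its own private copy of
# tmp_merge_copy_data, which is what keeps parallel delta computations from
# interfering.  A hedged sketch of the intended call order, assuming `self`
# is the same object that defines the two methods shown in these examples:
self._init_packageset_delta(destination)  # build tmp_merge_copy_data
self._compute_packageset_delta(origin)    # flag obsoleted/missing rows
# ...then read tmp_merge_copy_data to drive the copying; the temp table
# vanishes automatically when the session ends.
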
Example #53
def _calculate_tag_query(conditions, tags):
    """Determine tag-related conditions and assemble a query.

    :param conditions: the other conditions that constrain the query.
    :param tags: the list of tags that the bug has.
    """
    # These are tables and joins we will want.  We leave out the tag join
    # because that needs to be added conditionally.
    tables = [
        StructuralSubscription,
        Join(BugSubscriptionFilter,
             BugSubscriptionFilter.structural_subscription_id ==
             StructuralSubscription.id),
        LeftJoin(BugSubscriptionFilterStatus,
                 BugSubscriptionFilterStatus.filter_id ==
                 BugSubscriptionFilter.id),
        LeftJoin(BugSubscriptionFilterImportance,
                 BugSubscriptionFilterImportance.filter_id ==
                 BugSubscriptionFilter.id),
        LeftJoin(BugSubscriptionFilterInformationType,
                 BugSubscriptionFilterInformationType.filter_id ==
                 BugSubscriptionFilter.id)]
    tag_join = LeftJoin(
        BugSubscriptionFilterTag,
        BugSubscriptionFilterTag.filter_id == BugSubscriptionFilter.id)
    # If the bug has no tags, this is relatively easy. Otherwise, not so
    # much.
    if len(tags) == 0:
        # The bug has no tags.  We should leave out filters that
        # require any generic non-empty set of tags
        # (BugSubscriptionFilter.include_any_tags), which we do with
        # the conditions.
        conditions.append(Not(BugSubscriptionFilter.include_any_tags))
        tables.append(tag_join)
        return Select(
            BugSubscriptionFilter.id,
            tables=tables,
            where=And(*conditions),
            # We have to make sure that the filter does not require
            # any *specific* tags. We do that with a GROUP BY on the
            # filters, and then a HAVING clause that aggregates the
            # BugSubscriptionFilterTags that are set to "include" the
            # tag.  (If it is not an include, that is an exclude, and a
            # bug without tags will not have a particular tag, so we can
            # ignore those in this case.)  This requires a CASE
            # statement within the COUNT.
            group_by=(BugSubscriptionFilter.id,),
            having=Count(
                SQL('CASE WHEN BugSubscriptionFilterTag.include '
                    'THEN BugSubscriptionFilterTag.tag END')) == 0)
    else:
        # The bug has some tags.  This will require a bit of fancy
        # footwork. First, though, we will simply want to leave out
        # filters that should only match bugs without tags.
        conditions.append(Not(BugSubscriptionFilter.exclude_any_tags))
        # We're going to have to do a union with another query.  One
        # query will handle filters that are marked to include *any*
        # of the filter's selected tags, and the other query will
        # handle filters that include *all* of the filter's selected
        # tags (as determined by BugSubscriptionFilter.find_all_tags).
        # Every aspect of the unioned queries' WHERE clauses *other
        # than tags* will need to be the same, and so we perform that
        # separately, first.  When Storm supports the WITH statement
        # (bug 729134), we can consider folding this back into a single
        # query.
        candidates = list(
            IStore(BugSubscriptionFilter).using(*tables).find(
                BugSubscriptionFilter.id, *conditions))
        if not candidates:
            return None
        # As mentioned, in this first SELECT we handle filters that
        # match any of the filter's tags.  This can be a relatively
        # straightforward query--we just need a bit more added to
        # our WHERE clause, and we don't need a GROUP BY/HAVING.
        first_select = Select(
            BugSubscriptionFilter.id,
            tables=[BugSubscriptionFilter, tag_join],
            where=And(
                Or(  # We want filters that proclaim they simply want any tags.
                   BugSubscriptionFilter.include_any_tags,
                   # Also include filters that match any tag...
                   And(Not(BugSubscriptionFilter.find_all_tags),
                       Or(  # ...with a positive match...
                          And(BugSubscriptionFilterTag.include,
                              In(BugSubscriptionFilterTag.tag, tags)),
                          # ...or with a negative match...
                          And(Not(BugSubscriptionFilterTag.include),
                              Not(In(BugSubscriptionFilterTag.tag, tags))),
                          # ...or if the filter does not specify any tags.
                          BugSubscriptionFilterTag.tag == None))),
                In(BugSubscriptionFilter.id, candidates)))
        # We have our first clause.  Now we start on the second one:
        # handling filters that match *all* tags.
        # This second query will have a HAVING clause, which is where some
        # tricky bits happen. We first make a SQL snippet that
        # represents the tags on this bug.  It is straightforward
        # except for one subtle hack: the addition of the empty
        # space in the array.  This is because we are going to be
        # aggregating the tags on the filters using ARRAY_AGG, which
        # includes NULLs (unlike most other aggregators).  That
        # is an issue here because we use CASE statements to divide
        # up the set of tags that are supposed to be included and
        # supposed to be excluded.  This means that if we aggregate
        # "CASE WHEN BugSubscriptionFilterTag.include THEN
        # BugSubscriptionFilterTag.tag END" then that array will
        # include NULL.  SQL treats NULLs as unknowns that can never
        # be matched, so the array of ['foo', 'bar', NULL] does not
        # contain the array of ['foo', NULL] ("SELECT
        # ARRAY['foo','bar',NULL]::TEXT[] @>
        # ARRAY['foo',NULL]::TEXT[];" is false).  Therefore, so we
        # can make the HAVING statement we want to make without
        # defining a custom Postgres aggregator, we use a single
        # space as, effectively, NULL.  This is safe because a
        # single space is not an acceptable tag.  Again, the
        # clearest alternative is defining a custom Postgres aggregator.
        tags_array = "ARRAY[%s,' ']::TEXT[]" % ",".join(
            quote(tag) for tag in tags)
        # Now let's build the select itself.
        second_select = Select(
            BugSubscriptionFilter.id,
            tables=[BugSubscriptionFilter, tag_join],
            # Our WHERE clause is straightforward. We are simply
            # focusing on BugSubscriptionFilter.find_all_tags, when the
            # first SELECT did not consider it.
            where=And(BugSubscriptionFilter.find_all_tags,
                      In(BugSubscriptionFilter.id, candidates)),
            # The GROUP BY collects the filters together.
            group_by=(BugSubscriptionFilter.id,),
            having=And(
                # The list of tags should be a superset of the filter tags to
                # be included.
                ArrayContains(
                    SQL(tags_array),
                    # This next line gives us an array of the tags that the
                    # filter wants to include.  Notice that it includes the
                    # empty string when the condition does not match, per the
                    # discussion above.
                    ArrayAgg(
                       SQL("CASE WHEN BugSubscriptionFilterTag.include "
                           "THEN BugSubscriptionFilterTag.tag "
                           "ELSE ' '::TEXT END"))),
                # The list of tags should also not intersect with the
                # tags that the filter wants to exclude.
                Not(
                    ArrayIntersects(
                        SQL(tags_array),
                        # This next line gives us an array of the tags
                        # that the filter wants to exclude.  We do not bother
                        # with the empty string, and therefore allow NULLs
                        # into the array, because in this case we are
                        # determining whether the sets intersect, not if the
                        # first set subsumes the second.
                        ArrayAgg(
                           SQL('CASE WHEN '
                               'NOT BugSubscriptionFilterTag.include '
                               'THEN BugSubscriptionFilterTag.tag END'))))))
        # Everything is ready.  Return the union.
        return Union(first_select, second_select)