Example #1
 def _getPendingJobsClauses(self):
     """WHERE clauses for pending job queries, used for dipatch time
     estimation."""
     virtualized = normalize_virtualization(self.virtualized)
     clauses = """
         BuildQueue.job = Job.id
         AND Job.status = %s
         AND (
             -- The score must be either above my score or the
             -- job must be older than me in cases where the
             -- score is equal.
             BuildQueue.lastscore > %s OR
             (BuildQueue.lastscore = %s AND Job.id < %s))
         -- The virtualized values either match or the job
         -- does not care about virtualization and the job
         -- of interest (JOI) is to be run on a virtual builder
         -- (we want to prevent the execution of untrusted code
         -- on native builders).
         AND COALESCE(buildqueue.virtualized, TRUE) = %s
         """ % sqlvalues(
             JobStatus.WAITING, self.lastscore, self.lastscore, self.job,
             virtualized)
     processor_clause = """
         AND (
             -- The processor values either match or the candidate
             -- job is processor-independent.
             buildqueue.processor = %s OR
             buildqueue.processor IS NULL)
         """ % sqlvalues(self.processor)
     # We don't care about processors if the estimation is for a
     # processor-independent job.
     if self.processor is not None:
         clauses += processor_clause
     return clauses
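
Every example in this collection interpolates Python values into SQL with the % operator after passing them through sqlvalues. The following is a rough mental model only (a hypothetical re-implementation; the real Launchpad helper also handles database objects, enum items and timestamps): it quotes each argument and returns a tuple ready for %-formatting.

def quote(value):
    # Hypothetical quoting rules, approximating the real helper.
    if value is None:
        return 'NULL'
    if isinstance(value, bool):
        return 'TRUE' if value else 'FALSE'
    if isinstance(value, (int, float)):
        return str(value)
    if isinstance(value, (list, tuple, set)):
        # Sequences render as parenthesized lists, for use with IN %s.
        return '(%s)' % ', '.join(quote(v) for v in value)
    # Strings are single-quoted, with embedded quotes doubled.
    return "'%s'" % str(value).replace("'", "''")

def sqlvalues(*values):
    return tuple(quote(v) for v in values)

clause = "Job.status = %s AND BuildQueue.id IN %s" % sqlvalues(
    'WAITING', [1, 2, 3])
assert clause == "Job.status = 'WAITING' AND BuildQueue.id IN (1, 2, 3)"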
Example #2
    def getAnnouncements(self, limit=5, published_only=True):
        """See IHasAnnouncements."""

        # Create the SQL query.
        query = '1=1 '
        # Filter for published news items if necessary.
        if published_only:
            query += """ AND
                Announcement.date_announced <= timezone('UTC'::text, now()) AND
                Announcement.active IS TRUE
                """
        if IProduct.providedBy(self):
            if self.project is None:
                query += """ AND
                    Announcement.product = %s""" % sqlvalues(self.id)
            else:
                query += """ AND
                    (Announcement.product = %s OR Announcement.project = %s)
                    """ % sqlvalues(self.id, self.project)
        elif IProjectGroup.providedBy(self):
            query += """ AND
                (Announcement.project = %s OR Announcement.product IN
                    (SELECT id FROM Product WHERE project = %s))
                    """ % sqlvalues (self.id, self.id)
        elif IDistribution.providedBy(self):
            query += (' AND Announcement.distribution = %s'
                % sqlvalues(self.id))
        elif IAnnouncementSet.providedBy(self):
            # There is no need to filter for pillar if we are looking for
            # all announcements.
            pass
        else:
            raise AssertionError('Unsupported announcement target')
        return Announcement.select(query, limit=limit)
Example #3
 def _copy_component_section_and_format_selections(self):
     """Copy the section, component and format selections from the parent
     distro series into this one.
     """
     # Copy the component selections
     self._store.execute('''
         INSERT INTO ComponentSelection (distroseries, component)
         SELECT DISTINCT %s AS distroseries, cs.component AS component
         FROM ComponentSelection AS cs WHERE cs.distroseries IN %s
         ''' % sqlvalues(self.distroseries.id,
         self.derivation_parent_ids))
     # Copy the section selections
     self._store.execute('''
         INSERT INTO SectionSelection (distroseries, section)
         SELECT DISTINCT %s AS distroseries, ss.section AS section
         FROM SectionSelection AS ss WHERE ss.distroseries IN %s
         ''' % sqlvalues(self.distroseries.id,
         self.derivation_parent_ids))
     # Copy the source format selections
     self._store.execute('''
         INSERT INTO SourcePackageFormatSelection (distroseries, format)
         SELECT DISTINCT %s AS distroseries, spfs.format AS format
         FROM SourcePackageFormatSelection AS spfs
         WHERE spfs.distroseries IN %s
         ''' % sqlvalues(self.distroseries.id,
         self.derivation_parent_ids))
Example #4
def getBinaryPackageDescriptions(results, use_names=False,
                                 max_title_length=50):
    """Return a dict of descriptions keyed by package name.

    See sourcepackage.py:getSourcePackageDescriptions, which is analogous.
    """
    if len(list(results)) < 1:
        return {}
    if use_names:
        clause = ("BinaryPackageName.name in %s" %
                 sqlvalues([pn.name for pn in results]))
    else:
        clause = ("BinaryPackageName.id in %s" %
                 sqlvalues([bpn.id for bpn in results]))

    descriptions = {}
    releases = BinaryPackageRelease.select(
        """BinaryPackageRelease.binarypackagename =
            BinaryPackageName.id AND
           %s""" % clause,
        clauseTables=["BinaryPackageRelease", "BinaryPackageName"],
        orderBy=["-BinaryPackageRelease.datecreated"])

    for release in releases:
        binarypackagename = release.binarypackagename.name
        if binarypackagename not in descriptions:
            description = release.description.strip().replace("\n", " ")
            if len(description) > max_title_length:
                description = description[:max_title_length] + "..."
            descriptions[binarypackagename] = description
    return descriptions
Example #5
    def judge(self, distroseries, pocket):
        """Judge superseded sources and binaries."""
        sources = SourcePackagePublishingHistory.select("""
            sourcepackagepublishinghistory.distroseries = %s AND
            sourcepackagepublishinghistory.archive = %s AND
            sourcepackagepublishinghistory.pocket = %s AND
            sourcepackagepublishinghistory.status IN %s AND
            sourcepackagepublishinghistory.scheduleddeletiondate is NULL AND
            sourcepackagepublishinghistory.dateremoved is NULL
            """ % sqlvalues(
                distroseries, self.archive, pocket,
                inactive_publishing_status))

        binaries = BinaryPackagePublishingHistory.select("""
            binarypackagepublishinghistory.distroarchseries =
                distroarchseries.id AND
            distroarchseries.distroseries = %s AND
            binarypackagepublishinghistory.archive = %s AND
            binarypackagepublishinghistory.pocket = %s AND
            binarypackagepublishinghistory.status IN %s AND
            binarypackagepublishinghistory.scheduleddeletiondate is NULL AND
            binarypackagepublishinghistory.dateremoved is NULL
            """ % sqlvalues(
                distroseries, self.archive, pocket,
                inactive_publishing_status),
            clauseTables=['DistroArchSeries'])

        self._judgeSuperseded(sources, binaries)
Example #6
    def search(self, text=None, search_products=False, show_inactive=False):
        """Search through the Registry database for project groups that match
        the query terms. text is a piece of text in the title / summary /
        description fields of project group (and possibly product). soyuz,
        bazaar, malone etc are hints as to whether the search
        should be limited to projects that are active in those Launchpad
        applications.
        """
        if text:
            text = text.replace("%", "%%")
        clauseTables = set()
        clauseTables.add('Project')
        queries = []

        if text:
            if search_products:
                clauseTables.add('Product')
                product_query = "Product.fti @@ ftq(%s)" % sqlvalues(text)
                queries.append(product_query)
            else:
                project_query = "Project.fti @@ ftq(%s)" % sqlvalues(text)
                queries.append(project_query)

        if 'Product' in clauseTables:
            queries.append('Product.project=Project.id')

        if not show_inactive:
            queries.append('Project.active IS TRUE')
            if 'Product' in clauseTables:
                queries.append('Product.active IS TRUE')

        query = " AND ".join(queries)
        return ProjectGroup.select(
            query, distinct=True, clauseTables=clauseTables)
Example #7
    def getPendingPublications(self, archive, pocket, is_careful):
        """See `ICanPublishPackages`."""
        from lp.soyuz.model.publishing import BinaryPackagePublishingHistory

        queries = [
            "distroarchseries = %s AND archive = %s"
            % sqlvalues(self, archive)
            ]

        target_status = [PackagePublishingStatus.PENDING]
        if is_careful:
            target_status.append(PackagePublishingStatus.PUBLISHED)
        queries.append("status IN %s" % sqlvalues(target_status))

        # Restrict to a specific pocket.
        queries.append('pocket = %s' % sqlvalues(pocket))

        # Exclude RELEASE pocket if the distroseries was already released,
        # since it should not change, unless the archive allows it.
        if (not self.distroseries.isUnstable() and
            not archive.allowUpdatesToReleasePocket()):
            queries.append(
                'pocket != %s' % sqlvalues(PackagePublishingPocket.RELEASE))

        publications = BinaryPackagePublishingHistory.select(
            " AND ".join(queries), orderBy=["-id"])

        return publications
Example #8
def check_script(con, log, hostname, scriptname,
                 completed_from, completed_to):
    """Check whether a script ran on a specific host within stated timeframe.

    Return None on success; otherwise log an error message and return it.
    """
    cur = con.cursor()
    cur.execute("""
        SELECT id
        FROM ScriptActivity
        WHERE hostname=%s AND name=%s
            AND date_completed BETWEEN %s AND %s
        LIMIT 1
        """ % sqlvalues(hostname, scriptname, completed_from, completed_to))
    try:
        # fetchone() returns None when no row matched, so the subscript
        # raises TypeError in that case.
        cur.fetchone()[0]
        return None
    except TypeError:
        output = ("The script '%s' didn't run on '%s' between %s and %s"
                % (scriptname, hostname, completed_from, completed_to))
        cur.execute("""
            SELECT MAX(date_completed)
            FROM ScriptActivity
            WHERE hostname=%s AND name=%s
        """ % sqlvalues(hostname, scriptname))
        date_last_seen = cur.fetchone()[0]
        if date_last_seen is not None:
            output += " (last seen %s)" % (date_last_seen,)
        log.fatal(output)
        return output
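
The bare cur.fetchone()[0] above leans on DB-API behaviour: fetchone() returns None when no row matched, so subscripting raises TypeError. A self-contained illustration of the idiom, using an in-memory SQLite table as a stand-in for ScriptActivity (table and data are hypothetical):

import sqlite3

con = sqlite3.connect(':memory:')
cur = con.cursor()
cur.execute("CREATE TABLE scriptactivity (id INTEGER, hostname TEXT)")
cur.execute("INSERT INTO scriptactivity VALUES (1, 'known-host')")

for host in ('known-host', 'absent-host'):
    cur.execute(
        "SELECT id FROM scriptactivity WHERE hostname = ? LIMIT 1", (host,))
    try:
        cur.fetchone()[0]
        print('%s: the script ran' % host)
    except TypeError:
        # fetchone() returned None: no matching row.
        print("%s: the script didn't run" % host)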
Example #9
    def getLatestPublishingEntry(self, time_interval, deb_only=True):
        """Return the BinaryPackagePublishingHistory record with the
        most recent datepublished.

        :param deb_only: If True, return only publishing records whose
            binarypackagerelease's binarypackagefile.filetype is
            BinaryPackageFileType.DEB.
        """
        query = """
            BinaryPackagePublishingHistory.pocket = %s
            AND BinaryPackagePublishingHistory.component = %s
            AND BinaryPackagePublishingHistory.distroarchseries = %s
            AND BinaryPackagePublishingHistory.archive = %s
            AND BinaryPackagePublishingHistory.status = %s
            """ % sqlvalues(self.pocket, self.component,
                            self.distro_arch_series,
                            self.distro_arch_series.main_archive,
                            PackagePublishingStatus.PUBLISHED)

        if deb_only:
            query += """
                AND BinaryPackagePublishingHistory.binarypackagerelease =
                    BinaryPackageFile.binarypackagerelease
                AND BinaryPackageFile.filetype = %s
                """ % sqlvalues(BinaryPackageFileType.DEB)

        if time_interval is not None:
            start, end = time_interval
            assert end > start, '%s is not more recent than %s' % (end, start)
            query = (query + " AND datepublished >= %s AND datepublished < %s"
                     % sqlvalues(start, end))
        return BinaryPackagePublishingHistory.selectFirst(
            query, clauseTables=['BinaryPackageFile'],
            orderBy='-datepublished')
Example #10
    def canRemove(self, publication_class, filename, file_md5):
        """Check if given (filename, MD5) can be removed from the pool.

        Check the archive reference-counter implemented in:
        `SourcePackagePublishingHistory` or
        `BinaryPackagePublishingHistory`.

        Only allow removal of unnecessary files.
        """
        clauses = []
        clauseTables = []

        if ISourcePackagePublishingHistory.implementedBy(
                publication_class):
            clauses.append("""
                SourcePackagePublishingHistory.archive = %s AND
                SourcePackagePublishingHistory.dateremoved is NULL AND
                SourcePackagePublishingHistory.sourcepackagerelease =
                    SourcePackageReleaseFile.sourcepackagerelease AND
                SourcePackageReleaseFile.libraryfile = LibraryFileAlias.id
            """ % sqlvalues(self.archive))
            clauseTables.append('SourcePackageReleaseFile')
        elif IBinaryPackagePublishingHistory.implementedBy(
                publication_class):
            clauses.append("""
                BinaryPackagePublishingHistory.archive = %s AND
                BinaryPackagePublishingHistory.dateremoved is NULL AND
                BinaryPackagePublishingHistory.binarypackagerelease =
                    BinaryPackageFile.binarypackagerelease AND
                BinaryPackageFile.libraryfile = LibraryFileAlias.id
            """ % sqlvalues(self.archive))
            clauseTables.append('BinaryPackageFile')
        else:
            raise AssertionError("%r is not supported." % publication_class)

        clauses.append("""
           LibraryFileAlias.content = LibraryFileContent.id AND
           LibraryFileAlias.filename = %s AND
           LibraryFileContent.md5 = %s
        """ % sqlvalues(filename, file_md5))
        clauseTables.extend(
            ['LibraryFileAlias', 'LibraryFileContent'])

        all_publications = publication_class.select(
            " AND ".join(clauses), clauseTables=clauseTables)

        right_now = datetime.datetime.now(pytz.timezone('UTC'))
        for pub in all_publications:
            # Deny removal if any reference is still active.
            if pub.status not in inactive_publishing_status:
                return False
            # Deny removal if any reference wasn't dominated yet.
            if pub.scheduleddeletiondate is None:
                return False
            # Deny removal if any reference is still in 'quarantine'.
            if pub.scheduleddeletiondate > right_now:
                return False

        return True
Example #11
    def A2_markPocketsWithDeletionsDirty(self):
        """An intermediate step in publishing to detect deleted packages.

        Mark pockets containing deleted packages (status DELETED or
        OBSOLETE), scheduleddeletiondate NULL and dateremoved NULL as
        dirty, to ensure that they are processed in death row.
        """
        self.log.debug("* Step A2: Mark pockets with deletions as dirty")

        # Query part that is common to both queries below.
        base_query = """
            archive = %s AND
            status = %s AND
            scheduleddeletiondate IS NULL AND
            dateremoved is NULL
            """ % sqlvalues(self.archive,
                            PackagePublishingStatus.DELETED)

        # We need to get a set of (distroseries, pocket) tuples that have
        # publications that are waiting to be deleted.  Each tuple is
        # added to the dirty_pockets set.

        # Loop for each pocket in each distroseries:
        for distroseries in self.distro.series:
            for pocket in self.archive.getPockets():
                if (self.cannotModifySuite(distroseries, pocket)
                    or not self.isAllowed(distroseries, pocket)):
                    # We don't want to mark release pockets dirty in a
                    # stable distroseries, no matter what earlier steps
                    # may have dirtied them.
                    continue
                clauses = [base_query]
                clauses.append("pocket = %s" % sqlvalues(pocket))
                clauses.append("distroseries = %s" % sqlvalues(distroseries))

                # Make the source publications query.
                source_query = " AND ".join(clauses)
                sources = SourcePackagePublishingHistory.select(source_query)
                if not sources.is_empty():
                    self.markPocketDirty(distroseries, pocket)
                    # No need to check binaries if the pocket is already
                    # dirtied from a source.
                    continue

                # Make the binary publications query.
                clauses = [base_query]
                clauses.append("pocket = %s" % sqlvalues(pocket))
                clauses.append("DistroArchSeries = DistroArchSeries.id")
                clauses.append("DistroArchSeries.distroseries = %s" %
                    sqlvalues(distroseries))
                binary_query = " AND ".join(clauses)
                binaries = BinaryPackagePublishingHistory.select(binary_query,
                    clauseTables=['DistroArchSeries'])
                if not binaries.is_empty():
                    self.markPocketDirty(distroseries, pocket)
Example #12
def getSourcePackageDescriptions(
    results, use_names=False, max_title_length=50):
    """Return a dictionary with descriptions keyed on source package names.

    Takes an ISelectResults of a *PackageName query. The use_names
    flag is a hack that allows this method to work for the
    BinaryAndSourcePackageName view, which lacks IDs.

    WARNING: this function assumes that there is little overlap and much
    coherence in how package names are used, in particular across
    distributions if derivation is implemented. IOW, it does not make a
    promise to provide The Correct Description, but a pretty good guess
    at what the description should be.
    """
    # XXX: kiko, 2007-01-17:
    # Use_names could be removed if we instead added IDs to the
    # BinaryAndSourcePackageName view, but we'd still need to find
    # out how to specify the attribute, since it would be
    # sourcepackagename_id and binarypackagename_id depending on
    # whether the row represented one or both of those cases.
    if use_names:
        clause = ("SourcePackageName.name in %s" %
                 sqlvalues([pn.name for pn in results]))
    else:
        clause = ("SourcePackageName.id in %s" %
                 sqlvalues([spn.id for spn in results]))

    cur = cursor()
    cur.execute("""SELECT DISTINCT BinaryPackageName.name,
                          SourcePackageName.name
                     FROM BinaryPackageRelease, SourcePackageName,
                          BinaryPackageBuild, BinaryPackageName
                    WHERE
                       BinaryPackageName.id =
                           BinaryPackageRelease.binarypackagename AND
                       BinaryPackageRelease.build = BinaryPackageBuild.id AND
                       BinaryPackageBuild.source_package_name =
                           SourcePackageName.id AND
                       %s
                   ORDER BY BinaryPackageName.name,
                            SourcePackageName.name"""
                    % clause)

    descriptions = {}
    for binarypackagename, sourcepackagename in cur.fetchall():
        if sourcepackagename not in descriptions:
            descriptions[sourcepackagename] = (
                "Source of: %s" % binarypackagename)
        else:
            if len(descriptions[sourcepackagename]) > max_title_length:
                description = "..."
            else:
                description = ", %s" % binarypackagename
            descriptions[sourcepackagename] += description
    return descriptions
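
The description-building loop above does not depend on the database; a standalone run with hypothetical (binary, source) name pairs shows the accumulation, including the quirk that "..." is appended once the description exceeds max_title_length:

# Standalone sketch of the loop above, with hypothetical rows.
max_title_length = 24
rows = [('libfoo1', 'foo'), ('foo-utils', 'foo'),
        ('foo-doc', 'foo'), ('bar', 'bar')]

descriptions = {}
for binarypackagename, sourcepackagename in rows:
    if sourcepackagename not in descriptions:
        descriptions[sourcepackagename] = (
            "Source of: %s" % binarypackagename)
    elif len(descriptions[sourcepackagename]) > max_title_length:
        descriptions[sourcepackagename] += "..."
    else:
        descriptions[sourcepackagename] += ", %s" % binarypackagename

print(descriptions)
# {'foo': 'Source of: libfoo1, foo-utils...', 'bar': 'Source of: bar'}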
Example #13
def _mergeTeamMembership(cur, from_id, to_id):
    # Transfer active team memberships
    approved = TeamMembershipStatus.APPROVED
    admin = TeamMembershipStatus.ADMIN
    cur.execute(
        'SELECT team, status FROM TeamMembership WHERE person = %s '
        'AND status IN (%s,%s)'
        % sqlvalues(from_id, approved, admin))
    for team_id, status in cur.fetchall():
        cur.execute('SELECT status FROM TeamMembership WHERE person = %s '
                    'AND team = %s'
                    % sqlvalues(to_id, team_id))
        result = cur.fetchone()
        if result is not None:
            current_status = result[0]
            # Now we can safely delete from_person's membership record,
            # because we know to_person has a membership entry for this
            # team, so we may only need to change its status.
            cur.execute(
                'DELETE FROM TeamMembership WHERE person = %s '
                'AND team = %s' % sqlvalues(from_id, team_id))

            if current_status == admin.value:
                # to_person is already an administrator of this team, no
                # need to do anything else.
                continue
            # to_person is either an approved or an inactive member,
            # while from_person is either admin or approved. That means we
            # can safely set from_person's membership status on
            # to_person's membership.
            assert status in (approved.value, admin.value)
            cur.execute(
                'UPDATE TeamMembership SET status = %s WHERE person = %s '
                'AND team = %s' % sqlvalues(status, to_id, team_id))
        else:
            # to_person is not a member of this team. just change
            # from_person with to_person in the membership record.
            cur.execute(
                'UPDATE TeamMembership SET person = %s WHERE person = %s '
                'AND team = %s'
                % sqlvalues(to_id, from_id, team_id))

    cur.execute('SELECT team FROM TeamParticipation WHERE person = %s '
                'AND person != team' % sqlvalues(from_id))
    for (team_id,) in cur.fetchall():
        cur.execute(
            'SELECT team FROM TeamParticipation WHERE person = %s '
            'AND team = %s' % sqlvalues(to_id, team_id))
        if not cur.fetchone():
            cur.execute(
                'UPDATE TeamParticipation SET person = %s WHERE '
                'person = %s AND team = %s'
                % sqlvalues(to_id, from_id, team_id))
        else:
            cur.execute(
                'DELETE FROM TeamParticipation WHERE person = %s AND '
                'team = %s' % sqlvalues(from_id, team_id))
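
The first half of the merge encodes a small precedence rule: when both persons already belong to a team, ADMIN on the to_person side is kept, and otherwise from_person's status wins. A hypothetical distillation of just that rule:

# Hypothetical status codes standing in for TeamMembershipStatus values.
APPROVED, ADMIN = 'APPROVED', 'ADMIN'

def merged_status(from_status, to_status):
    """Status of to_person's membership after the merge."""
    assert from_status in (APPROVED, ADMIN)
    if to_status == ADMIN:
        # to_person is already an administrator; nothing to change.
        return ADMIN
    # Otherwise from_person's status is copied over.
    return from_status

assert merged_status(APPROVED, ADMIN) == ADMIN
assert merged_status(ADMIN, APPROVED) == ADMIN
assert merged_status(APPROVED, APPROVED) == APPROVED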
Example #14
    def _latest_publishing_record(self, status=None):
        query = """
            binarypackagerelease = %s AND
            distroarchseries = %s AND
            archive IN %s
        """ % sqlvalues(
            self.binarypackagerelease, self.distroarchseries,
            self.distribution.all_distro_archive_ids)

        if status is not None:
            if not isinstance(status, (tuple, list)):
                status = [status]
            query += " AND status IN %s" % sqlvalues(status)

        return BinaryPackagePublishingHistory.selectFirst(
            query, orderBy=["-datecreated", "-id"])
Example #15
 def _baseQueryList(self):
     """See `FilteredLanguagePackVocabularyBase`."""
     # We are interested in any full language pack, or any language pack
     # that is a delta of the current base language pack type,
     # except the ones already used.
     used_lang_packs = []
     if self.context.language_pack_base is not None:
         used_lang_packs.append(self.context.language_pack_base.id)
     if self.context.language_pack_delta is not None:
         used_lang_packs.append(self.context.language_pack_delta.id)
     query = []
     if used_lang_packs:
         query.append("id NOT IN %s" % sqlvalues(used_lang_packs))
     query.append("(updates is NULL OR updates = %s)" % sqlvalues(self.context.language_pack_base))
     return query
Example #16
def compose_language_match(language_code):
    """Compose SQL condition for matching a language in the deletion query.

    :param language_code: Language code to match.
    :return: SQL condition in string form.
    """
    return 'Language.code = %s' % sqlvalues(language_code)
Example #17
    def check_fragile_connections(self):
        """Fail if any FRAGILE_USERS are connected to the cluster.

        If we interrupt these processes, we may have a mess to clean
        up. If they are connected, the preflight check should fail.
        """
        success = True
        for node in self.lpmain_nodes:
            cur = node.con.cursor()
            cur.execute("""
                SELECT datname, usename, COUNT(*) AS num_connections
                FROM pg_stat_activity
                WHERE
                    datname=current_database()
                    AND procpid <> pg_backend_pid()
                    AND usename IN %s
                GROUP BY datname, usename
                """ % sqlvalues(FRAGILE_USERS))
            for datname, usename, num_connections in cur.fetchall():
                self.log.fatal(
                    "Fragile system %s running. %s has %d connections.",
                    usename, datname, num_connections)
                success = False
        if success:
            self.log.debug(
                "No fragile systems connected to the cluster (%s)"
                % ', '.join(FRAGILE_USERS))
        return success
Example #18
def _mergeProposedInvitedTeamMembership(cur, from_id, to_id):
    # Memberships in an intermediate state are declined to avoid
    # cyclic membership errors and confusion about who the proposed
    # member is.
    TMS = TeamMembershipStatus
    update_template = ("""
        UPDATE TeamMembership
        SET status = %s
        WHERE
            person = %s
            AND status = %s
        """)
    cur.execute(update_template % sqlvalues(
        TMS.DECLINED, from_id, TMS.PROPOSED))
    cur.execute(update_template % sqlvalues(
        TMS.INVITATION_DECLINED, from_id, TMS.INVITED))
Example #19
    def uploadersForComponent(self, archive, component=None):
        """See `IArchivePermissionSet`."""
        clauses = ["""
            ArchivePermission.archive = %s AND
            ArchivePermission.permission = %s
            """ % sqlvalues(archive, ArchivePermissionType.UPLOAD)]

        if component is not None:
            component = self._nameToComponent(component)
            clauses.append(
                "ArchivePermission.component = %s" % sqlvalues(component))
        else:
            clauses.append("ArchivePermission.component IS NOT NULL")

        query = " AND ".join(clauses)
        return ArchivePermission.select(query, prejoins=["component"])
Example #20
 def getPairwiseMatrix(self):
     """See IPoll."""
     assert self.type == PollAlgorithm.CONDORCET
     options = list(self.getAllOptions())
     pairwise_matrix = []
     for option1 in options:
         pairwise_row = []
         for option2 in options:
             if option1 == option2:
                 pairwise_row.append(None)
                 continue
             points_query = """
                 SELECT COUNT(*) FROM Vote as v1, Vote as v2 WHERE
                     v1.token = v2.token AND
                     v1.option = %s AND v2.option = %s AND
                     (
                      (
                       v1.preference IS NOT NULL AND
                       v2.preference IS NOT NULL AND
                       v1.preference < v2.preference
                      )
                       OR
                      (
                       v1.preference IS NOT NULL AND
                       v2.preference IS NULL
                      )
                     )
                 """ % sqlvalues(option1.id, option2.id)
             points = Store.of(self).execute(points_query).get_one()[0]
             pairwise_row.append(points)
         pairwise_matrix.append(pairwise_row)
     return pairwise_matrix
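
The query counts, for each ordered pair of options, the ballots that rank option1 above option2, where a stated preference beats an unstated one. A self-contained sketch of the same computation over in-memory ballots (hypothetical data structures, not the Vote table):

# Hypothetical ballots: token -> {option_id: preference}; a missing key
# means the voter expressed no preference for that option.
ballots = {
    'tok1': {1: 1, 2: 2},      # prefers option 1 over option 2
    'tok2': {1: 1},            # ranked only option 1
    'tok3': {2: 1, 1: 2},      # prefers option 2 over option 1
}

def pairwise_matrix(ballots, options):
    """Return matrix[i][j] = number of ballots ranking options[i]
    above options[j]; None on the diagonal, as in getPairwiseMatrix."""
    matrix = []
    for opt1 in options:
        row = []
        for opt2 in options:
            if opt1 == opt2:
                row.append(None)
                continue
            points = 0
            for prefs in ballots.values():
                p1, p2 = prefs.get(opt1), prefs.get(opt2)
                # opt1 beats opt2 on this ballot if both are ranked and
                # opt1 ranks higher (smaller number), or only opt1 is
                # ranked at all.
                if p1 is not None and (p2 is None or p1 < p2):
                    points += 1
            row.append(points)
        matrix.append(row)
    return matrix

print(pairwise_matrix(ballots, [1, 2]))  # [[None, 2], [1, None]]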
Example #21
    def getDependencyDict(self, specifications):
        """See `ISpecificationSet`."""
        specification_ids = [spec.id for spec in specifications]

        if len(specification_ids) == 0:
            return {}

        results = Store.of(specifications[0]).execute("""
            SELECT SpecificationDependency.specification,
                   SpecificationDependency.dependency
            FROM SpecificationDependency, Specification
            WHERE SpecificationDependency.specification IN %s
            AND SpecificationDependency.dependency = Specification.id
            ORDER BY Specification.priority DESC, Specification.name,
                     Specification.id
        """ % sqlvalues(specification_ids)).get_all()

        dependencies = {}
        for spec_id, dep_id in results:
            if spec_id not in dependencies:
                dependencies[spec_id] = []
            dependency = Specification.get(dep_id)
            dependencies[spec_id].append(dependency)

        return dependencies
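
The grouping step is a common fetch-pairs-then-bucket pattern; a minimal sketch with hypothetical (specification, dependency) id pairs, keeping ids instead of calling Specification.get() for each row:

from collections import defaultdict

# Hypothetical pairs as the query above would return them, already
# ordered by the dependency's priority and name.
results = [(1, 10), (1, 11), (2, 10)]

dependencies = defaultdict(list)
for spec_id, dep_id in results:
    dependencies[spec_id].append(dep_id)

assert dict(dependencies) == {1: [10, 11], 2: [10]}

Note that the original method issues one Specification.get() per dependency row; resolving all of the ids in a single query would avoid that per-row round trip.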
Example #22
 def getPreferredEmailForPeople(self, people):
     """See `IEmailAddressSet`."""
     return EmailAddress.select("""
         EmailAddress.status = %s AND
         EmailAddress.person IN %s
         """ % sqlvalues(EmailAddressStatus.PREFERRED,
                         [person.id for person in people]))
Example #23
 def addCandidateSelectionCriteria(processor, virtualized):
     """See `IBuildFarmJob`."""
     private_statuses = (
         PackagePublishingStatus.PUBLISHED,
         PackagePublishingStatus.SUPERSEDED,
         PackagePublishingStatus.DELETED,
         )
     return """
         SELECT TRUE FROM Archive, BinaryPackageBuild, BuildPackageJob,
                          DistroArchSeries
         WHERE
         BuildPackageJob.job = Job.id AND
         BuildPackageJob.build = BinaryPackageBuild.id AND
         BinaryPackageBuild.distro_arch_series =
             DistroArchSeries.id AND
         BinaryPackageBuild.archive = Archive.id AND
         ((Archive.private IS TRUE AND
           EXISTS (
               SELECT SourcePackagePublishingHistory.id
               FROM SourcePackagePublishingHistory
               WHERE
                   SourcePackagePublishingHistory.distroseries =
                      DistroArchSeries.distroseries AND
                   SourcePackagePublishingHistory.sourcepackagerelease =
                      BinaryPackageBuild.source_package_release AND
                   SourcePackagePublishingHistory.archive = Archive.id AND
                   SourcePackagePublishingHistory.status IN %s))
           OR
          Archive.private IS FALSE) AND
         BinaryPackageBuild.status = %s
     """ % sqlvalues(private_statuses, BuildStatus.NEEDSBUILD)
Example #24
 def qualify_subquery(job_type, sub_query):
     """Put the sub-query into a job type context."""
     qualified_query = """
         ((BuildQueue.job_type != %s) OR EXISTS(%%s))
     """ % sqlvalues(job_type)
     qualified_query %= sub_query
     return qualified_query
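
The doubled percent sign is the key detail: the first % pass (with the sqlvalues tuple) consumes %s and collapses %%s to a literal %s, which the second pass then fills with the raw sub-query text. A runnable sketch with hypothetical values:

job_type = 2  # hypothetical stand-in for a quoted job type value
sub_query = "SELECT TRUE FROM BuildPackageJob WHERE job = Job.id"

# Pass 1: %s receives the job type; %%s survives as a literal %s.
qualified_query = """
    ((BuildQueue.job_type != %s) OR EXISTS(%%s))
""" % (job_type,)

# Pass 2: the surviving %s receives the sub-query text verbatim.
qualified_query %= sub_query
print(qualified_query)
# ((BuildQueue.job_type != 2) OR EXISTS(SELECT TRUE FROM ...))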
Example #25
 def getQuestionLanguages(self):
     """See `IQuestionCollection`."""
     return set(Language.select("""
         Language.id = Question.language AND
         Question.product = Product.id AND
         Product.project = %s""" % sqlvalues(self.id),
         clauseTables=['Question', 'Product'], distinct=True))
Example #26
    def getBuildByArch(self, distroarchseries, archive):
        """See ISourcePackageRelease."""
        # First we try to follow any binaries built from the given source
        # in a distroarchseries with the given architecturetag and published
        # in the given (distroarchseries, archive) location.
        # (Querying all architectures and then picking the right one out
        # of the result turns out to be much faster than querying for
        # just the architecture we want).
        builds_by_arch = self.findBuildsByArchitecture(
            distroarchseries.distroseries, archive)
        build = builds_by_arch.get(distroarchseries.architecturetag)
        if build is not None:
            # If there was any published binary we can use its original build.
            # This case covers the situations when both source and binaries
            # got copied from another location.
            return build

        # If there was no published binary we have to try to find a
        # suitable build in all possible locations across the distroseries
        # inheritance tree. See below.
        clause_tables = ['DistroArchSeries']
        queries = [
            "DistroArchSeries.id = BinaryPackageBuild.distro_arch_series AND "
            "BinaryPackageBuild.archive = %s AND "
            "DistroArchSeries.architecturetag = %s AND "
            "BinaryPackageBuild.source_package_release = %s" % (
            sqlvalues(archive.id, distroarchseries.architecturetag, self))]

        # Query only the last build record for this sourcerelease
        # across all possible locations.
        query = " AND ".join(queries)

        return BinaryPackageBuild.selectFirst(
            query, clauseTables=clause_tables,
            orderBy=['-date_created'])
Example #27
 def getReclaimableJobs(self):
     """See `ICodeImportJobSet`."""
     return IStore(CodeImportJob).find(
         CodeImportJob,
         "state = %s and heartbeat < %s + '-%s seconds'"
         % sqlvalues(CodeImportJobState.RUNNING, UTC_NOW,
                     config.codeimportworker.maximum_heartbeat_interval))
Example #28
    def getPackageSize(self):
        """See ISourcePackageRelease."""
        size_query = """
            SELECT
                SUM(LibraryFileContent.filesize)/1024.0
            FROM
                SourcePackageRelease
                JOIN SourcePackageReleaseFile ON
                    SourcePackageReleaseFile.sourcepackagerelease =
                    SourcePackageRelease.id
                JOIN LibraryFileAlias ON
                    SourcePackageReleaseFile.libraryfile =
                    LibraryFileAlias.id
                JOIN LibraryFileContent ON
                    LibraryFileAlias.content = LibraryFileContent.id
            WHERE
                SourcePackageRelease.id = %s
            """ % sqlvalues(self)

        cur = cursor()
        cur.execute(size_query)
        results = cur.fetchone()

        if len(results) == 1 and results[0] is not None:
            return float(results[0])
        else:
            return 0.0
Example #29
    def removeOld(cls, distro, archive, log):
        """Delete any cache records for removed packages.

        Also purges all existing cache records for disabled archives.

        :param distro: target `IDistribution`.
        :param archive: target `IArchive`.
        :param log: the context logger object able to print DEBUG level
            messages.
        """

        # Get the set of source package names to deal with.
        spns = set(SourcePackageName.select("""
            SourcePackagePublishingHistory.distroseries =
                DistroSeries.id AND
            DistroSeries.distribution = %s AND
            Archive.id = %s AND
            SourcePackagePublishingHistory.archive = Archive.id AND
            SourcePackagePublishingHistory.sourcepackagename =
                SourcePackageName.id AND
            SourcePackagePublishingHistory.dateremoved is NULL AND
            Archive.enabled = TRUE
            """ % sqlvalues(distro, archive),
            distinct=True,
            clauseTables=[
                'Archive',
                'DistroSeries',
                'SourcePackagePublishingHistory']))

        # Remove the cache entries for packages we no longer publish.
        for cache in cls._find(distro, archive):
            if cache.sourcepackagename not in spns:
                log.debug(
                    "Removing source cache for '%s' (%s)"
                    % (cache.name, cache.id))
                cache.destroySelf()
Example #30
def fix_pofile_plurals(pofile, logger, ztm):
    """Fix plural translations for PO files with mismatching headers."""
    logger.debug("Checking if PO file %d needs fixing" % pofile.id)
    plural_forms_mapping = get_mapping_for_pofile_plurals(pofile)
    if plural_forms_mapping is not None:
        logger.info("Fixing PO file %s" % pofile.title)
        pluralmessages = TranslationMessage.select("""
            POTMsgSet.id = TranslationMessage.potmsgset AND
            POTMsgSet.msgid_plural IS NOT NULL AND
            TranslationMessage.pofile = %s""" % sqlvalues(pofile),
            clauseTables=["POTMsgSet"])
        for message in pluralmessages:
            logger.debug("\tFixing translations for '%s'" % (
                message.potmsgset.singular_text))

            for form in xrange(TranslationConstants.MAX_PLURAL_FORMS):
                new_form = plural_forms_mapping[form]
                assert new_form < TranslationConstants.MAX_PLURAL_FORMS, (
                    "Translation with plural form %d in plurals mapping." %
                    new_form)
                translation = getattr(message, 'msgstr%d' % new_form)
                setattr(message, 'msgstr%d' % form, translation)

        # We also need to update the header so we don't try to re-do the
        # migration in the future.
        header = POHeader(pofile.header)
        header.plural_form_expression = pofile.language.pluralexpression
        header.has_plural_forms = True
        pofile.header = header.getRawContent()
        ztm.commit()
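
The remapping loop reads msgstr attributes from the same message object it is updating, so a mapping that permutes plural forms must not overwrite a slot before reading it. A self-contained sketch over plain dicts (mapping and message are hypothetical) that reads from an untouched snapshot instead:

MAX_PLURAL_FORMS = 6
# Hypothetical mapping that swaps plural forms 1 and 2.
plural_forms_mapping = {0: 0, 1: 2, 2: 1, 3: 3, 4: 4, 5: 5}
message = {'msgstr0': 'one', 'msgstr1': 'few', 'msgstr2': 'many'}

fixed = {}
for form in range(MAX_PLURAL_FORMS):
    new_form = plural_forms_mapping[form]
    # Read from the untouched 'message' snapshot, never from 'fixed'.
    fixed['msgstr%d' % form] = message.get('msgstr%d' % new_form)

print([fixed['msgstr%d' % i] for i in range(3)])  # ['one', 'many', 'few']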
Example #31
    def _findBuildCandidate(self):
        """Find a candidate job for dispatch to an idle buildd slave.

        :return: The pending BuildQueue item with the highest score for
            this builder, or None if no candidate is available.
        """
        def qualify_subquery(job_type, sub_query):
            """Put the sub-query into a job type context."""
            qualified_query = """
                ((BuildQueue.job_type != %s) OR EXISTS(%%s))
            """ % sqlvalues(job_type)
            qualified_query %= sub_query
            return qualified_query

        logger = self._getSlaveScannerLogger()
        candidate = None

        general_query = """
            SELECT buildqueue.id FROM buildqueue, job
            WHERE
                buildqueue.job = job.id
                AND job.status = %s
                AND (
                    -- The processor values either match or the candidate
                    -- job is processor-independent.
                    buildqueue.processor = %s OR
                    buildqueue.processor IS NULL)
                AND (
                    -- The virtualized values either match or the candidate
                    -- job does not care about virtualization and the idle
                    -- builder *is* virtualized (the latter is a security
                    -- precaution preventing the execution of untrusted code
                    -- on native builders).
                    buildqueue.virtualized = %s OR
                    (buildqueue.virtualized IS NULL AND %s = TRUE))
                AND buildqueue.builder IS NULL
        """ % sqlvalues(
            JobStatus.WAITING, self.processor, self.virtualized,
            self.virtualized)
        order_clause = " ORDER BY buildqueue.lastscore DESC, buildqueue.id"

        extra_queries = []
        job_classes = specific_job_classes()
        for job_type, job_class in job_classes.iteritems():
            query = job_class.addCandidateSelectionCriteria(
                self.processor, self.virtualized)
            if query == '':
                # This job class does not need to refine candidate jobs
                # further.
                continue

            # The sub-query should only apply to jobs of the right type.
            extra_queries.append(qualify_subquery(job_type, query))
        query = ' AND '.join([general_query] + extra_queries) + order_clause

        store = IStore(self.__class__)
        candidate_jobs = store.execute(query).get_all()

        for (candidate_id,) in candidate_jobs:
            candidate = getUtility(IBuildQueueSet).get(candidate_id)
            job_class = job_classes[candidate.job_type]
            candidate_approved = job_class.postprocessCandidate(
                candidate, logger)
            if candidate_approved:
                return candidate

        return None
Example #32
    def getBuildRecords(self,
                        build_state=None,
                        name=None,
                        pocket=None,
                        arch_tag=None,
                        user=None,
                        binary_only=True):
        """See `IHasBuildRecords`"""
        # Ignore "user", since it would not make any difference to the
        # records returned here (private builds are only in PPA right
        # now and this method only returns records for SPRs in a
        # distribution).
        # We also ignore the name parameter (required as part of the
        # IHasBuildRecords interface) and use our own name and the
        # binary_only parameter as a source package can only have
        # binary builds.

        clauseTables = [
            'SourcePackageRelease', 'SourcePackagePublishingHistory'
        ]

        condition_clauses = [
            """
        BinaryPackageBuild.source_package_release =
            SourcePackageRelease.id AND
        SourcePackagePublishingHistory.sourcepackagename = %s AND
        SourcePackagePublishingHistory.distroseries = %s AND
        SourcePackagePublishingHistory.archive IN %s AND
        SourcePackagePublishingHistory.sourcepackagerelease =
            SourcePackageRelease.id AND
        SourcePackagePublishingHistory.archive = BinaryPackageBuild.archive
        """ % sqlvalues(self.sourcepackagename, self.distroseries,
                        list(self.distribution.all_distro_archive_ids))
        ]

        # We re-use the optional-parameter handling provided by BuildSet
        # here, but pass None for the name argument as we've already
        # matched on exact source package name.
        BinaryPackageBuildSet().handleOptionalParamsForBuildQueries(
            condition_clauses,
            clauseTables,
            build_state,
            name=None,
            pocket=pocket,
            arch_tag=arch_tag)

        # Exclude gina-generated and security (dak-made) builds:
        # buildstate == FULLYBUILT && datebuilt == NULL.
        condition_clauses.append(
            "NOT (BinaryPackageBuild.status=%s AND "
            "     BinaryPackageBuild.date_finished is NULL)" %
            sqlvalues(BuildStatus.FULLYBUILT))

        # Ordering according to status:
        # * NEEDSBUILD, BUILDING & UPLOADING by -lastscore
        # * SUPERSEDED by -datecreated
        # * FULLYBUILT & FAILURES by -datebuilt
        # It should present the builds in a more natural order.
        if build_state in [
                BuildStatus.NEEDSBUILD,
                BuildStatus.BUILDING,
                BuildStatus.UPLOADING,
        ]:
            orderBy = ["-BuildQueue.lastscore"]
            clauseTables.append('BuildPackageJob')
            condition_clauses.append(
                'BuildPackageJob.build = BinaryPackageBuild.id')
            clauseTables.append('BuildQueue')
            condition_clauses.append('BuildQueue.job = BuildPackageJob.job')
        elif build_state == BuildStatus.SUPERSEDED or build_state is None:
            orderBy = [Desc("BinaryPackageBuild.date_created")]
        else:
            orderBy = [Desc("BinaryPackageBuild.date_finished")]

        # Fallback to ordering by -id as a tie-breaker.
        orderBy.append(Desc("id"))

        # End of duplication (see XXX cprov 2006-09-25 above).

        return IStore(BinaryPackageBuild).using(clauseTables).find(
            BinaryPackageBuild, *condition_clauses).order_by(*orderBy)
Example #33
 def getNotificationsOlderThan(self, time_limit):
     """See `IPersonNotificationSet`."""
     return PersonNotification.select('date_created < %s' %
                                      sqlvalues(time_limit))
Example #34
    def _update(cls, distro, sourcepackagename, archive, log):
        """Update cached source package details.

        Update cache details for a given ISourcePackageName, including
        generated binarypackage names, summary and description fti.
        'log' is required and only prints debug level information.
        """

        # Get the set of published sourcepackage releases.
        sprs = list(SourcePackageRelease.select("""
            SourcePackageRelease.id =
                SourcePackagePublishingHistory.sourcepackagerelease AND
            SourcePackagePublishingHistory.sourcepackagename = %s AND
            SourcePackagePublishingHistory.distroseries =
                DistroSeries.id AND
            DistroSeries.distribution = %s AND
            SourcePackagePublishingHistory.archive = %s AND
            SourcePackagePublishingHistory.dateremoved is NULL
            """ % sqlvalues(sourcepackagename, distro, archive),
            orderBy='id',
            clauseTables=['SourcePackagePublishingHistory', 'DistroSeries'],
            distinct=True))

        if len(sprs) == 0:
            log.debug("No sources releases found.")
            return

        # Find or create the cache entry.
        cache = DistributionSourcePackageCache.selectOne("""
            distribution = %s AND
            archive = %s AND
            sourcepackagename = %s
            """ % sqlvalues(distro, archive, sourcepackagename))
        if cache is None:
            log.debug("Creating new source cache entry.")
            cache = DistributionSourcePackageCache(
                archive=archive,
                distribution=distro,
                sourcepackagename=sourcepackagename)

        # Make sure the name is correct.
        cache.name = sourcepackagename.name

        # Get the sets of binary package names, summaries, descriptions.

        # XXX Julian 2007-04-03:
        # This bit of code needs fixing up, it is doing stuff that
        # really needs to be done in SQL, such as sorting and uniqueness.
        # This would also improve the performance.
        binpkgnames = set()
        binpkgsummaries = set()
        binpkgdescriptions = set()
        sprchangelog = set()
        for spr in sprs:
            log.debug("Considering source version %s" % spr.version)
            # changelog may be empty, in which case we don't want to add it
            # to the set as the join would fail below.
            if spr.changelog_entry is not None:
                sprchangelog.add(spr.changelog_entry)
            binpkgs = BinaryPackageRelease.select("""
                BinaryPackageRelease.build = BinaryPackageBuild.id AND
                BinaryPackageBuild.source_package_release = %s
                """ % sqlvalues(spr.id),
                clauseTables=['BinaryPackageBuild'])
            for binpkg in binpkgs:
                log.debug("Considering binary '%s'" % binpkg.name)
                binpkgnames.add(binpkg.name)
                binpkgsummaries.add(binpkg.summary)
                binpkgdescriptions.add(binpkg.description)

        # Update the caches.
        cache.binpkgnames = ' '.join(sorted(binpkgnames))
        cache.binpkgsummaries = ' '.join(sorted(binpkgsummaries))
        cache.binpkgdescriptions = ' '.join(sorted(binpkgdescriptions))
        cache.changelog = ' '.join(sorted(sprchangelog))
Example #35
 def getDirectAnswerQuestionTargets(self):
     """See `IQuestionsPerson`."""
     answer_contacts = AnswerContact.select('person = %s' % sqlvalues(self))
     return self._getQuestionTargetsFromAnswerContacts(answer_contacts)
Example #36
 def _copy_packaging_links(self):
     """Copy the packaging links from the parent series to this one."""
     # We iterate over the parents and copy into the child in
     # sequence to avoid creating duplicates.
     for parent_id in self.derivation_parent_ids:
         spns = self.source_names_by_parent.get(parent_id, None)
         if spns is not None and len(spns) == 0:
              # Some packagesets may have been selected but not a single
              # source from this parent. We will not copy any links for
              # this parent.
             continue
         sql = ("""
             INSERT INTO
                 Packaging(
                     distroseries, sourcepackagename, productseries,
                     packaging, owner)
             SELECT
                 ChildSeries.id,
                 Packaging.sourcepackagename,
                 Packaging.productseries,
                 Packaging.packaging,
                 Packaging.owner
             FROM
                 Packaging
                 -- Joining the parent distroseries permits the query to
                 -- build the data set for the series being updated, yet
                 -- results are in fact the data from the original series.
                 JOIN Distroseries ChildSeries
                     ON Packaging.distroseries = %s
             WHERE
                 -- Select only the packaging links that are in the parent
                 -- that are not in the child.
                 ChildSeries.id = %s
             """ % sqlvalues(parent_id, self.distroseries.id))
         sql_filter = ("""
             AND Packaging.sourcepackagename in (
             SELECT
                 Sourcepackagename.id
             FROM
                 Sourcepackagename
             WHERE
                 Sourcepackagename.name IN %s
             )
             """ % sqlvalues(spns))
         sql_end = ("""
             AND Packaging.sourcepackagename in (
             SELECT Packaging.sourcepackagename
             FROM Packaging
             WHERE distroseries in (
                 SELECT id
                 FROM Distroseries
                 WHERE id = %s
                 )
             EXCEPT
             SELECT Packaging.sourcepackagename
             FROM Packaging
             WHERE distroseries in (
                 SELECT id
                 FROM Distroseries
                 WHERE id = ChildSeries.id
                 )
             )
             """ % sqlvalues(parent_id))
         if spns is not None:
             self._store.execute(sql + sql_filter + sql_end)
         else:
             self._store.execute(sql + sql_end)
Example #37
    def _init_packageset_delta(self, destination):
        """Set up a temp table with data about target archive packages.

        This is a first step in finding out which packages in a given source
        archive are fresher or new with respect to a target archive.

        Merge copying of packages is one of the use cases that requires such a
        package set diff capability.

        In order to find fresher or new packages we first set up a temporary
        table that lists what packages exist in the target archive
        (additionally considering the distroseries, pocket and component).
        """
        store = IStore(BinaryPackagePublishingHistory)
        # Use a temporary table to hold the data needed for the package set
        # delta computation. This will prevent multiple, parallel delta
        # calculations from interfering with each other.
        store.execute("""
            CREATE TEMP TABLE tmp_merge_copy_data (
                -- Source archive package data, only set for packages that
                -- will be copied.
                s_sspph integer,
                s_sourcepackagerelease integer,
                s_version debversion,
                s_status integer,
                s_component integer,
                s_section integer,
                -- Target archive package data, set for all published or
                -- pending packages.
                t_sspph integer,
                t_sourcepackagerelease integer,
                t_version debversion,
                -- Whether a target package became obsolete due to a more
                -- recent source package.
                obsoleted boolean DEFAULT false NOT NULL,
                missing boolean DEFAULT false NOT NULL,
                sourcepackagename text NOT NULL,
                sourcepackagename_id integer NOT NULL
            );
            CREATE INDEX source_name_index
            ON tmp_merge_copy_data USING btree (sourcepackagename);
        """)
        # Populate the temporary table with package data from the target
        # archive considering the distroseries, pocket and component.
        pop_query = """
            INSERT INTO tmp_merge_copy_data (
                t_sspph, t_sourcepackagerelease, sourcepackagename,
                sourcepackagename_id, t_version)
            SELECT
                secsrc.id AS t_sspph,
                secsrc.sourcepackagerelease AS t_sourcepackagerelease,
                spn.name AS sourcepackagename,
                spn.id AS sourcepackagename_id,
                spr.version AS t_version
            FROM SourcePackagePublishingHistory secsrc
            JOIN SourcePackageRelease AS spr ON
                spr.id = secsrc.sourcepackagerelease
            JOIN SourcePackageName AS spn ON
                spn.id = spr.sourcepackagename
            WHERE
                secsrc.archive = %s AND
                secsrc.status IN (%s, %s) AND
                secsrc.distroseries = %s AND
                secsrc.pocket = %s
        """ % sqlvalues(destination.archive, PackagePublishingStatus.PENDING,
                        PackagePublishingStatus.PUBLISHED,
                        destination.distroseries, destination.pocket)

        if destination.component is not None:
            pop_query += (" AND secsrc.component = %s" %
                          quote(destination.component))
        store.execute(pop_query)
Example #38
    def _clone_binary_packages(self,
                               origin,
                               destination,
                               origin_das,
                               destination_das,
                               sourcepackagenames=None):
        """Copy binary publishing data from origin to destination.

        @type origin: PackageLocation
        @param origin: the location from which binary publishing
            records are to be copied.
        @type destination: PackageLocation
        @param destination: the location to which the data is
            to be copied.
        @type origin_das: DistroArchSeries
        @param origin_das: the DistroArchSeries from which to copy
            binary packages
        @type destination_das: DistroArchSeries
        @param destination_das: the DistroArchSeries to which to copy
            binary packages
        @param sourcepackagenames: List of source packages to restrict
            the copy to
        @type sourcepackagenames: Iterable
        """
        use_names = (sourcepackagenames and len(sourcepackagenames) > 0)
        clause_tables = "FROM BinaryPackagePublishingHistory AS bpph"
        if use_names:
            clause_tables += """,
                BinaryPackageRelease AS bpr,
                BinaryPackageBuild AS bpb,
                SourcePackageRelease AS spr,
                SourcePackageName AS spn
                """
        # We do not need to set phased_update_percentage; that is heavily
        # context-dependent and should be set afresh for the new location if
        # required.
        query = """
            INSERT INTO BinaryPackagePublishingHistory (
                binarypackagerelease, distroarchseries, status,
                component, section, priority, archive, datecreated,
                datepublished, pocket, binarypackagename)
            SELECT
                bpph.binarypackagerelease,
                %s as distroarchseries,
                bpph.status,
                bpph.component,
                bpph.section,
                bpph.priority,
                %s as archive,
                %s as datecreated,
                %s as datepublished,
                %s as pocket,
                bpph.binarypackagename
            """ % sqlvalues(destination_das, destination.archive, UTC_NOW,
                            UTC_NOW, destination.pocket)
        query += clause_tables
        query += """
            WHERE
                bpph.distroarchseries = %s AND
                bpph.status in (%s, %s) AND
                bpph.pocket = %s AND
                bpph.archive = %s
            """ % sqlvalues(origin_das, PackagePublishingStatus.PENDING,
                            PackagePublishingStatus.PUBLISHED, origin.pocket,
                            origin.archive)

        if use_names:
            query += """
                AND bpph.binarypackagerelease = bpr.id
                AND bpb.id = bpr.build
                AND bpb.source_package_release = spr.id
                AND spr.sourcepackagename = spn.id
                AND spn.name IN %s
            """ % sqlvalues(sourcepackagenames)

        IStore(BinaryPackagePublishingHistory).execute(query)
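
These examples all build SQL by %-interpolating the result of sqlvalues(). As a reading aid, here is a minimal sketch of what such a helper could look like: a simplified stand-in, not Launchpad's real implementation, assuming only that positional arguments become a tuple of quoted SQL literals and keyword arguments become a dict for named interpolation (both usages appear in the snippets below).

# Minimal sketch of quote()/sqlvalues() semantics; the real helpers
# handle many more types (dates, enum items, timedeltas, ...).
def quote(value):
    # Render a Python value as a naively quoted SQL literal.
    if value is None:
        return 'NULL'
    if isinstance(value, bool):
        return 'TRUE' if value else 'FALSE'
    if isinstance(value, (int, long)):
        return str(value)
    if isinstance(value, (tuple, list, set, frozenset)):
        # Sequences become parenthesised lists, as used by "IN %s".
        return '(%s)' % ', '.join(quote(item) for item in value)
    return "'%s'" % str(value).replace("'", "''")

def sqlvalues(*values, **kwvalues):
    # Positional arguments feed '%s' placeholders; keyword arguments
    # feed '%(name)s' placeholders (see the TeamParticipation query
    # in a later example).
    if kwvalues:
        return dict((key, quote(value))
                    for key, value in kwvalues.items())
    return tuple(quote(value) for value in values)

print "spn.name IN %s" % sqlvalues(['bash', 'coreutils'])
# spn.name IN ('bash', 'coreutils')
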
Example #39
    def unsynchronized_lists(self):
        """See `IMailingListSet`."""
        return MailingList.select('status IN %s' % sqlvalues(
            (MailingListStatus.CONSTRUCTING, MailingListStatus.UPDATING)))
Example #40
    def _baseQueryList(self):
        """See `FilteredLanguagePackVocabularyBase`."""
        return [
            '(type = %s AND updates = %s)' %
            sqlvalues(LanguagePackType.DELTA, self.context.language_pack_base)
        ]
Example #41
    def getByName(self, name):
        """See `IHWVendorNameSet`."""
        return HWVendorName.selectOne(
            'ulower(name)=ulower(%s)' % sqlvalues(name))
def remove_translations(logger=None,
                        submitter=None,
                        reviewer=None,
                        reject_license=False,
                        ids=None,
                        potemplate=None,
                        language_code=None,
                        not_language=False,
                        is_current_ubuntu=None,
                        is_current_upstream=None,
                        msgid_singular=None,
                        origin=None):
    """Remove specified translation messages.

    :param logger: Optional logger to write output to.
    :param submitter: Delete only messages submitted by this person.
    :param reviewer: Delete only messages reviewed by this person.
    :param reject_license: Delete only messages submitted by persons who
        have rejected the licensing agreement.
    :param ids: Delete only messages with these `TranslationMessage` ids.
    :param potemplate: Delete only messages in this template.
    :param language_code: Language code.  Depending on `not_language`,
        either delete messages in this language or spare messages in this
        language that would otherwise be deleted.
    :param not_language: Whether to spare (True) or delete (False)
        messages in this language.
    :param is_current_ubuntu: Delete only messages with this is_current_ubuntu
        value.
    :param is_current_upstream: Delete only messages with this
        is_current_upstream value.
    :param msgid_singular: Delete only messages with this singular msgid.
    :param origin: Delete only messages with this `TranslationOrigin` code.

    :return: Number of messages deleted.
    """
    joins = set()
    conditions = set()
    if submitter is not None:
        conditions.add('TranslationMessage.submitter = %s' %
                       sqlvalues(submitter))
    if reviewer is not None:
        conditions.add('TranslationMessage.reviewer = %s' %
                       sqlvalues(reviewer))
    if reject_license:
        joins.add('TranslationRelicensingAgreement')
        conditions.add('TranslationMessage.submitter = '
                       'TranslationRelicensingAgreement.person')
        conditions.add('NOT TranslationRelicensingAgreement.allow_relicensing')
    if ids is not None:
        conditions.add('TranslationMessage.id IN %s' % sqlvalues(ids))
    if potemplate is not None:
        joins.add('TranslationTemplateItem')
        conditions.add('TranslationTemplateItem.potmsgset '
                       ' = TranslationMessage.potmsgset')
        conditions.add('TranslationTemplateItem.potemplate = %s' %
                       sqlvalues(potemplate))

    if language_code is not None:
        joins.add('Language')
        conditions.add('Language.id = TranslationMessage.language')
        language_match = compose_language_match(language_code)
        if not_language:
            conditions.add('NOT (%s)' % language_match)
        else:
            conditions.add(language_match)

    add_bool_match(conditions, 'TranslationMessage.is_current_ubuntu',
                   is_current_ubuntu)
    add_bool_match(conditions, 'TranslationMessage.is_current_upstream',
                   is_current_upstream)

    if msgid_singular is not None:
        joins.add('POTMsgSet')
        conditions.add('POTMsgSet.id = TranslationMessage.potmsgset')
        joins.add('POMsgID')
        conditions.add('POMsgID.id = POTMsgSet.msgid_singular')
        conditions.add('POMsgID.msgid = %s' % sqlvalues(msgid_singular))

    if origin is not None:
        conditions.add('TranslationMessage.origin = %s' % sqlvalues(origin))

    assert len(conditions) > 0, "That would delete ALL translations, maniac!"

    cur = cursor()
    drop_tables(cur, 'temp_doomed_message')

    joins.add('TranslationMessage')
    from_text = ', '.join(joins)
    where_text = ' AND\n    '.join(conditions)

    warn_about_deleting_current_messages(cur, from_text, where_text, logger)

    # Keep track of messages we're going to delete.
    # Don't bother indexing this.  We'd more likely end up optimizing
    # away the operator's "oh-shit-ctrl-c" time than helping anyone.
    query = """
        CREATE TEMP TABLE temp_doomed_message AS
        SELECT TranslationMessage.id, NULL::integer AS imported_message
        FROM %s
        WHERE %s
        """ % (from_text, where_text)
    cur.execute(query)

    # Note which shared messages are masked by the messages we're
    # going to delete.  We'll be making those the current ones.
    query = """
        UPDATE temp_doomed_message
        SET imported_message = Imported.id
        FROM TranslationMessage Doomed, TranslationMessage Imported
        WHERE
            Doomed.id = temp_doomed_message.id AND
            -- Is alternative for the message we're about to delete.
            Imported.potmsgset = Doomed.potmsgset AND
            Imported.language = Doomed.language AND
            Imported.potemplate IS NULL AND
            Doomed.potemplate IS NULL AND
            -- Is used upstream.
            Imported.is_current_upstream IS TRUE AND
            -- Was masked by the message we're about to delete.
            Doomed.is_current_ubuntu IS TRUE AND
            Imported.id <> Doomed.id
            """
    cur.execute(query)

    if logger is not None and logger.getEffectiveLevel() <= logging.DEBUG:
        # Dump sample of doomed messages for debugging purposes.
        cur.execute("""
            SELECT *
            FROM temp_doomed_message
            ORDER BY id
            LIMIT 20
            """)
        rows = cur.fetchall()
        if cur.rowcount > 0:
            logger.debug("Sample of messages to be deleted follows.")
            logger.debug("%10s %10s" % ("[message]", "[unmasks]"))
            for (doomed, unmasked) in rows:
                if unmasked is None:
                    unmasked = '--'
                logger.debug("%10s %10s" % (doomed, unmasked))

    cur.execute("""
        DELETE FROM TranslationMessage
        USING temp_doomed_message
        WHERE TranslationMessage.id = temp_doomed_message.id
        """)

    rows_deleted = cur.rowcount
    if logger is not None:
        if rows_deleted > 0:
            logger.info("Deleting %d message(s)." % rows_deleted)
        else:
            logger.warn("No rows match; not deleting anything.")

    cur.execute("""
        UPDATE TranslationMessage
        SET is_current_ubuntu = TRUE
        FROM temp_doomed_message
        WHERE TranslationMessage.id = temp_doomed_message.imported_message
        """)

    if cur.rowcount > 0 and logger is not None:
        logger.debug("Unmasking %d imported message(s)." % cur.rowcount)

    drop_tables(cur, 'temp_doomed_message')

    return rows_deleted
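
For orientation, a hypothetical call site for remove_translations(); every object name below is a placeholder rather than a real fixture.

# Hypothetical usage sketch: delete one submitter's Ubuntu-side
# messages in a single template, sparing English. 'some_person' and
# 'some_template' are placeholders, not real objects.
import logging

log = logging.getLogger('remove-translations')
deleted = remove_translations(
    logger=log,
    submitter=some_person,
    potemplate=some_template,
    language_code='en',
    not_language=True,       # spare English, delete the rest
    is_current_ubuntu=True)
log.info("Removed %d message(s) in total." % deleted)
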
Example #43
    def _baseQueryList(self):
        """See `FilteredLanguagePackVocabularyBase`."""
        return ['type = %s' % sqlvalues(LanguagePackType.FULL)]
Example #44
    def getImportedBugMessages(self, bug):
        """See IBugMessageSet."""
        return BugMessage.select("""
            BugMessage.bug = %s
            AND BugMessage.bugwatch IS NOT NULL
            """ % sqlvalues(bug), orderBy='id')
Example #45
def main():
    parser = OptionParser()

    db_options(parser)
    parser.add_option("-f",
                      "--from",
                      dest="from_date",
                      default=None,
                      metavar="DATE",
                      help="Only count new files since DATE (yyyy/mm/dd)")
    parser.add_option("-u",
                      "--until",
                      dest="until_date",
                      default=None,
                      metavar="DATE",
                      help="Only count new files until DATE (yyyy/mm/dd)")

    options, args = parser.parse_args()
    if len(args) > 0:
        parser.error("Too many command line arguments.")

    # Handle date filters. We use LibraryFileContent.datecreated rather
    # than LibraryFileAlias.datecreated as this report is about actual
    # disk space usage. A new row in the database linking to a
    # previously existing file in the Librarian takes up no new space.
    if options.from_date is not None:
        from_date = 'AND LFC.datecreated >= %s' % sqlvalues(options.from_date)
    else:
        from_date = ''
    if options.until_date is not None:
        until_date = 'AND LFC.datecreated <= %s' % sqlvalues(
            options.until_date)
    else:
        until_date = ''

    con = connect()
    cur = con.cursor()

    # Collect direct references to the LibraryFileAlias table.
    references = set(
        (from_table, from_column)
        # Note that listReferences is recursive, which we don't
        # care about in this simple report. We also ignore the
        # irrelevant constraint type update and delete flags.
        for from_table, from_column, to_table, to_column, update, delete in
        listReferences(cur, 'libraryfilealias', 'id')
        if to_table == 'libraryfilealias')

    totals = set()
    for referring_table, referring_column in sorted(references):
        if referring_table == 'libraryfiledownloadcount':
            continue
        quoted_referring_table = quoteIdentifier(referring_table)
        quoted_referring_column = quoteIdentifier(referring_column)
        cur.execute("""
            SELECT
                COALESCE(SUM(filesize), 0),
                pg_size_pretty(CAST(COALESCE(SUM(filesize), 0) AS bigint)),
                COUNT(*)
            FROM (
                SELECT DISTINCT ON (LFC.id) LFC.id, LFC.filesize
                FROM LibraryFileContent AS LFC, LibraryFileAlias AS LFA, %s
                WHERE LFC.id = LFA.content
                    AND LFA.id = %s.%s
                    AND (
                        LFA.expires IS NULL
                        OR LFA.expires > CURRENT_TIMESTAMP AT TIME ZONE 'UTC')
                    %s %s
                ORDER BY LFC.id
                ) AS Whatever
            """ % (quoted_referring_table, quoted_referring_table,
                   quoted_referring_column, from_date, until_date))
        total_bytes, formatted_size, num_files = cur.fetchone()
        totals.add((total_bytes, referring_table, formatted_size, num_files))

    for total_bytes, tab_name, formatted_size, num_files in sorted(
            totals, reverse=True):
        print '%-10s %s in %d files' % (formatted_size, tab_name, num_files)

    return 0
Example #46
    def _estimateTimeToNextBuilder(self):
        """Estimate time until next builder becomes available.

        For the purpose of estimating the dispatch time of the job of interest
        (JOI) we need to know how long it will take until the job at the head
        of JOI's queue is dispatched.

        There are two cases to consider here: the head job is

            - processor dependent: only builders with the matching
              processor/virtualization combination should be considered.
            - *not* processor dependent: all builders with the matching
              virtualization setting should be considered.

        :return: The estimated number of seconds until a builder capable of
            running the head job becomes available.
        """
        head_job_platform = self._getHeadJobPlatform()

        # Return a zero delay if we still have free builders available for the
        # given platform/virtualization combination.
        free_builders = self._getFreeBuildersCount(*head_job_platform)
        if free_builders > 0:
            return 0

        head_job_processor, head_job_virtualized = head_job_platform

        now = self._now()
        delay_query = """
            SELECT MIN(
              CASE WHEN
                EXTRACT(EPOCH FROM
                  (BuildQueue.estimated_duration -
                   (((%s AT TIME ZONE 'UTC') - Job.date_started))))  >= 0
              THEN
                EXTRACT(EPOCH FROM
                  (BuildQueue.estimated_duration -
                   (((%s AT TIME ZONE 'UTC') - Job.date_started))))
              ELSE
                -- Assume that jobs that have overdrawn their estimated
                -- duration time budget will complete within 2 minutes.
                -- This is a wild guess but has worked well so far.
                --
                -- Please note that this is entirely innocuous i.e. if our
                -- guess is off nothing bad will happen but our estimate will
                -- not be as good as it could be.
                120
              END)
            FROM
                BuildQueue, Job, Builder
            WHERE
                BuildQueue.job = Job.id
                AND BuildQueue.builder = Builder.id
                AND Builder.manual = False
                AND Builder.builderok = True
                AND Job.status = %s
                AND Builder.virtualized = %s
            """ % sqlvalues(now, now, JobStatus.RUNNING,
                            normalize_virtualization(head_job_virtualized))

        if head_job_processor is not None:
            # Only look at builders with specific processor types.
            delay_query += """
                AND Builder.processor = %s
                """ % sqlvalues(head_job_processor)

        result_set = IStore(BuildQueue).execute(delay_query)
        head_job_delay = result_set.get_one()[0]
        return (0 if head_job_delay is None else int(head_job_delay))
    def getMostTranslatedPillars(self, limit=50):
        """See `ITranslationsOverview`."""

        # XXX Abel Deuring 2012-10-26 bug=1071751
        # The expression product.information_type IS NULL can be
        # removed once we have the DB constraint
        # "Product.information_type IS NOT NULL".
        query = """
        SELECT LOWER(COALESCE(product_name, distro_name)) AS name,
               product_id,
               distro_id,
               LN(total_karma)/LN(2) AS karma
          FROM (
            SELECT
                product.displayname AS product_name,
                product.id AS product_id,
                distribution.displayname AS distro_name,
                distribution.id AS distro_id,
                SUM(karmavalue) AS total_karma
              FROM karmacache
                   LEFT JOIN product ON
                     product=product.id
                   LEFT JOIN distribution ON
                     distribution=distribution.id
              WHERE category=3 AND
                    (product IS NOT NULL OR distribution IS NOT NULL) AND
                    (product.translations_usage = %s AND
                     (product.information_type = %s OR
                      product.information_type IS NULL) OR
                    distribution.translations_usage = %s)
              GROUP BY product.displayname, product.id,
                       distribution.displayname, distribution.id
              HAVING SUM(karmavalue) > 0
              ORDER BY total_karma DESC
              LIMIT %s) AS something
          ORDER BY name""" % sqlvalues(ServiceUsage.LAUNCHPAD,
                                       InformationType.PUBLIC,
                                       ServiceUsage.LAUNCHPAD,
                                       limit)
        cur = cursor()
        cur.execute(query)

        all_pillars = []

        # Get minimum and maximum relative karma value to be able to normalize
        # them to appropriate font size values.
        minimum = None
        maximum = None
        for (name, product_id, distro_id, relative_karma) in cur.fetchall():
            if minimum is None or relative_karma < minimum:
                minimum = relative_karma
            if maximum is None or relative_karma > maximum:
                maximum = relative_karma
            if product_id is not None:
                pillar = Product.get(product_id)
            elif distro_id is not None:
                pillar = Distribution.get(distro_id)
            else:
                raise MalformedKarmaCacheData(
                    "Lots of karma for non-existing product or distribution.")
            all_pillars.append((pillar, relative_karma))

        # Normalize the relative karma values between MINIMUM_SIZE and
        # MAXIMUM_SIZE.
        return self._normalizeSizes(all_pillars, minimum, maximum)
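
_normalizeSizes() itself is not part of this collection. A plausible linear mapping is sketched below; the constants and the formula are assumptions, not the verified implementation.

# Sketch only: map each pillar's relative karma linearly onto a
# font-size range. MINIMUM_SIZE/MAXIMUM_SIZE are assumed values.
MINIMUM_SIZE = 10
MAXIMUM_SIZE = 18

def _normalizeSizes(all_pillars, minimum, maximum):
    spread = (maximum - minimum) or 1  # guard against a zero range
    scale = float(MAXIMUM_SIZE - MINIMUM_SIZE) / spread
    return [
        (pillar, MINIMUM_SIZE + (karma - minimum) * scale)
        for (pillar, karma) in all_pillars]
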
Example #48
def close_account(con, log, username):
    """Close a person's account.

    Return True on success, or log an error message and return False
    """
    cur = con.cursor()
    cur.execute(
        """
        SELECT Person.id, Person.account, name, teamowner
        FROM Person
        LEFT OUTER JOIN EmailAddress ON Person.id = EmailAddress.person
        WHERE name = %(username)s OR lower(email) = lower(%(username)s)
        """, vars())
    try:
        person_id, account_id, username, teamowner = cur.fetchone()
    except TypeError:
        log.fatal("User %s does not exist" % username)
        return False

    # We don't do teams
    if teamowner is not None:
        log.fatal("%s is a team" % username)
        return False

    log.info("Closing %s's account" % username)

    def table_notification(table):
        log.debug("Handling the %s table" % table)

    # All names starting with 'removed' are blacklisted, so this will always
    # succeed.
    new_name = 'removed%d' % person_id

    # Remove the EmailAddress. This is the most important step, as
    # people requesting account removal seem to primarily be interested
    # in ensuring we no longer store this information.
    table_notification('EmailAddress')
    cur.execute("""
        DELETE FROM EmailAddress WHERE person = %s
        """ % sqlvalues(person_id))

    # Clean out personal details from the Person table
    table_notification('Person')
    unknown_rationale = PersonCreationRationale.UNKNOWN.value
    cur.execute(
        """
        UPDATE Person
        SET
            displayname = 'Removed by request',
            name=%(new_name)s,
            language = NULL,
            account = NULL,
            homepage_content = NULL,
            icon = NULL,
            mugshot = NULL,
            hide_email_addresses = TRUE,
            registrant = NULL,
            logo = NULL,
            creation_rationale = %(unknown_rationale)s,
            creation_comment = NULL
        WHERE id = %(person_id)s
        """, vars())

    # Remove the Account. We don't set the status to deactivated,
    # as this script is used to satisfy people who insist on us removing
    # all their personal details from our systems. This includes any
    # identification tokens like email addresses or openid identifiers.
    # So the Account record would be unusable, and contain no useful
    # information.
    table_notification('Account')
    if account_id is not None:
        cur.execute("""
            DELETE FROM Account WHERE id = %s
            """ % sqlvalues(account_id))

    # Reassign their bugs
    table_notification('BugTask')
    cur.execute(
        """
        UPDATE BugTask SET assignee = NULL WHERE assignee = %(person_id)s
        """, vars())

    # Reassign questions assigned to the user, and close all their questions
    # since nobody else can
    table_notification('Question')
    cur.execute(
        """
        UPDATE Question SET assignee=NULL WHERE assignee=%(person_id)s
        """, vars())
    closed_question_status = QuestionStatus.SOLVED.value
    cur.execute(
        """
        UPDATE Question
        SET status=%(closed_question_status)s, whiteboard=
            'Closed by Launchpad due to owner requesting account removal'
        WHERE owner=%(person_id)s
        """, vars())

    # Remove rows from tables in simple cases in the given order
    removals = [
        # Trash their email addresses. Unsociable privacy nut jobs who request
        # account removal would be pissed if they reregistered with their old
        # email address and this resurrected their deleted account, as the
        # email address is probably the piece of data we store that they were
        # most concerned with being removed from our systems.
        ('EmailAddress', 'person'),

        # Trash their codes of conduct and GPG keys
        ('SignedCodeOfConduct', 'owner'),
        ('GpgKey', 'owner'),

        # Subscriptions
        ('BranchSubscription', 'person'),
        ('BugSubscription', 'person'),
        ('QuestionSubscription', 'person'),
        ('SpecificationSubscription', 'person'),

        # Personal stuff, freeing up the namespace for others who want to play
        # or just to remove any fingerprints identifying the user.
        ('IrcId', 'person'),
        ('JabberId', 'person'),
        ('WikiName', 'person'),
        ('PersonLanguage', 'person'),
        ('PersonLocation', 'person'),
        ('SshKey', 'person'),

        # Karma
        ('Karma', 'person'),
        ('KarmaCache', 'person'),
        ('KarmaTotalCache', 'person'),

        # Team memberships
        ('TeamMembership', 'person'),
        ('TeamParticipation', 'person'),

        # Contacts
        ('AnswerContact', 'person'),

        # Pending items in queues
        ('POExportRequest', 'person'),
    ]
    for table, person_id_column in removals:
        table_notification(table)
        cur.execute("""
                DELETE FROM %(table)s WHERE %(person_id_column)s=%(person_id)d
                """ % vars())

    # Trash Sprint Attendance records in the future.
    table_notification('SprintAttendance')
    cur.execute(
        """
        DELETE FROM SprintAttendance
        USING Sprint
        WHERE Sprint.id = SprintAttendance.sprint
            AND attendee=%(person_id)s
            AND Sprint.time_starts > CURRENT_TIMESTAMP AT TIME ZONE 'UTC'
        """, vars())

    return True
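
A hypothetical driver for close_account(), honouring the True/False contract from its docstring (commit only on success); connect() is the same helper the other scripts in this collection use, and the username is a placeholder.

# Hypothetical driver: commit only if the account was closed.
import logging

con = connect()
log = logging.getLogger('close-account')
if close_account(con, log, 'some-departing-user'):
    con.commit()
else:
    con.rollback()
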
Example #49
    def activeProposalsForBranches(source_branch, target_branch):
        return BranchMergeProposal.select("""
            BranchMergeProposal.source_branch = %s AND
            BranchMergeProposal.target_branch = %s AND
            BranchMergeProposal.queue_status NOT IN %s
            """ % sqlvalues(source_branch, target_branch, FINAL_STATES))
Example #50
def _cleanTeamParticipation(child, parent):
    """Remove child from team and clean up child's subteams.

    A participant of child is removed from parent's TeamParticipation
    entries if the only path from the participant to parent is via
    child.
    """
    # Delete participation entries for the child and the child's
    # direct/indirect members in other ancestor teams, unless those
    # ancestor teams have another path to the child besides the
    # membership that has just been deactivated.
    store = Store.of(parent)
    store.execute("""
        DELETE FROM TeamParticipation
        USING (
            /* Get all the participation entries that might need to be
             * deleted, i.e. all the entries where the
             * TeamParticipation.person is a participant of child, and
             * where child participated in TeamParticipation.team until
             * child was removed from parent.
             */
            SELECT person, team
            FROM TeamParticipation
            WHERE person IN (
                    SELECT person
                    FROM TeamParticipation
                    WHERE team = %(child)s
                )
                AND team IN (
                    SELECT team
                    FROM TeamParticipation
                    WHERE person = %(child)s
                        AND team != %(child)s
                )


            EXCEPT (

                /* Compute the TeamParticipation entries that we need to
                 * keep by walking the tree in the TeamMembership table.
                 */
                WITH RECURSIVE parent(person, team) AS (
                    /* Start by getting all the ancestors of the child
                     * from the TeamParticipation table, then get those
                     * ancestors' direct members to recurse through the
                     * tree from the top.
                     */
                    SELECT ancestor.person, ancestor.team
                    FROM TeamMembership ancestor
                    WHERE ancestor.status IN %(active_states)s
                        AND ancestor.team IN (
                            SELECT team
                            FROM TeamParticipation
                            WHERE person = %(child)s
                        )

                    UNION

                    /* Find the next level of direct members, but hold
                     * onto the parent.team, since we want the top and
                     * bottom of the hierarchy to calculate the
                     * TeamParticipation. The query above makes sure
                     * that we do this for all the ancestors.
                     */
                    SELECT child.person, parent.team
                    FROM TeamMembership child
                        JOIN parent ON child.team = parent.person
                    WHERE child.status IN %(active_states)s
                )
                SELECT person, team
                FROM parent
            )
        ) AS keeping
        WHERE TeamParticipation.person = keeping.person
            AND TeamParticipation.team = keeping.team
        """ % sqlvalues(
            child=child.id,
            active_states=ACTIVE_STATES))
    store.invalidate()
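
The EXCEPT branch above is essentially a reachability walk over active memberships. The same idea in plain Python, offered as a conceptual sketch only, where memberships maps a team to its direct active members.

# Conceptual sketch, not the production code: compute everyone an
# ancestor team can still reach through active memberships. Those
# are the (person, team) participation rows the query keeps.
def still_participating(memberships, team):
    seen = set()
    frontier = set(memberships.get(team, ()))
    while frontier:
        member = frontier.pop()
        if member not in seen:
            seen.add(member)
            frontier |= set(memberships.get(member, ()))
    return seen
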
Example #51
def merge_people(from_person, to_person, reviewer, delete=False):
    """Helper for merge and delete methods."""
    # since we are doing direct SQL manipulation, make sure all
    # changes have been flushed to the database
    store = Store.of(from_person)
    store.flush()
    if (from_person.is_team and not to_person.is_team
            or not from_person.is_team and to_person.is_team):
        raise AssertionError("Users cannot be merged with teams.")
    if from_person.is_team and reviewer is None:
        raise AssertionError("Team merged require a reviewer.")
    if getUtility(IArchiveSet).getPPAOwnedByPerson(from_person,
                                                   statuses=[
                                                       ArchiveStatus.ACTIVE,
                                                       ArchiveStatus.DELETING
                                                   ]) is not None:
        raise AssertionError(
            'from_person has a ppa in ACTIVE or DELETING status')
    from_person_branches = getUtility(IAllBranches).ownedBy(from_person)
    if not from_person_branches.isPrivate().is_empty():
        raise AssertionError('from_person has private branches.')
    if from_person.is_team:
        _purgeUnmergableTeamArtifacts(from_person, to_person, reviewer)
    if not getUtility(IEmailAddressSet).getByPerson(from_person).is_empty():
        raise AssertionError('from_person still has email addresses.')

    # Get a database cursor.
    cur = cursor()

    # These table.columns will be skipped by the 'catch all'
    # update performed later
    skip = [
        # The AccessPolicy.person reference is to allow private teams to
        # see their own +junk branches. We don't allow merges for teams who
        # own private branches so we can skip this column.
        ('accesspolicy', 'person'),
        ('teammembership', 'person'),
        ('teammembership', 'team'),
        ('teamparticipation', 'person'),
        ('teamparticipation', 'team'),
        ('personlanguage', 'person'),
        ('person', 'merged'),
        ('personsettings', 'person'),
        ('emailaddress', 'person'),
        # Polls are not carried over when merging teams.
        ('poll', 'team'),
        # We can safely ignore the mailinglist table as there's a sanity
        # check above which prevents teams with associated mailing lists
        # from being merged.
        ('mailinglist', 'team'),
        # I don't think we need to worry about the votecast and vote
        # tables, because a real human should never have two profiles
        # in Launchpad that are active members of a given team and voted
        # in a given poll. -- GuilhermeSalgado 2005-07-07
        # We also can't afford to change poll results after they are
        # closed -- StuartBishop 20060602
        ('votecast', 'person'),
        ('vote', 'person'),
        ('translationrelicensingagreement', 'person'),
        # These are ON DELETE CASCADE and maintained by triggers.
        ('bugsummary', 'viewed_by'),
        ('bugsummaryjournal', 'viewed_by'),
        ('latestpersonsourcepackagereleasecache', 'creator'),
        ('latestpersonsourcepackagereleasecache', 'maintainer'),
    ]

    references = list(postgresql.listReferences(cur, 'person', 'id'))

    # Sanity check. If we have an indirect reference, it must
    # be ON UPDATE CASCADE. We only have one case of this at the moment,
    # but this code ensures we catch any new ones added incorrectly.
    for src_tab, src_col, ref_tab, ref_col, updact, delact in references:
        # If ref_tab and ref_col are not Person.id, then we have
        # an indirect reference. Ensure the update action is 'CASCADE'.
        if ref_tab != 'person' and ref_col != 'id':
            if updact != 'c':
                raise RuntimeError(
                    '%s.%s reference to %s.%s must be ON UPDATE CASCADE' %
                    (src_tab, src_col, ref_tab, ref_col))

    # These rows are in a UNIQUE index, and we can only move them
    # to the new Person if there is not already an entry. E.g. if
    # the destination and source persons are both subscribed to a bug,
    # we cannot change the source person's subscription. We just leave
    # them as noise for the time being.

    to_id = to_person.id
    from_id = from_person.id

    # Update PersonLocation, which is a Person-decorator table.
    _merge_person_decoration(to_person, from_person, skip, 'PersonLocation',
                             'person', [
                                 'last_modified_by',
                             ])

    # Update GPGKey. It won't conflict, but our sanity checks don't
    # know that.
    cur.execute('UPDATE GPGKey SET owner=%(to_id)d WHERE owner=%(from_id)d' %
                vars())
    skip.append(('gpgkey', 'owner'))

    _mergeAccessArtifactGrant(cur, from_id, to_id)
    _mergeAccessPolicyGrant(cur, from_id, to_id)
    skip.append(('accessartifactgrant', 'grantee'))
    skip.append(('accesspolicygrant', 'grantee'))

    # Update the Branches that will not conflict, and fudge the names of
    # ones that *do* conflict.
    _mergeBranches(from_person, to_person)
    skip.append(('branch', 'owner'))

    _mergeBranchMergeQueues(cur, from_id, to_id)
    skip.append(('branchmergequeue', 'owner'))

    _mergeSourcePackageRecipes(from_person, to_person)
    skip.append(('sourcepackagerecipe', 'owner'))

    _mergeMailingListSubscriptions(cur, from_id, to_id)
    skip.append(('mailinglistsubscription', 'person'))

    _mergeBranchSubscription(cur, from_id, to_id)
    skip.append(('branchsubscription', 'person'))

    _mergeBugAffectsPerson(cur, from_id, to_id)
    skip.append(('bugaffectsperson', 'person'))

    _mergeAnswerContact(cur, from_id, to_id)
    skip.append(('answercontact', 'person'))

    _mergeQuestionSubscription(cur, from_id, to_id)
    skip.append(('questionsubscription', 'person'))

    _mergeBugNotificationRecipient(cur, from_id, to_id)
    skip.append(('bugnotificationrecipient', 'person'))

    # We ignore BugSubscriptionFilterMutes.
    skip.append(('bugsubscriptionfiltermute', 'person'))

    # We ignore BugMutes.
    skip.append(('bugmute', 'person'))

    _mergeStructuralSubscriptions(cur, from_id, to_id)
    skip.append(('structuralsubscription', 'subscriber'))

    _mergeSpecificationSubscription(cur, from_id, to_id)
    skip.append(('specificationsubscription', 'person'))

    _mergeSprintAttendance(cur, from_id, to_id)
    skip.append(('sprintattendance', 'attendee'))

    _mergePOExportRequest(cur, from_id, to_id)
    skip.append(('poexportrequest', 'person'))

    _mergeTranslationMessage(cur, from_id, to_id)
    skip.append(('translationmessage', 'submitter'))
    skip.append(('translationmessage', 'reviewer'))

    # Handle the POFileTranslator cache by doing nothing. As it is
    # maintained by triggers, the data migration has already been done
    # for us when we updated the source tables.
    skip.append(('pofiletranslator', 'person'))

    _mergeTranslationImportQueueEntry(cur, from_id, to_id)
    skip.append(('translationimportqueueentry', 'importer'))

    # XXX cprov 2007-02-22 bug=87098:
    # Since we only allow one PPA for each user,
    # we can't reassign the old user archive to the new user.
    # It needs to be done manually, probably by reassigning all
    # publications from the old PPA to the new one, performing a
    # careful_publishing on it, and removing the old one from disk.
    skip.append(('archive', 'owner'))

    _mergeCodeReviewVote(cur, from_id, to_id)
    skip.append(('codereviewvote', 'reviewer'))

    _mergeKarmaCache(cur, from_id, to_id, from_person.karma)
    skip.append(('karmacache', 'person'))
    skip.append(('karmatotalcache', 'person'))

    _mergeDateCreated(cur, from_id, to_id)

    _mergeLoginTokens(cur, from_id, to_id)
    skip.append(('logintoken', 'requester'))

    # Sanity check. If we have a reference that participates in a
    # UNIQUE index, it must have already been handled by this point.
    # We can tell this by looking at the skip list.
    for src_tab, src_col, ref_tab, ref_col, updact, delact in references:
        uniques = postgresql.listUniques(cur, src_tab, src_col)
        if len(uniques) > 0 and (src_tab, src_col) not in skip:
            raise NotImplementedError(
                '%s.%s reference to %s.%s is in a UNIQUE index '
                'but has not been handled' %
                (src_tab, src_col, ref_tab, ref_col))

    # Handle all simple cases
    for src_tab, src_col, ref_tab, ref_col, updact, delact in references:
        if (src_tab, src_col) in skip:
            continue
        cur.execute('UPDATE %s SET %s=%d WHERE %s=%d' %
                    (src_tab, src_col, to_person.id, src_col, from_person.id))

    _mergeTeamMembership(cur, from_id, to_id)
    _mergeProposedInvitedTeamMembership(cur, from_id, to_id)

    # Flag the person as merged
    cur.execute('''
        UPDATE Person SET merged=%(to_id)d WHERE id=%(from_id)d
        ''' % vars())

    # Append a -merged suffix to the person's name.
    name = base = "%s-merged" % from_person.name.encode('ascii')
    cur.execute("SELECT id FROM Person WHERE name = %s" % sqlvalues(name))
    i = 1
    while cur.fetchone():
        name = "%s%d" % (base, i)
        cur.execute("SELECT id FROM Person WHERE name = %s" % sqlvalues(name))
        i += 1
    cur.execute("UPDATE Person SET name = %s WHERE id = %s" %
                sqlvalues(name, from_person))

    # Since we've updated the database behind Storm's back,
    # flush its caches.
    store.invalidate()

    # Move OpenId Identifiers from the merged account to the new
    # account.
    if from_person.account is not None and to_person.account is not None:
        store.execute("""
            UPDATE OpenIdIdentifier SET account=%s WHERE account=%s
            """ % sqlvalues(to_person.accountID, from_person.accountID))

    if delete:
        # We don't notify anyone about deletes.
        return

    # Inform the user of the merge changes.
    if to_person.is_team:
        mail_text = get_email_template('team-merged.txt', app='registry')
        subject = 'Launchpad teams merged'
    else:
        mail_text = get_email_template('person-merged.txt', app='registry')
        subject = 'Launchpad accounts merged'
    mail_text = mail_text % {
        'dupename': from_person.name,
        'person': to_person.name,
    }
    getUtility(IPersonNotificationSet).addNotification(to_person, subject,
                                                       mail_text)
Example #52
    def _judgeSuperseded(self, source_records, binary_records):
        """Determine whether the superseded packages supplied should
        be moved to death row or not.

        Currently this is done by assuming that any superseded binary
        package should be removed. In the future this should attempt
        to supersede binaries in build-sized chunks only, bug 55030.

        Superseded source packages are considered removable when they
        have no binaries in this distroseries which are published or
        superseded.

        When a package is considered for death row it is given a
        'scheduled deletion date' of now plus the defined 'stay of execution'
        time provided in the configuration parameter.
        """
        self.logger.debug("Beginning superseded processing...")

        for pub_record in binary_records:
            binpkg_release = pub_record.binarypackagerelease
            self.logger.debug(
                "%s/%s (%s) has been judged eligible for removal",
                binpkg_release.binarypackagename.name, binpkg_release.version,
                pub_record.distroarchseries.architecturetag)
            self._setScheduledDeletionDate(pub_record)
            # XXX cprov 20070820: 'datemadepending' is useless, since it's
            # always equal to "scheduleddeletiondate - quarantine".
            pub_record.datemadepending = UTC_NOW

        for pub_record in source_records:
            srcpkg_release = pub_record.sourcepackagerelease
            # Attempt to find all binaries of this
            # SourcePackageRelease which are/have been in this
            # distroseries...
            considered_binaries = BinaryPackagePublishingHistory.select(
                """
            binarypackagepublishinghistory.distroarchseries =
                distroarchseries.id AND
            binarypackagepublishinghistory.scheduleddeletiondate IS NULL AND
            binarypackagepublishinghistory.dateremoved IS NULL AND
            binarypackagepublishinghistory.archive = %s AND
            binarypackagebuild.source_package_release = %s AND
            distroarchseries.distroseries = %s AND
            binarypackagepublishinghistory.binarypackagerelease =
            binarypackagerelease.id AND
            binarypackagerelease.build = binarypackagebuild.id AND
            binarypackagepublishinghistory.pocket = %s
            """ % sqlvalues(self.archive, srcpkg_release,
                            pub_record.distroseries, pub_record.pocket),
                clauseTables=[
                    'DistroArchSeries', 'BinaryPackageRelease',
                    'BinaryPackageBuild'
                ])

            # There is at least one non-removed binary to consider
            if not considered_binaries.is_empty():
                # However we can still remove *this* record if there's
                # at least one other PUBLISHED for the spr. This happens
                # when a package is moved between components.
                published = SourcePackagePublishingHistory.selectBy(
                    distroseries=pub_record.distroseries,
                    pocket=pub_record.pocket,
                    status=PackagePublishingStatus.PUBLISHED,
                    archive=self.archive,
                    sourcepackagereleaseID=srcpkg_release.id)
                # Zero PUBLISHED for this spr, so nothing to take over
                # for us, so leave it for consideration next time.
                if published.is_empty():
                    continue

            # Okay, so there are no unremoved binaries; let's go for it...
            self.logger.debug(
                "%s/%s (%s) source has been judged eligible for removal",
                srcpkg_release.sourcepackagename.name, srcpkg_release.version,
                pub_record.id)
            self._setScheduledDeletionDate(pub_record)
            # XXX cprov 20070820: 'datemadepending' is pointless, since it's
            # always equal to "scheduleddeletiondate - quarantine".
            pub_record.datemadepending = UTC_NOW
Example #53
    def getPOTemplate(self, name):
        """See IProductSeries."""
        return POTemplate.selectOne(
            "productseries = %s AND name = %s" % sqlvalues(self.id, name))
Example #54
    def _compute_packageset_delta(self, origin):
        """Given a source/target archive find obsolete or missing packages.

        This means finding out which packages in a given source archive are
        fresher or new with respect to a target archive.
        """
        store = IStore(BinaryPackagePublishingHistory)
        # The query below will find all packages in the source archive that
        # are fresher than their counterparts in the target archive.
        find_newer_packages = """
            UPDATE tmp_merge_copy_data mcd SET
                s_sspph = secsrc.id,
                s_sourcepackagerelease = spr.id,
                s_version = spr.version,
                obsoleted = True,
                s_status = secsrc.status,
                s_component = secsrc.component,
                s_section = secsrc.section
            FROM
                SourcePackagePublishingHistory secsrc,
                SourcePackageRelease spr,
                SourcePackageName spn
            WHERE
                secsrc.archive = %s AND secsrc.status IN (%s, %s) AND
                secsrc.distroseries = %s AND secsrc.pocket = %s AND
                secsrc.sourcepackagerelease = spr.id AND
                spr.sourcepackagename = spn.id AND
                spn.name = mcd.sourcepackagename AND
                spr.version > mcd.t_version
        """ % sqlvalues(origin.archive, PackagePublishingStatus.PENDING,
                        PackagePublishingStatus.PUBLISHED, origin.distroseries,
                        origin.pocket)

        if origin.component is not None:
            find_newer_packages += (" AND secsrc.component = %s" %
                                    quote(origin.component))
        store.execute(find_newer_packages)

        # Now find the packages that exist in the source archive but *not* in
        # the target archive.
        find_origin_only_packages = """
            INSERT INTO tmp_merge_copy_data (
                s_sspph, s_sourcepackagerelease, sourcepackagename,
                sourcepackagename_id, s_version, missing, s_status,
                s_component, s_section)
            SELECT
                secsrc.id AS s_sspph,
                secsrc.sourcepackagerelease AS s_sourcepackagerelease,
                spn.name AS sourcepackagename,
                spn.id AS sourcepackagename_id,
                spr.version AS s_version,
                True AS missing,
                secsrc.status AS s_status,
                secsrc.component AS s_component,
                secsrc.section AS s_section
            FROM SourcePackagePublishingHistory secsrc
            JOIN SourcePackageRelease AS spr ON
                spr.id = secsrc.sourcepackagerelease
            JOIN SourcePackageName AS spn ON
                spn.id = spr.sourcepackagename
            WHERE
                secsrc.archive = %s AND
                secsrc.status IN (%s, %s) AND
                secsrc.distroseries = %s AND
                secsrc.pocket = %s AND
                spn.name NOT IN (
                    SELECT sourcepackagename FROM tmp_merge_copy_data)
        """ % sqlvalues(origin.archive, PackagePublishingStatus.PENDING,
                        PackagePublishingStatus.PUBLISHED, origin.distroseries,
                        origin.pocket)

        if origin.component is not None:
            find_origin_only_packages += (" AND secsrc.component = %s" %
                                          quote(origin.component))
        store.execute(find_origin_only_packages)
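
Stripped of the SQL machinery, the delta computed above reduces to a comparison of two name-to-version mappings. A conceptual sketch follows; note that it compares versions as plain strings, whereas real Debian version ordering is more subtle.

# Conceptual sketch of the packageset delta: 'fresher' entries would
# update tmp_merge_copy_data rows, 'missing' entries would be added.
def packageset_delta(source, target):
    fresher = dict(
        (name, version) for name, version in source.items()
        if name in target and version > target[name])
    missing = dict(
        (name, version) for name, version in source.items()
        if name not in target)
    return fresher, missing
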
Example #55
def calculate_replication_set(cur, seeds):
    """Return the minimal set of tables and sequences needed in a
    replication set containing the seed table.

    A replication set must contain all tables linked by foreign key
    reference to the given table, and sequences used to generate keys.
    Tables and sequences can be added to the IGNORED_TABLES and
    IGNORED_SEQUENCES lists for cases where we know we can safely ignore
    this restriction.

    :param seeds: [(namespace, tablename), ...]

    :returns: (tables, sequences)
    """
    # Results
    tables = set()
    sequences = set()

    # Our pending set to check
    pending_tables = set(seeds)

    # Generate the set of tables that reference the seed directly
    # or indirectly via foreign key constraints, including the seed itself.
    while pending_tables:
        namespace, tablename = pending_tables.pop()

        # Skip if the table doesn't exist - we might have seeds listed that
        # have been removed or are yet to be created.
        cur.execute("""
            SELECT TRUE
            FROM pg_class, pg_namespace
            WHERE pg_class.relnamespace = pg_namespace.oid
                AND pg_namespace.nspname = %s
                AND pg_class.relname = %s
            """ % sqlvalues(namespace, tablename))
        if cur.fetchone() is None:
            log.debug("Table %s.%s doesn't exist" % (namespace, tablename))
            continue

        tables.add((namespace, tablename))

        # Find all tables that reference the current (seed) table
        # and all tables that the seed table references.
        cur.execute("""
            SELECT ref_namespace.nspname, ref_class.relname
            FROM
                -- One of the seed tables
                pg_class AS seed_class,
                pg_namespace AS seed_namespace,

                -- A table referencing the seed, or being referenced by
                -- the seed.
                pg_class AS ref_class,
                pg_namespace AS ref_namespace,

                pg_constraint
            WHERE
                seed_class.relnamespace = seed_namespace.oid
                AND ref_class.relnamespace = ref_namespace.oid

                AND seed_namespace.nspname = %s
                AND seed_class.relname = %s

                -- Foreign key constraints are all we care about.
                AND pg_constraint.contype = 'f'

                -- We want tables referenced by, or referred to, the
                -- seed table.
                AND ((pg_constraint.conrelid = ref_class.oid
                        AND pg_constraint.confrelid = seed_class.oid)
                    OR (pg_constraint.conrelid = seed_class.oid
                        AND pg_constraint.confrelid = ref_class.oid)
                    )
            """ % sqlvalues(namespace, tablename))
        for namespace, tablename in cur.fetchall():
            key = (namespace, tablename)
            if (key not in tables and key not in pending_tables and '%s.%s' %
                (namespace, tablename) not in IGNORED_TABLES):
                pending_tables.add(key)

    # Generate the set of sequences that are linked to any of our set of
    # tables. We assume these are all sequences created by creation of
    # serial or bigserial columns, or other sequences OWNED BY a particular
    # column.
    for namespace, tablename in tables:
        cur.execute("""
            SELECT seq
            FROM (
                SELECT pg_get_serial_sequence(%s, attname) AS seq
                FROM pg_namespace, pg_class, pg_attribute
                WHERE pg_namespace.nspname = %s
                    AND pg_class.relnamespace = pg_namespace.oid
                    AND pg_class.relname = %s
                    AND pg_attribute.attrelid = pg_class.oid
                    AND pg_attribute.attisdropped IS FALSE
                ) AS whatever
            WHERE seq IS NOT NULL;
            """ % sqlvalues(fqn(namespace, tablename), namespace, tablename))
        for sequence, in cur.fetchall():
            if sequence not in IGNORED_SEQUENCES:
                sequences.add(sequence)

    # We can't easily convert the sequence names to (namespace, name) tuples,
    # so we might as well convert the tables to dot notation for consistency.
    tables = set(fqn(namespace, tablename) for namespace, tablename in tables)

    return tables, sequences
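
A hypothetical invocation of calculate_replication_set(); the seed table is illustrative, and the cursor comes from the same connect() helper used elsewhere in this collection.

# Hypothetical usage: seed with one table and see what the foreign
# key graph drags in.
cur = connect().cursor()
tables, sequences = calculate_replication_set(
    cur, [('public', 'person')])
for table in sorted(tables):
    print 'table:', table
for sequence in sorted(sequences):
    print 'sequence:', sequence
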
Example #56
    def _clone_source_packages(self, origin, destination, sourcepackagenames):
        """Copy source publishing data from origin to destination.

        @type origin: PackageLocation
        @param origin: the location from which source publishing
            records are to be copied.
        @type destination: PackageLocation
        @param destination: the location to which the data is
            to be copied.
        @type sourcepackagenames: Iterable
        @param sourcepackagenames: List of source packages to restrict
            the copy to
        """
        store = IStore(BinaryPackagePublishingHistory)
        query = '''
            INSERT INTO SourcePackagePublishingHistory (
                sourcepackagerelease, distroseries, status, component,
                section, archive, datecreated, datepublished, pocket,
                sourcepackagename)
            SELECT
                spph.sourcepackagerelease,
                %s as distroseries,
                spph.status,
                spph.component,
                spph.section,
                %s as archive,
                %s as datecreated,
                %s as datepublished,
                %s as pocket,
                spph.sourcepackagename
            FROM SourcePackagePublishingHistory AS spph
            WHERE
                spph.distroseries = %s AND
                spph.status in (%s, %s) AND
                spph.pocket = %s AND
                spph.archive = %s
            ''' % sqlvalues(
            destination.distroseries, destination.archive, UTC_NOW, UTC_NOW,
            destination.pocket, origin.distroseries,
            PackagePublishingStatus.PENDING, PackagePublishingStatus.PUBLISHED,
            origin.pocket, origin.archive)

        if sourcepackagenames and len(sourcepackagenames) > 0:
            query += '''
                AND spph.sourcepackagerelease IN (
                    SELECT spr.id
                    FROM SourcePackageRelease AS spr
                    JOIN SourcePackageName AS spn ON
                        spn.id = spr.sourcepackagename
                    WHERE spn.name IN %s
                )''' % sqlvalues(sourcepackagenames)

        if origin.packagesets:
            query += '''
                AND spph.sourcepackagerelease IN (
                    SELECT spr.id
                    FROM SourcePackageRelease AS spr
                    JOIN PackagesetSources AS pss ON
                        PSS.sourcepackagename = spr.sourcepackagename
                    JOIN FlatPackagesetInclusion AS fpsi ON
                        fpsi.child = pss.packageset
                    WHERE fpsi.parent in %s
                )
                     ''' % sqlvalues([p.id for p in origin.packagesets])

        if origin.component:
            query += "and spph.component = %s" % sqlvalues(origin.component)

        store.execute(query)
Example #57
def referenced_oops(start_date, end_date, context_clause, context_params):
    '''Find OOPS codes that are referenced somewhere in Launchpad.

    This returns OOPS references from:
     - any message, message chunk or bug.
     - any question that passes context_clause.

    Privacy and access controls are ignored: the maximum disclosure is a
    single word immediately after the word 'OOPS'.  Future iterations may
    tighten up the returned references.

    :param start_date: The earliest modification date to consider.
    :param end_date: The last modification date to consider.
    :param context_clause: A filter to restrict the question clause against.
        For instance: 'product=%(product)s'.
    :param context_params: Parameters needed to evaluate context_clause.
        For instance: {'product': 12}
    :return: A set of the found OOPS ids.
    '''
    # Note that the POSIX regexp syntax is subtly different to the Python,
    # and that we need to escape all \ characters to keep the SQL interpreter
    # happy.
    posix_oops_match = (r"~* E'^(oops-\\w+)|(\\moops-\\w+)'")
    params = dict(start_date=start_date, end_date=end_date)
    params.update(context_params)
    sql_params = sqlvalues(**params)
    sql_params['posix_oops_match'] = posix_oops_match
    query = ("""
        WITH recent_messages AS
            (SELECT id FROM Message WHERE
             datecreated BETWEEN %(start_date)s AND %(end_date)s)
        SELECT DISTINCT subject FROM Message
        WHERE subject %(posix_oops_match)s AND subject IS NOT NULL
            AND id IN (SELECT id FROM recent_messages)
        UNION ALL
        SELECT content FROM MessageChunk WHERE content %(posix_oops_match)s
            AND message IN (SELECT id FROM recent_messages)
        UNION ALL
        SELECT title || ' ' || description
        FROM Bug WHERE
            date_last_updated BETWEEN %(start_date)s AND %(end_date)s AND
            (title %(posix_oops_match)s OR description %(posix_oops_match)s)
        UNION ALL
        SELECT title || ' ' || description || ' ' || COALESCE(whiteboard,'')
        FROM Question WHERE """ + context_clause + """
            AND (datelastquery BETWEEN %(start_date)s AND %(end_date)s
                OR datelastresponse BETWEEN %(start_date)s AND %(end_date)s)
            AND (title %(posix_oops_match)s
                OR description %(posix_oops_match)s
                OR whiteboard %(posix_oops_match)s)
        """) % sql_params

    referenced_codes = set()
    oops_re = re.compile(r'(?i)(?P<oops>\boops-\w+)')

    cur = cursor()
    cur.execute(query)
    for content in (row[0] for row in cur.fetchall()):
        for oops in oops_re.findall(content):
            referenced_codes.add(oops)

    return referenced_codes
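
The SQL above only narrows the candidate texts; the authoritative extraction is the oops_re pass, which works standalone.

# Standalone illustration of the OOPS id extraction; runnable
# without a database.
import re

oops_re = re.compile(r'(?i)(?P<oops>\boops-\w+)')
sample = "Please see OOPS-1234ABC (and oops-99xyz) for details."
print oops_re.findall(sample)
# ['OOPS-1234ABC', 'oops-99xyz']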