Code example #1
    def main(self):
        "Run UpdateDatabaseTableStats."""
        store = IMasterStore(Person)

        # The logic is in a stored procedure because we want to run
        # ps(1) on the database server rather than the host this script
        # is running on.
        self.logger.debug("Invoking update_database_stats()")
        store.execute("SELECT update_database_stats()", noresult=True)

        self.logger.debug("Committing")
        store.commit()
Code example #2
    def main(self):
        "Run UpdateDatabaseTableStats." ""
        store = IMasterStore(Person)

        # The logic is in a stored procedure because we want to run
        # ps(1) on the database server rather than the host this script
        # is running on.
        self.logger.debug("Invoking update_database_stats()")
        store.execute("SELECT update_database_stats()", noresult=True)

        self.logger.debug("Committing")
        store.commit()
Code example #3
    def test_UnusedPOTMsgSetPruner_removes_unreferenced_message_sets(self):
        # If a POTMsgSet is not referenced by any templates the
        # UnusedPOTMsgSetPruner will remove it.
        switch_dbuser('testadmin')
        potmsgset = self.factory.makePOTMsgSet()
        # Cheekily drop any references to the POTMsgSet we just created.
        store = IMasterStore(POTMsgSet)
        store.execute(
            "DELETE FROM TranslationTemplateItem WHERE potmsgset = %s"
            % potmsgset.id)
        transaction.commit()
        unreferenced_msgsets = store.find(
            POTMsgSet,
            Not(In(
                POTMsgSet.id,
                SQL("SELECT potmsgset FROM TranslationTemplateItem"))))
        self.assertNotEqual(0, unreferenced_msgsets.count())
        self.runDaily()
        self.assertEqual(0, unreferenced_msgsets.count())
Code example #4
    def test_BugSummaryJournalRollup(self):
        switch_dbuser('testadmin')
        store = IMasterStore(CommercialSubscription)

        # Generate a load of entries in BugSummaryJournal.
        store.execute("UPDATE BugTask SET status=42")

        # We only need a few to test.
        num_rows = store.execute(
            "SELECT COUNT(*) FROM BugSummaryJournal").get_one()[0]
        self.assertThat(num_rows, GreaterThan(10))

        self.runFrequently()

        # We just care that the rows have been removed. The bugsummary
        # tests confirm that the rollup stored method is working correctly.
        num_rows = store.execute(
            "SELECT COUNT(*) FROM BugSummaryJournal").get_one()[0]
        self.assertThat(num_rows, Equals(0))
Code example #5
def fix_teamparticipation_consistency(log, errors):
    """Fix missing or spurious participations.

    This function does not consult `TeamMembership` at all, so it /may/
    introduce another participation inconsistency if the records that are the
    subject of the given errors have been modified since being checked.

    :param errors: An iterable of `ConsistencyError` tuples.
    """
    sql_missing = (
        """
        INSERT INTO TeamParticipation (team, person)
        SELECT %(team)s, %(person)s
        EXCEPT
        SELECT team, person
          FROM TeamParticipation
         WHERE team = %(team)s
           AND person = %(person)s
        """)
    sql_spurious = (
        """
        DELETE FROM TeamParticipation
         WHERE team = %(team)s
           AND person IN %(people)s
        """)
    store = IMasterStore(TeamParticipation)
    for error in errors:
        if error.type == "missing":
            for person in error.people:
                statement = sql_missing % sqlvalues(
                    team=error.team, person=person)
                log.debug(statement)
                store.execute(statement)
                transaction.commit()
        elif error.type == "spurious":
            statement = sql_spurious % sqlvalues(
                team=error.team, people=error.people)
            log.debug(statement)
            store.execute(statement)
            transaction.commit()
        else:
            log.warn("Unrecognized error: %r", error)
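For context, here is a minimal sketch of the error tuples this function consumes. The shape is inferred purely from the attribute accesses above (error.type, error.team, error.people); the real ConsistencyError definition lives elsewhere in the checking code, so the field names and sample values here are assumptions.

from collections import namedtuple

# Hypothetical reconstruction of the tuple type used above; only the field
# names are implied by the code (error.type, error.team, error.people).
ConsistencyError = namedtuple("ConsistencyError", ("type", "team", "people"))

errors = [
    # Team 42 is missing participations for people 7 and 8.
    ConsistencyError("missing", 42, [7, 8]),
    # Team 42 has participations for people 9 and 10 that should not exist.
    ConsistencyError("spurious", 42, [9, 10]),
]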
Code example #6
    def test_OpenIDConsumerAssociationPruner(self):
        pruner = OpenIDConsumerAssociationPruner
        table_name = pruner.table_name
        switch_dbuser('testadmin')
        store = IMasterStore(CommercialSubscription)
        now = time.time()
        # Create some associations in the past with lifetimes
        for delta in range(0, 20):
            store.execute("""
                INSERT INTO %s (server_url, handle, issued, lifetime)
                VALUES ('%s', '%s', %d, %d)
                """ % (table_name, str(delta), str(delta), now - 10, delta))
        transaction.commit()

        # Ensure that we created at least one expirable row (using the
        # test start time as 'now').
        num_expired = store.execute("""
            SELECT COUNT(*) FROM %s
            WHERE issued + lifetime < %f
            """ % (table_name, now)).get_one()[0]
        self.failUnless(num_expired > 0)

        # Expire all those expirable rows, and possibly a few more if this
        # test is running slow.
        self.runFrequently()

        switch_dbuser('testadmin')
        store = IMasterStore(CommercialSubscription)
        # Confirm all the rows we know should have been expired have
        # been expired. These are the ones that would be expired using
        # the test start time as 'now'.
        num_expired = store.execute("""
            SELECT COUNT(*) FROM %s
            WHERE issued + lifetime < %f
            """ % (table_name, now)).get_one()[0]
        self.failUnlessEqual(num_expired, 0)

        # Confirm that we haven't expired everything. This test will fail
        # if it has taken 10 seconds to get this far.
        num_unexpired = store.execute(
            "SELECT COUNT(*) FROM %s" % table_name).get_one()[0]
        self.failUnless(num_unexpired > 0)
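A worked example of the expiry condition the test relies on. The rows are created with issued = now - 10 and lifetime = delta for delta in 0..19, so a row is expirable at the test's start exactly when issued + lifetime < now, i.e. when its lifetime is below 10.

# Worked example of the expiry condition (issued + lifetime < now):
now = 1000000.0
issued = now - 10
assert issued + 3 < now          # lifetime 3: already expired at test start
assert not (issued + 15 < now)   # lifetime 15: still valid at test start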
Code example #7
    def __call__(self, chunk_size):
        self.logger.info(
            "%s (limited to %d rows)", self.statement.splitlines()[0],
            chunk_size)
        store = IMasterStore(DistroSeries)
        result = store.execute(self.statement, (self.series.id, chunk_size,))
        self.done = (result.rowcount == 0)
        self.logger.info(
            "%d rows deleted (%s)", result.rowcount,
            ("done" if self.done else "not done"))
        store.commit()
Code example #9
def fix_teamparticipation_consistency(log, errors):
    """Fix missing or spurious participations.

    This function does not consult `TeamMembership` at all, so it /may/
    introduce another participation inconsistency if the records that are the
    subject of the given errors have been modified since being checked.

    :param errors: An iterable of `ConsistencyError` tuples.
    """
    sql_missing = ("""
        INSERT INTO TeamParticipation (team, person)
        SELECT %(team)s, %(person)s
        EXCEPT
        SELECT team, person
          FROM TeamParticipation
         WHERE team = %(team)s
           AND person = %(person)s
        """)
    sql_spurious = ("""
        DELETE FROM TeamParticipation
         WHERE team = %(team)s
           AND person IN %(people)s
        """)
    store = IMasterStore(TeamParticipation)
    for error in errors:
        if error.type == "missing":
            for person in error.people:
                statement = sql_missing % sqlvalues(team=error.team,
                                                    person=person)
                log.debug(statement)
                store.execute(statement)
                transaction.commit()
        elif error.type == "spurious":
            statement = sql_spurious % sqlvalues(team=error.team,
                                                 people=error.people)
            log.debug(statement)
            store.execute(statement)
            transaction.commit()
        else:
            log.warn("Unrecognized error: %r", error)
Code example #10
    def _blockWhenLagged(self):
        """When database replication lag is high, block until it drops."""
        # Lag is most meaningful on the master.
        from lp.services.librarian.model import LibraryFileAlias
        store = IMasterStore(LibraryFileAlias)
        msg_counter = 0
        while not self._isTimedOut():
            lag = store.execute("SELECT replication_lag()").get_one()[0]
            if lag is None or lag <= self.acceptable_replication_lag:
                return

            # Report just once every 10 minutes to avoid log spam.
            msg_counter += 1
            if msg_counter % 60 == 1:
                self.log.info(
                    "Database replication lagged %s. "
                    "Sleeping up to 10 minutes.", lag)

            # Don't become a long running transaction!
            transaction.abort()
            self._sleep(10)
Code example #11
File: looptuner.py  Project: pombredanne/launchpad-3
    def _blockWhenLagged(self):
        """When database replication lag is high, block until it drops."""
        # Lag is most meaningful on the master.
        from lp.services.librarian.model import LibraryFileAlias
        store = IMasterStore(LibraryFileAlias)
        msg_counter = 0
        while not self._isTimedOut():
            lag = store.execute("SELECT replication_lag()").get_one()[0]
            if lag is None or lag <= self.acceptable_replication_lag:
                return

            # Report just once every 10 minutes to avoid log spam.
            msg_counter += 1
            if msg_counter % 60 == 1:
                self.log.info(
                    "Database replication lagged %s. "
                    "Sleeping up to 10 minutes.", lag)

            # Don't become a long running transaction!
            transaction.abort()
            self._sleep(10)
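The loop above relies on _isTimedOut() and _sleep() helpers that are not part of this excerpt. A rough sketch of what such helpers could look like follows; the class name and the abort_time budget are assumptions, and the real LoopTuner methods may differ.

import time

class WaitHelpersSketch:
    """Hypothetical stand-ins for the timeout helpers used above."""

    abort_time = 600  # assumed overall waiting budget, in seconds

    def __init__(self):
        self.start_time = time.time()

    def _isTimedOut(self):
        # True once the overall waiting budget has been used up.
        return time.time() - self.start_time > self.abort_time

    def _sleep(self, seconds):
        # Plain sleep; a real tuner might wake early on shutdown.
        time.sleep(seconds)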
Code example #12
File: looptuner.py  Project: pombredanne/launchpad-3
    def _blockForLongRunningTransactions(self):
        """If there are long running transactions, block to avoid making
        bloat worse."""
        if self.long_running_transaction is None:
            return
        from lp.services.librarian.model import LibraryFileAlias
        store = IMasterStore(LibraryFileAlias)
        msg_counter = 0
        while not self._isTimedOut():
            results = list(
                store.execute(("""
                SELECT
                    CURRENT_TIMESTAMP - xact_start,
                    %(pid)s,
                    usename,
                    datname,
                    %(query)s
                FROM activity()
                WHERE xact_start < CURRENT_TIMESTAMP - interval '%%f seconds'
                    AND datname = current_database()
                ORDER BY xact_start LIMIT 4
                """ % activity_cols(store)) %
                              self.long_running_transaction).get_all())
            if not results:
                break

            # Check for long running transactions every 10 seconds, but
            # only report every 10 minutes to avoid log spam.
            msg_counter += 1
            if msg_counter % 60 == 1:
                for runtime, pid, usename, datname, query in results:
                    self.log.info("Blocked on %s old xact %s@%s/%d - %s.",
                                  runtime, usename, datname, pid, query)
                self.log.info("Sleeping for up to 10 minutes.")
            # Don't become a long running transaction!
            transaction.abort()
            self._sleep(10)
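This variant builds its query with activity_cols(store), while the next example hardcodes procpid and current_query. The difference matters because pg_stat_activity renamed procpid to pid and current_query to query in PostgreSQL 9.2. A plausible sketch of such a helper follows; the version check and return shape are assumptions, not the real implementation.

def activity_cols(store):
    # Pick pg_stat_activity column names for the server we are talking to.
    # PostgreSQL 9.2 renamed procpid -> pid and current_query -> query.
    version = int(store.execute("SHOW server_version_num").get_one()[0])
    if version < 90200:
        return {'pid': 'procpid', 'query': 'current_query'}
    return {'pid': 'pid', 'query': 'query'}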
Code example #13
    def _blockForLongRunningTransactions(self):
        """If there are long running transactions, block to avoid making
        bloat worse."""
        if self.long_running_transaction is None:
            return
        from lp.services.librarian.model import LibraryFileAlias
        store = IMasterStore(LibraryFileAlias)
        msg_counter = 0
        while not self._isTimedOut():
            results = list(store.execute("""
                SELECT
                    CURRENT_TIMESTAMP - xact_start,
                    procpid,
                    usename,
                    datname,
                    current_query
                FROM activity()
                WHERE xact_start < CURRENT_TIMESTAMP - interval '%f seconds'
                    AND datname = current_database()
                ORDER BY xact_start LIMIT 4
                """ % self.long_running_transaction).get_all())
            if not results:
                break

            # Check for long running transactions every 10 seconds, but
            # only report every 10 minutes to avoid log spam.
            msg_counter += 1
            if msg_counter % 60 == 1:
                for runtime, procpid, usename, datname, query in results:
                    self.log.info(
                        "Blocked on %s old xact %s@%s/%d - %s.",
                        runtime, usename, datname, procpid, query)
                self.log.info("Sleeping for up to 10 minutes.")
            # Don't become a long running transaction!
            transaction.abort()
            self._sleep(10)
Code example #14
class TestBulkPruner(TestCase):
    layer = ZopelessDatabaseLayer

    def setUp(self):
        super(TestBulkPruner, self).setUp()

        self.store = IMasterStore(CommercialSubscription)
        self.store.execute("CREATE TABLE BulkFoo (id serial PRIMARY KEY)")

        for i in range(10):
            self.store.add(BulkFoo())

        self.log = logging.getLogger('garbo')

    def test_bulkpruner(self):
        pruner = BulkFooPruner(self.log)

        # The loop thinks there is stuff to do. Confirm the initial
        # state is sane.
        self.assertFalse(pruner.isDone())

        # An arbitrary chunk size.
        chunk_size = 2

        # Determine how many items to prune and to leave rather than
        # hardcode these numbers.
        num_to_prune = self.store.find(
            BulkFoo, BulkFoo.id < 5).count()
        num_to_leave = self.store.find(
            BulkFoo, BulkFoo.id >= 5).count()
        self.assertTrue(num_to_prune > chunk_size)
        self.assertTrue(num_to_leave > 0)

        # Run one loop. Make sure it committed by throwing away
        # uncommitted changes.
        pruner(chunk_size)
        transaction.abort()

        # Confirm 'chunk_size' items were removed; no more, no less.
        num_remaining = self.store.find(BulkFoo).count()
        expected_num_remaining = num_to_leave + num_to_prune - chunk_size
        self.assertEqual(num_remaining, expected_num_remaining)

        # The loop thinks there is more stuff to do.
        self.assertFalse(pruner.isDone())

        # Run the loop to completion, removing the remaining targeted rows.
        while not pruner.isDone():
            pruner(1000000)
        transaction.abort()

        # Confirm we have removed all targeted rows.
        self.assertEqual(self.store.find(BulkFoo, BulkFoo.id < 5).count(), 0)

        # Confirm we have the expected number of remaining rows.
        # With the previous check, this means no untargeted rows
        # were removed.
        self.assertEqual(
            self.store.find(BulkFoo, BulkFoo.id >= 5).count(), num_to_leave)

        # Cleanup clears up our resources.
        pruner.cleanUp()

        # We can run it again - temporary objects cleaned up.
        pruner = BulkFooPruner(self.log)
        while not pruner.isDone():
            pruner(chunk_size)
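The test refers to a BulkFoo model and a BulkFooPruner that are not shown here. A hypothetical sketch of what such fixtures might look like follows; the BulkPruner attribute names (target_table_class, ids_to_prune_query, maximum_chunk_size) are assumptions based on how the test drives the pruner, not a copy of the real definitions.

from storm.locals import Int, Storm

class BulkFoo(Storm):
    # Maps onto the BulkFoo table created in setUp() above.
    __storm_table__ = 'BulkFoo'
    id = Int(primary=True)

class BulkFooPruner(BulkPruner):  # BulkPruner itself is not shown in this excerpt
    # Prune the same rows the test later checks for: ids below 5.
    target_table_class = BulkFoo
    ids_to_prune_query = "SELECT id FROM BulkFoo WHERE id < 5"
    maximum_chunk_size = 10000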
Code example #15
class DatabaseTransactionPolicy:
    """Context manager for read-only transaction policy.

    Use this to define regions of code that explicitly allow or disallow
    changes to the database:

        # We want to be sure that inspect_data does not inadvertently
        # make any changes in the database, but we can't run it on the
        # slave store because it doesn't tolerate replication lag.
        with DatabaseTransactionPolicy(read_only=True):
            inspect_data()

    The simplest way to use this is as a special transaction:
     * You must commit/abort before entering the policy.
     * Exiting the policy through an exception aborts its changes.
     * Before completing a read-write policy region, you must commit or abort.

    You can also have multiple transactions inside one policy, however; the
    policy still applies after a commit or abort.

    Policies can be nested--a nested policy overrides the one it's nested in.
    After the nested policy has exited, the previous policy applies again:

        # This code needs to control the database changes it makes very
        # carefully.  Most of it is just gathering data, with one quick
        # database update at the end.
        with DatabaseTransactionPolicy(read_only=True):
            data = gather_data()
            more_data = figure_stuff_out(data)

            # End the ongoing transaction so we can go into our update.
            transaction.commit()

            # This is the only part where we update the database!
            with DatabaseTransactionPolicy(read_only=False):
                update_model(data, more_data)
                transaction.commit()

            # We've got a bit more work to do here, but it doesn't
            # affect the database.
            write_logs(data)
            notify_user(more_data)
    """

    db_switch = "DEFAULT_TRANSACTION_READ_ONLY"

    def __init__(self, store=None, read_only=False):
        """Create a policy.

        Merely creating a policy has no effect.  Use it with "with" to affect
        writability of database transactions.

        :param store: The store to set policy on.  Defaults to the main master
            store.  You don't want to use this on a slave store!
        :param read_only: Is this policy read-only?
        """
        self.read_only = read_only
        if store is None:
            self.store = IMasterStore(Person)
        else:
            self.store = store

    def __enter__(self):
        """Enter this policy.

        Commits the ongoing transaction, and sets the selected default
        read-only policy on the database.

        :raise TransactionInProgress: if a transaction was already ongoing.
        """
        self._checkNoTransaction("Entered DatabaseTransactionPolicy while in a transaction.")
        self.previous_policy = self._getCurrentPolicy()
        self._setPolicy(self.read_only)
        # Commit should include the policy itself.  If this breaks
        # because the transaction was already in a failed state before
        # we got here, too bad.
        transaction.commit()

    def __exit__(self, exc_type, *args):
        """Exit this policy.

        Commits or aborts, depending on mode of exit, and restores the
        previous default read-only policy.

        :return: False -- any exception will continue to propagate.
        :raise TransactionInProgress: if trying to exit normally from a
            read-write policy without closing its transaction first.
        """
        successful_exit = exc_type is None
        if successful_exit:
            # We're going to abort any ongoing transactions, but flush
            # first to catch out any writes that we might still be
            # caching.
            # Cached writes could hide read-only violations, but also
            # the start of a transaction that we shouldn't be in.
            self._flushPendingWrites()

            if not self.read_only:
                self._checkNoTransaction(
                    "Failed to close transaction before leaving read-write " "DatabaseTransactionPolicy."
                )

        transaction.abort()
        self._setPolicy(self.previous_policy)
        transaction.commit()
        return False

    def _isInTransaction(self):
        """Is our store currently in a transaction?"""
        pg_connection = self.store._connection._raw_connection
        status = pg_connection.get_transaction_status()
        return status != TRANSACTION_STATUS_IDLE

    def _checkNoTransaction(self, error_msg):
        """Verify that no transaction is ongoing.

        :param error_msg: The error message to use if the user got this wrong
            (i.e. if we're in a transaction).
        :raise TransactionInProgress: if we're in a transaction.
        """
        if self._isInTransaction():
            raise TransactionInProgress(error_msg)

    def _flushPendingWrites(self):
        """Flush any pending object changes to the database.

        If you see an `InternalError` exception during this flush, it probably
        means one of two things:

        1. Code within a read-only policy made model changes.

        2. Code within a policy exited normally despite an error that left the
           transaction in an unusable state.
        """
        self.store.flush()

    def _getCurrentPolicy(self):
        """Read the database session's default transaction read-only policy.

        The information is retrieved from the database, so this will give a
        sensible answer even when no DatabaseTransactionPolicy is in effect.

        :return: True for read-only policy, False for read-write policy.
        """
        db_switch_value_to_policy = {"on": True, "off": False}
        show_command = "SHOW %s" % self.db_switch
        db_switch_value, = self.store.execute(show_command).get_one()
        return db_switch_value_to_policy[db_switch_value]

    def _setPolicy(self, read_only=True):
        """Set the database session's default transaction read-only policy.

        :param read_only: True for read-only policy, False for read-write
            policy.
        """
        self.store.execute("SET %s TO %s" % (self.db_switch, quote(read_only)))
Code example #16
    def addRequest(self, person, potemplates=None, pofiles=None,
            format=TranslationFileFormat.PO):
        """See `IPOExportRequestSet`."""
        if potemplates is None:
            potemplates = []
        elif IPOTemplate.providedBy(potemplates):
            # Allow single POTemplate as well as list of POTemplates
            potemplates = [potemplates]
        if pofiles is None:
            pofiles = []

        if not (potemplates or pofiles):
            raise AssertionError(
                "Can't add a request with no PO templates and no PO files.")

        potemplate_ids = ", ".join(
            [quote(template) for template in potemplates])
        # A null pofile stands for the template itself.  We represent it in
        # SQL as -1, because that's how it's indexed in the request table.
        pofile_ids = ", ".join([quote(pofile) for pofile in pofiles] + ["-1"])

        query_params = {
            'person': quote(person),
            'format': quote(format),
            'templates': potemplate_ids,
            'pofiles': pofile_ids,
            }

        store = IMasterStore(POExportRequest)

        if potemplates:
            # Create requests for all these templates, insofar as the same
            # user doesn't already have requests pending for them in the same
            # format.
            store.execute("""
                INSERT INTO POExportRequest(person, potemplate, format)
                SELECT %(person)s, template.id, %(format)s
                FROM POTemplate AS template
                LEFT JOIN POExportRequest AS existing ON
                    existing.person = %(person)s AND
                    existing.potemplate = template.id AND
                    existing.pofile IS NULL AND
                    existing.format = %(format)s
                WHERE
                    template.id IN (%(templates)s) AND
                    existing.id IS NULL
            """ % query_params)

        if pofiles:
            # Create requests for all these translations, insofar as the same
            # user doesn't already have identical requests pending.
            store.execute("""
                INSERT INTO POExportRequest(
                    person, potemplate, pofile, format)
                SELECT %(person)s, template.id, pofile.id, %(format)s
                FROM POFile
                JOIN POTemplate AS template ON template.id = POFile.potemplate
                LEFT JOIN POExportRequest AS existing ON
                    existing.person = %(person)s AND
                    existing.pofile = POFile.id AND
                    existing.format = %(format)s
                WHERE
                    POFile.id IN (%(pofiles)s) AND
                    existing.id IS NULL
                """ % query_params)
Code example #17
    def addFile(self,
                name,
                size,
                file,
                contentType,
                expires=None,
                debugID=None,
                allow_zero_length=False):
        """Add a file to the librarian.

        :param name: Name to store the file as
        :param size: Size of the file
        :param file: File-like object with the content in it
        :param contentType: mime-type, e.g. text/plain
        :param expires: Expiry time of file. See LibrarianGarbageCollection.
            Set to None to only expire when it is no longer referenced.
        :param debugID: Optional.  If set, causes extra logging for this
            request on the server, which will be marked with the value
            given.
        :param allow_zero_length: If True permit zero length files.
        :returns: aliasID as an integer
        :raises UploadFailed: If the server rejects the upload for some
            reason.
        """
        if file is None:
            raise TypeError('Bad File Descriptor: %s' % repr(file))
        if allow_zero_length:
            min_size = -1
        else:
            min_size = 0
        if size <= min_size:
            raise UploadFailed('Invalid length: %d' % size)

        name = six.ensure_binary(name)

        # Import in this method to avoid a circular import
        from lp.services.librarian.model import LibraryFileContent
        from lp.services.librarian.model import LibraryFileAlias

        self._connect()
        try:
            # Get the name of the database the client is using, so that
            # the server can check that the client is using the same
            # database as the server.
            store = IMasterStore(LibraryFileAlias)
            databaseName = self._getDatabaseName(store)

            # Generate new content and alias IDs.
            # (we'll create rows with these IDs later, but not yet)
            contentID = store.execute(
                "SELECT nextval('libraryfilecontent_id_seq')").get_one()[0]
            aliasID = store.execute(
                "SELECT nextval('libraryfilealias_id_seq')").get_one()[0]

            # Send command
            self._sendLine('STORE %d %s' % (size, name))

            # Send headers
            self._sendHeader('Database-Name', databaseName)
            self._sendHeader('File-Content-ID', contentID)
            self._sendHeader('File-Alias-ID', aliasID)

            if debugID is not None:
                self._sendHeader('Debug-ID', debugID)

            # Send blank line. Do not check for a response from the
            # server when no data will be sent. Otherwise
            # _checkError() might consume the "200" response which
            # is supposed to be read below in this method.
            self._sendLine('', check_for_error_responses=(size > 0))

            # Prepare to upload the file
            md5_digester = hashlib.md5()
            sha1_digester = hashlib.sha1()
            sha256_digester = hashlib.sha256()
            bytesWritten = 0

            # Read in and upload the file 64kb at a time, by using the two-arg
            # form of iter (see
            # /usr/share/doc/python/html/library/functions.html#iter).
            for chunk in iter(lambda: file.read(1024 * 64), ''):
                self.state.f.write(chunk)
                bytesWritten += len(chunk)
                md5_digester.update(chunk)
                sha1_digester.update(chunk)
                sha256_digester.update(chunk)

            assert bytesWritten == size, (
                'size is %d, but %d were read from the file' %
                (size, bytesWritten))
            self.state.f.flush()

            # Read response
            response = self.state.f.readline().strip()
            if response != '200':
                raise UploadFailed('Server said: ' + response)

            # Add rows to DB
            content = LibraryFileContent(id=contentID,
                                         filesize=size,
                                         sha256=sha256_digester.hexdigest(),
                                         sha1=sha1_digester.hexdigest(),
                                         md5=md5_digester.hexdigest())
            LibraryFileAlias(id=aliasID,
                             content=content,
                             filename=name.decode('UTF-8'),
                             mimetype=contentType,
                             expires=expires,
                             restricted=self.restricted)

            Store.of(content).flush()

            assert isinstance(aliasID, (int, long)), \
                    "aliasID %r not an integer" % (aliasID, )
            return aliasID
        finally:
            self._close()
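A hypothetical call built only from the parameters documented above; client stands for the object exposing addFile(), and the file content is a placeholder.

from io import BytesIO

data = b'hello, librarian\n'
alias_id = client.addFile(
    name='hello.txt',
    size=len(data),
    file=BytesIO(data),
    contentType='text/plain',
    expires=None)  # keep the file until it is no longer referenced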
Code example #18
    def makeOneNewBranch(self, old_db_branch):
        """Copy a branch to the new distroseries.

        This function makes a new database branch for the same source package
        as old_db_branch but in the new distroseries and then uses
        `switch_branches` to move the underlying bzr branch to the new series
        and replace the old branch with a branch stacked on the new series'
        branch.

        :param old_db_branch: The branch to copy into the new distroseries.
        :raises BranchExists: This will be raised if old_db_branch has already
            been copied to the new distroseries (in the database, at least).
        """
        if not self.checkConsistentOfficialPackageBranch(old_db_branch):
            self.logger.warning("Skipping branch")
            return
        new_namespace = getUtility(IBranchNamespaceSet).get(
            person=old_db_branch.owner, product=None,
            distroseries=self.new_distroseries,
            sourcepackagename=old_db_branch.sourcepackagename)
        new_db_branch = new_namespace.createBranch(
            BranchType.HOSTED, self.new_distroseries.name,
            old_db_branch.registrant)
        new_db_branch.sourcepackage.setBranch(
            PackagePublishingPocket.RELEASE, new_db_branch,
            new_db_branch.owner)
        old_db_branch.lifecycle_status = BranchLifecycleStatus.MATURE
        # switch_branches *moves* the data to locations dependent on the
        # new_branch's id, so if the transaction was rolled back we wouldn't
        # know the branch id and thus wouldn't be able to find the branch data
        # again.  So commit before doing that.
        transaction.commit()
        switch_branches(
            config.codehosting.mirrored_branches_root,
            'lp-internal', old_db_branch, new_db_branch)
        # Directly copy the branch revisions from the old branch to the new
        # branch.
        store = IMasterStore(BranchRevision)
        store.execute(
            """
            INSERT INTO BranchRevision (branch, revision, sequence)
            SELECT %s, BranchRevision.revision, BranchRevision.sequence
            FROM BranchRevision
            WHERE branch = %s
            """ % (new_db_branch.id, old_db_branch.id))

        # Update the scanned details first, that way when hooking into
        # branchChanged, it won't try to create a new scan job.
        tip_revision = old_db_branch.getTipRevision()
        new_db_branch.updateScannedDetails(
            tip_revision, old_db_branch.revision_count)
        tip_revision_id = (
            tip_revision.revision_id if tip_revision is not None else
            NULL_REVISION)
        new_db_branch.branchChanged(
            '', tip_revision_id,
            old_db_branch.control_format,
            old_db_branch.branch_format,
            old_db_branch.repository_format)
        old_db_branch.stacked_on = new_db_branch
        transaction.commit()
        return new_db_branch
Code example #19
File: client.py  Project: pombreda/UnnaturalCodeFork
    def addFile(self, name, size, file, contentType, expires=None,
                debugID=None, allow_zero_length=False):
        """Add a file to the librarian.

        :param name: Name to store the file as
        :param size: Size of the file
        :param file: File-like object with the content in it
        :param contentType: mime-type, e.g. text/plain
        :param expires: Expiry time of file. See LibrarianGarbageCollection.
            Set to None to only expire when it is no longer referenced.
        :param debugID: Optional.  If set, causes extra logging for this
            request on the server, which will be marked with the value
            given.
        :param allow_zero_length: If True permit zero length files.
        :returns: aliasID as an integer
        :raises UploadFailed: If the server rejects the upload for some
            reason.
        """
        if file is None:
            raise TypeError('Bad File Descriptor: %s' % repr(file))
        if allow_zero_length:
            min_size = -1
        else:
            min_size = 0
        if size <= min_size:
            raise UploadFailed('Invalid length: %d' % size)

        if isinstance(name, unicode):
            name = name.encode('utf-8')

        # Import in this method to avoid a circular import
        from lp.services.librarian.model import LibraryFileContent
        from lp.services.librarian.model import LibraryFileAlias

        self._connect()
        try:
            # Get the name of the database the client is using, so that
            # the server can check that the client is using the same
            # database as the server.
            store = IMasterStore(LibraryFileAlias)
            databaseName = self._getDatabaseName(store)

            # Generate new content and alias IDs.
            # (we'll create rows with these IDs later, but not yet)
            contentID = store.execute(
                "SELECT nextval('libraryfilecontent_id_seq')").get_one()[0]
            aliasID = store.execute(
                "SELECT nextval('libraryfilealias_id_seq')").get_one()[0]

            # Send command
            self._sendLine('STORE %d %s' % (size, name))

            # Send headers
            self._sendHeader('Database-Name', databaseName)
            self._sendHeader('File-Content-ID', contentID)
            self._sendHeader('File-Alias-ID', aliasID)

            if debugID is not None:
                self._sendHeader('Debug-ID', debugID)

            # Send blank line. Do not check for a response from the
            # server when no data will be sent. Otherwise
            # _checkError() might consume the "200" response which
            # is supposed to be read below in this method.
            self._sendLine('', check_for_error_responses=(size > 0))

            # Prepare to upload the file
            md5_digester = hashlib.md5()
            sha1_digester = hashlib.sha1()
            sha256_digester = hashlib.sha256()
            bytesWritten = 0

            # Read in and upload the file 64kb at a time, by using the two-arg
            # form of iter (see
            # /usr/share/doc/python/html/library/functions.html#iter).
            for chunk in iter(lambda: file.read(1024 * 64), ''):
                self.state.f.write(chunk)
                bytesWritten += len(chunk)
                md5_digester.update(chunk)
                sha1_digester.update(chunk)
                sha256_digester.update(chunk)

            assert bytesWritten == size, (
                'size is %d, but %d were read from the file'
                % (size, bytesWritten))
            self.state.f.flush()

            # Read response
            response = self.state.f.readline().strip()
            if response != '200':
                raise UploadFailed('Server said: ' + response)

            # Add rows to DB
            content = LibraryFileContent(
                id=contentID, filesize=size,
                sha256=sha256_digester.hexdigest(),
                sha1=sha1_digester.hexdigest(),
                md5=md5_digester.hexdigest())
            LibraryFileAlias(
                id=aliasID, content=content, filename=name.decode('UTF-8'),
                mimetype=contentType, expires=expires,
                restricted=self.restricted)

            Store.of(content).flush()

            assert isinstance(aliasID, (int, long)), \
                    "aliasID %r not an integer" % (aliasID, )
            return aliasID
        finally:
            self._close()
Code example #20
class DatabaseTransactionPolicy:
    """Context manager for read-only transaction policy.

    Use this to define regions of code that explicitly allow or disallow
    changes to the database:

        # We want to be sure that inspect_data does not inadvertently
        # make any changes in the database, but we can't run it on the
        # slave store because it doesn't tolerate replication lag.
        with DatabaseTransactionPolicy(read_only=True):
            inspect_data()

    The simplest way to use this is as a special transaction:
     * You must commit/abort before entering the policy.
     * Exiting the policy through an exception aborts its changes.
     * Before completing a read-write policy region, you must commit or abort.

    You can also have multiple transactions inside one policy, however; the
    policy still applies after a commit or abort.

    Policies can be nested--a nested policy overrides the one it's nested in.
    After the nested policy has exited, the previous policy applies again:

        # This code needs to control the database changes it makes very
        # carefully.  Most of it is just gathering data, with one quick
        # database update at the end.
        with DatabaseTransactionPolicy(read_only=True):
            data = gather_data()
            more_data = figure_stuff_out(data)

            # End the ongoing transaction so we can go into our update.
            transaction.commit()

            # This is the only part where we update the database!
            with DatabaseTransactionPolicy(read_only=False):
                update_model(data, more_data)
                transaction.commit()

            # We've got a bit more work to do here, but it doesn't
            # affect the database.
            write_logs(data)
            notify_user(more_data)
    """

    db_switch = "DEFAULT_TRANSACTION_READ_ONLY"

    def __init__(self, store=None, read_only=False):
        """Create a policy.

        Merely creating a policy has no effect.  Use it with "with" to affect
        writability of database transactions.

        :param store: The store to set policy on.  Defaults to the main master
            store.  You don't want to use this on a slave store!
        :param read_only: Is this policy read-only?
        """
        self.read_only = read_only
        if store is None:
            self.store = IMasterStore(Person)
        else:
            self.store = store

    def __enter__(self):
        """Enter this policy.

        Commits the ongoing transaction, and sets the selected default
        read-only policy on the database.

        :raise TransactionInProgress: if a transaction was already ongoing.
        """
        self._checkNoTransaction(
            "Entered DatabaseTransactionPolicy while in a transaction.")
        self.previous_policy = self._getCurrentPolicy()
        self._setPolicy(self.read_only)
        # Commit should include the policy itself.  If this breaks
        # because the transaction was already in a failed state before
        # we got here, too bad.
        transaction.commit()

    def __exit__(self, exc_type, *args):
        """Exit this policy.

        Commits or aborts, depending on mode of exit, and restores the
        previous default read-only policy.

        :return: False -- any exception will continue to propagate.
        :raise TransactionInProgress: if trying to exit normally from a
            read-write policy without closing its transaction first.
        """
        successful_exit = (exc_type is None)
        if successful_exit:
            # We're going to abort any ongoing transactions, but flush
            # first to catch out any writes that we might still be
            # caching.
            # Cached writes could hide read-only violations, but also
            # the start of a transaction that we shouldn't be in.
            self._flushPendingWrites()

            if not self.read_only:
                self._checkNoTransaction(
                    "Failed to close transaction before leaving read-write "
                    "DatabaseTransactionPolicy.")

        transaction.abort()
        self._setPolicy(self.previous_policy)
        transaction.commit()
        return False

    def _isInTransaction(self):
        """Is our store currently in a transaction?"""
        pg_connection = self.store._connection._raw_connection
        status = pg_connection.get_transaction_status()
        return status != TRANSACTION_STATUS_IDLE

    def _checkNoTransaction(self, error_msg):
        """Verify that no transaction is ongoing.

        :param error_msg: The error message to use if the user got this wrong
            (i.e. if we're in a transaction).
        :raise TransactionInProgress: if we're in a transaction.
        """
        if self._isInTransaction():
            raise TransactionInProgress(error_msg)

    def _flushPendingWrites(self):
        """Flush any pending object changes to the database.

        If you see an `InternalError` exception during this flush, it probably
        means one of two things:

        1. Code within a read-only policy made model changes.

        2. Code within a policy exited normally despite an error that left the
           transaction in an unusable state.
        """
        self.store.flush()

    def _getCurrentPolicy(self):
        """Read the database session's default transaction read-only policy.

        The information is retrieved from the database, so this will give a
        sensible answer even when no DatabaseTransactionPolicy is in effect.

        :return: True for read-only policy, False for read-write policy.
        """
        db_switch_value_to_policy = {
            'on': True,
            'off': False,
        }
        show_command = "SHOW %s" % self.db_switch
        db_switch_value, = self.store.execute(show_command).get_one()
        return db_switch_value_to_policy[db_switch_value]

    def _setPolicy(self, read_only=True):
        """Set the database session's default transaction read-only policy.

        :param read_only: True for read-only policy, False for read-write
            policy.
        """
        self.store.execute("SET %s TO %s" % (self.db_switch, quote(read_only)))
Code example #21
    def addRequest(self,
                   person,
                   potemplates=None,
                   pofiles=None,
                   format=TranslationFileFormat.PO):
        """See `IPOExportRequestSet`."""
        if potemplates is None:
            potemplates = []
        elif IPOTemplate.providedBy(potemplates):
            # Allow single POTemplate as well as list of POTemplates
            potemplates = [potemplates]
        if pofiles is None:
            pofiles = []

        if not (potemplates or pofiles):
            raise AssertionError(
                "Can't add a request with no PO templates and no PO files.")

        potemplate_ids = ", ".join(
            [quote(template) for template in potemplates])
        # A null pofile stands for the template itself.  We represent it in
        # SQL as -1, because that's how it's indexed in the request table.
        pofile_ids = ", ".join([quote(pofile) for pofile in pofiles] + ["-1"])

        query_params = {
            'person': quote(person),
            'format': quote(format),
            'templates': potemplate_ids,
            'pofiles': pofile_ids,
        }

        store = IMasterStore(POExportRequest)

        if potemplates:
            # Create requests for all these templates, insofar as the same
            # user doesn't already have requests pending for them in the same
            # format.
            store.execute("""
                INSERT INTO POExportRequest(person, potemplate, format)
                SELECT %(person)s, template.id, %(format)s
                FROM POTemplate AS template
                LEFT JOIN POExportRequest AS existing ON
                    existing.person = %(person)s AND
                    existing.potemplate = template.id AND
                    existing.pofile IS NULL AND
                    existing.format = %(format)s
                WHERE
                    template.id IN (%(templates)s) AND
                    existing.id IS NULL
            """ % query_params)

        if pofiles:
            # Create requests for all these translations, insofar as the same
            # user doesn't already have identical requests pending.
            store.execute("""
                INSERT INTO POExportRequest(
                    person, potemplate, pofile, format)
                SELECT %(person)s, template.id, pofile.id, %(format)s
                FROM POFile
                JOIN POTemplate AS template ON template.id = POFile.potemplate
                LEFT JOIN POExportRequest AS existing ON
                    existing.person = %(person)s AND
                    existing.pofile = POFile.id AND
                    existing.format = %(format)s
                WHERE
                    POFile.id IN (%(pofiles)s) AND
                    existing.id IS NULL
                """ % query_params)
Code example #22
class InitializeDistroSeries:
    """Copy in all of the parents distroseries's configuration. This
    includes all configuration for distroseries as well as distroarchseries,
    publishing and all publishing records for sources and binaries.

    We support 2 use cases here:
      #1 If the child distribution has zero initialized series:
        - the parent list can't be empty (otherwise we trigger an error);
        - the series will be derived from the parents passed as argument;
        - the parents will be set to the parents passed as argument;
        - first_derivation = True.
      #2 If the child distribution has more than zero initialized series:
        - the series will be derived from the previous_series;
        - the parents will be set to the parents passed as argument or
          the parents of the previous_series if the passed argument is empty;
        - first_derivation = False.

    Preconditions:
      The distroseries must exist, and be completely unused, with no source
      or binary packages existing, as well as no distroarchseries set up.
      Section and component selections must be empty. It must not have any
      parent series.

    Outcome:
      The distroarchseries set up in the parent series will be copied.
      The publishing structure will be copied from the parents. All
      PUBLISHED and PENDING packages in the parents will be created in
      this distroseries and its distroarchseries. All component and section
      selections will be duplicated, as will any permission-related
      structures.

    Note:
      This method will raise an InitializationError when the pre-conditions
      are not met. After this is run, you still need to construct chroots
      for building, you need to add anything missing wrt. ports etc. This
      method is only meant to give you a basic copy of parent series in
      order to assist you in preparing a new series of a distribution or
      in the initialization of a derivative.
    """

    def __init__(
        self, distroseries, parents=(), arches=(), archindep_archtag=None,
        packagesets=(), rebuild=False, overlays=(), overlay_pockets=(),
        overlay_components=()):
        self.distroseries = distroseries
        self.parent_ids = [int(id) for id in parents]
        # Load parent objects in bulk...
        parents_bulk = bulk.load(DistroSeries, self.parent_ids)
        # ... sort the parents to match the order in the 'parents' parameter.
        self.parents = sorted(
            parents_bulk,
            key=lambda parent: self.parent_ids.index(parent.id))
        self.arches = arches
        self.archindep_archtag = archindep_archtag
        self.packagesets_ids = [
            ensure_unicode(packageset) for packageset in packagesets]
        self.packagesets = bulk.load(
            Packageset, [int(packageset) for packageset in packagesets])
        self.rebuild = rebuild
        self.overlays = overlays
        self.overlay_pockets = overlay_pockets
        self.overlay_components = overlay_components
        self._store = IMasterStore(DistroSeries)

        self.first_derivation = (
            not self.distroseries.distribution.has_published_sources)

        if self.first_derivation:
            # Use-case #1.
            self.derivation_parents = self.parents
            self.derivation_parent_ids = self.parent_ids
        else:
            # Use-case #2.
            self.derivation_parents = [self.distroseries.previous_series]
            self.derivation_parent_ids = [
                p.id for p in self.derivation_parents if p is not None]
            if self.parent_ids == []:
                self.parents = (
                    self.distroseries.previous_series.getParentSeries())
        self._create_source_names_by_parent()

    def check(self):
        if self.distroseries.isDerivedSeries():
            raise InitializationError(
                ("Series {child.name} has already been initialised"
                 ".").format(
                    child=self.distroseries))
        self._checkPublisherConfig()
        if (self.distroseries.distribution.has_published_sources and
            self.distroseries.previous_series is None):
            raise InitializationError(
                ("Series {child.name} has no previous series and "
                 "the distribution already has initialised series"
                 ".").format(
                    child=self.distroseries))
        self._checkParents()
        self._checkArchindep()
        for parent in self.derivation_parents:
            self._checkBuilds(parent)
            self._checkQueue(parent)
        self._checkSeries()

    def _checkArchindep(self):
        # Check that the child distroseries has an architecture to
        # build architecture independent binaries.
        if self.archindep_archtag is None:
            # No archindep_archtag was given, so we try to figure out
            # a proper one among the parents'.
            potential_nominated_arches = self._potential_nominated_arches(
                 self.derivation_parents)
            if len(potential_nominated_arches) == 0:
                raise InitializationError(
                    "The distroseries has no architectures selected to "
                    "build architecture independent binaries.")
        else:
            # Make sure that the given archindep_archtag is among the
            # selected architectures.
            if (self.arches is not None and
                len(self.arches) != 0 and
                self.archindep_archtag not in self.arches):
                raise InitializationError(
                    "The selected architecture independent architecture tag "
                    "is not among the selected architectures.")

    def _checkPublisherConfig(self):
        """A series cannot be initialized if it has no publisher config
        set up.
        """
        publisherconfigset = getUtility(IPublisherConfigSet)
        config = publisherconfigset.getByDistribution(
            self.distroseries.distribution)
        if config is None:
            raise InitializationError(
                ("Distribution {child.name} has no publisher configuration. "
                 "Please ask an administrator to set this up"
                 ".").format(
                    child=self.distroseries.distribution))

    def _checkParents(self):
        """If self.first_derivation, the parents list cannot be empty."""
        if self.first_derivation:
            # Use-case #1.
            if len(self.parent_ids) == 0:
                raise InitializationError(
                    "No other series in the distribution is initialised "
                    "and a parent was not explicitly specified.")

    def _checkBuilds(self, parent):
        """Assert there are no pending builds for the given parent series.

        Only cares about the RELEASE, SECURITY and UPDATES pockets, which are
        the only ones inherited via initializeFromParent method.
        Restrict the check to the select architectures (if applicable).
        Restrict the check to the selected packages if a limited set of
        packagesets is used by the initialization.
        """
        spns = self.source_names_by_parent.get(parent.id, None)
        if spns is not None and len(spns) == 0:
            # If no sources are selected in this parent, skip the check.
            return
        # spns=None means no packagesets selected so we need to consider
        # all sources.

        arch_tags = self.arches if len(self.arches) != 0 else None
        pending_builds = parent.getBuildRecords(
            BuildStatus.NEEDSBUILD, pocket=INIT_POCKETS,
            arch_tag=arch_tags, name=spns)

        if not pending_builds.is_empty():
            raise InitializationError(
                "The parent series has pending builds "
                "for selected sources.")

    def _checkQueue(self, parent):
        """Assert upload queue is empty on the given parent series.

        Only cares about the RELEASE, SECURITY and UPDATES pockets, which are
        the only ones inherited via initializeFromParent method.
        Restrict the check to the selected packages if a limited set of
        packagesets is used by the initialization.
         """
        statuses = [
            PackageUploadStatus.NEW,
            PackageUploadStatus.ACCEPTED,
            PackageUploadStatus.UNAPPROVED,
            ]
        spns = self.source_names_by_parent.get(parent.id, None)
        if spns is not None and len(spns) == 0:
            # If no sources are selected in this parent, skip the check.
            return
        # spns=None means no packagesets selected so we need to consider
        # all sources.

        items = getUtility(IPackageUploadSet).getBuildsForSources(
            parent, statuses, INIT_POCKETS, spns)
        if not items.is_empty():
            raise InitializationError(
                "The parent series has sources waiting in its upload "
                "queues that match your selection.")

    def _checkSeries(self):
        error = (
            "Cannot copy distroarchseries from parent; there are "
            "already one or more distroarchseries initialised for "
            "this series.")
        sources = self.distroseries.getAllPublishedSources()
        binaries = self.distroseries.getAllPublishedBinaries()
        if not all(
            map(methodcaller('is_empty'), (
                sources, binaries, self.distroseries.architectures,
                self.distroseries.sections))):
            raise InitializationError(error)
        if self.distroseries.components:
            raise InitializationError(error)

    def initialize(self):
        self._set_parents()
        self._copy_configuration()
        self._copy_architectures()
        self._set_nominatedarchindep()
        self._copy_packages()
        self._copy_packagesets()
        self._copy_pocket_permissions()
        self._create_dsds()
        self._set_initialized()
        transaction.commit()

    def _set_parents(self):
        count = 0
        for parent in self.parents:
            dsp_set = getUtility(IDistroSeriesParentSet)
            if self.overlays and self.overlays[count]:
                pocket = PackagePublishingPocket.__metaclass__.getTermByToken(
                    PackagePublishingPocket,
                    self.overlay_pockets[count]).value
                component_set = getUtility(IComponentSet)
                component = component_set[self.overlay_components[count]]
                dsp_set.new(
                    self.distroseries, parent, initialized=False,
                    is_overlay=True, pocket=pocket, component=component,
                    ordering=count)
            else:
                dsp_set.new(
                    self.distroseries, parent, initialized=False,
                    is_overlay=False, ordering=count)
            count += 1

    def _set_initialized(self):
        dsp_set = getUtility(IDistroSeriesParentSet)
        distroseriesparents = dsp_set.getByDerivedSeries(
            self.distroseries)
        for distroseriesparent in distroseriesparents:
            distroseriesparent.initialized = True

    def _has_same_parents_as_previous_series(self):
        # Does this distroseries have the same parents as its previous
        # series? (note that the parents' order does not matter here)
        dsp_set = getUtility(IDistroSeriesParentSet)
        previous_series_parents = [
            dsp.parent_series for dsp in dsp_set.getByDerivedSeries(
                self.distroseries.previous_series)]
        return set(previous_series_parents) == set(self.parents)

    def _create_dsds(self):
        if not self.first_derivation:
            if (self._has_same_parents_as_previous_series() and
                not self.packagesets_ids):
                # If the parents are the same as previous_series's
                # parents and all the packagesets are being copied,
                # then we simply copy the DSDs from previous_series
                # for performance reasons.
                self._copy_dsds_from_previous_series()
            else:
                # Either the parents have changed (compared to
                # previous_series's parents) or a selection only of the
                # packagesets is being copied so we have to recompute
                # the DSDs by creating DSD Jobs.
                self._create_dsd_jobs()
        else:
            # If this is the first derivation, create the DSD Jobs.
            self._create_dsd_jobs()

    def _copy_dsds_from_previous_series(self):
        self._store.execute("""
            INSERT INTO DistroSeriesDifference
                (derived_series, source_package_name, package_diff,
                status, difference_type, parent_package_diff,
                source_version, parent_source_version,
                base_version, parent_series)
            SELECT
                %s AS derived_series, source_package_name,
                package_diff, status,
                difference_type, parent_package_diff, source_version,
                parent_source_version, base_version, parent_series
            FROM DistroSeriesDifference AS dsd
                WHERE dsd.derived_series = %s
            """ % sqlvalues(
                self.distroseries.id,
                self.distroseries.previous_series.id))

    def _create_dsd_jobs(self):
        job_source = getUtility(IDistroSeriesDifferenceJobSource)
        job_source.massCreateForSeries(self.distroseries)

    def _copy_configuration(self):
        self.distroseries.backports_not_automatic = any(
            parent.backports_not_automatic
                for parent in self.derivation_parents)
        self.distroseries.include_long_descriptions = any(
            parent.include_long_descriptions
                for parent in self.derivation_parents)

    def _copy_architectures(self):
        das_filter = ' AND distroseries IN %s ' % (
                sqlvalues([p.id for p in self.derivation_parents]))
        if self.arches:
            das_filter += ' AND architecturetag IN %s ' % (
                sqlvalues(self.arches))
        self._store.execute("""
            INSERT INTO DistroArchSeries
            (distroseries, processor, architecturetag, owner, official,
             supports_virtualized)
            SELECT %s, processor, architecturetag, %s,
                bool_and(official), bool_or(supports_virtualized)
            FROM DistroArchSeries WHERE enabled = TRUE %s
            GROUP BY processor, architecturetag
            """ % (sqlvalues(self.distroseries, self.distroseries.owner)
            + (das_filter, )))
        self._store.flush()

    def _set_nominatedarchindep(self):
        if self.archindep_archtag is None:
            # Select the arch-indep builder from the intersection between
            # the selected architectures and the list of the parent's
            # arch-indep builders.
            arch_tag = self._potential_nominated_arches(
                self.derivation_parents).pop()
            self.distroseries.nominatedarchindep = (
                self.distroseries.getDistroArchSeries(arch_tag))
        else:
            self.distroseries.nominatedarchindep = (
                self.distroseries.getDistroArchSeries(self.archindep_archtag))

    def _potential_nominated_arches(self, parent_list):
        parent_indep_archtags = set(
            parent.nominatedarchindep.architecturetag
            for parent in parent_list
            if parent.nominatedarchindep is not None)

        if len(self.arches) == 0:
            return parent_indep_archtags
        else:
            return parent_indep_archtags.intersection(self.arches)

    def _copy_packages(self):
        # Perform the copies
        self._copy_component_section_and_format_selections()

        # Prepare the lists of distroarchseries for which binary packages
        # shall be copied.
        distroarchseries_lists = {}
        for parent in self.derivation_parents:
            distroarchseries_lists[parent] = []
            for arch in self.distroseries.architectures:
                if self.arches and (arch.architecturetag not in self.arches):
                    continue
                try:
                    parent_arch = parent.getDistroArchSeries(
                        arch.architecturetag)
                except NotFoundError:
                    continue

                distroarchseries_lists[parent].append((parent_arch, arch))
        # Now copy source and binary packages.
        self._copy_publishing_records(distroarchseries_lists)
        self._copy_packaging_links()

    def _use_cloner(self, target_archive, archive):
        """Returns True if it's safe to use the packagecloner (as opposed
        to using the packagecopier).
        We use two different ways to copy packages:
         - the packagecloner: fast but not conflict safe.
         - the packagecopier: slow but performs lots of checks to
         avoid creating conflicts.
        1. We'll use the cloner:
        If this is not a first initialization.
        And If:
            1.a If the archives are different and the target archive is
                empty use the cloner.
            Or
            1.b. If the archives are the same and the target series is
                empty use the cloner.
        2.  Otherwise use the copier.
        """
        if self.first_derivation:
            return False

        target_archive_empty = target_archive.getPublishedSources().is_empty()
        case_1a = (target_archive != archive and
                   target_archive_empty)
        case_1b = (target_archive == archive and
                   (target_archive_empty or
                    target_archive.getPublishedSources(
                        distroseries=self.distroseries).is_empty()))
        return case_1a or case_1b

    def _create_source_names_by_parent(self):
        """If only a subset of the packagesets was selected to be copied,
        create a dict with the list of source names to be copied for each
        parent.

        source_names_by_parent.get(parent) can be 3 different things:
        - None: this means that no specific packagesets were selected
        for the initialization. In this case we need to consider *all*
        the packages in this parent.
        - []: this means that some specific packagesets were selected
        for the initialization but none in this parent. We can skip
        this parent for all the copy/check operations.
        - [name1, ...]: this means that some specific packagesets
        were selected for the initialization and some are in this
        parent so the list of packages to consider is not empty.
        """
        source_names_by_parent = {}
        if self.packagesets_ids:
            for parent in self.derivation_parents:
                spns = []
                for pkgset in self.packagesets:
                    if pkgset.distroseries == parent:
                        spns += list(pkgset.getSourcesIncluded())
                source_names_by_parent[parent.id] = spns
        self.source_names_by_parent = source_names_by_parent

    def _copy_publishing_records(self, distroarchseries_lists):
        """Copy the publishing records from the parent arch series
        to the given arch series in ourselves.

        We copy all PENDING and PUBLISHED records as PENDING into our own
        publishing records.

        We copy only the RELEASE pocket in the PRIMARY archive.
        """
        archive_set = getUtility(IArchiveSet)

        for parent in self.derivation_parents:
            spns = self.source_names_by_parent.get(parent.id, None)
            if spns is not None and len(spns) == 0:
                # Some packagesets were selected but not a single
                # source from this parent: we skip the copy since
                # calling copy with spns=[] would copy all the packagesets
                # from this parent.
                continue
            # spns=None means no packagesets selected so we need to consider
            # all sources.

            distroarchseries_list = distroarchseries_lists[parent]
            for archive in parent.distribution.all_distro_archives:
                if archive.purpose != ArchivePurpose.PRIMARY:
                    continue

                target_archive = archive_set.getByDistroPurpose(
                    self.distroseries.distribution, archive.purpose)
                if archive.purpose is ArchivePurpose.PRIMARY:
                    assert target_archive is not None, (
                        "Target archive doesn't exist?")
                if self._use_cloner(target_archive, archive):
                    origin = PackageLocation(
                        archive, parent.distribution, parent,
                        PackagePublishingPocket.RELEASE)
                    destination = PackageLocation(
                        target_archive, self.distroseries.distribution,
                        self.distroseries, PackagePublishingPocket.RELEASE)
                    processors = None
                    if self.rebuild:
                        processors = [
                            das[1].processor for das in distroarchseries_list]
                        distroarchseries_list = ()
                    getUtility(IPackageCloner).clonePackages(
                        origin, destination, distroarchseries_list,
                        processors, spns, self.rebuild)
                else:
                    # There is only one available pocket in an unreleased
                    # series.
                    target_pocket = PackagePublishingPocket.RELEASE
                    sources = archive.getPublishedSources(
                        distroseries=parent, pocket=INIT_POCKETS,
                        status=(PackagePublishingStatus.PENDING,
                                PackagePublishingStatus.PUBLISHED),
                        name=spns)
                    # XXX: rvb 2011-06-23 bug=801112: do_copy is atomic (all
                    # or none of the sources will be copied). This might
                    # lead to a partially initialised series if there is a
                    # single conflict in the destination series.
                    try:
                        sources_published = do_copy(
                            sources, target_archive, self.distroseries,
                            target_pocket, include_binaries=not self.rebuild,
                            check_permissions=False, strict_binaries=False,
                            close_bugs=False, create_dsd_job=False,
                            person=None)
                        if self.rebuild:
                            rebuilds = []
                            for pubrec in sources_published:
                                builds = pubrec.createMissingBuilds(
                                   list(self.distroseries.architectures))
                                rebuilds.extend(builds)
                            self._rescore_rebuilds(rebuilds)
                    except CannotCopy as error:
                        raise InitializationError(error)

    def _rescore_rebuilds(self, builds):
        """Rescore the passed builds so that they have an appropriately low
        score.
        """
        for build in builds:
            build.buildqueue_record.lastscore -= COPY_ARCHIVE_SCORE_PENALTY

    def _copy_component_section_and_format_selections(self):
        """Copy the section, component and format selections from the parents
        distro series into this one.
        """
        # Copy the component selections
        self._store.execute('''
            INSERT INTO ComponentSelection (distroseries, component)
            SELECT DISTINCT %s AS distroseries, cs.component AS component
            FROM ComponentSelection AS cs WHERE cs.distroseries IN %s
            ''' % sqlvalues(self.distroseries.id,
            self.derivation_parent_ids))
        # Copy the section selections
        self._store.execute('''
            INSERT INTO SectionSelection (distroseries, section)
            SELECT DISTINCT %s as distroseries, ss.section AS section
            FROM SectionSelection AS ss WHERE ss.distroseries IN %s
            ''' % sqlvalues(self.distroseries.id,
            self.derivation_parent_ids))
        # Copy the source format selections
        self._store.execute('''
            INSERT INTO SourcePackageFormatSelection (distroseries, format)
            SELECT DISTINCT %s as distroseries, spfs.format AS format
            FROM SourcePackageFormatSelection AS spfs
            WHERE spfs.distroseries IN %s
            ''' % sqlvalues(self.distroseries.id,
            self.derivation_parent_ids))

    def _copy_packaging_links(self):
        """Copy the packaging links from the parent series to this one."""
        # We iterate over the parents and copy into the child in
        # sequence to avoid creating duplicates.
        for parent_id in self.derivation_parent_ids:
            self._store.execute("""
                INSERT INTO
                    Packaging(
                        distroseries, sourcepackagename, productseries,
                        packaging, owner)
                SELECT
                    ChildSeries.id,
                    Packaging.sourcepackagename,
                    Packaging.productseries,
                    Packaging.packaging,
                    Packaging.owner
                FROM
                    Packaging
                    -- Joining the parent distroseries permits the query to
                    -- build the data set for the series being updated, yet
                    -- results are in fact the data from the original series.
                    JOIN Distroseries ChildSeries
                        ON Packaging.distroseries = %s
                WHERE
                    -- Select only the packaging links that are in the parent
                    -- that are not in the child.
                    ChildSeries.id = %s
                    AND Packaging.sourcepackagename in (
                        SELECT sourcepackagename
                        FROM Packaging
                        WHERE distroseries in (
                            SELECT id
                            FROM Distroseries
                            WHERE id = %s
                            )
                        EXCEPT
                        SELECT sourcepackagename
                        FROM Packaging
                        WHERE distroseries in (
                            SELECT id
                            FROM Distroseries
                            WHERE id = ChildSeries.id
                            )
                        )
                """ % sqlvalues(
                    parent_id, self.distroseries.id, parent_id))

    def _copy_packagesets(self):
        """Copy packagesets from the parent distroseries."""
        packagesets = self._store.find(
            Packageset,
            Packageset.distroseries_id.is_in(self.derivation_parent_ids))
        parent_to_child = {}
        # Create the packagesets and any archivepermissions if we're not
        # copying cross-distribution.
        parent_distro_ids = [
            parent.distribution.id for parent in self.derivation_parents]
        for parent_ps in packagesets:
            # Cross-distro initializations get packagesets owned by the
            # distro owner, otherwise the old owner is preserved.
            if (self.packagesets_ids and
                str(parent_ps.id) not in self.packagesets_ids):
                continue
            packageset_set = getUtility(IPackagesetSet)
            # First, try to fetch an existing packageset with this name.
            try:
                child_ps = packageset_set.getByName(
                    parent_ps.name, self.distroseries)
            except NoSuchPackageSet:
                if self.distroseries.distribution.id in parent_distro_ids:
                    new_owner = parent_ps.owner
                else:
                    new_owner = self.distroseries.owner
                child_ps = getUtility(IPackagesetSet).new(
                    parent_ps.name, parent_ps.description,
                    new_owner, distroseries=self.distroseries,
                    related_set=parent_ps)
            parent_to_child[parent_ps] = child_ps
            # Copy archivepermissions if we're not copying
            # cross-distribution.
            if (self.distroseries.distribution ==
                    parent_ps.distroseries.distribution):
                self._store.execute("""
                    INSERT INTO Archivepermission
                    (person, permission, archive, packageset, explicit)
                    SELECT person, permission, %s, %s, explicit
                    FROM Archivepermission WHERE packageset = %s
                    """ % sqlvalues(
                        self.distroseries.main_archive, child_ps.id,
                        parent_ps.id))
        # Copy the relations between sets, and the contents.
        for old_series_ps, new_series_ps in parent_to_child.items():
            old_series_sets = old_series_ps.setsIncluded(
                direct_inclusion=True)
            for old_series_child in old_series_sets:
                new_series_ps.add(parent_to_child[old_series_child])
            new_series_ps.add(old_series_ps.sourcesIncluded(
                direct_inclusion=True))

    def _copy_pocket_permissions(self):
        """Copy per-distroseries/pocket permissions from the parent series."""
        for parent in self.derivation_parents:
            if self.distroseries.distribution == parent.distribution:
                self._store.execute("""
                    INSERT INTO Archivepermission
                    (person, permission, archive, pocket, distroseries)
                    SELECT person, permission, %s, pocket, %s
                    FROM Archivepermission
                    WHERE pocket IS NOT NULL AND distroseries = %s
                    """ % sqlvalues(
                        self.distroseries.main_archive, self.distroseries.id,
                        parent.id))
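
The methods above are the internals of series initialization; the public entry points are check() and initialize(). A minimal sketch of how a caller might drive them (the driver itself is an assumption for illustration, not part of this example; the target distroseries and parent ids are looked up elsewhere):

    # Hypothetical caller, for illustration only.
    initializer = InitializeDistroSeries(
        distroseries, parents=[str(parent.id)], rebuild=False)
    initializer.check()       # raises InitializationError if preconditions fail
    initializer.initialize()  # copies configuration, architectures, packages,
                              # packagesets and permissions, then commits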
コード例 #23
0
class BugWatchScheduler(TunableLoop):
    """An `ITunableLoop` for scheduling BugWatches."""

    maximum_chunk_size = 1000

    def __init__(self, log, abort_time=None, max_delay_days=None,
                 max_sample_size=None):
        super(BugWatchScheduler, self).__init__(log, abort_time)
        self.transaction = transaction
        self.store = IMasterStore(BugWatch)

        if max_delay_days is None:
            max_delay_days = MAX_DELAY_DAYS
        if max_sample_size is None:
            max_sample_size = MAX_SAMPLE_SIZE
        self.max_sample_size = max_sample_size

        self.delay_coefficient = get_delay_coefficient(
            max_delay_days, max_sample_size)
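        # Note (assumption, not shown in this snippet): the coefficient is
        # expected to scale the per-failure penalty in the UPDATE below so
        # that a watch failing on all of its last max_sample_size checks is
        # delayed by roughly max_delay_days.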

    def __call__(self, chunk_size):
        """Run the loop."""
        # XXX 2010-03-25 gmb bug=198767:
        #     We cast chunk_size to an integer to ensure that we're not
        #     trying to slice using floats or anything similarly
        #     foolish. We shouldn't have to do this.
        chunk_size = int(chunk_size)
        query = """
        UPDATE BugWatch
            SET next_check =
                COALESCE(
                    lastchecked + interval '1 day',
                    now() AT TIME ZONE 'UTC') +
                (interval '1 day' * (%s * recent_failure_count))
            FROM (
                SELECT bug_watch.id,
                    (SELECT COUNT(*)
                        FROM (SELECT 1
                            FROM bugwatchactivity
                           WHERE bugwatchactivity.bug_watch = bug_watch.id
                             AND bugwatchactivity.result NOT IN (%s)
                           ORDER BY bugwatchactivity.id DESC
                           LIMIT %s) AS recent_failures
                    ) AS recent_failure_count
                FROM BugWatch AS bug_watch
                WHERE bug_watch.next_check IS NULL
                LIMIT %s
            ) AS counts
        WHERE BugWatch.id = counts.id
        """ % sqlvalues(
            self.delay_coefficient, BUG_WATCH_ACTIVITY_SUCCESS_STATUSES,
            self.max_sample_size, chunk_size)
        self.transaction.begin()
        result = self.store.execute(query)
        self.log.debug("Scheduled %s watches" % result.rowcount)
        self.transaction.commit()

    def isDone(self):
        """Return True when there are no more watches to schedule."""
        return self.store.find(
            BugWatch, BugWatch.next_check == None).is_empty()
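
BugWatchScheduler only defines the per-chunk work and the stopping condition; in the garbo infrastructure a loop tuner picks the chunk size adaptively. A simplified fixed-size driver illustrating the ITunableLoop contract (this driver is an assumption for illustration, not the real tuner; `log` is assumed to be a logger):

    # Hypothetical driver, for illustration only.
    scheduler = BugWatchScheduler(log)
    while not scheduler.isDone():
        scheduler(100)  # schedule up to 100 unscheduled watches per call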
コード例 #24
0
    def test_PopulateLatestPersonSourcePackageReleaseCache(self):
        switch_dbuser('testadmin')
        # Make some sample test data - we create published source package
        # releases for 2 different creators and maintainers.
        creators = []
        for _ in range(2):
            creators.append(self.factory.makePerson())
        maintainers = []
        for _ in range(2):
            maintainers.append(self.factory.makePerson())

        spn = self.factory.makeSourcePackageName()
        distroseries = self.factory.makeDistroSeries()
        spr1 = self.factory.makeSourcePackageRelease(
            creator=creators[0], maintainer=maintainers[0],
            distroseries=distroseries, sourcepackagename=spn,
            date_uploaded=datetime(2010, 12, 1, tzinfo=UTC))
        self.factory.makeSourcePackagePublishingHistory(
            status=PackagePublishingStatus.PUBLISHED,
            sourcepackagerelease=spr1)
        spr2 = self.factory.makeSourcePackageRelease(
            creator=creators[0], maintainer=maintainers[1],
            distroseries=distroseries, sourcepackagename=spn,
            date_uploaded=datetime(2010, 12, 2, tzinfo=UTC))
        self.factory.makeSourcePackagePublishingHistory(
            status=PackagePublishingStatus.PUBLISHED,
            sourcepackagerelease=spr2)
        spr3 = self.factory.makeSourcePackageRelease(
            creator=creators[1], maintainer=maintainers[0],
            distroseries=distroseries, sourcepackagename=spn,
            date_uploaded=datetime(2010, 12, 3, tzinfo=UTC))
        self.factory.makeSourcePackagePublishingHistory(
            status=PackagePublishingStatus.PUBLISHED,
            sourcepackagerelease=spr3)
        spr4 = self.factory.makeSourcePackageRelease(
            creator=creators[1], maintainer=maintainers[1],
            distroseries=distroseries, sourcepackagename=spn,
            date_uploaded=datetime(2010, 12, 4, tzinfo=UTC))
        spph_1 = self.factory.makeSourcePackagePublishingHistory(
            status=PackagePublishingStatus.PUBLISHED,
            sourcepackagerelease=spr4)

        transaction.commit()
        self.runFrequently()

        store = IMasterStore(LatestPersonSourcePackageReleaseCache)
        # Check that the garbo state table has data.
        self.assertIsNotNone(
            store.execute(
                'SELECT * FROM GarboJobState WHERE name=?',
                params=[u'PopulateLatestPersonSourcePackageReleaseCache']
            ).get_one())

        def _assert_release_by_creator(creator, spr):
            release_records = store.find(
                LatestPersonSourcePackageReleaseCache,
                LatestPersonSourcePackageReleaseCache.creator_id == creator.id)
            [record] = list(release_records)
            self.assertEqual(spr.creator, record.creator)
            self.assertIsNone(record.maintainer_id)
            self.assertEqual(
                spr.dateuploaded, UTC.localize(record.dateuploaded))

        def _assert_release_by_maintainer(maintainer, spr):
            release_records = store.find(
                LatestPersonSourcePackageReleaseCache,
                LatestPersonSourcePackageReleaseCache.maintainer_id ==
                maintainer.id)
            [record] = list(release_records)
            self.assertEqual(spr.maintainer, record.maintainer)
            self.assertIsNone(record.creator_id)
            self.assertEqual(
                spr.dateuploaded, UTC.localize(record.dateuploaded))

        _assert_release_by_creator(creators[0], spr2)
        _assert_release_by_creator(creators[1], spr4)
        _assert_release_by_maintainer(maintainers[0], spr3)
        _assert_release_by_maintainer(maintainers[1], spr4)

        job_data = load_garbo_job_state(
            'PopulateLatestPersonSourcePackageReleaseCache')
        self.assertEqual(spph_1.id, job_data['last_spph_id'])

        # Create a newer published source package release and ensure the
        # release cache table is correctly updated.
        switch_dbuser('testadmin')
        spr5 = self.factory.makeSourcePackageRelease(
            creator=creators[1], maintainer=maintainers[1],
            distroseries=distroseries, sourcepackagename=spn,
            date_uploaded=datetime(2010, 12, 5, tzinfo=UTC))
        spph_2 = self.factory.makeSourcePackagePublishingHistory(
            status=PackagePublishingStatus.PUBLISHED,
            sourcepackagerelease=spr5)

        transaction.commit()
        self.runFrequently()

        _assert_release_by_creator(creators[0], spr2)
        _assert_release_by_creator(creators[1], spr5)
        _assert_release_by_maintainer(maintainers[0], spr3)
        _assert_release_by_maintainer(maintainers[1], spr5)

        job_data = load_garbo_job_state(
            'PopulateLatestPersonSourcePackageReleaseCache')
        self.assertEqual(spph_2.id, job_data['last_spph_id'])
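
The assertions on job_data['last_spph_id'] exercise the checkpointing used by incremental garbo jobs: the job records the id of the last SourcePackagePublishingHistory row it processed in GarboJobState, and the next run resumes from there. A generic sketch of that pattern (apart from load_garbo_job_state, which the test above uses, the names below are illustrative assumptions, not the job's actual code):

    # Hypothetical incremental pass, for illustration only.
    state = load_garbo_job_state(job_name) or {}
    last_id = state.get('last_spph_id', 0)
    for spph in store.find(SPPH, SPPH.id > last_id).order_by(SPPH.id):
        update_cache_rows_for(spph)  # assumed helper updating the cache table
        last_id = spph.id
    save_garbo_job_state(job_name, {'last_spph_id': last_id})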
コード例 #25
0
class BugWatchScheduler(TunableLoop):
    """An `ITunableLoop` for scheduling BugWatches."""

    maximum_chunk_size = 1000

    def __init__(self,
                 log,
                 abort_time=None,
                 max_delay_days=None,
                 max_sample_size=None):
        super(BugWatchScheduler, self).__init__(log, abort_time)
        self.transaction = transaction
        self.store = IMasterStore(BugWatch)

        if max_delay_days is None:
            max_delay_days = MAX_DELAY_DAYS
        if max_sample_size is None:
            max_sample_size = MAX_SAMPLE_SIZE
        self.max_sample_size = max_sample_size

        self.delay_coefficient = get_delay_coefficient(max_delay_days,
                                                       max_sample_size)

    def __call__(self, chunk_size):
        """Run the loop."""
        # XXX 2010-03-25 gmb bug=198767:
        #     We cast chunk_size to an integer to ensure that we're not
        #     trying to slice using floats or anything similarly
        #     foolish. We shouldn't have to do this.
        chunk_size = int(chunk_size)
        query = """
        UPDATE BugWatch
            SET next_check =
                COALESCE(
                    lastchecked + interval '1 day',
                    now() AT TIME ZONE 'UTC') +
                (interval '1 day' * (%s * recent_failure_count))
            FROM (
                SELECT bug_watch.id,
                    (SELECT COUNT(*)
                        FROM (SELECT 1
                            FROM bugwatchactivity
                           WHERE bugwatchactivity.bug_watch = bug_watch.id
                             AND bugwatchactivity.result NOT IN (%s)
                           ORDER BY bugwatchactivity.id DESC
                           LIMIT %s) AS recent_failures
                    ) AS recent_failure_count
                FROM BugWatch AS bug_watch
                WHERE bug_watch.next_check IS NULL
                LIMIT %s
            ) AS counts
        WHERE BugWatch.id = counts.id
        """ % sqlvalues(self.delay_coefficient,
                        BUG_WATCH_ACTIVITY_SUCCESS_STATUSES,
                        self.max_sample_size, chunk_size)
        self.transaction.begin()
        result = self.store.execute(query)
        self.log.debug("Scheduled %s watches" % result.rowcount)
        self.transaction.commit()

    def isDone(self):
        """Return True when there are no more watches to schedule."""
        return self.store.find(BugWatch,
                               BugWatch.next_check == None).is_empty()
コード例 #26
0
def close_account(username, log):
    """Close a person's account.

    Return True on success; if the account cannot be closed, log the
    offending references and raise LaunchpadScriptFailure.
    """
    store = IMasterStore(Person)
    janitor = getUtility(ILaunchpadCelebrities).janitor

    cur = cursor()
    references = list(postgresql.listReferences(cur, 'person', 'id'))
    postgresql.check_indirect_references(references)

    person = store.using(
        Person,
        LeftJoin(EmailAddress, Person.id == EmailAddress.personID)).find(
            Person,
            Or(Person.name == username,
               Lower(EmailAddress.email) == Lower(username))).one()
    if person is None:
        raise LaunchpadScriptFailure("User %s does not exist" % username)
    person_name = person.name

    # We don't do teams
    if person.is_team:
        raise LaunchpadScriptFailure("%s is a team" % person_name)

    log.info("Closing %s's account" % person_name)

    def table_notification(table):
        log.debug("Handling the %s table" % table)

    # All names starting with 'removed' are blacklisted, so this will always
    # succeed.
    new_name = 'removed%d' % person.id

    # Some references can safely remain in place and link to the cleaned-out
    # Person row.
    skip = {
        # These references express some kind of audit trail.  The actions in
        # question still happened, and in some cases the rows may still have
        # functional significance (e.g. subscriptions or access grants), but
        # we no longer identify the actor.
        ('accessartifactgrant', 'grantor'),
        ('accesspolicygrant', 'grantor'),
        ('binarypackagepublishinghistory', 'removed_by'),
        ('branch', 'registrant'),
        ('branchmergeproposal', 'merge_reporter'),
        ('branchmergeproposal', 'merger'),
        ('branchmergeproposal', 'queuer'),
        ('branchmergeproposal', 'registrant'),
        ('branchmergeproposal', 'reviewer'),
        ('branchsubscription', 'subscribed_by'),
        ('bug', 'owner'),
        ('bug', 'who_made_private'),
        ('bugactivity', 'person'),
        ('bugnomination', 'decider'),
        ('bugnomination', 'owner'),
        ('bugtask', 'owner'),
        ('bugsubscription', 'subscribed_by'),
        ('codeimport', 'owner'),
        ('codeimport', 'registrant'),
        ('codeimportevent', 'person'),
        ('faq', 'last_updated_by'),
        ('featureflagchangelogentry', 'person'),
        ('gitactivity', 'changee'),
        ('gitactivity', 'changer'),
        ('gitrepository', 'registrant'),
        ('gitrule', 'creator'),
        ('gitrulegrant', 'grantor'),
        ('gitsubscription', 'subscribed_by'),
        ('message', 'owner'),
        ('messageapproval', 'disposed_by'),
        ('messageapproval', 'posted_by'),
        ('packagecopyrequest', 'requester'),
        ('packagediff', 'requester'),
        ('packageupload', 'signing_key_owner'),
        ('personlocation', 'last_modified_by'),
        ('persontransferjob', 'major_person'),
        ('persontransferjob', 'minor_person'),
        ('poexportrequest', 'person'),
        ('pofile', 'lasttranslator'),
        ('pofiletranslator', 'person'),
        ('product', 'registrant'),
        ('question', 'answerer'),
        ('questionreopening', 'answerer'),
        ('questionreopening', 'reopener'),
        ('snapbuild', 'requester'),
        ('sourcepackagepublishinghistory', 'creator'),
        ('sourcepackagepublishinghistory', 'removed_by'),
        ('sourcepackagepublishinghistory', 'sponsor'),
        ('sourcepackagerecipebuild', 'requester'),
        ('sourcepackagerelease', 'creator'),
        ('sourcepackagerelease', 'maintainer'),
        ('sourcepackagerelease', 'signing_key_owner'),
        ('specification', 'approver'),
        ('specification', 'completer'),
        ('specification', 'drafter'),
        ('specification', 'goal_decider'),
        ('specification', 'goal_proposer'),
        ('specification', 'last_changed_by'),
        ('specification', 'starter'),
        ('structuralsubscription', 'subscribed_by'),
        ('teammembership', 'acknowledged_by'),
        ('teammembership', 'proposed_by'),
        ('teammembership', 'reviewed_by'),
        ('translationimportqueueentry', 'importer'),
        ('translationmessage', 'reviewer'),
        ('translationmessage', 'submitter'),
        ('translationrelicensingagreement', 'person'),
        ('usertouseremail', 'recipient'),
        ('usertouseremail', 'sender'),
        ('xref', 'creator'),

        # This is maintained by trigger functions and a garbo job.  It
        # doesn't need to be updated immediately.
        ('bugsummary', 'viewed_by'),

        # XXX cjwatson 2019-05-02 bug=1827399: This is suboptimal because it
        # does retain some personal information, but it's currently hard to
        # deal with due to the size and complexity of references to it.  We
        # can hopefully provide a garbo job for this eventually.
        ('revisionauthor', 'person'),
    }
    reference_names = {(src_tab, src_col)
                       for src_tab, src_col, _, _, _, _ in references}
    for src_tab, src_col in skip:
        if (src_tab, src_col) not in reference_names:
            raise AssertionError(
                "%s.%s is not a Person reference; possible typo?" %
                (src_tab, src_col))

    # XXX cjwatson 2018-11-29: Registrants could possibly be left as-is, but
    # perhaps we should pretend that the registrant was ~registry in that
    # case instead?

    # Remove the EmailAddress. This is the most important step, as
    # people requesting account removal seem to primarily be interested
    # in ensuring we no longer store this information.
    table_notification('EmailAddress')
    store.find(EmailAddress, EmailAddress.personID == person.id).remove()

    # Clean out personal details from the Person table
    table_notification('Person')
    person.display_name = 'Removed by request'
    person.name = new_name
    person.homepage_content = None
    person.icon = None
    person.mugshot = None
    person.hide_email_addresses = False
    person.registrant = None
    person.logo = None
    person.creation_rationale = PersonCreationRationale.UNKNOWN
    person.creation_comment = None

    # Keep the corresponding PersonSettings row, but reset everything to the
    # defaults.
    table_notification('PersonSettings')
    store.find(PersonSettings, PersonSettings.personID == person.id).set(
        selfgenerated_bugnotifications=DEFAULT,
        # XXX cjwatson 2018-11-29: These two columns have NULL defaults, but
        # perhaps shouldn't?
        expanded_notification_footers=False,
        require_strong_email_authentication=False)
    skip.add(('personsettings', 'person'))

    # Remove almost everything from the Account row and the corresponding
    # OpenIdIdentifier rows, preserving only a minimal audit trail.
    if person.account is not None:
        table_notification('Account')
        account = removeSecurityProxy(person.account)
        account.displayname = 'Removed by request'
        account.creation_rationale = AccountCreationRationale.UNKNOWN
        person.setAccountStatus(AccountStatus.CLOSED, janitor,
                                "Closed using close-account.")

        table_notification('OpenIdIdentifier')
        store.find(OpenIdIdentifier,
                   OpenIdIdentifier.account_id == account.id).remove()

    # Reassign their bugs
    table_notification('BugTask')
    store.find(BugTask, BugTask.assigneeID == person.id).set(assigneeID=None)

    # Reassign questions assigned to the user, and close all their questions
    # in non-final states since nobody else can.
    table_notification('Question')
    store.find(Question, Question.assigneeID == person.id).set(assigneeID=None)
    owned_non_final_questions = store.find(
        Question, Question.ownerID == person.id,
        Question.status.is_in([
            QuestionStatus.OPEN,
            QuestionStatus.NEEDSINFO,
            QuestionStatus.ANSWERED,
        ]))
    owned_non_final_questions.set(
        status=QuestionStatus.SOLVED,
        whiteboard=(
            'Closed by Launchpad due to owner requesting account removal'))
    skip.add(('question', 'owner'))

    # Remove rows from tables in simple cases in the given order
    removals = [
        # Trash their email addresses. People who request complete account
        # removal would be unhappy if they reregistered with their old email
        # address and this resurrected their deleted account, as the email
        # address is probably the piece of data we store that they were most
        # concerned with being removed from our systems.
        ('EmailAddress', 'person'),

        # Trash their codes of conduct and GPG keys
        ('SignedCodeOfConduct', 'owner'),
        ('GpgKey', 'owner'),

        # Subscriptions and notifications
        ('BranchSubscription', 'person'),
        ('BugMute', 'person'),
        ('BugNotificationRecipient', 'person'),
        ('BugSubscription', 'person'),
        ('BugSubscriptionFilterMute', 'person'),
        ('GitSubscription', 'person'),
        ('MailingListSubscription', 'person'),
        ('QuestionSubscription', 'person'),
        ('SpecificationSubscription', 'person'),
        ('StructuralSubscription', 'subscriber'),

        # Personal stuff, freeing up the namespace for others who want to play
        # or just to remove any fingerprints identifying the user.
        ('IrcId', 'person'),
        ('JabberId', 'person'),
        ('WikiName', 'person'),
        ('PersonLanguage', 'person'),
        ('PersonLocation', 'person'),
        ('SshKey', 'person'),

        # Karma
        ('Karma', 'person'),
        ('KarmaCache', 'person'),
        ('KarmaTotalCache', 'person'),

        # Team memberships
        ('TeamMembership', 'person'),
        ('TeamParticipation', 'person'),

        # Contacts
        ('AnswerContact', 'person'),

        # Pending items in queues
        ('POExportRequest', 'person'),

        # Access grants
        ('AccessArtifactGrant', 'grantee'),
        ('AccessPolicyGrant', 'grantee'),
        ('ArchivePermission', 'person'),
        ('GitRuleGrant', 'grantee'),
        ('SharingJob', 'grantee'),

        # Soyuz reporting
        ('LatestPersonSourcePackageReleaseCache', 'creator'),
        ('LatestPersonSourcePackageReleaseCache', 'maintainer'),

        # "Affects me too" information
        ('BugAffectsPerson', 'person'),
    ]
    for table, person_id_column in removals:
        table_notification(table)
        store.execute(
            """
            DELETE FROM %(table)s WHERE %(person_id_column)s = ?
            """ % {
                'table': table,
                'person_id_column': person_id_column,
            }, (person.id, ))

    # Trash Sprint Attendance records in the future.
    table_notification('SprintAttendance')
    store.execute(
        """
        DELETE FROM SprintAttendance
        USING Sprint
        WHERE Sprint.id = SprintAttendance.sprint
            AND attendee = ?
            AND Sprint.time_starts > CURRENT_TIMESTAMP AT TIME ZONE 'UTC'
        """, (person.id, ))
    # Any remaining past sprint attendance records can harmlessly refer to
    # the placeholder person row.
    skip.add(('sprintattendance', 'attendee'))

    # generate_ppa_htaccess currently relies on seeing active
    # ArchiveAuthToken rows so that it knows which ones to remove from
    # .htpasswd files on disk in response to the cancellation of the
    # corresponding ArchiveSubscriber rows; but even once PPA authorisation
    # is handled dynamically, we probably still want to have the per-person
    # audit trail here.
    archive_subscriber_ids = set(
        store.find(
            ArchiveSubscriber.id, ArchiveSubscriber.subscriber_id == person.id,
            ArchiveSubscriber.status == ArchiveSubscriberStatus.CURRENT))
    if archive_subscriber_ids:
        getUtility(IArchiveSubscriberSet).cancel(archive_subscriber_ids,
                                                 janitor)
    skip.add(('archivesubscriber', 'subscriber'))
    skip.add(('archiveauthtoken', 'person'))

    # Remove hardware submissions.
    table_notification('HWSubmissionDevice')
    store.execute(
        """
        DELETE FROM HWSubmissionDevice
        USING HWSubmission
        WHERE HWSubmission.id = HWSubmissionDevice.submission
            AND owner = ?
        """, (person.id, ))
    table_notification('HWSubmission')
    store.find(HWSubmission, HWSubmission.ownerID == person.id).remove()

    has_references = False

    # Check for active related projects, and skip inactive ones.
    for col in 'bug_supervisor', 'driver', 'owner':
        # Raw SQL because otherwise using Product._owner while displaying it
        # as Product.owner is too fiddly.
        result = store.execute(
            """
            SELECT COUNT(*) FROM product WHERE active AND %(col)s = ?
            """ % {'col': col}, (person.id, ))
        count = result.get_one()[0]
        if count:
            log.error("User %s is still referenced by %d product.%s values" %
                      (person_name, count, col))
            has_references = True
        skip.add(('product', col))
    for col in 'driver', 'owner':
        count = store.find(ProductSeries, ProductSeries.product == Product.id,
                           Product.active,
                           getattr(ProductSeries, col) == person).count()
        if count:
            log.error(
                "User %s is still referenced by %d productseries.%s values" %
                (person_name, count, col))
            has_references = True
        skip.add(('productseries', col))

    # Closing the account will only work if all references have been handled
    # by this point.  If not, it's safer to bail out.  It's OK if this
    # doesn't work in all conceivable situations, since some of them may
    # require careful thought and decisions by a human administrator.
    for src_tab, src_col, ref_tab, ref_col, updact, delact in references:
        if (src_tab, src_col) in skip:
            continue
        result = store.execute(
            """
            SELECT COUNT(*) FROM %(src_tab)s WHERE %(src_col)s = ?
            """ % {
                'src_tab': src_tab,
                'src_col': src_col,
            }, (person.id, ))
        count = result.get_one()[0]
        if count:
            log.error("User %s is still referenced by %d %s.%s values" %
                      (person_name, count, src_tab, src_col))
            has_references = True
    if has_references:
        raise LaunchpadScriptFailure("User %s is still referenced" %
                                     person_name)

    return True
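
close_account leaves transaction handling to the calling script, so a dry run can abort rather than commit. A minimal sketch of that calling convention (the surrounding script and its dry_run option are assumptions, not part of this example):

    # Hypothetical caller, for illustration only.
    if close_account(username, log):
        if dry_run:
            transaction.abort()
        else:
            transaction.commit()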
コード例 #27
0
class InitializeDistroSeries:
    """Copy in all of the parents distroseries's configuration. This
    includes all configuration for distroseries as well as distroarchseries,
    publishing and all publishing records for sources and binaries.

    We support 2 use cases here:
      #1 If the child distribution has zero initialized series:
        - the parent list can't be empty (otherwise we trigger an error);
        - the series will be derived from the parents passed as argument;
        - the parents will be set to the parents passed as argument;
        - first_derivation = True.
      #2 If the child distribution has more than zero initialized series:
        - the series will be derived from the previous_series;
        - the parents will be set to the parents passed as argument or
          the parents of the previous_series if the passed argument is empty;
        - first_derivation = False.

    Preconditions:
      The distroseries must exist and be completely unused, with no source
      or binary packages existing, as well as no distroarchseries set up.
      Section and component selections must be empty. It must not have any
      parent series.

    Outcome:
      The distroarchseries set up in the parent series will be copied.
      The publishing structure will be copied from the parents. All
      PUBLISHED and PENDING packages in the parents will be created in
      this distroseries and its distroarchseries. All component and section
      selections will be duplicated, as will any permission-related
      structures.

    Note:
      This method will raise an InitializationError when the pre-conditions
      are not met. After this is run, you still need to construct chroots
      for building, you need to add anything missing wrt. ports etc. This
      method is only meant to give you a basic copy of parent series in
      order to assist you in preparing a new series of a distribution or
      in the initialization of a derivative.
    """
    def __init__(self,
                 distroseries,
                 parents=(),
                 arches=(),
                 archindep_archtag=None,
                 packagesets=(),
                 rebuild=False,
                 overlays=(),
                 overlay_pockets=(),
                 overlay_components=()):
        self.distroseries = distroseries
        self.parent_ids = [int(id) for id in parents]
        # Load parent objects in bulk...
        parents_bulk = bulk.load(DistroSeries, self.parent_ids)
        # ... sort the parents to match the order in the 'parents' parameter.
        self.parents = sorted(
            parents_bulk, key=lambda parent: self.parent_ids.index(parent.id))
        self.arches = arches
        self.archindep_archtag = archindep_archtag
        self.packagesets_ids = [
            ensure_unicode(packageset) for packageset in packagesets
        ]
        self.packagesets = bulk.load(
            Packageset, [int(packageset) for packageset in packagesets])
        self.rebuild = rebuild
        self.overlays = overlays
        self.overlay_pockets = overlay_pockets
        self.overlay_components = overlay_components
        self._store = IMasterStore(DistroSeries)

        self.first_derivation = (
            not self.distroseries.distribution.has_published_sources)

        if self.first_derivation:
            # Use-case #1.
            self.derivation_parents = self.parents
            self.derivation_parent_ids = self.parent_ids
        else:
            # Use-case #2.
            self.derivation_parents = [self.distroseries.previous_series]
            self.derivation_parent_ids = [
                p.id for p in self.derivation_parents if p is not None
            ]
            if self.parent_ids == []:
                self.parents = (
                    self.distroseries.previous_series.getParentSeries())
        self._create_source_names_by_parent()

    def check(self):
        if self.distroseries.isDerivedSeries():
            raise InitializationError(
                ("Series {child.name} has already been initialised"
                 ".").format(child=self.distroseries))
        self._checkPublisherConfig()
        if (self.distroseries.distribution.has_published_sources
                and self.distroseries.previous_series is None):
            raise InitializationError(
                ("Series {child.name} has no previous series and "
                 "the distribution already has initialised series"
                 ".").format(child=self.distroseries))
        self._checkParents()
        self._checkArchindep()
        for parent in self.derivation_parents:
            self._checkBuilds(parent)
            self._checkQueue(parent)
        self._checkSeries()

    def _checkArchindep(self):
        # Check that the child distroseries has an architecture to
        # build architecture independent binaries.
        if self.archindep_archtag is None:
            # No archindep_archtag was given, so we try to figure out
            # a proper one among the parents'.
            potential_nominated_arches = self._potential_nominated_arches(
                self.derivation_parents)
            if len(potential_nominated_arches) == 0:
                raise InitializationError(
                    "The distroseries has no architectures selected to "
                    "build architecture independent binaries.")
        else:
            # Make sure that the given archindep_archtag is among the
            # selected architectures.
            if (self.arches is not None and len(self.arches) != 0
                    and self.archindep_archtag not in self.arches):
                raise InitializationError(
                    "The selected architecture independent architecture tag "
                    "is not among the selected architectures.")

    def _checkPublisherConfig(self):
        """A series cannot be initialized if it has no publisher config
        set up.
        """
        publisherconfigset = getUtility(IPublisherConfigSet)
        config = publisherconfigset.getByDistribution(
            self.distroseries.distribution)
        if config is None:
            raise InitializationError(
                ("Distribution {child.name} has no publisher configuration. "
                 "Please ask an administrator to set this up"
                 ".").format(child=self.distroseries.distribution))

    def _checkParents(self):
        """If self.first_derivation, the parents list cannot be empty."""
        if self.first_derivation:
            # Use-case #1.
            if len(self.parent_ids) == 0:
                raise InitializationError(
                    "No other series in the distribution is initialised "
                    "and a parent was not explicitly specified.")

    def _checkBuilds(self, parent):
        """Assert there are no pending builds for the given parent series.

        Only cares about the RELEASE, SECURITY and UPDATES pockets, which are
        the only ones inherited via initializeFromParent method.
        Restrict the check to the selected architectures (if applicable).
        Restrict the check to the selected packages if a limited set of
        packagesets is used by the initialization.
        """
        spns = self.source_names_by_parent.get(parent.id, None)
        if spns is not None and len(spns) == 0:
            # If no sources are selected in this parent, skip the check.
            return
        # spns=None means no packagesets selected so we need to consider
        # all sources.

        arch_tags = self.arches if len(self.arches) != 0 else None
        pending_builds = parent.getBuildRecords(BuildStatus.NEEDSBUILD,
                                                pocket=INIT_POCKETS,
                                                arch_tag=arch_tags,
                                                name=spns)

        if not pending_builds.is_empty():
            raise InitializationError("The parent series has pending builds "
                                      "for selected sources.")

    def _checkQueue(self, parent):
        """Assert upload queue is empty on the given parent series.

        Only cares about the RELEASE, SECURITY and UPDATES pockets, which are
        the only ones inherited via initializeFromParent method.
        Restrict the check to the selected packages if a limited set of
        packagesets is used by the initialization.
         """
        statuses = [
            PackageUploadStatus.NEW,
            PackageUploadStatus.ACCEPTED,
            PackageUploadStatus.UNAPPROVED,
        ]
        spns = self.source_names_by_parent.get(parent.id, None)
        if spns is not None and len(spns) == 0:
            # If no sources are selected in this parent, skip the check.
            return
        # spns=None means no packagesets selected so we need to consider
        # all sources.

        items = getUtility(IPackageUploadSet).getBuildsForSources(
            parent, statuses, INIT_POCKETS, spns)
        if not items.is_empty():
            raise InitializationError(
                "The parent series has sources waiting in its upload "
                "queues that match your selection.")

    def _checkSeries(self):
        error = ("Cannot copy distroarchseries from parent; there are "
                 "already one or more distroarchseries initialised for "
                 "this series.")
        sources = self.distroseries.getAllPublishedSources()
        binaries = self.distroseries.getAllPublishedBinaries()
        if not all(
                map(methodcaller('is_empty'),
                    (sources, binaries, self.distroseries.architectures,
                     self.distroseries.sections))):
            raise InitializationError(error)
        if self.distroseries.components:
            raise InitializationError(error)

    def initialize(self):
        self._set_parents()
        self._copy_configuration()
        self._copy_architectures()
        self._set_nominatedarchindep()
        self._copy_packages()
        self._copy_packagesets()
        self._copy_pocket_permissions()
        self._create_dsds()
        self._set_initialized()
        transaction.commit()

    def _set_parents(self):
        count = 0
        for parent in self.parents:
            dsp_set = getUtility(IDistroSeriesParentSet)
            if self.overlays and self.overlays[count]:
                pocket = PackagePublishingPocket.__metaclass__.getTermByToken(
                    PackagePublishingPocket, self.overlay_pockets[count]).value
                component_set = getUtility(IComponentSet)
                component = component_set[self.overlay_components[count]]
                dsp_set.new(self.distroseries,
                            parent,
                            initialized=False,
                            is_overlay=True,
                            pocket=pocket,
                            component=component,
                            ordering=count)
            else:
                dsp_set.new(self.distroseries,
                            parent,
                            initialized=False,
                            is_overlay=False,
                            ordering=count)
            count += 1

    def _set_initialized(self):
        dsp_set = getUtility(IDistroSeriesParentSet)
        distroseriesparents = dsp_set.getByDerivedSeries(self.distroseries)
        for distroseriesparent in distroseriesparents:
            distroseriesparent.initialized = True

    def _has_same_parents_as_previous_series(self):
        # Does this distroseries have the same parents as its previous
        # series? (note that the parents' order does not matter here)
        dsp_set = getUtility(IDistroSeriesParentSet)
        previous_series_parents = [
            dsp.parent_series for dsp in dsp_set.getByDerivedSeries(
                self.distroseries.previous_series)
        ]
        return set(previous_series_parents) == set(self.parents)

    def _create_dsds(self):
        if not self.first_derivation:
            if (self._has_same_parents_as_previous_series()
                    and not self.packagesets_ids):
                # If the parents are the same as previous_series's
                # parents and all the packagesets are being copied,
                # then we simply copy the DSDs from previous_series
                # for performance reasons.
                self._copy_dsds_from_previous_series()
            else:
                # Either the parents have changed (compared to
                # previous_series's parents) or a selection only of the
                # packagesets is being copied so we have to recompute
                # the DSDs by creating DSD Jobs.
                self._create_dsd_jobs()
        else:
            # If this is the first derivation, create the DSD Jobs.
            self._create_dsd_jobs()

    def _copy_dsds_from_previous_series(self):
        self._store.execute("""
            INSERT INTO DistroSeriesDifference
                (derived_series, source_package_name, package_diff,
                status, difference_type, parent_package_diff,
                source_version, parent_source_version,
                base_version, parent_series)
            SELECT
                %s AS derived_series, source_package_name,
                package_diff, status,
                difference_type, parent_package_diff, source_version,
                parent_source_version, base_version, parent_series
            FROM DistroSeriesDifference AS dsd
                WHERE dsd.derived_series = %s
            """ % sqlvalues(self.distroseries.id,
                            self.distroseries.previous_series.id))

    def _create_dsd_jobs(self):
        job_source = getUtility(IDistroSeriesDifferenceJobSource)
        job_source.massCreateForSeries(self.distroseries)

    def _copy_configuration(self):
        self.distroseries.backports_not_automatic = any(
            parent.backports_not_automatic
            for parent in self.derivation_parents)
        self.distroseries.include_long_descriptions = any(
            parent.include_long_descriptions
            for parent in self.derivation_parents)

    def _copy_architectures(self):
        das_filter = ' AND distroseries IN %s ' % (sqlvalues(
            [p.id for p in self.derivation_parents]))
        if self.arches:
            das_filter += ' AND architecturetag IN %s ' % (sqlvalues(
                self.arches))
        self._store.execute("""
            INSERT INTO DistroArchSeries
            (distroseries, processor, architecturetag, owner, official,
             supports_virtualized)
            SELECT %s, processor, architecturetag, %s,
                bool_and(official), bool_or(supports_virtualized)
            FROM DistroArchSeries WHERE enabled = TRUE %s
            GROUP BY processor, architecturetag
            """ % (sqlvalues(self.distroseries, self.distroseries.owner) +
                   (das_filter, )))
        self._store.flush()

    def _set_nominatedarchindep(self):
        if self.archindep_archtag is None:
            # Select the arch-indep builder from the intersection between
            # the selected architectures and the list of the parent's
            # arch-indep builders.
            arch_tag = self._potential_nominated_arches(
                self.derivation_parents).pop()
            self.distroseries.nominatedarchindep = (
                self.distroseries.getDistroArchSeries(arch_tag))
        else:
            self.distroseries.nominatedarchindep = (
                self.distroseries.getDistroArchSeries(self.archindep_archtag))

    def _potential_nominated_arches(self, parent_list):
        parent_indep_archtags = set(parent.nominatedarchindep.architecturetag
                                    for parent in parent_list
                                    if parent.nominatedarchindep is not None)

        if len(self.arches) == 0:
            return parent_indep_archtags
        else:
            return parent_indep_archtags.intersection(self.arches)
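
    # A minimal illustration (hypothetical values, not part of the original
    # code): if the parents' nominated arch-indep tags are {'amd64', 'i386'}
    # and self.arches is ['amd64', 'armhf'], the intersection returned above
    # is {'amd64'}, which _set_nominatedarchindep() then pops to pick the
    # arch-indep builder for the new series.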

    def _copy_packages(self):
        # Perform the copies
        self._copy_component_section_and_format_selections()

        # Prepare the lists of distroarchseries for which binary packages
        # shall be copied.
        distroarchseries_lists = {}
        for parent in self.derivation_parents:
            distroarchseries_lists[parent] = []
            for arch in self.distroseries.architectures:
                if self.arches and (arch.architecturetag not in self.arches):
                    continue
                try:
                    parent_arch = parent.getDistroArchSeries(
                        arch.architecturetag)
                except NotFoundError:
                    continue

                distroarchseries_lists[parent].append((parent_arch, arch))
        # Now copy source and binary packages.
        self._copy_publishing_records(distroarchseries_lists)
        self._copy_packaging_links()

    def _use_cloner(self, target_archive, archive):
        """Returns True if it's safe to use the packagecloner (as opposed
        to using the packagecopier).
        We use two different ways to copy packages:
         - the packagecloner: fast but not conflict safe.
         - the packagecopier: slow but performs lots of checks to
         avoid creating conflicts.
        1. We'll use the cloner:
        If this is not a first initialization.
        And If:
            1.a If the archives are different and the target archive is
                empty use the cloner.
            Or
            1.b. If the archives are the same and the target series is
                empty use the cloner.
        2.  Otherwise use the copier.
        """
        if self.first_derivation:
            return False

        target_archive_empty = target_archive.getPublishedSources().is_empty()
        case_1a = (target_archive != archive and target_archive_empty)
        case_1b = (target_archive == archive and
                   (target_archive_empty or target_archive.getPublishedSources(
                       distroseries=self.distroseries).is_empty()))
        return case_1a or case_1b
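
    # A rough sketch of the decision above (not part of the original code,
    # and assuming boolean shorthands for the conditions):
    #
    #   first initialization                                   -> copier
    #   different archives and empty target archive            -> cloner (1.a)
    #   same archive and empty archive or empty target series  -> cloner (1.b)
    #   anything else                                           -> copier
    #
    # So, outside the first derivation, a hypothetical call such as
    # _use_cloner(target, target) on a still-empty target series would
    # return True via case 1.b.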

    def _create_source_names_by_parent(self):
        """If only a subset of the packagesets was selected to be copied,
        create a dict with the list of source names to be copied for each
        parent.

        source_names_by_parent.get(parent.id) can be 3 different things:
        - None: this means that no specific packagesets were selected
        for the initialization. In this case we need to consider *all*
        the packages in this parent.
        - []: this means that some specific packagesets were selected
        for the initialization but none in this parent. We can skip
        this parent for all the copy/check operations.
        - [name1, ...]: this means that some specific packagesets
        were selected for the initialization and some are in this
        parent, so the list of packages to consider is not empty.
        """
        source_names_by_parent = {}
        if self.packagesets_ids:
            for parent in self.derivation_parents:
                spns = []
                for pkgset in self.packagesets:
                    if pkgset.distroseries == parent:
                        spns += list(pkgset.getSourcesIncluded())
                source_names_by_parent[parent.id] = spns
        self.source_names_by_parent = source_names_by_parent
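
    # An illustrative (hypothetical) shape of the mapping built above: with
    # packagesets selected and two parents it might end up as
    #   {parent_a.id: ['pkg-one', 'pkg-two'],  # sources to copy from parent_a
    #    parent_b.id: []}                      # nothing selected from parent_b
    # whereas with no packagesets selected the dict stays empty, so
    # source_names_by_parent.get(parent.id) returns None and all of that
    # parent's sources are considered.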

    def _copy_publishing_records(self, distroarchseries_lists):
        """Copy the publishing records from the parent arch series
        to the given arch series in ourselves.

        We copy all PENDING and PUBLISHED records as PENDING into our own
        publishing records.

        We copy only the RELEASE pocket in the PRIMARY archive.
        """
        archive_set = getUtility(IArchiveSet)

        for parent in self.derivation_parents:
            spns = self.source_names_by_parent.get(parent.id, None)
            if spns is not None and len(spns) == 0:
                # Some packagesets were selected but not a single
                # source from this parent: we skip the copy since
                # calling the copy with spns=[] would copy all of this
                # parent's sources.
                continue
            # spns=None means no packagesets selected so we need to consider
            # all sources.

            distroarchseries_list = distroarchseries_lists[parent]
            for archive in parent.distribution.all_distro_archives:
                if archive.purpose != ArchivePurpose.PRIMARY:
                    continue

                target_archive = archive_set.getByDistroPurpose(
                    self.distroseries.distribution, archive.purpose)
                if archive.purpose is ArchivePurpose.PRIMARY:
                    assert target_archive is not None, (
                        "Target archive doesn't exist?")
                if self._use_cloner(target_archive, archive):
                    origin = PackageLocation(archive, parent.distribution,
                                             parent,
                                             PackagePublishingPocket.RELEASE)
                    destination = PackageLocation(
                        target_archive, self.distroseries.distribution,
                        self.distroseries, PackagePublishingPocket.RELEASE)
                    processors = None
                    if self.rebuild:
                        processors = [
                            das[1].processor for das in distroarchseries_list
                        ]
                        distroarchseries_list = ()
                    getUtility(IPackageCloner).clonePackages(
                        origin, destination, distroarchseries_list, processors,
                        spns, self.rebuild)
                else:
                    # There is only one available pocket in an unreleased
                    # series.
                    target_pocket = PackagePublishingPocket.RELEASE
                    sources = archive.getPublishedSources(
                        distroseries=parent,
                        pocket=INIT_POCKETS,
                        status=(PackagePublishingStatus.PENDING,
                                PackagePublishingStatus.PUBLISHED),
                        name=spns)
                    # XXX: rvb 2011-06-23 bug=801112: do_copy is atomic (all
                    # or none of the sources will be copied). This might
                    # lead to a partially initialised series if there is a
                    # single conflict in the destination series.
                    try:
                        sources_published = do_copy(
                            sources,
                            target_archive,
                            self.distroseries,
                            target_pocket,
                            include_binaries=not self.rebuild,
                            check_permissions=False,
                            strict_binaries=False,
                            close_bugs=False,
                            create_dsd_job=False,
                            person=None)
                        if self.rebuild:
                            rebuilds = []
                            for pubrec in sources_published:
                                builds = pubrec.createMissingBuilds(
                                    list(self.distroseries.architectures))
                                rebuilds.extend(builds)
                            self._rescore_rebuilds(rebuilds)
                    except CannotCopy as error:
                        raise InitializationError(error)

    def _rescore_rebuilds(self, builds):
        """Rescore the passed builds so that they have an appropriately low
         score.
        """
        for build in builds:
            build.buildqueue_record.lastscore -= COPY_ARCHIVE_SCORE_PENALTY

    def _copy_component_section_and_format_selections(self):
        """Copy the section, component and format selections from the parents
        distro series into this one.
        """
        # Copy the component selections
        self._store.execute('''
            INSERT INTO ComponentSelection (distroseries, component)
            SELECT DISTINCT %s AS distroseries, cs.component AS component
            FROM ComponentSelection AS cs WHERE cs.distroseries IN %s
            ''' % sqlvalues(self.distroseries.id, self.derivation_parent_ids))
        # Copy the section selections
        self._store.execute('''
            INSERT INTO SectionSelection (distroseries, section)
            SELECT DISTINCT %s as distroseries, ss.section AS section
            FROM SectionSelection AS ss WHERE ss.distroseries IN %s
            ''' % sqlvalues(self.distroseries.id, self.derivation_parent_ids))
        # Copy the source format selections
        self._store.execute('''
            INSERT INTO SourcePackageFormatSelection (distroseries, format)
            SELECT DISTINCT %s as distroseries, spfs.format AS format
            FROM SourcePackageFormatSelection AS spfs
            WHERE spfs.distroseries IN %s
            ''' % sqlvalues(self.distroseries.id, self.derivation_parent_ids))

    def _copy_packaging_links(self):
        """Copy the packaging links from the parent series to this one."""
        # We iterate over the parents and copy into the child in
        # sequence to avoid creating duplicates.
        for parent_id in self.derivation_parent_ids:
            self._store.execute("""
                INSERT INTO
                    Packaging(
                        distroseries, sourcepackagename, productseries,
                        packaging, owner)
                SELECT
                    ChildSeries.id,
                    Packaging.sourcepackagename,
                    Packaging.productseries,
                    Packaging.packaging,
                    Packaging.owner
                FROM
                    Packaging
                    -- Joining the parent distroseries permits the query to
                    -- build the data set for the series being updated, yet
                    -- results are in fact the data from the original series.
                    JOIN Distroseries ChildSeries
                        ON Packaging.distroseries = %s
                WHERE
                    -- Select only the packaging links that are in the parent
                    -- that are not in the child.
                    ChildSeries.id = %s
                    AND Packaging.sourcepackagename in (
                        SELECT sourcepackagename
                        FROM Packaging
                        WHERE distroseries in (
                            SELECT id
                            FROM Distroseries
                            WHERE id = %s
                            )
                        EXCEPT
                        SELECT sourcepackagename
                        FROM Packaging
                        WHERE distroseries in (
                            SELECT id
                            FROM Distroseries
                            WHERE id = ChildSeries.id
                            )
                        )
                """ % sqlvalues(parent_id, self.distroseries.id, parent_id))

    def _copy_packagesets(self):
        """Copy packagesets from the parent distroseries."""
        packagesets = self._store.find(
            Packageset,
            Packageset.distroseries_id.is_in(self.derivation_parent_ids))
        parent_to_child = {}
        # Create the packagesets in the child series; the associated
        # archivepermissions are only copied when we are not copying
        # cross-distribution.
        parent_distro_ids = [
            parent.distribution.id for parent in self.derivation_parents
        ]
        for parent_ps in packagesets:
            # Skip any packageset that was not selected for this
            # initialization.
            if (self.packagesets_ids
                    and str(parent_ps.id) not in self.packagesets_ids):
                continue
            packageset_set = getUtility(IPackagesetSet)
            # First, try to fetch an existing packageset with this name.
            try:
                child_ps = packageset_set.getByName(parent_ps.name,
                                                    self.distroseries)
            except NoSuchPackageSet:
                # Cross-distro initializations get packagesets owned by
                # the distro owner; otherwise the old owner is preserved.
                if self.distroseries.distribution.id in parent_distro_ids:
                    new_owner = parent_ps.owner
                else:
                    new_owner = self.distroseries.owner
                child_ps = packageset_set.new(
                    parent_ps.name,
                    parent_ps.description,
                    new_owner,
                    distroseries=self.distroseries,
                    related_set=parent_ps)
            parent_to_child[parent_ps] = child_ps
            # Copy archivepermissions if we're not copying
            # cross-distribution.
            if (self.distroseries.distribution ==
                    parent_ps.distroseries.distribution):
                self._store.execute("""
                    INSERT INTO Archivepermission
                    (person, permission, archive, packageset, explicit)
                    SELECT person, permission, %s, %s, explicit
                    FROM Archivepermission WHERE packageset = %s
                    """ % sqlvalues(self.distroseries.main_archive,
                                    child_ps.id, parent_ps.id))
        # Copy the relations between sets, and the contents.
        for old_series_ps, new_series_ps in parent_to_child.items():
            old_series_sets = old_series_ps.setsIncluded(direct_inclusion=True)
            for old_series_child in old_series_sets:
                new_series_ps.add(parent_to_child[old_series_child])
            new_series_ps.add(
                old_series_ps.sourcesIncluded(direct_inclusion=True))

    def _copy_pocket_permissions(self):
        """Copy per-distroseries/pocket permissions from the parent series."""
        for parent in self.derivation_parents:
            if self.distroseries.distribution == parent.distribution:
                self._store.execute("""
                    INSERT INTO Archivepermission
                    (person, permission, archive, pocket, distroseries)
                    SELECT person, permission, %s, pocket, %s
                    FROM Archivepermission
                    WHERE pocket IS NOT NULL AND distroseries = %s
                    """ % sqlvalues(self.distroseries.main_archive,
                                    self.distroseries.id, parent.id))