def test_BranchJobPruner(self):
    # Garbo should remove jobs completed over 30 days ago.
    switch_dbuser('testadmin')
    store = IMasterStore(Job)

    db_branch = self.factory.makeAnyBranch()
    db_branch.branch_format = BranchFormat.BZR_BRANCH_5
    db_branch.repository_format = RepositoryFormat.BZR_KNIT_1
    Store.of(db_branch).flush()
    branch_job = BranchUpgradeJob.create(
        db_branch, self.factory.makePerson())
    branch_job.job.date_finished = THIRTY_DAYS_AGO

    self.assertEqual(
        store.find(
            BranchJob,
            BranchJob.branch == db_branch.id).count(),
        1)

    self.runDaily()

    switch_dbuser('testadmin')
    self.assertEqual(
        store.find(
            BranchJob,
            BranchJob.branch == db_branch.id).count(),
        0)

def _test_AnswerContactPruner(self, status, interval, expected_count=0):
    # Garbo should remove answer contacts for accounts with given 'status'
    # which was set more than 'interval' days ago.
    switch_dbuser('testadmin')
    store = IMasterStore(AnswerContact)

    person = self.factory.makePerson()
    person.addLanguage(getUtility(ILanguageSet)['en'])
    question = self.factory.makeQuestion()
    with person_logged_in(question.owner):
        question.target.addAnswerContact(person, person)
    Store.of(question).flush()
    self.assertEqual(
        store.find(
            AnswerContact,
            AnswerContact.person == person.id).count(),
        1)

    account = person.account
    account.status = status
    # We flush because a trigger sets the date_status_set and we need to
    # modify it ourselves.
    Store.of(account).flush()
    if interval is not None:
        account.date_status_set = interval

    self.runDaily()

    switch_dbuser('testadmin')
    self.assertEqual(
        store.find(
            AnswerContact,
            AnswerContact.person == person.id).count(),
        expected_count)

def test_CodeImportEventPruner(self):
    now = datetime.now(UTC)
    store = IMasterStore(CodeImportResult)

    switch_dbuser('testadmin')
    machine = self.factory.makeCodeImportMachine()
    requester = self.factory.makePerson()
    # Create 6 code import events for this machine, 3 on each side of
    # 30 days. Use the event set so the extra event data rows get
    # created too.
    event_set = getUtility(ICodeImportEventSet)
    for age in (35, 33, 31, 29, 27, 15):
        event_set.newOnline(
            machine, user=requester, message='Hello',
            _date_created=(now - timedelta(days=age)))
    transaction.commit()

    # Run the garbage collector.
    self.runDaily()

    # Only the three most recent events are left.
    events = list(machine.events)
    self.assertEqual(3, len(events))
    # We now have no CodeImportEvents older than 30 days.
    self.failUnless(
        store.find(
            Min(CodeImportEvent.date_created)).one().replace(tzinfo=UTC)
        >= now - timedelta(days=30))

def test_store_disconnected_after_request_handled_logs_oops(self):
    # Bug #504291 was that a Store was being left in a disconnected
    # state after a request, causing subsequent requests handled by that
    # thread to fail. We detect this state in endRequest and log an
    # OOPS to help track down the trigger.
    request = LaunchpadTestRequest()
    publication = WebServicePublication(None)
    dbadapter.set_request_started()

    # Disconnect a store.
    store = IMasterStore(EmailAddress)
    store._connection._state = STATE_DISCONNECTED

    # Invoke the endRequest hook.
    publication.endRequest(request, None)

    self.assertEqual(1, len(self.oopses))
    oops = self.oopses[0]

    # Ensure the OOPS mentions the correct exception.
    self.assertStartsWith(oops['value'], "Bug #504291")

    # Ensure the store has been rolled back and is in a usable state.
    self.assertEqual(store._connection._state, STATE_RECONNECT)
    store.find(EmailAddress).first()  # Confirms the Store is working.

def test_headers(self):
    client = LibrarianClient()

    # Upload a file so we can retrieve it.
    sample_data = 'blah'
    file_alias_id = client.addFile(
        'sample', len(sample_data), StringIO(sample_data),
        contentType='text/plain')
    url = client.getURLForAlias(file_alias_id)

    # Change the date_created to a known value that doesn't match
    # the disk timestamp. The timestamp on disk cannot be trusted.
    file_alias = IMasterStore(LibraryFileAlias).get(
        LibraryFileAlias, file_alias_id)
    file_alias.date_created = datetime(
        2001, 1, 30, 13, 45, 59, tzinfo=pytz.utc)

    # Commit so the file is available from the Librarian.
    self.commit()

    # Fetch the file via HTTP, recording the interesting headers.
    result = urlopen(url)
    last_modified_header = result.info()['Last-Modified']
    cache_control_header = result.info()['Cache-Control']

    # URLs point to the same content for ever, so we have a hardcoded
    # 1 year max-age cache policy.
    self.failUnlessEqual(cache_control_header, 'max-age=31536000, public')

    # And we should have a correct Last-Modified header too.
    self.failUnlessEqual(
        last_modified_header, 'Tue, 30 Jan 2001 13:45:59 GMT')

def iterReady(cls):
    """See `IJobSource`."""
    store = IMasterStore(QuestionJob)
    jobs = store.find(
        QuestionJob,
        And(QuestionJob.job_type == cls.class_job_type,
            QuestionJob.job_id.is_in(Job.ready_jobs)))
    return (cls(job) for job in jobs)

def getRecentBuilds(cls, requester, recipe, distroseries, _now=None):
    if _now is None:
        _now = datetime.now(pytz.UTC)
    store = IMasterStore(SourcePackageRecipeBuild)
    old_threshold = _now - timedelta(days=1)
    return store.find(
        cls,
        cls.distroseries_id == distroseries.id,
        cls.requester_id == requester.id,
        cls.recipe_id == recipe.id,
        cls.date_created > old_threshold)

def new(cls, job_type, status=BuildStatus.NEEDSBUILD, date_created=None,
        builder=None, archive=None):
    """See `IBuildFarmJobSource`."""
    build_farm_job = BuildFarmJob(
        job_type, status, date_created, builder, archive)
    store = IMasterStore(BuildFarmJob)
    store.add(build_farm_job)
    return build_farm_job

def iterReady(cls):
    """Iterate through all ready ProductJobs."""
    store = IMasterStore(ProductJob)
    jobs = store.find(
        ProductJob,
        And(ProductJob.job_type == cls.class_job_type,
            ProductJob.job_id.is_in(Job.ready_jobs)))
    return (cls(job) for job in jobs)

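# A hedged consumer sketch for the iterReady() pattern above: a runner
# drains the generator and runs each job inside the transaction.  The
# class name ProductJobDerived and the bare run() call are
# illustrative; a production runner also manages job status
# transitions and error handling.
for derived_job in ProductJobDerived.iterReady():
    derived_job.run()
transaction.commit()
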
def removeAssociation(self, server_url, handle):
    """See `OpenIDStore`."""
    store = IMasterStore(self.Association)
    assoc = store.get(self.Association, (
        server_url.decode('UTF-8'), handle.decode('ASCII')))
    if assoc is None:
        return False
    store.remove(assoc)
    return True

def _getOldestLiveRequest(self):
    """Return the oldest live request on the master store.

    Due to replication lag, the master store is always a little
    ahead of the slave store that exports come from.
    """
    master_store = IMasterStore(POExportRequest)
    sorted_by_id = master_store.find(POExportRequest).order_by(
        POExportRequest.id)
    return sorted_by_id.first()

def test_useNonce(self):
    timestamp = time.time()

    # The nonce can only be used once.
    self.assertEqual(
        self.store.useNonce("server-url", timestamp, "salt"), True)

    storm_store = IMasterStore(self.store.Nonce)
    new_nonce = storm_store.get(
        self.store.Nonce, (u"server-url", timestamp, u"salt"))
    self.assertIsNot(None, new_nonce)

    self.assertEqual(
        self.store.useNonce("server-url", timestamp, "salt"), False)
    self.assertEqual(
        self.store.useNonce("server-url", timestamp, "salt"), False)

def new(self, distribution, root_dir, base_url, copy_base_url):
    """Make and return a new `PublisherConfig`."""
    store = IMasterStore(PublisherConfig)
    pubconf = PublisherConfig()
    pubconf.distribution = distribution
    pubconf.root_dir = root_dir
    pubconf.base_url = base_url
    pubconf.copy_base_url = copy_base_url
    store.add(pubconf)
    return pubconf

def pruneRevisionCache(limit):
    """See `IRevisionSet`."""
    # Storm doesn't support removing a limited result set:
    #   FeatureError: Can't remove a sliced result set
    store = IMasterStore(RevisionCache)
    epoch = datetime.now(tz=pytz.UTC) - timedelta(days=30)
    subquery = Select(
        [RevisionCache.id],
        RevisionCache.revision_date < epoch,
        limit=limit)
    store.find(RevisionCache, RevisionCache.id.is_in(subquery)).remove()

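# A self-contained sketch of the same workaround against an in-memory
# SQLite database (the Snapshot table is hypothetical): remove() on a
# sliced result set raises FeatureError, so the LIMIT lives in an id
# subquery instead.
from storm.expr import Select
from storm.locals import Int, Store, create_database

class Snapshot(object):
    __storm_table__ = 'snapshot'
    id = Int(primary=True)
    age = Int()

store = Store(create_database('sqlite:'))
store.execute(
    'CREATE TABLE snapshot (id INTEGER PRIMARY KEY, age INTEGER)')
for age in (10, 20, 30, 40, 50):
    snapshot = Snapshot()
    snapshot.age = age
    store.add(snapshot)
store.flush()

# Delete at most two of the three rows with age > 25.
subquery = Select([Snapshot.id], Snapshot.age > 25, limit=2)
store.find(Snapshot, Snapshot.id.is_in(subquery)).remove()
assert store.find(Snapshot).count() == 3
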
def newPackagesetUploader(self, archive, person, packageset,
                          explicit=False):
    """See `IArchivePermissionSet`."""
    packageset = self._nameToPackageset(packageset)
    store = IMasterStore(ArchivePermission)

    # First see whether we have a matching permission in the database
    # already.
    query = '''
        SELECT ap.id
        FROM archivepermission ap, teamparticipation tp
        WHERE
            ap.person = tp.team AND tp.person = ?
            AND ap.packageset = ? AND ap.archive = ?
    '''
    query = SQL(query, (person.id, packageset.id, archive.id))
    permissions = list(
        store.find(
            ArchivePermission, ArchivePermission.id.is_in(query)))
    if len(permissions) > 0:
        # Found permissions in the database; does the 'explicit' flag
        # have the requested value?
        conflicting = [permission for permission in permissions
                       if permission.explicit != explicit]
        if len(conflicting) > 0:
            # At least one permission with a conflicting 'explicit'
            # flag value exists already.
            cperm = conflicting[0]
            raise ValueError(
                "Permission for package set '%s' already exists for %s "
                "but with a different 'explicit' flag value (%s)." %
                (packageset.name, cperm.person.name, cperm.explicit))
        else:
            # No conflicts; does the requested permission exist already?
            existing = [permission for permission in permissions
                        if (permission.explicit == explicit and
                            permission.person == person and
                            permission.packageset == packageset)]
            assert len(existing) <= 1, (
                "Too many permissions for %s and %s" %
                (person.name, packageset.name))
            if len(existing) == 1:
                # The existing permission matches, just return it.
                return existing[0]

    # The requested permission does not exist yet. Insert it into the
    # database.
    permission = ArchivePermission(
        archive=archive, person=person, packageset=packageset,
        permission=ArchivePermissionType.UPLOAD, explicit=explicit)
    store.add(permission)

    return permission

def exists(owner, name):
    """See `ISourcePackageRecipeSource.new`."""
    store = IMasterStore(SourcePackageRecipe)
    recipe = store.find(
        SourcePackageRecipe,
        SourcePackageRecipe.owner == owner,
        SourcePackageRecipe.name == name).one()
    if recipe:
        return True
    else:
        return False

def cleanupAssociations(self):
    """See `OpenIDStore`."""
    store = IMasterStore(self.Association)
    now = int(time.time())
    expired = store.find(
        self.Association,
        self.Association.issued + self.Association.lifetime < now)
    count = expired.count()
    if count > 0:
        expired.remove()
    return count

def getMatchingDSD(self):
    """Find an existing `DistroSeriesDifference` for this difference."""
    spn_id = self.metadata["sourcepackagename"]
    parent_id = self.metadata["parent_series"]
    store = IMasterStore(DistroSeriesDifference)
    search = store.find(
        DistroSeriesDifference,
        DistroSeriesDifference.derived_series == self.derived_series,
        DistroSeriesDifference.parent_series_id == parent_id,
        DistroSeriesDifference.source_package_name_id == spn_id)
    return search.one()

def __call__(self, chunk_size):
    self.logger.info(
        "%s (limited to %d rows)",
        self.statement.splitlines()[0], chunk_size)
    store = IMasterStore(DistroSeries)
    result = store.execute(
        self.statement, (self.series.id, chunk_size,))
    self.done = (result.rowcount == 0)
    self.logger.info(
        "%d rows deleted (%s)", result.rowcount,
        ("done" if self.done else "not done"))
    store.commit()

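# __call__(chunk_size) above is one half of the tunable-loop contract
# used with Launchpad's loop tuners (see DBLoopTuner further down);
# isDone() is the other half that the tuner polls between chunks.  A
# hedged sketch of the minimal shape, where delete_some_rows is a
# hypothetical helper that returns the number of rows it removed:
class PruneLoop:
    done = False

    def isDone(self):
        return self.done

    def __call__(self, chunk_size):
        rows_deleted = delete_some_rows(int(chunk_size))
        self.done = (rows_deleted == 0)
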
def storeAssociation(self, server_url, association):
    """See `OpenIDStore`."""
    store = IMasterStore(self.Association)
    db_assoc = store.get(
        self.Association,
        (server_url.decode('UTF-8'),
         association.handle.decode('ASCII')))
    if db_assoc is None:
        db_assoc = self.Association(server_url, association)
        store.add(db_assoc)
    else:
        db_assoc.update(association)

def test_getAssociation_expired(self):
    lifetime = 600
    timestamp = int(time.time()) - 2 * lifetime
    self.store.storeAssociation(
        "server-url", Association(
            "handle", "secret", timestamp, lifetime, "HMAC-SHA1"))
    # The association is not returned because it is out of date.
    # Furthermore, it is removed from the database.
    assoc = self.store.getAssociation("server-url", "handle")
    self.assertEqual(assoc, None)
    store = IMasterStore(self.store.Association)
    db_assoc = store.get(
        self.store.Association, (u"server-url", u"handle"))
    self.assertEqual(db_assoc, None)

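# A hedged round-trip sketch of the association store exercised by the
# tests above.  It assumes the same test fixture, so 'self.store' is
# the store under test; the URL and handle values are illustrative.
def test_association_round_trip(self):
    assoc = Association(
        "handle", "secret", int(time.time()), 600, "HMAC-SHA1")
    self.store.storeAssociation("server-url", assoc)  # inserts
    self.store.storeAssociation("server-url", assoc)  # updates in place
    self.assertIsNot(
        None, self.store.getAssociation("server-url", "handle"))
    self.assertTrue(
        self.store.removeAssociation("server-url", "handle"))
    self.assertEqual(
        None, self.store.getAssociation("server-url", "handle"))
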
class MigrateCurrentFlagProcess:
    """Mark all translations as is_imported if they are is_current.

    Processes only translations for upstream projects, since Ubuntu
    source packages need no migration.
    """

    def __init__(self, transaction, logger=None):
        self.transaction = transaction
        self.logger = logger
        if logger is None:
            self.logger = logging.getLogger("migrate-current-flag")
        self.store = IMasterStore(Product)

    def getProductsWithTemplates(self):
        """Get `Product`s with any translation templates."""
        return (
            self.store.find(
                Product,
                POTemplate.productseriesID == ProductSeries.id,
                ProductSeries.productID == Product.id,
            )
            .group_by(Product)
            .having(Count(POTemplate.id) > 0)
        )

    def getCurrentNonimportedTranslations(self, product):
        """Get TranslationMessage.ids that need migration for a
        `product`."""
        return self.store.find(
            TranslationMessage.id,
            TranslationMessage.is_current_ubuntu == True,
            TranslationMessage.is_current_upstream == False,
            (TranslationMessage.potmsgsetID ==
                TranslationTemplateItem.potmsgsetID),
            TranslationTemplateItem.potemplateID == POTemplate.id,
            POTemplate.productseriesID == ProductSeries.id,
            ProductSeries.productID == product.id,
        ).config(distinct=True)

    def run(self):
        products_with_templates = list(self.getProductsWithTemplates())
        total_products = len(products_with_templates)
        if total_products == 0:
            self.logger.info("Nothing to do.")
        current_product = 0
        for product in products_with_templates:
            current_product += 1
            self.logger.info(
                "Migrating %s translations (%d of %d)..."
                % (product.name, current_product, total_products))
            tm_ids = self.getCurrentNonimportedTranslations(product)
            tm_loop = TranslationMessageImportedFlagUpdater(
                self.transaction, self.logger, tm_ids)
            DBLoopTuner(tm_loop, 5, minimum_chunk_size=100).run()
        self.logger.info("Done.")

def find_dsd_for(dsp, package):
    """Find `DistroSeriesDifference`.

    :param dsp: `DistroSeriesParent`.
    :param package: `SourcePackageName`.
    """
    store = IMasterStore(DistroSeriesDifference)
    return store.find(
        DistroSeriesDifference,
        DistroSeriesDifference.derived_series == dsp.derived_series,
        DistroSeriesDifference.parent_series == dsp.parent_series,
        DistroSeriesDifference.source_package_name == package)

def main(self):
    """Run UpdateDatabaseTableStats."""
    store = IMasterStore(Person)

    # The logic is in a stored procedure because we want to run
    # ps(1) on the database server rather than the host this script
    # is running on.
    self.logger.debug("Invoking update_database_stats()")
    store.execute("SELECT update_database_stats()", noresult=True)

    self.logger.debug("Committing")
    store.commit()

def deletePackagesetUploader(self, archive, person, packageset,
                             explicit=False):
    """See `IArchivePermissionSet`."""
    packageset = self._nameToPackageset(packageset)
    store = IMasterStore(ArchivePermission)

    # Do we have the permission the user wants removed in the database?
    permission = store.find(
        ArchivePermission, archive=archive, person=person,
        packageset=packageset,
        permission=ArchivePermissionType.UPLOAD,
        explicit=explicit).one()
    self._remove_permission(permission)

def findUnfinishedJobs(branch, since=None):
    """See `IRosettaUploadJobSource`."""
    store = IMasterStore(BranchJob)
    match = And(
        Job.id == BranchJob.jobID,
        BranchJob.branch == branch,
        BranchJob.job_type == BranchJobType.ROSETTA_UPLOAD,
        Job._status != JobStatus.COMPLETED,
        Job._status != JobStatus.FAILED)
    if since is not None:
        match = And(match, Job.date_created > since)
    jobs = store.using(BranchJob, Job).find((BranchJob), match)
    return jobs

def new(self, derived_series, parent_series, initialized,
        is_overlay=False, pocket=None, component=None, ordering=1):
    """Make and return a new `DistroSeriesParent`."""
    store = IMasterStore(DistroSeriesParent)
    dsp = DistroSeriesParent()
    dsp.derived_series = derived_series
    dsp.parent_series = parent_series
    dsp.initialized = initialized
    dsp.is_overlay = is_overlay
    dsp.pocket = pocket
    dsp.component = component
    dsp.ordering = ordering
    store.add(dsp)
    return dsp

def test_can_shutdown_slave_only(self):
    """Confirm that this TestCase's test infrastructure works as
    needed.
    """
    master_store = IMasterStore(Person)
    slave_store = ISlaveStore(Person)

    # Both Stores work when pgbouncer is up.
    master_store.get(Person, 1)
    slave_store.get(Person, 1)

    # Slave Store breaks when pgbouncer is torn down. Master Store
    # is fine.
    self.pgbouncer_fixture.stop()
    master_store.get(Person, 2)
    self.assertRaises(DisconnectionError, slave_store.get, Person, 2)

def get_object_from_master_store(obj):
    """Return a copy of the given object retrieved from its master Store.

    Returns the object if it already comes from the relevant master
    Store.

    Registered as a trusted adapter, so if the input is security
    wrapped, so is the result. Otherwise an unwrapped object is
    returned.
    """
    master_store = IMasterStore(obj)
    if master_store is not Store.of(obj):
        obj = master_store.get(obj.__class__, obj.id)
        if obj is None:
            return None
        alsoProvides(obj, IMasterObject)
    return obj

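# A hedged usage sketch for the adapter above: promote an object that
# may have been loaded from a slave store before writing to it (the
# attribute assignment is illustrative).
person = ISlaveStore(Person).get(Person, 1)
person = get_object_from_master_store(person)
if person is not None:
    person.displayname = u'Renamed'  # now safely bound to the master
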
def new(cls, distroseries, recipe, requester, archive, pocket=None,
        date_created=None, duration=None):
    """See `ISourcePackageRecipeBuildSource`."""
    store = IMasterStore(SourcePackageRecipeBuild)
    if pocket is None:
        pocket = PackagePublishingPocket.RELEASE
    if date_created is None:
        date_created = UTC_NOW
    build_farm_job = getUtility(IBuildFarmJobSource).new(
        cls.job_type, BuildStatus.NEEDSBUILD, date_created, None,
        archive)
    spbuild = cls(
        build_farm_job, distroseries, recipe, requester, archive,
        pocket, date_created)
    store.add(spbuild)
    return spbuild

def new(registrant, owner, name, recipe, description, distroseries=None,
        daily_build_archive=None, build_daily=False,
        date_created=DEFAULT):
    """See `ISourcePackageRecipeSource.new`."""
    store = IMasterStore(SourcePackageRecipe)
    sprecipe = SourcePackageRecipe()
    builder_recipe, recipe_branch_type = (
        getUtility(IRecipeBranchSource).getParsedRecipe(recipe))
    SourcePackageRecipeData(builder_recipe, recipe_branch_type, sprecipe)
    sprecipe.registrant = registrant
    sprecipe.owner = owner
    sprecipe.name = name
    if distroseries is not None:
        for distroseries_item in distroseries:
            sprecipe.distroseries.add(distroseries_item)
    sprecipe.description = description
    sprecipe.daily_build_archive = daily_build_archive
    sprecipe.build_daily = build_daily
    sprecipe.date_created = date_created
    sprecipe.date_last_modified = date_created
    store.add(sprecipe)
    return sprecipe

def test_slave_shutdown_during_transaction(self):
    '''Slave is shutdown while running, but we can recover.'''
    master_store = IMasterStore(Person)
    slave_store = ISlaveStore(Person)
    self.assertIsNot(master_store, slave_store)

    self.pgbouncer_fixture.stop()

    # The transaction fails if the slave store is used. Robust
    # processes will handle this and retry (even if that just means
    # exiting and waiting for the next scheduled invocation).
    self.assertRaises(DisconnectionError, slave_store.get, Person, 1)

    transaction.abort()

    # But in the next transaction, we get the master Store if we ask
    # for the slave Store, so we can continue.
    master_store = IMasterStore(Person)
    slave_store = ISlaveStore(Person)
    self.assertIs(master_store, slave_store)

def create(cls, distroseries, sourcepackagerelease, bug_ids):
    """See `IProcessAcceptedBugsJobSource`."""
    assert distroseries is not None, "No distroseries specified."
    assert sourcepackagerelease is not None, (
        "No sourcepackagerelease specified.")
    assert sourcepackagerelease.changelog_entry is not None, (
        "New source uploads should have a changelog.")
    assert bug_ids, "No bug IDs specified."
    job = ProcessAcceptedBugsJob(
        distroseries, sourcepackagerelease, bug_ids)
    IMasterStore(ProcessAcceptedBugsJob).add(job)
    job.celeryRunOnCommit()
    return job

def test_slave_shutdown_between_transactions(self):
    '''Slave is shutdown in between transactions.'''
    master_store = IMasterStore(Person)
    slave_store = ISlaveStore(Person)
    self.assertIsNot(master_store, slave_store)

    transaction.abort()
    self.pgbouncer_fixture.stop()

    # The process doesn't notice the slave going down, and things
    # will fail the next time the slave is used.
    master_store = IMasterStore(Person)
    slave_store = ISlaveStore(Person)
    self.assertIsNot(master_store, slave_store)
    self.assertRaises(DisconnectionError, slave_store.get, Person, 1)

    # But now that it has been discovered that the socket is no longer
    # connected to anything, next transaction we get a master Store
    # when we ask for a slave.
    master_store = IMasterStore(Person)
    slave_store = ISlaveStore(Person)
    self.assertIs(master_store, slave_store)

def test_headers(self):
    client = LibrarianClient()

    # Upload a file so we can retrieve it.
    sample_data = b'blah'
    file_alias_id = client.addFile(
        'sample', len(sample_data), BytesIO(sample_data),
        contentType='text/plain')
    url = client.getURLForAlias(file_alias_id)

    # Change the date_created to a known value that doesn't match
    # the disk timestamp. The timestamp on disk cannot be trusted.
    file_alias = IMasterStore(LibraryFileAlias).get(
        LibraryFileAlias, file_alias_id)
    file_alias.date_created = datetime(
        2001, 1, 30, 13, 45, 59, tzinfo=pytz.utc)

    # Commit so the file is available from the Librarian.
    self.commit()

    # Fetch the file via HTTP, recording the interesting headers.
    response = requests.get(url)
    response.raise_for_status()
    last_modified_header = response.headers['Last-Modified']
    cache_control_header = response.headers['Cache-Control']

    # URLs point to the same content for ever, so we have a hardcoded
    # 1 year max-age cache policy.
    self.assertEqual(cache_control_header, 'max-age=31536000, public')

    # And we should have a correct Last-Modified header too.
    self.assertEqual(
        last_modified_header, 'Tue, 30 Jan 2001 13:45:59 GMT')

def new(self, requester, livefs, archive, distro_arch_series, pocket,
        unique_key=None, metadata_override=None, version=None,
        date_created=DEFAULT):
    """See `ILiveFSBuildSet`."""
    store = IMasterStore(LiveFSBuild)
    build_farm_job = getUtility(IBuildFarmJobSource).new(
        LiveFSBuild.job_type, BuildStatus.NEEDSBUILD, date_created,
        None, archive)
    livefsbuild = LiveFSBuild(
        build_farm_job, requester, livefs, archive, distro_arch_series,
        pocket, distro_arch_series.processor,
        not distro_arch_series.processor.supports_nonvirtualized
        or livefs.require_virtualized or archive.require_virtualized,
        unique_key, metadata_override, version, date_created)
    store.add(livefsbuild)
    return livefsbuild

def delete(sourcepackage, pocket):
    """Remove the SeriesSourcePackageBranch for sourcepackage and pocket.

    :param sourcepackage: An `ISourcePackage`.
    :param pocket: A `PackagePublishingPocket` enum item.
    """
    distroseries = sourcepackage.distroseries
    sourcepackagename = sourcepackage.sourcepackagename
    return IMasterStore(SeriesSourcePackageBranch).find(
        SeriesSourcePackageBranch,
        SeriesSourcePackageBranch.distroseries == distroseries.id,
        SeriesSourcePackageBranch.sourcepackagename ==
            sourcepackagename.id,
        SeriesSourcePackageBranch.pocket == pocket).remove()

def new(distroseries, pocket, sourcepackagename, branch, registrant,
        date_created=None):
    """Link a source package in a distribution suite to a branch."""
    if date_created is None:
        date_created = datetime.now(pytz.UTC)
    sspb = SeriesSourcePackageBranch(
        distroseries, pocket, sourcepackagename, branch, registrant,
        date_created)
    IMasterStore(SeriesSourcePackageBranch).add(sspb)
    return sspb

def test_restricted_file_headers(self):
    fileAlias, url = self.get_restricted_file_and_public_url()
    token = TimeLimitedToken.allocate(url)

    # Change the date_created to a known value for testing.
    file_alias = IMasterStore(LibraryFileAlias).get(
        LibraryFileAlias, fileAlias)
    file_alias.date_created = datetime(
        2001, 1, 30, 13, 45, 59, tzinfo=pytz.utc)

    # Commit the update.
    self.commit()

    # Fetch the file via HTTP, recording the interesting headers.
    response = requests.get(url, params={"token": token})
    last_modified_header = response.headers['Last-Modified']
    cache_control_header = response.headers['Cache-Control']

    # No caching for restricted files.
    self.assertEqual(cache_control_header, 'max-age=0, private')

    # And we should have a correct Last-Modified header too.
    self.assertEqual(
        last_modified_header, 'Tue, 30 Jan 2001 13:45:59 GMT')

def find_waiting_jobs(derived_series, sourcepackagename, parent_series):
    """Look for pending `DistroSeriesDifference` jobs on a package."""
    # Look for identical pending jobs. This compares directly on
    # the metadata string. It's fragile, but this is only an
    # optimization. It's not actually disastrous to create
    # redundant jobs occasionally.
    json_metadata = make_metadata(sourcepackagename.id, parent_series.id)

    # Use the master store because we don't want outdated information
    # here.
    store = IMasterStore(DistributionJob)
    candidates = store.find(
        DistributionJob,
        DistributionJob.job_type ==
            DistributionJobType.DISTROSERIESDIFFERENCE,
        DistributionJob.distroseries == derived_series,
        DistributionJob.metadata == json_metadata,
        DistributionJob.job_id.is_in(Job.ready_jobs))

    return [
        job
        for job in candidates
        if job.metadata["parent_series"] == parent_series.id]

def acquireRevisionAuthors(self, author_names):
    """Find or create the RevisionAuthors with the specified names.

    A name may be any arbitrary string, but if it is an email-id, and
    its email address is a verified email address, it will be
    automatically linked to the corresponding Person.

    Email-ids come in two major forms:
        "Foo Bar" <*****@*****.**>
        [email protected] (Foo Bar)

    :return: a dict of name -> RevisionAuthor
    """
    store = IMasterStore(Revision)
    author_names = set(author_names)
    authors = {}
    for author in store.find(
            RevisionAuthor, RevisionAuthor.name.is_in(author_names)):
        authors[author.name] = author
    missing = author_names - set(authors.keys())
    # Create the missing RevisionAuthors.
    for name in missing:
        authors[name] = self._createRevisionAuthor(name)
    return authors

def _blockForLongRunningTransactions(self):
    """If there are long running transactions, block to avoid making
    bloat worse."""
    if self.long_running_transaction is None:
        return
    from lp.services.librarian.model import LibraryFileAlias
    store = IMasterStore(LibraryFileAlias)
    msg_counter = 0
    while not self._isTimedOut():
        results = list(store.execute(("""
            SELECT
                CURRENT_TIMESTAMP - xact_start, %(pid)s,
                usename, datname, %(query)s
            FROM activity()
            WHERE
                xact_start < CURRENT_TIMESTAMP - interval '%%f seconds'
                AND datname = current_database()
            ORDER BY xact_start LIMIT 4
            """ % activity_cols(store))
            % self.long_running_transaction).get_all())
        if not results:
            break
        # Check for long running transactions every 10 seconds, but
        # only report every 10 minutes to avoid log spam.
        msg_counter += 1
        if msg_counter % 60 == 1:
            for runtime, pid, usename, datname, query in results:
                self.log.info(
                    "Blocked on %s old xact %s@%s/%d - %s.",
                    runtime, usename, datname, pid, query)
            self.log.info("Sleeping for up to 10 minutes.")
        # Don't become a long running transaction!
        transaction.abort()
        self._sleep(10)

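# A minimal sketch of the double interpolation in the query above: the
# first '%' pass fills in the activity_cols() column names and turns
# '%%f' into '%f'; the second pass substitutes the threshold in
# seconds.  The values here are illustrative.
template = "SELECT %(pid)s ... interval '%%f seconds'"
stage_one = template % {'pid': 'procpid'}
# -> "SELECT procpid ... interval '%f seconds'"
final = stage_one % 120.0
# -> "SELECT procpid ... interval '120.000000 seconds'"
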
def new(self, rationale, displayname, openid_identifier=None):
    """See `IAccountSet`."""
    account = Account(
        displayname=displayname, creation_rationale=rationale)

    # Create an OpenIdIdentifier record if requested.
    if openid_identifier is not None:
        assert isinstance(openid_identifier, unicode)
        identifier = OpenIdIdentifier()
        identifier.account = account
        identifier.identifier = openid_identifier
        IMasterStore(OpenIdIdentifier).add(identifier)

    return account

def test_load_with_store(self):
    # load() can use an alternative store.
    db_object = self.factory.makeComponent()
    # Commit so the database object is available in both master
    # and slave stores.
    transaction.commit()
    # Master store.
    master_store = IMasterStore(db_object)
    [db_object_from_master] = bulk.load(
        Component, [db_object.id], store=master_store)
    self.assertEqual(
        Store.of(db_object_from_master), master_store)
    # Slave store.
    slave_store = ISlaveStore(db_object)
    [db_object_from_slave] = bulk.load(
        Component, [db_object.id], store=slave_store)
    self.assertEqual(
        Store.of(db_object_from_slave), slave_store)

def generateIncrementalDiff(self, old_revision, new_revision, diff=None):
    """See `IBranchMergeProposal`."""
    if diff is None:
        source_branch = self.source_branch.getBzrBranch()
        ignore_branches = [self.target_branch.getBzrBranch()]
        if self.prerequisite_branch is not None:
            ignore_branches.append(
                self.prerequisite_branch.getBzrBranch())
        diff = Diff.generateIncrementalDiff(
            old_revision, new_revision, source_branch, ignore_branches)
    incremental_diff = IncrementalDiff()
    incremental_diff.diff = diff
    incremental_diff.branch_merge_proposal = self
    incremental_diff.old_revision = old_revision
    incremental_diff.new_revision = new_revision
    IMasterStore(IncrementalDiff).add(incremental_diff)
    return incremental_diff

def test_storeAssociation_update_existing(self):
    self.store.storeAssociation(
        'server-url', Association(
            'handle', 'secret', 42, 600, 'HMAC-SHA1'))
    db_assoc = IMasterStore(self.store.Association).get(
        self.store.Association, (u'server-url', u'handle'))
    self.assertNotEqual(db_assoc, None)

    # Now update the association with new information.
    self.store.storeAssociation(
        'server-url', Association(
            'handle', 'secret2', 420, 900, 'HMAC-SHA256'))
    self.assertEqual(db_assoc.secret, 'secret2')
    self.assertEqual(db_assoc.issued, 420)
    self.assertEqual(db_assoc.lifetime, 900)
    self.assertEqual(db_assoc.assoc_type, u'HMAC-SHA256')

def new(distroseries, pocket, sourcepackagename, branch, registrant,
        date_created=None):
    """Link a source package in a distribution suite to a branch."""
    # Circular import.
    from lp.soyuz.model.distributionsourcepackagecache import (
        DistributionSourcePackageCache,
        )
    if date_created is None:
        date_created = datetime.now(pytz.UTC)
    sspb = SeriesSourcePackageBranch(
        distroseries, pocket, sourcepackagename, branch, registrant,
        date_created)
    IMasterStore(SeriesSourcePackageBranch).add(sspb)
    DistributionSourcePackageCache.updateOfficialBranches(
        distroseries.distribution, [sourcepackagename])
    return sspb

def test_restricted_with_macaroon(self):
    fileAlias, url = self.get_restricted_file_and_public_url()
    lfa = IMasterStore(LibraryFileAlias).get(LibraryFileAlias, fileAlias)
    with dbuser('testadmin'):
        build = self.factory.makeBinaryPackageBuild(
            archive=self.factory.makeArchive(private=True))
        naked_build = removeSecurityProxy(build)
        self.factory.makeSourcePackageReleaseFile(
            sourcepackagerelease=naked_build.source_package_release,
            library_file=lfa)
        naked_build.updateStatus(BuildStatus.BUILDING)
        issuer = getUtility(IMacaroonIssuer, "binary-package-build")
        macaroon = removeSecurityProxy(issuer).issueMacaroon(build)
    self.commit()
    response = requests.get(url, auth=("", macaroon.serialize()))
    response.raise_for_status()
    self.assertEqual(b"a" * 12, response.content)

def iterReady(klass):
    """Iterate through all ready BranchMergeProposalJobs."""
    from lp.code.model.branch import Branch
    jobs = IMasterStore(Branch).find(
        (BranchMergeProposalJob),
        And(BranchMergeProposalJob.job_type == klass.class_job_type,
            BranchMergeProposalJob.job == Job.id,
            Job.id.is_in(Job.ready_jobs),
            BranchMergeProposalJob.branch_merge_proposal ==
                BranchMergeProposal.id,
            BranchMergeProposal.source_branch == Branch.id,
            # A proposal isn't considered ready if it has no revisions,
            # or if it is hosted but pending a mirror.
            Branch.revision_count > 0,
            Or(Branch.next_mirror_time == None,
               Branch.branch_type != BranchType.HOSTED)))
    return (klass(job) for job in jobs)

def create_job(derived_series, sourcepackagename, parent_series):
    """Create a `DistroSeriesDifferenceJob` for a given source package.

    :param derived_series: A `DistroSeries` that is assumed to be
        derived from another one.
    :param sourcepackagename: The `SourcePackageName` whose publication
        history has changed.
    :param parent_series: A `DistroSeries` that is a parent of
        `derived_series`.  The difference is between the versions of
        `sourcepackagename` in `parent_series` and `derived_series`.
    """
    db_job = DistributionJob(
        distribution=derived_series.distribution,
        distroseries=derived_series,
        job_type=DistributionJobType.DISTROSERIESDIFFERENCE,
        metadata=make_metadata(sourcepackagename.id, parent_series.id))
    IMasterStore(DistributionJob).add(db_job)
    job = DistroSeriesDifferenceJob(db_job)
    job.celeryRunOnCommit()
    return job

def unscheduleDeletion(archive_files):
    """See `IArchiveFileSet`."""
    clauses = [
        ArchiveFile.id.is_in(
            set(archive_file.id for archive_file in archive_files)),
        ArchiveFile.library_file == LibraryFileAlias.id,
        LibraryFileAlias.content == LibraryFileContent.id,
        ]
    return_columns = [
        ArchiveFile.container, ArchiveFile.path,
        LibraryFileContent.sha256,
        ]
    return list(IMasterStore(ArchiveFile).execute(Returning(
        BulkUpdate(
            {ArchiveFile.scheduled_deletion_date: None},
            table=ArchiveFile,
            values=[LibraryFileAlias, LibraryFileContent],
            where=And(*clauses)),
        columns=return_columns)))

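# The Returning/BulkUpdate combination is a Launchpad extension to
# Storm's expression compiler; as an assumption based on the clauses
# above, the statement it compiles to has roughly this shape:
#
#   UPDATE ArchiveFile
#   SET scheduled_deletion_date = NULL
#   FROM LibraryFileAlias, LibraryFileContent
#   WHERE <clauses>
#   RETURNING container, path, sha256
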
def new(self, rationale, displayname, openid_identifier=None,
        status=AccountStatus.NOACCOUNT):
    """See `IAccountSet`."""
    assert status in (AccountStatus.NOACCOUNT, AccountStatus.PLACEHOLDER)
    account = Account(
        displayname=displayname, creation_rationale=rationale,
        status=status)

    # Create an OpenIdIdentifier record if requested.
    if openid_identifier is not None:
        assert isinstance(openid_identifier, unicode)
        identifier = OpenIdIdentifier()
        identifier.account = account
        identifier.identifier = openid_identifier
        IMasterStore(OpenIdIdentifier).add(identifier)

    return account

def _add_or_remove(self, data, handlers):
    """Add or remove source package names or package sets from this one.

    :param data: an iterable with `ISourcePackageName` XOR `IPackageset`
        instances
    :param handlers: a sequence of 2-tuples where the first member is
        the interface a datum should implement and the second is the
        handler to invoke in that case.
    """
    store = IMasterStore(Packageset)
    if not isinstance(data, (list, tuple)):
        data = list(data)
    count = len(data)
    for iface, handler in handlers:
        iface_data = [datum for datum in data if iface.providedBy(datum)]
        if len(iface_data) > 0:
            handler(iface_data, store)
            count -= len(iface_data)
    if count != 0:
        raise AssertionError("Not all data was handled.")

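# A hedged caller sketch for _add_or_remove(); the handler method
# names are hypothetical, but each handler receives the matching
# subset of the data plus the store, per the contract above.
self._add_or_remove(
    data,
    ((ISourcePackageName, self._addSourcePackageNames),
     (IPackageset, self._addDirectSuccessors)))
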
def new(self, name, description, owner, distroseries=None,
        related_set=None):
    """See `IPackagesetSet`."""
    store = IMasterStore(Packageset)

    packagesetgroup = None
    if related_set is not None:
        # Use the packagesetgroup of the `related_set`.
        packagesetgroup = related_set.packagesetgroup
    else:
        # We create the related internal PackagesetGroup for this
        # packageset so that we can later see related package sets
        # across distroseries.
        packagesetgroup = PackagesetGroup()
        packagesetgroup.owner = owner
        store.add(packagesetgroup)

    if distroseries is None:
        ubuntu = getUtility(IDistributionSet).getByName('ubuntu')
        distroseries = ubuntu.currentseries

    packageset = Packageset()
    packageset.packagesetgroup = packagesetgroup
    packageset.name = name
    packageset.description = description
    packageset.owner = owner
    packageset.distroseries = distroseries
    store.add(packageset)

    # We need to ensure that the cached statements are flushed so that
    # the duplicate name constraint gets triggered here.
    try:
        store.flush()
    except IntegrityError:
        raise DuplicatePackagesetName()

    return packageset

def searchByFingerprintRequesterAndType(self, fingerprint, requester,
                                        type, consumed=None):
    """See ILoginTokenSet."""
    conditions = And(
        LoginToken.fingerprint == fingerprint,
        LoginToken.requester == requester,
        LoginToken.tokentype == type)

    if consumed is True:
        conditions = And(conditions, LoginToken.date_consumed != None)
    elif consumed is False:
        conditions = And(conditions, LoginToken.date_consumed == None)
    else:
        assert consumed is None, (
            "consumed should be one of {True, False, None}. Got '%s'."
            % consumed)

    # It's important to always use the MASTER_FLAVOR store here
    # because we don't want replication lag to cause a 404 error.
    return IMasterStore(LoginToken).find(LoginToken, conditions)

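# A hedged sketch of the read-after-write hazard that comment guards
# against; the factory call and ids are illustrative.
person = factory.makePerson()                 # row written on the master
transaction.commit()
ISlaveStore(Person).get(Person, person.id)    # replication may lag: None
IMasterStore(Person).get(Person, person.id)   # always sees the new row
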
def test_gen_reload_queries_with_mixed_stores(self):
    # gen_reload_queries() returns one query for each distinct
    # store, even for the same object type.
    db_object = self.factory.makeComponent()
    db_object_type = bulk.get_type(db_object)
    # Commit so the database object is available in both master
    # and slave stores.
    transaction.commit()
    db_objects = set(
        (IMasterStore(db_object).get(db_object_type, db_object.id),
         ISlaveStore(db_object).get(db_object_type, db_object.id)))
    db_queries = list(bulk.gen_reload_queries(db_objects))
    self.failUnlessEqual(2, len(db_queries))
    db_objects_loaded = set()
    for db_query in db_queries:
        objects = set(db_query)
        # None of these objects should have been loaded before.
        self.failUnlessEqual(
            set(), objects.intersection(db_objects_loaded))
        db_objects_loaded.update(objects)
    self.failUnlessEqual(db_objects, db_objects_loaded)

def new(self, name, description, owner, distroseries, related_set=None):
    """See `IPackagesetSet`."""
    store = IMasterStore(Packageset)

    try:
        self.getByName(distroseries, name)
        raise DuplicatePackagesetName
    except NoSuchPackageSet:
        pass

    packagesetgroup = None
    if related_set is not None:
        # Use the packagesetgroup of the `related_set`.
        packagesetgroup = related_set.packagesetgroup
    else:
        # We create the related internal PackagesetGroup for this
        # packageset so that we can later see related package sets
        # across distroseries.
        packagesetgroup = PackagesetGroup()
        packagesetgroup.owner = owner
        store.add(packagesetgroup)

    packageset = Packageset()
    packageset.packagesetgroup = packagesetgroup
    packageset.name = name
    packageset.description = description
    packageset.owner = owner
    packageset.distroseries = distroseries
    store.add(packageset)
    # Explicit flush, since it's common to use Packageset.id
    # immediately after creation.
    store.flush()
    return packageset

def create(self, name, size, file, contentType, expires=None,
           debugID=None, restricted=False):
    """See `ILibraryFileAliasSet`"""
    if restricted:
        client = getUtility(IRestrictedLibrarianClient)
    else:
        client = getUtility(ILibrarianClient)
    try:
        fid = client.addFile(
            name, size, file, contentType, expires, debugID)
    except IntegrityError:
        raise InvalidFilename("Filename cannot contain slashes.")
    lfa = IMasterStore(LibraryFileAlias).find(
        LibraryFileAlias, LibraryFileAlias.id == fid).one()
    assert lfa is not None, "client.addFile didn't!"
    return lfa
