def test_slave_reconnect_after_outage(self):
    '''The slave is again used once it becomes available.'''
    # With the slave down, both utilities resolve to the same (master)
    # Store object.
    self.pgbouncer_fixture.stop()
    self.assertIs(IMasterStore(Person), ISlaveStore(Person))
    # Restore the slave and start a fresh transaction so the stores are
    # re-selected.
    self.pgbouncer_fixture.start()
    transaction.abort()
    # The slave Store is distinct from the master again.
    self.assertIsNot(IMasterStore(Person), ISlaveStore(Person))
def new(registrant, owner, name, recipe, description, distroseries=None,
        daily_build_archive=None, build_daily=False, date_created=DEFAULT):
    """See `ISourcePackageRecipeSource.new`."""
    store = IMasterStore(SourcePackageRecipe)
    instance = SourcePackageRecipe()
    # Parse the recipe text and attach the structured recipe data to the
    # new recipe object.
    parsed = SourcePackageRecipeData.getParsedRecipe(recipe)
    SourcePackageRecipeData(parsed, instance)
    instance.registrant = registrant
    instance.owner = owner
    instance.name = name
    if distroseries is not None:
        for series in distroseries:
            instance.distroseries.add(series)
    instance.description = description
    instance.daily_build_archive = daily_build_archive
    instance.build_daily = build_daily
    # A brand-new recipe's last-modified time equals its creation time.
    instance.date_created = date_created
    instance.date_last_modified = date_created
    store.add(instance)
    return instance
def test_missing_storage(self):
    # When a file exists in the DB but is missing from disk, a 404
    # is just confusing. It's an internal error, so 500 instead.
    client = LibrarianClient()
    # Upload a file so we can retrieve it.
    sample_data = b'blah'
    file_alias_id = client.addFile(
        'sample', len(sample_data), BytesIO(sample_data),
        contentType='text/plain')
    url = client.getURLForAlias(file_alias_id)
    # Change the date_created to a known value that doesn't match
    # the disk timestamp. The timestamp on disk cannot be trusted.
    # NOTE(review): despite the comment above, no assignment to
    # date_created is visible here — only a fetch of the alias row.
    # Confirm against upstream whether a mutation went missing.
    file_alias = IMasterStore(LibraryFileAlias).get(
        LibraryFileAlias, file_alias_id)
    # Commit so the file is available from the Librarian.
    self.commit()
    # Fetch the file via HTTP; must succeed while the file is on disk.
    response = requests.get(url)
    response.raise_for_status()
    # Delete the on-disk file.
    storage = LibrarianStorage(config.librarian_server.root, None)
    os.remove(storage._fileLocation(file_alias.contentID))
    # The URL now 500s, since the DB says it should exist.
    response = requests.get(url)
    self.assertEqual(500, response.status_code)
    # Error responses must not carry cache headers that could pin the
    # failure in intermediaries.
    self.assertIn('Server', response.headers)
    self.assertNotIn('Last-Modified', response.headers)
    self.assertNotIn('Cache-Control', response.headers)
def new(self, registrant, owner, distro_series, name, metadata,
        require_virtualized=True, date_created=DEFAULT):
    """See `ILiveFSSet`."""
    # Registrants may only create live filesystems for owners they
    # belong to (or are).
    if not registrant.inTeam(owner):
        if owner.is_team:
            message = "%s is not a member of %s." % (
                registrant.displayname, owner.displayname)
        else:
            message = "%s cannot create live filesystems owned by %s." % (
                registrant.displayname, owner.displayname)
        raise LiveFSNotOwner(message)
    # Names are unique per (owner, series).
    if self.exists(owner, distro_series, name):
        raise DuplicateLiveFSName
    store = IMasterStore(LiveFS)
    livefs = LiveFS(
        registrant, owner, distro_series, name, metadata,
        require_virtualized, date_created)
    store.add(livefs)
    return livefs
def test_store_disconnected_after_request_handled_logs_oops(self):
    # Bug #504291 was that a Store was being left in a disconnected
    # state after a request, causing subsequent requests handled by that
    # thread to fail. We detect this state in endRequest and log an
    # OOPS to help track down the trigger.
    request = LaunchpadTestRequest()
    publication = WebServicePublication(None)
    dbadapter.set_request_started()
    # Disconnect a store by forcing its private connection state.
    store = IMasterStore(EmailAddress)
    store._connection._state = STATE_DISCONNECTED
    # Invoke the endRequest hook, which should notice the broken store.
    publication.endRequest(request, None)
    # Exactly one OOPS is recorded.
    self.assertEqual(1, len(self.oopses))
    oops = self.oopses[0]
    # Ensure the OOPS mentions the correct exception.
    self.assertStartsWith(oops['value'], "Bug #504291")
    # Ensure the store has been rolled back and is in a usable state.
    self.assertEqual(store._connection._state, STATE_RECONNECT)
    store.find(EmailAddress).first()  # Confirms Store is working.
def create(self, name, size, file, contentType, expires=None,
           debugID=None, restricted=False, allow_zero_length=False):
    """See `ILibraryFileAliasSet`"""
    # Restricted files are uploaded through the restricted librarian.
    if restricted:
        client = getUtility(IRestrictedLibrarianClient)
    else:
        client = getUtility(ILibrarianClient)
    if '/' in name:
        raise InvalidFilename("Filename cannot contain slashes.")
    fid = client.addFile(
        name, size, file, contentType, expires=expires, debugID=debugID,
        allow_zero_length=allow_zero_length)
    # Look the freshly created alias back up on the master store.
    store = IMasterStore(LibraryFileAlias)
    lfa = store.find(LibraryFileAlias, LibraryFileAlias.id == fid).one()
    assert lfa is not None, "client.addFile didn't!"
    return lfa
def create(cls, package_name, source_archive, target_archive,
           target_distroseries, target_pocket, include_binaries=False,
           package_version=None, copy_policy=PackageCopyPolicy.INSECURE,
           requester=None, sponsored=None, unembargo=False,
           auto_approve=False, source_distroseries=None,
           source_pocket=None, phased_update_percentage=None):
    """See `IPlainPackageCopyJobSource`."""
    assert package_version is not None, "No package version specified."
    assert requester is not None, "No requester specified."
    # Bundle the copy options into the job's metadata blob.
    metadata = cls._makeMetadata(
        target_pocket, package_version, include_binaries, sponsored,
        unembargo, auto_approve, source_distroseries, source_pocket,
        phased_update_percentage)
    job = PackageCopyJob(
        job_type=cls.class_job_type,
        source_archive=source_archive,
        target_archive=target_archive,
        target_distroseries=target_distroseries,
        package_name=package_name,
        copy_policy=copy_policy,
        metadata=metadata,
        requester=requester)
    IMasterStore(PackageCopyJob).add(job)
    wrapper = cls(job)
    # Schedule the job to run via celery once the transaction commits.
    wrapper.celeryRunOnCommit()
    return wrapper
def iterReady(cls):
    """See `IRevisionMailJobSource`."""
    # Query runs eagerly here; wrapping in job classes is lazy.
    ready = IMasterStore(Branch).find(
        (BranchJob),
        And(BranchJob.job_type == cls.class_job_type,
            BranchJob.job == Job.id,
            Job.id.is_in(Job.ready_jobs)))
    return (cls(branch_job) for branch_job in ready)
def newPackagesetUploader(self, archive, person, packageset,
                          explicit=False):
    """See `IArchivePermissionSet`."""
    # Accept either a name or a packageset object.
    packageset = self._nameToPackageset(packageset)
    store = IMasterStore(ArchivePermission)
    # First see whether we have a matching permission in the database
    # already.  The team-participation join also matches permissions
    # granted to any team the person belongs to.
    query = '''
        SELECT ap.id
        FROM archivepermission ap, teamparticipation tp
        WHERE ap.person = tp.team AND tp.person = ?
        AND ap.packageset = ? AND ap.archive = ?
    '''
    query = SQL(query, (person.id, packageset.id, archive.id))
    permissions = list(
        store.find(ArchivePermission, ArchivePermission.id.is_in(query)))
    if len(permissions) > 0:
        # Found permissions in the database, does the 'explicit' flag
        # have the requested value?
        conflicting = [permission for permission in permissions
                       if permission.explicit != explicit]
        if len(conflicting) > 0:
            # At least one permission with conflicting 'explicit' flag
            # value exists already.
            cperm = conflicting[0]
            raise ValueError(
                "Permission for package set '%s' already exists for %s "
                "but with a different 'explicit' flag value (%s)." %
                (packageset.name, cperm.person.name, cperm.explicit))
        else:
            # No conflicts, does the requested permission exist already?
            existing = [permission for permission in permissions
                        if (permission.explicit == explicit and
                            permission.person == person and
                            permission.packageset == packageset)]
            assert len(existing) <= 1, (
                "Too many permissions for %s and %s" %
                (person.name, packageset.name))
            if len(existing) == 1:
                # The existing permission matches, just return it.
                return existing[0]
    # The requested permission does not exist yet. Insert it into the
    # database.
    permission = ArchivePermission(
        archive=archive, person=person, packageset=packageset,
        permission=ArchivePermissionType.UPLOAD, explicit=explicit)
    store.add(permission)
    return permission
def createMultiple(cls, copy_tasks, requester,
                   copy_policy=PackageCopyPolicy.INSECURE,
                   include_binaries=False, sponsored=None,
                   unembargo=False, auto_approve=False, silent=False):
    """See `IPlainPackageCopyJobSource`."""
    store = IMasterStore(Job)
    # Allocate one Job row per copy task up front.
    job_ids = Job.createMultiple(store, len(copy_tasks), requester)
    rows = [
        cls._composeJobInsertionTuple(
            copy_policy, include_binaries, job_id, task, sponsored,
            unembargo, auto_approve, silent)
        for job_id, task in zip(job_ids, copy_tasks)]
    columns = (
        PackageCopyJob.job_type, PackageCopyJob.target_distroseries,
        PackageCopyJob.copy_policy, PackageCopyJob.source_archive,
        PackageCopyJob.target_archive, PackageCopyJob.package_name,
        PackageCopyJob.job_id, PackageCopyJob.metadata)
    # Bulk-insert all rows in one statement and return the new keys.
    return bulk.create(columns, rows, get_primary_keys=True)
def iterReady(cls):
    """Iterate through all ready ProductJobs."""
    # Restrict to this class's job type, intersected with the set of
    # jobs currently ready to run.
    ready = IMasterStore(ProductJob).find(
        ProductJob,
        And(ProductJob.job_type == cls.class_job_type,
            ProductJob.job_id.is_in(Job.ready_jobs)))
    return (cls(product_job) for product_job in ready)
def iterReady(cls):
    """See `IJobSource`."""
    # Restrict to this class's job type, intersected with the set of
    # jobs currently ready to run.
    ready = IMasterStore(QuestionJob).find(
        QuestionJob,
        And(QuestionJob.job_type == cls.class_job_type,
            QuestionJob.job_id.is_in(Job.ready_jobs)))
    return (cls(question_job) for question_job in ready)
def new(cls, job_type, status=BuildStatus.NEEDSBUILD, date_created=None,
        builder=None, archive=None):
    """See `IBuildFarmJobSource`."""
    # Create the row and register it with the master store.
    farm_job = BuildFarmJob(
        job_type, status, date_created, builder, archive)
    IMasterStore(BuildFarmJob).add(farm_job)
    return farm_job
def iterReady(cls):
    """See `IJobSource`."""
    # Join GitJob to its Job row and keep only ready jobs of our type.
    ready = IMasterStore(GitJob).find(
        GitJob,
        GitJob.job_type == cls.class_job_type,
        GitJob.job == Job.id,
        Job.id.is_in(Job.ready_jobs))
    return (cls(git_job) for git_job in ready)
def setUp(self):
    super(TestBugSummary, self).setUp()
    # Some things we are testing are impossible as mere mortals,
    # but might happen from the SQL command line; switch to a
    # privileged database user.
    switch_dbuser('testadmin')
    self.store = IMasterStore(BugSummary)
def removeAssociation(self, server_url, handle):
    """See `OpenIDStore`."""
    store = IMasterStore(self.Association)
    # The association's composite key is (url, handle), decoded from
    # the byte strings the OpenID library hands us.
    key = (server_url.decode('UTF-8'), handle.decode('ASCII'))
    assoc = store.get(self.Association, key)
    if assoc is None:
        # Nothing to remove.
        return False
    store.remove(assoc)
    return True
def iterReady():
    """See `IRosettaUploadJobSource`."""
    # Only branches whose scan is up to date with the last mirror are
    # eligible; order by job id for stable processing.
    ready = IMasterStore(BranchJob).using(BranchJob, Job, Branch).find(
        (BranchJob),
        And(BranchJob.job_type == BranchJobType.ROSETTA_UPLOAD,
            BranchJob.job == Job.id,
            BranchJob.branch == Branch.id,
            Branch.last_mirrored_id == Branch.last_scanned_id,
            Job.id.is_in(Job.ready_jobs))).order_by(BranchJob.id)
    return (RosettaUploadJob(branch_job) for branch_job in ready)
def new(self, registrant, name, display_name, distro_series,
        build_channels, date_created=DEFAULT):
    """See `ISnapBaseSet`."""
    # Create the row and register it with the master store.
    base = SnapBase(
        registrant, name, display_name, distro_series, build_channels,
        date_created=date_created)
    IMasterStore(SnapBase).add(base)
    return base
def test_listSuitesNeedingIndexes_is_empty_for_configless_distro(self):
    # listSuitesNeedingIndexes returns no suites for distributions
    # that have no publisher config, such as Debian. We don't want
    # to publish such distributions.
    series = self.makeDistroSeriesNeedingIndexes()
    # Strip the distribution of its publisher config.
    pub_config = get_pub_config(series.distribution)
    IMasterStore(pub_config).remove(pub_config)
    script = self.makeScript(series.distribution)
    # Without a config, no suites should be offered for indexing.
    self.assertEqual([], script.listSuitesNeedingIndexes(series))
def test_startup_with_no_slave(self):
    '''An attempt is made for the first time to connect to a slave.'''
    # With the slave unavailable from the start, the first lookup must
    # fall back to the master.
    self.pgbouncer_fixture.stop()
    # The master and slave Stores are the same object.
    self.assertIs(IMasterStore(Person), ISlaveStore(Person))
def getRecentBuilds(cls, requester, recipe, distroseries, _now=None):
    """Return this recipe's builds from the last day.

    :param _now: test hook; overrides the current time when supplied.
    """
    if _now is None:
        _now = datetime.now(pytz.UTC)
    # Anything created after this threshold counts as recent.
    cutoff = _now - timedelta(days=1)
    return IMasterStore(SourcePackageRecipeBuild).find(
        cls,
        cls.distroseries_id == distroseries.id,
        cls.requester_id == requester.id,
        cls.recipe_id == recipe.id,
        cls.date_created > cutoff)
def _getStore(cls):
    """Return the correct store for this class.

    We want all OAuth classes to be retrieved from the master flavour.
    If they are retrieved from the slave, there will be problems in the
    authorization exchange, since it will be done across applications
    that won't share the session cookies.
    """
    # Always the master flavour — never the slave.
    return IMasterStore(cls)
def _get_store():
    """See `SQLBase`.

    We want all OAuth classes to be retrieved from the master flavour.
    If they are retrieved from the slave, there will be problems in the
    authorization exchange, since it will be done across applications
    that won't share the session cookies.
    """
    # Always the master flavour — never the slave.
    return IMasterStore(LibraryFileAlias)
def is_connected(self):
    # Roll back any existing transaction first so that a reconnection
    # attempt is actually made.
    transaction.abort()
    try:
        IMasterStore(Person).find(Person).first()
    except DisconnectionError:
        return False
    return True
def __init__(self, transaction, logger, tm_ids):
    """Prepare to fix up the given TranslationMessages.

    :param transaction: transaction manager — presumably used to commit
        work in batches elsewhere in this class; confirm against the
        methods that consume it.
    :param logger: logger used for progress reporting.
    :param tm_ids: iterable of TranslationMessage ids to process.
    """
    self.transaction = transaction
    self.logger = logger
    # Resume offset for batched processing; starts at the beginning.
    self.start_at = 0
    # Materialize the ids so we can count them and index into them.
    self.tm_ids = list(tm_ids)
    self.total = len(self.tm_ids)
    self.logger.info(
        "Fixing up a total of %d TranslationMessages." % (self.total))
    # Writes must go to the master store.
    self.store = IMasterStore(Product)
def exists(owner, name):
    """See `ISourcePackageRecipeSource.new`.

    Return True if a recipe with this owner and name already exists,
    False otherwise.
    """
    # NOTE(review): the interface reference above points at `.new` —
    # confirm whether it should reference an `exists` declaration.
    store = IMasterStore(SourcePackageRecipe)
    recipe = store.find(
        SourcePackageRecipe,
        SourcePackageRecipe.owner == owner,
        SourcePackageRecipe.name == name).one()
    # .one() returns the matching recipe or None; compare against None
    # explicitly instead of the verbose if/else returning booleans.
    return recipe is not None
def pruneRevisionCache(limit):
    """See `IRevisionSet`."""
    # Storm doesn't handle remove a limited result set:
    # FeatureError: Can't remove a sliced result set
    # so select at most `limit` stale ids in a subquery and delete by id.
    store = IMasterStore(RevisionCache)
    cutoff = datetime.now(tz=pytz.UTC) - timedelta(days=30)
    stale_ids = Select(
        [RevisionCache.id],
        RevisionCache.revision_date < cutoff,
        limit=limit)
    store.find(RevisionCache, RevisionCache.id.is_in(stale_ids)).remove()
def new(self, distribution, root_dir, base_url, copy_base_url):
    """Make and return a new `PublisherConfig`."""
    store = IMasterStore(PublisherConfig)
    config = PublisherConfig()
    config.distribution = distribution
    config.root_dir = root_dir
    config.base_url = base_url
    config.copy_base_url = copy_base_url
    store.add(config)
    return config
def _getOldestLiveRequest(self):
    """Return the oldest live request on the master store.

    Due to replication lag, the master store is always a little ahead
    of the slave store that exports come from.
    """
    # Lowest id == oldest request.
    requests = IMasterStore(POExportRequest).find(POExportRequest)
    return requests.order_by(POExportRequest.id).first()
def removeRequest(self, request_ids):
    """See `IPOExportRequestSet`."""
    # Guard clause: nothing to delete.
    if len(request_ids) == 0:
        return
    # Storm 0.15 does not have direct support for deleting based
    # on is_in expressions and such, so do it the hard way.
    # sqlvalues() quotes each id safely for SQL interpolation.
    ids_string = ', '.join(sqlvalues(*request_ids))
    IMasterStore(POExportRequest).execute("""
        DELETE FROM POExportRequest
        WHERE id in (%s)
        """ % ids_string)