def packageSetDiff(self, origin, destination, logger=None):
    """Please see `IPackageCloner`."""
    # Find packages that are obsolete or missing in the target archive.
    store = IStore(BinaryPackagePublishingHistory)
    self._init_packageset_delta(destination)
    self._compute_packageset_delta(origin)

    # Get the list of SourcePackagePublishingHistory keys for
    # source packages that are fresher in the origin archive.
    fresher_packages = store.execute("""
        SELECT s_sspph FROM tmp_merge_copy_data
        WHERE obsoleted = True;
        """)

    # Get the list of SourcePackagePublishingHistory keys for
    # source packages that are new in the origin archive.
    new_packages = store.execute("""
        SELECT s_sspph FROM tmp_merge_copy_data
        WHERE missing = True;
        """)

    if logger is not None:
        self._print_diagnostics(logger, store)

    return (
        [package for [package] in fresher_packages],
        [package for [package] in new_packages],
    )
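# Illustrative sketch, not from the original source: how the two ID lists
# returned by packageSetDiff() above might be consumed.  `cloner` (an
# IPackageCloner utility), `origin`/`destination` (PackageLocation-like
# objects) and `logger` are hypothetical stand-ins.
fresher_ids, new_ids = cloner.packageSetDiff(origin, destination, logger=logger)
logger.info(
    "%d fresher and %d new source publications found",
    len(fresher_ids), len(new_ids))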
def numSubmissionsWithDevice(
    self, bus=None, vendor_id=None, product_id=None, driver_name=None,
    package_name=None, distro_target=None
):
    """See `IHWSubmissionSet`."""
    store = IStore(HWSubmission)
    submissions_with_device_select, all_submissions_select = (
        self._submissionsSubmitterSelects(
            Count(), bus, vendor_id, product_id, driver_name,
            package_name, distro_target
        )
    )
    submissions_with_device = store.execute(submissions_with_device_select)
    all_submissions = store.execute(all_submissions_select)
    return (
        submissions_with_device.get_one()[0],
        all_submissions.get_one()[0],
    )
def test_getAll(self):
    processor_set = getUtility(IProcessorSet)
    # Make it easy to filter out sample data.
    store = IStore(Processor)
    store.execute("UPDATE Processor SET name = 'sample_data_' || name")
    self.factory.makeProcessor(name='q1')
    self.factory.makeProcessor(name='i686')
    self.factory.makeProcessor(name='g4')
    self.assertEqual(
        ['g4', 'i686', 'q1'],
        sorted(
            processor.name for processor in processor_set.getAll()
            if not processor.name.startswith('sample_data_')))
def test_getAll(self):
    processor_set = getUtility(IProcessorSet)
    # Make it easy to filter out sample data.
    store = IStore(Processor)
    store.execute("UPDATE Processor SET name = 'sample_data_' || name")
    self.factory.makeProcessor(name='q1')
    self.factory.makeProcessor(name='i686')
    self.factory.makeProcessor(name='g4')
    self.assertEquals(
        ['g4', 'i686', 'q1'],
        sorted(
            processor.name for processor in processor_set.getAll()
            if not processor.name.startswith('sample_data_')))
def numSubmissionsWithDevice(
        self, bus=None, vendor_id=None, product_id=None, driver_name=None,
        package_name=None, distro_target=None):
    """See `IHWSubmissionSet`."""
    store = IStore(HWSubmission)
    submissions_with_device_select, all_submissions_select = (
        self._submissionsSubmitterSelects(
            Count(), bus, vendor_id, product_id, driver_name,
            package_name, distro_target))
    submissions_with_device = store.execute(
        submissions_with_device_select)
    all_submissions = store.execute(all_submissions_select)
    return (submissions_with_device.get_one()[0],
            all_submissions.get_one()[0])
def numOwnersOfDevice(
    self, bus=None, vendor_id=None, product_id=None, driver_name=None,
    package_name=None, distro_target=None
):
    """See `IHWSubmissionSet`."""
    store = IStore(HWSubmission)
    submitters_with_device_select, all_submitters_select = (
        self._submissionsSubmitterSelects(
            HWSubmission.raw_emailaddress, bus, vendor_id, product_id,
            driver_name, package_name, distro_target
        )
    )
    submitters_with_device = store.execute(
        Select(
            columns=[Count()],
            tables=[Alias(submitters_with_device_select, "addresses")])
    )
    all_submitters = store.execute(
        Select(columns=[Count()],
               tables=[Alias(all_submitters_select, "addresses")]))
    return (
        submitters_with_device.get_one()[0],
        all_submitters.get_one()[0],
    )
def preloadVisibleStackedOnBranches(branches, user=None):
    """Preload the chains of stacked on branches related to the given list
    of branches.

    Only the branches visible for the given user are preloaded/returned.
    """
    if len(branches) == 0:
        return
    store = IStore(Branch)
    result = store.execute("""
        WITH RECURSIVE stacked_on_branches_ids AS (
            SELECT column1 as id FROM (VALUES %s) AS temp
            UNION
            SELECT DISTINCT branch.stacked_on
            FROM stacked_on_branches_ids, Branch AS branch
            WHERE
                branch.id = stacked_on_branches_ids.id
                AND branch.stacked_on IS NOT NULL
        )
        SELECT id from stacked_on_branches_ids
        """ % ', '.join(
            ["(%s)" % quote(id)
             for id in map(attrgetter('id'), branches)]))
    branch_ids = [res[0] for res in result.get_all()]
    # Not really sure this is useful: if a given branch is visible by a
    # user, then I think it means that the whole chain of branches on
    # which it is stacked is visible by this user.
    expressions = [Branch.id.is_in(branch_ids)]
    if user is None:
        collection = AnonymousBranchCollection(
            branch_filter_expressions=expressions)
    else:
        collection = VisibleBranchCollection(
            user=user, branch_filter_expressions=expressions)
    return list(collection.getBranches())
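# Illustrative sketch, not from the original source: preloading the visible
# stacked-on chains before rendering a page of branches.  `branches` (a list
# of Branch rows already loaded) and `user` (the viewing Person, or None for
# anonymous access) are hypothetical inputs.
visible_chain = preloadVisibleStackedOnBranches(branches, user=user)
for branch in visible_chain or []:
    # Each returned Branch is part of a stacked-on chain visible to `user`.
    print(branch.id)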
def test_default_collection(self):
    # Make it easy to filter out sample data.
    store = IStore(Processor)
    store.execute("UPDATE Processor SET name = 'sample_data_' || name")
    self.factory.makeProcessor(name='q1')
    self.factory.makeProcessor(name='i686')
    self.factory.makeProcessor(name='g4')
    logout()
    collection = self.webservice.get(
        '/+processors?ws.size=10', api_version='devel').jsonBody()
    self.assertEqual(
        ['g4', 'i686', 'q1'],
        sorted(
            processor['name'] for processor in collection['entries']
            if not processor['name'].startswith('sample_data_')))
def isSourceUploadAllowed(self, archive, sourcepackagename, person,
                          distroseries=None):
    """See `IArchivePermissionSet`."""
    sourcepackagename = self._nameToSourcePackageName(sourcepackagename)
    store = IStore(ArchivePermission)
    if distroseries is None:
        ubuntu = getUtility(IDistributionSet).getByName('ubuntu')
        distroseries = ubuntu.currentseries

    # Put together the parameters for the query that follows.
    archive_params = (ArchivePermissionType.UPLOAD, archive.id)
    permission_params = (sourcepackagename.id, person.id, distroseries.id)
    query_params = (
        # Query parameters for the first WHERE clause.
        (archive.id, distroseries.id, sourcepackagename.id) +
        # Query parameters for the second WHERE clause.
        permission_params + archive_params +
        # Query parameters for the third WHERE clause.
        permission_params + archive_params)

    query = '''
        SELECT CASE
          WHEN (SELECT COUNT(ap.id)
            FROM packagesetsources pss, archivepermission ap, packageset ps
            WHERE
              ap.archive = %s AND ap.explicit = TRUE
              AND ap.packageset = ps.id AND ps.distroseries = %s
              AND pss.sourcepackagename = %s
              AND pss.packageset = ap.packageset) > 0
          THEN (SELECT COUNT(ap.id)
            FROM packagesetsources pss, archivepermission ap, packageset ps,
                 teamparticipation tp
            WHERE
              pss.sourcepackagename = %s AND ap.person = tp.team
              AND tp.person = %s
              AND ap.packageset = ps.id AND ps.distroseries = %s
              AND pss.packageset = ap.packageset AND ap.explicit = TRUE
              AND ap.permission = %s AND ap.archive = %s)
          ELSE (SELECT COUNT(ap.id)
            FROM packagesetsources pss, archivepermission ap, packageset ps,
                 teamparticipation tp, flatpackagesetinclusion fpsi
            WHERE
              pss.sourcepackagename = %s AND ap.person = tp.team
              AND tp.person = %s
              AND ap.packageset = ps.id AND ps.distroseries = %s
              AND pss.packageset = fpsi.child AND fpsi.parent = ap.packageset
              AND ap.permission = %s AND ap.archive = %s)
        END AS number_of_permitted_package_sets;
    ''' % sqlvalues(*query_params)
    return store.execute(query).get_one()[0] > 0
def mergeCopy(self, origin, destination):
    """Please see `IPackageCloner`."""
    # Calculate the package set delta in order to find packages that are
    # obsolete or missing in the target archive.
    self.packageSetDiff(origin, destination)

    # Now copy the fresher or new packages.
    store = IStore(BinaryPackagePublishingHistory)
    store.execute("""
        INSERT INTO SourcePackagePublishingHistory (
            sourcepackagerelease, distroseries, status, component,
            section, archive, datecreated, datepublished, pocket,
            sourcepackagename)
        SELECT
            mcd.s_sourcepackagerelease AS sourcepackagerelease,
            %s AS distroseries,
            mcd.s_status AS status,
            mcd.s_component AS component,
            mcd.s_section AS section,
            %s AS archive,
            %s AS datecreated,
            %s AS datepublished,
            %s AS pocket,
            sourcepackagename_id
        FROM tmp_merge_copy_data mcd
        WHERE mcd.obsoleted = True OR mcd.missing = True
        """ % sqlvalues(
            destination.distroseries, destination.archive, UTC_NOW,
            UTC_NOW, destination.pocket))

    # Finally set the publishing status for the packages obsoleted in the
    # target archive accordingly (i.e make them superseded).
    store.execute("""
        UPDATE sourcepackagepublishinghistory secsrc
        SET
            status = %s,
            datesuperseded = %s,
            supersededby = mcd.s_sourcepackagerelease
        FROM tmp_merge_copy_data mcd
        WHERE secsrc.id = mcd.t_sspph AND mcd.obsoleted = True
        """ % sqlvalues(PackagePublishingStatus.SUPERSEDED, UTC_NOW))

    processors = [
        removeSecurityProxy(archivearch).processor
        for archivearch in getUtility(IArchiveArchSet).getByArchive(
            destination.archive)]

    self._create_missing_builds(
        destination.distroseries, destination.archive, (), processors,
        False)
def test_default_collection(self):
    # Make it easy to filter out sample data.
    store = IStore(Processor)
    store.execute("UPDATE Processor SET name = 'sample_data_' || name")
    self.factory.makeProcessor(name='q1')
    self.factory.makeProcessor(name='i686')
    self.factory.makeProcessor(name='g4')
    logout()
    collection = self.webservice.get(
        '/+processors?ws.size=10', api_version='devel').jsonBody()
    self.assertEquals(
        ['g4', 'i686', 'q1'],
        sorted(
            processor['name'] for processor in collection['entries']
            if not processor['name'].startswith('sample_data_')))
def populate_distroseriesdiff(logger, derived_series, parent_series):
    """Compare `derived_distroseries` to parent, and register differences.

    The differences are registered by creating `DistroSeriesDifference`
    records, insofar as they do not yet exist.
    """
    temp_table = "temp_potentialdistroseriesdiff"

    store = IStore(derived_series)
    drop_table(store, temp_table)

    quoted_temp_table = quote_identifier(temp_table)
    store.execute("""
        CREATE TEMP TABLE %s(
            sourcepackagename INTEGER,
            source_version debversion,
            parent_source_version debversion)
            ON COMMIT DROP
        """ % (
            quoted_temp_table))
    store.execute("INSERT INTO %s %s" % (
        quoted_temp_table,
        compose_sql_find_differences(derived_series, parent_series)))
    logger.info(
        "Found %d potential difference(s).",
        store.execute("SELECT count(*) FROM %s" % temp_table).get_one()[0])
    store.execute(
        compose_sql_populate_distroseriesdiff(
            derived_series, parent_series, temp_table))
def populate_distroseriesdiff(logger, derived_series, parent_series):
    """Compare `derived_distroseries` to parent, and register differences.

    The differences are registered by creating `DistroSeriesDifference`
    records, insofar as they do not yet exist.
    """
    temp_table = "temp_potentialdistroseriesdiff"

    store = IStore(derived_series)
    drop_table(store, temp_table)

    quoted_temp_table = quote_identifier(temp_table)
    store.execute("""
        CREATE TEMP TABLE %s(
            sourcepackagename INTEGER,
            source_version debversion,
            parent_source_version debversion)
            ON COMMIT DROP
        """ % (quoted_temp_table))
    store.execute(
        "INSERT INTO %s %s" % (
            quoted_temp_table,
            compose_sql_find_differences(derived_series, parent_series)))
    logger.info(
        "Found %d potential difference(s).",
        store.execute("SELECT count(*) FROM %s" % temp_table).get_one()[0])
    store.execute(
        compose_sql_populate_distroseriesdiff(
            derived_series, parent_series, temp_table))
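# Illustrative driver, not from the original source, for the
# populate_distroseriesdiff() function above.  `derived_series` and
# `parent_series` are DistroSeries objects obtained elsewhere; using the
# Zope `transaction` manager here is an assumption about the calling script.
import logging

import transaction

logger = logging.getLogger("distroseriesdiff")
populate_distroseriesdiff(logger, derived_series, parent_series)
# The scratch table is created ON COMMIT DROP, so committing both persists
# the new DistroSeriesDifference rows and discards the temporary table.
transaction.commit()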
def isSourceUploadAllowed(
        self, archive, sourcepackagename, person, distroseries=None):
    """See `IArchivePermissionSet`."""
    sourcepackagename = self._nameToSourcePackageName(sourcepackagename)
    store = IStore(ArchivePermission)
    if distroseries is None:
        ubuntu = getUtility(IDistributionSet).getByName('ubuntu')
        distroseries = ubuntu.currentseries

    # Put together the parameters for the query that follows.
    archive_params = (ArchivePermissionType.UPLOAD, archive.id)
    permission_params = (sourcepackagename.id, person.id, distroseries.id)
    query_params = (
        # Query parameters for the first WHERE clause.
        (archive.id, distroseries.id, sourcepackagename.id) +
        # Query parameters for the second WHERE clause.
        permission_params + archive_params +
        # Query parameters for the third WHERE clause.
        permission_params + archive_params)

    query = '''
        SELECT CASE
          WHEN (SELECT COUNT(ap.id)
            FROM packagesetsources pss, archivepermission ap, packageset ps
            WHERE
              ap.archive = %s AND ap.explicit = TRUE
              AND ap.packageset = ps.id AND ps.distroseries = %s
              AND pss.sourcepackagename = %s
              AND pss.packageset = ap.packageset) > 0
          THEN (SELECT COUNT(ap.id)
            FROM packagesetsources pss, archivepermission ap, packageset ps,
                 teamparticipation tp
            WHERE
              pss.sourcepackagename = %s AND ap.person = tp.team
              AND tp.person = %s
              AND ap.packageset = ps.id AND ps.distroseries = %s
              AND pss.packageset = ap.packageset AND ap.explicit = TRUE
              AND ap.permission = %s AND ap.archive = %s)
          ELSE (SELECT COUNT(ap.id)
            FROM packagesetsources pss, archivepermission ap, packageset ps,
                 teamparticipation tp, flatpackagesetinclusion fpsi
            WHERE
              pss.sourcepackagename = %s AND ap.person = tp.team
              AND tp.person = %s
              AND ap.packageset = ps.id AND ps.distroseries = %s
              AND pss.packageset = fpsi.child AND fpsi.parent = ap.packageset
              AND ap.permission = %s AND ap.archive = %s)
        END AS number_of_permitted_package_sets;
    ''' % sqlvalues(*query_params)
    return store.execute(query).get_one()[0] > 0
def numOwnersOfDevice(
        self, bus=None, vendor_id=None, product_id=None, driver_name=None,
        package_name=None, distro_target=None):
    """See `IHWSubmissionSet`."""
    store = IStore(HWSubmission)
    submitters_with_device_select, all_submitters_select = (
        self._submissionsSubmitterSelects(
            HWSubmission.raw_emailaddress, bus, vendor_id, product_id,
            driver_name, package_name, distro_target))
    submitters_with_device = store.execute(
        Select(
            columns=[Count()],
            tables=[Alias(submitters_with_device_select, 'addresses')]))
    all_submitters = store.execute(
        Select(
            columns=[Count()],
            tables=[Alias(all_submitters_select, 'addresses')]))
    return (submitters_with_device.get_one()[0],
            all_submitters.get_one()[0])
def getMirrorsToProbe(self, content_type, ignore_last_probe=False,
                      limit=None):
    """See IDistributionMirrorSet"""
    query = """
        SELECT distributionmirror.id, MAX(mirrorproberecord.date_created)
        FROM distributionmirror
        LEFT OUTER JOIN mirrorproberecord
            ON mirrorproberecord.distribution_mirror = distributionmirror.id
        WHERE distributionmirror.content = %s
            AND distributionmirror.official_candidate IS TRUE
            AND distributionmirror.status = %s
        GROUP BY distributionmirror.id
        """ % sqlvalues(content_type, MirrorStatus.OFFICIAL)

    if not ignore_last_probe:
        query += """
            HAVING MAX(mirrorproberecord.date_created) IS NULL
                OR MAX(mirrorproberecord.date_created)
                    < %s - '%s hours'::interval
            """ % sqlvalues(UTC_NOW, PROBE_INTERVAL)

    query += """
        ORDER BY MAX(COALESCE(
            mirrorproberecord.date_created, '1970-01-01')) ASC, id"""

    if limit is not None:
        query += " LIMIT %d" % limit

    store = IStore(MirrorDistroArchSeries)
    ids = ", ".join(
        str(id) for (id, date_created) in store.execute(query))
    query = '1 = 2'
    if ids:
        query = 'id IN (%s)' % ids
    return DistributionMirror.select(query)
def getMirrorsToProbe(
        self, content_type, ignore_last_probe=False, limit=None):
    """See IDistributionMirrorSet"""
    query = """
        SELECT distributionmirror.id, MAX(mirrorproberecord.date_created)
        FROM distributionmirror
        LEFT OUTER JOIN mirrorproberecord
            ON mirrorproberecord.distribution_mirror = distributionmirror.id
        WHERE distributionmirror.content = %s
            AND distributionmirror.official_candidate IS TRUE
            AND distributionmirror.status = %s
        GROUP BY distributionmirror.id
        """ % sqlvalues(content_type, MirrorStatus.OFFICIAL)

    if not ignore_last_probe:
        query += """
            HAVING MAX(mirrorproberecord.date_created) IS NULL
                OR MAX(mirrorproberecord.date_created)
                    < %s - '%s hours'::interval
            """ % sqlvalues(UTC_NOW, PROBE_INTERVAL)

    query += """
        ORDER BY MAX(COALESCE(
            mirrorproberecord.date_created, '1970-01-01')) ASC, id"""

    if limit is not None:
        query += " LIMIT %d" % limit

    store = IStore(MirrorDistroArchSeries)
    ids = ", ".join(
        str(id) for (id, date_created) in store.execute(query))
    query = '1 = 2'
    if ids:
        query = 'id IN (%s)' % ids
    return DistributionMirror.select(query)
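# Illustrative sketch, not from the original source: iterating over probe
# candidates returned by getMirrorsToProbe() above.  `content_type` is
# assumed to be a MirrorContent value obtained elsewhere; the utility lookup
# relies on the IDistributionMirrorSet interface named in the docstring.
mirror_set = getUtility(IDistributionMirrorSet)
for mirror in mirror_set.getMirrorsToProbe(content_type, limit=50):
    print(mirror.id)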
class TestNameBlacklist(TestCaseWithFactory):
    layer = ZopelessDatabaseLayer

    def setUp(self):
        super(TestNameBlacklist, self).setUp()
        self.name_blacklist_set = getUtility(INameBlacklistSet)
        self.caret_foo_exp = self.name_blacklist_set.create(u'^foo')
        self.foo_exp = self.name_blacklist_set.create(u'foo')
        self.verbose_exp = self.name_blacklist_set.create(u'v e r b o s e')
        team = self.factory.makeTeam()
        self.admin_exp = self.name_blacklist_set.create(u'fnord', admin=team)
        self.store = IStore(self.foo_exp)
        self.store.flush()

    def name_blacklist_match(self, name, user_id=None):
        '''Return the result of the name_blacklist_match stored procedure.'''
        user_id = user_id or 0
        result = self.store.execute(
            "SELECT name_blacklist_match(%s, %s)", (name, user_id))
        return result.get_one()[0]

    def is_blacklisted_name(self, name, user_id=None):
        '''Call the is_blacklisted_name stored procedure and return the
        result.
        '''
        user_id = user_id or 0
        result = self.store.execute(
            "SELECT is_blacklisted_name(%s, %s)", (name, user_id))
        blacklisted = result.get_one()[0]
        self.failIf(blacklisted is None, 'is_blacklisted_name returned NULL')
        return bool(blacklisted)

    def test_name_blacklist_match(self):
        # A name that is not blacklisted returns NULL/None.
        self.failUnless(self.name_blacklist_match(u"bar") is None)

        # A name that is blacklisted returns the id of the row in the
        # NameBlacklist table that matched. Rows are tried in order, and the
        # first match is returned.
        self.assertEqual(
            self.name_blacklist_match(u"foobar"), self.caret_foo_exp.id)
        self.assertEqual(
            self.name_blacklist_match(u"barfoo"), self.foo_exp.id)

    def test_name_blacklist_match_admin_does_not_match(self):
        # A user in the expression's admin team is exempt from the
        # blacklisted name restriction.
        user = self.admin_exp.admin.teamowner
        self.assertEqual(
            None, self.name_blacklist_match(u"fnord", user.id))

    def test_name_blacklist_match_launchpad_admin_can_change(self):
        # A Launchpad admin is exempt from any blacklisted name restriction
        # that has an admin.
        user = self.factory.makePerson()
        admins = getUtility(ILaunchpadCelebrities).admin
        admins.addMember(user, user)
        self.assertEqual(
            None, self.name_blacklist_match(u"fnord", user.id))

    def test_name_blacklist_match_launchpad_admin_cannot_change(self):
        # A Launchpad admin cannot override blacklisted names without admins.
        user = self.factory.makePerson()
        admins = getUtility(ILaunchpadCelebrities).admin
        admins.addMember(user, user)
        self.assertEqual(
            self.foo_exp.id, self.name_blacklist_match(u"barfoo", user.id))

    def test_name_blacklist_match_cache(self):
        # If the blacklist is changed in the DB, these changes are noticed.
        # This test is needed because the stored procedure keeps a cache
        # of the compiled regular expressions.
        self.assertEqual(
            self.name_blacklist_match(u"foobar"), self.caret_foo_exp.id)
        self.caret_foo_exp.regexp = u'nomatch'
        self.assertEqual(
            self.name_blacklist_match(u"foobar"), self.foo_exp.id)
        self.foo_exp.regexp = u'nomatch2'
        self.failUnless(self.name_blacklist_match(u"foobar") is None)

    def test_is_blacklisted_name(self):
        # is_blacklisted_name() is just a wrapper around name_blacklist_match
        # that is friendlier to use in a boolean context.
        self.failUnless(self.is_blacklisted_name(u"bar") is False)
        self.failUnless(self.is_blacklisted_name(u"foo") is True)
        self.caret_foo_exp.regexp = u'bar'
        self.foo_exp.regexp = u'bar2'
        self.failUnless(self.is_blacklisted_name(u"foo") is False)

    def test_is_blacklisted_name_admin_false(self):
        # Users in the expression's admin team will return False.
        user = self.admin_exp.admin.teamowner
        self.assertFalse(self.is_blacklisted_name(u"fnord", user.id))

    def test_case_insensitive(self):
        self.failUnless(self.is_blacklisted_name(u"Foo") is True)

    def test_verbose(self):
        # Test that the VERBOSE flag is used when compiling the regexp.
        self.failUnless(self.is_blacklisted_name(u"verbose") is True)
def _init_packageset_delta(self, destination):
    """Set up a temp table with data about target archive packages.

    This is a first step in finding out which packages in a given source
    archive are fresher or new with respect to a target archive.

    Merge copying of packages is one of the use cases that requires such a
    package set diff capability.

    In order to find fresher or new packages we first set up a temporary
    table that lists what packages exist in the target archive
    (additionally considering the distroseries, pocket and component).
    """
    store = IStore(BinaryPackagePublishingHistory)
    # Use a temporary table to hold the data needed for the package set
    # delta computation. This will prevent multiple, parallel delta
    # calculations from interfering with each other.
    store.execute("""
        CREATE TEMP TABLE tmp_merge_copy_data (
            -- Source archive package data, only set for packages that
            -- will be copied.
            s_sspph integer,
            s_sourcepackagerelease integer,
            s_version debversion,
            s_status integer,
            s_component integer,
            s_section integer,
            -- Target archive package data, set for all published or
            -- pending packages.
            t_sspph integer,
            t_sourcepackagerelease integer,
            t_version debversion,
            -- Whether a target package became obsolete due to a more
            -- recent source package.
            obsoleted boolean DEFAULT false NOT NULL,
            missing boolean DEFAULT false NOT NULL,
            sourcepackagename text NOT NULL,
            sourcepackagename_id integer NOT NULL
        );
        CREATE INDEX source_name_index
        ON tmp_merge_copy_data USING btree (sourcepackagename);
        """)
    # Populate the temporary table with package data from the target
    # archive considering the distroseries, pocket and component.
    pop_query = """
        INSERT INTO tmp_merge_copy_data (
            t_sspph, t_sourcepackagerelease, sourcepackagename,
            sourcepackagename_id, t_version)
        SELECT
            secsrc.id AS t_sspph,
            secsrc.sourcepackagerelease AS t_sourcepackagerelease,
            spn.name AS sourcepackagerelease,
            spn.id AS sourcepackagename_id,
            spr.version AS t_version
        FROM SourcePackagePublishingHistory secsrc
        JOIN SourcePackageRelease AS spr ON
            spr.id = secsrc.sourcepackagerelease
        JOIN SourcePackageName AS spn ON
            spn.id = spr.sourcepackagename
        WHERE
            secsrc.archive = %s AND secsrc.status IN (%s, %s)
            AND secsrc.distroseries = %s AND secsrc.pocket = %s
        """ % sqlvalues(
            destination.archive, PackagePublishingStatus.PENDING,
            PackagePublishingStatus.PUBLISHED, destination.distroseries,
            destination.pocket)
    if destination.component is not None:
        pop_query += (
            " AND secsrc.component = %s" % quote(destination.component))
    store.execute(pop_query)
def _clone_source_packages(self, origin, destination, sourcepackagenames):
    """Copy source publishing data from origin to destination.

    @type origin: PackageLocation
    @param origin: the location from which source publishing
        records are to be copied.
    @type destination: PackageLocation
    @param destination: the location to which the data is to be copied.
    @type sourcepackagenames: Iterable
    @param sourcepackagenames: List of source packages to restrict
        the copy to
    """
    store = IStore(BinaryPackagePublishingHistory)
    query = '''
        INSERT INTO SourcePackagePublishingHistory (
            sourcepackagerelease, distroseries, status, component,
            section, archive, datecreated, datepublished, pocket,
            sourcepackagename)
        SELECT
            spph.sourcepackagerelease,
            %s as distroseries,
            spph.status,
            spph.component,
            spph.section,
            %s as archive,
            %s as datecreated,
            %s as datepublished,
            %s as pocket,
            spph.sourcepackagename
        FROM SourcePackagePublishingHistory AS spph
        WHERE
            spph.distroseries = %s AND
            spph.status in (%s, %s) AND
            spph.pocket = %s AND
            spph.archive = %s
        ''' % sqlvalues(
            destination.distroseries, destination.archive, UTC_NOW, UTC_NOW,
            destination.pocket, origin.distroseries,
            PackagePublishingStatus.PENDING,
            PackagePublishingStatus.PUBLISHED,
            origin.pocket, origin.archive)

    if sourcepackagenames and len(sourcepackagenames) > 0:
        query += '''
            AND spph.sourcepackagerelease IN (
                SELECT spr.id
                FROM SourcePackageRelease AS spr
                JOIN SourcePackageName AS spn ON
                    spn.id = spr.sourcepackagename
                WHERE spn.name IN %s
            )''' % sqlvalues(sourcepackagenames)

    if origin.packagesets:
        query += '''
            AND spph.sourcepackagerelease IN (
                SELECT spr.id
                FROM SourcePackageRelease AS spr
                JOIN PackagesetSources AS pss ON
                    PSS.sourcepackagename = spr.sourcepackagename
                JOIN FlatPackagesetInclusion AS fpsi ON
                    fpsi.child = pss.packageset
                WHERE fpsi.parent in %s
            )
            ''' % sqlvalues([p.id for p in origin.packagesets])

    if origin.component:
        query += "and spph.component = %s" % sqlvalues(origin.component)

    store.execute(query)
def get_current_dbuser(self):
    store = IStore(Person)
    query = store.execute('SELECT current_user;')
    result = query.get_one()[0]
    query.close()
    return result
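# Illustrative sketch, not from the original source: asserting inside a test
# which PostgreSQL role the current store connection is using.  The expected
# role name 'launchpad_main' is an assumption.
current_dbuser = self.get_current_dbuser()
self.assertEqual('launchpad_main', current_dbuser)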
def _compute_packageset_delta(self, origin):
    """Given a source/target archive find obsolete or missing packages.

    This means finding out which packages in a given source archive are
    fresher or new with respect to a target archive.
    """
    store = IStore(BinaryPackagePublishingHistory)
    # The query below will find all packages in the source archive that
    # are fresher than their counterparts in the target archive.
    find_newer_packages = """
        UPDATE tmp_merge_copy_data mcd SET
            s_sspph = secsrc.id,
            s_sourcepackagerelease = spr.id,
            s_version = spr.version,
            obsoleted = True,
            s_status = secsrc.status,
            s_component = secsrc.component,
            s_section = secsrc.section
        FROM
            SourcePackagePublishingHistory secsrc,
            SourcePackageRelease spr,
            SourcePackageName spn
        WHERE
            secsrc.archive = %s AND
            secsrc.status IN (%s, %s) AND
            secsrc.distroseries = %s AND
            secsrc.pocket = %s AND
            secsrc.sourcepackagerelease = spr.id AND
            spr.sourcepackagename = spn.id AND
            spn.name = mcd.sourcepackagename AND
            spr.version > mcd.t_version
        """ % sqlvalues(
            origin.archive, PackagePublishingStatus.PENDING,
            PackagePublishingStatus.PUBLISHED, origin.distroseries,
            origin.pocket)
    if origin.component is not None:
        find_newer_packages += (
            " AND secsrc.component = %s" % quote(origin.component))
    store.execute(find_newer_packages)

    # Now find the packages that exist in the source archive but *not* in
    # the target archive.
    find_origin_only_packages = """
        INSERT INTO tmp_merge_copy_data (
            s_sspph, s_sourcepackagerelease, sourcepackagename,
            sourcepackagename_id, s_version, missing, s_status,
            s_component, s_section)
        SELECT
            secsrc.id AS s_sspph,
            secsrc.sourcepackagerelease AS s_sourcepackagerelease,
            spn.name AS sourcepackagename,
            spn.id AS sourcepackagename_id,
            spr.version AS s_version,
            True AS missing,
            secsrc.status AS s_status,
            secsrc.component AS s_component,
            secsrc.section AS s_section
        FROM SourcePackagePublishingHistory secsrc
        JOIN SourcePackageRelease AS spr ON
            spr.id = secsrc.sourcepackagerelease
        JOIN SourcePackageName AS spn ON
            spn.id = spr.sourcepackagename
        WHERE
            secsrc.archive = %s AND
            secsrc.status IN (%s, %s) AND
            secsrc.distroseries = %s AND
            secsrc.pocket = %s AND
            spn.name NOT IN (
                SELECT sourcepackagename FROM tmp_merge_copy_data)
        """ % sqlvalues(
            origin.archive, PackagePublishingStatus.PENDING,
            PackagePublishingStatus.PUBLISHED, origin.distroseries,
            origin.pocket)
    if origin.component is not None:
        find_origin_only_packages += (
            " AND secsrc.component = %s" % quote(origin.component))
    store.execute(find_origin_only_packages)
def deviceDriverOwnersAffectedByBugs(
    self, bus=None, vendor_id=None, product_id=None, driver_name=None,
    package_name=None, bug_ids=None, bug_tags=None, affected_by_bug=False,
    subscribed_to_bug=False, user=None,
):
    """See `IHWSubmissionSet`."""
    store = IStore(HWSubmission)
    tables, clauses = make_submission_device_statistics_clause(
        bus, vendor_id, product_id, driver_name, package_name, False
    )
    tables.append(HWSubmission)
    clauses.append(HWSubmissionDevice.submission == HWSubmission.id)
    clauses.append(_userCanAccessSubmissionStormClause(user))

    if (bug_ids is None or len(bug_ids) == 0) and (
        bug_tags is None or len(bug_tags) == 0
    ):
        raise ParameterError("bug_ids or bug_tags must be supplied.")

    tables.append(Bug)
    if bug_ids is not None and bug_ids is not []:
        clauses.append(Bug.id.is_in(bug_ids))

    if bug_tags is not None and bug_tags is not []:
        clauses.extend([Bug.id == BugTag.bugID, BugTag.tag.is_in(bug_tags)])
        tables.append(BugTag)

    # If we OR-combine the search for bug owners, subscribers
    # and affected people on SQL level, the query runs very slow.
    # So let's run the queries separately and join the results
    # on Python level.
    # This would be quicker still if we did it as a single query
    # using UNION.
    owner_query = Select(
        columns=[HWSubmission.ownerID],
        tables=tables,
        where=And(*(clauses + [Bug.ownerID == HWSubmission.ownerID])),
    )
    user_ids = set(store.execute(owner_query))

    if subscribed_to_bug:
        subscriber_clauses = [
            BugSubscription.person_id == HWSubmission.ownerID,
            BugSubscription.bug == Bug.id,
        ]
        subscriber_query = Select(
            columns=[HWSubmission.ownerID],
            tables=tables + [BugSubscription],
            where=And(*(clauses + subscriber_clauses)),
        )
        user_ids.update(store.execute(subscriber_query))

    if affected_by_bug:
        affected_clauses = [
            BugAffectsPerson.personID == HWSubmission.ownerID,
            BugAffectsPerson.bug == Bug.id,
            BugAffectsPerson.affected,
        ]
        affected_query = Select(
            columns=[HWSubmission.ownerID],
            tables=tables + [BugAffectsPerson],
            where=And(*(clauses + affected_clauses)),
        )
        user_ids.update(store.execute(affected_query))

    # A "WHERE x IN (y, z...)" query needs at least one element
    # on the right side of IN.
    if len(user_ids) == 0:
        result = store.find(Person, False)
    else:
        user_ids = [row[0] for row in user_ids]
        result = store.find(Person, Person.id.is_in(user_ids))
    result.order_by(Person.displayname)
    return result
def deviceDriverOwnersAffectedByBugs(
        self, bus=None, vendor_id=None, product_id=None, driver_name=None,
        package_name=None, bug_ids=None, bug_tags=None,
        affected_by_bug=False, subscribed_to_bug=False, user=None):
    """See `IHWSubmissionSet`."""
    store = IStore(HWSubmission)
    tables, clauses = make_submission_device_statistics_clause(
        bus, vendor_id, product_id, driver_name, package_name, False)
    tables.append(HWSubmission)
    clauses.append(HWSubmissionDevice.submission == HWSubmission.id)
    clauses.append(_userCanAccessSubmissionStormClause(user))

    if ((bug_ids is None or len(bug_ids) == 0) and
            (bug_tags is None or len(bug_tags) == 0)):
        raise ParameterError('bug_ids or bug_tags must be supplied.')

    tables.append(Bug)
    if bug_ids is not None and bug_ids is not []:
        clauses.append(Bug.id.is_in(bug_ids))

    if bug_tags is not None and bug_tags is not []:
        clauses.extend([
            Bug.id == BugTag.bugID, BugTag.tag.is_in(bug_tags)])
        tables.append(BugTag)

    # If we OR-combine the search for bug owners, subscribers
    # and affected people on SQL level, the query runs very slow.
    # So let's run the queries separately and join the results
    # on Python level.
    # This would be quicker still if we did it as a single query
    # using UNION.
    owner_query = Select(
        columns=[HWSubmission.ownerID], tables=tables,
        where=And(*(clauses + [Bug.ownerID == HWSubmission.ownerID])))
    user_ids = set(store.execute(owner_query))

    if subscribed_to_bug:
        subscriber_clauses = [
            BugSubscription.person_id == HWSubmission.ownerID,
            BugSubscription.bug == Bug.id,
            ]
        subscriber_query = Select(
            columns=[HWSubmission.ownerID],
            tables=tables + [BugSubscription],
            where=And(*(clauses + subscriber_clauses)))
        user_ids.update(store.execute(subscriber_query))

    if affected_by_bug:
        affected_clauses = [
            BugAffectsPerson.personID == HWSubmission.ownerID,
            BugAffectsPerson.bug == Bug.id,
            BugAffectsPerson.affected,
            ]
        affected_query = Select(
            columns=[HWSubmission.ownerID],
            tables=tables + [BugAffectsPerson],
            where=And(*(clauses + affected_clauses)))
        user_ids.update(store.execute(affected_query))

    # A "WHERE x IN (y, z...)" query needs at least one element
    # on the right side of IN.
    if len(user_ids) == 0:
        result = store.find(Person, False)
    else:
        user_ids = [row[0] for row in user_ids]
        result = store.find(Person, Person.id.is_in(user_ids))
    result.order_by(Person.display_name)
    return result
class TestNameBlacklist(TestCaseWithFactory):
    layer = ZopelessDatabaseLayer

    def setUp(self):
        super(TestNameBlacklist, self).setUp()
        self.name_blacklist_set = getUtility(INameBlacklistSet)
        self.caret_foo_exp = self.name_blacklist_set.create(u'^foo')
        self.foo_exp = self.name_blacklist_set.create(u'foo')
        self.verbose_exp = self.name_blacklist_set.create(u'v e r b o s e')
        team = self.factory.makeTeam()
        self.admin_exp = self.name_blacklist_set.create(u'fnord', admin=team)
        self.store = IStore(self.foo_exp)
        self.store.flush()

    def name_blacklist_match(self, name, user_id=None):
        '''Return the result of the name_blacklist_match stored procedure.'''
        user_id = user_id or 0
        result = self.store.execute(
            "SELECT name_blacklist_match(%s, %s)", (name, user_id))
        return result.get_one()[0]

    def is_blacklisted_name(self, name, user_id=None):
        '''Call the is_blacklisted_name stored procedure and return the
        result.
        '''
        user_id = user_id or 0
        result = self.store.execute(
            "SELECT is_blacklisted_name(%s, %s)", (name, user_id))
        blacklisted = result.get_one()[0]
        self.assertIsNotNone(blacklisted, 'is_blacklisted_name returned NULL')
        return bool(blacklisted)

    def test_name_blacklist_match(self):
        # A name that is not blacklisted returns NULL/None.
        self.assertIsNone(self.name_blacklist_match(u"bar"))

        # A name that is blacklisted returns the id of the row in the
        # NameBlacklist table that matched. Rows are tried in order, and the
        # first match is returned.
        self.assertEqual(
            self.name_blacklist_match(u"foobar"), self.caret_foo_exp.id)
        self.assertEqual(
            self.name_blacklist_match(u"barfoo"), self.foo_exp.id)

    def test_name_blacklist_match_admin_does_not_match(self):
        # A user in the expression's admin team is exempt from the
        # blacklisted name restriction.
        user = self.admin_exp.admin.teamowner
        self.assertEqual(
            None, self.name_blacklist_match(u"fnord", user.id))

    def test_name_blacklist_match_launchpad_admin_can_change(self):
        # A Launchpad admin is exempt from any blacklisted name restriction
        # that has an admin.
        user = self.factory.makePerson()
        admins = getUtility(ILaunchpadCelebrities).admin
        admins.addMember(user, user)
        self.assertEqual(
            None, self.name_blacklist_match(u"fnord", user.id))

    def test_name_blacklist_match_launchpad_admin_cannot_change(self):
        # A Launchpad admin cannot override blacklisted names without admins.
        user = self.factory.makePerson()
        admins = getUtility(ILaunchpadCelebrities).admin
        admins.addMember(user, user)
        self.assertEqual(
            self.foo_exp.id, self.name_blacklist_match(u"barfoo", user.id))

    def test_name_blacklist_match_cache(self):
        # If the blacklist is changed in the DB, these changes are noticed.
        # This test is needed because the stored procedure keeps a cache
        # of the compiled regular expressions.
        self.assertEqual(
            self.name_blacklist_match(u"foobar"), self.caret_foo_exp.id)
        self.caret_foo_exp.regexp = u'nomatch'
        self.assertEqual(
            self.name_blacklist_match(u"foobar"), self.foo_exp.id)
        self.foo_exp.regexp = u'nomatch2'
        self.assertIsNone(self.name_blacklist_match(u"foobar"))

    def test_is_blacklisted_name(self):
        # is_blacklisted_name() is just a wrapper around name_blacklist_match
        # that is friendlier to use in a boolean context.
        self.assertFalse(self.is_blacklisted_name(u"bar"))
        self.assertTrue(self.is_blacklisted_name(u"foo"))
        self.caret_foo_exp.regexp = u'bar'
        self.foo_exp.regexp = u'bar2'
        self.assertFalse(self.is_blacklisted_name(u"foo"))

    def test_is_blacklisted_name_admin_false(self):
        # Users in the expression's admin team will return False.
        user = self.admin_exp.admin.teamowner
        self.assertFalse(self.is_blacklisted_name(u"fnord", user.id))

    def test_case_insensitive(self):
        self.assertTrue(self.is_blacklisted_name(u"Foo"))

    def test_verbose(self):
        # Test that the VERBOSE flag is used when compiling the regexp.
        self.assertTrue(self.is_blacklisted_name(u"verbose"))
class ArchiveExpirer(LaunchpadCronScript):
    """Helper class for expiring old PPA binaries.

    Any PPA binary older than 30 days that is superseded or deleted
    will be marked for immediate expiry.
    """
    blacklist = BLACKLISTED_PPAS
    whitelist = WHITELISTED_PPAS

    def add_my_options(self):
        """Add script command line options."""
        self.parser.add_option(
            "-n", "--dry-run", action="store_true",
            dest="dryrun", metavar="DRY_RUN", default=False,
            help="If set, no transactions are committed")
        self.parser.add_option(
            "-e", "--expire-after", action="store", type="int",
            dest="num_days", metavar="DAYS", default=15,
            help=("The number of days after which to expire binaries. "
                  "Must be specified."))

    def determineSourceExpirables(self, num_days):
        """Return expirable libraryfilealias IDs."""
        stay_of_execution = '%d days' % num_days
        archive_types = (ArchivePurpose.PPA, ArchivePurpose.PARTNER)

        # The subquery here has to repeat the checks for privacy and
        # blacklisting on *other* publications that are also done in
        # the main loop for the archive being considered.
        results = self.store.execute("""
            SELECT lfa.id
            FROM
                LibraryFileAlias AS lfa,
                Archive,
                SourcePackageReleaseFile AS sprf,
                SourcePackageRelease AS spr,
                SourcePackagePublishingHistory AS spph
            WHERE
                lfa.id = sprf.libraryfile
                AND spr.id = sprf.sourcepackagerelease
                AND spph.sourcepackagerelease = spr.id
                AND spph.dateremoved < (
                    CURRENT_TIMESTAMP AT TIME ZONE 'UTC' - interval %s)
                AND spph.archive = archive.id
                AND archive.purpose IN %s
                AND lfa.expires IS NULL
            EXCEPT
            SELECT sprf.libraryfile
            FROM
                SourcePackageRelease AS spr,
                SourcePackageReleaseFile AS sprf,
                SourcePackagePublishingHistory AS spph,
                Archive AS a,
                Person AS p
            WHERE
                spr.id = sprf.sourcepackagerelease
                AND spph.sourcepackagerelease = spr.id
                AND spph.archive = a.id
                AND p.id = a.owner
                AND (
                    (p.name IN %s AND a.purpose = %s)
                    OR (a.private IS TRUE
                        AND (p.name || '/' || a.name) NOT IN %s)
                    OR a.purpose NOT IN %s
                    OR dateremoved >
                        CURRENT_TIMESTAMP AT TIME ZONE 'UTC' - interval %s
                    OR dateremoved IS NULL);
            """ % sqlvalues(
                stay_of_execution, archive_types, self.blacklist,
                ArchivePurpose.PPA, self.whitelist, archive_types,
                stay_of_execution))
        lfa_ids = results.get_all()
        return lfa_ids

    def determineBinaryExpirables(self, num_days):
        """Return expirable libraryfilealias IDs."""
        stay_of_execution = '%d days' % num_days
        archive_types = (ArchivePurpose.PPA, ArchivePurpose.PARTNER)

        # The subquery here has to repeat the checks for privacy and
        # blacklisting on *other* publications that are also done in
        # the main loop for the archive being considered.
        results = self.store.execute("""
            SELECT lfa.id
            FROM
                LibraryFileAlias AS lfa,
                Archive,
                BinaryPackageFile AS bpf,
                BinaryPackageRelease AS bpr,
                BinaryPackagePublishingHistory AS bpph
            WHERE
                lfa.id = bpf.libraryfile
                AND bpr.id = bpf.binarypackagerelease
                AND bpph.binarypackagerelease = bpr.id
                AND bpph.dateremoved < (
                    CURRENT_TIMESTAMP AT TIME ZONE 'UTC'
                    - interval %(stay_of_execution)s)
                AND bpph.archive = archive.id
                AND archive.purpose IN %(archive_types)s
                AND lfa.expires IS NULL
            EXCEPT
            SELECT bpf.libraryfile
            FROM
                BinaryPackageRelease AS bpr,
                BinaryPackageFile AS bpf,
                BinaryPackagePublishingHistory AS bpph,
                Archive AS a,
                Person AS p
            WHERE
                bpr.id = bpf.binarypackagerelease
                AND bpph.binarypackagerelease = bpr.id
                AND bpph.archive = a.id
                AND p.id = a.owner
                AND (
                    (p.name IN %(blacklist)s AND a.purpose = %(ppa)s)
                    OR (a.private IS TRUE
                        AND (p.name || '/' || a.name) NOT IN %(whitelist)s)
                    OR a.purpose NOT IN %(archive_types)s
                    OR dateremoved > (
                        CURRENT_TIMESTAMP AT TIME ZONE 'UTC'
                        - interval %(stay_of_execution)s)
                    OR dateremoved IS NULL)
            """ % sqlvalues(
                stay_of_execution=stay_of_execution,
                archive_types=archive_types, blacklist=self.blacklist,
                whitelist=self.whitelist, ppa=ArchivePurpose.PPA))
        lfa_ids = results.get_all()
        return lfa_ids

    def main(self):
        self.logger.info('Starting the PPA binary expiration')
        num_days = self.options.num_days
        self.logger.info("Expiring files up to %d days ago" % num_days)

        self.store = IStore(Archive)

        lfa_ids = self.determineSourceExpirables(num_days)
        lfa_ids.extend(self.determineBinaryExpirables(num_days))
        batch_count = 0
        batch_limit = 500
        for id in lfa_ids:
            self.logger.info("Expiring libraryfilealias %s" % id)
            self.store.execute("""
                UPDATE libraryfilealias
                SET expires = CURRENT_TIMESTAMP AT TIME ZONE 'UTC'
                WHERE id = %s
                """ % id)
            batch_count += 1
            if batch_count % batch_limit == 0:
                if self.options.dryrun:
                    self.logger.info(
                        "%s done, not committing (dryrun mode)" % batch_count)
                    self.txn.abort()
                else:
                    self.logger.info(
                        "%s done, committing transaction" % batch_count)
                    self.txn.commit()

        if self.options.dryrun:
            self.txn.abort()
        else:
            self.txn.commit()
        self.logger.info('Finished PPA binary expiration')
class ArchiveExpirer(LaunchpadCronScript):
    """Helper class for expiring old PPA binaries.

    Any PPA binary older than 30 days that is superseded or deleted
    will be marked for immediate expiry.
    """
    blacklist = BLACKLISTED_PPAS
    whitelist = WHITELISTED_PPAS

    def add_my_options(self):
        """Add script command line options."""
        self.parser.add_option(
            "-n", "--dry-run", action="store_true",
            dest="dryrun", metavar="DRY_RUN", default=False,
            help="If set, no transactions are committed")
        self.parser.add_option(
            "-e", "--expire-after", action="store", type="int",
            dest="num_days", metavar="DAYS", default=15,
            help=("The number of days after which to expire binaries. "
                  "Must be specified."))

    def determineSourceExpirables(self, num_days):
        """Return expirable libraryfilealias IDs."""
        stay_of_execution = '%d days' % num_days
        archive_types = (ArchivePurpose.PPA, ArchivePurpose.PARTNER)

        # The subquery here has to repeat the checks for privacy and
        # blacklisting on *other* publications that are also done in
        # the main loop for the archive being considered.
        results = self.store.execute("""
            SELECT lfa.id
            FROM
                LibraryFileAlias AS lfa,
                Archive,
                SourcePackageReleaseFile AS sprf,
                SourcePackageRelease AS spr,
                SourcePackagePublishingHistory AS spph
            WHERE
                lfa.id = sprf.libraryfile
                AND spr.id = sprf.sourcepackagerelease
                AND spph.sourcepackagerelease = spr.id
                AND spph.dateremoved < (
                    CURRENT_TIMESTAMP AT TIME ZONE 'UTC' - interval %s)
                AND spph.archive = archive.id
                AND archive.purpose IN %s
                AND lfa.expires IS NULL
            EXCEPT
            SELECT sprf.libraryfile
            FROM
                SourcePackageRelease AS spr,
                SourcePackageReleaseFile AS sprf,
                SourcePackagePublishingHistory AS spph,
                Archive AS a,
                Person AS p
            WHERE
                spr.id = sprf.sourcepackagerelease
                AND spph.sourcepackagerelease = spr.id
                AND spph.archive = a.id
                AND p.id = a.owner
                AND (
                    (p.name IN %s AND a.purpose = %s)
                    OR (a.private IS TRUE
                        AND (p.name || '/' || a.name) NOT IN %s)
                    OR a.purpose NOT IN %s
                    OR dateremoved >
                        CURRENT_TIMESTAMP AT TIME ZONE 'UTC' - interval %s
                    OR dateremoved IS NULL);
            """ % sqlvalues(
                stay_of_execution, archive_types, self.blacklist,
                ArchivePurpose.PPA, self.whitelist, archive_types,
                stay_of_execution))
        lfa_ids = results.get_all()
        return lfa_ids

    def determineBinaryExpirables(self, num_days):
        """Return expirable libraryfilealias IDs."""
        stay_of_execution = '%d days' % num_days
        archive_types = (ArchivePurpose.PPA, ArchivePurpose.PARTNER)

        # The subquery here has to repeat the checks for privacy and
        # blacklisting on *other* publications that are also done in
        # the main loop for the archive being considered.
        results = self.store.execute("""
            SELECT lfa.id
            FROM
                LibraryFileAlias AS lfa,
                Archive,
                BinaryPackageFile AS bpf,
                BinaryPackageRelease AS bpr,
                BinaryPackagePublishingHistory AS bpph
            WHERE
                lfa.id = bpf.libraryfile
                AND bpr.id = bpf.binarypackagerelease
                AND bpph.binarypackagerelease = bpr.id
                AND bpph.dateremoved < (
                    CURRENT_TIMESTAMP AT TIME ZONE 'UTC'
                    - interval %(stay_of_execution)s)
                AND bpph.archive = archive.id
                AND archive.purpose IN %(archive_types)s
                AND lfa.expires IS NULL
            EXCEPT
            SELECT bpf.libraryfile
            FROM
                BinaryPackageRelease AS bpr,
                BinaryPackageFile AS bpf,
                BinaryPackagePublishingHistory AS bpph,
                Archive AS a,
                Person AS p
            WHERE
                bpr.id = bpf.binarypackagerelease
                AND bpph.binarypackagerelease = bpr.id
                AND bpph.archive = a.id
                AND p.id = a.owner
                AND (
                    (p.name IN %(blacklist)s AND a.purpose = %(ppa)s)
                    OR (a.private IS TRUE
                        AND (p.name || '/' || a.name) NOT IN %(whitelist)s)
                    OR a.purpose NOT IN %(archive_types)s
                    OR dateremoved > (
                        CURRENT_TIMESTAMP AT TIME ZONE 'UTC'
                        - interval %(stay_of_execution)s)
                    OR dateremoved IS NULL)
            """ % sqlvalues(
                stay_of_execution=stay_of_execution,
                archive_types=archive_types,
                blacklist=self.blacklist,
                whitelist=self.whitelist,
                ppa=ArchivePurpose.PPA))
        lfa_ids = results.get_all()
        return lfa_ids

    def main(self):
        self.logger.info('Starting the PPA binary expiration')
        num_days = self.options.num_days
        self.logger.info("Expiring files up to %d days ago" % num_days)

        self.store = IStore(Archive)

        lfa_ids = self.determineSourceExpirables(num_days)
        lfa_ids.extend(self.determineBinaryExpirables(num_days))
        batch_count = 0
        batch_limit = 500
        for id in lfa_ids:
            self.logger.info("Expiring libraryfilealias %s" % id)
            self.store.execute("""
                UPDATE libraryfilealias
                SET expires = CURRENT_TIMESTAMP AT TIME ZONE 'UTC'
                WHERE id = %s
                """ % id)
            batch_count += 1
            if batch_count % batch_limit == 0:
                if self.options.dryrun:
                    self.logger.info(
                        "%s done, not committing (dryrun mode)" % batch_count)
                    self.txn.abort()
                else:
                    self.logger.info(
                        "%s done, committing transaction" % batch_count)
                    self.txn.commit()

        if self.options.dryrun:
            self.txn.abort()
        else:
            self.txn.commit()
        self.logger.info('Finished PPA binary expiration')
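# Illustrative entry point, not from the original source: running the
# ArchiveExpirer class above the way Launchpad cron scripts are normally
# invoked.  The script name, dbuser and lock_and_run() call follow the usual
# LaunchpadCronScript pattern but are assumptions here.
if __name__ == '__main__':
    script = ArchiveExpirer(
        'expire-archive-files', dbuser='binaryfile-expire')
    script.lock_and_run()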
def _findBuildCandidate(self):
    """Find a candidate job for dispatch to an idle buildd slave.

    Returns the pending BuildQueue item with the highest score for this
    builder, or None if no candidate is available.

    :return: A candidate job.
    """
    def qualify_subquery(job_type, sub_query):
        """Put the sub-query into a job type context."""
        qualified_query = """
            ((BuildQueue.job_type != %s) OR EXISTS(%%s))
        """ % sqlvalues(job_type)
        qualified_query %= sub_query
        return qualified_query

    logger = self._getSlaveScannerLogger()
    candidate = None

    general_query = """
        SELECT buildqueue.id FROM buildqueue, job
        WHERE
            buildqueue.job = job.id
            AND job.status = %s
            AND (
                -- The processor values either match or the candidate
                -- job is processor-independent.
                buildqueue.processor = %s OR
                buildqueue.processor IS NULL)
            AND (
                -- The virtualized values either match or the candidate
                -- job does not care about virtualization and the idle
                -- builder *is* virtualized (the latter is a security
                -- precaution preventing the execution of untrusted code
                -- on native builders).
                buildqueue.virtualized = %s OR
                (buildqueue.virtualized IS NULL AND %s = TRUE))
            AND buildqueue.builder IS NULL
    """ % sqlvalues(
        JobStatus.WAITING, self.processor, self.virtualized,
        self.virtualized)
    order_clause = " ORDER BY buildqueue.lastscore DESC, buildqueue.id"

    extra_queries = []
    job_classes = specific_job_classes()
    for job_type, job_class in job_classes.iteritems():
        query = job_class.addCandidateSelectionCriteria(
            self.processor, self.virtualized)
        if query == '':
            # This job class does not need to refine candidate jobs
            # further.
            continue

        # The sub-query should only apply to jobs of the right type.
        extra_queries.append(qualify_subquery(job_type, query))
    query = ' AND '.join([general_query] + extra_queries) + order_clause

    store = IStore(self.__class__)
    candidate_jobs = store.execute(query).get_all()

    for (candidate_id,) in candidate_jobs:
        candidate = getUtility(IBuildQueueSet).get(candidate_id)
        job_class = job_classes[candidate.job_type]
        candidate_approved = job_class.postprocessCandidate(
            candidate, logger)
        if candidate_approved:
            return candidate

    return None