def __contains__(self, name):
    """See `IPillarNameSet`."""
    name = ensure_unicode(name)
    # The name matches either a canonical row directly, or an alias row
    # whose alias_for points at the canonical row; only active,
    # non-alias rows count as existing pillars.
    query = """
        SELECT TRUE
        FROM PillarName
        WHERE (id IN (SELECT alias_for FROM PillarName WHERE name=?)
               OR name=?)
            AND alias_for IS NULL
            AND active IS TRUE
        """
    result = IStore(PillarName).execute(query, [name, name])
    return result.get_one() is not None
def __contains__(self, name):
    """See `IPillarNameSet`."""
    # Resolve aliases as well as direct names: a hit is either a row
    # whose name matches, or the canonical target (alias_for) of such a
    # row.  Inactive and alias rows themselves never match.
    store = IStore(PillarName)
    result = store.execute(
        """
        SELECT TRUE
        FROM PillarName
        WHERE (id IN (SELECT alias_for FROM PillarName WHERE name=?)
               OR name=?)
            AND alias_for IS NULL
            AND active IS TRUE
        """, [ensure_unicode(name)] * 2)
    return result.get_one() is not None
def numDevicesInSubmissions(
        self, bus=None, vendor_id=None, product_id=None, driver_name=None,
        package_name=None, distro_target=None):
    """See `IHWSubmissionDeviceSet`."""
    # Base tables/conditions for the device statistics query; the
    # trailing False selects the non-distinct variant of the clause.
    tables, where_clauses = make_submission_device_statistics_clause(
        bus, vendor_id, product_id, driver_name, package_name, False)
    distro_tables, distro_clauses = make_distro_target_clause(distro_target)
    if distro_clauses:
        tables.extend(distro_tables)
        where_clauses.extend(distro_clauses)
    # Join devices to their parent submissions.
    where_clauses.append(HWSubmissionDevice.submission == HWSubmission.id)
    count_query = Select(
        columns=[Count()], tables=tables, where=And(*where_clauses))
    return IStore(HWSubmissionDevice).execute(count_query).get_one()[0]
def _getFreeBuildersCount(self, processor, virtualized):
    """How many builders capable of running jobs for the given
    processor and virtualization combination are idle/free at present?
    """
    # A builder is free when it is OK, not in manual mode, and no
    # BuildQueue row currently points at it.
    query = """
        SELECT COUNT(id) FROM builder
        WHERE
            builderok = TRUE AND manual = FALSE
            AND id NOT IN (
                SELECT builder FROM BuildQueue WHERE builder IS NOT NULL)
            AND virtualized = %s
        """ % sqlvalues(normalize_virtualization(virtualized))
    if processor is not None:
        # Restrict to builders with the required processor type.
        query += " AND processor = %s " % sqlvalues(processor)
    return IStore(BuildQueue).execute(query).get_one()[0]
def _getFreeBuildersCount(self, processor, virtualized):
    """How many builders capable of running jobs for the given
    processor and virtualization combination are idle/free at present?
    """
    # Conditions: builder is healthy, not manually managed, not
    # currently assigned a BuildQueue entry, and matches the requested
    # virtualization setting.
    conditions = """
        builderok = TRUE AND manual = FALSE
        AND id NOT IN (
            SELECT builder FROM BuildQueue WHERE builder IS NOT NULL)
        AND virtualized = %s
        """ % sqlvalues(normalize_virtualization(virtualized))
    if processor is not None:
        conditions += " AND processor = %s " % sqlvalues(processor)
    result_set = IStore(BuildQueue).execute(
        "SELECT COUNT(id) FROM builder WHERE " + conditions)
    return result_set.get_one()[0]
def getByName(self, name, ignore_inactive=False):
    """Return the pillar with the given name.

    If ignore_inactive is True, then only active pillars are considered.

    If no pillar is found, None is returned.
    """
    # Two queries (PillarName lookup, then the matching utility's .get)
    # are expected to be faster than OUTER JOINing the Project, Product
    # and Distribution tables, and work better with SQLObject.
    active_filter = " AND active IS TRUE" if ignore_inactive else ""
    query = """
        SELECT id, product, project, distribution
        FROM PillarName
        WHERE (id = (SELECT alias_for FROM PillarName WHERE name=?)
               OR name=?)
            AND alias_for IS NULL%s
        LIMIT 1
        """ % active_filter
    row = IStore(PillarName).execute(
        query, [ensure_unicode(name)] * 2).get_one()
    if row is None:
        return None
    # Exactly one of product/project/distribution is non-NULL.
    assert len([column for column in row[1:] if column is None]) == 2, (
        "One (and only one) of product, project or distribution may be "
        "NOT NULL: %s" % row[1:])
    _pillar_id, product, project, distribution = row
    if product is not None:
        return getUtility(IProductSet).get(product)
    elif project is not None:
        return getUtility(IProjectGroupSet).get(project)
    else:
        return getUtility(IDistributionSet).get(distribution)
def getByName(self, name, ignore_inactive=False):
    """Return the pillar with the given name.

    If ignore_inactive is True, then only active pillars are considered.

    If no pillar is found, None is returned.
    """
    # Two round trips (PillarName first, then the appropriate utility)
    # beat OUTER JOINing Project, Product and Distribution here, and
    # play better with SQLObject.
    name = ensure_unicode(name)
    query = """
        SELECT id, product, project, distribution
        FROM PillarName
        WHERE (id = (SELECT alias_for FROM PillarName WHERE name=?)
               OR name=?)
            AND alias_for IS NULL%s
        LIMIT 1
        """
    if ignore_inactive:
        query %= " AND active IS TRUE"
    else:
        query %= ""
    row = IStore(PillarName).execute(query, [name, name]).get_one()
    if row is None:
        return None
    # Exactly two of the three foreign keys must be NULL.
    assert sum(1 for column in row[1:] if column is None) == 2, (
        "One (and only one) of product, project or distribution may be "
        "NOT NULL: %s" % row[1:])
    _, product, project, distribution = row
    if product is not None:
        return getUtility(IProductSet).get(product)
    if project is not None:
        return getUtility(IProjectGroupSet).get(project)
    return getUtility(IDistributionSet).get(distribution)
def numDevicesInSubmissions(
        self, bus=None, vendor_id=None, product_id=None, driver_name=None,
        package_name=None, distro_target=None):
    """See `IHWSubmissionDeviceSet`."""
    tables, where_clauses = make_submission_device_statistics_clause(
        bus, vendor_id, product_id, driver_name, package_name, False)
    distro_tables, distro_clauses = make_distro_target_clause(
        distro_target)
    # Fold in distro-target filtering only when it produced conditions.
    if distro_clauses:
        tables += distro_tables
        where_clauses += distro_clauses
    where_clauses.append(
        HWSubmissionDevice.submission == HWSubmission.id)
    result = IStore(HWSubmissionDevice).execute(
        Select(columns=[Count()], tables=tables,
               where=And(*where_clauses)))
    return result.get_one()[0]
def _estimateTimeToNextBuilder(self):
    """Estimate time until next builder becomes available.

    For the purpose of estimating the dispatch time of the job of
    interest (JOI) we need to know how long it will take until the
    job at the head of JOI's queue is dispatched.

    There are two cases to consider here: the head job is

        - processor dependent: only builders with the matching
          processor/virtualization combination should be considered.
        - *not* processor dependent: all builders with the matching
          virtualization setting should be considered.

    :return: The estimated number of seconds untils a builder capable
        of running the head job becomes available.
    """
    head_job_platform = self._getHeadJobPlatform()

    # Return a zero delay if we still have free builders available for the
    # given platform/virtualization combination.
    free_builders = self._getFreeBuildersCount(*head_job_platform)
    if free_builders > 0:
        return 0

    head_job_processor, head_job_virtualized = head_job_platform

    # Fix "now" once so both EXTRACT expressions below see the same
    # timestamp.
    now = self._now()
    # Minimum remaining run time across all currently-running jobs on
    # eligible builders: remaining = estimated_duration - elapsed.
    delay_query = """
        SELECT MIN(
          CASE WHEN
            EXTRACT(EPOCH FROM
              (BuildQueue.estimated_duration -
               (((%s AT TIME ZONE 'UTC') - Job.date_started)))) >= 0
          THEN
            EXTRACT(EPOCH FROM
              (BuildQueue.estimated_duration -
               (((%s AT TIME ZONE 'UTC') - Job.date_started))))
          ELSE
            -- Assume that jobs that have overdrawn their estimated
            -- duration time budget will complete within 2 minutes.
            -- This is a wild guess but has worked well so far.
            --
            -- Please note that this is entirely innocuous i.e. if our
            -- guess is off nothing bad will happen but our estimate will
            -- not be as good as it could be.
            120
          END)
        FROM
            BuildQueue, Job, Builder
        WHERE
            BuildQueue.job = Job.id
            AND BuildQueue.builder = Builder.id
            AND Builder.manual = False
            AND Builder.builderok = True
            AND Job.status = %s
            AND Builder.virtualized = %s
        """ % sqlvalues(
        now, now, JobStatus.RUNNING,
        normalize_virtualization(head_job_virtualized))
    if head_job_processor is not None:
        # Only look at builders with specific processor types.
        delay_query += """
            AND Builder.processor = %s
            """ % sqlvalues(head_job_processor)
    result_set = IStore(BuildQueue).execute(delay_query)
    head_job_delay = result_set.get_one()[0]
    # MIN() yields NULL when no running job matched; report that as no
    # delay.
    return (0 if head_job_delay is None else int(head_job_delay))
def store(self):
    """Finish an upload: verify digests, register the file, move it.

    Closes the temporary file, checks the SHA-1 digest against what the
    client sent (if any), optionally verifies the client's database
    name, creates content/alias records when no contentID was supplied,
    and finally moves the temporary file to its permanent location.

    :raises DigestMismatchError: if the client-supplied digest does not
        match what was actually received (the temp file is removed).
    :raises WrongDatabaseError: if the client's database name matches
        neither the configured nor the actual database name.
    :return: (contentID, aliasID) when this call created them, or
        (contentID, None) when the client supplied the contentID.
    """
    self.debugLog.append('storing %r, size %r'
                         % (self.filename, self.size))
    self.tmpfile.close()

    # Verify the digest matches what the client sent us
    dstDigest = self.sha1_digester.hexdigest()
    if self.srcDigest is not None and dstDigest != self.srcDigest:
        # XXX: Andrew Bennetts 2004-09-20: Write test that checks that
        # the file really is removed or renamed, and can't possibly be
        # left in limbo
        os.remove(self.tmpfilepath)
        raise DigestMismatchError(self.srcDigest, dstDigest)

    try:
        # If the client told us the name of the database it's using,
        # check that it matches.
        if self.databaseName is not None:
            # Per Bug #840068, there are two methods of getting the
            # database name (connection string and db
            # introspection), and they can give different results
            # due to pgbouncer database aliases. Lets check both,
            # and succeed if either matches.
            config_dbname = ConnectionString(
                dbconfig.rw_main_master).dbname

            result = IStore(Product).execute("SELECT current_database()")
            real_dbname = result.get_one()[0]
            if self.databaseName not in (config_dbname, real_dbname):
                raise WrongDatabaseError(
                    self.databaseName, (config_dbname, real_dbname))

            self.debugLog.append(
                'database name %r ok' % (self.databaseName, ))
        # If we haven't got a contentID, we need to create one and return
        # it to the client.
        if self.contentID is None:
            contentID = self.storage.library.add(
                dstDigest, self.size, self.md5_digester.hexdigest(),
                self.sha256_digester.hexdigest())
            aliasID = self.storage.library.addAlias(
                contentID, self.filename, self.mimetype, self.expires)
            self.debugLog.append('created contentID: %r, aliasID: %r.'
                                 % (contentID, aliasID))
        else:
            contentID = self.contentID
            aliasID = None
            self.debugLog.append('received contentID: %r' % (contentID, ))
    # NOTE(review): bare except is deliberate here — cleanup/log then
    # re-raise, so even BaseException propagates after being recorded.
    except:
        # Abort transaction and re-raise
        self.debugLog.append('failed to get contentID/aliasID, aborting')
        raise

    # Move file to final location
    try:
        self._move(contentID)
    except:
        # Abort DB transaction
        self.debugLog.append('failed to move file, aborting')
        # Remove file
        os.remove(self.tmpfilepath)
        # Re-raise
        raise

    # Commit any DB changes
    self.debugLog.append('committed')

    # Return the IDs if we created them, or None otherwise
    return contentID, aliasID
def _estimateTimeToNextBuilder(self):
    """Estimate time until next builder becomes available.

    For the purpose of estimating the dispatch time of the job of
    interest (JOI) we need to know how long it will take until the
    job at the head of JOI's queue is dispatched.

    There are two cases to consider here: the head job is

        - processor dependent: only builders with the matching
          processor/virtualization combination should be considered.
        - *not* processor dependent: all builders with the matching
          virtualization setting should be considered.

    :return: The estimated number of seconds untils a builder capable
        of running the head job becomes available.
    """
    head_job_platform = self._getHeadJobPlatform()

    # Return a zero delay if we still have free builders available for the
    # given platform/virtualization combination.
    free_builders = self._getFreeBuildersCount(*head_job_platform)
    if free_builders > 0:
        return 0

    head_job_processor, head_job_virtualized = head_job_platform

    # Capture a single "now" so both occurrences in the query agree.
    now = self._now()
    # Smallest remaining run time (estimated_duration minus elapsed)
    # over all running jobs on healthy, automatic builders.
    delay_query = """
        SELECT MIN(
          CASE WHEN
            EXTRACT(EPOCH FROM
              (BuildQueue.estimated_duration -
               (((%s AT TIME ZONE 'UTC') - Job.date_started)))) >= 0
          THEN
            EXTRACT(EPOCH FROM
              (BuildQueue.estimated_duration -
               (((%s AT TIME ZONE 'UTC') - Job.date_started))))
          ELSE
            -- Assume that jobs that have overdrawn their estimated
            -- duration time budget will complete within 2 minutes.
            -- This is a wild guess but has worked well so far.
            --
            -- Please note that this is entirely innocuous i.e. if our
            -- guess is off nothing bad will happen but our estimate will
            -- not be as good as it could be.
            120
          END)
        FROM
            BuildQueue, Job, Builder
        WHERE
            BuildQueue.job = Job.id
            AND BuildQueue.builder = Builder.id
            AND Builder.manual = False
            AND Builder.builderok = True
            AND Job.status = %s
            AND Builder.virtualized = %s
        """ % sqlvalues(
        now, now, JobStatus.RUNNING,
        normalize_virtualization(head_job_virtualized))
    if head_job_processor is not None:
        # Only look at builders with specific processor types.
        delay_query += """
            AND Builder.processor = %s
            """ % sqlvalues(head_job_processor)
    result_set = IStore(BuildQueue).execute(delay_query)
    head_job_delay = result_set.get_one()[0]
    # NULL (no matching running job) is treated as zero delay.
    return (0 if head_job_delay is None else int(head_job_delay))
def store(self):
    """Complete the upload and make the file permanent.

    Verifies the received data's SHA-1 against the client-supplied
    digest, cross-checks the client's claimed database name against
    both the connection-string name and the live database name,
    registers new content/alias rows when no contentID was given, and
    moves the temp file into place.

    :raises DigestMismatchError: on digest mismatch (temp file removed).
    :raises WrongDatabaseError: when the claimed database name matches
        neither known name.
    :return: (contentID, aliasID); aliasID is None when the client
        supplied its own contentID.
    """
    self.debugLog.append('storing %r, size %r'
                         % (self.filename, self.size))
    self.tmpfile.close()

    # Verify the digest matches what the client sent us
    dstDigest = self.sha1_digester.hexdigest()
    if self.srcDigest is not None and dstDigest != self.srcDigest:
        # XXX: Andrew Bennetts 2004-09-20: Write test that checks that
        # the file really is removed or renamed, and can't possibly be
        # left in limbo
        os.remove(self.tmpfilepath)
        raise DigestMismatchError(self.srcDigest, dstDigest)

    try:
        # If the client told us the name of the database it's using,
        # check that it matches.
        if self.databaseName is not None:
            # Per Bug #840068, there are two methods of getting the
            # database name (connection string and db
            # introspection), and they can give different results
            # due to pgbouncer database aliases. Lets check both,
            # and succeed if either matches.
            config_dbname = ConnectionString(
                dbconfig.rw_main_master).dbname

            result = IStore(Product).execute("SELECT current_database()")
            real_dbname = result.get_one()[0]
            if self.databaseName not in (config_dbname, real_dbname):
                raise WrongDatabaseError(
                    self.databaseName, (config_dbname, real_dbname))

            self.debugLog.append(
                'database name %r ok' % (self.databaseName, ))
        # If we haven't got a contentID, we need to create one and return
        # it to the client.
        if self.contentID is None:
            contentID = self.storage.library.add(
                dstDigest, self.size, self.md5_digester.hexdigest(),
                self.sha256_digester.hexdigest())
            aliasID = self.storage.library.addAlias(
                contentID, self.filename, self.mimetype, self.expires)
            self.debugLog.append('created contentID: %r, aliasID: %r.'
                                 % (contentID, aliasID))
        else:
            contentID = self.contentID
            aliasID = None
            self.debugLog.append('received contentID: %r' % (contentID, ))
    # NOTE(review): intentional bare except — log for debugging, then
    # re-raise unconditionally (even KeyboardInterrupt/SystemExit).
    except:
        # Abort transaction and re-raise
        self.debugLog.append('failed to get contentID/aliasID, aborting')
        raise

    # Move file to final location
    try:
        self._move(contentID)
    except:
        # Abort DB transaction
        self.debugLog.append('failed to move file, aborting')
        # Remove file
        os.remove(self.tmpfilepath)
        # Re-raise
        raise

    # Commit any DB changes
    self.debugLog.append('committed')

    # Return the IDs if we created them, or None otherwise
    return contentID, aliasID
def checkFlattened(self, bugtask, check_only=True):
    """Run the bugtask_flatten database function for `bugtask`.

    Accepts either a bugtask object (its ``id`` is used) or a raw id,
    and returns the function's result.
    """
    # Accept both an object carrying an .id attribute and a plain id.
    bugtask_id = getattr(bugtask, 'id', bugtask)
    return IStore(Bug).execute(
        "SELECT bugtask_flatten(?, ?)",
        (bugtask_id, check_only)).get_one()[0]