def setUp(self):
    """Set up the librarian garbage-collection test environment.

    Creates two duplicate library files, switches to the garbage
    collector's database user, and ensures every file the database
    knows about exists on disk before each test runs.
    """
    super(TestLibrarianGarbageCollection, self).setUp()
    self.client = LibrarianClient()
    self.patch(librariangc, 'log', BufferLogger())

    # A value we use in a number of tests. This represents the
    # stay of execution hard coded into the garbage collector.
    # We don't destroy any data unless it has been waiting to be
    # destroyed for longer than this period. We pick a value
    # that is close enough to the stay of execution so that
    # forgetting timezone information will break things, but
    # far enough so that how long it takes the test to run
    # is not an issue. 'stay_of_excution - 1 hour' fits these
    # criteria.
    self.recent_past = utc_now() - timedelta(days=6, hours=23)
    # A time beyond the stay of execution.
    self.ancient_past = utc_now() - timedelta(days=30)

    self.f1_id, self.f2_id = self._makeDupes()

    switch_dbuser(config.librarian_gc.dbuser)
    self.ztm = self.layer.txn

    # Make sure the files exist. We do this in setup, because we
    # need to use the get_file_path method later in the setup and we
    # want to be sure it is working correctly.
    path = librariangc.get_file_path(self.f1_id)
    # assertTrue is the non-deprecated spelling of failUnless.
    self.assertTrue(os.path.exists(path), "Librarian uploads failed")

    # Make sure that every file the database knows about exists on disk.
    # We manually remove them for tests that need to cope with missing
    # library items.
    self.ztm.begin()
    cur = cursor()
    cur.execute("SELECT id FROM LibraryFileContent")
    for content_id in (row[0] for row in cur.fetchall()):
        path = librariangc.get_file_path(content_id)
        if not os.path.exists(path):
            if not os.path.exists(os.path.dirname(path)):
                os.makedirs(os.path.dirname(path))
            # Use a context manager so the file handle is closed
            # promptly instead of leaking until garbage collection.
            with open(path, 'w') as placeholder:
                placeholder.write('whatever')
    self.ztm.abort()

    self.con = connect(
        user=config.librarian_gc.dbuser,
        isolation=ISOLATION_LEVEL_AUTOCOMMIT)
def test_time_is_now(self):
    # utc_now() must fall between two utcnow() readings taken
    # immediately before and after the call.
    def at_least(bound):
        return MatchesAny(GreaterThan(bound), Equals(bound))

    def at_most(bound):
        return MatchesAny(LessThan(bound), Equals(bound))

    before = datetime.utcnow().replace(tzinfo=UTC)
    observed = utc_now()
    after = datetime.utcnow().replace(tzinfo=UTC)

    self.assertThat(observed, at_least(before))
    self.assertThat(observed, at_most(after))
def date_updated(self):
    """See `IFeed`.

    Returns the most recent ``last_modified`` timestamp among the
    feed's items, or the current UTC time for an empty feed.

    :raises AssertionError: if the newest item has no
        ``last_modified`` date set.
    """
    sorted_items = sorted(
        self.getItems(),
        key=operator.attrgetter('last_modified'),
        reverse=True)
    if len(sorted_items) == 0:
        # datetime.isoformat() doesn't place the necessary "+00:00"
        # for the feedvalidator's check of the iso8601 date format
        # unless a timezone is specified with tzinfo.
        return utc_now()
    last_modified = sorted_items[0].last_modified
    if last_modified is None:
        # Call-form raise works on Python 2 and 3; the original
        # comma-form statement is a syntax error on Python 3.
        raise AssertionError('All feed entries require a date updated.')
    return last_modified
def __call__(self):
    """Render a maintenance-downtime notice, or nothing.

    Reads the scheduled maintenance time from '+maintenancetime.txt'
    (when present) and renders the notice template. Returns '' when
    there is no maintenance file, when its timestamp cannot be
    parsed, or when the downtime is still further away than
    self.toomuchtime.
    """
    if not os.path.exists('+maintenancetime.txt'):
        return ''
    # Use open() in a context manager instead of the Python-2-only
    # file() builtin, so the handle is closed promptly.
    with open('+maintenancetime.txt') as maintenance_file:
        message = maintenance_file.read()
    try:
        maintenancetime = parseDatetimetz(message)
    except DateTimeError:
        # XXX SteveAlexander 2005-09-22: log a warning here.
        return ''
    timeleft = maintenancetime - utc_now()
    if timeleft > self.toomuchtime:
        # Too far in the future to be worth announcing yet.
        return ''
    elif timeleft < self.notmuchtime:
        self.timelefttext = 'very very soon'
    else:
        self.timelefttext = 'in %s' % (
            DurationFormatterAPI(timeleft).approximateduration())
    return self.index()
def new(self, blob, expires=None):
    """See ITemporaryStorageManager.

    Stores ``blob`` in the librarian under an unguessable secret
    filename and returns a UUID token with which it can later be
    retrieved.

    :param blob: the raw data to store.
    :param expires: when the blob should expire; defaults to one
        week from now.
    :raises BlobTooLarge: if the blob is larger than
        config.launchpad.max_blob_size (when that limit is positive).
    :return: the UUID token for the new temporary blob, as a string.
    """
    if expires is None:
        # A week might be quite a long time, but it shouldn't hurt,
        # and it gives people enough time to create an account
        # before accessing the uploaded blob.
        expires = utc_now() + timedelta(weeks=1)

    # At this stage we could do some sort of throttling if we were
    # concerned about abuse of the temporary storage facility. For
    # example, we could check the number of rows in temporary storage,
    # or the total amount of space dedicated to temporary storage, and
    # return an error code if that volume was unacceptably high. But for
    # the moment we will just ensure the BLOB is not that LARGE.
    #
    # YAGNI? There are plenty of other ways to upload large chunks
    # of data to Launchpad that will hang around permanently. Size
    # limitations on uploads needs to be done in Zope3 to avoid DOS
    # attacks in general.
    max_blob_size = config.launchpad.max_blob_size
    if max_blob_size > 0 and len(blob) > max_blob_size:
        raise BlobTooLarge(len(blob))

    # create the BLOB and return the UUID
    new_uuid = str(uuid.uuid1())

    # We use a random filename, so only things that can look up the
    # secret can retrieve the original data (which is why we don't use
    # the UUID we return to the user as the filename, nor the filename
    # of the object they uploaded). uuid4() is used here because
    # uuid1() values are derived from the host MAC address and a
    # timestamp, making them guessable and unsuitable as a secret.
    secret = str(uuid.uuid4())
    file_alias = getUtility(ILibraryFileAliasSet).create(
        secret, len(blob), StringIO(blob),
        'application/octet-stream', expires
        )
    TemporaryBlobStorage(uuid=new_uuid, file_alias=file_alias)
    return new_uuid
def test_tzinfo(self):
    # utc_now() must produce a timezone-aware timestamp carrying the
    # UTC tzinfo object.
    timestamp = utc_now()
    self.assertEqual(timestamp.tzinfo, UTC)
def future(self):
    """See `IAnnouncement`."""
    announced = self.date_announced
    if announced is None:
        # An announcement without a date is treated as not yet made.
        return True
    return utc_now() < announced