def generateTestFile(input_dir, output_filename, relative_root):
    root = FilePath(input_dir)
    relative_root = FilePath(relative_root)
    inputs = [relativePath(relative_root, x)
              for x in sorted(root.globChildren('*.input*'))]
    names = [methodName(FilePath(x).basename()) for x in inputs]
    outputs = [relativePath(relative_root, x)
               for x in sorted(root.globChildren('*.output*'))]
    if len(inputs) != len(outputs):
        raise Exception("Inputs: %s, Outputs: %s" % (len(inputs), len(outputs)))
    test_cases = zip(inputs, outputs, names)
    print 'test_cases', test_cases
    env = Environment()
    template = env.from_string(TEMPLATE)
    rendered = template.render({'test_cases': test_cases})
    FilePath(output_filename).setContent(rendered)
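# Note: generateTestFile above relies on two helpers it does not define.
# The sketch below is a guess at their contracts, not the original
# implementations; both bodies are assumptions for illustration only.
import os

def relativePath(relative_root, path):
    # Assumed: return the path of `path` relative to `relative_root`,
    # as a plain string suitable for rendering into the template.
    return os.path.relpath(path.path, relative_root.path)

def methodName(basename):
    # Assumed: turn a data file name into a valid test-method name,
    # e.g. "case-01.input.json" -> "test_case_01_input_json".
    return 'test_' + basename.replace('.', '_').replace('-', '_')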
def test_upgrade_configuration_versions(self, versions):
    """
    A range of versions can be upgraded and the configuration blob after
    upgrade will match that which is expected for the particular version.

    See flocker/control/test/configurations for individual version JSON
    files and generation code.
    """
    source_version, target_version = versions
    configs_dir = FilePath(__file__).sibling('configurations')
    # Choose the latest configuration number available for the given
    # version of the config. The configuration has increased in complexity
    # over time, so we have added additional configurations to verify that
    # the new fields can be correctly upgraded.
    source_json_glob = b"configuration_*_v%d.json" % source_version
    source_jsons = sorted(configs_dir.globChildren(source_json_glob),
                          key=lambda x: x.basename())
    config_num = int(source_jsons[-1].basename().split('_')[1])
    source_json_file = (b"configuration_%d_v%d.json"
                        % (config_num, source_version))
    target_json_file = (b"configuration_%d_v%d.json"
                        % (config_num, target_version))
    source_json = configs_dir.child(source_json_file).getContent()
    target_json = configs_dir.child(target_json_file).getContent()
    upgraded_json = migrate_configuration(
        source_version, target_version, source_json, ConfigurationMigration)
    self.assertEqual(json.loads(upgraded_json), json.loads(target_json))
def _dev_exists_from_blockdeviceid(blockdevice_id):
    """
    Check if the device exists before continuing.
    """
    # Get the device paths for Linux.
    devs_path = FilePath(DEVICE_FILEPATH)
    devs = devs_path.globChildren("%s*" % DEVICE_PREFIX)
    for dev in devs:
        if str(blockdevice_id) in dev.path:
            return True
    return False
def _get_dev_from_blockdeviceid(cls, blockdevice_id):
    """
    Get the real device path from blockdevice_id.
    """
    # Get the device paths for Linux.
    devs_path = FilePath(DEVICE_FILEPATH)
    cls.wait_for_volume(blockdevice_id)
    devs = devs_path.globChildren("%s*" % DEVICE_PREFIX)
    for dev in devs:
        if str(blockdevice_id) in dev.path:
            return dev.realpath()
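# For context: both helpers above scan one fixed directory for entries whose
# names embed the block device ID. The constants they reference are defined
# elsewhere in the driver; the values below are illustrative assumptions only.
DEVICE_FILEPATH = "/dev/disk/by-id"  # directory scanned for device nodes
DEVICE_PREFIX = "scsi-"              # name prefix matching this driver's devices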
def _loadFrom(self, id, copy=False):
    sketch = yield self.db.runQuery(
        "SELECT title FROM sketches WHERE guid = ?", (id,))
    if len(sketch) == 0:
        raise Error("Sketch %s not found." % id)

    self.title = sketch[0][0]
    self.loaded = True

    # Find the most recent snapshot file
    try:
        sketchDir = FilePath(self.dataDir).child(id)
        max_snap = max(map(
            lambda fp: int(os.path.splitext(fp.basename())[0].split('.')[1]),
            sketchDir.globChildren('snapshot.*.log')))
        log.msg("Found snapshot {:d} for sketch {:s}".format(max_snap, id))
    except ValueError:
        self._eventIndex = 0
        self._snapEventIndex = 0
    else:
        snapFile = sketchDir.child('snapshot.' + str(max_snap) + '.log')
        if max_snap > 0:
            snapshot = yield threads.deferToThread(snapFile.getContent)
            events = map(
                json.loads,
                filter(lambda e: e.strip() != "", snapshot.split("\n")))
            self.workspace.fromEvents(events)

            if copy:
                self._eventIndex = len(events)
                self._snapEventIndex = 0
            else:
                self._eventIndex = max_snap
                self._snapEventIndex = max_snap

    # Rename if a copy
    if copy:
        self.rename(self.title + " Copy")
def checkForStaleReports(_time=time):
    delayed_call = reactor.callLater(config.main.stale_time,
                                     checkForStaleReports)
    report_dir = FilePath(config.main.report_dir)
    for report_metadata_path in report_dir.globChildren("*" + METADATA_EXT):
        last_updated = _time.time() - report_metadata_path.getModificationTime()
        if last_updated > config.main.stale_time:
            report_id = report_metadata_path.basename().replace(
                METADATA_EXT, '')
            closeReport(report_id)
    return delayed_call
def getChannelStatus():
    statusFiles = FilePath(mumudvblogdir).globChildren('channels*')
    for path in statusFiles:
        # The adapter (card) number is embedded in the file name
        # after the word 'adapter'.
        card = path.path.split('adapter')[1].split('_')[0]
        for line in path.open():
            fields = line.split(':')
            key = fields[0] + ':' + fields[1]
            channelStatus[key] = {}
            try:
                channelStatus[key]['streamstatus'] = fields[3][:-1]
            except IndexError:
                channelStatus[key]['streamstatus'] = 'NotTransmitted'
            channelStatus[key]['card'] = card
            channelStatus[key]['ip'] = key
            channelStatus[key]['name'] = fields[2]
            # Set cardstatus to 0 if it does not yet exist
            channelStatus[key]['cardstatus'] = channelStatus[key].get(
                'cardstatus', 0)
def _loadFrom(self, id, copy=False):
    sketch = yield self.db.runQuery(
        "SELECT title FROM sketches WHERE guid = ?", (id,))
    if len(sketch) == 0:
        raise Error("Sketch %s not found." % id)

    self.title = sketch[0][0]
    self.loaded = True

    # Find the most recent snapshot file
    try:
        sketchDir = FilePath(self.dataDir).child(id)
        max_snap = max(
            map(
                lambda fp: int(os.path.splitext(fp.basename())[0].split('.')[1]),
                sketchDir.globChildren('snapshot.*.log')))
        log.msg("Found snapshot {:d} for sketch {:s}".format(max_snap, id))
    except ValueError:
        self._eventIndex = 0
        self._snapEventIndex = 0
    else:
        snapFile = sketchDir.child('snapshot.' + str(max_snap) + '.log')
        if max_snap > 0:
            snapshot = yield threads.deferToThread(snapFile.getContent)
            events = list(
                map(
                    json.loads,
                    filter(lambda e: e.strip() != b"", snapshot.split(b"\n"))))
            self.workspace.fromEvents(events)

            if copy:
                self._eventIndex = len(events)
                self._snapEventIndex = 0
            else:
                self._eventIndex = max_snap
                self._snapEventIndex = max_snap

    # Rename if a copy
    if copy:
        self.rename(self.title + " Copy")
def dateloop(self):
    imgdir = FilePath("images")
    imgs = imgdir.globChildren("*.jpg")

    # open/read
    i = 0

    def emit(i):
        index = i % len(imgs)
        data = imgs[index].open().read()
        data = binascii.b2a_base64(data).strip()
        timestamp = time.strftime("%D %H:%M:%S")
        jdata = '{"timestamp" : "%s", "imgdata" : "%s" }' % (timestamp, data)
        if self.isconnected:
            self.sendLine(jdata)
        reactor.callLater(1.0, emit, i + 1)

    emit(i)
def test_basicOperation(self):
    """
    Running the L{tap2deb} script produces a bunch of files using
    C{dpkg-buildpackage}.
    """
    # Skip tests if dpkg-buildpackage is not present
    if not procutils.which("dpkg-buildpackage"):
        raise SkipTest("dpkg-buildpackage must be present to test tap2deb")

    baseDir = FilePath(self.mktemp())
    baseDir.makedirs()

    # Make a temporary .tap file
    version = '1.0'
    tapName = 'lemon'
    tapFile = baseDir.child("%s.tap" % (tapName,))
    tapFile.setContent("# Dummy .tap file")

    buildDir = FilePath('.build')
    outputDir = buildDir.child('twisted-%s-%s' % (tapName, version))

    # Run
    args = ["--tapfile", tapFile.path, "--maintainer", self.maintainer]
    tap2deb.run(args)

    # Verify input files were created
    self.assertEqual(
        sorted(outputDir.listdir()),
        ['build-stamp', 'debian', 'install-stamp', 'lemon.tap'])
    debianDir = outputDir.child('debian')
    for name in ['README.Debian', 'conffiles', 'default', 'init.d',
                 'postinst', 'prerm', 'postrm', 'changelog', 'control',
                 'copyright', 'dirs', 'rules']:
        self.assertTrue(debianDir.child(name).exists())

    # Verify 4 output files were created
    self.assertTrue(buildDir.child('twisted-lemon_1.0_all.deb').exists())
    self.assertTrue(buildDir.child('twisted-lemon_1.0.tar.gz').exists())
    self.assertTrue(buildDir.child('twisted-lemon_1.0.dsc').exists())
    self.assertEqual(
        len(buildDir.globChildren('twisted-lemon_1.0_*.changes')), 1)
def loadEnvironments(directories=CONFIG_DIRS):
    for directory in directories:
        confDir = FilePath(os.path.expanduser(directory))
        for envFile in confDir.globChildren('*.env'):
            loadEnvironmentConfig(envFile)
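# loadEnvironmentConfig is not shown above. A minimal sketch of one plausible
# implementation, assuming each .env file holds KEY=value lines destined for
# os.environ; the real project's parser may differ.
import os

def loadEnvironmentConfig(envFile):
    for line in envFile.getContent().decode('utf-8').splitlines():
        line = line.strip()
        # Skip blanks and '#' comments.
        if not line or line.startswith('#'):
            continue
        key, _, value = line.partition('=')
        os.environ[key.strip()] = value.strip()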
def postOptions(self):
    appStore = self.getAppStore()
    siteStore = self.getStore()
    inroot = FilePath(self['path'])
    availableModes = ['service', 'config', 'entrymanager', 'entry',
                      'comment', 'metadata']

    if self['clear']:
        appStore.query(linkdb.LinkEntryComment).deleteFromStore()
        appStore.query(linkdb.LinkEntryMetadata).deleteFromStore()
        appStore.query(linkdb.LinkEntry).deleteFromStore()
        appStore.query(linkdb.LinkManager).deleteFromStore()

    mode = None
    service = None
    entryManager = None
    entry = None

    for fp in inroot.globChildren('*'):
        fd = fp.open()
        ief = ImportExportFile(fd, appStore)
        while True:
            line = ief.readline()
            if ief.eof:
                break

            if line in availableModes:
                mode = line

            if mode == 'service':
                # We assume the service already exists here.
                kw = ief.readService()
                sid = kw['serviceID']
                print 'Assuming service "%s" exists and is configured.' % (sid,)
                service = siteStore.findUnique(
                    IRCBotService, IRCBotService.serviceID == sid)
            elif mode == 'config':
                # For legacy reasons, we must still read the service config.
                kw = ief.readConfig()
                #service.config = config = IRCBotConfig(store=siteStore, **kw)
            elif mode == 'entrymanager':
                assert service is not None
                kw = ief.readEntryManager()
                print 'Creating entry manager for %(channel)s...' % kw
                entryManager = linkdb.LinkManager(
                    store=appStore, serviceID=service.serviceID, **kw)
                if self['clear']:
                    entryManager.searchIndexer.reset()
            elif mode == 'entry':
                assert entryManager is not None
                kw = ief.readEntry()
                #print 'Creating entry #%(eid)s for %(channel)s...' % kw
                entry = linkdb.LinkEntry(store=appStore, **kw)
            elif mode == 'comment':
                assert entry is not None
                kw = ief.readComment()
                linkdb.LinkEntryComment(store=appStore, parent=entry, **kw)
            elif mode == 'metadata':
                assert entry is not None
                kw = ief.readMetadata()
                linkdb.LinkEntryMetadata(store=appStore, entry=entry, **kw)
def log(self, request):
    line = '%s - - [%s] "%s" %d %s "%s" "%s"\n' % (
        request.getClientIP(),
        self._logDateTime,
        '%s %s %s' % (self._escape(request.method),
                      self._escape(request.uri),
                      self._escape(request.clientproto)),
        request.code,
        request.sentLength or "-",
        self._escape(request.getHeader('referer') or "-"),
        self._escape(request.getHeader('user-agent') or "-"))
    self.logFile.write(line)


def startTor2webHTTP(t2w, f):
    return internet.TCPServer(int(t2w.config.listen_port_http), f,
                              interface=config.listen_ip)


def startTor2webHTTPS(t2w, f):
    return internet.SSLServer(int(t2w.config.listen_port_https), f,
                              T2WSSLContextFactory(t2w.config.sslkeyfile,
                                                   t2w.config.sslcertfile,
                                                   t2w.config.ssldhfile,
                                                   t2w.config.cipher_list),
                              interface=config.listen_ip)


sys.excepthook = MailException

antanistaticmap = {}
localpath = FilePath("static/")
files = localpath.globChildren("*")
for file in files:
    antanistaticmap[file.basename()] = file.open().read()
antanistaticmap['tos.html'] = PageTemplate('tos.xml')

factory = T2WProxyFactory()

service_https = startTor2webHTTPS(t2w, factory)
service_https.setServiceParent(application)

service_http = startTor2webHTTP(t2w, factory)
service_http.setServiceParent(application)
def partiallyCompleteFiles(self):
    incoming = FilePath(self.config['files']['incoming'])
    return [fp for fp in incoming.globChildren('*.txconnect')
            if fp.getsize() > 0]
class DirDbmTests(unittest.TestCase):
    def setUp(self):
        self.path = FilePath(self.mktemp())
        self.dbm = dirdbm.open(self.path.path)
        self.items = ((b"abc", b"foo"), (b"/lalal", b"\000\001"),
                      (b"\000\012", b"baz"))

    def test_all(self):
        k = b64decode("//==")
        self.dbm[k] = b"a"
        self.dbm[k] = b"a"
        self.assertEqual(self.dbm[k], b"a")

    def test_rebuildInteraction(self):
        s = dirdbm.Shelf("dirdbm.rebuild.test")
        s[b"key"] = b"value"
        rebuild.rebuild(dirdbm)

    def test_dbm(self):
        d = self.dbm

        # Insert keys
        keys = []
        values = set()
        for k, v in self.items:
            d[k] = v
            keys.append(k)
            values.add(v)
        keys.sort()

        # Check they exist
        for k, v in self.items:
            self.assertIn(k, d)
            self.assertEqual(d[k], v)

        # Check non existent key
        try:
            d[b"XXX"]
        except KeyError:
            pass
        else:
            assert 0, "didn't raise KeyError on non-existent key"

        # Check keys(), values() and items()
        dbkeys = d.keys()
        dbvalues = set(d.values())
        dbitems = set(d.items())
        dbkeys.sort()
        items = set(self.items)
        self.assertEqual(
            keys,
            dbkeys,
            f".keys() output didn't match: {repr(keys)} != {repr(dbkeys)}",
        )
        self.assertEqual(
            values,
            dbvalues,
            ".values() output didn't match: {} != {}".format(
                repr(values), repr(dbvalues)),
        )
        self.assertEqual(
            items,
            dbitems,
            f"items() didn't match: {repr(items)} != {repr(dbitems)}",
        )

        copyPath = self.mktemp()
        d2 = d.copyTo(copyPath)

        copykeys = d.keys()
        copyvalues = set(d.values())
        copyitems = set(d.items())
        copykeys.sort()

        self.assertEqual(
            dbkeys,
            copykeys,
            ".copyTo().keys() didn't match: {} != {}".format(
                repr(dbkeys), repr(copykeys)),
        )
        self.assertEqual(
            dbvalues,
            copyvalues,
            ".copyTo().values() didn't match: %s != %s"
            % (repr(dbvalues), repr(copyvalues)),
        )
        self.assertEqual(
            dbitems,
            copyitems,
            ".copyTo().items() didn't match: %s != %s"
            % (repr(dbkeys), repr(copyitems)),
        )

        d2.clear()
        self.assertTrue(
            len(d2.keys()) == len(d2.values()) == len(d2.items()) == len(d2) == 0,
            ".clear() failed",
        )
        self.assertNotEqual(len(d), len(d2))
        shutil.rmtree(copyPath)

        # Delete items
        for k, v in self.items:
            del d[k]
            self.assertNotIn(
                k, d, "key is still in database, even though we deleted it")
        self.assertEqual(len(d.keys()), 0, "database has keys")
        self.assertEqual(len(d.values()), 0, "database has values")
        self.assertEqual(len(d.items()), 0, "database has items")
        self.assertEqual(len(d), 0, "database has items")

    def test_modificationTime(self):
        import time

        # The mtime value for files comes from a different place than the
        # gettimeofday() system call. On linux, gettimeofday() can be
        # slightly ahead (due to clock drift which gettimeofday() takes into
        # account but which open()/write()/close() do not), and if we are
        # close to the edge of the next second, time.time() can give a value
        # which is larger than the mtime which results from a subsequent
        # write(). I consider this a kernel bug, but it is beyond the scope
        # of this test. Thus we keep the range of acceptability to 3 seconds time.
        # -warner
        self.dbm[b"k"] = b"v"
        self.assertTrue(
            abs(time.time() - self.dbm.getModificationTime(b"k")) <= 3)
        self.assertRaises(KeyError, self.dbm.getModificationTime, b"nokey")

    def test_recovery(self):
        """
        DirDBM: test recovery from directory after a faked crash
        """
        k = self.dbm._encode(b"key1")
        with self.path.child(k + b".rpl").open(mode="wb") as f:
            f.write(b"value")

        k2 = self.dbm._encode(b"key2")
        with self.path.child(k2).open(mode="wb") as f:
            f.write(b"correct")
        with self.path.child(k2 + b".rpl").open(mode="wb") as f:
            f.write(b"wrong")

        with self.path.child("aa.new").open(mode="wb") as f:
            f.write(b"deleted")

        dbm = dirdbm.DirDBM(self.path.path)
        self.assertEqual(dbm[b"key1"], b"value")
        self.assertEqual(dbm[b"key2"], b"correct")
        self.assertFalse(self.path.globChildren("*.new"))
        self.assertFalse(self.path.globChildren("*.rpl"))

    def test_nonStringKeys(self):
        """
        L{dirdbm.DirDBM} operations only support string keys: other types
        should raise a L{TypeError}.
        """
        self.assertRaises(TypeError, self.dbm.__setitem__, 2, "3")
        try:
            self.assertRaises(TypeError, self.dbm.__setitem__, "2", 3)
        except unittest.FailTest:
            # dirdbm.Shelf.__setitem__ supports non-string values
            self.assertIsInstance(self.dbm, dirdbm.Shelf)
        self.assertRaises(TypeError, self.dbm.__getitem__, 2)
        self.assertRaises(TypeError, self.dbm.__delitem__, 2)
        self.assertRaises(TypeError, self.dbm.has_key, 2)
        self.assertRaises(TypeError, self.dbm.__contains__, 2)
        self.assertRaises(TypeError, self.dbm.getModificationTime, 2)

    def test_failSet(self):
        """
        Failure path when setting an item.
        """
        def _writeFail(path, data):
            path.setContent(data)
            raise OSError("fail to write")

        self.dbm[b"failkey"] = b"test"
        self.patch(self.dbm, "_writeFile", _writeFail)
        self.assertRaises(IOError, self.dbm.__setitem__, b"failkey", b"test2")
class TreatCam:
    CAPTURE_GLOB = "capture-*.jpg"
    CAPTURE_DATETIME_FORMAT = "%Y%m%d-%H%M%S"
    LAST_CAPTURE_LINK_NAME = "lastsnap.jpg"

    def __init__(self, reactor, config):
        LOGGER.info("Initializing TreatCam")
        self.config = config
        self.reactor = reactor
        self.agent = Agent(reactor)
        self.defers = []
        self.snapshotActionUrl = ("http://localhost:%d/0/action/snapshot"
                                  % self.config.motionControlPort)
        self.capturePath = FilePath(config.captureDir)
        self.lastCaptureLink = self.capturePath.child(
            TreatCam.LAST_CAPTURE_LINK_NAME)
        self.lastCaptureTime = None
        self.lastCaptureName = None
        self.findPreExistingLastCapture()
        self.notifier = INotify()
        self.notifier.startReading()
        self.notifier.watch(self.capturePath, mask=IN_CREATE,
                            callbacks=[self.notifyCallback])

    def __str__(self):
        return "TreatCam"

    def capturePhoto(self):
        LOGGER.debug("Received request to capture a photo")
        if not self.defers:
            LOGGER.debug("Sending HTTP GET request to motion daemon")
            httpRequestDefer = self.agent.request('GET', self.snapshotActionUrl)
            httpRequestDefer.addCallbacks(self.httpResponseCallback,
                                          self.httpResponseErrback)
        d = Deferred()
        self.addTimeout(d, 2)
        self.defers.append(d)
        return d

    def httpResponseCallback(self, ignored):
        LOGGER.debug("Received response from HTTP GET snapshot request to motion")

    def httpResponseErrback(self, failure):
        LOGGER.error("Error in HTTP GET snapshot request to motion")
        self.errbackDefers(failure)

    def errbackDefers(self, failure):
        defers = self.defers
        self.defers = []
        for d in defers:
            if not d.called:
                d.errback(Failure())

    def notifyCallback(self, ignored, filepath, mask):
        LOGGER.debug("Notify event %s on %s"
                     % (humanReadableMask(mask), filepath.basename()))
        if mask & IN_CREATE and filepath == self.lastCaptureLink:
            capture = filepath.realpath().basename()
            LOGGER.info("New capture detected: %s" % capture)
            try:
                self.lastCaptureTime = self.extractDateTimeFromCaptureName(capture)
                self.lastCaptureName = capture
            except ValueError:
                self.errbackDefers(Failure())
            if self.defers:
                defers = self.defers
                self.defers = []
                for d in defers:
                    if not d.called:
                        d.callback(capture)

    def getLastCaptureTime(self):
        return self.lastCaptureTime

    def getLastCaptureName(self):
        return self.lastCaptureName

    def addTimeout(self, d, duration):
        timeout = reactor.callLater(duration, d.cancel)

        def cancelTimeout(result):
            if timeout.active():
                timeout.cancel()
            return result

        d.addBoth(cancelTimeout)

    def extractDateTimeFromCaptureName(self, name):
        datetimeStr = name.split('-', 1)[-1].rsplit('-', 1)[0]
        return datetime.strptime(datetimeStr, TreatCam.CAPTURE_DATETIME_FORMAT)

    def findPreExistingLastCapture(self):
        captures = sorted(self.capturePath.globChildren(TreatCam.CAPTURE_GLOB))
        if captures:
            lastCapturePath = captures[-1]
            name = lastCapturePath.basename()
            try:
                self.lastCaptureTime = self.extractDateTimeFromCaptureName(name)
                self.lastCaptureName = name
                LOGGER.info("Recovering %s at startup as last capture file"
                            % self.lastCaptureName)
            except ValueError:
                LOGGER.exception("Unable to determine last capture file")

    def trimExcessCaptureFiles(self):
        captures = sorted(self.capturePath.globChildren(TreatCam.CAPTURE_GLOB))
        excessCaptures = len(captures) - self.config.capturesToRetain
        if excessCaptures > 0:
            for i in range(excessCaptures):
                LOGGER.info("Trimming: %s" % captures[i].basename())
                captures[i].remove()
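# A minimal sketch of wiring TreatCam into an application. CamConfig is an
# illustrative stand-in for the real configuration object; only the three
# attributes the class actually reads are assumed here, and the callback
# names are invented for the example.
from collections import namedtuple

from twisted.internet import reactor

CamConfig = namedtuple("CamConfig",
                       ["motionControlPort", "captureDir", "capturesToRetain"])

def onCapture(name):
    print("captured %s" % name)

def onError(failure):
    print("capture failed: %s" % failure)

cam = TreatCam(reactor, CamConfig(motionControlPort=8080,
                                  captureDir="/var/lib/motion",
                                  capturesToRetain=100))
d = cam.capturePhoto()
d.addCallbacks(onCapture, onError)
reactor.run()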