def __init__(self, id, title=''):
    """Set up a Repository: base storage, OAI handler, catalog and cache."""
    StorageManager.__init__(self, id)
    self.title = title
    self.OAI = OAIHandler('OAI')
    self._create_catalog()
    # NOTE: the results cache needs better invalidation code; consider
    # fake=True if you are getting conflicts on publish.
    self._create_cache()
def start_task(self):
    """Fetch the page, pick a valid image URL at random and download it.

    The image is stored in a ``wallpaper`` folder below the current
    working directory.
    """
    print("Webparser is starting")
    # network request (with retries)
    request = NetworkRequest()
    page = request.get_with_retry(WebParser.default_retry_count,
                                  WebParser.url_gmb)
    # parse the page for candidate image URLs
    parser = DocumentParser(page)
    image_urls = parser.get_images_from_reddit()
    # Create the folder for storing the image.
    # BUG FIX: the original built the path as cwd + "\wallpaper\\", which
    # relies on a bogus "\w" escape and only works on Windows; os.path.join
    # is portable.  The trailing separator is preserved for callers that
    # concatenate filenames onto this path.
    full_directory_path = os.path.join(os.getcwd(), "wallpaper") + os.sep
    storage_manager = StorageManager(full_directory_path)
    storage_manager.createRootStorageDir()
    # Keep picking random candidates until one is a valid image.
    # (The redundant pre-loop random.choice was removed; the same request
    # object is reused for the validity check instead of a fresh instance.)
    while True:
        selected_image = random.choice(image_urls)
        print("Checking if " + selected_image + " is valid Image")
        if request.check_if_valid_image(selected_image):
            request.startImageDownload(selected_image, full_directory_path)
            break
def __init__(self, id, title=''):
    """Initialize the Repository object (storage, OAI, catalog, cache)."""
    StorageManager.__init__(self, id)
    self.OAI = OAIHandler('OAI')
    self.title = title
    self._create_catalog()
    # Results-cache invalidation is weak; pass fake=True if conflicts
    # occur on publish.
    self._create_cache()
def startAll(self):
    """Setup and start all threads.

    Builds the BufferListener, CheckScheduler and Uploader threads, wires
    them to a shared StorageManager, and starts them all.  Any setup error
    is logged as critical and terminates the process.
    """
    try:
        # Create StorageManager and Interpreter for BufferListener
        storMan = StorageManager()
        interpr = Interpreter(storMan)
        # Create BufferListener (only scheduled if it got a connection)
        buffLis = self.createBufferListener(interpr)
        if buffLis.conn:
            self.hsThreads.append(buffLis)
        # Get the nagios configuration section from the config file
        nagiosConf = self.config.itemsdict('NagiosPush')
        # Extract the machine name from the certificate filename.
        # BUG FIX: the dot is now escaped (r'...\.zip'); the old pattern
        # '([a-z0-9]+).zip' let the dot match any character.
        machine = re.search(r'([a-z0-9]+)\.zip',
                            self.config.get('Station', 'Certificate'))
        # machine.group(1) raises AttributeError on no match; that is
        # caught by the broad handler below and aborts startup.
        nagiosConf['machine_name'] = machine.group(1)
        checkSched = self.createCheckScheduler(interpr, nagiosConf)
        eventRate = checkSched.getEventRate()
        storMan.addObserver(eventRate)
        self.hsThreads.append(checkSched)
        # Central uploader
        up = self.createUploader(0, "Upload-datastore", nagiosConf)
        self.hsThreads.append(up)
        storMan.addObserver(up)
        up.setNumServer(self.numServers)
        # Set number of servers for our own StorageManager
        storMan.setNumServer(self.numServers)
        storMan.clearOldUploadedEvents()
        # Start all threads, running their run() function.
        for thread in self.hsThreads:
            thread.start()
    except Exception as msg:
        # 'except Exception as' works on Python 2.6+ and Python 3,
        # unlike the old 'except Exception, msg' form.
        logger.critical("Error HsMonitor: %s" % msg)
        sys.exit(1)
def __init__(self, config, interpreter):
    """Create the CheckScheduler thread and its collaborators."""
    super(CheckScheduler, self).__init__(name='CheckScheduler')
    self.stop_event = threading.Event()
    self.dicConfig = config
    # scheduler is created with an (initially empty) status object
    self.status = None
    self.sched = Scheduler(self.status)
    # object used to push check results to Nagios
    self.nagiosPush = NagiosPush(config)
    self.storageManager = StorageManager()
    self.interpreter = interpreter
    # event-rate check, exposed to observers
    self.eventRate = EventRate()
def __init__(self, config, interpreter):
    """Initialise the scheduler thread and all helper objects."""
    # invoke the parent (threading.Thread) constructor first
    threading.Thread.__init__(self)
    self.stop_event = threading.Event()
    self.dicConfig = config
    # the Scheduler starts with no status
    self.status = None
    self.sched = Scheduler(self.status)
    # pushes check results to Nagios
    self.nagiosPush = NagiosPush(config)
    self.storageManager = StorageManager()
    self.interpreter = interpreter
    # event-rate check shared with observers
    self.eventRate = EventRate()
def setUp(self):
    """Create four dummy events with payloads and a 2-server StorageManager."""
    # a few dummy events
    self.e1 = Event(0)
    self.e2 = Event(4)
    self.e3 = Event(5)
    self.e4 = Event(6)
    self.elist = [self.e1, self.e2, self.e3, self.e4]
    # attach bogus payloads to the events
    payloads = ["hello world", "snd message", "non-funny message", "whatever"]
    for event, payload in zip(self.elist, payloads):
        event.data = payload
    # storage manager under test
    self.sm = StorageManager()
    self.sm.setNumServer(2)
    self.sm.openConnection()
def init():
    """Initialise the module-level service singletons.

    Creates the storage backends and wires every management service to
    the shared StorageHandler instance.
    """
    global db
    global user_management
    global storage_handler
    global cart_management
    global order_management
    global product_management  # was declared twice; duplicate removed
    global sales_management
    storage_handler = StorageHandler()
    db = StorageManager()
    user_management = UserManagement(storage_handler)
    cart_management = CartManagement(storage_handler)
    product_management = ProductManagement(storage_handler)
    sales_management = SalesManagement(storage_handler)
    # OrderManagement depends on SalesManagement, so it is created last
    order_management = OrderManagement(storage_handler, sales_management)
def __init__(self, serverID, stationID, password, URL, config,
             retryAfter=MINWAIT, maxWait=MAXWAIT,
             minBatchSize=BATCHSIZE, maxBatchSize=BATCHSIZE):
    """Initialise the uploader thread.

    :param serverID: index of the server this uploader serves
    :param stationID: station identifier sent with each upload
    :param password: station password sent with each upload
    :param URL: upload endpoint
    :param config: Nagios configuration passed to NagiosPush
    :param retryAfter: initial wait between retries
    :param maxWait: upper bound for the retry wait
    :param minBatchSize: minimum events before an upload is attempted
    :param maxBatchSize: maximum events uploaded in one batch
    """
    self.storageManager = StorageManager()
    self.serverID = serverID
    self.stationID = stationID
    self.password = password
    self.URL = URL
    self.nagiosPush = NagiosPush(config)
    self.minBatchSize = minBatchSize
    self.maxBatchSize = maxBatchSize
    # BUG FIX: previously the module constants MINWAIT/MAXWAIT were stored
    # here, silently ignoring the retryAfter/maxWait arguments.
    self.retryAfter = retryAfter
    self.maxWait = maxWait
    # pending-event counter read by notify(); initialised here so it is
    # always defined (protected by numEventsLock)
    self.numEvents = 0
    # lock to protect numEvents
    self.numEventsLock = Semaphore(1)
    # semaphore to block while fewer than minBatchSize events are pending
    self.noEventsSem = Semaphore(0)
    super(Uploader, self).__init__(name='Uploader')
    self.stop_event = Event()
    self.isRunning = False
def get_test_storage_manager(root_db):
    """
    Generate testing storage manager instance

    :param root_db: indicate root database
    :return: the testing storage manager with local filesystem and a mongoDB
    """
    from storages.file_storage import file_storage
    from storages.mongo_db import mongo_conn, mongo_writer, mongo_reader

    # three filesystem tiers: hot / warm / cold
    fs_hot = file_storage({'name': '/mnt/hotsemantic/',
                           'pathname': '/mnt/hotsemantic/'})
    fs_warm = file_storage({'name': '/mnt/warmsemantic/',
                            'pathname': '/mnt/warmsemantic/'})
    fs_cold = file_storage({'name': '/mnt/coldsemantic/',
                            'pathname': '/mnt/coldsemantic/'})
    # secondary mongo connection on a non-default port
    mongo_db2 = mongo_conn({
        '__DB_ADDR__': 'localhost:27027',
    })
    storage_manager = StorageManager()
    storage_manager.add_storage(root_db, name='mongo_db',
                                write_handler=mongo_writer,
                                read_handler=mongo_reader)
    storage_manager.add_storage(mongo_db2, name='mongo_db2',
                                write_handler=mongo_writer,
                                read_handler=mongo_reader)
    storage_manager.add_storage(fs_hot, name='ceph_hot', storage_type='fs',
                                write_handler=fs_hot.write,
                                read_handler=fs_hot.read)
    storage_manager.add_storage(fs_warm, name='ceph_warm', storage_type='fs',
                                write_handler=fs_warm.write,
                                read_handler=fs_warm.read)
    storage_manager.add_storage(fs_cold, name='ceph_cold', storage_type='fs',
                                write_handler=fs_cold.write,
                                read_handler=fs_cold.read)
    storage_manager.set_default_storage('mongo_db2')
    # FIX: converted Python-2-only print statements to function calls so the
    # module also parses on Python 3; the printed output is unchanged.
    print('Pool Databases: %s' % (storage_manager.get_databases(),))
    print('Pool File systems: %s' % (storage_manager.get_filesystems(),))
    return storage_manager
class Uploader(Observer, Thread):
    """Thread that uploads batches of events to a remote database server.

    Observers call notify() when events arrive; the thread blocks on
    noEventsSem until at least minBatchSize events are pending.
    """

    def __init__(self, serverID, stationID, password, URL, config,
                 retryAfter=MINWAIT, maxWait=MAXWAIT,
                 minBatchSize=BATCHSIZE, maxBatchSize=BATCHSIZE):
        self.storageManager = StorageManager()
        self.serverID = serverID
        self.stationID = stationID
        self.password = password
        self.URL = URL
        self.nagiosPush = NagiosPush(config)
        self.minBatchSize = minBatchSize
        self.maxBatchSize = maxBatchSize
        # NOTE(review): these store the module constants and silently
        # ignore the retryAfter/maxWait arguments -- looks like a bug,
        # confirm intended behaviour before relying on the parameters.
        self.retryAfter = MINWAIT
        self.maxWait = MAXWAIT
        # lock to protect numEvents
        self.numEventsLock = Semaphore(1)
        # Semaphore to block if the number of events drops below minBatchSize
        self.noEventsSem = Semaphore(0)
        super(Uploader, self).__init__(name='Uploader')
        self.stop_event = Event()
        self.isRunning = False

    def setNumServer(self, numServer):
        """Sets the number of servers to upload to.

        Need to be set before changing the UploadedTo-status of events in
        the StorageManager.
        """
        self.storageManager.setNumServer(numServer)

    def stop(self):
        # signal the run loop to exit ...
        self.stop_event.set()
        # ... and release the semaphore so a blocked uploader wakes up
        self.noEventsSem.release()

    # class-level (shared) list of crash timestamps used by init_restart()
    crashes = []

    def init_restart(self):
        """Support for restarting crashed threads."""
        # refuse to restart if the last-but-two crash was under a minute ago
        if len(self.crashes) > 3 and time() - self.crashes[-3] < 60.:
            raise ThreadCrashError("Thread has crashed three times in "
                                   "less than a minute")
        else:
            super(Uploader, self).__init__()
            self.crashes.append(time())

    def notify(self, count=1):
        """Notify the uploader that count events were received."""
        if (self.isRunning):
            shouldRelease = 0
            self.numEventsLock.acquire()
            oldNumEvents = self.numEvents
            self.numEvents += count
            logger.debug('%i: %i events pending.' % (self.serverID,
                                                     self.numEvents))
            # calculate if uploader-thread should be unblocked
            # (only on the transition from below to at/above minBatchSize,
            # so the semaphore is released at most once per crossing)
            if (self.numEvents >= self.minBatchSize and
                    oldNumEvents < self.minBatchSize):
                shouldRelease = 1
            self.numEventsLock.release()
            if (shouldRelease):
                self.noEventsSem.release()

    def __getNumEventsToUpload(self):
        """Gives the number of events that the Uploader can upload now.

        The result will be between min and max batch size.
        If insufficient events are available this function will block on
        noEventSem.
        """
        shouldBlock = False
        self.numEventsLock.acquire()
        res = min(self.numEvents, self.maxBatchSize)
        if (res < self.minBatchSize):
            shouldBlock = True
        self.numEventsLock.release()
        if shouldBlock:
            logger.debug('%i: Blocked: Too few events' % self.serverID)
            # wait until notify() crosses the minBatchSize threshold
            self.noEventsSem.acquire()
            logger.debug('%i: Unblocked' % self.serverID)
            return self.minBatchSize
        else:
            return res

    def __upload(self, elist):
        """Upload a list of events to the database server."""
        # serialise the batch and checksum it so the server can verify it
        data = dumps(elist)
        checksum = md5(data).hexdigest()
        params = urlencode({'station_id': self.stationID,
                            'password': self.password,
                            'data': data, 'checksum': checksum})
        # Open the connection and send our data. Exceptions are caught
        # explicitly to make sure we understand the implications of errors.
        try:
            f = urlopen(self.URL, params)
        except (URLError, HTTPError), msg:
            # For example: connection refused or internal server error
            returncode = str(msg)
        except Exception, msg:
            returncode = ('Uncatched exception occured in function '
                          '__upload: %s' % str(msg))
        # NOTE(review): the method appears truncated here -- returncode is
        # assigned but never used or returned in the visible code.
class TestStorageManager(unittest.TestCase):
    """Tests for StorageManager: adding, fetching, upload status, observers."""

    def setUp(self):
        # create a few dummy events
        self.e1 = Event(0)
        self.e2 = Event(4)
        self.e3 = Event(5)
        self.e4 = Event(6)
        self.elist = [self.e1, self.e2, self.e3, self.e4]
        # add bogus information to an event
        self.e1.data = "hello world"
        self.e2.data = "snd message"
        self.e3.data = "non-funny message"
        self.e4.data = "whatever"
        # setup storagemanager (two upload targets)
        self.sm = StorageManager()
        self.sm.setNumServer(2)
        self.sm.openConnection()

    def tearDown(self):
        global observer_calls
        # remove all events to get back in virgin state
        self.sm.clear()
        observer_calls = 0

    def testAddOnebyOne(self):
        # count must grow by exactly one per addEvent call
        num = self.sm.getNumEvents()
        self.sm.addEvent(self.e1)
        num1 = self.sm.getNumEvents()
        self.sm.addEvent(self.e2)
        num2 = self.sm.getNumEvents()
        self.sm.addEvent(self.e3)
        num3 = self.sm.getNumEvents()
        self.sm.addEvent(self.e4)
        num4 = self.sm.getNumEvents()
        # test the numbers
        self.assertEqual(num1, num + 1)
        self.assertEqual(num2, num + 2)
        self.assertEqual(num3, num + 3)
        self.assertEqual(num4, num + 4)

    def testAddMultiple(self):
        # addEvents stores the whole list in one call
        num = self.sm.getNumEvents()
        self.sm.addEvents(self.elist)
        num4 = self.sm.getNumEvents()
        self.assertEqual(num + 4, num4)

    def testGetOneByOne(self):
        # this test also sets the status so that you get a new event each time
        self.sm.addEvent(self.e1)
        self.sm.addEvent(self.e2)
        self.sm.addEvent(self.e3)
        self.sm.addEvent(self.e4)
        self.assertEqual(4, self.sm.getNumEventsServer(0))
        (e1, id1) = self.sm.getEvent(0)
        self.assertEqual(self.e1.data, e1)
        self.sm.setUploaded(0, [id1])
        self.assertEqual(3, self.sm.getNumEventsServer(0))
        (e2, id2) = self.sm.getEvent(0)
        self.assertEqual(self.e2.data, e2)
        self.sm.setUploaded(0, [id2])
        self.assertEqual(2, self.sm.getNumEventsServer(0))
        (e3, id3) = self.sm.getEvent(0)
        self.assertEqual(self.e3.data, e3)
        self.sm.setUploaded(0, [id3])
        self.assertEqual(1, self.sm.getNumEventsServer(0))
        (e4, id4) = self.sm.getEvent(0)
        self.assertEqual(self.e4.data, e4)
        self.sm.setUploaded(0, [id4])
        self.assertEqual(0, self.sm.getNumEventsServer(0))

    def testUploadStatus(self):
        # events only disappear once *all* servers have them uploaded
        # add events
        self.sm.addEvents(self.elist)
        # get the ids
        (elist, eids) = self.sm.getEvents(0, 4)
        self.assertEqual(4, self.sm.getNumEvents())
        self.assertEqual(4, self.sm.getNumEventsServer(0))
        self.assertEqual(4, self.sm.getNumEventsServer(1))
        # set server 1 uploaded
        self.sm.setUploaded(0, eids)
        self.assertEqual(4, self.sm.getNumEvents())
        self.assertEqual(0, self.sm.getNumEventsServer(0))
        self.assertEqual(4, self.sm.getNumEventsServer(1))
        # set server 2 uploaded
        self.sm.setUploaded(1, eids)
        self.assertEqual(0, self.sm.getNumEvents())
        self.assertEqual(0, self.sm.getNumEventsServer(0))
        self.assertEqual(0, self.sm.getNumEventsServer(1))

    def testUploadStatusWrongID(self):
        # marking non-existent event ids as uploaded must be a no-op
        self.sm.addEvents(self.elist)
        self.sm.setUploaded(1, [-1, -4, 999999999999])
        self.assertEqual(4, self.sm.getNumEvents())
        self.assertEqual(4, self.sm.getNumEventsServer(0))
        self.assertEqual(4, self.sm.getNumEventsServer(1))

    def testUploadStatusWrongServerID(self):
        # an unknown server id must not change any counts
        self.sm.addEvents(self.elist)
        (elist, eids) = self.sm.getEvents(0, 4)
        self.sm.setUploaded(11, eids)
        self.assertEqual(4, self.sm.getNumEvents())
        self.assertEqual(4, self.sm.getNumEventsServer(0))
        self.assertEqual(4, self.sm.getNumEventsServer(1))
        # now see if we can still remove it
        self.sm.setUploaded(0, eids)
        self.sm.setUploaded(1, eids)
        self.assertEqual(0, self.sm.getNumEvents())

    def testGetMultiple(self):
        # events come back in insertion order
        self.sm.addEvents(self.elist)
        (elist, eidlist) = self.sm.getEvents(0, 4)
        i = 0
        for e in elist:
            self.assertEqual(e, self.elist[i].data)
            i = i + 1

    def testOtherBatchSize(self):
        # the batch-size argument caps the number of returned events
        self.sm.addEvents(self.elist)
        (elist, eidlist) = self.sm.getEvents(0, 1)
        self.assertEqual(1, len(elist))
        (elist, eidlist) = self.sm.getEvents(0, 2)
        self.assertEqual(2, len(elist))
        (elist, eidlist) = self.sm.getEvents(0, 3)
        self.assertEqual(3, len(elist))
        (elist, eidlist) = self.sm.getEvents(0, 4)
        self.assertEqual(4, len(elist))
        (elist, eidlist) = self.sm.getEvents(0, 5)
        self.assertEqual(4, len(elist))
        (elist, eidlist) = self.sm.getEvents(0, 0)
        self.assertEqual(0, len(elist))
        # remove 2 and test again
        (elist, eidlist) = self.sm.getEvents(0, 5)
        self.sm.setUploaded(0, [eidlist[0], eidlist[1]])
        (elist, eidlist) = self.sm.getEvents(0, 1)
        self.assertEqual(1, len(elist))
        (elist, eidlist) = self.sm.getEvents(0, 2)
        self.assertEqual(2, len(elist))
        (elist, eidlist) = self.sm.getEvents(0, 3)
        self.assertEqual(2, len(elist))

    def testObserver(self):
        # each added event notifies the registered observer once
        global observer_calls
        obs = Obs()
        self.sm.addObserver(obs)
        self.sm.addEvent(self.e1)
        self.assertEqual(1, observer_calls)
        self.sm.addEvents(self.elist)
        self.assertEqual(5, observer_calls)

    def testTwoObservers(self):
        # with two observers every event is counted twice
        global observer_calls
        self.sm.addObserver(Obs())
        self.sm.addObserver(Obs())
        self.sm.addEvent(self.e1)
        self.assertEqual(2, observer_calls)
        self.sm.addEvents(self.elist)
        self.assertEqual(10, observer_calls)
class CheckScheduler(threading.Thread):
    """Thread that periodically runs Nagios checks via a Scheduler."""

    def __init__(self, config, interpreter):
        """Store collaborators and build the scheduler.

        :param config: dict-like Nagios configuration section
        :param interpreter: interpreter used by the trigger-rate check
        """
        # invoke constructor of parent class (threading)
        threading.Thread.__init__(self)
        self.stop_event = threading.Event()
        self.status = None
        self.sched = Scheduler(self.status)
        self.dicConfig = config
        # create a nagios push object
        self.nagiosPush = NagiosPush(config)
        self.storageManager = StorageManager()
        self.interpreter = interpreter
        # event-rate check, exposed via getEventRate()
        self.eventRate = EventRate()

    def getEventRate(self):
        """Return the EventRate check instance."""
        return self.eventRate

    def stop(self):
        """Signal the run loop to terminate."""
        self.stop_event.set()

    # class-level (shared) list of crash timestamps used by init_restart()
    crashes = []

    def init_restart(self):
        """Support for restarting crashed threads"""
        if len(self.crashes) > 3 and time.time() - self.crashes[-3] < 60.0:
            raise ThreadCrashError("Thread has crashed three times in "
                                   "less than a minute")
        else:
            super(CheckScheduler, self).__init__()
            self.crashes.append(time.time())

    # This is what the thread actually runs; the required name is run().
    # The threading.Thread.start() calls threading.Thread.run(),
    # which is always overridden.
    def run(self):
        """Register all checks, then loop running them until stopped."""
        log("CheckScheduler: Thread started!", severity=2)
        self.storageManager.openConnection()
        ### Trigger rate:
        triggerRate = TriggerRate(self.interpreter)
        TR_interval = int(self.dicConfig["triggerrate_interval"])
        self.sched.addJob(triggerRate.check, interval=TR_interval,
                          args=self.dicConfig)
        ### Storage size:
        storageSize = StorageSize(self.storageManager)
        SS_interval = int(self.dicConfig["storagesize_interval"])
        self.sched.addJob(storageSize.check, interval=SS_interval,
                          args=self.dicConfig)
        ### Event rate:
        ER_interval = int(self.dicConfig["eventrate_interval"])
        self.sched.addJob(self.eventRate.check, interval=ER_interval,
                          args=self.dicConfig)
        ### Storage growth:
        storageGrowth = StorageGrowth(self.storageManager)
        SG_interval = int(self.dicConfig["storagegrowth_interval"])
        self.sched.addJob(storageGrowth.check, interval=SG_interval,
                          args=self.dicConfig)
        while not self.stop_event.isSet():
            # run all checks that are due
            self.sched.schedule(self.nagiosPush, self.dicConfig)
            try:
                time.sleep(1)
            except KeyboardInterrupt:
                break
            except Exception:
                # BUG FIX: was a bare "except:" which also swallowed
                # SystemExit; unexpected errors are still ignored so the
                # loop keeps running (deliberate best-effort behaviour).
                pass
        log("CheckScheduler: Thread stopped!", severity=2)
from StorageManager import StorageManager

# Module-level storage registry, created at import time.
# NOTE(review): any side effects of StorageManager() run on first import
# of this module -- confirm that is intended.
AVAILABLE_STORAGES = StorageManager()

# Django-style app configuration path for this package.
default_app_config = 'opencsp.apps.OpenCSPAppConfig'
def reset():
    """Reset storage by delegating to ``StorageManager.reset``."""
    StorageManager.reset()
def init():
    """Create the module-level StorageManager and bind it to ``db``."""
    global db
    db = StorageManager()
def getImageName(self, image_url, full_save_path):
    """Build a sequential wallpaper filename for the image being saved.

    The index is the number of files already present in full_save_path.
    """
    print("Starting download for " + image_url)
    existing_count = StorageManager(full_save_path).count_files()
    return "wallpaper_" + str(existing_count)