def getCMSSiteInfo(pattern):
    """
    _getCMSSiteInfo_

    NOTE(review): despite the name and the original docstring, this function
    does NOT query SiteDB and the `pattern` argument is ignored.  It builds a
    JSON-mode PhEDEx client and prints the raw subscription records for two
    hard-coded datasets (debug/exploration helper).
    """
    phedex = PhEDEx( responseType = "json")
    # Python 2 print statements: dump the PhEDEx datasvc subscription payloads.
    print phedex.subscriptions(dataset = '/HidjetQuenchedMinBias/HiWinter13-PtHat80_STARTHI44_V12-v1/GEN-SIM-RECODEBUG')
    print phedex.subscriptions(dataset = '/MinimumBias/Run2012D-v1/RAW')
class PhEDExInjectorPollerTest(unittest.TestCase):
    """
    _PhEDExInjectorPollerTest_

    Unit tests for the PhEDExInjector.  Create some database inside DBSBuffer
    and then have the PhEDExInjector upload the data to PhEDEx.  Pull the data
    back down and verify that everything is complete.
    """

    def setUp(self):
        """
        _setUp_

        Install the DBSBuffer schema into the database and connect to PhEDEx.
        """
        # Test service endpoints: PhEDEx "test" datasvc instance and an
        # integration DBS writer.
        self.phedexURL = "https://cmsweb.cern.ch/phedex/datasvc/json/test"
        self.dbsURL = "http://vocms09.cern.ch:8880/cms_dbs_int_local_yy_writer/servlet/DBSServlet"
        self.testInit = TestInit(__file__)
        self.testInit.setLogging()
        self.testInit.setDatabaseConnection()
        self.testInit.setSchema(customModules = ["WMComponent.DBS3Buffer"],
                                useDefault = False)

        myThread = threading.currentThread()
        daofactory = DAOFactory(package = "WMComponent.DBSBuffer.Database",
                                logger = myThread.logger,
                                dbinterface = myThread.dbi)

        locationAction = daofactory(classname = "DBSBufferFiles.AddLocation")
        locationAction.execute(siteName = "srm-cms.cern.ch")

        self.testFilesA = []
        self.testFilesB = []
        # Unique dataset names per test run so repeated runs don't collide.
        self.testDatasetA = "/%s/PromptReco-v1/RECO" % makeUUID()
        self.testDatasetB = "/%s/CRUZET11-v1/RAW" % makeUUID()

        self.phedex = PhEDEx({"endpoint": self.phedexURL}, "json")
        return

    def tearDown(self):
        """
        _tearDown_

        Delete the database.
        """
        self.testInit.clearDatabase()

    def stuffDatabase(self):
        """
        _stuffDatabase_

        Fill the dbsbuffer with some files and blocks.  We'll insert a total
        of 5 files spanning two blocks.  There will be a total of two datasets
        inserted into the database.  We'll inject files with the location set
        as an SE name as well as a PhEDEx node name as well.
        """
        checksums = {"adler32": "1234", "cksum": "5678"}

        def makeTestFile(datasetPath):
            # Helper: create one DBSBufferFile with the standard test metadata
            # (previously five copy/pasted stanzas).
            testFile = DBSBufferFile(lfn = makeUUID(), size = 1024, events = 10,
                                     checksums = checksums,
                                     locations = set(["srm-cms.cern.ch"]))
            testFile.setAlgorithm(appName = "cmsRun", appVer = "CMSSW_2_1_8",
                                  appFam = "RECO", psetHash = "GIBBERISH",
                                  configContent = "MOREGIBBERISH")
            testFile.setDatasetPath(datasetPath)
            testFile.addRun(Run(2, *[45]))
            testFile.create()
            return testFile

        # Three files for dataset A, two for dataset B.
        testFileA = makeTestFile(self.testDatasetA)
        testFileB = makeTestFile(self.testDatasetA)
        testFileC = makeTestFile(self.testDatasetA)
        self.testFilesA.extend([testFileA, testFileB, testFileC])

        testFileD = makeTestFile(self.testDatasetB)
        testFileE = makeTestFile(self.testDatasetB)
        self.testFilesB.extend([testFileD, testFileE])

        myThread = threading.currentThread()
        uploadFactory = DAOFactory(package = "WMComponent.DBSUpload.Database",
                                   logger = myThread.logger,
                                   dbinterface = myThread.dbi)
        createBlock = uploadFactory(classname = "SetBlockStatus")

        self.blockAName = self.testDatasetA + "#" + makeUUID()
        self.blockBName = self.testDatasetB + "#" + makeUUID()
        # Both blocks start out open at the CERN SE.
        createBlock.execute(block = self.blockAName,
                            locations = ["srm-cms.cern.ch"], open_status = 1)
        createBlock.execute(block = self.blockBName,
                            locations = ["srm-cms.cern.ch"], open_status = 1)

        bufferFactory = DAOFactory(package = "WMComponent.DBSBuffer.Database",
                                   logger = myThread.logger,
                                   dbinterface = myThread.dbi)

        setBlock = bufferFactory(classname = "DBSBufferFiles.SetBlock")
        setBlock.execute(testFileA["lfn"], self.blockAName)
        setBlock.execute(testFileB["lfn"], self.blockAName)
        setBlock.execute(testFileC["lfn"], self.blockAName)
        setBlock.execute(testFileD["lfn"], self.blockBName)
        setBlock.execute(testFileE["lfn"], self.blockBName)

        fileStatus = bufferFactory(classname = "DBSBufferFiles.SetStatus")
        for testFile in [testFileA, testFileB, testFileC, testFileD, testFileE]:
            fileStatus.execute(testFile["lfn"], "LOCAL")
        return

    def createConfig(self):
        """
        _createConfig_

        Create a config for the PhEDExInjector with paths to the test DBS and
        PhEDEx instances.
        """
        config = self.testInit.getConfiguration()
        config.component_("DBSInterface")
        config.DBSInterface.globalDBSUrl = self.dbsURL
        config.component_("PhEDExInjector")
        config.PhEDExInjector.phedexurl = self.phedexURL
        config.PhEDExInjector.subscribeMSS = True
        config.PhEDExInjector.group = "Saturn"
        config.PhEDExInjector.pollInterval = 30
        config.PhEDExInjector.subscribeInterval = 60
        return config

    def retrieveReplicaInfoForBlock(self, blockName):
        """
        _retrieveReplicaInfoForBlock_

        Retrieve the replica information for a block.  It takes several
        minutes after a block is injected for the statistics to be calculated,
        so this will block until that information is available.

        Returns the first replica record, or None after 15 attempts.
        """
        attempts = 0
        while attempts < 15:
            result = self.phedex.getReplicaInfoForFiles(block = blockName)
            # dict.has_key() was removed in Python 3; use safe .get() chaining
            # instead of the nested has_key() checks.
            blocks = result.get("phedex", {}).get("block", [])
            if len(blocks) != 0:
                return blocks[0]
            attempts += 1
            time.sleep(20)

        logging.info("Could not retrieve replica info for block: %s" % blockName)
        return None

    def testPoller(self):
        """
        _testPoller_

        Stuff the database and have the poller upload files to PhEDEx.
        Retrieve replica information for the uploaded blocks and verify that
        all files have been injected.  Also verify that files have been
        subscribed to MSS.
        """
        self.stuffDatabase()

        poller = PhEDExInjectorPoller(self.createConfig())
        poller.setup(parameters = None)
        poller.algorithm(parameters = None)

        replicaInfo = self.retrieveReplicaInfoForBlock(self.blockAName)
        goldenLFNs = []
        # "testFile" rather than "file" to avoid shadowing the builtin.
        for testFile in self.testFilesA:
            goldenLFNs.append(testFile["lfn"])
        for replicaFile in replicaInfo["file"]:
            assert replicaFile["name"] in goldenLFNs, \
                   "Error: Extra file in replica block: %s" % replicaFile["name"]
            goldenLFNs.remove(replicaFile["name"])
        assert len(goldenLFNs) == 0, \
               "Error: Files missing from PhEDEx replica: %s" % goldenLFNs

        replicaInfo = self.retrieveReplicaInfoForBlock(self.blockBName)
        goldenLFNs = []
        for testFile in self.testFilesB:
            goldenLFNs.append(testFile["lfn"])
        for replicaFile in replicaInfo["file"]:
            assert replicaFile["name"] in goldenLFNs, \
                   "Error: Extra file in replica block: %s" % replicaFile["name"]
            goldenLFNs.remove(replicaFile["name"])
        assert len(goldenLFNs) == 0, \
               "Error: Files missing from PhEDEx replica: %s" % goldenLFNs

        # Mark block A as migrated to global DBS; a second poller pass should
        # then close block A in PhEDEx while leaving block B open.
        myThread = threading.currentThread()
        daofactory = DAOFactory(package = "WMComponent.DBSUpload.Database",
                                logger = myThread.logger,
                                dbinterface = myThread.dbi)
        setBlock = daofactory(classname = "SetBlockStatus")
        setBlock.execute(self.blockAName, locations = None,
                         open_status = "InGlobalDBS")

        poller.algorithm(parameters = None)

        replicaInfo = self.retrieveReplicaInfoForBlock(self.blockAName)
        assert replicaInfo["is_open"] == "n", \
               "Error: block should be closed."
        replicaInfo = self.retrieveReplicaInfoForBlock(self.blockBName)
        assert replicaInfo["is_open"] == "y", \
               "Error: block should be open."

        # Run the subscriber and verify the MSS subscription for dataset A.
        subscriber = PhEDExInjectorSubscriber(self.createConfig())
        subscriber.setup(parameters = None)
        subscriber.algorithm(parameters = None)

        subAResult = self.phedex.subscriptions(dataset = self.testDatasetA)
        self.assertEqual(len(subAResult["phedex"]["dataset"]), 1,
                         "Error: Subscription was not made.")
        datasetASub = subAResult["phedex"]["dataset"][0]
        self.assertTrue(datasetASub["files"] == "3" and
                        datasetASub["name"] == self.testDatasetA,
                        "Error: Metadata is incorrect for sub.")
        self.assertEqual(datasetASub["subscription"][0]["node"], "T1_CH_CERN_MSS",
                         "Error: Node is wrong.")
        return
class RequestManager(object):
    "Class to keep track of transfer progress in PhEDEx for a given task"

    def __init__(self, group='DataOps', dbFileName=None, interval=10, verbose=False):
        self.verbose = verbose
        if not dbFileName:
            # No db file given: derive a unique file name from a temp file.
            fobj = tempfile.NamedTemporaryFile()
            dbFileName = '%s.db' % fobj.name
        self.store = RequestStore(dbFileName)
        self.phedex = PhEDEx()
        self.group = group
        # Background thread periodically checks status of stored requests.
        thname = 'RequestManager monitor'
        self.thr = start_new_thread(thname, checkRequests, \
                (self.checkStatus, self.store, interval))
        if verbose:
            print("### Running %s thread, running: %s" % (thname, self.thr.running()))
            print("RequestManager, group=%s, db=%s, interval=%s" % (group, dbFileName, interval))

    def stop(self):
        "Stop RequestManager"
        self.thr.stop()  # stop checkStatus thread
        status = self.thr.running()
        return status

    def checkPhedex(self, request):
        "Send request to Phedex and return status of request subscription"
        sdict = {}
        rdict = self.store.get(request)
        if not rdict:
            # request is gone from the store: treat as fully completed
            return 100
        for dataset in rdict.get('datasets'):
            data = self.phedex.subscriptions(dataset=dataset, group=self.group)
            if self.verbose:
                print("### dataset", dataset, "group", self.group)
                print("### subscription", data)
            for row in data['phedex']['dataset']:
                if row['name'] != dataset:
                    continue
                nodes = [s['node'] for s in row['subscription']]
                rNodes = rdict.get('sites')
                if self.verbose:
                    print("### nodes", nodes, rNodes)
                # fraction of requested sites that already have a subscription
                subset = set(nodes) & set(rNodes)
                if subset == set(rNodes):
                    sdict[dataset] = 1
                else:
                    pct = float(len(subset)) / float(len(set(rNodes)))
                    sdict[dataset] = pct
        if self.verbose:
            print("### sdict", sdict)
        # len(sdict) instead of len(sdict.keys()): same value, no temp list
        tot = len(sdict)
        if not tot:
            return -1
        # return percentage of completion
        return round(float(sum(sdict.values())) / float(tot), 2) * 100

    def checkStatus(self, request):
        "Check status of request in local storage"
        if self.verbose:
            print("### checkStatus of request: %s" % request)
        if self.store.exists(request):
            completed = self.checkPhedex(request)
            if completed == 100:  # all data are staged
                if self.verbose:
                    print("### request is completed, change its status and remove it from the store")
                # call ReqMgr2 API to change status of the request
                # self.reqmgr.changeStatus(status)
                self.store.delete(request)
            else:
                if self.verbose:
                    print("### request %s, completed %s" % (request, completed))
                self.store.update(request, {'PhedexStatus': completed})

    def add(self, requests):
        "Add requests to task manager"
        # loop over requests: for non-existing pid submit phedex subscription,
        # for existing one check their status
        for request, rdict in requests.items():
            if self.store.exists(request):
                self.checkStatus(request)
            else:
                # request not yet in backend: store it and build its
                # subscription.  (rdict is already bound by items(); the old
                # code redundantly re-fetched requests[request].)
                self.store.add(request, rdict)
                datasets = rdict.get('datasets')
                sites = rdict.get('sites')
                subscription = PhEDExSubscription(datasets, sites, self.group)
                if self.verbose:
                    print("### add subscription", subscription)
                # TODO: when ready enable submit subscription step
                # self.phedex.subscribe(subscription)

    def info(self, request=None):
        "Return info about given request"
        if not request:
            return self.store.info()
        completed = self.checkPhedex(request)
        idict = self.store.info(request)
        idict.update({'completed': completed})
        return idict

    def delete(self, request):
        "Delete request in backend"
        return self.store.delete(request)
class MSManager(object):
    "Class to keep track of transfer progress in PhEDEx for a given task"

    def __init__(self, svc, group='DataOps', readOnly=True, interval=60, logger=None):
        if logger:
            self.logger = logger
        else:
            self.logger = logging.getLogger('reqmgr2ms:MSManager')
            self.logger.setLevel(logging.DEBUG)
            logging.basicConfig()
        self.phedex = PhEDEx()  # eventually will change to Rucio
        self.group = group
        self.readOnly = readOnly  # when True, never write status back to ReqMgr2
        self.svc = svc  # Services: ReqMgr, ReqMgrAux
        # Two daemon threads: one moves assigned -> staging, the other
        # monitors staging -> staged.
        thname = 'MSTransferor'
        self.thr = start_new_thread(thname, daemon,
                                    (self.transferor, 'assigned', interval, self.logger))
        self.logger.debug("### Running %s thread %s", thname, self.thr.running())
        thname = 'MSTransferorMonit'
        self.ms_monit = start_new_thread(thname, daemon,
                                         (self.monit, 'staging', interval, self.logger))
        self.logger.debug("+++ Running %s thread %s", thname, self.ms_monit.running())
        self.logger.info("MSManager, group=%s, interval=%s", group, interval)

    def monit(self, reqStatus='staging'):
        """
        MSManager monitoring function.
        It performs transfer requests from staging to staged state of ReqMgr2.
        For references see
        https://github.com/dmwm/WMCore/wiki/ReqMgr2-MicroService-Transferor
        """
        try:
            # get requests from ReqMgr2 data-service for given status;
            # with detail=False we get back a list of request names
            requests = self.svc.reqmgr.getRequestByStatus([reqStatus], detail=False)
            self.logger.debug('+++ monit found %s requests in %s state', len(requests), reqStatus)

            requestStatus = {}  # keep track of request statuses, keyed by name
            for reqName in requests:
                req = {'name': reqName, 'reqStatus': reqStatus}
                # get transfer IDs
                tids = self.getTransferIDs()
                # get transfer status
                transferStatuses = self.getTransferStatuses(tids)
                # get campaign and unified configuration
                campaign = self.requestCampaign(reqName)
                conf = self.requestConfiguration(reqName)
                self.logger.debug("+++ request %s campaing %s conf %s", req, campaign, conf)

                # if all transfers are completed,
                # move the request status staging -> staged
                # completed = self.checkSubscription(request)
                completed = 100  # TMP
                if completed == 100:  # all data are staged
                    self.logger.debug("+++ request %s all transfers are completed", req)
                    self.change(req, 'staged', '+++ monit')
                # if pileup transfers are completed AND some input blocks are
                # completed, move the request status staging -> staged
                elif self.pileupTransfersCompleted(tids):
                    self.logger.debug("+++ request %s pileup transfers are completed", req)
                    self.change(req, 'staged', '+++ monit')
                # transfers not completed, just update the database with
                # their completion
                else:
                    self.logger.debug("+++ request %s transfers are not completed", req)
                    # BUGFIX: key on reqName (a string); the old code used the
                    # req dict as a key, which raised TypeError (unhashable).
                    requestStatus[reqName] = transferStatuses
            # TODO: implement update of transfer ids
            self.updateTransferIDs(requestStatus)
        except Exception as err:  # general error
            self.logger.exception('+++ monit error: %s', str(err))

    def transferor(self, reqStatus='assigned'):
        """
        MSManager transferor function.
        It performs Unified logic for data subscription and transfers requests
        from assigned to staging/staged state of ReqMgr2.
        For references see
        https://github.com/dmwm/WMCore/wiki/ReqMgr2-MicroService-Transferor
        """
        requestRecords = []
        try:
            # get requests from ReqMgr2 data-service for given status
            requestSpecs = self.svc.reqmgr.getRequestByStatus([reqStatus], detail=True)
            if requestSpecs:
                for _, wfData in requestSpecs[0].items():
                    requestRecords.append(requestRecord(wfData, reqStatus))
            self.logger.debug('### monit found %s requests in %s state', len(requestRecords), reqStatus)

            # get complete requests information (based on Unified Transferor logic)
            requestRecords = requestsInfo(requestRecords, self.svc, self.logger)
        except Exception as err:  # general error
            self.logger.exception('### transferor error: %s', str(err))

        # process all requests
        for req in requestRecords:
            reqName = req['name']
            # perform transfer
            tid = self.transferRequest(req)
            if tid:
                # Once all transfer requests were successfully made,
                # update: assigned -> staging
                self.logger.debug("### transfer request for %s successfull", reqName)
                self.change(req, 'staging', '### transferor')
            # if there is nothing to be transferred (no input at all),
            # then update the request status once again staging -> staged
            # self.change(req, 'staged', '### transferor')

    def stop(self):
        "Stop MSManager"
        # stop MSTransferorMonit thread
        self.ms_monit.stop()
        # stop MSTransferor thread
        self.thr.stop()  # stop checkStatus thread
        status = self.thr.running()
        return status

    def transferRequest(self, req):
        "Send request to Phedex and return status of request subscription"
        datasets = req.get('datasets', [])
        sites = req.get('sites', [])
        if datasets and sites:
            self.logger.debug("### creating subscription for: %s", pformat(req))
            subscription = PhEDExSubscription(datasets, sites, self.group)
            # TODO: implement how to get transfer id
            # encode() for Python 3 compatibility: hashlib requires bytes
            tid = hashlib.md5(str(subscription).encode('utf-8')).hexdigest()
            # TODO: when ready enable submit subscription step
            # self.phedex.subscribe(subscription)
            return tid

    def getTransferIDsDoc(self):
        """
        Get transfer ids document from backend.  The document has the
        following form, see
        https://gist.github.com/amaltaro/72599f995b37a6e33566f3c749143154

        {"wf_A": {"timestamp": 0000,
                  "primary": {"dset_1": ["list of transfer ids"]},
                  "secondary": {"PU_dset_1": ["list of transfer ids"]}},
         "wf_B": {"timestamp": 0000,
                  "primary": {"dset_1": ["list of transfer ids"],
                              "parent_dset_1": ["list of transfer ids"]},
                  "secondary": {"PU_dset_1": ["list of transfer ids"],
                                "PU_dset_2": ["list of transfer ids"]}},
         ...
        }
        """
        doc = {}
        return doc

    def updateTransferIDs(self, requestStatus):
        "Update transfer ids in backend"
        # TODO/Wait: https://github.com/dmwm/WMCore/issues/9198
        # doc = self.getTransferIDsDoc()

    def getTransferIDs(self):
        "Get transfer ids from backend"
        # TODO/Wait: https://github.com/dmwm/WMCore/issues/9198
        # meanwhile return transfer ids from internal store
        return []

    def getTransferStatuses(self, tids):
        "get transfer statuses for given transfer IDs from backend"
        # transfer docs on backend have the following form:
        # https://gist.github.com/amaltaro/72599f995b37a6e33566f3c749143154
        statuses = {}
        for tid in tids:
            # TODO: I need to find request name from transfer ID
            # status = self.checkSubscription(request)
            status = 100
            statuses[tid] = status
        return statuses

    def requestCampaign(self, req):
        "Return request campaign"
        return 'campaign_TODO'  # TODO

    def requestConfiguration(self, req):
        "Return request configuration"
        return {}

    def pileupTransfersCompleted(self, tids):
        "Check if pileup transfers are completed"
        # TODO: add implementation
        return False

    def checkSubscription(self, req):
        "Send request to Phedex and return status of request subscription"
        sdict = {}
        for dataset in req.get('datasets', []):
            data = self.phedex.subscriptions(dataset=dataset, group=self.group)
            self.logger.debug("### dataset %s group %s", dataset, self.group)
            self.logger.debug("### subscription %s", data)
            for row in data['phedex']['dataset']:
                if row['name'] != dataset:
                    continue
                nodes = [s['node'] for s in row['subscription']]
                rNodes = req.get('sites')
                self.logger.debug("### nodes %s %s", nodes, rNodes)
                # fraction of requested sites already holding a subscription
                subset = set(nodes) & set(rNodes)
                if subset == set(rNodes):
                    sdict[dataset] = 1
                else:
                    pct = float(len(subset)) / float(len(set(rNodes)))
                    sdict[dataset] = pct
        self.logger.debug("### sdict %s", sdict)
        # len(sdict) instead of len(sdict.keys()): same value, no temp list
        tot = len(sdict)
        if not tot:
            return -1
        # return percentage of completion
        return round(float(sum(sdict.values())) / float(tot), 2) * 100

    def checkStatus(self, req):
        "Check status of request in local storage"
        self.logger.debug("### checkStatus of request: %s", req['name'])
        # check subscription status of the request
        # completed = self.checkSubscription(req)
        completed = 100
        if completed == 100:  # all data are staged
            self.logger.debug("### request is completed, change its status and remove it from the store")
            self.change(req, 'staged', '### transferor')
        else:
            self.logger.debug("### request %s, completed %s", req, completed)

    def change(self, req, reqStatus, prefix='###'):
        """
        Change request status, internally it is done via PUT request to ReqMgr2:
        curl -X PUT -H "Content-Type: application/json" \
             -d '{"RequestStatus":"staging", "RequestName":"bla-bla"}' \
             https://xxx.yyy.zz/reqmgr2/data/request
        """
        self.logger.debug('%s updating %s status to %s', prefix, req['name'], reqStatus)
        try:
            # only write back when the status actually changes and we are
            # not in readOnly (dry-run) mode
            if req.get('reqStatus', None) != reqStatus:
                if not self.readOnly:
                    self.svc.reqmgr.updateRequestStatus(req['name'], reqStatus)
        except Exception as err:
            self.logger.exception("Failed to change request status. Error: %s", str(err))

    def info(self, req):
        "Return info about given request"
        completed = self.checkSubscription(req)
        return {'request': req, 'status': completed}

    def delete(self, request):
        "Delete request in backend"
        pass
class RequestManager(object):
    "Class to keep track of transfer progress in PhEDEx for a given task"

    def __init__(self, group='DataOps', dbFileName=None, interval=10, verbose=False):
        self.verbose = verbose
        if not dbFileName:
            # No db file given: derive a unique file name from a temp file.
            fobj = tempfile.NamedTemporaryFile()
            dbFileName = '%s.db' % fobj.name
        self.store = RequestStore(dbFileName)
        self.phedex = PhEDEx()
        self.group = group
        # Background thread periodically checks status of stored requests.
        thname = 'RequestManager monitor'
        self.thr = start_new_thread(thname, checkRequests, \
                (self.checkStatus, self.store, interval))
        if verbose:
            print("### Running %s thread, running: %s" % (thname, self.thr.running()))
            print("RequestManager, group=%s, db=%s, interval=%s" % (group, dbFileName, interval))

    def stop(self):
        "Stop RequestManager"
        self.thr.stop()  # stop checkStatus thread
        status = self.thr.running()
        return status

    def checkPhedex(self, request):
        "Send request to Phedex and return status of request subscription"
        sdict = {}
        rdict = self.store.get(request)
        if not rdict:
            # request is gone from the store: treat as fully completed
            return 100
        for dataset in rdict.get('datasets'):
            data = self.phedex.subscriptions(dataset=dataset, group=self.group)
            if self.verbose:
                print("### dataset", dataset, "group", self.group)
                print("### subscription", data)
            for row in data['phedex']['dataset']:
                if row['name'] != dataset:
                    continue
                nodes = [s['node'] for s in row['subscription']]
                rNodes = rdict.get('sites')
                if self.verbose:
                    print("### nodes", nodes, rNodes)
                # fraction of requested sites that already have a subscription
                subset = set(nodes) & set(rNodes)
                if subset == set(rNodes):
                    sdict[dataset] = 1
                else:
                    pct = float(len(subset))/float(len(set(rNodes)))
                    sdict[dataset] = pct
        if self.verbose:
            print("### sdict", sdict)
        # len(sdict) instead of len(sdict.keys()): same value, no temp list
        tot = len(sdict)
        if not tot:
            return -1
        # return percentage of completion
        return round(float(sum(sdict.values()))/float(tot), 2) * 100

    def checkStatus(self, request):
        "Check status of request in local storage"
        if self.verbose:
            print("### checkStatus of request: %s" % request)
        if self.store.exists(request):
            completed = self.checkPhedex(request)
            if completed == 100:  # all data are staged
                if self.verbose:
                    print("### request is completed, change its status and remove it from the store")
                # call ReqMgr2 API to change status of the request
                # self.reqmgr.changeStatus(status)
                self.store.delete(request)
            else:
                if self.verbose:
                    print("### request %s, completed %s" % (request, completed))
                self.store.update(request, {'PhedexStatus': completed})

    def add(self, requests):
        "Add requests to task manager"
        # loop over requests: for non-existing pid submit phedex subscription,
        # for existing one check their status
        for request, rdict in requests.items():
            if self.store.exists(request):
                self.checkStatus(request)
            else:
                # request not yet in backend: store it and build its
                # subscription.  (rdict is already bound by items(); the old
                # code redundantly re-fetched requests[request].)
                self.store.add(request, rdict)
                datasets = rdict.get('datasets')
                sites = rdict.get('sites')
                subscription = PhEDExSubscription(datasets, sites, self.group)
                if self.verbose:
                    print("### add subscription", subscription)
                # TODO: when ready enable submit subscription step
                # self.phedex.subscribe(subscription)

    def info(self, request=None):
        "Return info about given request"
        if not request:
            return self.store.info()
        completed = self.checkPhedex(request)
        idict = self.store.info(request)
        idict.update({'completed': completed})
        return idict

    def delete(self, request):
        "Delete request in backend"
        return self.store.delete(request)
class TransferorTest(unittest.TestCase):
    "Unit test for Transferor module"

    def setUp(self):
        "init test class"
        self.group = 'DataOps'
        self.interval = 2
        self.rmgr = RequestManager(group=self.group, interval=self.interval, verbose=True)
        self.phedex = PhEDEx()
        # get some subscriptions from PhEDEx to play with
        data = self.phedex.subscriptions(group=self.group)
        # NOTE(review): only the first dataset with subscription data is used
        # (the loop breaks after building self.requests); if PhEDEx returns no
        # usable dataset, self.requests is never set and tests will fail.
        for datasetInfo in data['phedex']['dataset']:
            dataset = datasetInfo.get('name')
            print("### dataset info from phedex, #files %s" % datasetInfo.get('files', 0))
            # now use the same logic in as in Transferor, i.e. look-up dataset/group subscription
            data = self.phedex.subscriptions(dataset=dataset, group=self.group)
            if not data['phedex']['dataset']:
                print("### skip this dataset since no subscription data is available")
                continue
            nodes = [i['node'] for r in data['phedex']['dataset'] for i in r['subscription']]
            print("### nodes", nodes)
            # create fake requests with dataset/nodes info
            rdict1 = dict(datasets=[dataset], sites=nodes, name='req1')
            rdict2 = dict(datasets=[dataset], sites=nodes, name='req2')
            self.requests = {'req1': rdict1, 'req2': rdict2}
            break

    def tearDown(self):
        "tear down all resources and exit unit test"
        self.rmgr.stop()  # stop internal thread

    def testRequestManager(self):
        "Test function for RequestManager class"
        # add requests to RequestManager
        self.rmgr.add(self.requests)
        # check their status
        for request in self.requests.keys():
            # after fetch request info here it will be gone from store
            info = self.rmgr.info(request)
            print("### request", request, "info", info)
            completed = info.pop('completed')
            self.assertEqual(100, int(completed))
            self.assertEqual(self.requests[request], info)
            self.rmgr.checkStatus(request)
            # at this point request should be gone from store
            self.assertEqual(False, self.rmgr.store.exists(request))
            # but we can check request status as many times as we want
            self.rmgr.checkStatus(request)

    def testRequestManagerAutomation(self):
        "Test function for RequestManager class which checks status of requests automatically"
        # add requests to RequestManager
        self.rmgr.add(self.requests)
        # we'll sleep and allow RequestManager thread to check status of requests
        # and wipe out them from internal store
        time.sleep(self.interval + 1)
        # check their status
        for request in self.requests.keys():
            # at this point request should be gone from store
            self.assertEqual(False, self.rmgr.store.exists(request))
            # but we can check request status as many times as we want
            self.rmgr.checkStatus(request)

    def testRequestStore(self):
        "Test function for RequestStore()"
        # back the store with a uniquely-named temporary db file
        fobj = tempfile.NamedTemporaryFile()
        fname = '%s.db' % fobj.name
        print("### open store", fname)
        store = RequestStore(fname)
        requests = [{'bla': {'meta': 'bla'}}, {'foo': {'meta': 'foo'}}]
        for item in requests:
            for request, rdict in item.items():
                store.add(request, rdict)
                self.assertEqual(True, store.exists(request))
                print("### request: %s" % request, "store info: ", store.info(request))
        store.delete('bla')
        # only 'foo' should remain after deleting 'bla'
        for request, rdict in store.info().items():
            self.assertEqual(request, 'foo')
        value = 1
        store.update('foo', {'update': value})
        data = store.get('foo')
        print("### data", data)
        self.assertEqual('update' in data, True)
        self.assertEqual(data.get('update', None), value)