def __init__(self, app, api, config, mount, extconfig):
    """Set up the S3-backed CRABCache REST entity (client + bucket name)."""
    RESTEntity.__init__(self, app, api, config, mount)
    self.logger = logging.getLogger("CRABLogger:RESTCache")
    # get S3 connection secrets from the CRABServerAuth file in the same way
    # as done for DB connection secrets. That file needs to contain an "s3"
    # dictionary with keys: access_key, secret_key
    # and config.py file for crabserver needs to point to it via the line
    # data.s3 = 'CRABServerAuth.s3'
    # following lines are copied from
    # https://github.com/dmwm/WMCore/blob/77a1ae719757a1eef766f8fb0c9f29ce6fcd2275/src/python/WMCore/REST/Server.py#L1735
    modname, item = config.s3.rsplit(".", 1)
    module = __import__(modname, globals(), locals(), [item])
    s3Dict = getattr(module, item)
    access_key = s3Dict['access_key']
    secret_key = s3Dict['secret_key']
    # in order to use S3 based CRABCache the cacheSSL config. param in rest external config
    # must be set to "endpoint/bucket" e.g. https://s3.cern.ch/<bucketname>
    cacheSSL = extconfig.centralconfig['backend-urls']['cacheSSL']
    # make sure any trailing '/' in the cacheSSL url does not end in the bucket name
    cacheSSL = cacheSSL.rstrip('/')
    bucket = cacheSSL.split('/')[-1]
    endpoint = 'https://s3.cern.ch'  # hardcode this. In case it can be moved to the s3Dict in config
    self.s3_bucket = bucket
    # NOTE(review): verify=False disables TLS certificate validation for the
    # S3 endpoint — confirm this is intentional (e.g. an internal CA) before keeping it.
    self.s3_client = boto3.client('s3', endpoint_url=endpoint,
                                  aws_access_key_id=access_key,
                                  aws_secret_access_key=secret_key,
                                  verify=False)
def __init__(self, app, api, config, mount):
    """Register CAF disk-usage and block-data scrapes on the API scraper."""
    # Regexes extracting the "useable total/free space in TB" table cells
    # from the SLS HTML page.
    _rxtotal = re.compile(r"useable total space in TB:.*?<td[^>]*>([0-9,]+)</td>")
    _rxfree = re.compile(r"useable free space in TB:.*?<td[^>]*>([0-9,]+)</td>")
    # cafdata entries are "label:regex" strings; pre-compile each regex once.
    _rxlist = [(l, re.compile(rx)) for l, rx in map(lambda s: s.split(":", 1), app.appconfig.cafdata)]
    _dataurl = "%s/datasvc/json/prod/blockreplicas?node=T2_CH_CERN" % app.appconfig.phedex
    _diskurl = "%s?id=EOSCMS" % (app.appconfig.sls % "service")  # FIXME CASTORCMS_CMSCAF?

    def _space(task, c, page):
        # Parse the SLS disk space information. Locate the total
        # space information field and extract total CAF disk size
        # from it. Note that SLS reports volume in metric, while
        # we use powers of two for the rest. Only 95% of the
        # reported space is actually valid for use.
        page = page.replace("\n", " ")
        mt = re.search(_rxtotal, page)
        mf = re.search(_rxfree, page)
        return mt and mf and \
            {"total": float(mt.group(1).replace(",", "")) * 0.95 * 1000**4 / 1024**4,
             "free": float(mf.group(1).replace(",", "")) * 0.95 * 1000**4 / 1024**4}

    def _blocks(task, c, page):
        # Parse PhEDEx json data. Find datasets from blocknames, and
        # accumulate statistics on total size and creation time.
        # Assign each dataset to the first label in rxlist which
        # matches the dataset name.
        value = {}
        for block in cjson.decode(page)['phedex']['block']:
            dsname = block['name'].split("#")[0]
            size = float(block['replica'][0]['bytes'])
            ctime = float(block['replica'][0]['time_create'])
            label = "other"
            for l, rx in _rxlist:
                if rx.search(dsname):
                    label = l
                    break
            if label not in value:
                value[label] = {}
            if dsname not in value[label]:
                value[label][dsname] = {'size': size, 'ctime': ctime}
            else:
                # Same dataset seen again: accumulate size, keep the
                # earliest creation time.
                value[label][dsname]['size'] += size
                if ctime < value[label][dsname]['ctime']:
                    value[label][dsname]['ctime'] = ctime
        return value

    RESTEntity.__init__(self, app, api, config, mount)
    # Register the periodic scrapes: disk usage (HTML) and block data (json).
    api.scraper.scrape(("caf", "usage"), {"value": _diskurl},
                       content_type="text/html", convert=_space)
    api.scraper.scrape(("caf", "blocks"), {"value": _dataurl}, convert=_blocks)
def __init__(self, app, api, config, mount):
    # main CouchDB database where requests/workloads are stored
    RESTEntity.__init__(self, app, api, config, mount)
    self.reqmgr_db = api.db_handler.get_db(config.couch_reqmgr_db)
    self.reqmgr_db_service = RequestDBWriter(self.reqmgr_db, couchapp="ReqMgr")
    # this is needed for the post validation
    self.gq_service = WorkQueue(config.couch_host, config.couch_workqueue_db)
def __init__(self, app, api, config, mount): RESTEntity.__init__(self, app, api, config, mount) # CouchDB auxiliary database name self.reqmgr_aux_db = api.db_handler.get_db(config.couch_reqmgr_aux_db) self.reqmgr_aux_db_service = RequestDBReader(self.reqmgr_aux_db, couchapp="ReqMgrAux") self.setName()
def __init__(self, app, api, config, mount):
    # main CouchDB database where requests/workloads are stored
    RESTEntity.__init__(self, app, api, config, mount)
    self.reqmgr_db = api.db_handler.get_db(config.couch_reqmgr_db)
    self.reqmgr_db_service = RequestDBWriter(self.reqmgr_db, couchapp = "ReqMgr")
    # this is needed for the post validation
    self.reqmgr_aux_db = api.db_handler.get_db(config.couch_reqmgr_aux_db)
def __init__(self, app, api, config, mount, centralcfg):
    """Set up the user-workflow REST entity and its backing services."""
    RESTEntity.__init__(self, app, api, config, mount)
    self.centralcfg = centralcfg
    self.logger = logging.getLogger("CRABLogger.RESTUserWorkflow")
    # Workflow data manager plus an initially empty, never-cached site list.
    self.userworkflowmgr = DataUserWorkflow()
    self.allCMSNames = CMSSitesCache(cachetime=0, sites={})
    # Task table accessor from the TaskDB schema.
    self.Task = getDBinstance(config, 'TaskDB', 'Task')
def __init__(self, app, api, config, mount):
    """Initialize the world-map tile entity and its on-disk tile cache."""
    RESTEntity.__init__(self, app, api, config, mount)
    # Remote source for the Blue Marble tile set.
    self._datasvc = "http://s3.amazonaws.com/com.modestmaps.bluemarble"
    # Local tile cache under the application state directory.
    self._cachedir = app.statedir + "/worldmap"
    if not os.path.isdir(self._cachedir):
        # FIX: 0755 is Python-2-only octal syntax (a SyntaxError in Python 3);
        # use the version-agnostic 0o755 literal instead.
        os.makedirs(self._cachedir, 0o755)
    # Read the current umask without changing it (os.umask has no getter:
    # set it to 0, capture the old value, then restore it).
    self._umask = os.umask(0)
    os.umask(self._umask)
def __init__(self, app, api, config, mount):
    RESTEntity.__init__(self, app, api, config, mount)
    # Start the HN synchronisation thread only when an "hnsync" configuration
    # attribute is present; otherwise syncing is disabled.
    if getattr(config, "hnsync", False):
        self._syncer = HNSyncThread(app, config.hnsync, mount,
                                    minreq = getattr(config, "hnsyncreq", 1000),
                                    interval = getattr(config, "hnsynctime", 300),
                                    instance = getattr(config, "hnsyncto", "prod"))
    else:
        self._syncer = None
def __init__(self, app, api, config, mount, t0flag=False):
    """Create the WMStats reader entity (T0 or standard ReqMgr flavour)."""
    # main CouchDB database where requests/workloads are stored
    RESTEntity.__init__(self, app, api, config, mount)
    wmstats_url = "%s/%s" % (self.config.couch_host, self.config.couch_wmstats_db)
    reqdb_url = "%s/%s" % (self.config.couch_host, self.config.couch_reqmgr_db)
    # Tier-0 requests use a dedicated couchapp.
    couchAppName = "T0Request" if t0flag else "ReqMgr"
    self.wmstats = WMStatsReader(wmstats_url, reqdbURL=reqdb_url,
                                 reqdbCouchApp=couchAppName)
def __init__(self, app, api, config, mount):
    """Create a WMStats reader bound to the standard ReqMgr couchapp."""
    # main CouchDB database where requests/workloads are stored
    RESTEntity.__init__(self, app, api, config, mount)
    host = self.config.couch_host
    wmstats_url = "%s/%s" % (host, self.config.couch_wmstats_db)
    reqdb_url = "%s/%s" % (host, self.config.couch_reqmgr_db)
    self.wmstats = WMStatsReader(wmstats_url, reqdbURL=reqdb_url,
                                 reqdbCouchApp="ReqMgr")
def __init__(self, app, api, config, mount):
    RESTEntity.__init__(self, app, api, config, mount)
    # Start the LDAP synchronisation thread only when an "ldapsync"
    # configuration attribute is present; otherwise syncing is disabled.
    if getattr(config, "ldapsync", False):
        self._syncer = LdapSyncThread(app, config.ldapsync, mount,
                                      cacertdir = getattr(config, "cacertdir", "/etc/grid-security/certificates"),
                                      minreq = getattr(config, "ldsyncreq", 1000),
                                      interval = getattr(config, "ldsynctime", 300),
                                      instance = getattr(config, "ldsyncto", "test"))
    else:
        self._syncer = None
def __init__(self, app, api, config, mount):
    RESTEntity.__init__(self, app, api, config, mount)
    # Start the REBUS fetching thread only when a "rebusfetch" configuration
    # attribute is present; otherwise fetching is disabled.
    if getattr(config, "rebusfetch", False):
        self._syncer = RebusFetchThread(app, config.rebusfetch, mount,
                                        cacertdir = getattr(config, "cacertdir", "/etc/grid-security/certificates"),
                                        minreq = getattr(config, "rebusfetchreq", 30),
                                        interval = getattr(config, "rebusfetchtime", 300),
                                        instance = getattr(config, "rebusfetchto", "test"))
    else:
        self._syncer = None
def __init__(self, app, api, config, mount):
    RESTEntity.__init__(self, app, api, config, mount)
    self.config = config
    # config.manager is a dotted path "package.module.ClassName"; import the
    # module and instantiate the named manager class with the config.
    arr = config.manager.split('.')
    try:
        cname = arr[-1]
        module = importlib.import_module('.'.join(arr[:-1]))
        self.mgr = getattr(module, cname)(config)
    except ImportError:
        # NOTE(review): on ImportError self.mgr is left unset, so later
        # attribute access will raise AttributeError — confirm this is the
        # intended failure mode (sibling variants fall back to a default manager).
        print("ERROR initializing MicroService REST module.")
        traceback.print_exc()
def __init__(self, app, api, config, mount):
    """Register CAF disk-usage and block-data scrapes on the API scraper."""
    # Regexes extracting the "useable total/free space in TB" table cells
    # from the SLS HTML page.
    _rxtotal = re.compile(r"useable total space in TB:.*?<td[^>]*>([0-9,]+)</td>")
    _rxfree = re.compile(r"useable free space in TB:.*?<td[^>]*>([0-9,]+)</td>")
    # cafdata entries are "label:regex" strings; pre-compile each regex once.
    _rxlist = [(l, re.compile(rx)) for l, rx in map(lambda s: s.split(":", 1), app.appconfig.cafdata)]
    _dataurl = "%s/datasvc/json/prod/blockreplicas?node=T2_CH_CERN" % app.appconfig.phedex
    _diskurl = "%s?id=EOSCMS" % (app.appconfig.sls % "service")  # FIXME CASTORCMS_CMSCAF?

    def _space(task, c, page):
        # Parse the SLS disk space information. Locate the total
        # space information field and extract total CAF disk size
        # from it. Note that SLS reports volume in metric, while
        # we use powers of two for the rest. Only 95% of the
        # reported space is actually valid for use.
        page = page.replace("\n", " ")
        mt = re.search(_rxtotal, page)
        mf = re.search(_rxfree, page)
        return mt and mf and \
            {"total": float(mt.group(1).replace(",", "")) * 0.95 * 1000**4 / 1024**4,
             "free": float(mf.group(1).replace(",", "")) * 0.95 * 1000**4 / 1024**4}

    def _blocks(task, c, page):
        # Parse PhEDEx json data. Find datasets from blocknames, and
        # accumulate statistics on total size and creation time.
        # Assign each dataset to the first label in rxlist which
        # matches the dataset name.
        value = {}
        for block in cjson.decode(page)['phedex']['block']:
            dsname = block['name'].split("#")[0]
            size = float(block['replica'][0]['bytes'])
            ctime = float(block['replica'][0]['time_create'])
            label = "other"
            for l, rx in _rxlist:
                if rx.search(dsname):
                    label = l
                    break
            if label not in value:
                value[label] = {}
            if dsname not in value[label]:
                value[label][dsname] = { 'size': size, 'ctime': ctime }
            else:
                # Same dataset seen again: accumulate size, keep the
                # earliest creation time.
                value[label][dsname]['size'] += size
                if ctime < value[label][dsname]['ctime']:
                    value[label][dsname]['ctime'] = ctime
        return value

    RESTEntity.__init__(self, app, api, config, mount)
    # Register the periodic scrapes: disk usage (HTML) and block data (json).
    api.scraper.scrape(("caf", "usage"), { "value": _diskurl },
                       content_type="text/html", convert=_space)
    api.scraper.scrape(("caf", "blocks"), { "value": _dataurl }, convert=_blocks)
def __init__(self, app, api, config, mount):
    RESTEntity.__init__(self, app, api, config, mount)
    self.config = config
    # config.manager is a dotted path "package.module.ClassName"; import and
    # instantiate it, falling back to the default MicroServiceManager on failure.
    arr = config.manager.split('.')
    try:
        cname = arr[-1]
        module = importlib.import_module('.'.join(arr[:-1]))
        self.mgr = getattr(module, cname)(config)
    except ImportError:
        traceback.print_exc()
        self.mgr = MicroServiceManager(config)
    # NOTE(review): debug print left in — consider a logger instead.
    print("### mgr", self.mgr)
def __init__(self, app, api, config, mount):
    RESTEntity.__init__(self, app, api, config, mount)
    self.config = config
    # NOTE(review): debug prints left in — consider a logger instead.
    print("### config.manager", config.manager, type(config.manager))
    # config.manager is a dotted path "package.module.ClassName"; import and
    # instantiate it, falling back to the default MicroServiceManager on failure.
    arr = config.manager.split('.')
    try:
        cname = arr[-1]
        module = importlib.import_module('.'.join(arr[:-1]))
        self.mgr = getattr(module, cname)(config)
    except ImportError:
        traceback.print_exc()
        self.mgr = MicroServiceManager(config)
    print("### mgr", self.mgr)
def __init__(self, app, api, config, mount):
    """Optionally start the REBUS fetching thread, driven by configuration."""
    RESTEntity.__init__(self, app, api, config, mount)
    # Disabled by default; enabled when a "rebusfetch" config attribute exists.
    self._syncer = None
    if getattr(config, "rebusfetch", False):
        # All thread parameters have config overrides with defaults.
        self._syncer = RebusFetchThread(
            app, config.rebusfetch, mount,
            cacertdir=getattr(config, "cacertdir", "/etc/grid-security/certificates"),
            minreq=getattr(config, "rebusfetchreq", 30),
            interval=getattr(config, "rebusfetchtime", 300),
            instance=getattr(config, "rebusfetchto", "test"))
def __init__(self, app, api, config, mount):
    """Set up the file-transfers REST entity: logger plus DB accessor."""
    RESTEntity.__init__(self, app, api, config, mount)
    self.logger = logging.getLogger("CRABLogger.FileTransfers")
    # Table accessor for the file-transfers database.
    self.transferDB = getDBinstance(config, 'FileTransfersDB', 'FileTransfers')
def __init__(self, app, api, config, mount):
    # Plain REST entity with no extra state beyond the base class.
    RESTEntity.__init__(self, app, api, config, mount)
def __init__(self, app, api, config, mount):
    """Initialize the task REST entity with its DB accessors and logger."""
    RESTEntity.__init__(self, app, api, config, mount)
    self.logger = logging.getLogger("CRABLogger.RESTTask")
    # Table accessors from the TaskDB schema.
    self.Task = getDBinstance(config, 'TaskDB', 'Task')
    self.JobGroup = getDBinstance(config, 'TaskDB', 'JobGroup')
def __init__(self, app, api, config, mount, serverdn, centralcfg):
    """Store server identity and central configuration for the info entity."""
    RESTEntity.__init__(self, app, api, config, mount)
    self.logger = logging.getLogger("CRABLogger:RESTServerInfo")
    self.serverdn = serverdn
    self.centralcfg = centralcfg
def __init__(self, app, api, config, mount, serverdn, centralcfg):
    """Keep the server DN and central configuration for later use."""
    RESTEntity.__init__(self, app, api, config, mount)
    self.serverdn = serverdn
    self.centralcfg = centralcfg
def __init__(self, app, api, config, mount):
    RESTEntity.__init__(self, app, api, config, mount)
    self.config = config
    # Manager object that performs the actual WMArchive operations.
    self.mgr = WMArchiveManager(config)
def __init__(self, app, api, config, mount):
    # main CouchDB database where requests/workloads are stored
    RESTEntity.__init__(self, app, api, config, mount)
    # Full URL of the request database (host + database name).
    self.reqdb_url = "%s/%s" % (config.couch_host, config.couch_reqmgr_db)
def __init__(self, app, api, config, mount):
    RESTEntity.__init__(self, app, api, config, mount)
    # Manager for job file-metadata operations.
    self.jobmetadata = DataFileMetadata()
def __init__(self, app, api, config, mount):  #pylint: disable=unused-argument
    RESTEntity.__init__(self, app, api, config, mount)
    # DB accessor for the file-transfers table and a dedicated logger.
    self.transferDB = getDBinstance(config, 'FileTransfersDB', 'FileTransfers')
    self.logger = logging.getLogger("CRABLogger.FileTransfers")
def __init__(self, *args):
    """Load the SiteDB SQL schema from the source tree into memory."""
    RESTEntity.__init__(self, *args)
    # Schema files live under src/sql relative to the current working directory.
    self._schemadir = joinpath(os.getcwd(), "src/sql")
    # FIX: read via a context manager so the file handle is closed promptly
    # instead of leaking until garbage collection.
    with open(joinpath(self._schemadir, "sitedb.sql")) as schemafile:
        self._schema = schemafile.read()
def __init__(self, app, api, config, mount):
    """Wire up the user-workflow entity: logger, site cache, data manager."""
    RESTEntity.__init__(self, app, api, config, mount)
    self.logger = logging.getLogger("CRABLogger.RESTUserWorkflow")
    # Site-name cache starts empty and uncached (cachetime=0); the manager
    # handles user workflow data.
    self.allCMSNames = CMSSitesCache(cachetime=0, sites={})
    self.userworkflowmgr = DataUserWorkflow()
def __init__(self, app, api, config, mount):
    # CouchDB auxiliary database name
    RESTEntity.__init__(self, app, api, config, mount)
    self.reqmgr_aux_db = api.db_handler.get_db(config.couch_reqmgr_aux_db)
def __init__(self, app, api, config, mount):
    RESTEntity.__init__(self, app, api, config, mount)
    # Base URL of the PhEDEx data service (json flavour).
    self._datasvc = app.appconfig.phedex + "/datasvc/json"
def __init__(self, app, api, config, mount):
    """Bind the task and job-group DB accessors for this entity."""
    RESTEntity.__init__(self, app, api, config, mount)
    # Table accessors from the TaskDB schema.
    self.JobGroup = getDBinstance(config, "TaskDB", "JobGroup")
    self.Task = getDBinstance(config, "TaskDB", "Task")
def __init__(self, app, api, config, mount, tasks):
    """:arg list tasks: the list of task objects."""
    RESTEntity.__init__(self, app, api, config, mount)
    # Keep a reference to the externally managed task list.
    self._tasks = tasks
def __init__(self, app, api, config, mount):
    RESTEntity.__init__(self, app, api, config, mount)
    # Host cache rooted in the application state directory.
    self._cache = HostCache(app.statedir)
def __init__(self, app, api, config, mount):
    RESTEntity.__init__(self, app, api, config, mount)
    # Manager for campaign data operations.
    self.campaignmgr = DataCampaign(config)
def __init__(self, app, api, config, mount):
    # CouchDB auxiliary database name
    RESTEntity.__init__(self, app, api, config, mount)
    # Record construction time — presumably for uptime/elapsed-time
    # reporting; confirm against callers.
    self.time0 = time.time()
# Import-and-instantiate smoke test for the WMCore REST server classes.
from WMCore.REST.Server import RESTFrontPage
from WMCore.REST.Server import MiniRESTApi
from WMCore.REST.Server import RESTApi
from WMCore.REST.Server import DBConnectionPool
from WMCore.REST.Server import DatabaseRESTApi
from WMCore.REST.Server import RESTEntity
import os, threading

# Name of this module (file name without extension), used as the module part
# of the fake database specification below.
srcfile = os.path.abspath(__file__).rsplit("/", 1)[-1].split(".")[0]
# Empty database specification referenced by FakeConf.db.
dbspec = {}

class FakeApp:
    # Minimal stand-in for a real application object.
    appname = "app"

class FakeConf:
    # Points the DB-backed API at the (empty) dbspec in this module.
    db = srcfile + ".dbspec"

# Instantiate each server class with minimal arguments to verify that
# construction succeeds.
RESTFrontPage(None, None, "/", "/dev/null", {})
MiniRESTApi(FakeApp(), None, "/")
RESTApi(FakeApp(), None, "/")
DBConnectionPool("x", {})
# DatabaseRESTApi is only constructed in the main thread here — presumably
# its setup is not safe off the main thread; confirm before relying on it.
if threading.current_thread().name == "MainThread":
    DatabaseRESTApi(FakeApp(), FakeConf(), "/")
RESTEntity(FakeApp(), None, None, "/")
def __init__(self, app, api, config, mount):
    """Attach TaskDB accessors for tasks and job groups."""
    RESTEntity.__init__(self, app, api, config, mount)
    self.JobGroup = getDBinstance(config, 'TaskDB', 'JobGroup')
    self.Task = getDBinstance(config, 'TaskDB', 'Task')
def __init__(self, app, api, config, mount):
    """Bind the request CouchDB handle and keep the configuration object."""
    RESTEntity.__init__(self, app, api, config, mount)
    self.config = config
    self.reqmgr_db = api.db_handler.get_db(config.couch_reqmgr_db)
def __init__(self, app, api, config, mount):
    RESTEntity.__init__(self, app, api, config, mount)
    # Manager for job file-metadata operations, configured for this instance.
    self.jobmetadata = DataFileMetadata(config)
def __init__(self, app, api, config, mount):
    # main CouchDB database where requests/workloads are stored
    RESTEntity.__init__(self, app, api, config, mount)