import re
import json
from json import dumps
from sys import version_info

try:  # Python 3
    from urllib.parse import urlencode
    from urllib.request import Request, urlopen, build_opener, install_opener, HTTPCookieProcessor
    from http.cookiejar import CookieJar
except ImportError:  # Python 2
    from urllib import urlencode
    from urllib2 import Request, urlopen, build_opener, install_opener, HTTPCookieProcessor
    from cookielib import CookieJar


def search_files(self, dataset):
    """Return the DBS file records (with details) for the given dataset."""
    status, jmsg = self.get_cmsweb_data('{0}?{1}'.format(
        self.URL_DBS_FILES, urlencode({'detail': 1, 'dataset': dataset})))
    if not status:
        return {}
    return jmsg
def search_blocks(self, dataset):
    """Return the DBS block records (with details) for the given dataset."""
    status, jmsg = self.get_cmsweb_data('{0}?{1}'.format(
        self.URL_DBS_BLOCKS, urlencode({'dataset': dataset, 'detail': 1})))
    if not status:
        return {}
    return jmsg
def search_lfn(self, lfn):
    """Return the DBS block records for the block containing the given LFN."""
    status, jmsg = self.get_cmsweb_data('{0}?{1}'.format(
        self.URL_DBS_BLOCKS, urlencode({'detail': 1, 'logical_file_name': lfn})))
    if not status:
        return {}
    return jmsg
def search_dataset_status(self, dataset):
    """Return (True, access_type) for the dataset, or (False, "") on failure."""
    status, jmsg = self.get_cmsweb_data('{0}?{1}'.format(
        self.URL_DBS_DATASETS,
        urlencode({'detail': 1, 'dataset_access_type': '*', 'dataset': dataset})))
    if not status:
        return False, ""
    return True, jmsg[0]['dataset_access_type'].strip().replace(" ", "_")
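# Usage sketch for the DBS query helpers above (hypothetical: assumes these
# methods live on a cmsweb client class, here called "CMSWeb", that provides
# get_cmsweb_data() and the URL_DBS_* endpoint attributes; the dataset name
# is illustrative):
#
#   client = CMSWeb()
#   files = client.search_files('/SingleMuon/Run2017F-v1/RAW')
#   ok, access_type = client.search_dataset_status('/SingleMuon/Run2017F-v1/RAW')
#   if ok:
#       print(access_type)  # e.g. "VALID" or "DEPRECATED"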
def process(opts):
    """Trigger a Jenkins job build with name=value parameters from opts.params."""
    xparam = []
    for param in opts.params:
        p, v = param.split("=", 1)
        xparam.append({"name": p, "value": v})
    data = {"json": dumps({"parameter": xparam}), "Submit": "Build"}
    try:
        url = opts.server + '/job/' + opts.job + '/build'
        data = urlencode(data)
        if version_info[0] == 3:
            data = data.encode("utf-8")  # urlopen requires a bytes body on Python 3
        req = Request(url=url, data=data, headers={"ADFS_LOGIN": opts.user})
        content = urlopen(req).read()
    except Exception as e:
        print("Unable to start jenkins job:", e)
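# Hypothetical invocation of process(); opts only needs .server, .job, .user
# and .params attributes, so an argparse Namespace works (values illustrative):
#
#   from argparse import Namespace
#   opts = Namespace(server="https://jenkins.example.com", job="sample-job",
#                    user="someuser", params=["PULL_REQUEST=1234"])
#   process(opts)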
def build_jobs(jenkins_url, jobs_data, headers=None, user="******"):
    """Submit a list of (parameters, job) builds to Jenkins, refreshing the CSRF crumb."""
    if headers is None:  # avoid the mutable-default-argument pitfall
        headers = {}
    for rk in ["OIDC_CLAIM_CERN_UPN"]:
        if rk not in headers:
            headers[rk] = user
    install_opener(build_opener(HTTPCookieProcessor(CookieJar())))
    for params, job in jobs_data:
        if not job:
            continue
        headers = update_crumb(jenkins_url, headers)
        url = jenkins_url + '/job/' + job + '/build'
        data = {"json": params, "Submit": "Build"}
        try:
            data = urlencode(data)
            if version_info[0] == 3:
                data = data.encode("utf-8")  # urlopen requires a bytes body on Python 3
            req = Request(url=url, data=data, headers=headers)
            content = urlopen(req).read()
            print("ALL_OK")
        except Exception as e:
            print("Unable to start jenkins job: %s" % e)
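# Minimal sketch of driving build_jobs(); each jobs_data entry pairs a Jenkins
# JSON parameter payload with a job name (all values illustrative; update_crumb
# is assumed to come from the surrounding module):
#
#   payload = dumps({"parameter": [{"name": "PULL_REQUEST", "value": "1234"}]})
#   build_jobs("https://jenkins.example.com", [(payload, "sample-job")],
#              user="someuser")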
def search_block(self, block):
    """Return (True, summary) of PhEDEx replica info for a block, or (False, {})."""
    status, jmsg = self.get_cmsweb_data('{0}?{1}'.format(
        self.URL_PHEDEX_BLOCKREPLICAS, urlencode({'block': block})))
    if not status:
        return False, {}
    if len(jmsg['phedex']['block']) == 0:
        return False, {}
    block_data = {"at_cern": "no", "replicas": [], "ds_files": "0", "ds_owner": "UNKNOWN"}
    for replica in jmsg['phedex']['block'][0]['replica']:
        # Only count replicas that belong to a group.
        if ("group" not in replica) or (not replica['group']):
            continue
        block_data["replicas"].append(replica["node"])
        block_data["ds_files"] = str(replica["files"])
        block_data["ds_owner"] = replica["group"].strip().replace(" ", "_")
        if replica["node"] == "T2_CH_CERN":
            block_data["at_cern"] = "yes"
    return True, block_data
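# Shape of the summary returned by search_block() on success (block name and
# values illustrative; "client" is the same hypothetical cmsweb client as above):
#
#   ok, block_data = client.search_block('/A/B/RAW#block-uuid')
#   # block_data == {"at_cern": "yes", "replicas": ["T2_CH_CERN", ...],
#   #                "ds_files": "42", "ds_owner": "SomeGroup"}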
def search_runs(self, dataset):
    """Return the DBS run records for the given dataset."""
    status, jmsg = self.get_cmsweb_data('{0}?{1}'.format(
        self.URL_DBS_RUNS, urlencode({'dataset': dataset})))
    if not status:
        return {}
    return jmsg
def github_api(uri, token, params=None, method="POST", headers=None, page=1,
               page_range=None, raw=False, per_page=None, last_page=False):
    """Call the GitHub REST API and transparently follow 'Link' pagination."""
    if not params:
        params = {}
    if not headers:
        headers = {}
    if not page_range:
        page_range = []
    url = "https://api.github.com%s" % uri
    data = None
    if per_page:
        params['per_page'] = per_page
    if method == "GET":
        if params:
            url = url + "?" + urlencode(params)
    else:
        data = json.dumps(params)
        if version_info[0] == 3:
            data = data.encode("utf-8")
    if page > 1:
        url = url + ("&" if "?" in url else "?") + "page=%s" % page
    headers["Authorization"] = "token " + token
    request = Request(url, data=data, headers=headers)
    request.get_method = lambda: method
    response = urlopen(request)
    if (page <= 1) and (method == 'GET'):
        # Parse the 'Link' response header to discover the full page range.
        if version_info[0] == 2:
            link = response.info().getheader("Link")
        else:
            link = response.info().get("Link")
        if link:
            pages = []
            for x in link.split(" "):
                m = re.match('^.*[?&]page=([1-9][0-9]*).*$', x)
                if m:
                    pages.append(int(m.group(1)))
            if len(pages) == 2:
                page_range += range(pages[0], pages[1] + 1)
            elif len(pages) == 1:
                page_range += pages
    cont = response.read()
    if raw:
        return cont
    data = json.loads(cont)
    if page_range and page <= 1:
        if last_page:
            return github_api(uri, token, params, method, headers, page_range[-1],
                              page_range=None, raw=False, per_page=per_page,
                              last_page=last_page)
        # Fetch and concatenate the remaining pages.
        for page in page_range:
            data += github_api(uri, token, params, method, headers, page,
                               page_range=None, raw=raw, per_page=per_page,
                               last_page=last_page)
    return data
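# Hypothetical call of the variant above, which takes the OAuth token
# explicitly and follows all result pages for GET requests (endpoint and
# repository are illustrative):
#
#   prs = github_api("/repos/cms-sw/cmssw/pulls", token, {"state": "open"},
#                    method="GET", per_page=100)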
# Module-level state updated by the variant below.
GH_RATE_LIMIT = []
GH_PAGE_RANGE = []


def github_api(uri, params=None, method="POST", headers=None, page=1, raw=False,
               per_page=100, last_page=False, all_pages=True, max_pages=-1):
    """Call the GitHub REST API, tracking rate limits and following pagination."""
    global GH_RATE_LIMIT, GH_PAGE_RANGE
    if max_pages > 0 and page > max_pages:
        return '[]' if raw else []
    if not params:
        params = {}
    if not headers:
        headers = {}
    url = "https://api.github.com%s" % uri
    data = None
    if per_page and ('per_page' not in params):
        params['per_page'] = per_page
    if method == "GET":
        if params:
            url = url + "?" + urlencode(params)
    else:
        data = json.dumps(params)
        if version_info[0] == 3:
            data = data.encode("utf-8")
    if page > 1:
        url = url + ("&" if "?" in url else "?") + "page=%s" % page
    # get_gh_token() is assumed to be provided elsewhere in this module.
    headers["Authorization"] = "token " + get_gh_token()
    request = Request(url, data=data, headers=headers)
    request.get_method = lambda: method
    response = urlopen(request)
    if page <= 1:
        GH_PAGE_RANGE = []
        try:
            # Record the current rate-limit state from the response headers.
            GH_RATE_LIMIT = [
                int(response.headers["X-RateLimit-Remaining"]),
                int(response.headers["X-RateLimit-Limit"]),
                int(response.headers["X-RateLimit-Reset"])
            ]
        except Exception as e:
            print("ERROR:", e)
    if (page <= 1) and (method == 'GET'):
        # Parse the 'Link' response header to discover the full page range.
        link = response.headers.get("Link")
        if link:
            pages = []
            for x in link.split(" "):
                m = re.match('^.*[?&]page=([1-9][0-9]*).*$', x)
                if m:
                    pages.append(int(m.group(1)))
            if len(pages) == 2:
                GH_PAGE_RANGE += range(pages[0], pages[1] + 1)
            elif len(pages) == 1:
                GH_PAGE_RANGE += pages
    cont = response.read()
    if raw:
        return cont
    data = json.loads(cont)
    if GH_PAGE_RANGE and all_pages:
        if last_page:
            return github_api(uri, params, method, headers, GH_PAGE_RANGE[-1],
                              raw=False, per_page=per_page, all_pages=False)
        # Fetch and concatenate the remaining pages, honoring max_pages.
        for page in GH_PAGE_RANGE:
            if max_pages > 0 and page > max_pages:
                break
            data += github_api(uri, params, method, headers, page, raw=raw,
                               per_page=per_page, all_pages=False)
    return data
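# The later variant reads its token via get_gh_token() and caps pagination with
# max_pages; a hedged usage sketch (endpoint and repository illustrative):
#
#   issues = github_api("/repos/cms-sw/cmssw/issues", method="GET", max_pages=2)
#   print(GH_RATE_LIMIT)  # [remaining, limit, reset-epoch] from the last call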