def log(message: str, metadata: dict = {}):
    history: list = session.get("deployment_history", create_new_if_empty=1, new_value=[])
    history.append({
        "time": int(time.time()),
        "log": message,
        "meta": metadata,
    })
    session.set("deployment_history", history)
def downMusic(url, sid):
    # Skip the download if the file is already on disk.
    if os.path.exists('mp3/%s.mp3' % sid):
        log.info('Already downloaded')
        return
    r = session.get(url, timeout=30)
    with open('mp3/%s.mp3' % sid, 'wb') as fp:
        fp.write(r.content)
    log.info('Download finished')
def poll_deposits(username, password):
    # fetchall() so the update below can reuse the same cursor without cutting the iteration short.
    rows = cursor.execute("select token, ctn, doi, status from ctns where status = ?",
                          [SUBMITTED]).fetchall()
    for (token, ctn, doi, status) in rows:
        response = session.get("https://api.crossref.org/deposits/{}".format(token),
                               auth=(username, password))
        if response.status_code == 401:
            print("Bad username or password")
            exit()
        new_status = response.json()['message']['status']
        print("Update {} in {} status {} -> {}".format(ctn, doi, status, new_status))
        cursor.execute("update ctns set status = ? where token = ?", [new_status, token])
    connection.commit()
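# A minimal sketch of the module-level setup that poll_deposits() assumes: a sqlite3
# connection/cursor, a requests session, and a SUBMITTED status constant. The database
# filename, exact table layout, and status value are assumptions for illustration, not
# taken from the original code.
import sqlite3

import requests

SUBMITTED = "submitted"  # assumed status marker; the real constant may differ

connection = sqlite3.connect("ctns.db")  # hypothetical database file
cursor = connection.cursor()
cursor.execute("create table if not exists ctns (token text, ctn text, doi text, status text)")
session = requests.Session()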
def fetch_pages():
    """Iterate over all pages in the API and extract CTNs."""
    params = {"q": "trial_registration:*",
              "fq": "doc_type:full",
              "fl": "id,title_display,trial_registration"}
    response = session.get("http://api.plos.org/search", params=params)
    tree = ET.fromstring(response.content)
    num_found = int(tree.find("result").attrib['numFound'])
    # Integer division so range() receives an int.
    last_page = num_found // PAGE_SIZE + 1
    for page in range(0, last_page + 1):
        print("Page {} of {}".format(page, last_page))
        fetch_page(page)
def fetch_page(page_number):
    """Fetch a page from the PLOS API, extract CTNs and insert into the database."""
    params = {"q": "trial_registration:*",
              "fq": "doc_type:full",
              "fl": "id,title_display,trial_registration",
              "rows": PAGE_SIZE,
              "start": page_number * PAGE_SIZE}
    response = session.get("http://api.plos.org/search", params=params)
    tree = ET.fromstring(response.content)
    docs = tree.findall("result/doc")
    for doc in docs:
        work_doi = doc.find("str[@name='id']").text
        ctn_inputs = doc.find("arr[@name='trial_registration']").findall("str")
        if not work_doi.startswith("10."):
            continue
        for ctn_input in ctn_inputs:
            extracted_ctns = extract_ctn(ctn_input.text)
            for ctn, registry_doi in extracted_ctns:
                insert_ctn(ctn, registry_doi, work_doi)
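# A minimal sketch of what fetch_pages()/fetch_page() rely on at module level: an
# ElementTree import aliased as ET, a requests session, and a PAGE_SIZE constant.
# The PAGE_SIZE value below is an assumption; extract_ctn() and insert_ctn() are
# helpers defined elsewhere in the original project and are not reproduced here.
import xml.etree.ElementTree as ET

import requests

PAGE_SIZE = 100  # assumed page size; the original constant may differ
session = requests.Session()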
import json

import requests
import yaml
from git import Repo

from util import session

# Loads previous commit node_id if it exists within Session
node_id = session.get("node_id")

repo = Repo("")
assert not repo.bare
origin = repo.remote("origin")
api_url = origin.refs

with open("autodeploy.conf") as f:
    config = yaml.safe_load(f)
repo_name = config['repo']


def remaining_rate() -> int:
    response = requests.get('https://api.github.com/rate_limit')
    data = json.loads(response.text)
    return data["rate"]["remaining"]


def has_new_update():
    if remaining_rate() > 3:
        url = ('https://api.github.com/repos/' + repo_name + '/commits'
               + ('?sha=' + str(config['branch']) if 'branch' in config else ''))
        print("autodeploy[" + config['repo'] + "]: Checking URL: " + url)
        response = requests.get(url)
        data = json.loads(response.text)[0]
def deployment_history():
    h = session.get("deployment_history", create_new_if_empty=1, new_value=[])
    data = {'deployment_history': h}
    return json.dumps(data)
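# A small usage sketch, assuming util.session exposes the get/set key-value API used
# above and that deployment_history() is returned as a JSON string (e.g. from a web
# route). The example messages and metadata are illustrative only.
log("Deploy started")
log("Deploy finished", {"status": "ok"})
print(deployment_history())  # -> {"deployment_history": [{"time": ..., "log": ..., "meta": ...}, ...]}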