def requestHistorical(session, securities, fields, startDate, endDate):
    # Record one Bloomberg hit per security/field combination for usage accounting.
    recordBloombergHits("historical", len(securities) * len(fields))
    refDataService, _ = openBloombergService(session, "//blp/refdata")
    request = refDataService.createRequest("HistoricalDataRequest")
    request.set("startDate", startDate)
    request.set("endDate", endDate)
    request.set("periodicitySelection", "DAILY")
    for security in securities:
        request.append("securities", security)
    for field in fields:
        request.append("fields", field)
    responses = sendAndWait(session, request)
    # Gather pricing rows and any per-security/field errors from every partial response.
    securityPricing = []
    for response in responses:
        securityPricing.extend(extractHistoricalSecurityPricing(response))
    errors = []
    for response in responses:
        errors.extend(extractErrors(response))
    return {"response": securityPricing, "errors": errors}
def requestLatest(session, securities, fields):
    # Record one Bloomberg hit per security/field combination for usage accounting.
    recordBloombergHits("latest", len(securities) * len(fields))
    refDataService, _ = session.getService("//blp/refdata")
    request = refDataService.createRequest("ReferenceDataRequest")
    request.set("returnFormattedValue", True)
    for security in securities:
        request.append("securities", security)
    for field in fields:
        request.append("fields", field)
    responses = session.sendAndWait(request)
    # Gather the latest pricing rows and any per-security/field errors.
    securityPricing = []
    for response in responses:
        securityPricing.extend(extractReferenceSecurityPricing(response))
    errors = []
    for response in responses:
        errors.extend(extractErrors(response))
    return {"response": securityPricing, "errors": errors}
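# A hedged usage sketch for the two request helpers above. How `session` is opened depends on
# the surrounding module (openBloombergService/sendAndWait suggest a blpapi wrapper); the
# securities, fields, and dates below are purely illustrative, not from the original code.
latest = requestLatest(session, ["IBM US Equity", "MSFT US Equity"], ["PX_LAST"])
history = requestHistorical(session, ["IBM US Equity"], ["PX_LAST"],
                            startDate="20240101", endDate="20240331")
for error in history["errors"]:
    print(error)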
import os
import sqlite3


def search(ask):
    conn = sqlite3.connect('corpus.db')
    c = conn.cursor()
    # Lemmatize the query with mystem: write it to ask.txt, run the tool, read back the result.
    with open('ask.txt', 'w', encoding='utf-8') as fout:
        fout.write(ask)
    os.system('mystem ask.txt ask -c -l -d --eng-gr -g')
    with open('ask', 'r', encoding='utf-8') as fin:
        ask = fin.read().rstrip()
    # Find documents whose lemmatized text contains the lemmatized query.
    sq = "SELECT * FROM data WHERE lemmtext LIKE ?"
    c.execute(sq, ['%' + ask + '%'])
    ans = c.fetchall()
    conn.close()
    request = []
    for row in ans:
        _id, text, lemmtext, link, name = row[:5]
        lemm = lemmtext.split()
        askl = ask.split()
        ind = find(askl, lemm)
        # Return a snippet with ~15 words of context on either side of the match.
        textl = text.split()
        ind0 = max(0, ind - 15)
        ind1 = min(len(textl) - 1, ind + len(askl) + 15)
        req = ' '.join(textl[ind0:ind1])
        request.append([name, link, req])
    return request
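# The find() helper used above is not defined in this snippet. A minimal sketch of what it is
# assumed to do (return the index in `lemm` where the query lemma sequence `askl` starts,
# falling back to 0 when there is no exact match) could look like this:
def find(askl, lemm):
    for i in range(len(lemm) - len(askl) + 1):
        if lemm[i:i + len(askl)] == askl:
            return i
    return 0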
def createSheet(sheetTitle):
    # Add a new sheet (tab) with the given title to the target spreadsheet.
    request = [{'addSheet': {'properties': {'title': sheetTitle}}}]
    response = sheet.batchUpdate(spreadsheetId=SAMPLE_SPREADSHEET_ID,
                                 body={'requests': request}).execute()
    return response
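# createSheet relies on module-level `sheet` and `SAMPLE_SPREADSHEET_ID` globals that are not
# defined in this snippet. A minimal sketch of how they might be set up with
# google-api-python-client, assuming OAuth credentials are already available as `creds`:
from googleapiclient.discovery import build

SAMPLE_SPREADSHEET_ID = 'your-spreadsheet-id'  # placeholder, not from the original code
service = build('sheets', 'v4', credentials=creds)
sheet = service.spreadsheets()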
def returnCategories(policy):
    categorizedPolicy = tokenizePolicy(policy)
    # For each GDPR topic, collect its top-ranked token indices and tag the list with the topic name.
    topics = ['datacontroller', 'purpose', 'legalbasis', 'recipients',
              'retention', 'request', 'profiling', 'personaldata']
    categories = []
    for topic in topics:
        indices = list(returnTopIndex(topicDefiner(topic), policy))
        indices.append(topic)
        categories.append(indices)
    # Wrap each matched region (25 tokens either side of the hit) in a labelled HTML section.
    for category in categories:
        label = category[-1]
        for value in category:
            if isinstance(value, str):
                continue  # skip the topic label itself
            try:
                categorizedPolicy[value - 25] = ("<h1> SECTION:" + label + "</h1>"
                                                 + "<div id='" + label + "'>"
                                                 + "<b>" + categorizedPolicy[value - 25])
            except IndexError:
                categorizedPolicy[value] = ("<div id='" + label + "'>"
                                            + "<b>" + categorizedPolicy[value])
            try:
                categorizedPolicy[value + 25] = categorizedPolicy[value + 25] + "</b></div>"
            except IndexError:
                categorizedPolicy[value] = categorizedPolicy[value] + "</b></div>"
    return " ".join(categorizedPolicy)
from datetime import datetime


def process_web_vuln(instance, data):
    """Convert a web vulnerability JSON payload into the corresponding model objects."""
    detail = data["detail"]
    p = detail["param"]
    if p:
        param = WebParam(key=p["key"], value=p["value"],
                         position=WebParamPosition(p["position"]))
    else:
        param = None
    request = []
    response = []
    extra = {}
    # Requests/responses arrive as numbered keys: request, request1, ..., request9.
    for i in range(0, 10):
        req_key = f"request{i}" if i else "request"
        resp_key = f"response{i}" if i else "response"
        req = detail.get(req_key)
        resp = detail.get(resp_key)
        if req == "" or resp == "":
            continue
        if req is None or resp is None:
            break
        request.append(WebRequest(raw=req))
        response.append(WebResponse(raw=resp))
    # Any remaining fields may be plugin-specific custom data, so collect them separately.
    not_extra_key = ["request", "response", "param", "payload", "url"]
    for k, v in detail.items():
        for item in not_extra_key:
            if item in k:
                break
        else:
            extra[k] = v
    vuln = WebVuln(create_time=datetime.fromtimestamp(data["create_time"] / 1000),
                   plugin=data["plugin"],
                   vuln_class=data["vuln_class"],
                   url=data["target"]["url"],
                   param=param,
                   request=request,
                   response=response,
                   extra=extra)
    dispatch_web_vuln(instance, vuln)
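# An illustrative example (an assumption, inferred only from the keys accessed above) of the
# JSON payload process_web_vuln expects; all values are made up:
example_data = {
    "create_time": 1609459200000,  # milliseconds since the epoch
    "plugin": "example-plugin",
    "vuln_class": "example-class",
    "target": {"url": "http://example.com/index.php"},
    "detail": {
        "param": {"key": "id", "value": "1", "position": "query"},
        "request": "GET /index.php?id=1 HTTP/1.1\r\n...",
        "response": "HTTP/1.1 200 OK\r\n...",
        "custom_field": "any other key ends up in extra",
    },
}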
import json


def batch_requests(*args):
    # Each argument is a (method, relative_url, body) tuple; method defaults to GET
    # and body is only included when present.
    request = []
    for (method, relative_url, body) in args:
        if method is None:
            method = 'GET'
        per_request = {'method': method, 'relative_url': relative_url}
        if body is not None:
            per_request['body'] = body
        request.append(per_request)
    # The Graph API batch endpoint takes the individual requests as a JSON-encoded list.
    response = facebook.post('/', data={
        'access_token': get_facebook_oauth_token(),
        'batch': json.dumps(request),
    })
    return response
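# A hedged usage sketch for batch_requests; the relative URLs are illustrative Graph API
# paths, not taken from the original code.
response = batch_requests(
    ('GET', 'me', None),
    (None, 'me/friends', None),              # a None method falls back to GET
    ('POST', 'me/feed', 'message=Hello world'),
)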
def getRequest(userid):
    request = []
    # Fetch all requests addressed to this user; parameterized to avoid SQL injection
    # (assumes a DB-API driver that uses %s placeholders, e.g. MySQLdb).
    g.database.execute(
        "SELECT Request_id, Request_from, Request_to, Status FROM requests WHERE Request_to = %s",
        (userid,))
    for a in g.database.fetchall():
        data = {
            'reqid': a[0],
            'reqfrom': a[1],
            'reqto': a[2],
            'status': a[3],
            'reqfromuser': [],
        }
        # Attach the sender's profile details to each request.
        g.database.execute(
            "SELECT User_id, Username, Name FROM entries WHERE User_id = %s",
            (a[1],))
        for i in g.database.fetchall():
            data['reqfromuser'].append({
                'userid': i[0],
                'username': i[1],
                'name': i[2],
            })
        request.append(data)
    return request
converters={'count': int})
request_daily = pd.read_csv('/home/programming/python/request_daily.csv',
                            engine='python', na_values='-', header=None,
                            usecols=[0, 1], names=['link', 'count'],
                            converters={'count': int})
request_monthly = pd.read_csv('/home/programming/python/request_monthly.csv',
                              engine='python', na_values='-', header=None,
                              usecols=[0, 1], names=['link', 'count'],
                              converters={'count': int})
# Combine the daily and monthly counts and aggregate hits per link.
request = pd.concat([request_monthly, request_daily])
df = request.groupby('link').sum().sort_values(by='count', ascending=False).reset_index()


@app.route('/request')
def request():
    return render_template('simple3.html',
                           tables=[df.to_html(classes='data')],
                           titles=df.columns.values)


rate = pd.read_csv('/home/programming/python/rate.csv', engine='python', na_values='-',
def _activeHelper():
    request = []
    get_active_request()
    request.append({'title': 'HELPER', 'rating': '9.1'})
    return jsonify({"movies": request})