def wks_teardown():
    """Tear down the test worksheet: show the sheet list, delete `wks`, show it again."""
    def _dump_sheets():
        # Print the worksheet list plus a "<name>: <count>" summary line.
        pp(sh.worksheets())
        print("\n %s: %s" % (wks_name, len(sh.worksheets())))

    print('\nresources__teardown()')
    _dump_sheets()
    sh.del_worksheet(wks)
    _dump_sheets()
def ledger_info(current_product, days=1):
    """Print remaining coins and earnings for one product's buy ledger.

    Reads ``<ROOT_DIR>/<product>/<product>buys.json`` and accumulates:
    coins still held (``sell_flag`` false), total earnings of sold lots, and
    earnings from sales newer than midnight ``days`` days ago.

    Side effect: adds the recent earnings to the module-level ``today_total``.

    :param current_product: product/ledger directory name
    :param days: look-back window in days for the "today" earnings figure
    """
    global today_total
    product_folder = ROOT_DIR + '/' + current_product
    ledger_file = product_folder + '/' + current_product + 'buys.json'
    with open(ledger_file) as json_ledger_file:
        ledger = json.load(json_ledger_file)
    remain_coins = 0
    total_earn = 0
    today_earn = 0
    for records in ledger:
        if ledger[records][0]['sell_flag'] == False:
            # Lot not sold yet: still held.
            remain_coins += ledger[records][0]['coins']
            pp(ledger[records])
        else:
            total_earn += ledger[records][0]['earn']
            # only today earn
            # Count the sale when sell_time is set and is after midnight
            # `days` days ago (both sides compared as timestamps).
            if ledger[records][0]['sell_time'] != 0 and dateutil.parser.parse(
                    (datetime.today() - timedelta(days=days)
                     ).strftime("%Y-%m-%d")).timestamp() < dateutil.parser.parse(
                         ledger[records][0]['sell_time']).timestamp():
                today_earn += ledger[records][0]['earn']
    print("{} Remaining coins = {}".format(current_product, remain_coins))
    print("{} Total earn = {}".format(current_product, total_earn))
    print("{} Today earn = {}".format(current_product, today_earn))
    print()
    today_total += today_earn
def nvMain(stock):  # Naver finance main page
    """Scrape dividend payout ratio and dividend yield from Naver finance.

    :param stock: 6-digit ticker code
    :return: {'배당': {'배당성향': ..., '배당수익률': ...}}
    """
    with req.Session() as s:
        headers = {
            'user-agent': ua.random,
            'referer': 'https://finance.naver.com/'
        }
        get_url = s.get(
            'https://finance.naver.com/item/main.nhn?code={}'.format(
                str(stock)),
            headers=headers).text
        soup = bs4(get_url, 'html.parser')
        d_dividendPayout = soup_tr_match(soup, '배당성향(%)', n=2)
        d_dividendYield = soup_tr_match(soup, '시가배당률(%)', n=2)
        # `result` instead of the original local `dict`, which shadowed the builtin.
        result = {
            '배당': {
                '배당성향': d_dividendPayout,
                '배당수익률': d_dividendYield,
            }
        }
        pp('크롤완료: 네이버 메인배당')
        return result
def improve_data_docstring(data, lines):
    """
    Improve the documentation of data by pretty-printing it into the docstring.

    :param data: The documented object
    :type data: object
    :param lines: The docstring lines, extended in place
    :type lines: list [ str ]
    """
    if isinstance(data, (list, tuple, dict, set)):
        # Capture pp()'s output by temporarily redirecting stdout.
        old_stdout = sys.stdout
        new_stdout = io.StringIO()
        sys.stdout = new_stdout
        try:
            pp(data)
            output = new_stdout.getvalue()
        finally:
            # Always restore stdout, even if pp() raises (the original
            # left stdout redirected on an exception).
            sys.stdout = old_stdout
        lines.append(".. code-block:: JavaScript")
        lines.append("")
        # Indent every line of the output so multi-line values stay inside
        # the reST code block (the original only indented the first line).
        for out_line in output.splitlines():
            lines.append("    " + out_line)
def gen_sql(sql, q_obj):
    """Fill the SQL template's placeholders from the query-params object.

    :param sql: SQL template containing __CONDITIONS__, __START__, __END__, __DIM__
    :param q_obj: query params object with 'condition' (dict of value lists),
                  'start', 'end' and 'dim' keys
    :return: the rendered SQL string
    """
    pp(q_obj)
    conditions = "__PRODUCT__ __SITE_TYPE__ "
    replace_target = {'product': '', 'site_type': ''}
    for k, v in replace_target.items():
        if k not in q_obj['condition']:
            v = ""
        else:
            v = "','".join(q_obj['condition'][k])
        __replace__ = "__%s__" % k.upper()
        if v:
            v = "AND %s in ('%s')" % (k, v)
        # Placeholder is always replaced, with '' when the filter is absent.
        conditions = conditions.replace(__replace__, v)
    print("conditions:", conditions)
    sql = sql.replace("__CONDITIONS__", conditions)
    sql = sql.replace("__START__", q_obj['start'])
    sql = sql.replace("__END__", q_obj['end'])
    sql = sql.replace("__DIM__", q_obj['dim'])
    return sql
def sell_market_size_(response, product_folder, currency):
    """Handle the exchange's reply to a market SELL.

    Logs and persists the reply; on an error 'message' returns
    {'done_reason': False}, otherwise re-fetches and returns the order state.
    """
    pp(response)
    if 'message' in response:
        print("PROBLEM")
        print(response['message'] + " " + currency + " SELL")
        save_responses(response, product_folder, currency)
        return {'done_reason': False}
    order_id = response['id']
    r = requests.get(api_url + 'orders/' + order_id, auth=auth)
    response = r.json()
    pp(response)
    save_responses(response, product_folder, currency)
    return r.json()

# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# pp(get_accounts())
# pp(order_limit('OMG-EUR',1,2.7))
# pp(order_market('OMG-EUR',0.5))
# pp(get_account("OMG"))
# pp(get_product('OMG-EUR'))
# pp(sell_market_('OMG-EUR',1))
# pp(order_market_funds('OMG-EUR',1))
# pp(get_orders_list())
# r = requests.get(api_url + 'orders/4198b629-ebbc-4e4f-a384-c530a172fb6e', auth=auth)
# pp(r.json())
# pp(sell_market_size('OMG-EUR',1))
# pp(get_product('XRP-EUR'))
def query_date_histograms(q_obj):
    """Build, log and run the date-histogram ES query, then post-process its aggregations."""
    es_query_obj = make_es_query_obj(template_source, q_obj)
    pp(es_query_obj)
    pp(json.dumps(es_query_obj))
    raw = make_es_query(es_query_obj)
    return process_aggs(q_obj['dim'], raw['aggregations'])
def fnFinance(stock):  # fnguide company info > financial statements
    """Scrape the annual cash-flow rows from the fnguide 'Finance' page.

    :param stock: 6-digit ticker code (without the leading 'A')
    :return: {'현금흐름': {row label: values or None}}
    """
    with req.Session() as s:
        headers = {
            'user-agent': ua.random,
            'referer':
            'http://comp.fnguide.com/SVO2/ASP/SVD_Main.asp?pGB=1&gicode=A005930&cID=&MenuYn=Y&ReportGB=&NewMenuID=101&stkGb=701'
        }
        get_url = s.get(
            'https://comp.fnguide.com/SVO2/ASP/SVD_Finance.asp?pGB=1&gicode=A{}'
            .format(str(stock)),
            headers=headers).text
        soup = bs4(get_url, 'html.parser')

        def _row(label):
            # One cash-flow table row; None when missing/unparseable
            # (replaces five copy-pasted bare-except blocks).
            try:
                return soup_tr_match(soup, label, location='#divCashY tbody tr')
            except Exception:
                return None

        labels = [
            '영업활동으로인한현금흐름',
            '투자활동으로인한현금흐름',
            '재무활동으로인한현금흐름',
            '현금및현금성자산의증가',
            '기말현금및현금성자산',
        ]
        result = {'현금흐름': {label: _row(label) for label in labels}}
        pp('크롤완료: 재무제표')
        return result
def get_captcah(sid): start = time.time() print '\nACTION : GetCaptch' url = _VERIFY_CAPTCHA_URL.format(_TEST_URL, _TEST_PORT, sid) r = requests.get(url) ret = r.json() pp(ret) captcha = base64.b64decode(ret['content'][len('data:image/png;base64,'):]) with open('./image.png', 'w') as fp: fp.write(captcha) print 'cost {} secs'.format(time.time() - start) return ret
def gen_sql(q_obj):
    """Render the ci_order aggregation SQL from the query-params object.

    Note: mutates q_obj['dim'] in place when it equals 'inputtime'.
    """
    pp(q_obj)
    sql = """
    select __INTV__ as intv, __DIM__ as dim,
           sum(amount) as amount,
           sum(origin_amount) as origin_amount,
           count(distinct order_no) as order_counts,
           count(distinct uid) as user_counts
    from ci_order
    where __INTV__ between '__START__' and '__END__' __CONDITIONS__
    group by intv, dim;
    """
    conditions = " __CHANNEL__ __PRODUCTLINE__ __SUBSCRIBE_TYPE__ __MEMBER_CLASS__ __OS_PLATFORM__ __PAYMENT_PATTERN__"
    filter_fields = ('channel', 'subscribe_type', 'member_class',
                     'os_platform', 'payment_pattern', 'productline')
    for field in filter_fields:
        if field in q_obj['condition']:
            clause = "','".join(q_obj['condition'][field])
        else:
            clause = ''
        placeholder = "__%s__" % field.upper()
        print(field, clause, placeholder)
        if clause != '':
            clause = " AND %s in ('%s')" % (field, clause)
        conditions = conditions.replace(placeholder, clause)
    # Interval column: daily by default, monthly/weekly on request.
    intv = {'1M': 'show_month', '1w': 'show_week'}.get(q_obj['intv'], 'show_date')
    if q_obj['dim'] == 'inputtime':
        q_obj['dim'] = intv
    sql = sql.replace("__CONDITIONS__", conditions)
    sql = sql.replace("__START__", q_obj['start'])
    sql = sql.replace("__END__", q_obj['end'])
    sql = sql.replace("__DIM__", q_obj['dim'])
    sql = sql.replace("__INTV__", intv)
    return sql
def make_query_user(start, page_size, uid_list):
    """Search the 'customers' index for the given uids (bool/should of term queries)."""
    print("~~~~~~~~~~~~~~~~omg-------------------------")
    pp(uid_list)
    should_clauses = [{"term": {'uid': uid}} for uid in uid_list]
    q = {"query": {"bool": {"should": should_clauses}}}
    # NOTE(review): hard-coded cluster address -- consider making configurable.
    client = Elasticsearch('10.14.1.127')
    print('start', start, json.dumps(q, ensure_ascii=False))
    return client.search(index='customers',
                         size=page_size,
                         sort="_id:desc",
                         preference='_only_local',
                         body=q)
def order_market_size_(response, product_folder, currency):
    """Handle the exchange's reply to a market BUY.

    Logs and persists the reply; on an error 'message' returns
    {'done_reason': False}, otherwise re-fetches and returns the order state.
    """
    pp(response)
    if 'message' in response:
        print("PROBLEM")
        print(response['message'] + " " + currency + " BUY")
        save_responses(response, product_folder, currency)
        return {'done_reason': False}
    order_id = response['id']
    r = requests.get(api_url + 'orders/' + order_id, auth=auth)
    response = r.json()
    pp(response)
    save_responses(response, product_folder, currency)
    return r.json()
def nvPrice(stock):  # Naver price / trading-trend API
    """Scrape the last 30 days of trading-trend data from the Naver mobile API.

    :param stock: 6-digit ticker code
    :return: {'매매동향': {column name: list of 30 values (None when missing)}}
    """
    # NOTE(review): the session `s` is opened but req.get is used below --
    # presumably an oversight; kept as-is to preserve behavior.
    with req.Session() as s:
        url = 'https://m.stock.naver.com/api/item/getTrendList.nhn?code={}&size=30'.format(
            str(stock))
        headers = {
            'user-agent': ua.random,
            'referer': 'https://m.stock.naver.com/item/main.nhn'
        }
        r = req.get(url, headers=headers).json()['result']

        def appender(item):
            # Column `item` from every row; None when the key is absent
            # (replaces the original bare-except append loop and the local
            # named `list`, which shadowed the builtin).
            return [row.get(item) for row in r]

        bizdate = appender('bizdate')
        frgn_pure_buy_quant = appender('frgn_pure_buy_quant')
        indi_pure_buy_quant = appender('indi_pure_buy_quant')
        organ_pure_buy_quant = appender('organ_pure_buy_quant')
        frgn_hold_ratio = appender('frgn_hold_ratio')
        close_val = appender('close_val')
        change_val = appender('change_val')
        acc_quant = appender('acc_quant')
        risefall = appender('risefall')
        sosok = appender('sosok')
        result = {
            '매매동향': {
                '날짜': bizdate,  # date
                '외국인매매': frgn_pure_buy_quant,  # foreigners' net buys
                '개인매매': indi_pure_buy_quant,  # individuals' net buys
                '기관매매': organ_pure_buy_quant,  # institutions' net buys
                '외국인보유율': frgn_hold_ratio,  # foreign ownership ratio
                '종가': close_val,  # closing price
                '전일비': change_val,  # change vs previous day
                '거래량': acc_quant,  # volume
                '전일비등락': risefall,  # direction: 5 = down, 2 = up, 3 = flat
                '소속': sosok,  # market: 1 = KOSPI, 2 = KOSDAQ
            },
        }
        pp('크롤완료: 네이버 매매동향')
        return result
def insert_one(body):
    """Upsert one order document into the 'vp' index, keyed by body['id'].

    On any failure, logs what was seen and backs off 10 s.
    """
    # Pre-assign so the except handler can pp(doc) even when es.update()
    # itself raised (the original hit a NameError in that case).
    doc = None
    try:
        doc = es.update(index='vp',
                        doc_type='orders',
                        id=body['id'],
                        body={
                            'doc': body,
                            'doc_as_upsert': True
                        })
        print(doc)
        if doc['_shards']['successful'] != 0:
            # NOTE(review): `idx` is not defined in this function --
            # presumably a module-level loop index; confirm.
            print(idx, doc)
    except Exception:
        pp(doc)
        time.sleep(10)
def start():
    """Starts the module: prompt for a twitter username and browse its profile data."""
    username = input("Enter a twitter username: @")
    # NOTE(review): this prefix looks like a redacted token -- confirm.
    username = "******" + username
    user_data = get_user_data(username)
    if len(user_data) == 1:  # error payload: {'errors': [{'message': ...}]}
        print(user_data['errors'][0]["message"])
        # Bail out -- there is no profile data to browse (the original fell
        # through and crashed on the error payload below).
        return
    print(f"What info do you want to know about {username} twitter profile?")
    # a list of available (non-null) properties
    key_list = [info for info in user_data.keys() if user_data[info] is not None]
    for num, key in enumerate(key_list):
        print(num, key, type(user_data[key]))
    key_name = int(input("Enter a number to navigate: "))
    if isinstance(user_data[key_list[key_name]], dict):
        print(f"Here is your '{key_list[key_name]}' dictionary:")
        return pp(user_data[key_list[key_name]])
    else:
        return print(user_data[key_list[key_name]])
def weather_data_download(id, country):
    """Query OpenWeatherMap for a location and report whether it was found.

    :param id: city name (str), (lat, lon) tuple, 7-digit city id or
               5-digit zip code (int)
    :param country: country code appended to the query (cleared for tuples)
    :return: True when weather data came back, False when the API replied
             with a 'message' (error)
    :raises TypeError: for unusable id types or int lengths
    """
    if isinstance(id, str):
        parameter = "q=" + id
    elif isinstance(id, tuple):
        lat, lon = id
        country = ""
        # NOTE(review): int() truncates the coordinates to whole degrees --
        # confirm that this precision loss is intended.
        parameter = "lat={}&lon={}".format(int(lat), int(lon))
    elif isinstance(id, int) and not isinstance(id, bool):
        if len(str(id)) == 7:
            parameter = "id=" + str(id)
        elif len(str(id)) == 5:
            parameter = "zip=" + str(id)
        else:
            raise TypeError("Please input valid int city name or zip or id")
    else:
        raise TypeError("Please input valid city name or zip or id")
    # NOTE: hard-coded demo API key from the OWM docs.
    url = 'https://openweathermap.org/data/2.5/weather?{},{}&appid=b6907d289e10d714a6e88b30761fae22'.format(
        parameter, country)
    Json_data = requests.get(url).json()
    pp(Json_data)
    if "message" in Json_data:
        return False
    else:
        return True
def cli(ctx):
    """Prompt the user for proxy settings when none were auto-detected.

    NOTE(review): the branch runs when getproxies() found NOTHING, yet the
    message says "behind a proxy" -- presumably it means "you may need to
    configure one manually"; confirm the condition is not inverted.
    """
    # Detect whether we need the user to configure a proxy.
    proxy_info = urllib.getproxies()
    if len(proxy_info) == 0:
        print "It looks like you're behind a proxy server..."
        pp(proxy_info)
        # Placeholder host/port values -- not yet collected from the user.
        proxy_host = "ph"
        proxy_port = "pp"
        proxy_username = click.prompt('Please enter your username')
        proxy_password = click.prompt('Please enter your password',
                                      hide_input=True,
                                      confirmation_prompt=True)
        # Debug output only; nothing is persisted yet.
        print proxy_host
        print proxy_port
        print proxy_username
        print proxy_password
    return
def fnRatio(stock):  # fnguide company info > financial ratios
    """Scrape growth/leverage ratios from the fnguide 'FinanceRatio' page.

    :param stock: 6-digit ticker code (without the leading 'A')
    :return: {'재무비율': {label: values or None}}
    """
    with req.Session() as s:
        headers = {
            'user-agent': ua.random,
            'referer':
            'http://comp.fnguide.com/SVO2/ASP/SVD_Main.asp?pGB=1&gicode=A005930&cID=&MenuYn=Y&ReportGB=&NewMenuID=101&stkGb=701'
        }
        get_url = s.get(
            'https://comp.fnguide.com/SVO2/ASP/SVD_FinanceRatio.asp?pGB=1&gicode=A{}'
            .format(str(stock)),
            headers=headers).text
        soup = bs4(get_url, 'html.parser')

        def _ratio(label, **kwargs):
            # One table row; None when missing/unparseable
            # (replaces four copy-pasted bare-except blocks).
            try:
                return soup_tr_match(soup, label, **kwargs)
            except Exception:
                return None

        result = {
            '재무비율': {
                '매출액증가율': _ratio('매출액증가율', match2='이자수익증가율', n=2),
                '영업이익증가율': _ratio('영업이익증가율', n=2),
                '순차입금비율': _ratio('순차입금비율', n=2),
                '이자보상배율': _ratio('이자보상배율', n=2),
            }
        }
        pp('크롤완료: 재무비율')
        return result
def findForm(cls, rowloc, formTag):
    """Grow a form from the vertex at *rowloc*, register its volume, and return it.

    NOTE(review): `form` is used below but never initialised in this function
    -- the `# form = []` line is commented out. Presumably `form` resolves to
    a module- or class-level list; confirm before relying on this.
    """
    # form = []
    model = cls.vertices[rowloc][0]
    cls._formDefiner(model, form, formTag)
    i=1
    # Walk the form as _formDefiner keeps extending it; each entry holds
    # three models to expand further.
    while i in range(len(form)):
        for j in range(3):
            model = form[i][j]
            form = cls._formDefiner(model, form, formTag)
        i += 1
    cls._findVolume(formTag)
    cls.numForms += 1
    pp(cls.numForms)
    return form
def price_kimsUniversal(EPS, EPS_E, ROE, ROE_E, ROE_avg):
    """Kim's universal fair price: value = EPS * ROE at 100/90/80 % weights.

    Input preference: estimates (EPS_E * ROE_E), then the ROE average
    (EPS_E * ROE_avg), then trailing figures (EPS * ROE).

    :return: (value_100, value_90, value_80); all None when the chosen
             inputs cannot be multiplied/rounded
    """
    try:
        if ROE_E is not None and EPS_E is not None:
            base = EPS_E * ROE_E
        elif ROE_avg is not None and EPS_E is not None:
            base = EPS_E * ROE_avg
        else:
            pp(ROE)
            base = EPS * ROE
        value_100 = round(base)
        value_90 = round(base * 0.9)
        value_80 = round(base * 0.8)
    except Exception:
        # Unusable inputs (e.g. None operands): signal with a None triple.
        value_100 = None
        value_90 = None
        value_80 = None
    return value_100, value_90, value_80
def order2user():
    """Flask view: search orders matching the 'q' request arg, then fetch the
    matching users and return them as JSON.

    Returns the raw order hits when the request asks for orders only.
    """
    q = request.args.get("q")
    conditions = json.loads(q)
    q, start, order_only, page_size = gen_request(conditions)
    es_query = make_query(start, page_size, q)
    if 'hits' not in es_query:
        return "error"
    #total = response['hits']['total']
    total_hit = es_query['aggregations']['distinct_uid']['value']
    if order_only:
        pp(es_query['hits']['hits'])
        return jsonify(es_query['hits']['hits'])
    uid_list = [order['_source']['uid'] for order in es_query['hits']['hits']]
    response = make_query_user(start, page_size, uid_list)
    if page_size >= len(response['hits']['hits']):
        # NOTE(review): `total` is computed but never used -- the result
        # carries `total_hit` instead; confirm whether this is dead code.
        total = len(response['hits']['hits'])
    result = {'hit': response['hits']['hits'], 'total': total_hit}
    return jsonify(result)
def replaceFiles(ctx, table_id, payload_dir):
    """Reupload a given set of files to an existing table for reprocessing.

    Parameters
    ----------
    ctx : Context
        A Click Context object.
    table_id : int
        The Id of the table to upload to.
    payload_dir : str
        The path of the payload directory containing the files.
    """
    print "replaceFiles"
    # Fetch the payload files
    config = {}
    for (dirpath, dirnames, filenames) in os.walk(payload_dir):
        # config['files'] = [{'filename': f} for f in filenames if f != ".DS_Store"]
        filepaths = [os.path.join(payload_dir, f) for f in filenames if f != ".DS_Store"]
        break  # only the top level of payload_dir is scanned
    # Upload the payload files in separate threads
    pp(filepaths)
    start_time = time.time()
    upload_files_multithreaded(ctx, table_id, "vector", filepaths, chunk_size=20971520)
    ctx.log("All uploads completed and took %s minutes" % (round((time.time() - start_time) / 60, 2)))
    # NOTE(review): exit() terminates here, so poll_asset_processing below is
    # never reached -- confirm whether this early exit is intentional.
    exit()
    # # Hacky workaround that doesn't use multi-threading.
    # # Force all asset files into "uploading" state by providing the first 256KB of each file.
    # for i in config['files']:
    #     upload_file_init(ctx, table_id, "vector", os.path.join(payload_dir, i['filename']))
    # ctx.log("Bailing!")
    #
    # # Upload the payload files
    # start_time = time.time()
    # for i in config['files']:
    #     upload_file(ctx, table_id, "vector", os.path.join(payload_dir, i['filename']), chunk_size=20971520)
    # ctx.log("All uploads completed and took %s minutes" % (round((time.time() - start_time) / 60, 2)))
    # Poll until asset has processed
    poll_asset_processing(ctx, table_id, ctx.service().tables())
async def agregar(ctx, imdblink):
    """Discord command: look up a movie on OMDb and add it to `discordmovies`.

    :param ctx: discord.py command context (used to send the reply)
    :param imdblink: IMDb id passed as OMDb's 'i' parameter
    """
    url = 'http://www.omdbapi.com/?apiKey=' + KEY
    params = {'i': imdblink}
    data = requests.get(url, params=params).json()
    pp(data)
    if data['Response'] == "True":
        title = data['Title']
        year = data['Year']
        imdbRating = data['imdbRating']
        director = data['Director']
        plot = data['Plot']
        # Parameterized INSERT: the original passed the literal column names
        # as VALUES (inserting nothing useful) and the string-building style
        # invited SQL injection.
        cursor.execute(
            "INSERT INTO discordmovies (Title,Year,IMDBRating,Director,Plot) "
            "VALUES (%s,%s,%s,%s,%s)",
            (title, year, imdbRating, director, plot))
        db.commit()
        response = title + ' fue agregada a la lista mi rey'
        print(title + " " + year + " " + plot)
    else:
        response = "qué mierda me mandaste ridículo?"
        # Log the API's failure flag from the ORIGINAL payload; the old code
        # indexed the reply string here, raising TypeError.
        print(data['Response'])
    await ctx.send(response)
def hankyungIndustry():
    """Scrape today's industry reports from the Hankyung Consensus listing page.

    :return: {'산업리포트': {today: [{'제목': title, '링크': url}, ...]}}
    """
    # NOTE(review): the session `s` is opened but req.get is used below --
    # kept as-is to preserve behavior.
    with req.Session() as s:
        date = today()
        url = 'http://consensus.hankyung.com/apps.analysis/analysis.list?skinType=industry&search_date=1w&search_text=&now_page=1&type=more'
        headers = {
            'user-agent': ua.safari,
            'referer':
            'http://consensus.hankyung.com/apps.analysis/analysis.list?skinType=industry&search_date=1w&search_text=',
            'host': "consensus.hankyung.com",
        }
        r = req.get(url, headers=headers)
        r.encoding = "EUC-KR"
        soup = bs4(r.text, "html.parser").select('.table_style01 tbody tr')
        # (the original also created an unused `list = []`, shadowing the builtin)
        result = {'산업리포트': {date: []}}
        for val in soup:
            report_date = val.select('.txt_number')[0].text
            report_title = val.select('.text_l a')[0].text
            report_url = "http://consensus.hankyung.com{}".format(
                val.select('td:last-child a[href]')[0]['href'])
            print(report_date)
            print(report_title)
            print(report_url)
            print(date)
            print('-----------')
            # Keep only reports published today.
            if report_date == date:
                appendItem = {}
                appendItem['제목'] = report_title
                appendItem['링크'] = report_url
                result['산업리포트'][date].append(appendItem)
        pp('크롤완료: 한경컨센서스-산업')
        return result
def fnInvest(stock):  # fnguide company info > investment indicators
    """Scrape free-cash-flow rows from the fnguide 'Invest' page.

    :param stock: 6-digit ticker code (without the leading 'A')
    :return: {'잉여현금흐름': {label: values or None}}
    """
    with req.Session() as s:
        headers = {
            'user-agent': ua.random,
            'referer':
            'http://comp.fnguide.com/SVO2/ASP/SVD_Main.asp?pGB=1&gicode=A005930&cID=&MenuYn=Y&ReportGB=&NewMenuID=101&stkGb=701'
        }
        get_url = s.get(
            'https://comp.fnguide.com/SVO2/ASP/SVD_Invest.asp?pGB=1&gicode=A{}'
            .format(str(stock)),
            headers=headers).text
        soup = bs4(get_url, 'html.parser')

        def _row(label):
            # One table row; None when missing/unparseable
            # (replaces three copy-pasted bare-except blocks).
            try:
                return soup_tr_match(soup, label)
            except Exception:
                return None

        labels = ['총현금흐름', '총투자', 'FCFF']
        result = {'잉여현금흐름': {label: _row(label) for label in labels}}
        pp('크롤완료: 투자지표')
        return result
def fnConsensus(stock):  # fnguide company info > consensus
    """Fetch the per-broker target-price consensus JSON for a stock.

    :param stock: 6-digit ticker code (without the leading 'A')
    :return: {'증권사별적정주가': list of records, or None when empty}
    """
    # NOTE(review): the session `s` is opened but req.get is used below --
    # kept as-is to preserve behavior.
    with req.Session() as s:
        url = 'https://comp.fnguide.com/SVO2/json/data/01_06/03_A{}.json'.format(
            stock)
        headers = {
            'user-agent': ua.random,
            'referer':
            'https://comp.fnguide.com/SVO2/ASP/SVD_Consensus.asp?pGB=1&gicode=A{}'
            .format(stock)
        }
        r = req.get(url, headers=headers).json()['comp']
        if len(r) == 0:
            r = None
        # Record fields:
        #   AVG_PRC / AVG_PRC_BF: broker-average target price / previous value
        #   AVG_RECOM_CD / AVG_RECOM_CD_BF: broker-average rating / previous
        #   EST_DT: estimate date; INST_CD / INST_NM: broker code / name
        #   RECOM_CD / RECOM_CD_BF: rating / previous rating
        #   TARGET_PRC / TARGET_PRC_BF: target price / previous target price
        #   YOY: change rate
        # `result` instead of the original local `dict`, which shadowed the builtin.
        result = {
            '증권사별적정주가': r,
        }
        pp('크롤완료: 컨센서스')
        return result
def test_BAK(pe, dic):
    """Dump a PE file's resource-directory tree and the parsed entries in `dic`.

    :param pe: a pefile.PE instance with DIRECTORY_ENTRY_RESOURCE parsed
    :param dic: dict with a 'Resource directory' list nested up to 3 levels
    """
    print('리소스 디렉토리 갯수>>>', len(pe.DIRECTORY_ENTRY_RESOURCE.entries), '\n')
    for resource_type in pe.DIRECTORY_ENTRY_RESOURCE.entries:
        if resource_type.name is not None:
            print('디렉토리 이름>>>', resource_type.name)
        else:
            print('디렉토리 이름>>>', resource_type.struct.Id,
                  pefile.RESOURCE_TYPE.get(resource_type.struct.Id, '-'))
        print(' 리소스 갯수>>>', len(resource_type.directory.entries))
        for resource_id in resource_type.directory.entries:
            if resource_id.name is not None:
                print(' 리소스 name -', resource_id.name)
            else:
                print(' 리소스 id -', resource_id.struct.Id)
        print('')
    print('')
    # Walk up to three nesting levels; isinstance() replaces the original
    # str(type(...)) == "<class 'list'>" comparisons.
    for i in range(len(dic['Resource directory'])):
        if isinstance(dic['Resource directory'][i], list):
            for j in range(len(dic['Resource directory'][i])):
                if isinstance(dic['Resource directory'][i][j], list):
                    for k in range(len(dic['Resource directory'][i][j])):
                        print(type(dic['Resource directory'][i][j][k]))
                        print('[i({0})j({1})k({2})] >>> {3}\n'.format(
                            i, j, k, dic['Resource directory'][i][j][k]), end='')
                        pp(dic['Resource directory'][i][j][k])
                        print('')
                else:
                    print(type(dic['Resource directory'][i][j]))
                    print('[i({0})j({1})] >>> {2}\n'.format(
                        i, j, dic['Resource directory'][i][j]['Structure']), end='')
                    pp(dic['Resource directory'][i][j])
                    print('')
        else:
            print(type(dic['Resource directory'][i]))
            print('[i({0})] >>> {1}\n'.format(
                i, dic['Resource directory'][i]['Structure']), end='')
            pp(dic['Resource directory'][i])
            print('')
    # NOTE(review): start_time is a module-level value set elsewhere.
    end_time = time.time()
    print('소요시간>>>', end_time - start_time)
# Build dict-of-lists records for every <catalog_item> in the second XML tree.
# NOTE(review): dict.keys()[0] / values()[0] only works on Python 2 -- on
# Python 3 these views are not subscriptable; confirm the target runtime.
for node in root_2.findall('.//catalog_item'):
    item = defaultdict(list)
    for x in node.iter():
        if x.attrib:
            item[x.attrib.keys()[0]].append(x.attrib.values()[0])
        if x.text is None:
            item[x.tag].append('None')
        elif x.text.strip():
            item[x.tag].append(x.text.strip())
    d2.append(dict(item))

# Align both record lists by item_number, then diff them field by field.
d1 = sorted(d1, key=lambda x: x['item_number'])
d2 = sorted(d2, key=lambda x: x['item_number'])
res_dict = defaultdict(list)
for x, y in zip(d1, d2):
    # NOTE(review): zipping .keys() assumes both records share key order.
    for key1, key2 in zip(x.keys(), y.keys()):
        if (key1 == key2) and sorted(x[key1]) != sorted(y[key2]):
            a = set(x[key1])
            b = set(y[key2])
            # Tag each differing value with the source file it came from.
            diff = ([(i + '--' + 'test1.xml') if i in a else
                     (i + '--' + 'test2.xml') if i in b else ''
                     for i in list(a ^ b)])
            res_dict[x['item_number'][0]].append({key1: diff})
if res_dict == {}:
    print('Data is same in both XML files')
else:
    pp(dict(res_dict))
def order_info(id):
    """Fetch one order by id from the exchange API (throttled by a 0.5 s sleep)."""
    resp = requests.get(api_url + 'orders/' + id, auth=auth)
    payload = resp.json()
    time.sleep(0.5)
    pp(payload)
    return resp.json()
munzAuth = json.load(open('munzcreds.json')) # keys user = auth["user"] passwd = auth["pass"] BearerToken = munzAuth["data"]["token"]["access_token"] head = {"Authorization": BearerToken} try: conn = mariadb.connect(user=user, password=passwd, host="localhost", port=3306, database="munz") except mariadb.Error as e: print(f"Error connecting to MariaDB Platform: {e}") sys.exit(1) # Get Cursor cur = conn.cursor() r = requests.get("https://api.munzee.com/user/current", headers=head) pp(r) pp(r.json()["data"]) payload = '{"exclude":"","fields":"munzee_id,friendly_name,latitude,longitude,original_pin_image,proximity_radius_ft,creator_username", "points":{"box1":{"timestamp": 0,"lat2":39.928842,"lng1":-105.141754,"lng2":-105.147290,"lat1":39.925172}}}' r = requests.post("https://api.munzee.com/map/boundingbox/", headers=head, data=payload) pp(r) pp(r.json()["data"])
def fnSnapshot(stock):  # fnguide company info > Snapshot tab
    """Scrape the fnguide 'Snapshot' page for one stock.

    Returns a dict with the crawl date/time, basic company info (name,
    market, sector, summaries, share counts) and the financial-highlight
    tables (revenue, operating income, net profit, equity, ROE/ROA, EPS,
    PER), each as {'year': [...], 'quarter': [...]}.
    """
    with req.Session() as s:
        # ua = UserAgent()
        headers = {
            'user-agent': ua.random,
            'referer':
            'http://comp.fnguide.com/SVO2/ASP/SVD_Main.asp?pGB=1&gicode=A005930&cID=&MenuYn=Y&ReportGB=&NewMenuID=101&stkGb=701'
        }
        get_url = s.get(
            'https://comp.fnguide.com/SVO2/ASP/SVD_Main.asp?pGB=1&gicode=A{}'.
            format(str(stock)),
            headers=headers).text
        soup = bs4(get_url, 'html.parser')

        def soup_financial_highlight(location, n=None):
            # Return (yearly, quarterly) value lists for row `location` of
            # the financial-highlight tables. Cells parse to floats rounded
            # to `n` digits, None for non-numeric cells, and the literal
            # '크롤 실패' ("crawl failed") for any other error.
            d_year = soup.select(
                '#highlight_D_Y tbody tr:nth-child({}) td'.format(location))
            for idx, val in enumerate(d_year):
                try:
                    d_year[idx] = round(float(val.get_text().replace(',', '')), n)
                except ValueError:
                    d_year[idx] = None
                except:
                    d_year[idx] = '크롤 실패'
            d_quarter = soup.select(
                '#highlight_D_Q tbody tr:nth-child({}) td'.format(location))
            for idx, val in enumerate(d_quarter):
                try:
                    d_quarter[idx] = round(
                        float(val.get_text().replace(',', '')), n)
                except ValueError:
                    d_quarter[idx] = None
                except:
                    d_quarter[idx] = '크롤 실패'
            return d_year, d_quarter

        # Company header / summary fields.
        d_name = soup_single_str('#giName', soup)
        d_market = soup_single_str('#strMarketTxt', soup)
        d_sectors = soup_single_str('.stxt.stxt2', soup)
        d_summary_1 = soup_single_str('#bizSummaryHeader', soup)
        d_summary_2 = soup_single_str('#bizSummaryContent li:first-child', soup)
        d_summary_3 = soup_single_str('#bizSummaryContent li:last-child', soup)
        d_treasury_shares = soup_single_str(
            '#svdMainGrid5 table tr:nth-child(5) td:nth-child(3)', soup)
        # Outstanding shares: "<count> / <...>" cell, comma-separated int.
        try:
            d_outstanding_shares = int(
                soup_single_str(
                    '#svdMainGrid1 table tr:last-child td:nth-child(2)',
                    soup).split("/")[0].replace(',', ''))
        except:
            d_outstanding_shares = 0
        try:
            d_treasury_shares = int(d_treasury_shares)
        except:
            d_treasury_shares = 0
        # Financial-highlight rows (row numbers are fixed by the page layout).
        d_revenue_year, d_revenue_quarter = soup_financial_highlight('1')
        d_operatingIncome_year, d_operatingIncome_quarter = soup_financial_highlight(
            '3')
        d_profit_year, d_profit_quarter = soup_financial_highlight('5')
        d_equity_year, d_equity_quarter = soup_financial_highlight('10')
        d_ROA_year, d_ROA_quarter = soup_financial_highlight('17', 2)
        d_ROE_year, d_ROE_quarter = soup_financial_highlight('18', 2)
        d_EPS_year, d_EPS_quarter = soup_financial_highlight('19')
        d_PER_year, d_PER_quarter = soup_financial_highlight('22', 2)
        dict = {
            '크롤일자': time.strftime('%Y-%m-%d', time.localtime(time.time())),
            '크롤시간': time.strftime('%p %I:%M:%S', time.localtime(time.time())),
            '기업정보': {
                '기업명': d_name,
                '소속': d_market,
                '업종': d_sectors,
                '핵심요약': d_summary_1,
                '설명1': d_summary_2,
                '설명2': d_summary_3,
                '발행주식수': d_outstanding_shares,
                '자기주식수': d_treasury_shares,
            },
            '재무하이라이트': {
                # year: [4y ago, 3y ago, 2y ago, 1y ago, latest,
                #        +1y (est./provisional), +2y (est.), +3y (est.)]
                # quarter: [4q ago, 3q ago, 2q ago, 1q ago, latest,
                #           +1q (est./provisional), +2q (est.), +3q (est.)]
                '매출': {
                    'year': d_revenue_year,
                    'quarter': d_revenue_quarter,
                },
                '영업이익': {
                    'year': d_operatingIncome_year,
                    'quarter': d_operatingIncome_quarter,
                },
                '순이익': {
                    'year': d_profit_year,
                    'quarter': d_profit_quarter,
                },
                '자본': {
                    'year': d_equity_year,
                    'quarter': d_equity_quarter,
                },
                'ROE': {
                    'year': d_ROE_year,
                    'quarter': d_ROE_quarter,
                },
                'ROA': {
                    'year': d_ROA_year,
                    'quarter': d_ROA_quarter,
                },
                'EPS': {
                    'year': d_EPS_year,
                    'quarter': d_EPS_quarter,
                },
                'PER': {
                    'year': d_PER_year,
                    'quarter': d_PER_quarter,
                },
            }
        }
        pp('크롤완료: 스냅샷')
        # pp(dict)
        return dict
'mmm1':sys.argv[2], 'mdd1':sys.argv[3], 'rb1':2, 'rb2':1, 'tpno':'S' } session = requests.session() r = requests.post(URL,data=submit_form) soup = BeautifulSoup(r.text.encode('latin1', 'ignore').decode('big5'),'html.parser') content = soup.find_all('td') with open('output_data.json','r') as read_file: data_veg = json.load(read_file) pp(data_veg) read_file.close() for i,c in enumerate(content[20:]): if i%10==4: veg_name=c.text.replace(u'\xa0',u'').replace(' ','') # print ("name"," ",c.text) if i%10==6: if data_veg.get(veg_name, '') == '': data_veg[veg_name] = dict() # daily_price = {date:c.text} # print ("price"," ",c.text) data_veg[veg_name][date] = c.text # data_veg[veg_name].append(daily_price) # data_veg.update({veg_name:daily_price}) pp(data_veg)
def toString(self):
    """Pretty-print the attribute dict of every ship system, in a fixed order."""
    systems = (self.shields, self.engines, self.oxygen, self.weapons,
               self.drones, self.medbay, self.pilot, self.sensors,
               self.doors, self.teleporter, self.cloaking, self.artillery,
               self.battery, self.clonebay, self.mindControl, self.hacking)
    for system in systems:
        pp(vars(system))
#!/usr/bin/python3.4 from pprintpp import pprint as pp import cp # Manual query below pp(cp.query(payload="SELECT GROUP_KEY() as distance, COUNT() FROM massive WHERE type == 'planet' GROUP BY Math.floor(distance/50)*50 ORDER BY distance ASC LIMIT 0,10"))
def ppv(_):
    """Pretty-print an object's attribute dictionary (``vars()``)."""
    attributes = vars(_)
    return pp(attributes)
import fitbit
from credentials import FITBIT
from pprintpp import pprint as pp
import json
import datetime

# Authenticated Fitbit API client built from the stored OAuth tokens.
fitbit_client = fitbit.Fitbit(FITBIT['CONSUMER_ID'],
                              FITBIT['CONSUMER_SECRET'],
                              access_token=FITBIT['ACCESS_TOKEN'],
                              refresh_token=FITBIT['REFRESH_TOKEN'])

# Sample API reads.
activity_stats = fitbit_client.activity_stats()
food_goal = fitbit_client.food_goal()
Aug_1_2016 = datetime.date(2016, 8, 1)
get_sleep = fitbit_client.get_sleep(Aug_1_2016)
get_devices = fitbit_client.get_devices()
get_alarms = fitbit_client.get_alarms(FITBIT['DEVICE_ID'])

# Schedule a one-off Saturday alarm a couple of minutes from now.
# NOTE(review): the name says plus 3 but the delta is 2 minutes -- confirm.
now = datetime.datetime.now()
now_plus_3 = now + datetime.timedelta(minutes = 2)
add_alarm = fitbit_client.add_alarm(FITBIT['DEVICE_ID'], now_plus_3, ['SATURDAY'])

# NOTE(review): `get_alarms` was fetched before the alarm was added, so both
# pp(get_alarms) calls print the same pre-add list.
pp(get_alarms)
pp(add_alarm)
pp(get_alarms)
# NOTE(review): this duplicates the accumulation done at the end of
# ledger_info(); presumably a leftover from a refactor -- confirm.
today_total += today_earn

# Report earnings per product; optional argv[1] sets the look-back days.
for prd in products:
    if len(sys.argv) > 1:
        days = int((sys.argv[1]))
        ledger_info(prd,days)
    else:
        ledger_info(prd)
        days = 0

# dic = {k: v for k, v in sorted(dic.items(), key=lambda item: item[1])}
# Sort each summary mapping by value and print it.
# NOTE(review): dic, dic_spend_EUR, dic_spend_EUR_ooa, dic_spend_total and
# dic_coins are populated elsewhere in this file.
dic = sorted(dic.items(), key=lambda x: x[1])
print("EARN:")
pp(dic)
dic_spend_EUR = sorted(dic_spend_EUR.items(),key=lambda x:x[1])
print("SPEND:")
pp(dic_spend_EUR)
dic_spend_EUR_ooa = sorted(dic_spend_EUR_ooa.items(),key=lambda x:x[1])
print("SPEND OOA:")
pp(dic_spend_EUR_ooa)
dic_spend_total = sorted(dic_spend_total.items(),key=lambda x:x[1])
print("SPEND TOTAL:")
pp(dic_spend_total)
dic_coins = sorted(dic_coins.items(),key=lambda x:x[1])
print("COINS:")
def ppd(_):
    """Pretty-print ``dir()`` of the argument -- every attribute name."""
    names = dir(_)
    return pp(names)