def insert_db_from_file_secu(): def get_secu(code_string): secu = re.compile(r'\((\d+)\)').findall(code_string) # print 'se:', secu return secu[0] secu_keys = ['y', 'secu', 'price', 'amou', 'volu', 'buy', 'sale', 'ot'] coll_in = Mongodb('192.168.0.223', 27017, 'ada', 'base_block_trade') with open('d:/temp/secu_data_json.txt') as fd: for k, each in enumerate(fd): item = each.strip() if not item or item.startswith('#'): continue to_list = [] for j, s in enumerate(simplejson.loads(item)): if j == 1: tt = s.replace('\\28', '(').replace('\\29', ')') to_list.append(tt) else: to_list.append(s.decode('unicode-escape')) data = dict(zip(secu_keys, to_list)) data['s'] = data['secu'] data['secu'] = get_secu(data['secu']) data['volu'] = '{0:.2f}'.format(float(data['volu']) * 10000) data['amou'] = '{0:.2f}'.format(float(data['amou']) * 10000) data['typ'] = 'sha_secu' # print data data.pop('ot') coll_in.insert(data) print '{0} ok'.format(k + 1) coll_in.disconnect()
def third_update(): coll_in = Mongodb('192.168.251.95', 27017, 'news', 'announcement_hk_chz') coll_cat = Mongodb('192.168.251.95', 27017, 'ada', 'dict_announce_catalog_hk') coll_secu = Mongodb('192.168.251.95', 27017, 'ada', 'base_stock') kt = 0 cdctuo = ThirdUpdate().main() cd_dt_cat_tit_url_ori = cdctuo if cdctuo else [] for codes, dt, cat, title, url, cat_origin in cd_dt_cat_tit_url_ori: kt += 1 for code in codes: secu = get_secu(code, coll_secu) if secu and not coll_in.get({'sid': url, 'secu.0.cd': secu[0]['cd']}, {'title': 1}): print 'kt:', kt, '|', code, '|', dt, '|', url, '\n|', title try: hk_data = post_dict(secu, dt, cat, title, url, cat_origin, coll_cat) coll_in.insert(hk_data) except Exception as e: print 'Error:', e.message # 创建索引 # inds_mon = coll_in.get({'sid': url}, {'title': 1}) # ind_url = "http://192.168.250.205:17081/indexer/services/indexes/delta.json?" \ # "indexer=announce_hkz&taskids=" # if inds_mon: # jdata = BaseDownloadHtml().get_html(ind_url + str(inds_mon['_id']))[0] # if json.loads(jdata)['code'] == 200: # print '\tcreate index is ok!\n\n' coll_in.disconnect() coll_cat.disconnect() coll_secu.disconnect()
def csf_news(): coll200 = Mongodb('192.168.250.208', 27017, 'news', 'new_keyword_dict') coll_csf = Mongodb('192.168.250.208', 27017, 'news', 'csf_dict') for k, doc in enumerate(coll200.query(), 1): word = doc['word'] coll_csf.insert({'word': word, 'nat': 0, 'stat': 2, 'w': 1000}) print k coll200.disconnect() coll_csf.disconnect()
def main(self, query=None): if query is None: query_date = [str(datetime.date.today())] else: query_date = query flag = False min_date = min(query_date) coll = Mongodb('192.168.251.95', 27017, 'news', 'research_report_def') url = 'http://datainterface.eastmoney.com//EM_DataCenter/js.aspx?' query_string = 'type=SR&sty=GGSR&ps=50&p=%s&mkt=0&stat=0&cmd=2&code=&rt=' for page in range(1, 20): py_data = json.loads(self.get_html(url + query_string % str(page), encoding=True)[1:-1]) for data in py_data: code, agency = data['secuFullCode'][:6], data['insName'] date_time, url_info_code = data['datetime'][:10], data['infoCode'] report_url = 'http://data.eastmoney.com/report/%s/%s.html' % (date_time.replace('-', ''), url_info_code) if date_time in query_date: src = self.rr_research_org_code(agency) or '' # get src secu = self.base_stock_code(code) or '' # get secu if coll.get({'url': report_url}, {'titl': 1}) is None: try: now_html = self.get_html(report_url, encoding=True) title = self.remove_tag(self.__title.findall(now_html)[0]) content = self.remove_tag(self.__content.findall(now_html)[0]) to_data = { 'url': report_url, 'titl': {'szh': title, 'en': ''}, 'bio': {'en': '', 'szh': content}, 'rdt': date_time, 'upu': '', 'typ': '30001', 'stat': 1, 'upt': datetime.datetime.now(), 'crt': datetime.datetime.now(), } to_data.update({'src': src, 'secu': secu}) if not src or not secu: vn_src = '' if src else agency vn_secu = '' if secu else code to_data['vn'] = '^'.join([vn_src, vn_secu]) else: to_data['vn'] = None coll.insert(to_data) print '[%s %s FROM %s] -->>> Now insert mongodb!' % (code, date_time, agency) except Exception as e: print 'title: %s, url: %s' % (data['title'], report_url), 'Error:', e else: print '[%s %s FROM %s] -->>> mongodb table is existed' % (code, date_time, agency) elif date_time < min_date: flag = True break if flag: break coll.disconnect()
def csf_dict():
    """Split the dividend/distribution search phrase into bare terms and insert each into csf_dict."""
    coll = Mongodb('192.168.250.208', 27017, 'news', 'csf_dict')
    phrase = ('送股实施公告 权益分派 分红派息 分红实施 转增股本 分派 OR 利润分配 OR 分配实施 OR 现金股利 OR '
              '现金分红 OR 现金红利 OR 股息派发 NOT 调整非公开股票 NOT 调整发行股份 NOT 预案 NOT 预披露 NOT '
              '管理制度 NOT 独立意见 NOT 法律意见书 NOT 预告 NOT 说明会 NOT 提示性公告 NOT 英文版 NOT 提议 '
              'NOT 临时公告 NOT 募集资金 NOT 完毕 NOT 调整发行股票价格')
    # drop the boolean operators, keep only the bare search terms
    cleaned = phrase.replace('OR', ' ').replace('NOT', ' ')
    for term in (w.strip() for w in cleaned.split() if w.strip()):
        coll.insert({'stat': 1, 'w': 1010, 'nat': 1, 'word': term})
    coll.disconnect()
def main(self): if not self._validity: print "SZX this is Saturday or Monday!" return 0 coll_in = Mongodb("192.168.251.95", 27017, "ada", "base_margin_trading") coll_stock = Mongodb("192.168.251.95", 27017, "ada", "base_stock") coll_fund = Mongodb("192.168.251.95", 27017, "fund", "base_fund") url = "http://www.szse.cn/szseWeb/FrontController.szse?randnum=&" t = lambda v: "%.4f" % float(v) for page in range(1, 30): break_point = False html = self.get_html(url + self._query_string.format(self._query_date, page), encoding=True) for it in self.extract(html): # print it[0], it[1], it[2], it[3], it[4], it[5], it[6] break_point = True secu_cd = secu_code(it[0], coll_stock, coll_fund) fiba_bre = szx_fiba_bre(secu_cd, coll_in, self._query_date) sema_bre = szx_sema_bre(secu_cd, coll_in, self._query_date) # 本日融资偿还额 = 前日融资余额 + 本日融资买入- 本日融资余额(元) (fi.re = fi.ba(上期) + fi.bu - fi.ba) # 融券偿还量 = 融券卖出量 + 融券余量(上期) - 融券余量 (se.re = se.so + se.ma(上期) - se.ma) szx_fs_data = { "secu": secu_cd or it[0], "date": self._query_date, "total": t(it[6]), "stat": 2, "typ": "szx", "crt": datetime.now(), "fi": {"ba": t(it[2]), "bu": t(it[1]), "re": t(float(it[1]) + fiba_bre - float(it[2]))}, "se": { "ba": t(it[5]), "ma": t(it[4]), "so": t(it[3]), "re": t(float(it[3]) + sema_bre - float(it[4])), }, "upt": datetime.now(), } print szx_fs_data if not coll_in.get({"secu": secu_cd or it[0], "date": self._query_date, "typ": "szx"}): print coll_in.insert(szx_fs_data) if not break_point: break print u"szx [%s] 融资融券交易明细 day update: %d page done!" % (self._query_date, page) # break coll_in.disconnect() coll_stock.disconnect() coll_fund.disconnect()
def insert_db(self, total_data): coll_in = Mongodb('192.168.251.95', 27017, 'ada', 'base_margin_trading') coll_stock = Mongodb('192.168.251.95', 27017, 'ada', 'base_stock') coll_fund = Mongodb('192.168.251.95', 27017, 'fund', 'base_fund') sql_db = MySQLClient("192.168.251.95", "python_team", "python_team", "ada-fd") print '\tnow start to insert mongodb, waiting......' d = (lambda v: '%.4f' % float(v)) for pdt in total_data: # 信用交易日期 标的证券代码 标的证券简称 本日融资余额(元) 本日融资买入额(元) # 本日融资偿还额(元) 本日融券余量 本日融券卖出量 本日融券偿还量 secu_cd = secu_code(pdt[1], coll_stock, coll_fund) trade_date = '-'.join([pdt[0][:4], pdt[0][4:6], pdt[0][6:]]) uid = str(uuid.uuid5(uuid.NAMESPACE_DNS, ''.join(self._valid(pdt)).encode('u8'))) data = { 'secu': secu_cd or pdt[1], 'date': trade_date, 'total': d(int(pdt[3])), 'stat': 2, 'typ': 'sha', 'crt': datetime.now(), 'uuid': uid, 'fi': { 'ba': d(pdt[3]), 'bu': d(pdt[4]), 're': d(pdt[5]) }, 'se': { 'ba': '0.0000', 'ma': d(pdt[6]), 'so': d(pdt[7]), 're': d(pdt[8]) }, 'upt': datetime.now() } if coll_in.get({'uuid': uid, 'typ': 'sha'}, {'secu': 1}): continue elif secu_cd is None: coll_in.insert(data) else: seba = sha_seba(secu_cd, pdt[6], trade_date, sql_db) if seba is not None: data['total'] = d(int(pdt[3]) + seba) data['se']['ba'] = d(seba) coll_in.insert(data) coll_in.disconnect() coll_stock.disconnect() sql_db.disconnect() print '\tinsert all done!'
def statistics(months=None, weeks=None, days=None):
    """Aggregate the 100 most common keywords per industry over a trailing window.

    Exactly one of *months*/*weeks*/*days* should be truthy, selecting a
    30-, 7- or 1-day window respectively.

    :raises ValueError: when none of the three flags is set. (Previously the
        `days` branch was `pass` and the no-flag case fell through, so both
        crashed later with NameError on the undefined query range.)
    """
    if months:
        window_days = 30
    elif weeks:
        window_days = 7
    elif days:
        window_days = 1  # BUG FIX: branch used to `pass`, leaving query_range undefined
    else:
        raise ValueError('one of months, weeks or days must be set')
    # 'YYYYMMDD' lower bound for the document 'dt' field
    query_range = str(datetime.now() - timedelta(days=window_days)).replace('-', '')[:8]
    coll_from = Mongodb('192.168.250.208', 27017, 'news', 'hotnews_analyse')
    coll_to = Mongodb('192.168.250.208', 27017, 'news', 'statistics')
    # every industry tag seen anywhere in the source collection
    all_ind = {
        _ind
        for _doc in coll_from.query(kwargs={'ind': 1})
        for _ind in _doc.get('ind', [])
    }
    for ind in all_ind:
        counter = Counter()
        query_cond = {
            'ind': {'$in': [ind]},
            'dt': {'$gte': query_range + '000000'}
        }
        for doc in coll_from.query(query_cond, {'kw': 1}):
            counter.update(doc.get('kw', []))
        coll_to.insert({
            'ind': ind,
            'count': counter.most_common(100),
            'dt': query_range
        })
    coll_from.disconnect()
    coll_to.disconnect()
def update(): coll_in = Mongodb('192.168.251.95', 27017, 'news', 'announcement_hk_chz') coll_cat = Mongodb('192.168.251.95', 27017, 'ada', 'dict_announce_catalog_hk') coll_secu = Mongodb('192.168.251.95', 27017, 'ada', 'base_stock') count = 0 for code, query in codes_date: ktt = 0 count += 1 validate(code, query) print '[%s-->>%s,%s]' % (count, code, query), ':waiting few minutes......\n' dctu = PoskUpdate(code, query).main() # codes, date, cat, title, url for codes, dt, cat, title, url, cat_origin in dctu: ktt += 1 print '\t[%s ->> ktt:%s]' % (code, ktt), '|', codes, '|', dt, '|', title, '|', url for code_ in codes: secu = get_secu(code_, coll_secu) print 'secu:', secu if secu and not coll_in.get({'sid': url}, {'title': 1}): try: hk_data = post_dict(secu, dt, cat, title, url, cat_origin, coll_cat) coll_in.insert(hk_data) except Exception as e: print '\t[%s] |%s|upload error: %s!' % (code_, dt, e.message) # inds_mon = coll_in.get({'sid': url}, {'title': 1}) # ind_url = "http://192.168.250.205:17081/indexer/services/indexes/delta.json?" \ # "indexer=announce_hkz&taskids=" # if inds_mon: # 创建索引 # jdata = BaseDownloadHtml().get_html(ind_url + str(inds_mon['_id']))[0] # if json.loads(jdata)['code'] == 200: # print '\tcreate index is ok!\n\n' if ktt % 80 == 0: sleep(2 * 60) coll_in.disconnect() coll_cat.disconnect() coll_secu.disconnect()
def update_item(): conceptions = [] work_book = XlsxReader(path + 'www.xlsx') base_keys = ['conp', 'resc', 'cpcd', 'idxcd', 'rel'] for _k, doc in enumerate(work_book.collection(), 1): temp = {} temp['conp'] = doc['conp'] temp['cpcd'] = doc['cpcd'] temp['resc'] = [s.strip() for s in doc['resc'].split('&')] temp['rel'] = [s.strip() for s in doc['rel'].split(';')] temp['idxcd'] = [] if not doc['idxcd'].strip() else [ doc['idxcd'].strip() ] cw = [] for k, vs in doc.iteritems(): if k not in base_keys: cw.extend([v.strip() for v in vs if v.strip()]) temp['cw'] = cw print _k conceptions.append(temp) print 'read xlsx finished.' dicts = defaultdict(list) for dox in conceptions: dicts[dox['conp']].extend(dox['idxcd']) for _temp_data in conceptions: conp = _temp_data['conp'] _temp_data['idxcd'] = dicts[conp] coll = Mongodb('192.168.250.208', 27017, 'news', 'news_conp') for data in conceptions: coll.insert(data) coll.disconnect()
class SipoSeparate(object):
    """Crawl SIPO listing pages of one type, parse each entry into a dict,
    and bulk-insert the results into mongodb (py_crawl.sipo_typ)."""

    def __init__(self, typ):
        # typ selects which query string / page count to use from `query_string`
        self._typ = typ
        self._sipo_datas = []  # buffer of parsed entries, flushed by insert_mongo()
        self._mongodb = Mongodb('192.168.0.223', 27017, 'py_crawl', 'sipo_typ')
        self._url_with_query_string = query_string.get_url_with_query_string(
            self._typ)

    def extract(self, page):
        """Download one listing page and append a parsed dict per '.cp_linr' entry
        to self._sipo_datas."""
        # matches a "(YYYY.MM)" date marker inside a li's text
        checked = lambda t: re.compile(r'\(\d{4}\.\d\d\)', re.S).search(t)
        url = self._url_with_query_string.format(page=page)
        html_pyq = download.RequestHtml().get_html(url)
        document = PyQuery(html_pyq)
        for each_node in document.items('.cp_linr'):
            each_node.remove('a')
            data = {'tit': each_node('h1').text(), 'type': self._typ}
            data.update(self.initial_value(each_node(
                '.cp_jsh').text()))  # obtain zhaiyao to being key and value
            for k, node_li in enumerate(
                    each_node('.cp_linr > ul > li').items()):
                if node_li('li').length == 1:
                    data.update(self.initial_value(node_li.text()))
                else:
                    # handle to multi elements of li tag, sometimes content of child li tag is its parents.
                    flh_flag = False  # assure whether have multi flh, default yes.
                    parent_node_li_text = ''
                    for child_li in node_li.items('li'):
                        if child_li('li').length > 1:
                            flh_flag = True
                            parent_node_li_text += child_li.remove('li').text()
                        else:
                            if flh_flag:
                                flh_flag = False
                                child_li_text = child_li.text()
                                if checked(child_li_text):
                                    # `flh_flag` is true, judge `parent_node_li_text` whether has `flh`,
                                    parent_node_li_text += child_li_text
                                else:
                                    # and must deal with `child_li_text` to update when hasn't child `flh`
                                    data.update(
                                        self.initial_value(child_li_text))
                                    data.update(
                                        self.initial_value(parent_node_li_text))
                            else:
                                data.update(self.initial_value(
                                    child_li.text()))
            self._sipo_datas.append(data)

    @staticmethod
    def initial_value(string):
        """Split a 'key:value' text fragment into {pinyin_initials_of_key: value}.

        Returns {} for blank input or when no ':' separator is found.
        """
        if not string.strip():
            return {}
        key_value = lambda t: re.compile(r'(.*?):(.*)', re.S).findall(t)
        try:
            key, value = key_value(string)[0]
            init_py = pinyin.get_initial(key, delimiter='')
            if init_py == 'sqr':
                # 'sqr' initials are ambiguous; disambiguate by the full pinyin
                # ending: '...ren' -> applicant person, otherwise -> a date field
                if pinyin.get(key, delimiter='').endswith('ren'):
                    init_py = '_'.join((init_py, 'person'))
                else:
                    init_py = '_'.join((init_py,
                                        'day'))
            return dict(((init_py, value), ))
        except IndexError:
            # no 'key:value' pair present in the fragment
            pass
        return {}

    def insert_mongo(self, iterable):
        """Extract the given pages concurrently, then flush the buffer to mongodb."""
        pool = ThreadPool(8)
        pool.map(self.extract, iterable)
        pool.close()
        pool.join()
        # now insert to mongodb at 192.168.0.233
        for mon_data in self._sipo_datas:
            self._mongodb.insert(mon_data)
        del self._sipo_datas[:]

    def main(self):
        """Process the page range for this type in chunks of `unit` pages."""
        unit = 100
        pages_list = range(
            1, getattr(query_string, '_'.join(['num', self._typ])) + 1)
        # ceil(len/unit) in py2 integer arithmetic
        pagination = len(pages_list) / unit + ((len(pages_list) % unit) and 1)
        dummy_pages_list = [
            pages_list[p * unit:(p + 1) * unit] for p in range(pagination)
        ]
        for k, dummy_page in enumerate(dummy_pages_list):
            # print '\t%s: %s' % (self._typ, dummy_page)
            print 'Now executing [ {1}:{0}] times, from {2} to {3}.'.format(
                k + 1, self._typ, k * unit + 1, (k + 1) * unit)
            self.insert_mongo(dummy_page)
            print '->>>[ {1}: {0} ] times execute is ok, will sleeping 35seconds.\n'.format(
                k + 1, self._typ)
            time.sleep(30)
            # NOTE(review): this break stops after the first chunk only —
            # confirm whether it is a leftover from debugging
            break
def main(self): if not self._validity: print 'SZX this is Saturday or Monday!' return 0 coll_in = Mongodb('192.168.251.95', 27017, 'ada', 'base_margin_trading') coll_stock = Mongodb('192.168.251.95', 27017, 'ada', 'base_stock') coll_fund = Mongodb('192.168.251.95', 27017, 'fund', 'base_fund') url = 'http://www.szse.cn/szseWeb/FrontController.szse?randnum=&' t = lambda v: '%.4f' % float(v) for page in range(1, 30): break_point = False html = self.get_html( url + self._query_string.format(self._query_date, page), encoding=True) for it in self.extract(html): # print it[0], it[1], it[2], it[3], it[4], it[5], it[6] break_point = True secu_cd = secu_code(it[0], coll_stock, coll_fund) fiba_bre = szx_fiba_bre(secu_cd, coll_in, self._query_date) sema_bre = szx_sema_bre(secu_cd, coll_in, self._query_date) # 本日融资偿还额 = 前日融资余额 + 本日融资买入- 本日融资余额(元) (fi.re = fi.ba(上期) + fi.bu - fi.ba) # 融券偿还量 = 融券卖出量 + 融券余量(上期) - 融券余量 (se.re = se.so + se.ma(上期) - se.ma) szx_fs_data = { 'secu': secu_cd or it[0], 'date': self._query_date, 'total': t(it[6]), 'stat': 2, 'typ': 'szx', 'crt': datetime.now(), 'fi': { 'ba': t(it[2]), 'bu': t(it[1]), 're': t(float(it[1]) + fiba_bre - float(it[2])) }, 'se': { 'ba': t(it[5]), 'ma': t(it[4]), 'so': t(it[3]), 're': t(float(it[3]) + sema_bre - float(it[4])) }, 'upt': datetime.now() } print szx_fs_data if not coll_in.get({ 'secu': secu_cd or it[0], 'date': self._query_date, 'typ': 'szx' }): print coll_in.insert(szx_fs_data) if not break_point: break print u'szx [%s] 融资融券交易明细 day update: %d page done!' % ( self._query_date, page) # break coll_in.disconnect() coll_stock.disconnect() coll_fund.disconnect()