Example #1
    def go(self):
        url_list = self.get_url()

        for url0 in url_list:
            try:
                t0 = datetime.datetime.now().strftime('%H:%M')
                print('\n%s about to parse catalog page: %s\n' % (t0, url0))

                html = requests_manager.get_html(url0, charset='utf8')
                county_url_list = self.catalog_parser(html)

                for url1 in county_url_list:
                    t0 = datetime.datetime.now().strftime('%H:%M')
                    print('\n%s about to parse county detail page: %s\n' % (t0, url1))

                    html = requests_manager.get_html(url1, charset='utf8')
                    county_info, more_url = self.county_parser(html)

                    global county_code
                    county_code.append(county_info)  # appending to the list, not a DataFrame
                    pd.DataFrame(county_code).to_excel('county_code.xlsx')
                    # print(county_code)

                    i = 0
                    while True:
                        url2 = more_url + '?page=%s' % i

                        t0 = datetime.datetime.now().strftime('%H:%M')
                        print('\n%s about to parse village detail page %s: %s\n' % (t0, i, url2))

                        html = requests_manager.get_html(url2, charset='utf8')

                        global county_detail
                        df = self.more_parser(html)
                        if df.empty:
                            break

                        county_detail = pd.concat([county_detail, df],
                                                  ignore_index=True)
                        county_detail.to_excel('county_detail.xlsx')

                        i = i + 1
                        time.sleep(5)
            except Exception:
                with open('error.log', 'a', encoding='utf8') as f:
                    f.write(("%s\n%s\n\n" % (url0, traceback.format_exc())))
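Every snippet here leans on a requests_manager helper whose source is not shown. A minimal sketch of what its get_html might look like, assuming it is a thin retry wrapper around requests (the signature is inferred from the call sites above, not from the real helper):

import time
import requests

def get_html(url, charset='utf8', retries=3, timeout=10):
    """Fetch url and return the decoded body, or '' if every retry fails.

    Guessed signature: the callers above pass an optional charset and
    expect a plain text return value.
    """
    for _ in range(retries):
        try:
            resp = requests.get(url, timeout=timeout)
            resp.encoding = charset
            return resp.text
        except requests.RequestException:
            time.sleep(2)  # back off briefly before the next attempt
    return ''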
Example #2
    def stock_flow0(self):
        global token
        with open('stock_list.txt', 'r') as f:
            code_list = f.read().split('\n')
        print(code_list)
        data_dict = {}
        for stock_code in code_list:

            # http://ff.eastmoney.com//EM_CapitalFlowInterface/api/js?type=hff&rtntype=2&js=({data:[(x)]})&cb=var%20aff_data=&check=TMLBMSPROCR&acces_token=1942f5da9b46b069953c873404aad4b5&id=0000632&_=1513130772995
            data_str = ''
            # stock_type = 1
            while not data_str:  # and stock_type < 4:
                url = "http://dcfm.eastmoney.com//em_mutisvcexpandinterface/api/js/get?type=HSGTHDSTA&token=%s&filter=(SCODE=%s%s%s)&st=HDDATE&sr=-1&p=1&ps=1000" % (
                    token, '%27', stock_code, '%27')
                data_str = requests_manager.get_html(url)
                # stock_type = stock_type + 1
                time.sleep(1)

            df = pd.DataFrame(eval(data_str))
            if df.empty:
                print(stock_code, u'content is empty')
                print('url => ', url)
            else:
                SNAME = df['SNAME'].iloc[0]  # already text under Python 3; no decode needed

                data_dict[SNAME] = df
                time.sleep(1)
                xls_manager.dfs_to_excel(data_dict,
                                         os.getcwd() + u'/股票资金流.xlsx')  # "stock capital flow.xlsx"
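stock_flow0 turns the response body into a DataFrame with eval, which will execute whatever the server sends. A safer sketch, assuming the endpoint returns a plain JSON array of records (parse_flow_payload is a hypothetical helper, not part of the original code):

import json
import pandas as pd

def parse_flow_payload(data_str):
    # json.loads rejects anything that is not literal JSON, unlike eval().
    try:
        return pd.DataFrame(json.loads(data_str))
    except ValueError:
        return pd.DataFrame()  # empty frame signals "no data" to the caller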
Example #3
def get_data(arg_dict):
    # Pull url and lock out before the try block so the except handler
    # can use them even if an early step fails.
    url = arg_dict['url']
    lock = arg_dict['process_lock']
    try:
        html = requests_manager.get_html(url)
        time.sleep(1)
        
        ent_name = re.search('<span itemprop=\'name\'><.*?>(.*?)</a>', html).group(1)

        res_path = 'res.txt'
        with lock:
            with codecs.open(res_path, 'a', 'utf-8') as f:
                f.write(ent_name + '\n')
        
    except Exception:
        with lock:
            with codecs.open('http_error.log', 'a', 'utf-8') as f:
                f.write(url + '\n' + traceback.format_exc() + '\n')
    return arg_dict
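get_data expects each arg_dict to carry the target url plus a shared lock, which suggests it is mapped over a process pool. A sketch of a driver under that assumption; a Manager lock is used because a plain multiprocessing.Lock cannot be passed to Pool workers:

import multiprocessing

if __name__ == '__main__':
    manager = multiprocessing.Manager()
    lock = manager.Lock()  # picklable proxy, safe to ship to workers
    urls = ['http://example.com/ent/1', 'http://example.com/ent/2']  # placeholders
    args = [{'url': u, 'process_lock': lock} for u in urls]
    with multiprocessing.Pool(4) as pool:
        pool.map(get_data, args)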
Example #4
    def stock_flow(self):
        global token
        with open('stock_list.txt', 'r') as f:
            stock_list = f.read().split('\n')

        sql = """
        SELECT * FROM `stock_belonging` WHERE `stock_code` in (%s)
        """ % (','.join('"%s"' % s for s in stock_list))
        with closing(
                pymysql.connect(host='10.10.10.15',
                                user='spider',
                                password='jlspider',
                                database='spider',
                                charset='utf8')) as conn:
            stock_info = pd.read_sql(sql, conn)

        file_name = u'股票资金流.xlsx'
        if os.path.exists(file_name):
            os.remove(file_name)
            wb = xlwt.Workbook()
            wb.add_sheet('sheet1')
            wb.save(file_name)

        writer = pd.ExcelWriter(file_name)

        for stock_code in stock_list:
            print "stock_capital_flow =>", stock_code
            try:
                stock_type = stock_info[stock_info['stock_code'] ==
                                        stock_code]['stock_type'].iloc[0]
                stock_name = stock_info[stock_info['stock_code'] ==
                                        stock_code]['stock_name'].iloc[0]
                if stock_type == 'SH':
                    stock_type = 1
                elif stock_type == 'SZ':
                    stock_type = 2
                else:
                    stock_type = 3
                url = "http://ff.eastmoney.com//EM_CapitalFlowInterface/api/js?type=hff&rtntype=2&check=TMLBMSPROCR&acces_token=%s&id=%s%s" % (
                    token, stock_code, stock_type)
                data_str = requests_manager.get_html(url)
                rows = eval(data_str)
                data = [s.split(',') for s in rows]

                df = pd.DataFrame(data)
                if df.empty:
                    print(stock_code, u'error: empty result')
                    print(df)
                    print(url)
                df.columns = [
                    u'日期', u'主力净流入净额', u'主力净流入净占比', u'超大单净流入净额', u'超大单净流入净占比',
                    u'大单净流入净额', u'大单净流入净占比', u'中单净流入净额', u'中单净流入净占比',
                    u'小单净流入净额', u'小单净流入净占比', u'收盘价', u'涨跌幅'
                ]
                df = self.calculation(df)
                df = df.reindex(columns=[
                    u'日期', u'收盘价', u'涨跌幅', u'主力净流入净额', u'主力净流入净额3日累计',
                    u'主力净流入净额5日累计', u'主力净流入净额10日累计', u'主力净流入净额20日累计',
                    u'主力净流入净占比', u'1', u'2', u'超大单净流入净额', u'超大单净流入净占比',
                    u'大单净流入净额', u'大单净流入净占比', u'中单净流入净额', u'中单净流入净占比',
                    u'小单净流入净额', u'小单净流入净占比', u'中单净流入净额3日累计', u'中单净流入净额5日累计',
                    u'中单净流入净额10日累计', u'中单净流入净额20日累计', u'小单净流入净额3日累计',
                    u'小单净流入净额5日累计', u'小单净流入净额10日累计', u'小单净流入净额20日累计'
                ])
                df[u'收盘价'] = df[u'收盘价'].astype(np.float64)
                df[u'主力净流入净额'] = df[u'主力净流入净额'].astype(np.float64).round(0)
                df[u'主力净流入净额3日累计'] = df[u'主力净流入净额3日累计'].round(0)
                df[u'主力净流入净额5日累计'] = df[u'主力净流入净额5日累计'].round(0)
                df[u'主力净流入净额10日累计'] = df[u'主力净流入净额10日累计'].round(0)
                df[u'主力净流入净额20日累计'] = df[u'主力净流入净额20日累计'].round(0)

                df.to_excel(writer, stock_name, index=None)  # one sheet per stock
                df.to_csv(stock_name + '.csv', index=None)
                time.sleep(1)
            except Exception:
                print(url)
                print(traceback.format_exc())

        writer.save()
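stock_flow calls an unshown self.calculation that must add the 3/5/10/20-day accumulation columns consumed by the reindex above. A minimal sketch consistent with those column names (an assumption about the helper, not its actual implementation):

import numpy as np

def calculation(self, df):
    # Rolling N-day sums for the net-inflow columns; the generated names
    # line up with the u'...N日累计' columns referenced in stock_flow.
    # Assumes rows are already in ascending date order.
    for col in [u'主力净流入净额', u'中单净流入净额', u'小单净流入净额']:
        values = df[col].astype(np.float64)
        for n in (3, 5, 10, 20):
            df[u'%s%s日累计' % (col, n)] = values.rolling(n).sum()
    return df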
Example #5
    def parse1(self, response):
        bs_obj = bs4.BeautifulSoup(response.text, 'html.parser')
        item = response.meta['item']

        if item['fund_code'] in blacklist:
            raise Exception('this fund is blacklisted')

        # read the latest net value date from the local database
        latest_date = self.newest_date_df['newest_date'][
            self.newest_date_df['fund_code'] == item['fund_code']]
        if latest_date.empty:
            raise Exception('fund code %s not found in local database' %
                            item['fund_code'])
        latest_date = latest_date.iat[0]
        latest_date = datetime.datetime(latest_date.year, latest_date.month,
                                        latest_date.day)  # convert date to datetime

        print("latest local net value date for %s is %s" %
              (item['fund_code'], latest_date))

        try:
            # net value estimate
            e_dl = bs_obj.find('dl', class_='dataItem01')
            data = [
                e.get_text(strip=True)
                for e in e_dl.find('dd', class_='dataNums').find_all('span')
            ]
            data_type = e_dl.find('span', class_='sp01').get_text(strip=True)
            data_date = e_dl.find('span', id='gz_gztime').get_text(strip=True)

            # if data_date != '--':
            data_date = datetime.datetime.strptime(
                re.sub(r'\(|\)', '', data_date), '%y-%m-%d %H:%M')
            # treat Saturday and Sunday as Friday
            data_date = data_date - datetime.timedelta(
                days=1) if data_date.isoweekday() == 6 else data_date
            data_date = data_date - datetime.timedelta(
                days=2) if data_date.isoweekday() == 7 else data_date

            df = pd.DataFrame(data + [data_type, data_date],
                              index=[u'净值', u'涨跌值', u'涨跌幅', u'数据类型',
                                     u'数据日期']).T
            df = df.drop([u'涨跌值', u'数据类型'], axis=1)
            df = df.rename(
                {
                    u'净值': u'estimate_net_value',
                    u'涨跌幅': u'estimate_daily_growth_rate',
                    u'数据日期': u'value_date'
                },
                axis=1)
            df[u'fund_code'] = item['fund_code']
            df[u'value_date'] = df[u'value_date'].apply(
                lambda date0: date0.strftime('%Y-%m-%d'))
            df[u'crawler_key'] = df[u'fund_code'] + '/' + df[u'value_date']
            df.index = df[u'crawler_key']
            print u"网页日期:", df[u'value_date'].iat[0], u'本地日期:', lastest_date
            # if datetime.datetime.strptime(df[u'value_date'].iat[0],'%Y-%m-%d').date() <= lastest_date.date():
            #     mysql_connecter.update_df_data(df, u'eastmoney_daily_data', u'crawler_key')
            # else:
            #     mysql_connecter.insert_df_data(df, u'eastmoney_daily_data', method='UPDATE')
            if not df.empty:
                mysql_connecter.insert_df_data(df,
                                               'eastmoney_daily_data',
                                               method='UPDATE')
            else:
                print(u"no new data")
        except Exception:
            log_obj.error("parse failed in %s( %s )\n%s" %
                          (self.name, response.url, traceback.format_exc()))
            with open(u'净值估算.html', 'w') as f:  # debug dump ("net value estimate")
                f.write(response.text)

        try:
            # fund net value
            e_div = bs_obj.find_all('div',
                                    class_='poptableWrap singleStyleHeight01')[
                                        0]  # three tabs: net value, dividends, rating
            e_table = e_div.table
            df = pd.read_html(e_table.prettify(encoding='utf8'),
                              encoding='utf8',
                              header=0)[0]

            # time BUG here: every row is stamped with the current year
            year_num = datetime.datetime.now().year
            df[u'日期'] = pd.to_datetime(
                df[u'日期'].apply(lambda s: '%s-%s' % (year_num, s)))

            # print(df[u'日期'].dtype)
            # print(type(latest_date))

            df = df.astype(str)  # np.str was removed from numpy; builtin str works
            df[u'crawler_key'] = df[u'日期'].apply(lambda date: "%s/%s" %
                                                 (item['fund_code'], date))
            df[u'fund_code'] = item['fund_code']
            df = df.rename(
                {
                    u'日期': u'value_date',
                    u'单位净值': u'net_asset_value',
                    u'累计净值': u'accumulative_net_value',
                    u'日增长率': u'daily_growth_rate'
                },
                axis=1)
            df.index = df[u'crawler_key']

            if not df.empty:
                mysql_connecter.insert_df_data(df,
                                               'eastmoney_daily_data',
                                               method='UPDATE')
            else:
                print(u"no new data")
        except Exception:
            log_obj.error("parse failed in %s( %s )\n%s" %
                          (self.name, response.url, traceback.format_exc()))
            with open(u'基金净值.html', 'w') as f:  # debug dump ("fund net value")
                f.write(response.text)

        try:
            # data embedded in the pingzhongdata js file
            url = 'http://fund.eastmoney.com/pingzhongdata/%s.js?v=%s' % (
                item['fund_code'],
                datetime.datetime.now().strftime('%Y%m%d%H%M%S'))
            js_data = requests_manager.get_html(url)
            js_data = re.sub(r'\s+', '', js_data)

            def re_func(key):
                # extract the value assigned to `key` in the js payload
                m = re.search(r'(?<=%s=).+?(?=;)' % key, js_data, re.S)
                return m.group() if m else None

            # stock position ratio
            Data_fundSharesPositions = pd.DataFrame(
                eval(re_func('Data_fundSharesPositions')),
                columns=[u'value_date',
                         u'fund_shares_positions']).astype(str)

            Data_fundSharesPositions[u'value_date'] = Data_fundSharesPositions[
                u'value_date'].apply(lambda s: datetime.datetime.fromtimestamp(
                    int(s[:10])).strftime('%Y-%m-%d'))
            Data_fundSharesPositions[
                u'fund_shares_positions'] = Data_fundSharesPositions[
                    u'fund_shares_positions'] + '%'

            Data_fundSharesPositions[u'crawler_key'] = item[
                'fund_code'] + '/' + Data_fundSharesPositions[u'value_date']
            Data_fundSharesPositions = Data_fundSharesPositions.drop([
                u'value_date',
            ],
                                                                     axis=1)
            Data_fundSharesPositions.index = Data_fundSharesPositions[
                u'crawler_key']

            if not Data_fundSharesPositions.empty:
                mysql_connecter.insert_df_data(Data_fundSharesPositions,
                                               'eastmoney_daily_data',
                                               method='UPDATE')

        except Exception:
            log_obj.error("parse failed in %s( %s )\n%s" %
                          (self.name, response.url, traceback.format_exc()))
            with open(u'js_v中的数据.html', 'w') as f:  # debug dump
                f.write(response.text)
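mysql_connecter.insert_df_data is not shown either. Given that method='UPDATE' is expected to upsert existing rows, a sketch of one plausible implementation (connection details copied from the stock_flow example; the ON DUPLICATE KEY UPDATE clause assumes crawler_key is a unique key on the target table):

import pymysql

def insert_df_data(df, table, method=None):
    cols = list(df.columns)
    col_sql = ', '.join('`%s`' % c for c in cols)
    placeholders = ', '.join(['%s'] * len(cols))
    sql = 'INSERT INTO `%s` (%s) VALUES (%s)' % (table, col_sql, placeholders)
    if method == 'UPDATE':
        sql += ' ON DUPLICATE KEY UPDATE ' + ', '.join(
            '`%s`=VALUES(`%s`)' % (c, c) for c in cols)
    rows = [tuple(map(str, r)) for r in df.values]  # stringify numpy types
    conn = pymysql.connect(host='10.10.10.15', user='spider',
                           password='jlspider', database='spider',
                           charset='utf8')
    try:
        with conn.cursor() as cur:
            cur.executemany(sql, rows)
        conn.commit()
    finally:
        conn.close()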
Example #6
    def parse1(self, response):
        print("about to parse:", response.url)
        item = response.meta['item']

        if item['fund_code'] in blacklist:
            raise Exception('this fund is blacklisted')

        # list of years with holdings data
        url = "http://fund.eastmoney.com/f10/FundArchivesDatas.aspx?type=jjcc&code=%s&topline=200" % (
            item['fund_code'])
        html = requests_manager.get_html(url)
        with open('test1.html', 'w') as f:
            f.write(html)
        m = re.search(r'(?<=arryear:)\[.+?\](?=,)', html)
        year_list = eval(m.group()) if m else None
        if year_list is None:
            raise Exception(u'bad url %s' % url)

        for year0 in year_list:
            url = "http://fund.eastmoney.com/f10/FundArchivesDatas.aspx?type=jjcc&code=%s&topline=200&year=%s" % (
                item['fund_code'], str(year0))
            html = requests_manager.get_html(url)
            with open('test2.html', 'w') as f:
                f.write(html)

            html = re.search(r"<div class='box'>.+</div>",
                             html).group() if re.search(
                                 r"<div class='box'>.+</div>", html) else None
            if html is None:
                raise Exception(u'错误的 url %s' % url)

            bs_obj = bs4.BeautifulSoup(html, 'html.parser')

            with open('test3.html', 'w') as f:
                f.write(bs_obj.prettify(encoding='utf8'))

            for e_div in bs_obj.find_all('div', class_="box"):
                title = e_div.find('h4', class_="t").get_text(strip=True)
                print(response.url)
                print(title)
                converters = {u'股票代码': lambda s: str(s)}
                df0 = pd.read_html(e_div.table.prettify(encoding='utf8'),
                                   encoding='utf8',
                                   converters=converters)[0]
                df0.columns = [re.sub(r'\s+', '', s) for s in df0.columns]

                def normalize(s):
                    # keep just the key header text; ur'' literals are
                    # Python 2 only, so use a plain unicode pattern
                    m = re.search(u'占净值|持股数|持仓市值', s)
                    return m.group() if m else s

                df0.columns = [normalize(s) for s in df0.columns]

                df0[u'标题'] = title
                df0[u'cut_off_date'] = title.split(u'截止至:')[-1]
                df0[u'对应基金'] = item[u'fund_code']

                df0[u'年份'] = year0

                df0 = df0.rename(
                    {
                        u'股票代码': u'stock_code',
                        u'股票名称': u'stock_name',
                        u'占净值': u'net_value_ratio',
                        u'持股数': u'share_holding',
                        u'持仓市值': u'market_value',
                        u'对应基金': u'fund_code',
                        u'标题': u'title',
                        u'年份': u'year'
                    },
                    axis=1)

                df0 = df0.drop([u'序号', u'相关资讯', u'最新价', u'涨跌幅'],
                               axis=1,
                               errors='ignore')
                df0[u'crawler_key'] = df0[u'fund_code'] + u'/' + df0[
                    u'stock_code'] + u'/' + df0[u'cut_off_date']

                mysql_connecter.insert_df_data(df0,
                                               u'fund_holdings',
                                               method='UPDATE')
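Both parse1 variants read their fund item out of response.meta, so some start_requests must put it there. A sketch of the wiring under that assumption (spider name, fund codes, and url are placeholders; parse1 is the method shown above):

import scrapy

class FundSpider(scrapy.Spider):
    name = 'fund_holdings'  # hypothetical

    def start_requests(self):
        for fund_code in ['000001', '000003']:  # placeholder codes
            url = 'http://fund.eastmoney.com/%s.html' % fund_code  # placeholder page
            yield scrapy.Request(url,
                                 callback=self.parse1,
                                 meta={'item': {'fund_code': fund_code}})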
Example #7
    def parse1(self, response):
        bs_obj = bs4.BeautifulSoup(response.text, 'html.parser')
        item = response.meta['item']

        if item['fund_code'] in blacklist:
            raise Exception('this fund is blacklisted')

        try:
            # data embedded in the pingzhongdata js file
            url = 'http://fund.eastmoney.com/pingzhongdata/%s.js?v=%s' % (
                item['fund_code'],
                datetime.datetime.now().strftime('%Y%m%d%H%M%S'))
            js_data = requests_manager.get_html(url)
            js_data = re.sub(r'\s+', '', js_data)

            def re_func(key):
                # extract the value assigned to `key` in the js payload
                m = re.search(r'(?<=%s=).+?(?=;)' % key, js_data, re.S)
                return m.group() if m else None

            # fund size fluctuation
            Data_fluctuationScale = pd.read_json(
                re_func('Data_fluctuationScale'))

            for i in range(Data_fluctuationScale.shape[0]):
                # print(Data_fluctuationScale.loc[i, 'series'])
                # print(type(Data_fluctuationScale.loc[i, 'series']))
                ser = pd.Series(Data_fluctuationScale.loc[i, 'series'])
                ser = ser.rename({'mom': u'较上期环比', 'y': u'净资产规模(亿)'})
                ser['value_date'] = Data_fluctuationScale.loc[i, 'categories']
                Data_fluctuationScale.loc[i, 'series'] = ser.to_json()

            Data_fluctuationScale['fund_code'] = item['fund_code']
            Data_fluctuationScale['data_type'] = u'规模变动'
            Data_fluctuationScale['crawler_key'] = Data_fluctuationScale[
                'fund_code'] + '/' + Data_fluctuationScale[
                    'data_type'] + '/' + Data_fluctuationScale['categories']
            Data_fluctuationScale = Data_fluctuationScale.drop([
                'categories',
            ],
                                                               axis=1)
            Data_fluctuationScale = Data_fluctuationScale.rename(
                {'series': 'json_data'}, axis=1)

            Data_fluctuationScale.index = Data_fluctuationScale['crawler_key']
            # print(Data_fluctuationScale)
            if not Data_fluctuationScale.empty:
                mysql_connecter.insert_df_data(Data_fluctuationScale,
                                               'fund_mixed_data')

            # holder structure
            Data_holderStructure = json.loads(
                re_func('Data_holderStructure'))
            categories = Data_holderStructure['categories']
            series = Data_holderStructure['series']

            d = {d0['name']: d0['data'] for d0 in series}

            df = pd.DataFrame(d, index=categories)
            df['value_date'] = df.index
            ser = df.T.apply(lambda ser: ser.to_json())
            ser.name = 'json_data'

            Data_holderStructure = pd.DataFrame(ser, index=categories)
            Data_holderStructure['fund_code'] = item['fund_code']
            Data_holderStructure['data_type'] = u'持有人结构'
            Data_holderStructure['crawler_key'] = item[
                'fund_code'] + '/' + Data_holderStructure[
                    'data_type'] + '/' + Data_holderStructure.index

            Data_holderStructure.index = Data_holderStructure['crawler_key']
            if not Data_holderStructure.empty:
                mysql_connecter.insert_df_data(Data_holderStructure,
                                               'fund_mixed_data')

            # asset allocation
            Data_assetAllocation = json.loads(re_func('Data_assetAllocation'))
            categories = Data_assetAllocation['categories']
            series = Data_assetAllocation['series']

            d = {d0['name']: d0['data'] for d0 in series}

            df = pd.DataFrame(d, index=categories)
            df['value_date'] = df.index
            ser = df.T.apply(lambda ser: ser.to_json())
            ser.name = 'json_data'

            Data_assetAllocation = pd.DataFrame(ser, index=categories)
            Data_assetAllocation['fund_code'] = item['fund_code']
            Data_assetAllocation['data_type'] = u'资产配置'
            Data_assetAllocation['crawler_key'] = item[
                'fund_code'] + '/' + Data_assetAllocation[
                    'data_type'] + '/' + Data_assetAllocation.index

            Data_assetAllocation.index = Data_assetAllocation['crawler_key']
            if not Data_assetAllocation.empty:
                mysql_connecter.insert_df_data(Data_assetAllocation,
                                               'fund_mixed_data')

            # fund manager change history
            e_table = bs_obj.find('li', class_='fundManagerTab').table
            df0 = pd.read_html(e_table.prettify(encoding='utf8'),
                               encoding='utf8')[0]
            df0.columns = df0.loc[0, :]
            df0.columns.name = None
            df0 = df0.drop([
                0,
            ])
            df0.index = range(df0.shape[0])
            df = pd.DataFrame(
                {
                    'crawler_key': item['fund_code'] + '/' + u'基金经理变动',
                    'fund_code': item['fund_code'],
                    'data_type': u'基金经理变动',
                    'json_data': df0.to_json()
                },
                index=[
                    0,
                ])
            if not df.empty:
                mysql_connecter.insert_df_data(df,
                                               'fund_mixed_data',
                                               method='UPDATE')

        except Exception:
            log_obj.error("parse failed in %s( %s )\n%s" %
                          (self.name, response.url, traceback.format_exc()))
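Each fund_mixed_data row stores its series as a JSON string in json_data. A short sketch of reading one fund's rows back, assuming the table and connection details used in the examples above (the fund code is a placeholder):

import json
from contextlib import closing

import pandas as pd
import pymysql

with closing(pymysql.connect(host='10.10.10.15', user='spider',
                             password='jlspider', database='spider',
                             charset='utf8')) as conn:
    df = pd.read_sql(
        "SELECT * FROM `fund_mixed_data` "
        "WHERE `fund_code`='000001' AND `data_type`='规模变动'",
        conn)

records = [json.loads(s) for s in df['json_data']]  # one dict per period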