def get_web_data(self, comp_code, url, cols, rename_cols, start_date='',
                 date_col_name='date', data_type='json', headers=None):
    df = pd.DataFrame()

    # Daum web crawling
    page = 1
    while True:
        pg_url = url.format(code=comp_code, page=page)
        if data_type == 'json':
            if headers is not None:
                response = requests.get(pg_url, headers=headers)
            else:
                response = requests.get(pg_url)
            json_data = response.json()
            page_data = pd.DataFrame.from_dict(json_data['data'])
        else:
            page_data = pd.read_html(pg_url, header=0)[0]
            page_data = page_data.dropna()
        if len(page_data) == 0:
            break
        page_data = page_data[cols]
        page_data[date_col_name] = page_data[date_col_name].str.slice(0, 10).str.replace("-", ".")
        last_date = page_data.tail(1)[date_col_name].to_string(index=False)
        df = df.append(page_data, ignore_index=True)
        if start_date != '':
            if DateUtils.to_date(start_date) > DateUtils.to_date(last_date):
                break
        page += 1

    # Remove unneeded dates (rows older than start_date).
    if start_date != '':
        drop_cnt = 0
        df_len = len(df)
        for i in range(df_len):
            last_date = df.loc[df_len - i - 1, date_col_name]
            if DateUtils.to_date(start_date) > DateUtils.to_date(last_date):
                drop_cnt += 1
            else:
                break
        if drop_cnt > 0:
            df = df[:-drop_cnt]

    # Sort by date and rename columns.
    if df.shape[0] != 0:
        df = df.sort_values(by=date_col_name)
        df.rename(columns=rename_cols, inplace=True)
    return df
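# Hypothetical usage sketch for get_web_data (not part of the original source).
# Assumptions: `crawler` is an instance of the class that defines this method, and
# the URL below is a placeholder for a paginated JSON endpoint whose response is
# {"data": [...]} with 'date' and 'tradePrice' fields, the same shape the Daum
# market-index API used elsewhere in this module returns:
#
#   url_template = 'http://example.com/api/stocks/{code}/days?page={page}'
#   df = crawler.get_web_data(comp_code='005930',
#                             url=url_template,
#                             cols=['date', 'tradePrice'],
#                             rename_cols={'tradePrice': 'close'},
#                             start_date='2018.01.01',
#                             data_type='json')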
def train_months(self, start: str = '2018.01', end: str = '2018.11',
                 invest_money: float = 100000000) -> None:
    train_model = self._global_params.train_model
    start_month = DateUtils.to_date(start, '%Y.%m')
    end_month = DateUtils.to_date(end, '%Y.%m')
    between = DateUtils.between_months(start_month, end_month)
    invest_months_result = []
    result_columns = ["month", "invest_money", "result_money"]
    MOCK_MONEY = 10000000
    chart_data = []
    for i in range(between + 1):
        # Mock-invest over the preceding months to rank companies.
        # params.remove_session_file = True
        before_month_start = DateUtils.to_month_str(start_month, i - self._global_params.mock_period_months)
        before_month_end = DateUtils.to_month_str(start_month, i - 1)
        self._global_params.invest_start_date = before_month_start + '.01'
        self._global_params.invest_end_date = before_month_end + '.31'
        self._global_params.result_file_name = "MOCK_" + before_month_start + "-" + before_month_end
        self._global_params.invest_money = MOCK_MONEY

        corp = Corp(self._global_params)
        corps = corp.get_eval_corps_auto(self._global_params.invest_end_date)
        self._env.set_params(params=self._global_params)
        before_result, _ = self.trains(corps)

        # Pick the top-10 companies by mock investment result.
        now_month = DateUtils.to_month_str(start_month, i)
        before_result = corp.exclude_corps(before_result, now_month)
        before_result = before_result.sort_values(by='invest_result', ascending=False)
        before_result.index = range(len(before_result.index))
        corp10_codes = before_result.loc[:9, 'code']
        corp10_codes.index = range(len(corp10_codes.index))
        corp10 = corp.get_corps_for_codes(corp10_codes)
        corp10_len = len(corp10.index)

        # Invest the real budget in the top-10 companies for the current month.
        self._global_params.invest_start_date = now_month + '.01'
        self._global_params.invest_end_date = now_month + '.31'
        self._global_params.result_file_name = "INVEST_" + now_month
        self._global_params.invest_money = invest_money / corp10_len
        self._env.set_params(params=self._global_params)
        now_result, invest_chart_data = self.trains(corp10)
        chart_data.append(invest_chart_data)
        # Reinvest the realized amount in the following month.
        invest_money = now_result['invest_result'].sum()

        result = [now_month, self._global_params.invest_money * corp10_len, invest_money]
        invest_months_result.append(result)
        print(result)

    df_imr = pd.DataFrame(invest_months_result, columns=result_columns)
    save_file_name = "recommend_months_" + start + "-" + end + ".xlsx"
    if "_" in train_model:
        save_file_path = os.path.join('result', 'reinforcement', train_model,
                                      self._global_params.ensemble_type, save_file_name)
    else:
        save_file_path = os.path.join('result', 'reinforcement', train_model, save_file_name)
    DataUtils.save_excel(df_imr, save_file_path)

    if len(chart_data) > 1:
        visualizer = InvestVisualizer(self._global_params)
        visualizer.draw_invest_months(chart_data, start, end)
        print()
def _get_stock_naver_data(self, comp_code, start_date):
    """Fetch daily stock data from Naver Finance."""
    url = self._get_naver_url(comp_code)
    df = pd.DataFrame()

    # Naver web crawling
    page = 1
    bf_date = ''
    while True:
        pg_url = '{url}&page={page}'.format(url=url, page=page)
        page_data = pd.read_html(pg_url, header=0)[0]
        page_data = page_data.dropna()
        last_date = page_data.tail(1)['날짜'].to_string(index=False)
        if bf_date == last_date:
            break
        df = df.append(page_data, ignore_index=True)
        if start_date != '':
            if DateUtils.to_date(start_date) > DateUtils.to_date(last_date):
                break
        if len(page_data) < 10:
            break
        page += 1
        bf_date = last_date

    # Remove unneeded dates (rows older than start_date).
    if start_date != '':
        drop_cnt = 0
        df_len = len(df)
        for i in range(df_len):
            last_date = df.loc[df_len - i - 1, '날짜']
            if DateUtils.to_date(start_date) > DateUtils.to_date(last_date):
                drop_cnt += 1
            else:
                break
        if drop_cnt > 0:
            df = df[:-drop_cnt]

    # Sort by date and rename columns to English.
    if df.shape[0] != 0:
        df = df.sort_values(by='날짜')
        df.rename(columns={'날짜': 'date', '종가': 'close', '전일비': 'diff',
                           '시가': 'open', '고가': 'high', '저가': 'low',
                           '거래량': 'volume'}, inplace=True)
    return df
def get_stock_daum_data_before(self, comp_code, start_date=''):
    """Fetch daily stock data from Daum Finance."""
    url = self.get_daum_url_before(comp_code, start_date)
    df = pd.DataFrame()

    # Daum web crawling
    page = 1
    while True:
        pg_url = '{url}&page={page}'.format(url=url, page=page)
        page_data = pd.read_html(pg_url, header=0)[0]
        page_data = page_data.dropna()
        if len(page_data) == 0:
            break
        page_data = page_data[['일자', '종가', '시가', '고가', '저가', '거래량']]
        page_data['일자'] = pd.to_datetime(page_data['일자'], format='%y.%m.%d').dt.strftime('%Y.%m.%d')
        last_date = page_data.tail(1)['일자'].to_string(index=False)
        df = df.append(page_data, ignore_index=True)
        if start_date != '':
            if DateUtils.to_date(start_date) > DateUtils.to_date(last_date):
                break
        page += 1

    # Remove unneeded dates (rows older than start_date).
    if start_date != '':
        drop_cnt = 0
        df_len = len(df)
        for i in range(df_len):
            last_date = df.loc[df_len - i - 1, '일자']
            if DateUtils.to_date(start_date) > DateUtils.to_date(last_date):
                drop_cnt += 1
            else:
                break
        if drop_cnt > 0:
            df = df[:-drop_cnt]

    # Sort by date and rename columns to English.
    if df.shape[0] != 0:
        df = df.sort_values(by='일자')
        df.rename(columns={'일자': 'date', '종가': 'close', '시가': 'open',
                           '고가': 'high', '저가': 'low', '거래량': 'volume'}, inplace=True)
    return df
def recommend_corps(recommend_month: str, train_model: str = 'rnn') -> None:
    """Basic model trained in a single session."""
    month = DateUtils.to_date(recommend_month, '%Y.%m')
    params = GlobalParams(train_model=train_model)
    # params.remove_session_file = True
    before_month_start = DateUtils.to_month_str(month, -params.mock_period_months)
    before_month_end = DateUtils.to_month_str(month, -1)
    params.invest_start_date = before_month_start + '.01'
    params.invest_end_date = DateUtils.to_date_str(month - datetime.timedelta(days=1))
    params.result_file_name = "MOCK_" + before_month_start + "-" + before_month_end

    corp = Corp(params)
    corps = corp.get_eval_corps_auto(params.invest_end_date)
    invests = LearningNMockInvestment(params)
    invests.train_n_invests(corps)
    before_result = pd.read_csv(invests.get_result_file_path())

    if params.rmse_max_recommend is not None:
        before_result = before_result.query("rmse<" + str(params.rmse_max_recommend))
    before_result = before_result.sort_values(by='invest_result', ascending=False)
    before_result.index = range(len(before_result.index))

    save_file_name = "recommend_months_" + recommend_month + ".xlsx"
    save_file_path = os.path.join('result', train_model, save_file_name)
    DataUtils.save_csv(before_result, save_file_path)
    print(before_result)
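# Hypothetical usage (not part of the original source). Assumes GlobalParams can be
# constructed with its defaults and that the mock-investment result CSV is written
# under result/<train_model>/ as the function expects:
#
#   recommend_corps('2018.12')                      # rank companies for December 2018
#   recommend_corps('2019.01', train_model='cnn')   # 'cnn' is an illustrative model name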
def get_stock_data(self, comp_code: str) -> pd.DataFrame:
    comp_code = DataUtils.to_string_corp_code(comp_code)
    file_path = os.path.join(self.DIR_STOCKS, comp_code + '.txt')
    if os.path.isfile(file_path):
        stock_data = pd.read_csv(file_path)
        if hasattr(self.params, 'check_stock_data') and self.params.check_stock_data == True:
            # Drop the last cached row and refresh the cache from the web.
            stock_data = stock_data.dropna()
            stock_data = stock_data[:-1]
            date_last = stock_data.tail(1)['date'].to_string(index=False)
            date_next = DateUtils.to_date(date_last) + datetime.timedelta(days=1)
            date_next = date_next.strftime("%Y.%m.%d")
            new_data = self.get_stock_web_data(comp_code, date_next)
            if len(new_data) > 0:
                stock_data = stock_data.append(new_data, ignore_index=True)
                stock_data = stock_data.dropna()
                stock_data.to_csv(file_path, index=False)
    else:
        stock_data = self.get_stock_web_data(comp_code, '')
        stock_data.to_csv(file_path, index=False)
        stock_data = stock_data.dropna()

    if hasattr(self.params, 'forcast_date') and self.params.forcast_date is not None:
        stock_data = stock_data.query("date<'{}'".format(self.params.forcast_date))
    elif hasattr(self.params, 'remove_stock_days') and self.params.remove_stock_days > 0:
        stock_data = stock_data[:-self.params.remove_stock_days]
    return stock_data
def get_kospi_kosdaq(self, market='KOSPI'):
    file_path = os.path.join(self.DIR, 'files', market + '.txt')
    if os.path.isfile(file_path):
        kos_data = pd.read_csv(file_path)
        if hasattr(self.params, 'check_kos_data') and self.params.check_kos_data == True:
            # Drop the last cached row and refresh the cache from Daum.
            kos_data = kos_data.dropna()
            kos_data = kos_data[:-1]
            date_last = kos_data.tail(1)['date'].to_string(index=False)
            date_next = DateUtils.to_date(date_last) + datetime.timedelta(days=1)
            date_next = date_next.strftime("%Y.%m.%d")
            new_data = self.get_kospi_kosdaq_from_daum(market, date_next)
            if len(new_data) > 0:
                kos_data = kos_data.append(new_data, ignore_index=True)
                kos_data = kos_data.dropna()
                kos_data.to_csv(file_path, index=False)
    else:
        kos_data = self.get_kospi_kosdaq_from_daum(market, '')
        kos_data.to_csv(file_path, index=False)
        kos_data = kos_data.dropna()
    return kos_data
def get_eval_corps_auto(self, date_maket_cap=None) -> pd.DataFrame:
    """Select 100 stock items according to the predefined rule."""
    if not hasattr(self.params, 'invest_start_date') or self.params.invest_start_date is None:
        invest_start_date_str = DateUtils.today_str('%Y.%m.%d')
    else:
        invest_start_date_str = self.params.invest_start_date
    invest_start_date = DateUtils.to_date(invest_start_date_str)

    if not hasattr(self.params, 'max_listing_period_years') or self.params.max_listing_period_years is None:
        max_listing_period_years = 20
    else:
        max_listing_period_years = self.params.max_listing_period_years
    max_listing_date = DateUtils.add_years(invest_start_date, -max_listing_period_years)
    max_listing_date = DateUtils.to_date_str(max_listing_date, '%Y-%m-%d')

    corps = self.get_corps_all()
    corps = corps.query("상장일<'{}'".format(max_listing_date))
    corps.loc[:, '종목코드'] = corps['종목코드'].astype(str).str.zfill(6)

    if date_maket_cap is None:
        date_maket_cap = invest_start_date_str
    # corps_cap = self.get_corps_maket_cap(date_maket_cap)
    corps_cap = self.get_now_corps_maket_cap()
    corps = corps.merge(corps_cap, on='종목코드')
    corps = corps.sort_values(by=["시가총액"], ascending=False)

    # Top 50 by market cap plus 50 from the tail (excluding the last 10), 100 in total.
    selected_corps_first = corps[:50]
    selected_corps_last = corps[len(corps) - 60:-10]
    return selected_corps_first.append(selected_corps_last, ignore_index=True)
def get_kospi_kosdaq_from_daum(self, market='KOSPI', start_date=''):
    daum_url = 'http://finance.daum.net/api/market_index/days?page={page}&perPage=10&market={market}&pagination=true'
    df = pd.DataFrame()

    # Daum web crawling
    page = 1
    while True:
        pg_url = daum_url.format(market=market, page=page)
        response = requests.get(pg_url, headers=self.DAUM_HEADER)
        json_data = response.json()
        page_data = pd.DataFrame.from_dict(json_data['data'])
        if len(page_data) == 0:
            break
        page_data = page_data[['date', 'tradePrice']]
        page_data['date'] = page_data['date'].str.slice(0, 10).str.replace("-", ".")
        last_date = page_data.tail(1)['date'].to_string(index=False)
        df = df.append(page_data, ignore_index=True)
        if start_date != '':
            if DateUtils.to_date(start_date) > DateUtils.to_date(last_date):
                break
        page += 1

    # Remove unneeded dates (rows older than start_date).
    if start_date != '':
        drop_cnt = 0
        df_len = len(df)
        for i in range(df_len):
            last_date = df.loc[df_len - i - 1, 'date']
            if DateUtils.to_date(start_date) > DateUtils.to_date(last_date):
                drop_cnt += 1
            else:
                break
        if drop_cnt > 0:
            df = df[:-drop_cnt]

    # Sort by date and rename columns.
    if df.shape[0] != 0:
        df = df.sort_values(by='date')
        df.rename(columns={'tradePrice': 'close'}, inplace=True)
    return df
def update_stocks_data(self):
    files = glob.glob(self.DIR_STOCKS + "/*.txt")
    for file_path in files:
        file_name = os.path.basename(file_path)
        stock_data = pd.read_csv(file_path)
        stock_data = stock_data.dropna()
        stock_data = stock_data[:-1]
        date_last = stock_data.tail(1)['date'].to_string(index=False)
        date_next = DateUtils.to_date(date_last) + datetime.timedelta(days=1)
        date_next = date_next.strftime("%Y.%m.%d")
        comp_code = file_name.replace(".txt", "")
        new_data = self.get_stock_web_data(comp_code, date_next)
        if len(new_data) > 0:
            stock_data = stock_data.append(new_data, ignore_index=True)
            stock_data = stock_data.dropna()
            stock_data.to_csv(file_path, index=False)
def get_train_test(self, data, scaler_close=None):
    """Build the train and test datasets."""
    data = data.copy()
    # Drop rows where any price or volume column is zero.
    data = data[(data[['close', 'open', 'high', 'low', 'volume']] != 0).all(1)]
    data.index = pd.RangeIndex(len(data.index))
    # data = self.add_mean_line(data)

    if self.params.invest_end_date is not None:
        data = data.query("date<='{}'".format(self.params.invest_end_date))
    if self.params.invest_start_date is not None:
        invest_data = data.query("date>='{}'".format(self.params.invest_start_date))
        invest_count = len(invest_data.index) - 1
        self.params.invest_count = invest_count
        invest_start_date_str = self.params.invest_start_date
    else:
        invest_count = 0
        self.params.invest_count = 0
        invest_start_date_str = data.tail(1)['date'].to_string(index=False)
    invest_start_date = DateUtils.to_date(invest_start_date_str)

    if hasattr(self.params, 'stock_training_period_years'):
        period = self.params.stock_training_period_years
        stock_start_date = DateUtils.add_years(invest_start_date, -period)
        stock_start_date = stock_start_date.strftime("%Y.%m.%d")
        data = data.query("date>='{}'".format(stock_start_date))

    test_count = None
    if hasattr(self.params, 'stock_test_period_years') and self.params.stock_test_period_years is not None:
        period = self.params.stock_test_period_years
        test_start_date = DateUtils.add_years(invest_start_date, -period)
        test_start_date = DateUtils.to_date_str(test_start_date)
        test_data = data.query("date>='{}'".format(test_start_date))
        test_count = len(test_data.index) - invest_count

    scaled_data, scaler_close = self.get_scaled_data(data, scaler_close)
    dataX, dataY, dataX_last, y_date = self.get_dataXY(scaled_data)
    data_params = self.split_train_test(dataX, dataY, invest_count, test_count, y_date)
    return data_params, scaler_close, dataX_last
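# Hypothetical call sequence (not part of the original source). Assumes `data` is a
# DataFrame with 'date', 'close', 'open', 'high', 'low' and 'volume' columns, as
# produced by the stock loaders above, and that self.params carries invest_start_date
# and invest_end_date in 'YYYY.MM.DD' form:
#
#   data_params, scaler_close, dataX_last = self.get_train_test(stock_data)
#   # data_params holds the train/test/invest splits from split_train_test;
#   # scaler_close can be passed back in to scale further data consistently;
#   # dataX_last is presumably the most recent input window, e.g. for a next-day forecast.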
def get_stock_data(self, comp_code):
    comp_code = DataUtils.to_string_corp_code(comp_code)
    file_path = './data/files/stocks/' + comp_code + '.csv'
    if os.path.isfile(file_path):
        stock_data = pd.read_csv(file_path)
        stock_data = stock_data[:-1]
        date_last = stock_data.tail(1)['date'].to_string(index=False)
        date_next = DateUtils.to_date(date_last) + datetime.timedelta(days=1)
        date_next = date_next.strftime("%Y-%m-%d")
        new_data = self._get_stock_naver_data(comp_code, date_next)
        if len(new_data) > 0:
            stock_data = stock_data.append(new_data, ignore_index=True)
            stock_data.to_csv(file_path, index=False)
    else:
        stock_data = self._get_stock_naver_data(comp_code, '')
        stock_data.to_csv(file_path, index=False)
    if self.params.remove_stock_days > 0:
        stock_data = stock_data[:-self.params.remove_stock_days]
    return stock_data
def train_months(start: str = '2018.01', end: str = '2018.09',
                 invest_money: float = 100000000, train_model: str = 'rnn') -> None:
    """Basic model trained in a single session."""
    start_month = DateUtils.to_date(start, '%Y.%m')
    end_month = DateUtils.to_date(end, '%Y.%m')
    between = DateUtils.between_months(start_month, end_month)
    invest_months_result = []
    result_columns = ["month", "invest_money", "result_money"]
    MOCK_MONEY = 10000000
    chart_data = []
    params = None
    index_money = None
    for i in range(between + 1):
        # Mock-invest over the preceding months to rank companies.
        params = GlobalParams(train_model=train_model)
        # params.remove_session_file = True
        before_month_start = DateUtils.to_month_str(start_month, i - params.mock_period_months)
        before_month_end = DateUtils.to_month_str(start_month, i - 1)
        params.invest_start_date = before_month_start + '.01'
        params.invest_end_date = before_month_end + '.31'
        params.result_file_name = "MOCK_" + before_month_start + "-" + before_month_end
        params.invest_money = MOCK_MONEY

        corp = Corp(params)
        corps = corp.get_eval_corps_auto(params.invest_end_date)
        invests = LearningNMockInvestment(params)
        invests.train_n_invests(corps)
        before_result = pd.read_csv(invests.get_result_file_path())

        # Pick the top-10 companies by mock investment result.
        now_month = DateUtils.to_month_str(start_month, i)
        if params.rmse_max_recommend is not None:
            before_result = before_result.query("rmse<" + str(params.rmse_max_recommend))
        before_result = corp.exclude_corps(before_result, now_month)
        before_result = before_result.sort_values(by='invest_result', ascending=False)
        before_result.index = range(len(before_result.index))
        corp10_codes = before_result.loc[:9, 'code']
        corp10_codes.index = range(len(corp10_codes.index))
        corp10 = corp.get_corps_for_codes(corp10_codes)
        corp10_len = len(corp10_codes.index)

        # Invest the real budget in the top-10 companies for the current month.
        params = GlobalParams(train_model=train_model)
        # params.remove_session_file = False
        params.invest_start_date = now_month + '.01'
        params.invest_end_date = now_month + '.31'
        params.result_file_name = "INVEST_" + now_month
        params.invest_money = invest_money / corp10_len
        if index_money is not None:
            params.index_money = index_money / corp10_len
        invests = LearningNMockInvestment(params)
        invest_chart_data = invests.train_n_invests(corp10, invest_only=False)
        chart_data.append(invest_chart_data)

        # Reinvest the realized amounts in the following month.
        now_result = pd.read_csv(invests.get_result_file_path())
        invest_money = now_result['invest_result'].sum()
        index_money = now_result['all_invest_result'].sum()
        invest_months_result.append([now_month, params.invest_money * corp10_len, invest_money])
        print(now_month, params.invest_money * corp10_len, invest_money)

    df_imr = pd.DataFrame(invest_months_result, columns=result_columns)
    save_file_name = "recommend_months_" + start + "-" + end + ".xlsx"
    if "_" in train_model:
        save_file_path = os.path.join('result', train_model, params.ensemble_type, save_file_name)
    else:
        save_file_path = os.path.join('result', train_model, save_file_name)
    DataUtils.save_csv(df_imr, save_file_path)

    if len(chart_data) > 1 and params is not None:
        visualizer = InvestVisualizer(params)
        visualizer.draw_invest_months(chart_data, start, end)
        print()
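if __name__ == '__main__':
    # Hypothetical entry point, not part of the original source. Assumes the
    # GlobalParams defaults, the corporation/stock data files, and the 'result'
    # directory are available; running it starts a full month-by-month mock
    # investment over the given period.
    train_months(start='2018.01', end='2018.09', invest_money=100000000, train_model='rnn')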