def All_stock():
    df = stock.get_market_ohlcv_by_ticker('20210217', market='KOSPI')
    list_of_index = [item for item in df.index]
    ticker = sorted(list_of_index)  # ticker codes in ascending order
    ticker_name = [stock.get_market_ticker_name(i) for i in ticker]  # names in the same order
    return ticker, ticker_name
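# A minimal usage sketch for All_stock() above, assuming `from pykrx import
# stock` has already been imported. The two lists are index-aligned:
# names[i] is the listed name for tickers[i].
tickers, names = All_stock()
for code, name in zip(tickers[:5], names[:5]):
    print(code, name)  # the five lowest KOSPI codes with their names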
def krx_price_query(request):
    """
    Price information.
    :param request:
    :return:
    """
    render_dict = get_render_dict("krx_price_query")
    if request.POST:
        query = request.POST["query"]
        render_dict["query"] = query
        realtime_result = get_xml_request(krx_price_query_url.format(query))
        if realtime_result:
            krx_realtime(realtime_result, query, render_dict)
        statement_result = get_xml_request(
            krx_statement_query_url.format(query))
        if statement_result:
            krx_statement(statement_result, render_dict)
        # df = stock.get_market_ohlcv_by_date("20200426", "20200426", query)
        # render_dict["market"] = df
        ticker_name = stock.get_market_ticker_name(query)
        render_dict["ticker_name"] = ticker_name
    stocks = models.Stock.objects.all().order_by("code")
    render_dict["stocks"] = stocks
    return render(request, "book/investment/krx_price_query.html", render_dict)
def pykrx_scratch(date_Start, date_End):
    print("Reading Daily Chart ... {} - {}".format(date_Start, date_End))
    # create the main folder
    if not os.path.exists(Krx_Char_folder_path):
        os.mkdir(Krx_Char_folder_path)
    # scrape every ticker
    for ticker in stock.get_market_ticker_list(market="ALL"):
        stock_name = stock.get_market_ticker_name(ticker)
        stock_folder_name = stock_name + '_' + ticker
        # print(stock_name, ticker)
        df = stock.get_market_ohlcv_by_date(date_Start, date_End, ticker)
        df = df.reset_index()
        # print(len(df))
        df.insert(5, '종가2', df['종가'])  # duplicate the closing-price column
        # folder check
        if not os.path.exists(Krx_Char_folder_path + '/' + stock_name):
            os.mkdir(Krx_Char_folder_path + '/' + stock_name)
        df.to_csv(Krx_Char_folder_path + '/' + stock_name + '/' + ticker + '.csv',
                  sep=',', na_rep='0', index=False, header=False)
        print('{} Daily chart is written! ==== ticker is : {}'.format(
            stock_name, ticker))
    print('Scratching daily chart is done!')
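# Usage sketch for pykrx_scratch(): Krx_Char_folder_path is referenced but not
# defined in the snippet above, so a placeholder value is assumed here.
Krx_Char_folder_path = './krx_charts'  # assumed path, not from the original
pykrx_scratch('20200101', '20200110')  # writes one CSV per listed ticker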
def getTickerNameALL(self):
    logger.info("GetStockList - getTickerNameALL")
    for stockTicker in self.ALL:
        try:
            self.tickerToName[stockTicker] = stock.get_market_ticker_name(stockTicker)
        except Exception:
            self.tickerToName[stockTicker] = stockTicker
            logger.critical(f"GetStockList - getTickerNameALL; No name retrieved, ticker : {stockTicker}")
def save_market_code_name(self, date, market):
    item_codes = stock.get_market_ticker_list(date, market=market)
    for item_code in item_codes:
        item_name = stock.get_market_ticker_name(item_code)
        item_code = int(item_code)  # note: int() drops leading zeros ('000660' -> 660)
        data = {
            "stock_item_name": item_name,
            "stock_item_code": item_code,
            "stock_market_name": market,
        }
        req = CreateItemListReq()
        req.set_param(data)
def post_item_list(self, tickers_in_market: dict):
    req = CreateItemListReq()
    for market_name in tickers_in_market.keys():
        tickers = tickers_in_market[market_name]
        for ticker in tickers:
            item_name = stock.get_market_ticker_name(ticker)
            params = {'stock_item_name': item_name,
                      'stock_item_code': ticker,
                      'stock_market_name': market_name}
            req.set_param(params)
            res = req.send_post()
            print(res.json())
def getCorpCodeByCorpName(self, corpName):
    if self.useCorpCodeXml:
        for corpList in self.corpList:
            if corpList.findtext("corp_name") == corpName:
                return corpList.findtext("corp_code")
    else:
        for ticker in self.tickerList:
            name = stock.get_market_ticker_name(ticker)
            if name == corpName:
                return ticker
    return None
def __collect_tickers(market='KOSPI'):
    """
    stock.get_market_ticker_list()
    >> ['095570', '006840', '027410', '282330', '138930', ...]
    stock.get_market_ticker_name(ticker)
    >> SK하이닉스
    """
    tickers = dict()
    today = datetime.today().strftime("%Y%m%d")
    for ticker in stock.get_market_ticker_list(today, market):
        ticker_name = stock.get_market_ticker_name(ticker)
        tickers[ticker_name] = ticker
    return tickers
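# Hedged usage sketch for __collect_tickers() above (private by convention,
# so called from within the same module). It maps listed names to codes for
# today's KOSPI session.
tickers = __collect_tickers(market='KOSPI')
print(tickers.get('삼성전자'))  # expected '005930' while Samsung Electronics is listed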
def find_all_codes(fromdate: date, todate: date):
    tempd = fromdate
    codes = {}
    # Sample the listing roughly once a year across the range, so any code
    # listed on at least one sampled date is captured.
    while True:
        codes.update({code: None
                      for code in pykrx_stock.get_market_ticker_list(
                          _date_to_str(tempd), market='ALL')})
        if tempd > todate:
            break
        tempd += timedelta(days=365)
    for code in codes:
        name = pykrx_stock.get_market_ticker_name(code)
        if isinstance(name, str):
            codes.update({code: name})
    return codes
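# Usage sketch for find_all_codes(). _date_to_str() is not shown in the
# snippet above; a plausible stand-in (an assumption, not the original helper)
# is defined here.
def _date_to_str(d: date) -> str:
    return d.strftime("%Y%m%d")

codes = find_all_codes(date(2020, 1, 1), date(2021, 1, 1))
print(len(codes))  # every code listed on at least one sampled date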
def getMarketOhlcvByTicker(self, date=None, market="ALL"): if date == None: return None # NOTE: 백업 데이터 존재 여부 확인. if not os.path.isdir(self.dirMarketOhlcv): os.makedirs(self.dirMarketOhlcv) filePath = self.dirMarketOhlcv + \ '/marketOhlcv_%s_%s.bin' % (market, date) if os.path.isfile(filePath): # NOTE: file 이 있고, corpName 에 해당하는 데이터 있는지 확인. with open(filePath, 'rb') as f: marketOhlcv = pickle.load(f) return marketOhlcv rawOhlcvList = None try: rawOhlcvList = stock.get_market_ohlcv_by_ticker(date, market) except: print("[param] date 를 확인해주세요.") print( "[위 사항이 아니라면] [KRX API update 가 필요합니다] stock.get_market_ohlcv_by_ticker") exit() # NOTE: rawOhlcvList 데이터는 pandas.DataFrame 인데, DaraFrame 전체가 empty 면 rawOhlcvList.empty 는 True 를 리턴 함. # [refer] https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.empty.html if rawOhlcvList.empty == True: return None resOhlcv = dict() desc = "get ohlcv" for ticker in tqdm(rawOhlcvList.index, desc): corpName = stock.get_market_ticker_name(ticker) tmpDict = dict() for column in rawOhlcvList.columns: tmpDict[column] = str(rawOhlcvList.loc[ticker][column]) resOhlcv[corpName] = tmpDict # NOTE: 데이터 백업. with open(filePath, 'wb') as f: pickle.dump(resOhlcv, f) return resOhlcv
def getMarketFundamentalByTicker(self, date=None, market="ALL"): if date == None: return None # NOTE: 백업 데이터 존재 여부 확인. if not os.path.isdir(self.dirMarketFundamental): os.makedirs(self.dirMarketFundamental) filePath = self.dirMarketFundamental + \ '/marketFundamental_%s_%s.bin' % (market, date) if os.path.isfile(filePath): # NOTE: file 이 있고, corpName 에 해당하는 데이터 있는지 확인. with open(filePath, 'rb') as f: marketFundamental = pickle.load(f) return marketFundamental rawFundamentalList = None try: rawFundamentalList = stock.get_market_fundamental_by_ticker( date, market) # pandas form except: print("[KRX API update 가 필요합니다] stock.get_market_fundamental_by_ticker") exit() if rawFundamentalList.empty == True: print("[WARN] please check parameter `date`") return None resFundamental = dict() desc = "get fundamental" for ticker in tqdm(rawFundamentalList.index, desc): corpName = stock.get_market_ticker_name(ticker) tmpDict = dict() for column in rawFundamentalList.columns: tmpDict[column] = str( rawFundamentalList.loc[ticker][column]).replace(" ", "") resFundamental[corpName] = tmpDict # NOTE: 데이터 백업. with open(filePath, 'wb') as f: pickle.dump(resFundamental, f) return resFundamental
def getMarketCapByTicker(self, date=None, market="ALL"): if date is None: return None if not os.path.isdir(self.dirMarketCap): os.makedirs(self.dirMarketCap) filePath = self.dirMarketCap + '/' + \ 'marketCap_%s_%s.bin' % (market, date) if os.path.isfile(filePath): with open(filePath, 'rb') as f: marketCap = pickle.load(f) return marketCap rawMarketCap = None try: rawMarketCap = stock.get_market_cap_by_ticker( date, market) # pandas form except: print("[KRX API update 가 필요합니다] stock.get_market_cap_by_ticker") exit() if rawMarketCap.empty == True: print("[WARN] please check parameter `date`") return None resMarketCap = dict() desc = "get marketCap" for ticker in tqdm(rawMarketCap.index, desc): tmpDict = dict() corpName = stock.get_market_ticker_name(ticker) tmpDict["종목명"] = corpName for column in rawMarketCap.columns: # 종가, 시가총액, 거래량, 시가총액, 상장주식수 tmpDict[column] = str( rawMarketCap.loc[ticker][column]).replace(" ", "") resMarketCap[corpName] = tmpDict # NOTE: 데이터 백업. with open(filePath, 'wb') as f: pickle.dump(resMarketCap, f) return resMarketCap
class krxModule():
    tickerList = stock.get_market_ticker_list()
    nameList = map(lambda x: stock.get_market_ticker_name(x), tickerList)
    nameMap = dict(zip(tickerList, nameList))  # ticker -> name, as getName_by_ticker expects

    # key == index, value == dictionary {column: value}
    def to_dict(self, df):
        return {df.iloc[i].name: df.iloc[i, :].to_dict() for i in range(len(df))}

    def chartJSON(self, dic):
        tmp_dic = {ticker["종목명"]: ticker["등락률"] for _, ticker in dic.items()}
        labels = list(tmp_dic.keys())
        datas = list(tmp_dic.values())
        rand = [[np.random.randint(0, 255),
                 np.random.randint(0, 255),
                 np.random.randint(0, 255)] for _ in range(len(labels))]
        backgroundColors = ['rgba(%d,%d,%d,0.2)' % (rand[i][0], rand[i][1], rand[i][2])
                            for i in range(len(labels))]
        borderColors = ['rgba(%d,%d,%d,1)' % (rand[i][0], rand[i][1], rand[i][2])
                        for i in range(len(labels))]
        json_ = {
            'type': 'bar',
            'data': {
                'labels': labels,
                'datasets': [{
                    'label': '등락률',
                    'data': datas,
                    'backgroundColor': backgroundColors,
                    'borderColor': borderColors,
                    'borderWidth': '1'
                }]
            },
            'options': {
                'responsive': False,
                'scales': {
                    'yAxes': [{'ticks': {'beginAtZero': True}}]
                },
            }
        }
        return json_

    def getName_by_ticker(self, ticker):
        return self.nameMap[ticker]

    def getTop10(self, start, end):
        df = stock.get_market_price_change_by_ticker(start, end)
        return self.to_dict(df.sort_values("등락률", ascending=False).head(10))
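# Usage sketch for krxModule: getTop10() returns the ten biggest gainers by
# change rate ("등락률") over the date range, and chartJSON() turns that dict
# into a Chart.js-style bar-chart spec.
km = krxModule()
top10 = km.getTop10("20200501", "20200510")
chart = km.chartJSON(top10)
print(chart['data']['labels'])  # the ten stock names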
def news_finder(stock_id, start_date):
    stock_name = stock.get_market_ticker_name(stock_id)

    # Open the Naver Finance news section.
    driver = webdriver.Chrome(executable_path="./chromedriver.exe")
    url = "https://finance.naver.com/news/"
    driver.get(url)
    driver.find_element_by_xpath(
        '//*[@id="newsMainTop"]/div/div[2]/form/div/input').click()  # clear the search box
    driver.find_element_by_xpath(
        '//*[@id="newsMainTop"]/div/div[2]/form/div/input').send_keys(
            stock_name)  # type the stock name into the search box
    driver.find_element_by_xpath(
        '//*[@id="newsMainTop"]/div/div[2]/form/div/a').click()  # click the search icon

    now = datetime.datetime.now()
    today_date = str(now.strftime('%Y-%m-%d'))
    result_url = driver.current_url
    # Restrict the search to titles only, with a user-supplied start date.
    new_url = (result_url + "&sm=title.basic&pd=4&stDateStart=" + start_date +
               "&stDateEnd=" + today_date)
    driver.get(new_url)

    # Find out how many result pages exist.
    bs_obj = bs4.BeautifulSoup(driver.page_source, "html.parser")
    last = bs_obj.find("td", {"class": "pgRR"})
    a = last.find('a', href=True)
    last_page_num = int(a['href'].split('page=')[1])

    # Collect the URL of each of the last_page_num pages into `pages`.
    page_numbering = list(range(1, last_page_num + 1))
    pages = []
    for i in page_numbering:
        pages.append(new_url + "&page=" + str(i))

    # Create an empty DataFrame, then fill it with the crawled articles.
    df = pd.DataFrame(columns=("date", "title", "content"))
    for page in pages:
        driver.get(page)
        bs_obj = bs4.BeautifulSoup(driver.page_source, "html.parser")
        # Keep only the region we need and ignore the rest.
        news_list = bs_obj.find("dl", {"class": "newsList"})
        # Naver News tags articles with and without thumbnails differently,
        # so both tags are queried and the results merged.
        news_titles_1 = news_list.find_all("dt", {"class": "articleSubject"})
        news_titles_2 = news_list.find_all("dd", {"class": "articleSubject"})
        news_titles = news_titles_1 + news_titles_2
        del news_titles_1, news_titles_2  # drop variables we no longer need

        for title in news_titles:
            temp = title.find('a', href=True)
            news_url = "https://finance.naver.com" + temp['href']
            driver.get(news_url)  # open each individual article
            bs_obj = bs4.BeautifulSoup(driver.page_source, "html.parser")
            if len(driver.window_handles) > 1:  # a popup window opened
                time.sleep(1)
                driver.switch_to.window(driver.window_handles[1])
                driver.close()  # close the popup
                driver.switch_to.window(
                    driver.window_handles[0])  # return to the original window
            else:  # no popup window
                # Crawl the article title.
                title = driver.find_element_by_xpath(
                    '//*[@id="contentarea_left"]/div[2]/div[1]/div[2]/h3').text
                # Crawl the article upload date.
                date = driver.find_element_by_xpath(
                    '//*[@id="contentarea_left"]/div[2]/div[1]/div[2]/div/span').text
                # Crawl the article body.
                content = collect_data.nlp_news(
                    driver.find_element_by_xpath('//*[@id="content"]').text)
                df.loc[len(df)] = [date, title, content]  # append a row

    # Add `id` and `name` columns filled with the same value for every row.
    df['id'] = stock_id
    df['name'] = stock_name
    driver.close()
    return df
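# Usage sketch for news_finder(): it expects chromedriver.exe next to the
# script and a start date in the YYYY-MM-DD form the Naver search URL uses.
articles = news_finder("005930", "2021-01-01")  # Samsung Electronics
print(articles[["date", "title"]].head())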
def test_io_in_kosdaq_market(self):
    tickers = stock.get_market_ticker_list("20190225", market="KOSDAQ")
    for ticker in tickers:
        name = stock.get_market_ticker_name(ticker)
        self.assertIsInstance(name, str)
        self.assertNotEqual(len(name), 0)
def test_io(self):
    name = stock.get_market_ticker_name("000660")
    self.assertIsInstance(name, str)
    self.assertNotEqual(len(name), 0)
simulation_size = 10
num_layers = 2
size_layer = 256
timestamp = 10
epoch = 500
dropout_rate = 0.8
test_size = 30
learning_rate = 0.0001

date_Start = '20200101'
stock_input = "코리아센터"
date_End = datetime.today().strftime("%Y%m%d")
print("Daily candle dates {} - {}".format(date_Start, date_End))

# Resolve the ticker for the requested stock name.
for ticker in stock.get_market_ticker_list(market="ALL"):
    stock_name = stock.get_market_ticker_name(ticker)
    if stock_name == stock_input:
        print('Found : {}, ticker : {}'.format(stock_input, ticker))
        korea_center_ticker = ticker
        korea_center_name = stock_input

df_korea = stock.get_market_ohlcv_by_date(date_Start, date_End,
                                          korea_center_ticker)
df_korea = df_korea.reset_index()
print(len(df_korea))
df_korea.insert(5, '종가2', df_korea['종가'])
# df = pd.read_csv('../dataset/GOOG-year.csv')
# df_korea.rename(columns=df.columns)
# print(df_korea.head())
# print(df.head())
print(df_korea.iloc[:, 4:5].tail())
def get_name(code: str):
    if code not in name_cache:
        name_cache.update({code: stock.get_market_ticker_name(code)})
    return name_cache.get(code)
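# Minimal sketch of the memoised lookup above: name_cache must exist at module
# scope before get_name() is first called.
name_cache = {}
print(get_name("000660"))  # first call hits the KRX API (SK hynix)
print(get_name("000660"))  # second call is served from name_cache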
from pykrx import stock
import json

# Each company name has a corresponding ticker; fetch the full list.
tickers = stock.get_market_ticker_list()
# print(tickers)

myJSON = {}
for ticker in tickers:
    myJSON[ticker] = stock.get_market_ticker_name(ticker)
    # name = stock.get_market_ticker_name("000540")

with open("tickerlist.json", "w") as JSON:
    json.dump(myJSON, JSON, ensure_ascii=False)
# with open("tickerlist.json", "r") as JSON:
#     myJSON = json.load(JSON)
print(myJSON)

ohlcv = stock.get_market_ohlcv_by_date("20200501", "20200510", "000660")
print(ohlcv)

df = stock.get_market_price_change_by_ticker("20200501", "20200510")
print(df)