def fetch(self, terms, page=1):
    """Fetch stock-search results from Yahoo.

    terms: search keywords
    page: result page number
    """
    siteurl = self.SITE_URL % {'terms': terms, 'page': page}
    fp = urllib2.urlopen(siteurl)
    try:
        # Ensure the connection is closed even if read() raises.
        html = fp.read()
    finally:
        fp.close()
    soup = html_parser(html)
    elm = soup.find('div', {'class': 'ymuiPagingTop yjSt marB4 clearFix'})
    if elm:
        # Total hit count -> page count (50 results per page).
        max_page = self._text(elm)
        if max_page:
            self._max_page = int(math.ceil(int(max_page) / 50.0))
        # Result rows.
        elm = soup.find("div", {'class': 'boardFinList fsize13px s130 marB10'})
        self._elms = elm.findAll('tr')
        self._detail = False
    else:
        # No paging header: presumably a single exact match shown as a
        # detail page -- keep that one element and flag detail mode.
        elm = soup.find('div', {'class': 'selectFinTitle yjL'})
        if elm:
            self._elms = [elm]
            self._max_page = 0
            self._detail = True
def fetch(self, terms, page=1):
    """Fetch stock-search results from Yahoo.

    terms: search keywords
    page: result page number
    """
    siteurl = self.SITE_URL % {'terms': terms, 'page': page}
    fp = urllib2.urlopen(siteurl)
    try:
        # Ensure the connection is closed even if read() raises.
        html = fp.read()
    finally:
        fp.close()
    soup = html_parser(html)
    elm = soup.find('div', {'class': 'ymuiPagingTop yjSt marB4 clearFix'})
    if elm:
        # Total hit count -> page count (50 results per page).
        max_page = self._text(elm)
        if max_page:
            self._max_page = int(math.ceil(int(max_page) / 50.0))
        # Result rows.
        elm = soup.find("div", {'class': 'boardFinList fsize13px s130 marB10'})
        self._elms = elm.findAll('tr')
        self._detail = False
    else:
        # No paging header: presumably a single exact match shown as a
        # detail page -- keep that one element and flag detail mode.
        elm = soup.find('div', {'class': 'selectFinTitle yjL'})
        if elm:
            self._elms = [elm]
            self._max_page = 0
            self._detail = True
def fetch(self, start_date, end_date, ccode, range_type, page=1):
    """Open the Yahoo history page for the given date range.

    start_date: start datetime
    end_date: end datetime
    ccode: securities code
    range_type: aggregation type (daily, weekly, monthly)
    page: page number (50 rows per page)

    Raises CCODENotFoundException when the code yields no data table.
    """
    siteurl = self.SITE_URL % {
        'syear': start_date.year,
        'smon': start_date.month,
        'sday': start_date.day,
        'eyear': end_date.year,
        'emon': end_date.month,
        'eday': end_date.day,
        'page': page,
        'range_type': range_type,
        'ccode': ccode
    }
    fp = urlopen(siteurl)
    try:
        # Ensure the connection is closed even if read() raises.
        html = fp.read()
    finally:
        fp.close()
    soup = html_parser(html)
    self._elms = soup.findAll("table", attrs={"class": "boardFin yjSt marB6"})
    if not self._elms:
        raise CCODENotFoundException("証券コードが見つかりません")
    # Drop the header row; keep only the data rows.
    self._elms = self._elms[0].findAll("tr")[1:]
    debuglog(siteurl)
    debuglog(len(self._elms))
def fetch(self, code):
    """Fetch stock price data.

    code: securities code
    """
    siteurl = self.SITE_URL % {'code': code}
    fp = urlopen(siteurl)
    try:
        # Ensure the connection is closed even if read() raises.
        html = fp.read()
    finally:
        fp.close()
    soup = html_parser(html)
    # All <dd class="ymuiEditLink mar0"> elements hold the price fields.
    self._elm = soup.findAll("dd", attrs={"class": "ymuiEditLink mar0"})
    debuglog(siteurl)
def fetch(self, code):
    """Fetch stock price data.

    code: securities code
    """
    siteurl = self.SITE_URL % {'code': code}
    fp = urllib2.urlopen(siteurl)
    try:
        # Ensure the connection is closed even if read() raises.
        html = fp.read()
    finally:
        fp.close()
    # The page is served as EUC-JP; re-encode to UTF-8 for parsing.
    html = html.decode("euc_jp", "ignore").encode("utf8")
    soup = html_parser(html)
    self._elm = soup.find("tr", attrs={"align": "right"})
    debuglog(siteurl)
def fetch(self, code):
    """Fetch stock price data.

    code: securities code
    """
    siteurl = self.SITE_URL % {'code': code}
    fp = urllib2.urlopen(siteurl)
    try:
        # Ensure the connection is closed even if read() raises.
        html = fp.read()
    finally:
        fp.close()
    # The page is served as EUC-JP; re-encode to UTF-8 for parsing.
    html = html.decode("euc_jp", "ignore").encode("utf8")
    soup = html_parser(html)
    self._elm = soup.find("tr", attrs={"align": "right"})
    debuglog(siteurl)
def fetch(self, ids, page):
    """Fetch brand (ticker) data for an industry sector.

    ids: industry-sector ID
    page: page number
    """
    siteurl = self.SITE_URL % {'ids': ids, 'page': page}
    fp = urllib2.urlopen(siteurl)
    try:
        # Ensure the connection is closed even if read() raises.
        html = fp.read()
    finally:
        fp.close()
    soup = html_parser(html)
    # Each <tr class="yjM"> row is one brand entry.
    self._elms = soup.findAll("tr", attrs={"class": "yjM"})
    debuglog(siteurl)
def fetch(self, ccode):
    """Fetch financial data.

    ccode: securities code
    """
    siteurl = self.SITE_URL % {'ccode': ccode}
    fp = urlopen(siteurl)
    try:
        # Ensure the connection is closed even if read() raises.
        html = fp.read()
    finally:
        fp.close()
    html = html.decode("utf-8", "ignore")
    soup = html_parser(html)
    # Exact class-list match: only divs whose class is exactly
    # ["chartFinance"], not those that merely contain it.
    self._elm = soup.find(
        lambda tag: tag.name == "div" and tag.get("class") == ["chartFinance"])
    debuglog(siteurl)
def fetch(self, ccode):
    """Fetch financial data.

    ccode: securities code
    """
    siteurl = self.SITE_URL % {'ccode': ccode}
    fp = urllib2.urlopen(siteurl)
    try:
        # Ensure the connection is closed even if read() raises.
        html = fp.read()
    finally:
        fp.close()
    # The page is served as EUC-JP; re-encode to UTF-8 for parsing.
    html = html.decode("euc_jp", "ignore").encode("utf8")
    soup = html_parser(html)
    self._elm = soup.find("div", attrs={"class": "chartFinance"})
    debuglog(siteurl)
def fetch(self, ids, page):
    """Fetch brand (ticker) data for an industry sector.

    ids: industry-sector ID
    page: page number
    """
    siteurl = self.SITE_URL % {'ids': ids, 'page': page}
    fp = urllib2.urlopen(siteurl)
    try:
        # Ensure the connection is closed even if read() raises.
        html = fp.read()
    finally:
        fp.close()
    soup = html_parser(html)
    # Each <tr class="yjM"> row is one brand entry.
    self._elms = soup.findAll("tr", attrs={"class": "yjM"})
    debuglog(siteurl)
def fetch(self, ccode):
    """Fetch financial data.

    ccode: securities code
    """
    siteurl = self.SITE_URL % {'ccode': ccode}
    fp = urlopen(siteurl)
    try:
        # Ensure the connection is closed even if read() raises.
        html = fp.read()
    finally:
        fp.close()
    html = html.decode("utf-8", "ignore")
    soup = html_parser(html)
    # Exact class-list match: only divs whose class is exactly
    # ["chartFinance"], not those that merely contain it.
    self._elm = soup.find(
        lambda tag: tag.name == "div" and tag.get("class") == ["chartFinance"])
    debuglog(siteurl)
def fetch(self, ccode):
    """Fetch the page table for a securities code.

    ccode: securities code
    """
    url = self.SITE_URL % {"ccode": ccode}
    fp = urlopen(url)
    try:
        # Ensure the connection is closed even if read() raises.
        html = fp.read()
    finally:
        fp.close()
    html = html.decode("utf-8", "ignore")
    soup = html_parser(html)
    # findAll returns a list of tables; only the first one is needed.
    elm = soup.findAll("table", {"class": "yjMt"})
    self._elm = str(elm[0])
    self._ccode = ccode
def fetch(self, start_date, end_date, ccode, range_type, page=1):
    """Open the Yahoo history page for the given date range.

    start_date: start datetime
    end_date: end datetime
    ccode: securities code
    range_type: aggregation type (daily, weekly, monthly)
    page: page number (50 rows per page)

    Raises CCODENotFoundException when the code yields no data table
    (consistent with the sibling fetch implementation; previously this
    surfaced as a bare IndexError).
    """
    siteurl = self.SITE_URL % {
        'syear': start_date.year,
        'smon': start_date.month,
        'sday': start_date.day,
        'eyear': end_date.year,
        'emon': end_date.month,
        'eday': end_date.day,
        'page': page,
        'range_type': range_type,
        'ccode': ccode
    }
    fp = urlopen(siteurl)
    try:
        # Ensure the connection is closed even if read() raises.
        html = fp.read()
    finally:
        fp.close()
    soup = html_parser(html)
    tables = soup.findAll("table", attrs={"class": "boardFin yjSt marB6"})
    if not tables:
        raise CCODENotFoundException("証券コードが見つかりません")
    # Drop the header row; keep only the data rows.
    self._elms = tables[0].findAll("tr")[1:]
    debuglog(siteurl)
    debuglog(len(self._elms))
def fetch(self, start_date, end_date, ccode, range_type, page=0):
    """Open the Yahoo history page for the given date range.

    start_date: start datetime
    end_date: end datetime
    ccode: securities code
    range_type: aggregation type (daily, weekly, monthly)
    page: zero-based page number (scaled by COLUMN_NUM rows per page)
    """
    siteurl = self.SITE_URL % {
        'syear': start_date.year,
        'smon': start_date.month,
        'sday': start_date.day,
        'eyear': end_date.year,
        'emon': end_date.month,
        'eday': end_date.day,
        # This site paginates by row offset, not by page index.
        'page': page * self.COLUMN_NUM,
        'range_type': range_type,
        'ccode': ccode
    }
    fp = urllib2.urlopen(siteurl)
    try:
        # Ensure the connection is closed even if read() raises.
        html = fp.read()
    finally:
        fp.close()
    # The page is served as EUC-JP; re-encode to UTF-8 for parsing.
    html = html.decode("euc_jp", "ignore").encode("utf8")
    soup = html_parser(html)
    # Data rows look like: <tr align=right bgcolor="#ffffff">
    self._elms = soup.findAll("tr",
                              attrs={"align": "right",
                                     "bgcolor": "#ffffff"})
    debuglog(siteurl)
    debuglog(len(self._elms))