def fetch(self, start_date, end_date, ccode, range_type, page=1):
    """Open the Yahoo Finance history page for the given date range.

    start_date: start date (datetime)
    end_date: end date (datetime)
    ccode: securities code
    range_type: fetch granularity (daily, weekly, monthly)
    page: page number (pages hold 50 rows each)

    Raises CCODENotFoundException when no result table is found for
    the securities code.
    """
    siteurl = self.SITE_URL % {
        'syear': start_date.year,
        'smon': start_date.month,
        'sday': start_date.day,
        'eyear': end_date.year,
        'emon': end_date.month,
        'eday': end_date.day,
        'page': page,
        'range_type': range_type,
        'ccode': ccode
    }
    # try/finally guarantees the connection is closed even if read() raises.
    fp = urlopen(siteurl)
    try:
        html = fp.read()
    finally:
        fp.close()
    soup = html_parser(html)
    self._elms = soup.findAll("table", attrs={"class": "boardFin yjSt marB6"})
    if len(self._elms) == 0:
        raise CCODENotFoundException("証券コードが見つかりません")
    # Skip the header row; keep only the data rows of the first table.
    self._elms = self._elms[0].findAll("tr")[1:]
    debuglog(siteurl)
    debuglog(len(self._elms))
def fetch(self, code):
    """Fetch the stock-price page and cache the matching elements.

    code: securities code
    """
    siteurl = self.SITE_URL % {'code': code}
    # try/finally guarantees the connection is closed even if read() raises.
    fp = urlopen(siteurl)
    try:
        html = fp.read()
    finally:
        fp.close()
    soup = html_parser(html)
    self._elm = soup.findAll("dd", attrs={"class": "ymuiEditLink mar0"})
    debuglog(siteurl)
def fetch(self, code):
    """Fetch the stock-price page and cache the first right-aligned row.

    code: securities code
    """
    siteurl = self.SITE_URL % {'code': code}
    # try/finally guarantees the connection is closed even if read() raises.
    fp = urllib2.urlopen(siteurl)
    try:
        html = fp.read()
    finally:
        fp.close()
    # The page is served as EUC-JP; re-encode to UTF-8 for the parser.
    html = html.decode("euc_jp", "ignore").encode("utf8")
    soup = html_parser(html)
    self._elm = soup.find("tr", attrs={"align": "right"})
    debuglog(siteurl)
def fetch(self, code):
    """Fetch the stock-price page and cache the first right-aligned row.

    code: securities code
    """
    siteurl = self.SITE_URL % {'code': code}
    # try/finally guarantees the connection is closed even if read() raises.
    fp = urllib2.urlopen(siteurl)
    try:
        html = fp.read()
    finally:
        fp.close()
    # The page is served as EUC-JP; re-encode to UTF-8 for the parser.
    html = html.decode("euc_jp", "ignore").encode("utf8")
    soup = html_parser(html)
    self._elm = soup.find("tr", attrs={"align": "right"})
    debuglog(siteurl)
def fetch(self, ids, page):
    """Fetch the brand listing page and cache the matching rows.

    ids: industry-sector ID
    page: page number
    """
    siteurl = self.SITE_URL % {'ids': ids, 'page': page}
    # try/finally guarantees the connection is closed even if read() raises.
    fp = urllib2.urlopen(siteurl)
    try:
        html = fp.read()
    finally:
        fp.close()
    soup = html_parser(html)
    self._elms = soup.findAll("tr", attrs={"class": "yjM"})
    debuglog(siteurl)
def fetch(self, ccode):
    """Fetch the financial-data page and cache the chartFinance div.

    ccode: securities code
    """
    siteurl = self.SITE_URL % {'ccode': ccode}
    # try/finally guarantees the connection is closed even if read() raises.
    fp = urlopen(siteurl)
    try:
        html = fp.read()
    finally:
        fp.close()
    html = html.decode("utf-8", "ignore")
    soup = html_parser(html)
    # Match only divs whose class list is exactly ["chartFinance"].
    self._elm = soup.find(lambda tag: tag.name == "div"
                          and tag.get("class") == ["chartFinance"])
    debuglog(siteurl)
def fetch(self, ccode):
    """Fetch the financial-data page and cache the chartFinance div.

    ccode: securities code
    """
    siteurl = self.SITE_URL % {'ccode': ccode}
    # try/finally guarantees the connection is closed even if read() raises.
    fp = urllib2.urlopen(siteurl)
    try:
        html = fp.read()
    finally:
        fp.close()
    # The page is served as EUC-JP; re-encode to UTF-8 for the parser.
    html = html.decode("euc_jp", "ignore").encode("utf8")
    soup = html_parser(html)
    self._elm = soup.find("div", attrs={"class": "chartFinance"})
    debuglog(siteurl)
def fetch(self, ids, page):
    """Fetch the brand listing page and cache the matching rows.

    ids: industry-sector ID
    page: page number
    """
    siteurl = self.SITE_URL % {'ids': ids, 'page': page}
    # try/finally guarantees the connection is closed even if read() raises.
    fp = urllib2.urlopen(siteurl)
    try:
        html = fp.read()
    finally:
        fp.close()
    soup = html_parser(html)
    self._elms = soup.findAll("tr", attrs={"class": "yjM"})
    debuglog(siteurl)
def fetch(self, ccode):
    """Fetch the financial-data page and cache the chartFinance div.

    ccode: securities code
    """
    siteurl = self.SITE_URL % {'ccode': ccode}
    # try/finally guarantees the connection is closed even if read() raises.
    fp = urlopen(siteurl)
    try:
        html = fp.read()
    finally:
        fp.close()
    html = html.decode("utf-8", "ignore")
    soup = html_parser(html)
    # Match only divs whose class list is exactly ["chartFinance"].
    self._elm = soup.find(lambda tag: tag.name == "div"
                          and tag.get("class") == ["chartFinance"])
    debuglog(siteurl)
def fetch(self, start_date, end_date, ccode, range_type, page=1):
    """Open the Yahoo Finance history page for the given date range.

    start_date: start date (datetime)
    end_date: end date (datetime)
    ccode: securities code
    range_type: fetch granularity (daily, weekly, monthly)
    page: page number (pages hold 50 rows each)
    """
    siteurl = self.SITE_URL % {'syear': start_date.year,
                               'smon': start_date.month,
                               'sday': start_date.day,
                               'eyear': end_date.year,
                               'emon': end_date.month,
                               'eday': end_date.day,
                               'page': page,
                               'range_type': range_type,
                               'ccode': ccode}
    # try/finally guarantees the connection is closed even if read() raises.
    fp = urlopen(siteurl)
    try:
        html = fp.read()
    finally:
        fp.close()
    soup = html_parser(html)
    # NOTE(review): [0] raises IndexError when the table is absent (e.g.
    # unknown securities code) — confirm callers expect that over a
    # domain-specific exception. Header row is skipped with [1:].
    self._elms = soup.findAll(
        "table", attrs={"class": "boardFin yjSt marB6"})[0].findAll("tr")[1:]
    debuglog(siteurl)
    debuglog(len(self._elms))
def fetch(self, start_date, end_date, ccode, range_type, page=0):
    """Open the Yahoo Finance history page for the given date range.

    start_date: start date (datetime)
    end_date: end date (datetime)
    ccode: securities code
    range_type: fetch granularity (daily, weekly, monthly)
    page: zero-based page number; converted to a row offset by
          multiplying with self.COLUMN_NUM (pages hold 50 rows each)
    """
    siteurl = self.SITE_URL % {'syear': start_date.year,
                               'smon': start_date.month,
                               'sday': start_date.day,
                               'eyear': end_date.year,
                               'emon': end_date.month,
                               'eday': end_date.day,
                               'page': page * self.COLUMN_NUM,
                               'range_type': range_type,
                               'ccode': ccode}
    # try/finally guarantees the connection is closed even if read() raises.
    fp = urllib2.urlopen(siteurl)
    try:
        html = fp.read()
    finally:
        fp.close()
    # The page is served as EUC-JP; re-encode to UTF-8 for the parser.
    html = html.decode("euc_jp", "ignore").encode("utf8")
    soup = html_parser(html)
    # Data rows look like: <tr align=right bgcolor="#ffffff">
    self._elms = soup.findAll("tr",
                              attrs={"align": "right",
                                     "bgcolor": "#ffffff"})
    debuglog(siteurl)
    debuglog(len(self._elms))