Beispiel #1
0
    def fetch(self, terms, page=1):
        """Fetch stock-symbol search results.

        terms: search keyword
        page: page number (50 hits per page)
        """
        siteurl = self.SITE_URL % {'terms': terms, 'page': page}
        fp = urllib2.urlopen(siteurl)
        # try/finally guarantees the connection is closed even if read() fails
        try:
            html = fp.read()
        finally:
            fp.close()
        soup = html_parser(html)

        elm = soup.find('div', {'class': 'ymuiPagingTop yjSt marB4 clearFix'})
        if elm:
            # total hit count -> number of pages at 50 hits/page
            max_page = self._text(elm)
            if max_page:
                self._max_page = int(math.ceil(int(max_page) / 50.0))
            # result rows
            elm = soup.find("div",
                            {'class': 'boardFinList fsize13px s130 marB10'})
            self._elms = elm.findAll('tr')
            self._detail = False
        else:
            # presumably a single-hit detail page — TODO confirm
            elm = soup.find('div', {'class': 'selectFinTitle yjL'})
            if elm:
                self._elms = [elm]
                self._max_page = 0
                self._detail = True
            # NOTE(review): if neither element is found, _elms/_max_page/_detail
            # keep any previous values — confirm callers handle that case.
Beispiel #2
0
 def fetch(self, terms, page=1):
     """Fetch stock-symbol search results.

     terms: search keyword
     page: page number (50 hits per page)
     """
     siteurl = self.SITE_URL % {'terms': terms, 'page': page}
     fp = urllib2.urlopen(siteurl)
     # try/finally guarantees the connection is closed even if read() fails
     try:
         html = fp.read()
     finally:
         fp.close()
     soup = html_parser(html)

     elm = soup.find('div', {'class': 'ymuiPagingTop yjSt marB4 clearFix'})
     if elm:
         # total hit count -> number of pages at 50 hits/page
         max_page = self._text(elm)
         if max_page:
             self._max_page = int(math.ceil(int(max_page) / 50.0))
         # result rows
         elm = soup.find("div", {'class': 'boardFinList fsize13px s130 marB10'})
         self._elms = elm.findAll('tr')
         self._detail = False
     else:
         # presumably a single-hit detail page — TODO confirm
         elm = soup.find('div', {'class': 'selectFinTitle yjL'})
         if elm:
             self._elms = [elm]
             self._max_page = 0
             self._detail = True
         # NOTE(review): if neither element is found, _elms/_max_page/_detail
         # keep any previous values — confirm callers handle that case.
Beispiel #3
0
 def fetch(self, start_date, end_date, ccode, range_type, page=1):
     """Open the Yahoo page for the given date range.

     start_date: start date (datetime)
     end_date: end date (datetime)
     ccode: securities code
     range_type: fetch granularity (daily, weekly, monthly)
     page: page number (50 rows per page)

     Raises CCODENotFoundException when the code yields no result table.
     """
     siteurl = self.SITE_URL % {
         'syear': start_date.year,
         'smon': start_date.month,
         'sday': start_date.day,
         'eyear': end_date.year,
         'emon': end_date.month,
         'eday': end_date.day,
         'page': page,
         'range_type': range_type,
         'ccode': ccode
     }
     fp = urlopen(siteurl)
     # try/finally guarantees the connection is closed even if read() fails
     try:
         html = fp.read()
     finally:
         fp.close()
     soup = html_parser(html)
     self._elms = soup.findAll("table",
                               attrs={"class": "boardFin yjSt marB6"})
     if not self._elms:
         raise CCODENotFoundException("証券コードが見つかりません")
     # drop the header row
     self._elms = self._elms[0].findAll("tr")[1:]
     debuglog(siteurl)
     debuglog(len(self._elms))
Beispiel #4
0
Datei: price.py Projekt: tkyn/jsm
 def fetch(self, code):
     """Fetch price data for a stock.

     code: securities code
     """
     siteurl = self.SITE_URL % {'code': code}
     fp = urlopen(siteurl)
     # try/finally guarantees the connection is closed even if read() fails
     try:
         html = fp.read()
     finally:
         fp.close()
     soup = html_parser(html)
     self._elm = soup.findAll("dd", attrs={"class": "ymuiEditLink mar0"})
     debuglog(siteurl)
Beispiel #5
0
 def fetch(self, code):
     """Fetch price data for a stock.

     code: securities code
     """
     siteurl = self.SITE_URL % {'code': code}
     fp = urllib2.urlopen(siteurl)
     # try/finally guarantees the connection is closed even if read() fails
     try:
         html = fp.read()
     finally:
         fp.close()
     html = html.decode("euc_jp", "ignore").encode("utf8")  # convert to UTF-8
     soup = html_parser(html)
     self._elm = soup.find("tr", attrs={"align": "right"})
     debuglog(siteurl)
Beispiel #6
0
 def fetch(self, code):
     """Fetch price data for a stock.

     code: securities code
     """
     siteurl = self.SITE_URL % {'code': code}
     fp = urllib2.urlopen(siteurl)
     # try/finally guarantees the connection is closed even if read() fails
     try:
         html = fp.read()
     finally:
         fp.close()
     html = html.decode("euc_jp", "ignore").encode("utf8")  # convert to UTF-8
     soup = html_parser(html)
     self._elm = soup.find("tr", attrs={"align": "right"})
     debuglog(siteurl)
Beispiel #7
0
 def fetch(self, ids, page):
     """Fetch listing data for an industry sector.

     ids: sector ID
     page: page number
     """
     siteurl = self.SITE_URL % {'ids': ids, 'page': page}
     fp = urllib2.urlopen(siteurl)
     # try/finally guarantees the connection is closed even if read() fails
     try:
         html = fp.read()
     finally:
         fp.close()
     soup = html_parser(html)
     self._elms = soup.findAll("tr", attrs={"class": "yjM"})
     debuglog(siteurl)
Beispiel #8
0
 def fetch(self, ccode):
     """Fetch financial data.

     ccode: securities code
     """
     siteurl = self.SITE_URL % {'ccode': ccode}
     fp = urlopen(siteurl)
     # try/finally guarantees the connection is closed even if read() fails
     try:
         html = fp.read()
     finally:
         fp.close()
     html = html.decode("utf-8", "ignore")
     soup = html_parser(html)
     # lambda matcher: require class to be exactly ["chartFinance"], not a superset
     self._elm = soup.find(lambda tag: tag.name == "div" and tag.get("class") == ["chartFinance"])
     debuglog(siteurl)
Beispiel #9
0
 def fetch(self, ccode):
     """Fetch financial data.

     ccode: securities code
     """
     siteurl = self.SITE_URL % {'ccode': ccode}
     fp = urllib2.urlopen(siteurl)
     # try/finally guarantees the connection is closed even if read() fails
     try:
         html = fp.read()
     finally:
         fp.close()
     html = html.decode("euc_jp", "ignore").encode("utf8")  # convert to UTF-8
     soup = html_parser(html)
     self._elm = soup.find("div", attrs={"class": "chartFinance"})
     debuglog(siteurl)
Beispiel #10
0
 def fetch(self, ids, page):
     """Fetch listing data for an industry sector.

     ids: sector ID
     page: page number
     """
     siteurl = self.SITE_URL % {'ids': ids, 'page': page}
     fp = urllib2.urlopen(siteurl)
     # try/finally guarantees the connection is closed even if read() fails
     try:
         html = fp.read()
     finally:
         fp.close()
     soup = html_parser(html)
     self._elms = soup.findAll("tr", attrs={"class": "yjM"})
     debuglog(siteurl)
Beispiel #11
0
 def fetch(self, ccode):
     """Fetch financial data.

     ccode: securities code
     """
     siteurl = self.SITE_URL % {'ccode': ccode}
     fp = urlopen(siteurl)
     # try/finally guarantees the connection is closed even if read() fails
     try:
         html = fp.read()
     finally:
         fp.close()
     html = html.decode("utf-8", "ignore")
     soup = html_parser(html)
     # lambda matcher: require class to be exactly ["chartFinance"], not a superset
     self._elm = soup.find(lambda tag: tag.name == "div" and tag.get(
         "class") == ["chartFinance"])
     debuglog(siteurl)
Beispiel #12
0
    def fetch(self, ccode):
        """Fetch the page for the given securities code and keep the
        first "yjMt" table as raw HTML.

        ccode: securities code
        """
        url = self.SITE_URL % {"ccode": ccode}

        fp = urlopen(url)
        # try/finally guarantees the connection is closed even if read() fails
        try:
            html = fp.read()
        finally:
            fp.close()
        html = html.decode("utf-8", "ignore")
        soup = html_parser(html)
        # findAll returns a list of matches; only the first one is needed.
        # NOTE(review): raises IndexError when no table matches — confirm
        # callers expect that for an unknown code.
        elm = soup.findAll("table", {
            "class": "yjMt"
        })
        self._elm = str(elm[0])
        self._ccode = ccode
Beispiel #13
0
 def fetch(self, start_date, end_date, ccode, range_type, page=1):
     """Open the Yahoo page for the given date range.

     start_date: start date (datetime)
     end_date: end date (datetime)
     ccode: securities code
     range_type: fetch granularity (daily, weekly, monthly)
     page: page number (50 rows per page)
     """
     siteurl = self.SITE_URL % {'syear': start_date.year, 'smon': start_date.month, 'sday': start_date.day,
                                'eyear': end_date.year, 'emon': end_date.month, 'eday': end_date.day,
                                'page': page, 'range_type': range_type, 'ccode': ccode}
     fp = urlopen(siteurl)
     # try/finally guarantees the connection is closed even if read() fails
     try:
         html = fp.read()
     finally:
         fp.close()
     soup = html_parser(html)
     # first matching table, minus the header row
     # NOTE(review): [0] raises IndexError when the code yields no table —
     # confirm callers handle unknown codes.
     self._elms = soup.findAll("table", attrs={"class": "boardFin yjSt marB6"})[0].findAll("tr")[1:]
     debuglog(siteurl)
     debuglog(len(self._elms))
Beispiel #14
0
 def fetch(self, start_date, end_date, ccode, range_type, page=0):
     """Open the Yahoo page for the given date range.

     start_date: start date (datetime)
     end_date: end date (datetime)
     ccode: securities code
     range_type: fetch granularity (daily, weekly, monthly)
     page: page number (offset scaled by COLUMN_NUM rows per page)
     """
     siteurl = self.SITE_URL % {'syear': start_date.year, 'smon': start_date.month, 'sday': start_date.day,
                                'eyear': end_date.year, 'emon': end_date.month, 'eday': end_date.day,
                                'page': page*self.COLUMN_NUM, 'range_type': range_type, 'ccode': ccode}
     fp = urllib2.urlopen(siteurl)
     # try/finally guarantees the connection is closed even if read() fails
     try:
         html = fp.read()
     finally:
         fp.close()
     html = html.decode("euc_jp", "ignore").encode("utf8")  # convert to UTF-8
     soup = html_parser(html)
     # data rows look like: <tr align=right bgcolor="#ffffff">
     self._elms = soup.findAll("tr", attrs={"align": "right", "bgcolor": "#ffffff"})
     debuglog(siteurl)
     debuglog(len(self._elms))