Example #1
0
    def get_charts(self, period="d", size="l", chart_type="c", ta="1"):
        """
        Downloads the charts of all tickers shown by the table.

        :param period: chart period: 'd' (daily), 'w' (weekly) or 'm' (monthly)
        :type period: str
        :param size: chart size: 'l' (large, better quality/bigger file) or 's' (small)
        :type size: str
        :param chart_type: chart type: 'c' for candles or 'l' for lines
        :type chart_type: str
        :param ta: technical analysis overlay: '1' to show, '0' to hide
        :type ta: str
        """

        # Common query-string part shared by every chart URL.
        query = urlencode({"ty": chart_type, "ta": ta, "p": period, "s": size})

        chart_urls = []
        for entry in self.data:
            chart_urls.append(
                f"https://finviz.com/chart.ashx?{query}&t={entry.get('Ticker')}"
            )

        sequential_data_scrape(
            scrape.download_chart_image,
            chart_urls,
            self._user_agent,
        )
Example #2
0
File: screener.py  Project: WSJUSA/finviz
    def __search_screener(self):
        """ Private helper: query the FinViz screener and return all table rows. """

        request_payload = {
            'v': self._table,
            't': ','.join(self._tickers),
            'f': ','.join(self._filters),
            'o': self._order,
            's': self._signal,
            'c': ','.join(self._custom)
        }
        self._page_content, self._url = http_request_get(
            'https://finviz.com/screener.ashx', payload=request_payload)

        self._rows = self.__check_rows()
        self.headers = self.__get_table_headers()
        page_urls = scrape.get_page_urls(self._page_content, self._rows,
                                         self._url)
        pages_data = sequential_data_scrape(scrape.get_table, page_urls,
                                            self._delay, self.headers,
                                            self._rows)

        # Flatten the per-page lists of rows into a single list.
        return [row for page in pages_data for row in page]
Example #3
0
File: screener.py  Project: WSJUSA/finviz
    def get_ticker_details(self):
        """
        Downloads the details of all tickers shown by the table.
        """

        base_url = 'https://finviz.com/quote.ashx?'
        ticker_urls = [f"{base_url}&t={row.get('Ticker')}" for row in self.data]

        ticker_data = sequential_data_scrape(scrape.download_ticker_details,
                                             ticker_urls, self._delay)

        # Merge each downloaded detail dict into its matching screener row.
        for entry in ticker_data:
            for ticker, details in entry.items():
                for generic_row in self.data:
                    if generic_row.get('Ticker') != ticker:
                        continue
                    # Extend the headers only once; 'Sales' acts as the
                    # sentinel showing the detail columns were already added.
                    if 'Sales' not in self.headers:
                        self.headers.extend(list(details[0].keys()))

                    generic_row.update(details[0])
                    self.analysis.extend(details[1])

        return self.data
Example #4
0
    def get_ticker_details(self):
        """
        Downloads the details of all tickers shown by the table.
        """

        detail_urls = []
        for row in self.data:
            detail_urls.append(
                f"https://finviz.com/quote.ashx?&t={row.get('Ticker')}"
            )

        ticker_data = sequential_data_scrape(
            scrape.download_ticker_details, detail_urls, self._user_agent
        )

        # Merge each downloaded detail dict into its matching screener row.
        for entry in ticker_data:
            for ticker, details in entry.items():
                for generic_row in self.data:
                    if generic_row.get("Ticker") != ticker:
                        continue
                    # 'Sales' marks that the detail columns were already
                    # appended to the headers; only extend once.
                    if "Sales" not in self.headers:
                        self.headers.extend(list(details[0].keys()))

                    generic_row.update(details[0])
                    self.analysis.extend(details[1])

        return self.data
Example #5
0
    def __search_screener(self):
        """Private helper: query the FinViz screener and return all table rows."""

        screener_params = {
            "v": self._table,
            "t": ",".join(self._tickers),
            "f": ",".join(self._filters),
            "o": self._order,
            "s": self._signal,
            "c": ",".join(self._custom),
        }
        self._page_content, self._url = http_request_get(
            "https://finviz.com/screener.ashx", payload=screener_params
        )

        self._rows = self.__check_rows()
        self.headers = self.__get_table_headers()
        page_urls = scrape.get_page_urls(self._page_content, self._rows, self._url)
        pages_data = sequential_data_scrape(
            scrape.get_table, page_urls, self._delay, self.headers, self._rows
        )

        # Flatten the per-page lists of rows into a single list.
        return [row for page in pages_data for row in page]
Example #6
0
 def get_ticker_details_df(self):
     """
     Downloads the details of all tickers shown by the Screener.

     :return: one row of detail fields per ticker, with the symbol stored
         in a 'Ticker' column
     :rtype: pandas.DataFrame
     """

     base_url = 'https://finviz.com/quote.ashx?'
     ticker_urls = [f"{base_url}&t={row.get('Ticker')}" for row in self.data]
     ticker_data = sequential_data_scrape(scrape.download_ticker_details,
                                          ticker_urls, self._delay)

     quote_data = []
     for quote in ticker_data:
         # Each entry maps a single ticker symbol to its scraped data;
         # hoist the key once instead of materializing list(quote.keys())
         # three times as the original did.
         ticker = next(iter(quote))
         # NOTE(review): quote[ticker] appears to be (details, analysis);
         # only the detail dict is kept here — confirm against
         # download_ticker_details.
         details = quote[ticker][0]
         details['Ticker'] = ticker
         quote_data.append(details)

     return DataFrame(quote_data)