Example #1
    def __search_screener(self):
        """ Private function used to return data from the FinViz screener. """

        self._page_content, self._url = http_request_get(
            'https://finviz.com/screener.ashx',
            payload={
                'v': self._table,
                't': ','.join(self._tickers),
                'f': ','.join(self._filters),
                'o': self._order,
                's': self._signal,
                'c': ','.join(self._custom)
            })

        self._rows = self.__check_rows()
        self.headers = self.__get_table_headers()
        page_urls = scrape.get_page_urls(self._page_content, self._rows,
                                         self._url)

        async_connector = Connector(scrape.get_table, page_urls, self.headers,
                                    self._rows)
        pages_data = async_connector.run_connector()

        # Flatten the per-page row lists into a single list of rows
        data = []
        for page in pages_data:
            for row in page:
                data.append(row)

        return data
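This private method runs internally when a screener instance is built; a minimal usage sketch, assuming the standard Screener constructor from the finviz package (the filter codes below are illustrative placeholders, not taken from the example):

    from finviz.screener import Screener

    # Hypothetical filters; __search_screener is invoked during construction
    stock_list = Screener(filters=['exch_nasd', 'idx_sp500'],
                          table='Performance',
                          order='price')
    print(len(stock_list.data))  # rows scraped by __search_screener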
Example #2
    def get_charts(self, period='d', size='l', chart_type='c', ta='1'):
        """
        Downloads the charts of all tickers shown by the table.

        :param period: chart period, e.g. 'd', 'w' or 'm' for daily, weekly and monthly
        :type period: str
        :param size: chart size, e.g. 'l' for large or 's' for small - large gives better quality at a larger file size
        :type size: str
        :param chart_type: chart type: 'c' for candles or 'l' for lines
        :type chart_type: str
        :param ta: technical analysis overlay, '1' to show or '0' to hide
        :type ta: str
        """

        payload = {
            'ty': chart_type,
            'ta': ta,
            'p': period,
            's': size
        }

        base_url = f"https://finviz.com/chart.ashx?{urlencode(payload)}"
        chart_urls = []

        for row in self.data:
            chart_urls.append(f"{base_url}&t={row.get('Ticker')}")

        async_connector = Connector(scrape.download_chart_image, chart_urls)
        async_connector.run_connector()
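A short usage sketch for get_charts, assuming a Screener instance already holds screener rows (the filter value is a placeholder); the images are fetched by scrape.download_chart_image through the async Connector:

    from finviz.screener import Screener

    stock_list = Screener(filters=['idx_sp500'])  # placeholder filter
    # Download large daily candlestick charts with the technical analysis overlay
    stock_list.get_charts(period='d', size='l', chart_type='c', ta='1')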
Example #3
    def get_ticker_details(self):
        """
        Downloads the details of all tickers shown by the table.
        """

        base_url = 'https://finviz.com/quote.ashx?'
        ticker_urls = []

        for row in self.data:
            ticker_urls.append(base_url + f"&t={row.get('Ticker')}")

        async_connector = Connector(scrape.download_ticker_details,
                                    ticker_urls,
                                    cssselect=True)
        ticker_data = async_connector.run_connector()

        # Merge each ticker's detail fields back into its matching screener row
        for entry in ticker_data:
            for key, value in entry.items():
                for ticker_generic in self.data:
                    if ticker_generic.get('Ticker') == key:
                        # Extend the headers once with the new detail columns
                        if 'Sales' not in self.headers:
                            self.headers.extend(list(value[0].keys()))

                        ticker_generic.update(value[0])
                        self.analysis.extend(value[1])

        return self.data
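A usage sketch for get_ticker_details, again assuming the Screener constructor shown above (the ticker symbols are placeholders); the returned rows are the screener data enriched with fields from each ticker's quote page:

    from finviz.screener import Screener

    stock_list = Screener(tickers=['AAPL', 'MSFT'])  # placeholder tickers
    detailed = stock_list.get_ticker_details()
    print(detailed[0].get('Sales'))  # one of the added detail columns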
Example #4
    def __search_screener(self):
        """ Private function used to return data from the FinViz screener. """

        self._page_content, self._url = http_request_get(
            "https://finviz.com/screener.ashx",
            payload={
                "v": self._table,
                "t": ",".join(self._tickers),
                "f": ",".join(self._filters),
                "o": self._order,
                "s": self._signal,
                "c": ",".join(self._custom),
            },
            user_agent=self._user_agent,
        )

        self._rows = self.__check_rows()
        self.headers = self.__get_table_headers()

        if self._request_method == "async":
            async_connector = Connector(
                scrape.get_table,
                scrape.get_page_urls(self._page_content, self._rows, self._url),
                self._user_agent,
                self.headers,
                self._rows,
                css_select=True,
            )
            pages_data = async_connector.run_connector()
        else:
            pages_data = sequential_data_scrape(
                scrape.get_table,
                scrape.get_page_urls(self._page_content, self._rows, self._url),
                self._user_agent,
                self.headers,
                self._rows,
            )

        # Flatten the per-page row lists into a single list of rows
        data = []
        for page in pages_data:
            for row in page:
                data.append(row)

        return data
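This later variant routes requests through either the async Connector or sequential_data_scrape; a minimal sketch of choosing between the two, assuming the constructor exposes the request_method and user_agent arguments implied by self._request_method and self._user_agent (both names are assumptions here):

    from finviz.screener import Screener

    # Hypothetical arguments; request_method selects which branch above runs
    fast = Screener(filters=['idx_sp500'], request_method='async')
    safe = Screener(filters=['idx_sp500'], request_method='sequential',
                    user_agent='Mozilla/5.0')
    print(len(fast.data), len(safe.data))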