Example #1
class Coinbase:
    def __init__(self,
                 api_key=None,
                 api_secret=None,
                 timeout=DEFAULT_TIMEOUT,
                 max_retries=MAX_RETRIES):
        #internal initialization
        self.__request = RequestUrl(timeout, max_retries)
        self.urls = CoinbaseUrl()

    def get_data(self,
                 symbol,
                 start=None,
                 end=None,
                 periods=None,
                 interval="1D",
                 dayfirst=False):
        """Coinbase getData API for intraday/Historical data

        :param symbol: crypto symbol
        :type symbol: string
        :param start: start time, defaults to None
        :type start: string optional
        :param end: end time, defaults to None
        :type end: string, optional
        :param periods: No of days, defaults to None
        :type periods: integer, optional
        :param interval: timeframe, defaults to "1D"
        :type interval: string, optional
        :param dayfirst: if date in european style, defaults to False
        :type dayfirst: bool, optional
        :raises ValueError: invalid time
        :raises Exception: for execption
        :return: data requested
        :rtype: pandas.DataFrame
        """
        try:
            s_from, e_till = get_date_range(start=start,
                                            end=end,
                                            periods=periods,
                                            dayfirst=dayfirst)
            if s_from > e_till:
                raise ValueError("End should grater than start.")

            #capitalize symbol
            symbol = symbol.upper()
            data_url = self.urls.get_candle_data_url(symbol,
                                                     start=s_from,
                                                     end=e_till,
                                                     interval=interval)
            res = self.__request.get(data_url, headers=self.urls.HEADER)
            data = json.loads(res.text)
            dfs = pd.DataFrame(data, columns=self.urls.data_columns)
            dfs['OpenTime'] = pd.to_datetime(dfs['OpenTime'], unit='s')
            dfs.set_index("OpenTime", inplace=True)
            return dfs

        except Exception as err:
            raise Exception("Error occurred while fetching data :", str(err))
Example #2
class NseData:
    def __init__(self, timeout=DEFAULT_TIMEOUT, max_retries=MAX_RETRIES):
        self.__nse_urls = NseUrls()
        self.__headers = self.__nse_urls.header
        #create request
        self.__request = RequestUrl(timeout, max_retries)

    def get_indices(self):
        """To get list of NSE indices
        """
        try:
            index_page = self.__request.get(self.__nse_urls.INDEX_URL,
                                            headers=self.__headers)
            soup = BeautifulSoup(index_page.text, 'lxml')
            table = soup.find("select", {"id": "indexType"})
            indices_data = table.find_all("option")
            indices = [
                index.get("value") for index in indices_data
                if "NIFTY" in index.get("value")
            ]

            #let's append INDIA VIX as well
            indices.append("INDIA VIX")
            return indices
        except Exception as err:
            raise Exception("Error occurred while getting NSE indices :",
                            str(err))

    def get_oc_exp_dates(self, symbol):
        """get current  available expiry dates

        :raises Exception: NSE connection related
        :return: expiry dates
        :rtype: list
        """
        try:
            base_oc_url = self.__nse_urls.get_option_chain_url(symbol)
            page = self.__request.get(base_oc_url, headers=self.__headers)
            soup = BeautifulSoup(page.text, 'lxml')
            table = soup.find("select", {"id": "date"})
            expiry_out = table.find_all("option")
            expiry_dates = [exp_date.get("value")
                            for exp_date in expiry_out][1:]
            return expiry_dates

        except Exception as err:
            raise Exception("something went wrong while reading nse URL :",
                            str(err))

    def get_option_chain_df(self, symbol, expiry_date=None, dayfirst=False):
        """ This function fetches option chain data from NSE and returns in pandas.DataFrame

        :param symbol: stock/index symbol
        :type symbol: string
        :param expiry_date: expiry date (all date formats accepted), defaults to next near
        :type expiry_date: string
        :param dayfirst: True if date format is european style DD/MM/YYYY, defaults to False
        :type dayfirst: bool, optional
        :raises Exception: NSE connection related
        :raises Exception: In html parsing
        :return: option chain
        :rtype: pandas.DataFrame
        """
        try:
            if not expiry_date:
                expiry_date = self.get_oc_exp_dates(symbol)[0]

            oc_url = self.__nse_urls.get_option_chain_url(
                symbol, expiry_date, dayfirst)
            # If the response was successful, no Exception will be raised
            oc_page = self.__request.get(oc_url, headers=self.__headers)

        except Exception as err:
            raise Exception("Error occurred while connecting NSE :", str(err))
        else:
            try:
                dfs = pd.read_html(oc_page.text)
                return dfs[1]
            except Exception as err:
                raise Exception("Error occurred while reading html :",
                                str(err))

    def __get_file_path(self,
                        file_name,
                        file_path=None,
                        is_use_default_name=True):
        """[summary]

        :param file_name: file name
        :type file_name: string
        :param file_path: file directory or file path , defaults to None
        :type file_path: string, optional
        :param is_use_default_name: to get filename in current timestamp, defaults to True
        :type is_use_default_name: bool, optional
        :return: file path
        :rtype: string
        """
        try:
            if not file_path:
                file_path = os.getcwd()

            if os.path.isfile(file_path):
                if (not is_use_default_name):
                    return file_path
                # if need to use default file path, we get parent path
                else:
                    file_path = os.path.dirname(file_path)

            # datetime object containing current date and time
            now = datetime.now()
            # format: day_MonthName_Hour_Minute, e.g. 01_January_09_30
            dt_string = now.strftime("%d_%B_%H_%M")
            file_name = file_name + "_" + dt_string + ".xlsx"

            excel_path = os.path.join(file_path, file_name)
            return excel_path
        except Exception as err:
            print("Error while naming file. Error: ", str(err))

    def get_option_chain_excel(self,
                               symbol,
                               expiry_date=None,
                               dayfirst=False,
                               file_path=None,
                               is_use_default_name=True):
        """Fetches NSE option chain data and returns in the form of excel (.xlsx)

        :param symbol: stock/index symbol
        :type symbol: string
        :param expiry_date: expiry date (all date formats accepted), defaults to the nearest expiry
        :type expiry_date: string, optional
        :param dayfirst: True if date format is European style DD/MM/YYYY, defaults to False
        :type dayfirst: bool, optional
        :param file_path: file/folder path, defaults to None
        :type file_path: string, optional
        :param is_use_default_name: whether to append the current timestamp to the file name, defaults to True
        :type is_use_default_name: bool, optional
        :raises Exception: NSE connection related
        """
        try:
            if not expiry_date:
                expiry_date = self.get_oc_exp_dates(symbol)[0]

            df = self.get_option_chain_df(symbol, expiry_date, dayfirst)
            file_name = symbol + "_" + expiry_date
            excel_path = self.__get_file_path(file_name, file_path,
                                              is_use_default_name)

            df.to_excel(excel_path, sheet_name=file_name)
        except Exception as err:
            raise Exception("Error occurred while getting excel :", str(err))

    def __join_part_oi_dfs(self, df_join, df_joiner):
        """will append joiner to join for oi_dfs

        :param df_join: Dictionary of participants
        :type df_join: dict
        :param df_joiner: Dictionary of participants
        :type df_joiner: dict
        """
        for client in df_join:
            df_join[client] = self.__join_dfs(df_join[client],
                                              df_joiner[client]).sort_index()

    def __join_dfs(self, join, joiner):
        """Append joiner to join and return the merged frame

        :param join: df which will be appended to
        :type join: pandas.DataFrame
        :param joiner: df which we want to append
        :type joiner: pandas.DataFrame
        :return: merged data frame
        :rtype: pandas.DataFrame
        """
        #DataFrame.append was removed in pandas 2.0; concat is the equivalent
        return pd.concat([join, joiner])

    def get_part_oi_df(self,
                       start=None,
                       end=None,
                       periods=None,
                       dayfirst=False,
                       workers=None):
        """Return dictionary of participants containing data frames

        :param start: start date , defaults to None
        :type start: string, optional
        :param end: end date, defaults to None
        :type end: string, optional
        :param periods: number of days, defaults to None
        :type periods: integer, optional
        :param dayfirst: True if date format is European style DD/MM/YYYY, defaults to False
        :type dayfirst: bool, optional
        :param workers: number of threads for requesting NSE, defaults to None
        :type workers: integer, optional
        :raises Exception: NSE Connection/Request overload
        :return: participant-wise open interest, keyed by participant type
        :rtype: dict of pandas.DataFrame
        """
        try:
            #format date just in case
            if start:
                start = get_formated_date(start, dayfirst=dayfirst)
            if end:
                end = get_formated_date(end, dayfirst=dayfirst)

            #if both none, we set end to today
            if not start and not end:
                end = get_formated_date()
                if not periods:
                    periods = PART_OI_DAYS
            #get urls for these days
            dates = pd.date_range(start=start,
                                  end=end,
                                  periods=periods,
                                  freq='B')
            url_date = [(self.__nse_urls.get_participant_oi_url(date), date)
                        for date in dates]

            oi_clm = self.__nse_urls.PART_OI_CLM
            #pre-initialize for better readability
            oi_dfs = {
                "Client": pd.DataFrame(columns=oi_clm, index=dates),
                "DII": pd.DataFrame(columns=oi_clm, index=dates),
                "FII": pd.DataFrame(columns=oi_clm, index=dates),
                "Pro": pd.DataFrame(columns=oi_clm, index=dates),
                "TOTAL": pd.DataFrame(columns=oi_clm, index=dates)
            }

            if not workers:
                workers = os.cpu_count() * 2

            with concurrent.futures.ThreadPoolExecutor(
                    max_workers=workers) as executor:
                responses = {
                    executor.submit(self.__request.get, url, self.__headers):
                    date
                    for url, date in url_date
                }
                for res in concurrent.futures.as_completed(responses):
                    date = responses[res]
                    try:
                        csv = res.result()
                    except Exception as exc:
                        #might be holiday
                        pass
                    else:
                        df = pd.read_csv(
                            io.StringIO(csv.content.decode('utf-8')))
                        #promote the first data row to the header
                        df_header = df.iloc[0]
                        #is there an in-place way to do this?
                        df = df[1:]
                        df.columns = df_header
                        df.set_index('Client Type', inplace=True)
                        #fill the frame for each client type
                        oi_dfs['Client'].loc[date] = df.loc['Client']
                        oi_dfs['FII'].loc[date] = df.loc['FII']
                        oi_dfs['DII'].loc[date] = df.loc['DII']
                        oi_dfs['Pro'].loc[date] = df.loc['Pro']
                        oi_dfs['TOTAL'].loc[date] = df.loc['TOTAL']

            if not oi_dfs['Client'].empty:
                #remove nan row
                for client in oi_dfs:
                    oi_dfs[client].dropna(inplace=True)

                #if holidays fell on business days, retrieve extra data to cover them
                if periods and oi_dfs['Client'].shape[0] < periods:
                    new_periods = periods - oi_dfs['Client'].shape[0]
                    try:
                        #if only start, find till today
                        if start and (not end):
                            s_from = oi_dfs['Client'].index[-1] + timedelta(1)
                            e_till = None
                        #if not start, can go to past
                        elif (end and (not start)):
                            s_from = None
                            e_till = oi_dfs['Client'].index[0] - timedelta(1)
                        #if start and end, no need to change
                        else:
                            return oi_dfs
                    except IndexError as err:
                        raise Exception(
                            "NSE access error. Reduce the date range or clear cookies to resolve the issue."
                        )
                    except Exception as exc:
                        raise Exception("participant OI error: ", str(exc))

                    oi_dfs_new = self.get_part_oi_df(start=s_from,
                                                     end=e_till,
                                                     periods=new_periods)
                    self.__join_part_oi_dfs(oi_dfs, oi_dfs_new)

                return oi_dfs

        except Exception as err:
            raise Exception("Error occurred while getting part_oi :", str(err))

    def __parse_indexdata(self, res_txt, symbol):
        dfs = pd.read_html(res_txt)[0]
        if dfs.shape[0] < 2:
            raise Exception("No record found")
        if "NIFTY" in symbol:
            fined_dfs = dfs.iloc[0:]
            fined_dfs.columns = self.__nse_urls.INDEX_DATA_CLM
        elif symbol == "INDIA VIX":
            fined_dfs = dfs.iloc[1:]
            fined_dfs.drop(fined_dfs.index[0], inplace=True)
            fined_dfs.columns = self.__nse_urls.VIX_DATA_CLM
        fined_dfs.drop(fined_dfs.index[-1], inplace=True)
        fined_dfs.set_index("Date", inplace=True)
        return fined_dfs

    def __get_datarange_intv(self, start, end, intv):
        diff = math.ceil((end - start).days / intv)
        date_ranges = []
        curr_start = prev_start = start
        for i in range(diff):
            curr_start = (start + timedelta(intv * i))
            if i != 0:
                start_ = prev_start
                end_ = curr_start - timedelta(1)
                date_ranges.append((start_, end_))
            prev_start = curr_start
        date_ranges.append((curr_start, end))
        return date_ranges
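
    # Worked example (illustrative): start=2021-01-01, end=2021-03-01, intv=50
    # gives diff = ceil(59 / 50) = 2 and
    # date_ranges = [(2021-01-01, 2021-02-19), (2021-02-20, 2021-03-01)],
    # i.e. consecutive, non-overlapping chunks spanning at most `intv` days each.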

    def __get_data_adjusted(self,
                            dfs,
                            symbol,
                            series="EQ",
                            start=None,
                            end=None,
                            periods=None):
        if periods and (dfs.shape[0] < periods):
            new_periods = periods - dfs.shape[0]
            try:
                s_from = e_till = None
                #if only start, find till today
                if start and (not end):
                    s_from = dfs.index[0] + timedelta(1)
                    e_till = None
                #if not start, can go to past
                elif ((end and (not start)) or periods):
                    s_from = None
                    e_till = dfs.index[-1] - timedelta(1)
            except IndexError as err:
                raise Exception("NSE Access error.")
            except Exception as exc:
                raise Exception("Stock data error: ", str(exc))
            try:
                dfs_new = self.get_data(symbol,
                                        series,
                                        start=s_from,
                                        end=e_till,
                                        periods=new_periods)
                dfs = self.__join_dfs(dfs, dfs_new).sort_index(ascending=False)
            except Exception as exc:
                #data may not be available
                pass
        return dfs

    def get_data(self,
                 symbol,
                 series="EQ",
                 start=None,
                 end=None,
                 periods=None,
                 dayfirst=False):
        """To get NSE stock data

        :param symbol: stock/index symbol
        :type symbol: string
        :param series: segment, defaults to "EQ"
        :type series: string, optional
        :param start: start date, defaults to None
        :type start: string, optional
        :param end: end date, defaults to None
        :type end: string, optional
        :param periods: number of days, defaults to None
        :type periods: integer, optional
        :param dayfirst: True if date format is European style DD/MM/YYYY, defaults to False
        :type dayfirst: bool, optional
        :raises Exception: NSE Connection Related
        :return: stock data
        :rtype: pandas.DataFrame
        """
        try:

            #Step1: get the date range
            s_from, e_till = get_date_range(start=start,
                                            end=end,
                                            periods=periods,
                                            dayfirst=dayfirst)

            if s_from > e_till:
                raise ValueError("End should grater than start.")

            data_limit = None
            if self.__nse_urls.is_index(symbol):
                data_limit = INDEX_DATA_LIMIT
            else:
                data_limit = STOCK_DATA_LIMIT

            data_days = e_till - s_from
            if (data_days.days) > data_limit:
                date_ranges = self.__get_datarange_intv(
                    s_from, e_till, data_limit)
                workers = len(date_ranges)
                with concurrent.futures.ThreadPoolExecutor(
                        max_workers=workers) as executor:
                    responses = [executor.submit(self.get_data,
                                                 symbol=symbol,
                                                 start=start_,
                                                 end=end_,
                                                 dayfirst=dayfirst)
                                 for start_, end_ in date_ranges]
                    dfs = []
                    for res in concurrent.futures.as_completed(responses):
                        try:
                            df = res.result()
                            dfs.append(df)
                        except Exception as exc:
                            #might be holiday/no record
                            pass
                    all_dfs = pd.concat(dfs).sort_index(ascending=False)
                    adjusted_dfs = self.__get_data_adjusted(all_dfs,
                                                            symbol,
                                                            start=start,
                                                            end=end,
                                                            periods=periods)
                    return adjusted_dfs

            data_url = self.__nse_urls.get_stock_data_url(symbol,
                                                          series=series,
                                                          start=s_from,
                                                          end=e_till)

            csv = self.__request.get(data_url, headers=self.__headers)

            #if it is an index, we need to read the html table
            # Why so much handling here? Is there a simpler way?
            # Suggestions are welcome. Ping me on github.
            if self.__nse_urls.is_index(symbol):
                dfs = self.__parse_indexdata(csv.text, symbol)
            else:
                dfs = pd.read_csv(io.StringIO(csv.content.decode('utf-8')))
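                #the CSV header contains a trailing space, hence 'Date ' below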
                dfs.set_index('Date ', inplace=True)
            # Converting the index as date
            dfs.index = pd.to_datetime(dfs.index)
            dfs = self.__get_data_adjusted(dfs,
                                           symbol,
                                           start=start,
                                           end=end,
                                           periods=periods)
            return dfs

        except Exception as err:
            raise Exception("Error occurred while fetching stock data :",
                            str(err))
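
A minimal usage sketch, assuming NseData and its helpers (NseUrls, RequestUrl, get_date_range, get_formated_date) are importable and NSE is reachable; the symbols are illustrative:

#hypothetical usage
nse = NseData()
print(nse.get_indices())                  #NIFTY indices plus "INDIA VIX"
oc_df = nse.get_option_chain_df("NIFTY")  #option chain for the nearest expiry
sbin = nse.get_data("SBIN", periods=30)   #roughly the last 30 days of stock data
print(sbin.head())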
Example #3
class Nasdaq:
    """Nasdaq class to get data from nasdaq
    """
    def __init__(self, timeout=DEFAULT_TIMEOUT, max_retries=MAX_RETRIES, cloud_mode=False):
        if cloud_mode:
            self.requests = Curl(timeout, max_retries)
        else:
            self.requests = RequestUrl(timeout, max_retries)
        self.nasdaq_url = NasdaqUrls()

    def __get_data_adjusted(self,dfs,symbol,start=None,end=None,periods=None):
        if periods and (dfs.shape[0] < periods):
            new_periods = periods - dfs.shape[0]
            try:
                s_from = e_till = None
                #if only start, find till today
                if start and (not end):
                    s_from = dfs.index[0] + timedelta(1)
                    e_till = None
                #if not start, can go to past
                elif((end and (not start)) or periods):
                    s_from = None
                    e_till = dfs.index[-1] - timedelta(1)
            except IndexError as err:
                raise Exception("Nasdaq Access error.")
            except Exception as exc:
                raise Exception("Nasdaq data error: ",str(exc))
            try:
                dfs_new = self.get_data(symbol,start = s_from,end = e_till,periods = new_periods)
                dfs = self.__join_dfs(dfs,dfs_new).sort_index(ascending=False)
            except Exception as exc:
                #Small part of data may not be available
                pass
        return dfs

    def __join_dfs(self,join,joiner):
        """Append joiner to join and return the merged frame

        :param join: df which will be appended to
        :type join: pandas.DataFrame
        :param joiner: df which we want to append
        :type joiner: pandas.DataFrame
        :return: merged data frame
        :rtype: pandas.DataFrame
        """
        #DataFrame.append was removed in pandas 2.0; concat is the equivalent
        return pd.concat([join,joiner])

    def get_data(self,symbol,start=None,end=None,periods=None,dayfirst=False):
        """get_data API to fetch data from nasdaq

        :param symbol: stock symbol
        :type symbol: string
        :param start: start date, defaults to None
        :type start: string, optional
        :param end: end date, defaults to None
        :type end: string, optional
        :param periods: number of days, defaults to None
        :type periods: integer, optional
        :param dayfirst: True if date format is European style DD/MM/YYYY, defaults to False
        :type dayfirst: bool, optional
        :raises ValueError: for invalid inputs
        :raises Exception: in case no data is found
        :return: stock data
        :rtype: pandas.DataFrame
        """
        try:
            #Step1: get the date range
            s_from,e_till = get_date_range(start=start,end=end,periods=periods,dayfirst=dayfirst)

            if s_from > e_till:
                raise ValueError("End should be greater than start.")

            url = self.nasdaq_url.get_data_url(symbol=symbol,start=s_from,end=e_till)
            res = self.requests.get(url,headers=self.nasdaq_url.header)

            try:
                dfs = pd.read_csv(io.StringIO(res.content.decode('utf-8')))
            except Exception as err:
                #increase data range, nasdaq not returning for small set
                if e_till == get_formated_dateframe():
                    raise Exception("Nasdaq not returning data for this date range. "
                                    "Please retry with other date ranges.")
                e_till = get_formated_dateframe()
                if (e_till - s_from).days < DEFAULT_DAYS:
                    s_from = e_till - timedelta(days=DEFAULT_DAYS)  #DEFAULT_DAYS is a day count
                dfs = self.get_data(symbol,start=s_from,end=e_till)

            dfs.set_index("Date",inplace=True)
            #convert to  datetime
            dfs.index = pd.to_datetime(dfs.index)
            dfs = self.__get_data_adjusted(dfs,symbol,start=start,end=end,periods=periods)
            dfs = dfs.replace('$','',regex=True)
            if not dfs.empty:
                #dfs = dfs.apply(pd.to_numeric)
                return dfs
        except Exception as err:
            raise Exception("Error occurred while getting data :", str(err))
Example #4
class Yfinance:
    def __init__(self, timeout=DEFAULT_TIMEOUT, max_retries=MAX_RETRIES):
        self.__yfinance_base_url = r"https://query1.finance.yahoo.com/v7/finance/download/"
        self.__yfinance_suffix_url = r"&interval=1d&events=history&includeAdjustedClose=true"
        self.__headers = {
            'User-Agent':
            "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/81.0.4044.138 Safari/537.36"
        }
        #create request
        self.__request = RequestUrl(timeout, max_retries)
        #ticker symbol map to yahoo finance:
        self.INDEX_MAP = {"SENSEX":"%5EBSESN","NIFTY 50":"%5ENSEI","NIFTY":"%5ENSEI",\
                           "NIFTY BANK":"%5ENSEBANK","BANKNIFTY":"%5ENSEBANK"}

    def __get_complete_url(self, symbol, start, end, is_indian=True):
        period1 = int(time.mktime(start.timetuple()))
        period2 = int(time.mktime(end.timetuple()))
        symbol = symbol.upper()
        if is_indian and is_ind_index(symbol):
            symbol = self.INDEX_MAP.get(symbol)
            if not symbol:
                raise Exception("Data not available for this symbol")
        elif (is_indian):
            symbol += ".NS"

        complete_csv_url = self.__yfinance_base_url + symbol + '?period1=' + str(
            period1) + '&period2=' + str(period2) + self.__yfinance_suffix_url
        return complete_csv_url
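
    # Illustrative result for "NIFTY 50" over Jan 2021 (the epoch values shown assume
    # UTC; time.mktime uses the local timezone, so the real numbers can differ):
    # https://query1.finance.yahoo.com/v7/finance/download/%5ENSEI?period1=1609459200&period2=1612137600&interval=1d&events=history&includeAdjustedClose=true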

    def get_data(self,
                 symbol,
                 is_indian=True,
                 start=None,
                 end=None,
                 periods=None,
                 dayfirst=False):
        """get_data API to fetch data from nasdaq

        :param symbol: stock symbol
        :type symbol: string
        :param start: start date, defaults to None
        :type start: string, optional
        :param end: end date, defaults to None
        :type end: string, optional
        :param is_indian: False if stock is not from indian market , defaults to True
        :type is_indian: bool, optional
        :param periods: number of days, defaults to None
        :type periods: integer, optional
        :param dayfirst: True if date format is European style DD/MM/YYYY, defaults to False
        :type dayfirst: bool, optional
        :raises ValueError: for invalid inputs
        :raises Exception: incase if no data found
        :return: stock data
        :rtype: pandas.DataFrame
        """
        #Step1: get the date range
        s_from, e_till = get_date_range(start=start,
                                        end=end,
                                        periods=periods,
                                        dayfirst=dayfirst)
        url = self.__get_complete_url(symbol, s_from, e_till, is_indian)
        response = self.__request.get(url, headers=self.__headers)
        dfs = pd.read_csv(io.StringIO(response.content.decode('utf-8')))
        dfs.set_index("Date", inplace=True)
        #convert to  datetime
        dfs.index = pd.to_datetime(dfs.index)
        dfs = self.__get_data_adjusted(dfs,
                                       symbol,
                                       start=start,
                                       end=end,
                                       periods=periods)
        if not dfs.empty:
            return dfs

    def __join_dfs(self, join, joiner):
        """Append joiner to join and return the merged frame

        :param join: df which will be appended to
        :type join: pandas.DataFrame
        :param joiner: df which we want to append
        :type joiner: pandas.DataFrame
        :return: merged data frame
        :rtype: pandas.DataFrame
        """
        #DataFrame.append was removed in pandas 2.0; concat is the equivalent
        return pd.concat([join, joiner])

    def __get_data_adjusted(self,
                            dfs,
                            symbol,
                            start=None,
                            end=None,
                            periods=None):
        if periods and (dfs.shape[0] < periods):
            new_periods = periods - dfs.shape[0]
            try:
                s_from = e_till = None
                #if only start, find till today
                if start and (not end):
                    s_from = dfs.index[0] + timedelta(1)
                    e_till = None
                #if not start, can go to past
                elif ((end and (not start)) or periods):
                    s_from = None
                    e_till = dfs.index[0] - timedelta(1)
            except IndexError as err:
                raise Exception("Yahoo Finance access error.")
            except Exception as exc:
                raise Exception("Yahoo Finance data error: ", str(exc))
            try:
                dfs_new = self.get_data(symbol,
                                        start=s_from,
                                        end=e_till,
                                        periods=new_periods)
                dfs = self.__join_dfs(dfs, dfs_new).sort_index(ascending=False)
            except Exception as exc:
                #Small part of data may not be available
                pass
        return dfs
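
A minimal usage sketch, assuming Yfinance and its helpers (RequestUrl, get_date_range, is_ind_index) are importable and Yahoo Finance still serves this CSV endpoint; the symbols are illustrative:

#hypothetical usage
yf = Yfinance()
nifty = yf.get_data("NIFTY 50", periods=30)              #mapped to %5ENSEI via INDEX_MAP
aapl = yf.get_data("AAPL", is_indian=False, periods=30)  #non-Indian symbols are used as-is
print(nifty.head())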
Example #5
class Binance:
    def __init__(self,api_key=None, api_secret=None,timeout=DEFAULT_TIMEOUT,max_retries=MAX_RETRIES):
        #internal initialization
        self.__request = RequestUrl(timeout,max_retries)
        self.urls = BinanceUrl()

    def get_tickers(self,keyword=None):
        """Get all crypto tickers from binance

        :param keyword: any keyword to match, e.g. "BTC" will return all BTC pairs, defaults to None
        :type keyword: str, optional
        :raises Exception: related to network/API
        :return: list of all tickers from binance
        :rtype: list
        """
        try:
            res = self.__request.get(self.urls.TICKER_URL,headers=self.urls.HEADER)
            tickers = json.loads(res.text)
            all_tickers = [each_symbol.get("symbol") for each_symbol in tickers]
            if keyword:
                keyword = keyword.upper()
                all_tickers = [symbol for symbol in all_tickers if keyword in symbol]
            return all_tickers
        except Exception as err:
            raise Exception("Error occurred while getting tickers :", str(err))

    def get_data(self,symbol,start=None,end=None,periods=None,interval="1D",dayfirst=False):
        """Binance getData API for intraday/Historical data

        :param symbol: crypto symbol
        :type symbol: string
        :param start: start time, defaults to None
        :type start: string optional
        :param end: end time, defaults to None
        :type end: string, optional
        :param periods: No of days, defaults to None
        :type periods: integer, optional
        :param interval: timeframe, defaults to "1D"
        :type interval: string, optional
        :param dayfirst: if date in european style, defaults to False
        :type dayfirst: bool, optional
        :raises ValueError: invalid time
        :raises Exception: for execption
        :return: data requested
        :rtype: pandas.DataFrame
        """
        try:
            s_from,e_till = get_date_range(start=start,end=end,periods=periods,dayfirst=dayfirst)
            if s_from > e_till:
                raise ValueError("End should grater than start.")

            #capitalize
            symbol = symbol.upper()
            interval = interval.lower()

            s_from_milli_sec = str(int(s_from.timestamp() * 1000))
            e_till_milli_sec = str(int(e_till.timestamp() * 1000))

            data_url = self.urls.get_candle_data_url(symbol,start=s_from_milli_sec,end=e_till_milli_sec,interval=interval)
            try:
                res = self.__request.get(data_url,headers=self.urls.HEADER)
            except Exception:
                #primary URL failed; retry with the backup endpoint
                data_url = self.urls.get_candle_data_url(symbol,start=s_from_milli_sec,end=e_till_milli_sec,interval=interval,use_backup=True)
                res = self.__request.get(data_url,headers=self.urls.HEADER)

            data = json.loads(res.text)
            dfs = pd.DataFrame(data,columns=self.urls.data_columns)
            dfs['OpenTime'] = pd.to_datetime(dfs['OpenTime'], unit='ms')
            dfs['CloseTime'] = pd.to_datetime(dfs['CloseTime'], unit='ms')
            dfs.set_index("OpenTime",inplace=True)

            return dfs

        except Exception as err:
            raise Exception("Error occurred while fetching data :", str(err))
Example #6
class Samco:
    def __init__(self,user_id,password,yob,timeout=DEFAULT_TIMEOUT,max_retries=MAX_RETRIES):
        #internal initialization
        self.__request = RequestUrl(timeout,max_retries)
        self.urls = SamcoUrl()

        request_body = {
                        "userId": user_id,
                        "password": password,
                        "yob": yob
                        }

        #lets login
        res = self.__request.post(self.urls.LOGIN_URL,
                            data=json.dumps(request_body),
                            headers = self.urls.LOGIN_HEADERS, verify=False)

        self.login_res = json.loads(res.text)
        #set token
        self.urls.set_session(self.login_res.get("sessionToken"))

    def __get_hist_data(self,symbol,start,end,interval="1D"):
        try:
            url = self.urls.get_hist_data_url(symbol,start,end)
            res = self.__request.get(url,headers=self.urls.DATA_HEADER)
            json_key = "historicalCandleData"
            if is_ind_index(symbol):
                json_key = "indexCandleData"
            hist_data_dict =  json.loads(res.text).get(json_key)
            dfs = pd.json_normalize(hist_data_dict)
            dfs.set_index("date",inplace=True)
            # Converting the index as date
            dfs.index = pd.to_datetime(dfs.index)
            return dfs

        except Exception as err:
            raise Exception("Error occurred for historical data: ",str(err))

    def __get_intra_data(self,symbol,start,end,interval="1M"):
        try:
            url = self.urls.get_intra_data_url(symbol,start,end)
            res = self.__request.get(url,headers=self.urls.DATA_HEADER)
            json_key = "intradayCandleData"
            if is_ind_index(symbol):
                json_key = "indexIntraDayCandleData"
            intra_data =  json.loads(res.text).get(json_key)
            dfs = pd.DataFrame(intra_data)
            dfs.set_index("dateTime",inplace=True)
            # Converting the index as date
            dfs.index = pd.to_datetime(dfs.index)
            return dfs
        except Exception as err:
            raise Exception("Error occurred for historical data: ",str(err))

    def __finetune_df(self,df):
        """Drop rows outside regular market hours

        :param df: input dataframe
        :type df: pd.DataFrame
        """
        #union of the out-of-hours indexes (pandas >=1.4: `inclusive` replaces include_start/include_end)
        drop_index = df.between_time("07:00","09:00",inclusive="left").index.union(
                        df.between_time("15:30","17:00",inclusive="right").index)
        df.drop(drop_index,inplace=True)

    def get_data(self,symbol,start=None,end=None,periods=None,interval="1D",dayfirst=False):
        """Samco getData API for intraday/Historical data

        :param symbol: stock symbol
        :type symbol: string
        :param start: start time, defaults to None
        :type start: string optional
        :param end: end time, defaults to None
        :type end: string, optional
        :param periods: No of days, defaults to None
        :type periods: integer, optional
        :param interval: timeframe, defaults to "1D"
        :type interval: string, optional
        :param dayfirst: if date in european style, defaults to False
        :type dayfirst: bool, optional
        :raises ValueError: invalid time
        :raises Exception: for execption
        :return: data requested
        :rtype: pandas.DataFrame
        """
        try:
            s_from,e_till = get_date_range(start=start,end=end,periods=periods,dayfirst=dayfirst)
            if s_from > e_till:
                raise ValueError("End should grater than start.")

            #capitalize
            symbol = symbol.upper()
            interval = interval.upper()

            time_frame = pd.Timedelta(interval)
            #if the interval is 1 day or more, use the historical data API
            day_time_frame = pd.Timedelta("1D")
            min_time_frame = pd.Timedelta("1M")
            if time_frame >= day_time_frame:
                dfs = self.__get_hist_data(symbol,s_from,e_till)
                dfs = dfs.apply(pd.to_numeric)
                if time_frame != day_time_frame:
                    dfs = get_data_resample(dfs,interval)
            else:
                dfs = self.__get_intra_data(symbol,s_from,e_till)
                dfs = dfs.apply(pd.to_numeric)
                if time_frame != min_time_frame:
                    dfs = get_data_resample(dfs,interval)

            if not dfs.empty:
                return dfs

        except Exception as err:
            raise Exception("Error occurred while fetching data :", str(err))

    def get_optionchain(self,symbol):
        """Get option chain for the given symbol from Samco

        :param symbol: stock symbol
        :type symbol: string
        :return: option chain data as parsed JSON
        :rtype: dict
        """
        params = {'searchSymbolName': symbol}
        res = self.__request.get(self.urls.OPTION_CHAIN_URL,headers=self.urls.DATA_HEADER,params=params)
        return res.json()
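
A minimal usage sketch, assuming the Samco class and its helpers (SamcoUrl, RequestUrl, get_date_range, get_data_resample, is_ind_index) are importable; the login credentials below are placeholders, not real values:

#hypothetical usage; replace the placeholder credentials with your own
samco = Samco(user_id="YOUR_USER_ID", password="YOUR_PASSWORD", yob="YOUR_YOB")
df = samco.get_data("TCS", periods=5, interval="1D")
print(df.head())
print(samco.get_optionchain("TCS"))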