Example #1
def download(ticker,
             filename=None,
             period='1y',
             interval='1d',
             start='',
             end=''):
    """
        https://yahooquery.dpguthrie.com/

    :param ticker: stock symbol
    :param period: valid periods: 1d,5d,1mo,3mo,6mo,1y,2y,5y,10y,ytd,max
    :param interval: valid intervals: 1m,2m,5m,15m,30m,60m,90m,1h,1d,5d,1wk,1mo,3mo
    :param start: YYYY–MM–DD
    :param end: YYYY–MM–DD
    :return:
    """
    ticker = Ticker(ticker + ".SA")

    if len(start) and len(end):
        data = ticker.history(start=start, end=end, interval=interval)
    else:
        data = ticker.history(period=period, interval=interval)
    if filename is not None and len(data):
        data.to_csv(filename)

    return ticker
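A minimal usage sketch for the helper above, assuming yahooquery is installed and the function is in scope; "PETR4" and the CSV name are placeholder values (the function appends ".SA" itself, so it targets B3 listings):

# Fetch one year of daily candles and also write them to a CSV file.
download('PETR4', filename='petr4_1y.csv', period='1y', interval='1d')

# Or fetch an explicit date window instead of a period.
download('PETR4', start='2022-01-01', end='2022-12-31')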
Example #2
def fetch_all_data():
    os.system('rm -rf {}_data/*.npz'.format(exchange))
    init = Ticker(tickers)
    data = init.history(start=source_start_str)

    if type(data) == type(constituents):
        data = {k: data.loc[k] for k in tickers}

    # When yahooquery returns a dict, keep only the entries that are DataFrames,
    # that have traded continuously since 2015, and that trade actively enough to
    # avoid a "divide by zero" error when standardizing.

    print('{} total companies in {}.'.format(len(data), exchange))
    num_days = data[('JPM' if exchange == 'nyse' else 'GOOG')].shape[0]
    data = {
        k: v[column_titles].ewm(alpha=1, axis=0,
                                adjust=False).mean().to_numpy()[-128:, :]
        for k, v in data.items()
        if (type(v) == type(constituents)
            #and str(v.index.array[0])[:10] == source_start_str
            and
            not np.any(pd.isnull(v[column_titles])) and v.shape[0] == num_days)
    }

    print('Data sift has produced {} total companies from {} '
          'for consideration.'.format(len(data), exchange))

    all_companies_history = []

    for k, v in data.items():
        average = np.mean(v, axis=-1, keepdims=True)
        data_price_avg = np.mean(v[:, :4], axis=None)
        data_price_std = np.std(v[:, :4], axis=None)

        # Build a fresh (128, 1) window per company; reusing one preallocated
        # buffer would make every appended entry point at the same array.
        std_single_window = np.divide(average - data_price_avg, data_price_std)
        all_companies_history.append(std_single_window)

    history = np.stack(all_companies_history, axis=0)

    index_init = Ticker(('^NYA' if exchange == 'nyse' else
                         ('^GSPC' if exchange == 'snp' else '^NDX')))
    index_data = index_init.history(start=source_start_str)
    index = index_data[column_titles].ewm(alpha=1,
                                          axis=0).mean()[-128:].to_numpy()

    index_average = np.mean(index, axis=-1, keepdims=True)
    index_price_avg = np.mean(index[:, :4], axis=None)
    index_price_std = np.std(index[:, :4], axis=None)

    index = np.divide(index_average - index_price_avg, index_price_std)

    np.savez('{}_data/{}.npz'.format(exchange, source_end_str),
             history=history,
             index=index)

    # Destroy loading window
    loading.destroy()
Example #3
class YahooApi:
    def __init__(self, symbol: Optional[str] = None) -> None:

        self.symbol = symbol
        self.ticker = Ticker(self.symbol)
        if symbol:
            try:
                self.check_symbol(symbol)
            except SymbolNotFoundException:
                if not str(symbol).endswith(".SA"):
                    self.symbol = symbol + ".SA"
                    self.ticker = Ticker(self.symbol)
                    self.check_symbol(self.symbol)

                else:
                    raise

    def check_symbol(self, symbol: str) -> None:
        price = self.ticker.price
        if type(price[symbol]) != dict:
            raise SymbolNotFoundException(price[symbol])

    def extract_prefix(self, symbol: str) -> str:
        if symbol.endswith(".SA"):
            return symbol.split(".")[0]
        else:
            return symbol

    @symbol_required
    def get_price(self) -> float:
        price = self.ticker.price
        if type(price[self.symbol]) != dict:
            raise SymbolNotFoundException(price[self.symbol])
        market_price = price[self.symbol].get("regularMarketPrice", None)
        try:
            return float(market_price)
        except (TypeError, ValueError):
            traceback.print_exc()
            raise Exception("An error occurred when reading the price")

    def get_history(self, period) -> DataFrame:

        return self.ticker.history(period)

    @symbol_required
    def get_details(self) -> dict:
        summary_detail = self.ticker.summary_detail
        if type(summary_detail) != dict:
            raise Exception("Unable to fetch summary details for " + str(self.symbol))

        return summary_detail

    @symbol_required
    def historic(self, period, interval) -> DataFrame:
        return self.ticker.history(period=period, interval=interval)
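A hedged usage sketch for the class above; it assumes SymbolNotFoundException, the symbol_required decorator, and the imports (Ticker, DataFrame, Optional, traceback) are defined elsewhere in the project, and "PETR4" is only a placeholder symbol:

api = YahooApi("PETR4")  # falls back to "PETR4.SA" if the bare symbol is not found
print(api.get_price())  # latest regular market price
print(api.historic(period="1mo", interval="1d").head())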
Example #4
def main(argv):
    interval = ''

    try:
        opts, args = getopt.getopt(argv, "hi:", ["interval="])
    except getopt.GetoptError:
        print('tradingdatadownload.py -i <interval>')
        sys.exit(2)

    for opt, arg in opts:
        if opt == '-h':
            print('tradingdatadownload.py -i <interval>')
            sys.exit()
        elif opt in ("-i", "--interval"):
            interval = arg.lstrip()

    if interval == '':
        print('tradingdatadownload.py -i <interval>')
        sys.exit(2)

    #cnxn = pyodbc.connect("dsn=azure-trading;UID=sqladmin;PWD=Duoduo88")
    #engine = create_engine("mssql+pyodbc://sqladmin:Duoduo88@azure-trading")
    cnxn = pyodbc.connect(
        "DRIVER={ODBC Driver 17 for SQL Server};SERVER=192.168.1.19;UID=sa;PWD=Duoduo88;database=trading"
    )
    engine = create_engine(
        "mssql+pyodbc://sa:[email protected]/trading?driver=ODBC+Driver+17+for+SQL+Server"
    )
    sym = pd.read_sql_table("Symbol", engine)
    ticker_symbols = sym['Symbol'].values.tolist()
    print(ticker_symbols)

    if interval == 'weekly':
        tickers = Ticker(ticker_symbols)
        weekly = tickers.history(period='5y', interval='1wk')
        print('Download weekly completed')
        saveToTable(weekly, 'Weekly', cnxn, engine)
        print('Saved to table weekly')

    if interval == 'daily':
        #daily = tickers.history('1mo', interval='1d')
        #saveToTable(daily, 'Daily', cnxn, engine)
        downloadDaily(ticker_symbols, cnxn, engine)

    if interval == 'hourly':
        tickers = Ticker(ticker_symbols)
        hourly = tickers.history(period='1mo', interval='1h')
        print('Download hourly completed')
        saveToTable(hourly, 'Hourly', cnxn, engine)
        print('Saved to table hourly')
Example #5
def update_table(n, local_data_json, period):
    callback = dash.callback_context.triggered[0]["prop_id"]
    local_data = json.loads(local_data_json)
    n_clicks = local_data["n_clicks"]
    n_previous_clicks = local_data["n_previous_clicks"]
    if (
            n > 0 or (n_clicks > n_previous_clicks)
    ) and "date_input" not in callback and "select" not in callback and period:
        tickers = []
        for symbol in current_tickers:
            ticker = Ticker(symbol.lower())
            row = ticker.history(interval='1h', period=period)
            row["Symbol"] = symbol
            tickers.append(row)
        df = pd.concat(tickers)
        df = df.reset_index()
        df = df[["date", "close", "Symbol"]]
        df_pivot = df.pivot("date", "Symbol", "close").reset_index()
        corr_df = df_pivot.corr(method="pearson")
        corr_df.head().reset_index()
        matrix = corr_df.to_dict('records')
        for i, element in enumerate(matrix):
            matrix[i] = dict(sorted(element.items()))  # sort each row's columns by ticker
        for i in range(len(matrix)):
            matrix[i]["Tickers"] = list(current_tickers[i])
        last_update_time = time.strftime("%Y-%m-%d %H:%M:%S")
        return matrix, [html.P(last_update_time)], "", [{
            "name": i,
            "id": i
        } for i in ["Tickers"] + current_tickers]
    else:
        raise dash.exceptions.PreventUpdate
Example #6
def createFolders(execType):
    TickerNames = pd.read_csv('./src/tickerNames/TickerNames.csv')
    TickerNames = TickerNames.values
    if execType == 0:
        pathName = './backtestdatabase/'
    else:
        pathName = './database/'
    for tickerName in TickerNames:
        if not os.path.exists(pathName + tickerName[0]):
            tickers = Ticker(tickerName[0])
            df = tickers.history(period='max', interval='1d')
            print("Creating and Updating " + tickerName[0] + " at " +
                  datetime.fromtimestamp(time.time()).strftime('%H:%M'))
            os.makedirs(pathName + tickerName[0] + '/')
            analysisColumnNames = [
                'Time Stamp', 'Strategy', 'Position', 'Amount', 'Entry',
                'Stop Loss', 'Take Profit', 'Confidence', 'Outcome', 'Profits',
                'Points Gained/Lost'
            ]
            analysisFrame = pd.DataFrame(columns=analysisColumnNames)
            analysisFrame.to_csv(pathName + tickerName[0] + '/analysis.csv')
            tradeColumnNames = [
                'Time Stamp', 'Position', 'Amount', 'Entry', 'Stop Loss',
                'Target', 'Confidence', 'Leverage', 'Outcome', 'Profits'
            ]
            tradeFrame = pd.DataFrame(columns=tradeColumnNames)
            tradeFrame.to_csv(pathName + tickerName[0] + '/trades.csv')
            df.to_csv(pathName + tickerName[0] + '/temp.csv')
Example #7
    def yfi(self, identifier, options):
        # Map the requested range to a valid yahooquery interval
        # ("1mo" and "1wk", not "1m"/"1w", which would mean minutes).
        convert = {"daily": "1d", "monthly": "1mo", "weekly": "1wk"}
        ticker = identifier.split(".yfi")[0]
        stockObject = Ticker(ticker)
        history = stockObject.history(period="max", interval=convert[options["range"]])
        return history
Example #8
def download_prices(stocks_table,
                    interval='1d',
                    default_min_date='2016-01-01'):
    """
    Function to run prices on a specific list of stocks and store results in list
    """
    # Create list of symbols
    stocks_list = stocks_table.read_table_to_pandas()['symbol'].to_list()

    # Create instance of Tickers
    tickers = Ticker(stocks_list,
                     asynchronous=True,
                     formatted=True,
                     max_workers=32)

    # Run query and create instance with query
    price_query = tickers.history(start=default_min_date, interval=interval)

    # Check result data types
    # If it's a dataframe then all symbols in list are good
    if isinstance(price_query, pd.DataFrame):

        # Reset Index to make the symbol index another column
        stock_prices = price_query.reset_index()

        return stock_prices
Example #9
def min_variance(ticker_list, period='1y', interval='1d', cash=10000000):
    x = Ticker(ticker_list,
               retry=20,
               status_forcelist=[404, 429, 500, 502, 503, 504])
    data = x.history(period=period, interval=interval)
    if len(ticker_list) > 1:
        data = yf.download(ticker_list,
                           period='10y',
                           interval=interval,
                           group_by='ticker')
    new_data = []
    df = pd.DataFrame()
    weight = 1 / len(ticker_list)
    for i in ticker_list:
        stock_normal_ret = data['close'] / data.iloc[0]['close']
        df[i] = data['close']
        if len(ticker_list) > 1:
            stock_normal_ret = data[i]['close'] / data[i].iloc[0]['close']
            df[i] = data[i]['close']
        alloc = stock_normal_ret * weight
        balance = alloc * cash
        new_data.append(balance)

    mu = expected_returns.mean_historical_return(df)
    s = risk_models.sample_cov(df)
    ef = EfficientFrontier(mu, s)
    weights = ef.min_volatility()
    sharpe = ef.max_sharpe()
    cleaned_weights = ef.clean_weights()
    x = ef.portfolio_performance(verbose=False)
    return cleaned_weights, round(2.5 * x[2] / 15, 3)  # sharpe adjusted weight
Example #10
def adosc(portfolio_item, transaction_volume, buy_threshold_difference=2, sell_threshold_difference=2, period='5d',
          fastperiod=3, slowperiod=10):
    """
    strategy that trades based on reversals in the chaikin oscillator
    :param transaction_volume:
    :param portfolio_item:
    :param buy_threshold_difference:
    :param sell_threshold_difference:
    :param period:
    :param fastperiod:
    :param slowperiod:
    :return:
    """
    from yahooquery import Ticker
    from time import sleep
    from math import floor
    import talib
    from .TradeHistoryItem import log_trade
    from API.Help import pct_change, initialize_alpaca

    alpaca = initialize_alpaca()
    ticker = str(portfolio_item)
    yahoo_ticker = Ticker(ticker)
    history = yahoo_ticker.history(period=period, interval=portfolio_item.portfolio.get_trading_frequency())
    ticker_adosc = talib.ADOSC(high=history['high'], low=history['low'], close=history['close'],
                               volume=history['volume'], fastperiod=fastperiod, slowperiod=slowperiod)
    ticker_adosc_pct = pct_change(ticker_adosc)

    # Buy at the bottom of a dip in the Chaikin oscillator graph
    if ticker_adosc_pct[-2] < 0 and \
            abs(ticker_adosc_pct[-2] - ticker_adosc_pct[-1]) > buy_threshold_difference and \
            ticker_adosc_pct[-1] > 0 and portfolio_item.transaction_status != portfolio_item.BUY:
        if portfolio_item.transaction_status == 2:  # only buy to cover if stock has been shorted before
            print('buying to cover {} shares of {}'.format(transaction_volume, ticker))
            alpaca.submit_order(ticker, transaction_volume, 'buy', 'market', 'day')
            portfolio_item.buy_to_cover(transaction_volume=transaction_volume)
            log_trade(portfolio_item=portfolio_item, transaction_volume=transaction_volume, transaction_type=2)
            sleep(1)  # hopefully combats 403 alpaca error
        print('buying {} shares of {}'.format(transaction_volume, ticker))
        alpaca.submit_order(ticker, transaction_volume, 'buy', 'market', 'day')
        portfolio_item.buy(transaction_volume=transaction_volume)
        log_trade(portfolio_item=portfolio_item, transaction_volume=transaction_volume, transaction_type=0)

    # Sell at a peak in the Chaikin oscillator
    elif ticker_adosc_pct[-2] > 0 and \
            abs(ticker_adosc_pct[-2] - ticker_adosc_pct[-1]) > sell_threshold_difference and \
            ticker_adosc_pct[-1] < 0:
        if portfolio_item.transaction_status == portfolio_item.BUY:  # making sure stock exists before selling it
            print('selling {} shares of {}'.format(transaction_volume, ticker))
            alpaca.submit_order(ticker, transaction_volume, 'sell', 'market', 'day')
            portfolio_item.sell(transaction_volume=transaction_volume)
            log_trade(portfolio_item=portfolio_item, transaction_volume=transaction_volume, transaction_type=1)
            sleep(1)
        if portfolio_item.transaction_status != portfolio_item.SHORT:  # make sure we don't short twice in a row
            transaction_volume = floor(portfolio_item.cash_allocated / (
                        portfolio_item.ticker.price_now * 1.1))  # gives us a 10% buffer if the stock goes the other way
            print('shorting {} shares of {}'.format(transaction_volume, ticker))
            alpaca.submit_order(ticker, transaction_volume, 'sell', 'market', 'day')
            portfolio_item.short(transaction_volume=transaction_volume)
            log_trade(portfolio_item=portfolio_item, transaction_volume=transaction_volume, transaction_type=3)
Example #11
    def scrape(self, ChangePendingToVoid):

        tickers = Ticker(self.tickerName)
        df = tickers.history(period='2y', interval='1d')
        df = df.iloc[::-1]

        if os.path.exists('./database/' + self.tickerName):
            ##Change all previously unfinished analysis rows to 'void' on startup
            # if ChangePendingToVoid == True:
            #     analysisRead = pd.read_csv('./database/' + self.tickerName + '/analysis.csv', index_col= 0)
            #     for index,row in analysisRead.iterrows():
            #         if row['Outcome'] == 'Pending':
            #             analysisRead.loc[index, 'Outcome'] = 'Void (Prog Closed)'
            #     analysisRead.to_csv('./database/' + self.tickerName + '/analysis.csv')

            ## This saving then re-reading is necessary to prevent the buggy header issues
            df.to_csv('./database/' + self.tickerName + '/temp.csv')
            df = pd.read_csv('./database/' + self.tickerName + '/temp.csv')
            ###########################
            dfFirstTwoRows = df.head(2)
            dfSecondRow = dfFirstTwoRows.iloc[1:].head(1)
            dfDate = dfSecondRow['date'].values[0]

            database = pd.read_csv('./database/' + self.tickerName +
                                   '/query.csv',
                                   index_col=0)
            databaseFirstTwoRow = database.head(2)
            databaseSecondRow = databaseFirstTwoRow.iloc[1:].head(1)
            dbDate = databaseSecondRow['date'].values[0]

            if not (dbDate == dfDate):
                print("Updating " + self.tickerName + " at " +
                      datetime.fromtimestamp(time.time()).strftime('%H:%M'))
                df.to_csv('./database/' + self.tickerName + '/query.csv')
                self.stratCalc.inform(df, 1)
        else:
            print("Creating and Updating " + self.tickerName + " at " +
                  datetime.fromtimestamp(time.time()).strftime('%H:%M'))
            os.makedirs('./database/' + self.tickerName + '/')
            analysisColumnNames = [
                'Time Stamp', 'Strategy', 'Position', 'Amount', 'Entry',
                'Stop Loss', 'Take Profit', 'Confidence', 'Outcome', 'Profits',
                'Points Gained/Lost'
            ]
            analysisFrame = pd.DataFrame(columns=analysisColumnNames)
            analysisFrame.to_csv('./database/' + self.tickerName +
                                 '/analysis.csv')
            tradeColumnNames = [
                'Time Stamp', 'Position', 'Amount', 'Entry', 'Stop Loss',
                'Target', 'Confidence', 'Leverage', 'Outcome', 'Profits'
            ]
            tradeFrame = pd.DataFrame(columns=tradeColumnNames)
            tradeFrame.to_csv('./database/' + self.tickerName + '/trades.csv')
            df.to_csv('./database/' + self.tickerName + '/query.csv')
            df.to_csv('./database/' + self.tickerName + '/temp.csv')
            df = pd.read_csv('./database/' + self.tickerName + '/temp.csv')
            self.stratCalc.inform(df, 1)

        pass
Example #12
def fusao_tres_candles(ticker):  # merges the last three candles into one
    ativo = Ticker(ticker)  # yahooquery Ticker; the input is the ticker symbol
    hist = ativo.history('5d', '1d')  # pandas DataFrame with the ticker's quotes for the last 5 trading days
    hist_low_5d = hist["low"]  # lows of the last 5 sessions
    minimo = hist_low_5d.tail(3).min()  # lowest low of the last three days
    fechamento = hist["close"].tail(1).max()  # close of the most recent session
    fusao = [minimo, fechamento]  # the last three candles merged into one
    return fusao
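A small usage sketch for the function above, assuming yahooquery is installed; "PETR4.SA" is only a placeholder symbol:

low, close = fusao_tres_candles('PETR4.SA')
print(f'merged candle -> low: {low}, close: {close}')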
Example #13
def display_value(n_clicks, symbol: str):
    if n_clicks is None:
        raise PreventUpdate
    if symbol is None:
        raise PreventUpdate
    else:
        symbol = symbol.upper().strip()
        ticker = symbol + ".NS"
        yTicker = Ticker(ticker)
        yearly_pricing_data = yTicker.history(period='1y',
                                              interval='1d').loc[ticker]
        yearly_pricing_data = yearly_pricing_data.reset_index()
        pricing = yTicker.price[ticker]
        sTicker = STicker(symbol=symbol)
        yt_asset_profile = yTicker.asset_profile[ticker]
        moneycontrol_url = get_moneycontrol_url(symbol=symbol)
        moneycontrol_data = get_moneycontrol_data(moneycontrol_url)
        key_stats = sTicker.get_key_stats()
        return html.Div([
            dbc.Row(
                dbc.Container([
                    html.H1(sTicker.get_company_name(), className="display-3"),
                    get_links(sTicker=sTicker,
                              moneycontrol_url=moneycontrol_url),
                    html.H4(f"Industry : {yt_asset_profile['industry']}"),
                    html.H4(f"Sector : {yt_asset_profile['sector']}"),
                    html.Hr(className="my-2"),
                    dbc.Row([
                        dbc.Col([
                            dbc.Row(html.Br()),
                            get_ohlc_data(pricing=pricing),
                            dbc.Row(html.Br()),
                            dbc.Row(
                                dbc.Col(
                                    dcc.Graph(figure=get_ytd_chart(
                                        yearly_pricing_data)))),
                            dbc.Row(html.Br()),
                            dbc.Row(get_ranges(pricing, moneycontrol_data),
                                    justify='center')
                        ]),
                        dbc.Col([
                            dbc.Row(get_company_description(sTicker=sTicker)),
                            dbc.Row(html.Br()),
                            dbc.Row(
                                get_key_stats(key_stats, yTicker, ticker,
                                              pricing)),
                            dbc.Row(html.Br()),
                        ])
                    ])
                ],
                              fluid=True)),
            dbc.Row(dbc.Container(html.Div(), style={'height': '3rem'})),
            dbc.Row(
                dbc.Container(get_tables(sTicker),
                              fluid=True,
                              style={'height': '50rem'}))
        ])
Example #14
def hist_price(symbol, start=None, end=None, interval='1d'):
    if end is None and start is None:
        raise ValueError('Requires a value for start or end')
    if start is None:
        raise ValueError('Requires a value for start')
    stockyq = Ticker(symbol)
    price = stockyq.history(interval=interval, start=start, end=end)
    #price.to_excel("price.xlsx")
    return price
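A minimal usage sketch for hist_price, assuming yahooquery is installed; the symbol and dates are placeholders:

prices = hist_price('AAPL', start='2023-01-01', end='2023-06-30', interval='1d')
print(prices.tail())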
Example #15
def get_historical(exchange='SGX', start_year=2018, interval='1d'):
    '''
    Query latest historical prices of stocks.    
    Parameters:
        exchange      : Short name for stock exchange. Default value is SGX
        start_year    : Reference start year. Default value is 2018
        interval      : Reference interval period. Default value is daily
    '''
    # Initialise parameter #
    ## Tickers ##
    filepath = re.split(exchange, download_path)[0]
    stocks = pd.read_excel(filepath + exchange + '_TICKERS.xlsx')
    target_tickers = list(stocks['TICKER'])

    # Get historical prices - Default end date is now #
    tickers = Ticker(target_tickers)
    if interval == '1d':
        historical = tickers.history(start=dt.datetime(start_year, 1, 1),
                                     end=dt.datetime.now())
    else:
        first_recent = get_working_day()
        second_recent = first_recent - dt.timedelta(7)
        historical = tickers.history(interval=interval,
                                     start=second_recent,
                                     end=first_recent)

    # Access each ticker and output data #
    for target_ticker in target_tickers:
        target = historical[target_ticker]
        company = stocks.loc[stocks['TICKER'] == target_ticker,
                             'COMPANY'].iloc[0]
        try:
            # target.index = [timestamp.date() for timestamp in list(target.index)]
            target.index.name = 'date'
            target.sort_index(ascending=False, inplace=True)
            if interval == '1d':
                target.to_csv(download_path + '\\' + exchange + \
                              r'\Prices\Daily\%s_%s.csv' % (target_ticker, company))
            else:
                target.dropna(inplace=True)
                target.to_csv(download_path + '\\' + exchange + \
                              r'\Prices\Minute\%s_%s.csv' % (target_ticker, company))
        except:
            pass
Example #16
def gerar_valor_acao(codigo_acao):
    ticker_acao = Ticker(codigo_acao)
    resultado = ticker_acao.history(period="7d",  interval="5m")
    resultado.reset_index(inplace=True)
    index = random.randrange(0, resultado.shape[0], 1)
    resultado = resultado.iloc[[index]]
    retorno = {'codigo': codigo_acao,
               'horario': datetime.now(pytz.timezone('America/Sao_Paulo')).isoformat(),
               'valor': resultado['close'].values[0]}
    return json.dumps(retorno), 200
Example #17
class Stock(object):
    
    def __init__(self, ticker, fundamental_frequency='q'):
        self.ticker = ticker
        self.zacks_earnings_cal = get_zacks_earnings_calendar(ticker)
        self.yquery = Ticker(ticker)
        self.returns_data = self.yquery.history(adj_ohlc=True,  
                                 start=(datetime.today() \
                                        -timedelta(days = 365*3)
                                        ).strftime('%Y-%m-%d'), 
                                 end = datetime.today().strftime('%Y-%m-%d')
                                 ).droplevel(0).rename(columns={'high': 'PriceHigh',
                                                                'volume': 'Volume',
                                                                'open': 'PriceOpen',
                                                                'low': 'PriceLow',
                                                                'close': 'PriceClose' 
                                                                })
        self.financial_data = self.get_all_financial_data(fundamental_frequency=fundamental_frequency)
        try:
            self.ratings_data = get_finviz_fundamentals_ratings(ticker)
        except:
            self.ratings_data = None
        try:
            self.insider_trading_data = get_finviz_inside_trading(ticker)
        except:
            self.insider_trading_data = None
        try:
            self.news_data = get_finviz_news(ticker)
        except:
            self.news_data = None
            
            
    def get_all_financial_data(self, fundamental_frequency='q'):
        df = self.yquery.all_financial_data(frequency=fundamental_frequency)
        df['Quarter'] = df['asOfDate'].apply(lambda x: as_of_date_to_quarter(x))
        df = pd.merge(df, self.zacks_earnings_cal, how='left', on='Quarter')\
            .set_index('ReleaseDate').drop(['asOfDate', 'periodType', 'Quarter'], axis=1)
        return df
        
        
        
    def get_fundamental_ts(self, item):
        ts = self.financial_data[item]
        date_idx = pd.date_range(self.returns_data.index[0], self.returns_data.index[-1])
        ts = ts.reindex(index=date_idx).shift(1).ffill()
        ts = ts.loc[self.returns_data.index]
        return ts
        
    def __getitem__(self, item):
        if item in self.returns_data.columns:
            return self.returns_data[item]
        elif item in self.financial_data.columns:
            return self.get_fundamental_ts(item)
        else:
            raise KeyError('Item not Found!')
Example #18
def train(ticket):
    # ts = TimeSeries(key=ALPHA_VANTAGE_API_KEY, output_format='pandas')

    # df, data_info = ts.get_intraday(ticket, outputsize='full', interval='5min')

    # df = df[::-1]
    ticket = ticket.lower()
    if not os.path.exists('cache.json'):
        cache = []
    else:
        with open('cache.json') as file:
            cache = json.load(file)

    for item in cache:
        if item['ticket'] == ticket and (
                datetime.now() - parser.parse(item['time'])).seconds < 3600:
            return

    ticker = Ticker(ticket, asynchronous=True)
    df = ticker.history(period='5d', interval='1m')

    data = df.filter(['close'])
    print(data)
    dataset = data.values
    training_data_len = math.ceil(len(dataset) * .8)

    scaler = MinMaxScaler(feature_range=(0, 1))
    scaled_data = scaler.fit_transform(dataset)

    train_data = scaled_data[0:training_data_len:]
    x_train = []
    y_train = []

    for i in range(60, len(train_data)):
        x_train.append(train_data[i - 60:i, 0])
        y_train.append(train_data[i, 0])

    x_train, y_train = np.array(x_train), np.array(y_train)
    x_train = np.reshape(x_train, (x_train.shape[0], x_train.shape[1], 1))

    model = Sequential()
    model.add(
        LSTM(50, return_sequences=True, input_shape=(x_train.shape[1], 1)))
    model.add(LSTM(50, return_sequences=False))
    model.add(Dense(25))
    model.add(Dense(1))

    model.compile(optimizer='adam', loss='mean_squared_error')

    model.fit(x_train, y_train, batch_size=1, epochs=1)
    model.save(f'/Users/maximsurovtsev/Stocks prediction/{ticket}')

    with open('cache.json', 'w') as file:
        cache.append({'ticket': ticket, 'time': str(datetime.now())})
        json.dump(cache, file, indent=4)
Example #19
def get_stock(symbol):
    try:
        symbol = symbol + '.SA'
        ticker = Ticker(symbol)
        df = ticker.history(period='1mo', interval='1d')
        value = df.close[-1]
        value_ = df.close[-2]
        percent_day = ((value - value_) / value_) * 100
        return value, percent_day
    except AttributeError:
        raise AttributeError(ticker.financial_data)
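A hedged usage sketch for get_stock above, assuming yahooquery is installed; "PETR4" is only a placeholder (the function appends ".SA" itself):

value, percent_day = get_stock('PETR4')
print(f'last close: {value:.2f} ({percent_day:+.2f}% on the day)')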
Example #20
    def get_ticker_data(self, symbol):
        """Returns the stock symbol ticker data, either from YAHOO or from local cache."""
        now = datetime.datetime.now()
        x = self.data_df2y_cache.get(symbol, None)
        cache_time = self.data_cache_time.get(symbol, 99)
        if cache_time != now.hour or x is None:

            # Download from YAHOO
            self.data_cache_time[symbol] = now.hour

            t = Ticker(symbol, formatted=True)
            valid = not "Quote not found" in t.price[symbol]
            if valid:
                df2y = t.history(period='2y', interval='1d')
                df5y = t.history(period='5y', interval='1d')
                df3m = t.history(period='3mo', interval='1d')
                # 1d interval for 5y is overkill, but needed otherwise the plot will differ from the 2y plot due
                # to quick variations.
                price = float(t.price[list(
                    t.price)[0]]['regularMarketPrice']['fmt'].replace(',', ''))
                short_name = t.quote_type[list(t.quote_type)[0]]['shortName']
            else:
                df2y = None
                df5y = None
                df3m = None
                price = 0
                short_name = "Unknown Ticker"

            self.data_df2y_cache[symbol] = df2y
            self.data_df5y_cache[symbol] = df5y
            self.data_df3m_cache[symbol] = df3m
            self.data_price_cache[symbol] = price
            self.short_name_cache[symbol] = short_name
        else:
            # Get from Cache
            df2y = self.data_df2y_cache.get(symbol)
            df5y = self.data_df5y_cache.get(symbol)
            df3m = self.data_df3m_cache.get(symbol)
            price = self.data_price_cache.get(symbol)

        return float(price), df3m, df5y, df2y
Example #21
def get_stock_history(ticker, period, interval):
    """
    Input example: {"stocks":["GOOG","MSFT"], "period":"1y", "interval":"1wk"}
    """
    ticker = Ticker(ticker)
    history = ticker.history(period=period, interval=interval)
    dates = history.index.get_level_values(1).tolist()
    close = history['close'].values.tolist()
    open = history['open'].values.tolist()
    high = history['high'].values.tolist()
    low = history['low'].values.tolist()
    result = {'dates': dates, "close": close, "open": open, "high": high, "low": low}
    return jsonify(result)
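For reference, a standalone sketch of the index handling the endpoint above relies on: yahooquery's history() returns a DataFrame indexed by (symbol, date), so level 1 holds the dates. Assumes yahooquery is installed; "AAPL" is only a placeholder symbol:

from yahooquery import Ticker

history = Ticker('AAPL').history(period='1mo', interval='1d')
dates = history.index.get_level_values(1).tolist()  # level 0 = symbol, level 1 = date
closes = history['close'].values.tolist()
print(dates[:3], closes[:3])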
Example #22
def train(msg):
    comm, ticket = msg.text.split()
    ticker = Ticker(ticket, asynchronous=True)
    df = ticker.history(period='5d', interval='1m')
    if df[ticket] == 'No data found, symbol may be delisted':
        bot.send_message(msg.chat.id, "Couldn't find that one :(")
    else:
        bot.send_message(msg.chat.id, 'This will take a little while')
        net.train(ticket)
        bot.send_message(
            msg.chat.id,
            f'The model for {ticket} has been trained and will be available for the next hour.')
        price = net.magic(ticket)
        bot.send_message(
            msg.chat.id,
            f'Predicted closing price of the next candle: {price}')
Example #23
def get_papel_values(papel_alias):

    ticker = Ticker(papel_alias)

    summary = ticker.summary_detail  # avoid shadowing the built-in sum()
    if summary[papel_alias] != "No fundamentals data found for any of the summaryTypes=summaryDetail":
        end = datetime.now().strftime('%Y-%m-%d')
        start = (datetime.now() - timedelta(days=180)).strftime('%Y-%m-%d')
        ticker_hist = ticker.history(start=start, end=end)
        if isinstance(ticker_hist, pd.DataFrame):
            ticker_hist.sort_values(by='date', ascending=False, inplace=True)
        else:
            pass
    else:
        ticker_hist = 0

    return ticker_hist
Example #24
def max_sharpe(ticker, period='1y', interval='1d'):
    x = Ticker(ticker,
               retry=20,
               status_forcelist=[404, 429, 500, 502, 503, 504])
    data = x.history(period=period, interval=interval)
    df = pd.DataFrame()
    df[ticker] = data['close']
    mu = expected_returns.mean_historical_return(df)
    s = risk_models.sample_cov(df)
    ef = EfficientFrontier(mu, s)
    weights = ef.max_sharpe()
    cleaned_weights = ef.clean_weights()
    x = ef.portfolio_performance(verbose=False)
    if x is None:
        return 0
    else:
        return round(2.5 * x[2] / 15, 3)  # sharpe adjusted weight
Example #25
    def update(self):
        stratCalc = StrategyCalculator(self.tickerName)
        tickers = Ticker(self.tickerName)
        df = tickers.history(period='max', interval='1d')
        # df = df.iloc[::-1]
        endOfBacktest = False

        initialbool = False
        while endOfBacktest == False:
            if os.path.exists('./database/' + self.tickerName):
                # Write and re-read temp.csv only on the first pass; the flag was
                # previously reset inside the loop, so this ran every iteration.
                if initialbool == False:
                    df.to_csv('./database/' + self.tickerName + '/temp.csv')
                    df = pd.read_csv('./database/' + self.tickerName +
                                     '/temp.csv',
                                     index_col=0)
                    initialbool = True
                inputt = df
                inputt = inputt.head(201)
                inputt = inputt.iloc[::-1]
                stratCalc.inform(df=inputt)
                df = df.iloc[1:]
                if len(df.index) < 201:
                    endOfBacktest = True
            else:
                print("Creating and Updating " + self.tickerName + " at " +
                      datetime.fromtimestamp(time.time()).strftime('%H:%M'))
                os.makedirs('./database/' + self.tickerName + '/')
                analysisColumnNames = [
                    'Time Stamp', 'Strategy', 'Position', 'Amount', 'Entry',
                    'Stop Loss', 'Take Profit', 'Confidence', 'Outcome',
                    'Profits', 'Points Gained/Lost'
                ]
                analysisFrame = pd.DataFrame(columns=analysisColumnNames)
                analysisFrame.to_csv('./database/' + self.tickerName +
                                     '/analysis.csv')
                tradeColumnNames = [
                    'Time Stamp', 'Position', 'Amount', 'Entry', 'Stop Loss',
                    'Target', 'Confidence', 'Leverage', 'Outcome', 'Profits'
                ]
                tradeFrame = pd.DataFrame(columns=tradeColumnNames)
                tradeFrame.to_csv('./database/' + self.tickerName +
                                  '/trades.csv')
                df.to_csv('./database/' + self.tickerName + '/temp.csv')
                df = pd.read_csv('./database/' + self.tickerName + '/temp.csv')
Example #26
    def scrape(self):

        tickers = Ticker(self.tickerName)
        df = tickers.history(period='7d', interval='1m')
        df = df.iloc[::-1]

        if os.path.exists('./database/' + self.tickerName):

            ## This saving then re-reading is necessary to prevent the buggy header issues
            df.to_csv('./database/' + self.tickerName + '/temp.csv')
            df = pd.read_csv('./database/' + self.tickerName + '/temp.csv')
            ###########################
            dfFirstTwoRows = df.head(2)
            dfSecondRow = dfFirstTwoRows.iloc[1:].head(1)
            dfDate = dfSecondRow['date'].values[0]
            print("DfDate = " + str(dfDate))

            database = pd.read_csv('./database/' + self.tickerName +
                                   '/query.csv',
                                   index_col=0)
            databaseFirstTwoRow = database.head(2)
            databaseSecondRow = databaseFirstTwoRow.iloc[1:].head(1)
            dbDate = databaseSecondRow['date'].values[0]
            print("DbDate = " + dbDate)

            if not (dbDate == dfDate):
                print("Updating " + self.tickerName + " at " +
                      datetime.fromtimestamp(time.time()).strftime('%H:%M'))
                df.to_csv('./database/' + self.tickerName + '/query.csv')
                self.stratCalc.inform(df.iloc[1:])
        else:
            print("Creating and Updating " + self.tickerName + " at " +
                  datetime.fromtimestamp(time.time()).strftime('%H:%M'))
            os.makedirs('./database/' + self.tickerName + '/')
            columnNames = [
                'Time Stamp', 'Strategy', 'Position', 'Stop Loss',
                'Take Profit', 'Outcome', 'Points Gained/Lost'
            ]
            frame = pd.DataFrame(columns=columnNames)
            frame.to_csv('./database/' + self.tickerName + '/analysis.csv')
            df.to_csv('./database/' + self.tickerName + '/query.csv')
            df.to_csv('./database/' + self.tickerName + '/temp.csv')
            df = pd.read_csv('./database/' + self.tickerName + '/temp.csv')
            self.stratCalc.inform(df.iloc[1:])
Example #27
    def get_stock_data(symbol, indicators=None):
        ticker = Ticker(symbol)
        df_ = ticker.history(period='2y', interval='1d')

        df_ = df_[['high', 'open', 'low', 'adjclose', 'volume']]
        df_.rename(columns={'adjclose': 'close'}, inplace=True)

        basic_cols = ['high', 'open', 'low', 'close', 'volume']
        if indicators is not None and 'ttm-squeeze' in indicators:
            df_ = ttm_squeeze_indicators(df_)
            basic_cols += [
                'lower_band',
                'upper_band',
                'lower_keltner',
                'upper_keltner',
                'linreg',
            ]

        return df_[basic_cols]
Example #28
def downloadDaily(ticker_symbols, cnxn, engine):

    cursor = cnxn.cursor()
    cursor.execute('EXEC spProcessDailyData')
    cursor.commit()
    cursor.close()

    for symbol in ticker_symbols:
        ticker = Ticker(symbol)
        daily = ticker.history(period='1y', interval='1d')
        print(f'Downloading {symbol}.')

        # for a single-symbol download, add symbol and date columns and use them as the index
        daily['symbol'] = symbol
        daily.reset_index(inplace=True)
        daily.rename(columns={'index': 'date'}, inplace=True)
        daily.set_index(['symbol', 'date'], inplace=True)

        daily.to_sql('tmpDaily',
                     engine,
                     if_exists='append',
                     schema='dbo',
                     index=True)

        #print('Saved to table daily')

        df_options = ticker.option_chain

        # if it's a string, there is no option chain for this symbol

        if not isinstance(df_options, str):
            print(f'Downloading option chain {symbol}.')
            df_options['createDate'] = datetime.date.today()
            df_options.to_sql('Option',
                              engine,
                              if_exists='append',
                              schema='dbo',
                              index=True)

    cursor = cnxn.cursor()
    cursor.execute('EXEC spProcessDailyData')
    cursor.commit()
    cursor.close()
Example #29
def get_all(symbols):
    # %%

    print(len(symbols))
    # %%
    all_tickers = Ticker(symbols)
    df2 = all_tickers.history(period='2y', interval='1d')

    if isinstance(df2, pd.core.frame.DataFrame) and not df2.empty:
        pg_db.df_to_db(df2.reset_index()[DB_COLUMNS],
                       name='temp_yahoo_stock_data',
                       if_exists='replace',
                       index=False)

    # %%
    else:
        final_df = pd.DataFrame()
        for idx, (symbol, dataframe) in enumerate(df2.items(), start=1):
            print(symbol)
            if isinstance(dataframe,
                          pd.core.frame.DataFrame) and not dataframe.empty:
                print(dataframe.head())
                dataframe['symbol'] = symbol
                dataframe.reset_index(inplace=True)
                dataframe.rename(columns={'index': 'date'}, inplace=True)

                final_df = pd.concat([final_df, dataframe])
            if idx % 100 == 0 or idx == len(df2):
                if_exists = 'replace' if idx == 100 else 'append'
                try:
                    if if_exists == 'replace' and 'splits' not in final_df.columns:
                        final_df['splits'] = None

                    pg_db.df_to_db(final_df[DB_COLUMNS],
                                   name='temp_yahoo_stock_data',
                                   if_exists=if_exists,
                                   index=False)

                except AttributeError:
                    print(final_df)

            print(f'completed {symbol}, {idx} of {len(df2)}')
Example #30
def magic(ticket):
    ticket = ticket.lower()
    model = load_model(f'/Users/maximsurovtsev/Stocks prediction/{ticket}')
    # ts = TimeSeries(key=ALPHA_VANTAGE_API_KEY, output_format='pandas')
    ticker = Ticker(ticket, asynchronous=True)
    new_data = ticker.history(period='5d', interval='1m')

    # new_data, data_info = ts.get_intraday(ticket, outputsize='full', interval='5min')

    # new_data = new_data[::-1].filter(['4. close'])
    new_data = new_data.filter(['close'])
    last_60 = new_data[-60:].values
    scaler = MinMaxScaler(feature_range=(0, 1))
    scaler.fit_transform(new_data)
    last_60 = scaler.transform(last_60)

    scaled = np.array([last_60])
    scaled = np.reshape(scaled, (scaled.shape[0], scaled.shape[1], 1))
    pred_price = scaler.inverse_transform(model.predict(scaled))
    return round(float(pred_price[0][0]), 3)