def generate_stock_data(ticker: str, source: str, start: str,
                        end: str) -> Tuple[str, pd.DataFrame]:
    """Fetch OHLCV bars for *ticker* from a pandas-datareader source.

    Args:
        ticker: Stock symbol to fetch.
        source: pandas-datareader data source name (e.g. ``'yahoo'``).
        start: Start date string, passed straight through to DataReader.
        end: End date string, passed straight through to DataReader.

    Returns:
        Tuple of (ticker, DataFrame of bars).
    """
    # DataReader accepts date strings directly, so the previously
    # commented-out manual string->datetime parsing was dead code and
    # has been removed.
    bars = DataReader(ticker, source, start, end)

    return ticker, bars
def get_close(ticker_list):
    """Fetch the 'Close' price series for every ticker in *ticker_list*.

    Relies on the module-level ``start`` and ``end`` dates. Tickers that
    can no longer be fetched (e.g. acquired or spun-off companies) are
    skipped and reported rather than aborting the whole run.

    Returns:
        Tuple of (dict mapping ticker -> named Close Series, list of
        tickers that could not be fetched).
    """
    from pandas_datareader._utils import RemoteDataError

    close = {}
    removed_tickers = []

    for ticker in ticker_list:
        try:
            data = DataReader(ticker, 'yahoo', start, end, retry_count=5)
            ticker_close = data['Close'].copy()
            ticker_close.name = ticker
            close[ticker] = ticker_close
        except (KeyError, RemoteDataError):
            # Ticker was acquired / spun off or data is otherwise
            # unavailable: record it and continue. The original wrapped
            # this append in its own try/except RemoteDataError, but a
            # list append cannot raise that, so the nested handler was
            # dead code and has been removed.
            removed_tickers.append(ticker)

    return close, removed_tickers
# NOTE(review): ``from __future__ import division`` must be the first
# statement of a module; it appears mid-file here, presumably because
# several snippets were concatenated -- confirm placement.
from __future__ import division
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from pandas import Series,DataFrame
from pandas_datareader import DataReader
from datetime import datetime

sns.set_style('whitegrid')
# Tickers to analyse and the one-year window to download.
tech_list = ['AAPL','GOOG','MSFT','AMZN']
end = '2018-01-01'
start = '2017-01-01'

# Download each ticker's bars and bind them as module-level globals
# (AAPL, GOOG, ...). Using globals() this way is sloppy but simple.
for stock in tech_list:
    globals()[stock] = DataReader(stock,data_source='yahoo',start=start,end=end)

AAPL.describe()
# Simple moving averages of the adjusted close over several windows.
ma_day = [10,20,50]
for ma in ma_day:
    column_name = "MA for %s days" % (str(ma))
    AAPL[column_name] = AAPL['Adj Close'].rolling(ma).mean()

# Plot the price together with its moving averages on one set of axes.
AAPL[['Adj Close','MA for 10 days','MA for 20 days','MA for 50 days']].plot(subplots=False,figsize=(10,4))
plt.show()
Exemple #4
0
def stock_predictor(stock):
    """Train a small LSTM on ~10 years of daily closes for *stock*,
    plot train/validation predictions, save the figure as ``<stock>.png``
    and return the predicted next-day close.

    Returns:
        dict with a single ``'prediction'`` key whose value is the
        JSON-encoded string of the rounded predicted price.
    """
    start_date = datetime.datetime.now() - datetime.timedelta(days=3650)
    end_date = datetime.date.today()

    df = DataReader(stock, "yahoo", start_date, end_date)
    data = df.filter(['Close'])
    dataset = data.values
    # Use 80% of the history for training, the remainder for validation.
    train_data_len = math.ceil(len(dataset) * .8)

    # Scale closes into [0, 1] for the LSTM.
    scaler = MinMaxScaler(feature_range=(0, 1))
    scaled_data = scaler.fit_transform(dataset)

    # Build sliding 60-day windows: each sample is the previous 60
    # scaled closes, the label is the following close.
    train_data = scaled_data[0:train_data_len, :]
    x_train = []
    y_train = []
    for i in range(60, len(train_data)):
        x_train.append(train_data[i - 60:i, 0])
        y_train.append(train_data[i, 0])

    x_train, y_train = np.array(x_train), np.array(y_train)
    # Keras LSTMs expect (samples, timesteps, features).
    x_train = np.reshape(x_train, (x_train.shape[0], x_train.shape[1], 1))

    # Two stacked LSTM layers followed by two dense layers.
    model = Sequential()
    model.add(LSTM(50, return_sequences=True, input_shape=(x_train.shape[1], 1)))
    model.add(LSTM(50, return_sequences=False))
    model.add(Dense(25))
    model.add(Dense(1))

    model.compile(optimizer='adam', loss='mean_squared_error')
    model.fit(x_train, y_train, batch_size=1, epochs=1)

    # Test windows reach back 60 days into the training span so the
    # first validation sample has a full lookback.
    test_data = scaled_data[train_data_len - 60:, :]
    x_test = []
    y_test = dataset[train_data_len:, :]
    for i in range(60, len(test_data)):
        x_test.append(test_data[i - 60:i, 0])

    x_test = np.array(x_test)
    x_test = np.reshape(x_test, (x_test.shape[0], x_test.shape[1], 1))

    # Predict and map back from [0, 1] to price space.
    predictions = model.predict(x_test)
    predictions = scaler.inverse_transform(predictions)

    # Root mean squared error on the validation span (computed but not
    # returned, matching the original behaviour).
    rmse = np.sqrt(np.mean((predictions - y_test) ** 2))

    # Plot training closes against validation closes and predictions.
    train = data[:train_data_len]
    # .copy() avoids pandas' SettingWithCopyWarning when adding the
    # 'Predictions' column to a slice of ``data``.
    valid = data[train_data_len:].copy()
    valid['Predictions'] = predictions
    plt.figure(figsize=(16, 8))
    plt.title('Model for {}'.format(stock))
    plt.xlabel('Date', fontsize=16)
    plt.ylabel('Close Price', fontsize=16)
    plt.plot(train['Close'])
    plt.plot(valid[['Close', 'Predictions']])
    plt.legend(['Train', 'Val', 'Prediction'], loc='lower right')

    # Save BEFORE plt.show(): show() hands the figure to the GUI event
    # loop and leaves the current figure empty afterwards, so the
    # original savefig-after-show produced a blank PNG.
    plt.savefig('{}.png'.format(stock))
    plt.show()

    # Predict the next trading day from the final 60 closes.
    last_60day = data[-60:].values
    last_60day_scaled = scaler.transform(last_60day)
    xx_test = np.array([last_60day_scaled])
    xx_test = np.reshape(xx_test, (xx_test.shape[0], xx_test.shape[1], 1))
    pred = model.predict(xx_test)
    pred = scaler.inverse_transform(pred)
    pred = round(pred[0][0], 2)

    pred = json.dumps(str(pred))
    return {'prediction': pred}
# -*- coding: utf-8 -*-
"""
Created on Wed Feb 19 13:20:01 2020

@author: satake
"""

import pandas as pd
from pandas import Series, DataFrame
# NOTE(review): numpy is aliased as ``npf`` rather than the conventional
# ``np`` -- confirm this alias is intentional before relying on it.
import numpy as npf

import matplotlib.pyplot as plt
import seaborn as sns
# %matplotlib inline

from pandas_datareader import DataReader
from datetime import datetime

# Tech tickers to analyse over the trailing year.
tech_list = ['AAPL', 'GOOG', 'MSFT']

end = datetime.now()
start = datetime(end.year - 1, end.month, end.day)

# Download each ticker and bind it as a module-level global (AAPL, ...).
for stock in tech_list:
    globals()[stock] = DataReader(stock, 'yahoo', start, end)

# Plot Apple's adjusted close over the year.
AAPL['Adj Close'].plot(legend=True, figsize=(10, 4))
Exemple #6
0
from pandas_datareader import data
import matplotlib.pyplot as plt
import pandas as pd
import datetime

# In[3]:

from pandas_datareader import DataReader
from datetime import datetime
import matplotlib.pyplot as plt

# In[38]:

# Fetch three years of FTSE 100 index data from Yahoo.
ftse_data = DataReader('^FTSE', 'yahoo', datetime(2015, 2, 20),
                       datetime(2018, 2, 20))
#print(ftse_data['Adj Close'])

# In[51]:

# Persist the index history locally for offline use.
ftse_data.to_csv("./FTSE.csv")

# In[39]:

#fetch the 30 companies data from Yahoo

#Vodafone Group Plc (VOD.L)
VOD_L_data = DataReader('VOD.L', 'yahoo', datetime(2015, 2, 20),
                        datetime(2018, 2, 20))

# In[40]:
Exemple #7
0
# Load pre-screened stocks, keep the best by Sharpe ratio, and extract
# their company identifiers for download.
data = pd.read_csv('/Users/shashank/Downloads/Code/top_stocks_data(1).csv')

#data = data.set_index(['Company'])
data = data.sort_values('Sharpe Ratio', ascending = True)
# ``n`` is expected to be defined earlier in the file -- TODO confirm.
data = data.tail(n)
data = data.reset_index()
data = data['Company']
data = data.values.tolist()

stocks = data
# Self-assignments: ``start``/``end`` are presumably set earlier in the
# file -- these lines are no-ops.
start = start
end = end


# Daily close prices for the selected stocks.
df = DataReader(stocks, 'yahoo', start=start, end=end)['Close']
#print (df.tail())

# Daily simple returns.
returns = df.pct_change()

# Overlay each stock's daily return series on one figure.
plt.figure(figsize=(14, 7))
for c in returns.columns.values:
    plt.plot(returns.index, returns[c], lw=3, alpha=0.8,label=c)
plt.legend(loc='upper right', fontsize=12)
plt.ylabel('daily returns')

def portfolio_annualised_performance(weights, mean_returns, cov_matrix):
    """Annualise a portfolio's volatility and expected return.

    Assumes 252 trading days per year. *weights* and *mean_returns* are
    aligned 1-D arrays; *cov_matrix* is the asset-return covariance.

    Returns:
        Tuple of (annualised standard deviation, annualised return).
    """
    trading_days = 252
    annual_return = trading_days * np.sum(weights * mean_returns)
    portfolio_variance = weights.T @ cov_matrix @ weights
    annual_std = np.sqrt(portfolio_variance) * np.sqrt(trading_days)
    return annual_std, annual_return
Exemple #8
0
def run_stooq():
    """Download and pickle Stooq data for every symbol in the Index table."""
    for index_row in Index.select():
        frame = DataReader(index_row.symbol, 'stooq')
        to_pickle(frame, 'stooq', index_row.symbol)
        # Throttle requests so we do not hammer the Stooq endpoint.
        sleep(10)
Exemple #9
0
 def test_bulk_fetch(self):
     # Passing a list of ticker codes should raise NotImplementedError:
     # bulk fetching is not supported by this DataReader, and it must
     # fail loudly rather than silently fetching a single symbol.
     with pytest.raises(NotImplementedError):
         DataReader(["005930", "000660"])
Exemple #10
0
# Simple Monte Carlo simulation of stocks taking covariance into account
from pandas_datareader import DataReader
# NOTE(review): ``Panel`` was removed in pandas 0.25 and ``.ix`` in
# pandas 1.0 -- this script only runs against an old pandas version.
from pandas import Panel, DataFrame
from scipy.linalg import cholesky
import numpy as np
import matplotlib.pyplot as plt
from pandas import bdate_range   # business days

symbols = ['AAPL', 'AMZN', 'GOOG']
# One DataReader call per symbol; pause=1 paces the requests.
data = dict((symbol, DataReader(symbol, "google", pause=1)) for symbol in symbols)
panel = Panel(data).swapaxes('items', 'minor')
closing = panel['Close'].dropna()

# Daily log returns.
rets = np.log(closing / closing.shift(1)).dropna()
# TODO: Not covariance
# Upper Cholesky factor of the CORRELATION matrix (see TODO above),
# used below to induce co-movement in the simulated shocks.
upper_cholesky = cholesky(rets.corr(), lower=False)

n_days = 255  # Working days in a year
dates = bdate_range(start=closing.ix[-1].name, periods=n_days)
n_assets = len(symbols)
n_sims = 100
# dt = 1
dt = 1/n_days
# mu = rets.mean().values*n_days
mu = rets.mean().values
# Annualised per-asset volatility.
sigma = rets.std().values*np.sqrt(n_days)

# Independent normal shocks, one row per (day, simulation) pair.
rand_values = np.random.standard_normal(size = (n_days * n_sims, n_assets))
# Use the random values to generate values that satisfy the covariance
corr_values = rand_values.dot(upper_cholesky)*sigma
Exemple #11
0
from pandas_datareader import DataReader
from datetime import datetime
from datetime import date
import sys

# Download daily bars since 2012 for the symbol given on the command
# line and dump them to ../stock_data/<symbol>.csv. The NIFTY index
# ('^NSEI') is fetched verbatim; every other symbol gets the NSE '.NS'
# suffix. Using if/else (instead of the original two independent ifs)
# prevents an '^NSEI' run from also fetching the bogus '^NSEI.NS'
# symbol and overwriting its own CSV.
if sys.argv[1] == "^NSEI":
    ibm = DataReader("^NSEI", 'yahoo', datetime(2012, 1, 1), datetime.now())
else:
    ibm = DataReader(sys.argv[1] + ".NS", 'yahoo', datetime(2012, 1, 1),
                     datetime.now())

# Renamed from ``str``: the original shadowed the ``str`` builtin.
out_path = "../stock_data/" + sys.argv[1] + ".csv"
ibm.to_csv(out_path)
from pandas_datareader import DataReader
import numpy as np
import pandas as pd

# Grab one year of daily data for the stock (here MRK) and for the
# S&P-500 index. (The original comment said "5-year ... AAPL", which
# did not match the ticker and dates below.)
df = DataReader('MRK', 'yahoo', '2018-07-30', '2019-07-30')
dfb = DataReader('^GSPC', 'yahoo', '2018-07-30', '2019-07-30')

# create a time-series of monthly data points
rts = df.resample('M').last()
rbts = dfb.resample('M').last()
dfsm = pd.DataFrame(
    {
        's_adjclose': rts['Adj Close'],
        'b_adjclose': rbts['Adj Close']
    },
    index=rts.index)

# Monthly simple returns for the stock (s_) and the benchmark (b_).
dfsm[['s_returns','b_returns']] = dfsm[['s_adjclose','b_adjclose']]/\
    dfsm[['s_adjclose','b_adjclose']].shift(1) -1
dfsm = dfsm.dropna()
covmat = np.cov(dfsm["s_returns"], dfsm["b_returns"])

# CAPM beta: covariance of stock with market over market variance.
beta = covmat[0, 1] / covmat[1, 1]
# CAPM alpha: mean stock return not explained by beta exposure.
alpha = np.mean(dfsm["s_returns"]) - beta * np.mean(dfsm["b_returns"])

# r_squared     = 1. - SS_res/SS_tot
# Regression-line prediction of stock returns from benchmark returns.
ypred = alpha + beta * dfsm["b_returns"]
from pyramid.arima import auto_arima
from arch import arch_model
from pandas_datareader import DataReader
from datetime import datetime
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import rpy2.robjects.packages as rpackages
rugarch = rpackages.importr('rugarch')
import rpy2.robjects as robjects
from rpy2.robjects import pandas2ri

# In[230]:

# Fetching the S&P 500 adjusted close, Oct 2000 - Jan 2019.
gspc = DataReader("^GSPC", "yahoo", datetime(2000, 10, 1),
                  datetime(2019, 1, 1))['Adj Close']

# In[229]:

# Daily log returns.
gspc_returns = np.log(gspc).diff().dropna()

# In[33]:

# window_length   => length of the rolling window used to forecast the next point
# forecast_length => number of points we will forecast using previous data
window_length = 500
forecast_length = len(gspc_returns) - window_length

# In[ ]:
Exemple #14
0
def predictData(stock, days):
    """Predict near-term closes for *stock* with linear regression,
    compute risk statistics (beta/alpha/Sharpe/volatility/momentum)
    against the S&P 500, and send an alert when the 5-day prediction is
    bullish with high model confidence.

    Side effects: appends to the module-level ``stock_list``,
    ``predictions``, ``confidence`` and ``error_list`` accumulators,
    writes ``Exports/<stock>_Export.csv`` and may call ``sendMessage``.
    """
    stock_list.append(stock)

    # One year of daily history ending now.
    start = datetime.datetime.now() - datetime.timedelta(days=365)
    end = datetime.datetime.now()

    df = DataReader(stock, 'yahoo', start, end)

    # Always export the raw download. The original only wrote the CSV
    # (and pre-shifted/dropped rows) on the run that created ./Exports,
    # so every later run silently skipped the export.
    if not os.path.exists('./Exports'):
        os.mkdir("Exports")
    csv_name = 'Exports/' + stock + '_Export.csv'
    df.to_csv(csv_name)

    forecast_time = int(days)

    # The regression target is the next day's close.
    df['Prediction'] = df['Close'].shift(-1)
    df1 = df['Prediction']
    array = np.array(df['Close'])
    array1 = np.array(df1)
    array = array.reshape(-1, 1)
    array1 = array1.reshape(-1, 1)

    X = array
    Y = array1
    X = np.nan_to_num(X)
    Y = np.nan_to_num(Y)
    X = preprocessing.scale(X)
    X_prediction = X[-forecast_time:]
    X_train, X_test, Y_train, Y_test = train_test_split(X,
                                                        Y,
                                                        train_size=0.8,
                                                        test_size=0.2)

    # Fit once and reuse for both prediction and confidence (the
    # original fitted two identical LinearRegression models on the
    # same data, which is redundant).
    clf = LinearRegression()
    clf.fit(X_train, Y_train)
    prediction = clf.predict(X_prediction)
    prediction = np.around(prediction, decimals=3)

    print(stock)
    last_row = df.tail(1)
    last_row = last_row.reset_index()
    last_row = last_row['Close']
    last_row = last_row.to_string(index=False)
    print('Close: {}'.format(last_row))
    print('-' * 80)

    lr_confidence = clf.score(X_test, Y_test)
    lr_confidence = round(lr_confidence, 2)

    # Latest live price.
    price = si.get_live_price('{}'.format(stock))
    price = round(price, 2)

    # volatility, momentum, beta, alpha, r_squared against the S&P 500.
    df = DataReader(stock, 'yahoo', start, end)
    dfb = DataReader('^GSPC', 'yahoo', start, end)

    # Monthly resampling for the CAPM regression.
    rts = df.resample('M').last()
    rbts = dfb.resample('M').last()
    dfsm = pd.DataFrame(
        {
            's_adjclose': rts['Adj Close'],
            'b_adjclose': rbts['Adj Close']
        },
        index=rts.index)

    # Monthly simple returns for the stock (s_) and benchmark (b_).
    dfsm[['s_returns','b_returns']] = dfsm[['s_adjclose','b_adjclose']]/\
        dfsm[['s_adjclose','b_adjclose']].shift(1) -1
    dfsm = dfsm.dropna()
    covmat = np.cov(dfsm["s_returns"], dfsm["b_returns"])

    # CAPM beta: covariance with the market over market variance.
    beta = covmat[0, 1] / covmat[1, 1]

    # CAPM alpha: mean return not explained by beta exposure.
    alpha = np.mean(dfsm["s_returns"]) - beta * np.mean(dfsm["b_returns"])

    # R-squared of the CAPM fit: 1 - SS_res / SS_tot.
    ypred = alpha + beta * dfsm["b_returns"]
    SS_res = np.sum(np.power(ypred - dfsm["s_returns"], 2))
    SS_tot = covmat[0, 0] * (len(dfsm) - 1)  # SS_tot is sample_variance*(n-1)
    r_squared = 1. - SS_res / SS_tot

    volatility = np.sqrt(covmat[0, 0])
    momentum = np.prod(1 + dfsm["s_returns"].tail(12).values) - 1

    # Annualise the monthly alpha and volatility (12 periods/year).
    prd = 12.
    alpha = alpha * prd
    volatility = volatility * np.sqrt(prd)

    beta = round(beta, 2)
    alpha = round(alpha, 2)
    r_squared = round(r_squared, 2)
    volatility = round(volatility, 2)
    momentum = round(momentum, 2)

    # Sharpe ratio on a single-stock "portfolio" of $5000.
    x = 5000
    y = (x)

    stock_df = df
    stock_df[
        'Norm return'] = stock_df['Adj Close'] / stock_df.iloc[0]['Adj Close']

    # With x == y the allocation is always 1.0 (single-asset portfolio).
    allocation = float(x / y)
    stock_df['Allocation'] = stock_df['Norm return'] * allocation

    stock_df['Position'] = stock_df['Allocation'] * x
    pos = [df['Position']]
    val = pd.concat(pos, axis=1)
    val.columns = ['WMT Pos']
    val['Total Pos'] = val.sum(axis=1)

    val['Daily Return'] = val['Total Pos'].pct_change(1)

    # Annualised Sharpe ratio (252 trading days, zero risk-free rate).
    Sharpe_Ratio = val['Daily Return'].mean() / val['Daily Return'].std()
    A_Sharpe_Ratio = (252**0.5) * Sharpe_Ratio
    A_Sharpe_Ratio = round(A_Sharpe_Ratio, 2)

    # NOTE(review): prediction[4] assumes days >= 5 -- confirm callers.
    difference = float(prediction[4]) - float(last_row)
    change = float(difference) / float(last_row)
    predictions.append(change)

    confidence.append(lr_confidence)

    error = 1 - float(lr_confidence)
    error_list.append(error)

    # Alert only on a bullish 5-day prediction with strong model fit.
    if (float(prediction[4]) > (float(last_row))
            and (float(lr_confidence)) > 0.8):
        output = ("\nStock: " + str(stock) + "\nLast Close: " + str(last_row) +
                  "\nPrediction in 1 Day: " + str(prediction[0]) +
                  "\nPrediction in 5 Days: " + str(prediction[4]) +
                  "\nConfidence: " + str(lr_confidence) +
                  "\nCurrent Price : " + str(price) + "\n\nStock Data: " +
                  "\nBeta: " + str(beta) + "\nAlpha: " + str(alpha) +
                  "\nSharpe Ratio: " + str(A_Sharpe_Ratio) + "\nVolatility: " +
                  str(volatility) + "\nMomentum: " + str(momentum))
        sendMessage(output)
Exemple #15
0
def get_yahoo_finance(stock):
    """Fetch daily bars for *stock* from Yahoo between the module-level
    ``start`` and ``end`` dates."""
    return DataReader(stock, 'yahoo', start, end)
    'DFF': "FRED/DFF", #Effective Federal Funds Rate
    'DTB3': "FRED/DTB3", #3-Month Treasury Bill: Secondary Market Rate
    'DGS5': "FRED/DGS5", #5-Year Treasury Constant Maturity Rate
    'DGS10': "FRED/DGS10",#10-Year Treasury Constant Maturity Rate
    'DGS30': "FRED/DGS30", #30-Year Treasury Constant Maturity Rate
    'T5YIE': "FRED/T5YIE", #5-year Breakeven Inflation Rate
    'T10YIE': "FRED/T10YIE", #10-year Breakeven Inflation Rate
    'T5YIFR': "FRED/T5YIFR",#5-Year, 5-Year Forward Inflation Expectation Rate 
    'TEDRATE': "FRED/TEDRATE", #TED Spread
    'DPRIME': "FRED/DPRIME" #Bank Prime Loan Rate
}


# Index fund / sector-ETF data. NOTE: each DataReader call hits Yahoo
# at module import time, so building this dict performs ten downloads.
index_data = {
    'VIX': DataReader('VIX', 'yahoo', start, end), #Vix index
    #Sector ETFs:
    'XLE':DataReader('XLE', 'yahoo', start, end), #Energy Select Sector SPDR Fund 
    'XLF':DataReader('XLF', 'yahoo', start, end), #Financial Select Sector SPDR Fund
    'XLU':DataReader('XLU', 'yahoo', start, end), #Utilities Select Sector SPDR Fund
    'XLI':DataReader('XLI', 'yahoo', start, end), #Industrial Select Sector SPDR Fund
    'XLK':DataReader('XLK', 'yahoo', start, end), #Technology Select Sector SPDR Fund
    'XLV':DataReader('XLV', 'yahoo', start, end), #Health Care Select Sector SPDR Fund
    'XLY':DataReader('XLY', 'yahoo', start, end), #Consumer Discretionary Select Sector SPDR Fund
    'XLP':DataReader('XLP', 'yahoo', start, end), #Consumer Staples Select Sector SPDR Fund
    'XLB':DataReader('XLB', 'yahoo', start, end), #Materials Select Sector SPDR Fund
}


#econ features that are not daily (weekly, monthly & quarterly data - need to ffill)
sparse_econ_data= {
from pandas_datareader import DataReader
import numpy as np
import pandas as pd
import datetime

# 5-year window ending today (1826 days, allowing for a leap day).
start_date = datetime.datetime.now() - datetime.timedelta(days=1826)
end_date = datetime.date.today()

stock = 'MSFT'
index = '^GSPC'

# Grab time series data for 5-year history for the stock (here MSFT --
# the original comment incorrectly said AAPL) and for the S&P-500 index.
df = DataReader(stock, 'yahoo', start_date, end_date)
dfb = DataReader(index, 'yahoo', start_date, end_date)

# create a time-series of monthly data points
rts = df.resample('M').last()
rbts = dfb.resample('M').last()
dfsm = pd.DataFrame(
    {
        's_adjclose': rts['Adj Close'],
        'b_adjclose': rbts['Adj Close']
    },
    index=rts.index)

# Monthly simple returns for the stock (s_) and the benchmark (b_).
dfsm[['s_returns','b_returns']] = dfsm[['s_adjclose','b_adjclose']]/\
    dfsm[['s_adjclose','b_adjclose']].shift(1) -1
Exemple #18
0
    def fetch_daily_values(self, code, start, end):
        """Return daily bars for *code* from Yahoo over [start, end]."""
        # Imported lazily so the dependency is only needed when fetching.
        from pandas_datareader import DataReader

        return DataReader(code, "yahoo", start, end)
Exemple #19
0
from sklearn.naive_bayes import GaussianNB
from sklearn.metrics import accuracy_score
from sklearn.metrics import confusion_matrix
from sklearn.preprocessing import MinMaxScaler
from sklearn.tree import DecisionTreeClassifier
from sklearn.neural_network import MLPClassifier
from sklearn.metrics import classification_report
from sklearn.neighbors import KNeighborsClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split

# Dow Jones history from 1969 to today.
# NOTE(review): ``datetime``, ``DataReader``, ``plt`` and ``pd`` are
# presumably imported earlier in the file -- confirm.
start_date = datetime.datetime(1969, 1, 1)
end_date = datetime.date.today()

df = DataReader('^DJI', 'yahoo', start_date, end_date)

print('\nDJIA Historical Prices: ')
print(df.head())
print(f'\nDJIA Historical Prices Dataset Shape: {df.shape}')

#plot closing price for DJI
df.Close.plot(grid=True, figsize=(10, 6))
plt.title("DJI Closing price")

# Feature frame for the classifiers imported above, aligned to the
# price index.
ti = pd.DataFrame(index=df.index)

# open, high, low, close, volume
ti["Open"] = df["Open"]
ti["High"] = df["High"]
#

# In[ ]:

# List of Tech_stocks for analytics
tech_list = ['AAPL', 'GOOGL', 'MSFT', 'AMZN']

# set up Start and End time for data grab (trailing year)
end = datetime.now()
start = datetime(end.year - 1, end.month, end.day)

# For-loop for grabbing Yahoo finance data and setting each result as a
# module-level DataFrame named after its stock ticker.

for stock in tech_list:
    globals()[stock] = DataReader(stock, 'yahoo', start, end)  #change

# In[4]:

AAPL.head()

# In[5]:

AAPL.describe()

# In[6]:

AAPL.info()

# Now that we've seen the DataFrame, let's go ahead and plot out the volume and closing price of the AAPL(Apple) stocks.
from pandas_datareader import get_data_yahoo as get_data
import scipy.stats as stats
import scipy as sp
from pandas_datareader import DataReader

# CBOE volatility index daily series from the FRED database.
vix = DataReader("VIXCLS", "fred")
print(vix.shape)
print(vix.head())

# def ret_f(ticker, begdate, enddate):
#     p = get_data(ticker, begdate, enddate)
#     return (p.Close.values[1:] / p.Close.values[:-1] - 1)
#
#
# begdate = '20130101'
# enddate = '20151231'
# y = ret_f('IBM', begdate, enddate)
# x = ret_f('MSFT', begdate, enddate)
#
# print(sp.stats.bartlett(x, y))
# time series

# rng = pd.date_range('2020-10-10', periods=10,freq='Q')
# print(rng)

# freq can be H, S, T-minutes, Q, W-weekly, D - calendar day,
# B - business day, M - month end, MS - month start, A - year end

# Time zone

# time_zones = list(pytz.all_timezones)
# europe_timezones = [tz for tz in time_zones if tz.startswith('Europe')]
# print(europe_timezones)

# changing the timezone: tz_convert(tz=<time zone>)

# DataReader downloads the data from the internet
amazon = DataReader('AMZN', 'stooq')
amazon.to_csv('../data/amazon_datareader.csv')

# stacked charts via subplots (3 is the number of axes; pick one with ax=ax[i])
# amazon = amazon.sort_index()
fig, ax = plt.subplots(3, sharex=True)
amazon['Close'].plot(ax=ax[0])
amazon['Close'].rolling(120).mean().plot(ax=ax[1],
                                         logy=True)  # logy = logarithmic y-axis
amazon['Close'].plot(ax=ax[1])
amazon['Close'].plot(ax=ax[2])
ax[0].legend(['price', 'rolling'])
ax[1].legend(['price'])
ax[2].legend(['test'])
Exemple #23
0
from datetime import datetime


# In[2]:


# Tech tickers and a one-year lookback window.
tech_stocks = ['AAPL', 'GOOG', 'MSFT', 'AMZN']
end = datetime.now()
start = datetime(end.year - 1, end.month, end.day)


# In[3]:


# Download each ticker and bind it as a module-level global (AAPL, ...).
for stock in tech_stocks:
    globals()[stock] = DataReader(stock, 'yahoo', start, end)


# In[4]:


AAPL['Adj Close'].plot(legend=True, figsize=(10, 4))


# In[5]:


AAPL['Volume'].plot(legend=True, figsize=(10, 4))


# ## Calculate moving averages
Exemple #24
0
# S&P 500 membership; Yahoo symbols use '-' where the official list
# uses '.' (e.g. BRK.B -> BRK-B).
tickers = si.tickers_sp500()
tickers = [item.replace(".", "-") for item in tickers]

# Set dates: user-chosen number of years back from today.
num_of_years = float(input('Enter the number of years: '))
start = dt.date.today() - dt.timedelta(days=int(365.25 * num_of_years))
end = dt.date.today()

# Get today's date
mylist = []
mylist.append(dt.date.today())
today = mylist[0]

# Get Index Data (SPY as the benchmark).
index = 'SPY'
spy = DataReader(index, 'yahoo', start, end)
# 14-day relative strength index on the benchmark.
spy['RSI'] = talib.RSI(spy['Adj Close'], timeperiod=14)

# Accumulators filled by the per-symbol loop below.
signals = []
accuracies = []
for symbol in tickers:
    try:
        df = pd.read_csv(
            f'/Users/shashank/Documents/Code/Python/Outputs/S&P500/{symbol}.csv',
            index_col=0,
            parse_dates=True)
        df = df.truncate(before=start, after=end)

        # Technical Indicators
        df['upper_band'], df['middle_band'], df['lower_band'] = talib.BBANDS(
            df['Adj Close'], timeperiod=7)
Exemple #25
0
def history(ticker, start, end):
    """Fetch daily Yahoo bars for *ticker* and tag each row with it."""
    bars = DataReader(ticker, 'yahoo', start, end)
    bars['Ticker'] = ticker
    return bars
# In[3]:


# List of Tech_stocks for analytics
tech_list = ['AAPL','GOOGL','MSFT','AMZN']

# set up Start and End time for data grab (trailing year)
end = datetime.now()
start = datetime(end.year-1,end.month,end.day)

# For-loop for grabbing Google finance data and setting each result as
# a module-level DataFrame named after its stock ticker.

for stock in tech_list:
    globals()[stock] = DataReader(stock,'google',start,end)


# Quick note: Using globals() is a sloppy way of setting the DataFrame names, but its simple
# 
# Let's go ahead and play around with the AAPL(Apple) Stock DataFrame to get a feel for the data.

# In[4]:


AAPL.head()


# In[5]:

Exemple #27
0

def getTickerQuotes(ticker):
  """Return up to five days of daily market history for *ticker*,
  bounded by the module-level startTime/endTime, via yfinance."""
  return yf.Ticker(ticker).history(period="5d", start=startTime, end=endTime)


def loadConfig():
  """Parse data/config.ini and return (parser, its [TICKERS] section)."""
  parser = configparser.ConfigParser()
  parser.read('data/config.ini')
  return parser, parser['TICKERS']


if __name__ == "__main__":
  config, tickers = loadConfig()

  # NOTE(review): ``prices`` is never initialised in this snippet and
  # ``numTicks`` is never updated or used -- presumably defined
  # elsewhere; confirm before running.
  numTicks = 1
  for tick in tickers:
    data = getTickerQuotes(config['TICKERS'][tick])
    # Daily closing prices per configured ticker.
    prices[tick] = DataReader(config['TICKERS'][tick],'yahoo', startTime, endTime).loc[:,'Close'] #S&P 500

  print("Data:\n", prices.head())
  # Day-over-day percentage change.
  returns = prices.pct_change()
  print("Percent Change:\n", returns.head())

Exemple #28
0
           "FEDFUNDS",
           "DCOILWTICO",
           "GOLDAMGBD228NLBM",
           "UNRATE",
           "CIVPART",
           "BAMLHYH0A0HYM2TRIV",
           "SP500"]
data_source = "fred"

#for ticker in tickers:    
#    fed = DataReader(ticker,data_source,start,end)
#    print(fed.tail(10))
#    fed.plot(title = ticker,figsize = (15,5))
#    plt.show()

# Fetch only the S&P 500 series (tickers[7] == "SP500") from FRED.
sp = DataReader(tickers[7],data_source,start,end)
print(sp.tail(10))
sp.plot(title = tickers[7],figsize = (15,5))
plt.show()

# Window sizes in observations; their use is not visible in this
# chunk -- TODO confirm against the rest of the file.
num = 233
risk_num = 60


# Cumulative return relative to the first observation, and simple
# period-over-period returns.
sp1 = sp.copy()
sample = sp1.dropna()
ret_cum = sample / sample.iloc[0,:]
ret_per = sample / sample.shift(1) - 1
print(ret_cum.tail())
print(ret_per.tail())
ret_cum.plot(title = "cumulative return")
from tensorflow.keras.models import Sequential
from pandas.plotting import lag_plot
from tensorflow.keras.layers import Dense, LSTM
from pandas_datareader import DataReader
from statsmodels.tsa.arima_model import ARIMA
from sklearn.preprocessing import MinMaxScaler
from sklearn.metrics import mean_squared_error

warnings.filterwarnings('ignore')

# One year of Tesla daily history ending today.
ticker = 'TSLA'
start_date = datetime.datetime.now() - datetime.timedelta(days=365)
end_date = datetime.date.today()

# # Fetching the historic prices
df = DataReader(ticker, 'yahoo', start_date, end_date)

# # Plotting the data
plt.figure(figsize=(16, 8))
plt.title(f'{ticker} Close Price History')
plt.plot(df['Close'], linewidth=2)
plt.xlabel('Date', fontsize=18)
plt.ylabel('Close Price USD ($)', fontsize=18)
plt.show()

# Daily simple returns over (at most) the last 365 rows.
last = df.tail(365)
plt.figure(figsize=(16, 8))
plt.title(f"{ticker}'s Daily Returns")
returns = last['Close'] / last['Close'].shift(1) - 1
returns.plot(label='returns %', linewidth=1)
plt.xlabel('Date', fontsize=18)
Exemple #30
0
])
# Summary table for screened stocks: one row per ticker with its RS
# rating, moving averages, 52-week range, and which criteria failed.
otherList = pd.DataFrame(columns=[
    'Stock', "RS_Rating", "50 Day MA", "150 Day Ma", "200 Day MA",
    "52 Week Low", "52 week High", "Failed"
])

for stock in stocklist:
    #n += 1
    time.sleep(1.5)

    print("\npulling {}".format(stock))
    # rsi value
    start_date = datetime.datetime.now() - datetime.timedelta(days=365)
    end_date = datetime.date.today()

    df = DataReader(stock, 'yahoo', start=start_date, end=end_date)

    df["rsi"] = talib.RSI(df["Close"])

    RS_Rating = df["rsi"].tail(14).mean()

    try:
        smaUsed = [50, 150, 200]
        for x in smaUsed:
            sma = x
            df["SMA_" + str(sma)] = round(
                df.iloc[:, 4].rolling(window=sma).mean(), 2)

        currentClose = df["Adj Close"][-1]
        moving_average_50 = df["SMA_50"][-1]
        moving_average_150 = df["SMA_150"][-1]