import datetime as dt
import pandas as pd
from nsepy import get_history as gh


def get_data(value):
    """Fetch daily price history for a symbol and return the Date and Close columns."""
    start = dt.datetime(2010, 1, 1)
    end = pd.to_datetime(dt.datetime.today(), format='%Y-%m-%d', errors='coerce')
    stk_data = gh(symbol=value, start=start, end=end)
    stk_data.reset_index(inplace=True)  # move the Date index into a regular column
    return stk_data[['Date', 'Close']]
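# Illustrative usage sketch (not part of the original source); 'SBIN' is just an
# example symbol, and the plotting call assumes matplotlib is installed.
import matplotlib.pyplot as plt

closes = get_data('SBIN')
closes.plot(x='Date', y='Close', title='SBIN closing price')
plt.show()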
from datetime import date

import pandas as pd
import matplotlib.pyplot as plt
from nsepy import get_history as gh
from pypfopt import risk_models
from pypfopt import expected_returns
from pypfopt.discrete_allocation import DiscreteAllocation, get_latest_prices

stocksymbols = ['TATAMOTORS', 'DABUR', 'ICICIBANK', 'WIPRO',
                'BPCL', 'IRCTC', 'INFY', 'RELIANCE']
startdate = date(2019, 10, 14)
end_date = date.today()
print(end_date)
print(f"You have {len(stocksymbols)} assets in your portfolio")

# Build one price DataFrame with a Close column per symbol
df = pd.DataFrame()
for i in range(len(stocksymbols)):
    data = gh(symbol=stocksymbols[i], start=startdate, end=end_date)[['Symbol', 'Close']]
    data.rename(columns={'Close': data['Symbol'].iloc[0]}, inplace=True)
    data.drop(['Symbol'], axis=1, inplace=True)
    if i == 0:
        df = data
    else:
        df = df.join(data)

# Calculating the expected annual return and the annualised sample covariance
# matrix of the daily asset returns
mean = expected_returns.mean_historical_return(df)
S = risk_models.sample_cov(df)  # sample covariance matrix

plt.style.use('ggplot')
fig = plt.figure()
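# The imports above bring in DiscreteAllocation and get_latest_prices, but the
# snippet stops after computing `mean` and `S`.  A minimal sketch of how these
# typically feed into pypfopt's EfficientFrontier follows; the max-Sharpe
# objective and the 100,000 portfolio value are assumptions, not from the source.
from pypfopt.efficient_frontier import EfficientFrontier

ef = EfficientFrontier(mean, S)          # expected returns + covariance from above
weights = ef.max_sharpe()                # optimise for the maximum Sharpe ratio
cleaned_weights = ef.clean_weights()     # round negligible weights to zero
ef.portfolio_performance(verbose=True)   # expected return, volatility, Sharpe ratio

latest_prices = get_latest_prices(df)
da = DiscreteAllocation(cleaned_weights, latest_prices, total_portfolio_value=100000)
allocation, leftover = da.lp_portfolio()
print("Share allocation:", allocation)
print("Funds remaining:", leftover)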
def find_profits(symbol, start, end, stocks=50):
    df = gh(symbol=symbol, start=start, end=end)
    df = dataset(df, stocks)  # `dataset` (defined elsewhere in the project) is expected to add a 'Profits' column
    print(f"Profit: {df['Profits'].sum()}")
    return df
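# Illustrative call (hypothetical, since the `dataset` helper lives elsewhere in
# the project; the symbol, dates and share count below are made-up examples):
# trades = find_profits('TCS', start=date(2020, 1, 1), end=date(2021, 1, 1), stocks=100)
# print(trades.tail())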
import datetime as dt
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from nsepy import get_history as gh
from sklearn import model_selection
from sklearn.metrics import confusion_matrix
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler, MinMaxScaler
from keras.models import Sequential
from keras.layers import Dense, LSTM, Dropout

# Setting start and end dates and fetching the historical data
start = dt.datetime(2013, 1, 1)
end = dt.datetime(2018, 12, 31)
stk_data = gh(symbol='SBIN', start=start, end=end)

# Visualizing the fetched data
plt.figure(figsize=(14, 14))
plt.plot(stk_data['Close'])
plt.title('Historical Stock Value')
plt.xlabel('Date')
plt.ylabel('Stock Price')
plt.show()

# Data preprocessing: copy the date and OHLC columns into a fresh frame
stk_data['Date'] = stk_data.index
data2 = pd.DataFrame(columns=['Date', 'Open', 'High', 'Low', 'Close'])
data2['Date'] = stk_data['Date']
data2['Open'] = stk_data['Open']
data2['High'] = stk_data['High']
data2['Low'] = stk_data['Low']
data2['Close'] = stk_data['Close']
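# The imports above pull in MinMaxScaler and the Keras LSTM layers, but this
# snippet ends at the preprocessing stage.  A minimal, assumed continuation is
# sketched below: scale the Close series and build 60-day training windows
# (the 60-day lookback is an assumption, not stated in the original source).
sc = MinMaxScaler(feature_range=(0, 1))
scaled_close = sc.fit_transform(data2[['Close']].values)

lookback = 60
X_train, y_train = [], []
for i in range(lookback, len(scaled_close)):
    X_train.append(scaled_close[i - lookback:i, 0])
    y_train.append(scaled_close[i, 0])
X_train = np.array(X_train).reshape(-1, lookback, 1)  # (samples, time-steps, features)
y_train = np.array(y_train)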
# -*- coding: utf-8 -*-
"""
Created on Wed Jul 22 15:54:07 2020

@author: mukmc
"""
from nsepy import get_history as gh
import datetime as dt

start = dt.datetime(2020, 7, 20)
end = dt.datetime(2020, 7, 22)
# index=True is required when fetching an index such as NIFTY 50 with nsepy
stk_data = gh(symbol='NIFTY 50', start=start, end=end, index=True)

# ...........................................................
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt

dat = pd.read_csv("data/nifty.csv", index_col=False)
dat['hl'] = dat['High'] - dat['Low']      # daily high-low range feature
dat = dat.reindex(index=dat.index[::-1])  # reverse so rows run oldest to newest
# training_set = dat.iloc[0:int(4862 * 0.8), 4]
training_set = dat.iloc[0:int(4862 * 0.8), 1:6]  # first 80% of the rows, feature columns 1-5
from sklearn.preprocessing import MinMaxScaler
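# A minimal, assumed continuation of the snippet above: fit the MinMaxScaler
# just imported on the training slice (the [0, 1] feature range is the
# scaler's default, not something stated in the original source).
sc = MinMaxScaler(feature_range=(0, 1))
training_set_scaled = sc.fit_transform(training_set)
print(training_set_scaled.shape)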
# Import useful libraries
from nsepy import get_history as gh
from datetime import date
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import statistics as stats
import math
from sklearn.model_selection import GridSearchCV, train_test_split
from sklearn.linear_model import SGDClassifier
from sklearn.metrics import roc_curve, auc, roc_auc_score
from sklearn.svm import SVC

# Get the data required using nsepy
stk1 = gh(symbol='RELIANCE', start=date(2019, 11, 1), end=date(2020, 5, 7))

# Save the dataframe so that we can keep reusing it without refetching
stk1.to_pickle('ril.pkl')
ril = pd.read_pickle('ril.pkl')

# - We will now pose this problem as a classification problem.
# - The idea is that if the adjusted close price on the next day is higher than
#   on the present day, we buy the stock (indicated as 1); otherwise we sell it
#   (indicated as 0).
# rilf = pd.DataFrame(index=ril.index)
# rilf['price'] = ril['Last']
ril['response'] = ril['Last'].diff()
ril['class'] = np.where(ril['response'] > 0.0, 1, 0)
ril['class_final'] = ril['class'].shift(-1)  # shift so each row carries the next day's label
ril = ril.iloc[:len(ril) - 1]                # drop the last row, which has no next-day label
ril['class_final'] = ril.class_final.astype(int)
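# The imports above pull in train_test_split, SVC and roc_auc_score, but the
# snippet ends once the class labels are built.  A minimal illustrative sketch
# of one way to continue; the lagged-return features, the 80/20 split and the
# SVC settings are assumptions, not taken from the original source.
ril['ret_1d'] = ril['Last'].pct_change()
ril['ret_5d'] = ril['Last'].pct_change(5)
features = ril[['ret_1d', 'ret_5d']].dropna()
labels = ril.loc[features.index, 'class_final']

X_train, X_test, y_train, y_test = train_test_split(
    features, labels, test_size=0.2, shuffle=False)  # keep the time order intact

clf = SVC(probability=True)
clf.fit(X_train, y_train)
probs = clf.predict_proba(X_test)[:, 1]
print("ROC AUC:", roc_auc_score(y_test, probs))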
from datetime import datetime

from nsepy import get_history as gh
from keras.models import Sequential
from keras.layers import Dense, LSTM, Dropout
from sklearn.preprocessing import MinMaxScaler

# CONSTANTS
N_FEATURES = 1     # number of features
N_STEPS = 60       # number of time-steps per training window
TRAIN_SPLIT = 0.8  # portion of data used for training

# DATA PREPROCESSING
# Loading data
data = gh(symbol='BHARTIARTL', start=datetime(2004, 1, 1), end=datetime(2021, 4, 13))
data = data[['Close']]
final_data = data.values
train_data = final_data[0:int(len(final_data) * TRAIN_SPLIT), :]
test_data = final_data[int(len(final_data) * TRAIN_SPLIT):, :]

# Scaling data
scaler = MinMaxScaler()
scaled_data = scaler.fit_transform(final_data)

# Building the training windows (each sample is the previous N_STEPS scaled closes)
X_train, y_train = [], []
for i in range(N_STEPS, len(train_data)):
    X_train.append(scaled_data[i - N_STEPS:i, 0])
    y_train.append(scaled_data[i, 0])
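# The snippet above stops while collecting the training windows.  A minimal
# sketch of the usual next steps under those definitions follows; the layer
# sizes, dropout rate, optimiser and epoch count are assumptions rather than
# values from the original source.
import numpy as np

X_train, y_train = np.array(X_train), np.array(y_train)
X_train = X_train.reshape(X_train.shape[0], X_train.shape[1], N_FEATURES)

model = Sequential()
model.add(LSTM(units=50, return_sequences=True, input_shape=(N_STEPS, N_FEATURES)))
model.add(Dropout(0.2))
model.add(LSTM(units=50))
model.add(Dropout(0.2))
model.add(Dense(units=1))
model.compile(optimizer='adam', loss='mean_squared_error')
model.fit(X_train, y_train, epochs=20, batch_size=32)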