Example No. 1
def stock_price_df(symbol):
    file = 'data/{}'.format(symbol.upper())
    if not os.path.exists(file):
        time.sleep(5)
        df = get_history(symbol, end=now, start=start)
        df.to_csv(file)
    return pd.read_csv(file)
Example No. 2
def stock_df(symbol):
    file = os.path.join(stocks_dir, symbol.upper())
    if not os.path.exists(file):
        print('Downloading {}'.format(symbol))
        time.sleep(5)
        df = get_history(symbol, end=today, start=start_date)
        df.to_csv(file)
    return pd.read_csv(file)
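Both helpers cache each symbol's history to a CSV on disk and re-read it on later calls; the surrounding setup (imports, cache directory, date range) is not part of the excerpts. A minimal sketch of that assumed context:

import os
import time
from datetime import date, timedelta

import pandas as pd
from nsepy import get_history

stocks_dir = 'data'                 # cache directory used by stock_df(); stock_price_df() hard-codes 'data/'
os.makedirs(stocks_dir, exist_ok=True)

today = now = date.today()          # 'now' / 'today' in the snippets above
start = start_date = today - timedelta(days=365)  # 'start' / 'start_date' (the range is an assumption)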
Example No. 3
# Options expire on the last Thursday of every month
p1_expiry = get_expiry_date(year=today.year, month=today.month - 1)
p2_expiry = get_expiry_date(
    year=today.year, month=today.month - 2) + timedelta(days=1)

print(p1_expiry, p2_expiry)

symbol = "NIFTY"
close = 'Close'

print(symbol)

df = get_history(
    symbol=symbol, index=True,
    strike_price=9600, option_type='CE',
    start=p2_expiry, end=p1_expiry, expiry_date=p1_expiry,
)
df = df.sort_index(ascending=True)
print(df[[close, 'Underlying']])

df = get_history(
    symbol=symbol, index=True,
    strike_price=10200, option_type='CE',
    start=p2_expiry, end=p1_expiry, expiry_date=p1_expiry,
)
df = df.sort_index(ascending=True)


def add_chgp(df, col, col_diff):
    df[col_diff] = round((1 - df[col] / df[col].shift(-1)) * 100, 2)
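add_chgp() appends a column holding the percentage difference between each row's value and the value on the next row. A hypothetical application to the option chain downloaded above (the new column names are illustrative):

add_chgp(df, close, 'Close_chg%')        # % change of the option close
add_chgp(df, 'Underlying', 'Spot_chg%')  # % change of the underlying index
print(df[[close, 'Close_chg%', 'Underlying', 'Spot_chg%']].head())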
Example No. 4
import time

st = time.clock()  # time.clock() was removed in Python 3.8; use time.perf_counter() on Python 3
from nsetools import Nse
from datetime import date
from nsepy import get_history

nse = Nse()

all_stock_codes = nse.get_stock_codes()
count = 0
for stk in all_stock_codes:
    count += 1
    print(count, stk)
    try:
        stock = get_history(symbol=stk,
                            start=date(2019, 10, 2),
                            end=date(2019, 12, 6))
    except:
        print("Problem finding data for", stk)
        continue  # skip this symbol instead of re-saving the previous DataFrame

    stock.to_csv(
        "C:\\Python27\\Stock Market\\Working\\Stocks_NSE\\{}.csv".format(stk))

print(time.clock() - st)
Example No. 5
import datetime
import os
from datetime import timedelta

import nsepy
import plotly.graph_objects as go

global df, buy_list
global updates
buy_list = []
updates = []

data = 'INFY'
buy = []
date_y = datetime.date.today() - timedelta(days=60)
df = nsepy.get_history(symbol='ONGC', start=date_y, end=datetime.date.today())
df.reset_index('Date', inplace=True)
df['Date'] = df['Date'].astype(str)
df['Date'] = df['Date'].apply(lambda x: datetime.datetime.strptime(x, '%Y-%m-%d'))
df.set_index('Date', inplace=True)
ohlc_dict = {'Open': 'first', 'High': 'max', 'Low': 'min', 'Close': 'last'}

df = df.resample('M').agg(ohlc_dict).dropna(how='any')  # resample's 'how=' argument was removed in newer pandas; .agg() replaces it
cols = ['Open', 'High', 'Low', 'Close']
df = df[cols]

# df = df['Close'].resample('1M').ohlc()
df.reset_index('Date', inplace=True)
print(df)

df['HA_Close'] = ((df['Open'] + df['High'] + df['Low'] + df['Close']) / 4)
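The excerpt stops at HA_Close; the remaining Heikin-Ashi columns follow the standard recurrence (HA_Open is seeded from the first real candle). A sketch of the usual completion, reusing the monthly df built above:

# Standard Heikin-Ashi recurrence (HA_Close is already computed above)
df['HA_Open'] = 0.0
df.loc[0, 'HA_Open'] = (df.loc[0, 'Open'] + df.loc[0, 'Close']) / 2
for i in range(1, len(df)):
    df.loc[i, 'HA_Open'] = (df.loc[i - 1, 'HA_Open'] + df.loc[i - 1, 'HA_Close']) / 2
df['HA_High'] = df[['High', 'HA_Open', 'HA_Close']].max(axis=1)
df['HA_Low'] = df[['Low', 'HA_Open', 'HA_Close']].min(axis=1)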
Example No. 6
# make sure we get n working days (ignoring Saturday & Sunday)
days = int(find_no_of_working_days(start_date, end_date))
while days < no_of_work_day:
    no_of_calender_day = no_of_calender_day + (no_of_work_day - days)
    start_date = end_date - datetime.timedelta(days=no_of_calender_day)
    days = int(find_no_of_working_days(start_date, end_date))

row_num = 0
with open('NSEEquitySymbols.csv') as csvfile:
    readCSV = csv.reader(csvfile, delimiter=',')
    next(readCSV, None)  # skip header
    for rowcsv in readCSV:
        stock_symbol = rowcsv[0]
        stock_name = rowcsv[1]
        row_num = row_num + 1
        data = get_history(symbol=stock_symbol, start=start_date, end=end_date)

        if data.empty:
            continue
        else:
            no_of_day = 0
            while (len(data) < no_of_work_day):  # making sure n day data is there
                no_of_day = no_of_day + 1
                data = get_history(symbol=stock_symbol, start=start_date - datetime.timedelta(days=no_of_day),
                                   end=end_date)
        # print(data)

        data['Last 5 Day Avg. Vol'] = data['Volume'].shift(1).rolling(
            window=5).mean()  # Average volume traded in last 5 days
        data['Previous-Last 5 Day Avg. Vol'] = data['Last 5 Day Avg. Vol'].shift(
            5)  # Average volume traded between last 5 to 10 days
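        # Hypothetical follow-up (not in the original excerpt): flag a volume surge
        # when the recent 5-day average jumps versus the prior 5-day window.
        # The 1.5x threshold is an arbitrary illustration.
        data['Volume Surge'] = (data['Last 5 Day Avg. Vol'] >
                                1.5 * data['Previous-Last 5 Day Avg. Vol'])
        if data['Volume Surge'].iloc[-1]:
            print(stock_symbol, 'volume surge on', data.index[-1])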
Example No. 7
import numpy as np

from nsepy import get_history
from sklearn.preprocessing import MinMaxScaler
from keras.models import Sequential
from keras.layers import Dense, LSTM

from keras.callbacks import EarlyStopping
import datetime as dt

print("getting data")
ds = get_history(symbol='WIPRO',
                 start=dt.date(2011, 1, 17),
                 end=dt.date.today())

sc = MinMaxScaler()
train_set = sc.fit_transform(ds['Close'][:2459].values.reshape(-1, 1))

# create the training dataset
# create the scaled training dataset

past_days = 30


def prepare_data(timeseries_data, n_features):
    X, y = [], []
    for i in range(len(timeseries_data)):
        # find the end of this pattern
        end_ix = i + n_features
        # check if we are beyond the sequence
        if end_ix > len(timeseries_data) - 1:
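            # (assumed completion -- the excerpt cuts off here; this is the
            # conventional ending of the sliding-window helper)
            break
        # gather the input window and the value that follows it
        seq_x, seq_y = timeseries_data[i:end_ix], timeseries_data[end_ix]
        X.append(seq_x)
        y.append(seq_y)
    return np.array(X), np.array(y)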
Example No. 8
import os

from nsepy import get_history

from datetime import date

data = get_history(symbol='SBIN', start=date(2020, 1, 1), end=date(2020, 4, 17))
os.chdir('/storage/emulated/0/bluetooth')
print(type(data))
data.to_excel('a.xlsx')
Example No. 9


# Part 1 - Data Preprocessing

# Importing the libraries
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import nsepy 
from datetime import date
import pickle


dataset_train = nsepy.get_history(symbol="NIFTY",
                                  start=date(2010, 1, 1),
                                  end=date(2017, 11, 30),
                                  index=True)

# Importing the training set
#dataset_train = pd.read_csv('Google_Stock_Price_Train.csv')

training_set = dataset_train.iloc[:, 1:2].values

# Feature Scaling
from sklearn.preprocessing import MinMaxScaler
sc = MinMaxScaler(feature_range = (0, 1))
training_set_scaled = sc.fit_transform(training_set)

# Creating a data structure with 60 timesteps and 1 output
X_train = []
y_train = []
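The excerpt ends before the windowing loop; the usual continuation (60-step input windows over the scaled training data) is sketched below as an assumed completion:

for i in range(60, len(training_set_scaled)):
    X_train.append(training_set_scaled[i - 60:i, 0])
    y_train.append(training_set_scaled[i, 0])
X_train, y_train = np.array(X_train), np.array(y_train)
X_train = np.reshape(X_train, (X_train.shape[0], X_train.shape[1], 1))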
Example No. 10
import nsepy as ns
import datetime as d
import pandas as pd
#s=ns.get_history(symbol='NIFTY IT', start=d.date(2015,1,1), end=d.date(2015,1,10), index = True)
s = ns.get_history(symbol='INFY',
                   start=d.date(2015, 1, 1),
                   end=d.date(2016, 1, 1))
file_name = 'dataset.csv'
s[['Open', 'Close', 'Low', 'High', 'Volume']].to_csv(file_name,
                                                     sep=',',
                                                     encoding='utf-8')
s1 = ns.get_history(symbol='TCS',
                    start=d.date(2015, 1, 1),
                    end=d.date(2016, 1, 1))
with open('dataset.csv', 'a', encoding='utf-8') as f:
    s1[['Open', 'Close', 'Low', 'High', 'Volume']].to_csv(f, header=False)
s2 = ns.get_history(symbol='NIFTY IT',
                    start=d.date(2015, 1, 1),
                    end=d.date(2016, 1, 1),
                    index=True)
with open('dataset.csv', 'a', encoding='utf-8') as f:
    s2[['Open', 'Close', 'Low', 'High', 'Volume']].to_csv(f, header=False)

s[['Open', 'Close', 'Low', 'High', 'Volume']].to_csv('infy.csv',
                                                     sep=',',
                                                     encoding='utf-8')
s1[['Open', 'Close', 'Low', 'High', 'Volume']].to_csv('tcs.csv',
                                                      sep=',',
                                                      encoding='utf-8')
s2[['Open', 'Close', 'Low', 'High', 'Volume']].to_csv('niftyit.csv',
                                                      sep=',',
                                                      encoding='utf-8')
Example No. 11
import sys
from datetime import date

from nsepy import get_history

i = 3

if(len(sys.argv) < 9):
    print("argv: location symbol syear smonth sday eyear emonth eday")
else:
    location = sys.argv[i - 2]
    symbol = sys.argv[i - 1]
    syear, smonth, sday = int(sys.argv[i]), int(sys.argv[i + 1]), int(sys.argv[i + 2])
    eyear, emonth, eday = int(sys.argv[i + 3]), int(sys.argv[i + 4]), int(sys.argv[i + 5])

    fname = location + "\\" + symbol + "_" + str(eyear) + "_" + str(emonth) + "_" + str(eday) + "_" + str(syear) + "_" + str(smonth) + "_" + str(sday) + ".csv"
    
    try:
        startdate = date(syear, smonth, sday)
        enddate = date(eyear, emonth, eday)

        dataset = get_history(symbol=symbol,
                            start=startdate,
                            end=enddate)

        dataset.to_csv(fname, index=True, header=True, sep=',')

    except:
        print("Error occurred while downloading. Please check your internet connection or contact administrator.\nModule 2")

    else:
        print("Successful Download")

Example No. 12
        (fa_dataframe['Last'] > fa_dataframe['Open'])
        & (fa_dataframe['Last'] < fa_dataframe['VWAP'])]
    return l_filteredData


nFailures = 0

filterResult_belowVwap = []
filterResult_aboveVwap = []

for symbolName in nifty200symbolNames:
    l_symbolData = pd.DataFrame()
    while True:
        try:
            l_symbolData = get_history(symbol=symbolName,
                                       start=date(startYear, startMonth,
                                                  startDate),
                                       end=date(endYear, endMonth, endDate))
            break
        except TimeoutError:
            nFailures += 1
            print("Timeout Error", nFailures)

    l_symbolData = l_symbolData[~l_symbolData.index.duplicated(keep='first')]
    #print(list(l_symbolData.columns.values))

    l_symbolData['%DeliveryVolume'] = (l_symbolData['Deliverable Volume'] /
                                       l_symbolData['Volume']) * 100
    l_symbolData = l_symbolData[[
        'Symbol', 'Prev Close', 'Open', 'High', 'Low', 'Close', 'Last', 'VWAP',
        '%DeliveryVolume'
    ]]
Example No. 13
     t3 = 0
     ch_vol = 0
     ch_pri = 0
     dele = 0
     t1 = numpy.sum(tt.loc[(tt['TIMESTAMP'] == datet.strftime("%x")),
                           'CHG_IN_OI'].values)
     t2 = numpy.sum(tt.loc[(tt['TIMESTAMP'] == yester.strftime("%x")),
                           'OPEN_INT'].values)
     fut = (t1 / t2)
 else:
     listtest.append('No')
     d1 = date(int(yester.strftime("%Y")), int(yester.strftime("%m")),
               int(yester.strftime("%d")))
     d2 = date(int(datet.strftime("%Y")), int(datet.strftime("%m")),
               int(datet.strftime("%d")))
     sbin = get_history(symbol=x, start=d1, end=d2)
     fir = sbin.loc[d1]
     las = sbin.loc[d2]
     tt = temp2.get_group(x)
     t1 = numpy.sum(tt.loc[(tt['TIMESTAMP'] == datet.strftime("%x")),
                           'CHG_IN_OI'].values)
     t2 = numpy.sum(tt.loc[(tt['TIMESTAMP'] == yester.strftime("%x")),
                           'OPEN_INT'].values)
     fut = (t1 / t2)
     t3 = las['Close']
     t4 = fir['Close']
     t5 = las['Volume']
     t6 = fir['Volume']
     ch_vol = ((t5 - t6) / t6)
     ch_pri = ((t3 - t4) / t4)
     dele = las['%Deliverble']
Example No. 14
from datetime import datetime, timedelta

import matplotlib.pyplot as plt
from nsepy import get_history

import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--last_days_count', action="store", help="Last Number of Days to get data", type=int)
parser.add_argument('--symbol', action="store", help="Stock Symbol", type=str)


args = parser.parse_args()

number_of_days = args.last_days_count
end_date = datetime.now().date()
start_date = end_date - timedelta(number_of_days)

df = get_history(symbol=args.symbol, start=start_date, end=end_date)

# SMA For Delivery
df['delivery_SMA_3'] = df.iloc[:,12].rolling(window=3).mean()
df['delivery_SMA_5'] = df.iloc[:,12].rolling(window=5).mean()
df['delivery_SMA_10'] = df.iloc[:,12].rolling(window=10).mean()

# SMA for Turnover
df['delivery_turnover_3'] = df.iloc[:,10].rolling(window=3).mean()
df['delivery_turnover_5'] = df.iloc[:,10].rolling(window=5).mean()
df['delivery_turnover_10'] = df.iloc[:,10].rolling(window=10).mean()

# SMA for Close
df['close_3'] = df.iloc[:,7].rolling(window=3).mean()
df['close_5'] = df.iloc[:,7].rolling(window=5).mean()
df['close_10'] = df.iloc[:,7].rolling(window=10).mean()
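The positional iloc indices above (12, 10 and 7) depend on the exact column order that get_history returns for equities (where column 12 is 'Deliverable Volume', 10 is 'Turnover' and 7 is 'Close'); an equivalent, more readable variant keyed by column name would be:

# Same rolling means, addressed by column name instead of position
for window in (3, 5, 10):
    df['delivery_SMA_{}'.format(window)] = df['Deliverable Volume'].rolling(window=window).mean()
    df['delivery_turnover_{}'.format(window)] = df['Turnover'].rolling(window=window).mean()
    df['close_{}'.format(window)] = df['Close'].rolling(window=window).mean()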
Example No. 15
import numpy as np
import pandas as pd
from nsepy import get_history, get_index_pe_history
import datetime as dt
import matplotlib.pyplot as plt
import seaborn as sns
get_ipython().run_line_magic('matplotlib', 'inline')


# In[2]:


start = dt.datetime(2015,1,1)

end = dt.datetime(2016,1,1)

infy = get_history(symbol='INFY', start = start, end = end)
infy.index = pd.to_datetime(infy.index)
infy.head(10)


# In[3]:


new_infy=infy.reset_index()


# In[4]:


new_infy['Date']=pd.to_datetime(new_infy['Date'])
new_infy.head()
Example No. 16
def Nifty_data():
    data = get_history(symbol="NIFTY", start=date(2021, 2, 1), end=date.today(),
                       index=True, futures=True, expiry_date=date(2021, 3, 25))
    data = data.sort_index(ascending=False)
    df = data
    H3 = round(df.Close + (df.High - df.Low) * 1.1 / 4, 2)
    H4 = round(df.Close + (df.High - df.Low) * 1.1 / 2, 2)
    H5 = round(df.Close * (df.High / df.Low), 2)
    L3 = round(df.Close - (df.High - df.Low) * 1.1 / 4, 2)
    L4 = round(df.Close - (df.High - df.Low) * 1.1 / 2, 2)
    df['H3'] = H3
    df['H4'] = H4
    df['H5'] = H5
    L5 = round(df.Close - (df.H5 - df.Close), 2)
    df['L3'] = L3
    df['L4'] = L4
    df['L5'] = L5
    P = round((df.High + df.Low + df.Close) / 3, 2)
    BC = round((df.High + df.Low) / 2, 2)
    TC = round((P - BC) + P, 2)
    df['Central Pivot'] = P
    df['TC'] = TC
    df['BC'] = BC

    df['Top Central'] = df.apply(lambda row: row['TC'] if row['TC'] > row['BC'] else row['BC'], axis=1)
    df['Bottom Central'] = df.apply(lambda row: row['BC'] if row['TC'] > row['BC'] else row['TC'], axis=1)


    conditions = [
        ((df['H3']== df['H3'].shift(-1))&(df['L3'] == df['L3'].shift(-1))),
        ((df['H3'] > df['H3'].shift(-1))&(df['L3'] < df['L3'].shift(-1))),
        ((df['H3'] < df['H3'].shift(-1))&(df['L3'] > df['L3'].shift(-1))),
        (df['L3'] > df['H3'].shift(-1)),
        ((df['H3'] > df['H3'].shift(-1))& (df['L3'] > df['L3'].shift(-1))),
        (df['H3'] < df['L3'].shift(-1)),
        ((df['H3'] < df['H3'].shift(-1)) & (df['L3'] < df['L3'].shift(-1)))
    ]

    values = [
        "Unchanged Value",
        "Outside Value",
        "Inside Value",
        "Higher Value",
        "Overlapping Higher Value",
        "Lower Value",
        "Overlapping Lower Value"
    ]
    df["2 Day Relationship"] = np.select(conditions,values)


    def expected_outcome(arg):
        switcher = {
            "Higher Value" : "Bullish",
            "Overlapping Higher Value": "Moderately Bullish",
            "Lower Value": "Bearish",
            "Overlapping Lower Value": "Moderately Bearish",
            "Unchanged Value": "Sideways / Breakout",
            "Outside Value":"Sideways",
            "Inside Value": "Breakout"
        }
        return switcher.get(arg,"Nothing")

    df['Expected Outcome'] = df.apply(lambda row: expected_outcome(row['2 Day Relationship']), axis=1)

    df1 = df.filter(['Date', 'Symbol', 'Expiry', 'High', 'Low', 'Close', 'H3', 'L3',
                     'Central Pivot', 'Top Central', 'Bottom Central',
                     '2 Day Relationship', 'Expected Outcome'], axis=1)

    return df1
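A hypothetical call of the function above, assuming the imports the excerpt omits (nsepy's get_history, datetime.date and numpy as np):

nifty_cpr = Nifty_data()
print(nifty_cpr.head())
nifty_cpr.to_csv('nifty_cpr.csv')  # illustrative export path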
Example No. 17
    if key == 'dayLow':
        print("day Low : " + str("%.2f" % value))
    if key == 'open':
        print("day open : " + str("%.2f" % value))
    if key == 'closePrice':
        print("day close : " + str("%.2f" % value))
    if key == 'previousClose':
        print("previous day close : " + str("%.2f" % value))

# current_price=get_live_price('TATAMOTORS.NS')
# print("current price : "+str("%.2f" % current_price))

for SYMBOL in nifty50Array:

    #-----------------------------------------------------------------------Pivot points-----------------------------------------------------------------------------
    pnb = get_history(symbol=SYMBOL, start=Start, end=Today)
    yesterday_close = pnb['Close']
    yesterday_open = pnb['Open']
    yesterday_high = pnb['High']
    yesterday_low = pnb['Low']
    # print(type(yesterday_close))
    yOpen = yesterday_open.values.tolist()[0]
    yClose = yesterday_close.values.tolist()[0]
    yHigh = yesterday_high.values.tolist()[0]
    yLow = yesterday_low.values.tolist()[0]

    print('yesterday open : ' + str(yOpen))
    print('yesterday High : ' + str(yHigh))
    print('yesterday low : ' + str(yLow))
    print('yesterday close : ' + str(yClose))
Example No. 18
def get_data(ticker, start_date='2016-01-01', end_date='2017-01-01'):
    """
        This function fetches the data from different web source such as Quandl, Yahoo finance and NSEPy
    """
    try:
        df = yf.download(ticker, start_date, end_date)
        df['Source'] = 'Yahoo'
        return df[[
            'Open', 'High', 'Low', 'Close', 'Adj Close', 'Volume', 'Source'
        ]]
    except:
        try:
            df = quandl.get('WIKI/' + ticker,
                            start_date=start_date,
                            end_date=end_date,
                            api_key=get_quantinsti_api_key())
            df['Source'] = 'Quandl Wiki'
            df = df.rename(columns={"Adj. Close": "Adj Close"})
            return df[[
                'Open', 'High', 'Low', 'Close', 'Adj Close', 'Volume', 'Source'
            ]]
        except AuthenticationError as a:
            print(a)
            print(
                "Please replace the line no. 17 in quantrautil.py file with your Quandl API Key"
            )
        except:
            try:
                start_date = pd.to_datetime(start_date)
                end_date = pd.to_datetime(end_date)
                df = iex.stocks.get_historical_data(ticker,
                                                    start=start_date,
                                                    end=end_date,
                                                    output_format='pandas')
                df.index.name = 'Date'
                df = df.rename(
                    columns={
                        'open': 'Open',
                        'high': 'High',
                        'low': 'Low',
                        'close': 'Close',
                        'volume': 'Volume',
                    })
                df['Source'] = 'IEX'
                return df[['Open', 'High', 'Low', 'Close', 'Volume', 'Source']]
            except:
                try:
                    df = quandl.get('NSE/' + ticker,
                                    start_date=start_date,
                                    end_date=end_date,
                                    api_key=get_quantinsti_api_key())
                    df['Source'] = 'Quandl NSE'
                    return df[[
                        'Open', 'High', 'Low', 'Close', 'Volume', 'Source'
                    ]]
                except:
                    try:
                        df = nsepy.get_history(symbol=ticker,
                                               start=start_date,
                                               end=end_date)
                        df['Source'] = 'nsepy'
                        return df[[
                            'Open', 'High', 'Low', 'Close', 'Volume', 'Source'
                        ]]
                    except:
                        print(traceback.print_exc())
Example No. 19
import pandas as pd
from nsepy import get_history
from datetime import date
import scipy.stats as st
import datetime as dt
import numpy as np

x = dt.datetime(2020, 1, 1)
y = dt.datetime(2020, 4, 12)

#BankingSector
data_ICICI = get_history(symbol='ICICIBANK', start=x, end=y)
data_HDFCBank = get_history(symbol="HDFCBANK", start=x, end=y)
data_SBI = get_history(symbol="SBIN", start=x, end=y)
data_YES = get_history(symbol="YESBANK", start=x, end=y)
data_INDUSLAND = get_history(symbol="INDUSINDBK", start=x, end=y)
data_BANKBARODA = get_history(symbol="BANKBARODA", start=x, end=y)
data_pnb = get_history(symbol='PNB', start=x, end=y)

data_Banking = pd.DataFrame()
data_Banking['ICICI'] = data_ICICI['Close']
data_Banking['SBI'] = data_SBI[['Close']]
data_Banking['PNB'] = data_pnb[['Close']]
data_Banking['YES_BANK'] = data_YES[['Close']]
data_Banking['INDUSLAND'] = data_INDUSLAND[['Close']]
data_Banking['BANKBARODA'] = data_BANKBARODA[['Close']]
data_Banking['HDFC'] = data_HDFCBank[['Close']]
avg = data_Banking.mean()

d_return = []
for col in data_Banking[[
Example No. 20
        nthu = todayte

#start = date(2019,7,1)
#end = date(2019,7,25)

start = pd.to_datetime('today').date()
end = pd.to_datetime('today').date()
futures = True
expiry = date(nthu.year,nthu.month,nthu.day)

array = []

stockNames = ['AAA', 'BBB', 'CCC']  # placeholder symbols; the original list is elided here

for x in stockNames:
  temp = get_history(symbol=x, start=start, end=end, futures = futures, expiry_date = expiry)
  array.append(temp)

data = pd.concat(array)

print(data)
data.to_csv("~/Documents/Financial Analysis/NSE DATA_near.csv")

#Code below for mid month data

nthu = todayte
while todayte.month == nmon:
    todayte += timedelta(days=1)
    if todayte.weekday()==3 and todayte.month==nmon: #this is Thursday
        nthu = todayte
Example No. 21
def data_update(all_stocks_chosen, start_date, end_date):
    all_data = {}
    start, end = start_date, end_date
    for equity in all_stocks_chosen:
        all_data[equity] = get_history(symbol=equity, start=start, end=end)
    return all_stocks_chosen, all_data
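An illustrative call of the helper above (symbols and dates are placeholders):

from datetime import date

symbols, history = data_update(['SBIN', 'INFY'], date(2020, 1, 1), date(2020, 6, 30))
print(history['SBIN'].tail())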
Example No. 22
def stock_pred():
    st.title("STOCK PREDICTION WEB APPLICATION")
    st.header('Here you can select the stock you want to predict')
    st.write("This Prediction is done by LSTM model.")

    main_bg = "14.jpg"
    main_bg_ext = "jpg"

    side_bg = '16.jpg'
    side_bg_ext = "jpg"

    st.markdown(f"""
        <style>
        .reportview-container {{
            background: url(data:image/{main_bg_ext};base64,{base64.b64encode(open(main_bg, "rb").read()).decode()})
        }}
       .sidebar .sidebar-content {{
            background: url(data:image/{side_bg_ext};base64,{base64.b64encode(open(side_bg, "rb").read()).decode()})
        }}
        </style>
        """,
                unsafe_allow_html=True)

    choose_stock = st.selectbox(
        'Choose the stock',
        ['NONE', 'TCS.NS', 'BHEL.NS', 'HDFCBANK.NS', 'TVSMOTOR.NS'])
    if (choose_stock == 'TCS.NS'):
        df1 = get_history(symbol='TCS',
                          start=date(2020, 1, 1),
                          end=date.today())
        df1['Date'] = df1.index
        st.header('TCS')
        if st.checkbox('Show Raw Data'):
            st.subheader("Wanna See Raw Data")
            st.dataframe(df1.tail())

        new_df = df1.filter(['Close'])
        scaler = MinMaxScaler(feature_range=(0, 1))

        last_30_days = new_df[-100:].values
        last_30_days_scaled = scaler.fit_transform(last_30_days)
        X_test = []
        X_test.append(last_30_days_scaled)
        X_test = np.array(X_test)
        X_test = np.reshape(X_test, (X_test.shape[0], X_test.shape[1], 1))
        model = load_model('MODELS/TCS.model')
        pred_price = model.predict(X_test)
        pred_price = scaler.inverse_transform(pred_price)

        NextDay_Date = datetime.date.today() + datetime.timedelta(days=1)

        get_pred = st.button('Get Prediction of Selected Stock')
        if get_pred:
            st.subheader(
                "Predictions for the next upcoming day Close Price : " +
                str(NextDay_Date))
            st.markdown(pred_price)

        st.subheader("Close Price VS Date Interactive chart for analysis:")
        st.line_chart(df1['Close'])

        st.subheader("Line chart of Open and Close for analysis:")
        st.line_chart(df1[['Open', 'Close']])

        st.subheader("Line chart of High and Low for analysis:")
        st.line_chart(df1[['High', 'Low']])

    if (choose_stock == 'BHEL.NS'):
        df1 = get_history(symbol='BHEL',
                          start=date(2020, 1, 1),
                          end=date.today())
        df1['Date'] = df1.index
        st.header('BHEL')
        if st.checkbox('Show Raw Data'):
            st.subheader("Showing raw data---->>>")
            st.dataframe(df1.tail())

        new_df = df1.filter(['Close'])
        scaler = MinMaxScaler(feature_range=(0, 1))

        last_30_days = new_df[-100:].values
        last_30_days_scaled = scaler.fit_transform(last_30_days)
        X_test = []
        X_test.append(last_30_days_scaled)
        X_test = np.array(X_test)
        X_test = np.reshape(X_test, (X_test.shape[0], X_test.shape[1], 1))
        model = load_model('MODELS/BHEL.model')
        pred_price = model.predict(X_test)
        pred_price = scaler.inverse_transform(pred_price)

        NextDay_Date = datetime.date.today() + datetime.timedelta(days=1)

        get_pred = st.button('Get Prediction of Selected Stock')
        if get_pred:
            st.subheader(
                "Predictions for the next upcoming day Close Price : " +
                str(NextDay_Date))
            st.markdown(pred_price)

        st.subheader("Close Price VS Date Interactive chart for analysis:")
        st.line_chart(df1['Close'])

        st.subheader("Line chart of Open and Close for analysis:")
        st.line_chart(df1[['Open', 'Close']])

        st.subheader("Line chart of High and Low for analysis:")
        st.line_chart(df1[['High', 'Low']])

    if (choose_stock == 'HDFCBANK.NS'):
        df1 = get_history(symbol='HDFCBANK',
                          start=date(2020, 3, 1),
                          end=date.today())
        df1['Date'] = df1.index
        st.header('HDFC')
        if st.checkbox('Show Raw Data'):
            st.subheader("Showing raw data---->>>")
            st.dataframe(df1.tail())

        new_df = df1.filter(['Close'])
        scaler = MinMaxScaler(feature_range=(0, 1))

        last_30_days = new_df[-100:].values
        last_30_days_scaled = scaler.fit_transform(last_30_days)
        X_test = []
        X_test.append(last_30_days_scaled)
        X_test = np.array(X_test)
        X_test = np.reshape(X_test, (X_test.shape[0], X_test.shape[1], 1))
        model = load_model('MODELS/HDFCBANK.model')
        pred_price = model.predict(X_test)
        pred_price = scaler.inverse_transform(pred_price)

        NextDay_Date = datetime.date.today() + datetime.timedelta(days=1)

        get_pred = st.button('Get Prediction of Selected Stock')
        if get_pred:
            st.subheader(
                "Predictions for the next upcoming day Close Price : " +
                str(NextDay_Date))
            st.markdown(pred_price)

        st.subheader("Close Price VS Date Interactive chart for analysis:")
        st.line_chart(df1['Close'])

        st.subheader("Line chart of Open and Close for analysis:")
        st.line_chart(df1[['Open', 'Close']])

        st.subheader("Line chart of High and Low for analysis:")
        st.line_chart(df1[['High', 'Low']])

    if (choose_stock == 'TVSMOTOR.NS'):
        df1 = get_history(symbol='TVSMOTOR',
                          start=date(2020, 4, 1),
                          end=date.today())
        df1['Date'] = df1.index
        st.header('TVSMOTOR')
        if st.checkbox('Show Raw Data'):
            st.subheader("Showing raw data---->>>")
            st.dataframe(df1.tail())

        new_df = df1.filter(['Close'])
        scaler = MinMaxScaler(feature_range=(0, 1))

        last_30_days = new_df[-100:].values
        last_30_days_scaled = scaler.fit_transform(last_30_days)
        X_test = []
        X_test.append(last_30_days_scaled)
        X_test = np.array(X_test)
        X_test = np.reshape(X_test, (X_test.shape[0], X_test.shape[1], 1))
        model = load_model('MODELS/TVSMOTOR.model')
        pred_price = model.predict(X_test)
        pred_price = scaler.inverse_transform(pred_price)

        NextDay_Date = datetime.date.today() + datetime.timedelta(days=1)

        get_pred = st.button('Get Prediction of Selected Stock')
        if get_pred:
            st.subheader(
                "Predictions for the next upcoming day Close Price : " +
                str(NextDay_Date))
            st.markdown(pred_price)

        st.subheader("Close Price VS Date Interactive chart for analysis:")
        st.line_chart(df1['Close'])

        st.subheader("Line chart of Open and Close for analysis:")
        st.line_chart(df1[['Open', 'Close']])

        st.subheader("Line chart of High and Low for analysis:")
        st.line_chart(df1[['High', 'Low']])
Example No. 23
import pandas as pd
from nsepy import get_history
from datetime import date
import scipy.stats as st
import datetime as dt

x = dt.datetime(2020, 1, 1)
y = dt.datetime(2020, 4, 12)

data_Reliance = get_history(symbol='RELIANCE', start=x, end=y)
data_Titan = get_history(symbol="TITAN", start=x, end=y)
data_sunpharma = get_history(symbol='SUNPHARMA', start=x, end=y)
data_TCS = get_history(symbol='TCS', start=x, end=y)
data_MindTree = get_history(symbol='MINDTREE', start=x, end=y)
data_PERSISTENT = get_history(symbol="PERSISTENT", start=x, end=y)
data_ICICI = get_history(symbol='ICICIBANK', start=x, end=y)
data_SBI = get_history(symbol="SBIN", start=x, end=y)
data_TATACOMM = get_history(symbol="TATACOMM", start=x, end=y)
data_cipla = get_history(symbol='CIPLA', start=x, end=y)
data_HDFC = get_history(symbol="HDFC", start=x, end=y)

d = pd.DataFrame()
d['Titan'] = data_Titan['Close']
d['Reliance'] = data_Reliance[['Close']]
d['Mind_Tree'] = data_MindTree[['Close']]
d['Tcs'] = data_TCS[['Close']]
d['persistent'] = data_PERSISTENT[['Close']]
d['ICICI'] = data_ICICI[['Close']]
d['SBI'] = data_SBI[['Close']]
d['HDFC'] = data_HDFC[['Close']]
d['Cipla'] = data_cipla[['Close']]
Example No. 24
indices = session.query(StockList).filter(StockList.indices == True)
indices_list = [ind.stock_code for ind in indices]

stocks = session.query(StockList).filter(StockList.fno == True)

for stock in stocks:
    print("######################## Running for Stock:" + stock.stock_code)
    start_date = stock.fno_updated + timedelta(days=1)
    if start_date < todaysDate:
        for expiry_date in expiry_dates:
            print(start_date)
            stock_ohlc_data = get_history(
                symbol=stock.stock_code,
                start=start_date,
                end=todaysDate,
                expiry_date=expiry_date,
                futures=True,
                index=True if stock.stock_code in indices_list else False)
            # print(stock_ohlc_data.dtypes)
            for ind in stock_ohlc_data.index:
                if stock_ohlc_data.index.values.size > 0:
                    trade_date = ind
                    nseFNODaily = NseFNODaily(
                        stock_id=stock.stock_id,
                        trade_date=trade_date,
                        stock_code=stock_ohlc_data["Symbol"][ind],
                        expiry_date=expiry_date,
                        open=stock_ohlc_data["Open"][ind],
                        high=stock_ohlc_data["High"][ind],
                        low=stock_ohlc_data["Low"][ind],
Example No. 25
"""
@author: sanjotraibagkar
"""

from nsepy import get_history
from datetime import date
import pandas as pd
import matplotlib.pyplot as plt

stock = "SBIN"
start = date(2017, 12, 26)
end = date(2018, 1, 25)
end2 = date(2018, 2, 6)
data_fut = get_history(symbol=stock,
                       start=start,
                       end=end,
                       futures=True,
                       expiry_date=date(2018, 1, 25))
data_fut2 = get_history(symbol=stock,
                        start=start,
                        end=end2,
                        futures=True,
                        expiry_date=date(2018, 2, 22))

OI_combined = pd.concat(
    [data_fut2['Open Interest'], data_fut['Open Interest']], axis=1)
OI_combined['OI_Combined'] = OI_combined.sum(axis=1)

plt.figure(1, figsize=(10, 9))
plt.subplot(211)
plt.title('Open Interest')
Example No. 26
from nsepy import get_history
import datetime as dt
import pandas as pd
# import numpy as np
import statistics

# define the period for data comparison
start_date = dt.datetime(2018, 1, 1)
end_date = dt.datetime.today()

# define the pair here
series1 = 'ICICIBANK'
series2 = 'AXISBANK'

df1 = pd.DataFrame(get_history(series1, start=start_date, end=end_date))
df2 = pd.DataFrame(get_history(series2, start=start_date, end=end_date))

df1.to_csv(
    '/Users/shayakroy/Desktop/Trading Videos/Pyhton/Pair Trading/{}.csv'.
    format(series1))
df2.to_csv(
    '/Users/shayakroy/Desktop/Trading Videos/Pyhton/Pair Trading/{}.csv'.
    format(series2))

# rename the variable
df1.rename(columns={'Close': series1}, inplace=True)
df2.rename(columns={'Close': series2}, inplace=True)

# keeping only the closing prices in the 2 data frames
df1 = df1[series1]
df2 = df2[series2]
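With the two close-price series isolated, the usual next step in a pair-trading workflow (not shown in the excerpt) is to examine the price ratio and its z-score; a sketch using the statistics module already imported above:

ratio = (df1 / df2).dropna()
zscore = (ratio - statistics.mean(ratio)) / statistics.stdev(ratio)
print(zscore.tail())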
Example No. 27
from datetime import datetime

import numpy as np
import pandas as pd
from statsmodels.tsa.seasonal import seasonal_decompose

from nsepy import get_history, get_index_pe_history
from nsetools import Nse

nse = Nse()
print(nse)
from nsepy.symbols import get_symbol_list

# # Step-1 : Download stock price from 01-03-2019 to 01-03-2020

# In[4]:

start = datetime(2019, 3, 1)
end = datetime(2020, 3, 1)

# In[5]:

stock = get_history(symbol='HDFC', start=start, end=end)

# # Step-2: Calculate trend and seasonality of Close,Low and High Price

# In[6]:

ts_ds = pd.DataFrame({
    'Symbol':
    'HDFC',
    'Low':
    stock[stock['Symbol'] == 'HDFC'].Low,
    'Low_season':
    seasonal_decompose(np.array(stock[stock['Symbol'] == 'HDFC'].Low),
                       freq=90,  # 'freq' was renamed to 'period' in newer statsmodels
                       extrapolate_trend='freq').seasonal,
    'Low_trend':
Example No. 28



NIFTY = ["NIFTY IT"]
Stocks = ["INFY", "TCS"]

data_NIFTY = {}
data_Stocks = {}
moving_average_Stocks = {}
moving_average_NIFTY = {}


# data and moving average
for items in Stocks:
    data_Stocks[items] = get_history(symbol=items, start=datetime.date(2015, 1, 1), end=datetime.date(2015, 3, 1))
    moving_average_Stocks[items] = moving_average(data_Stocks[items], 4)


for items in NIFTY:
    data_NIFTY[items] = get_history(symbol=items, start=datetime.date(2015, 1, 1), end=datetime.date(2015, 1, 10), index=True)
    moving_average_NIFTY[items] = moving_average(data_NIFTY[items], 4)
    
# create dummy variables
for items in data_Stocks:
    data_Stocks[items]["Volume_Shocks"] = vol_shock(data_Stocks[items])
    data_Stocks[items]["Price_Shocks"] = price_shock(data_Stocks[items])
    
for items in data_NIFTY:
    data_NIFTY[items]["Volume_Shocks"] = vol_shock(data_NIFTY[items])
    data_NIFTY[items]["Price_Shocks"] = price_shock(data_NIFTY[items])
Example No. 29
    return (CMP)


#List of top stock futures
stks = [
    "SBIN", "INFY", "VEDL", "APOLLOTYRE", "RELCAPITAL", "BHARATFIN", "MARUTI",
    "ICICIBANK", "TATAMOTORS", "YESBANK", "HINDALCO", "AXISBANK", "HDFCBANK",
    "TCS", "RELIANCE", "ITC", "ULTRACEMCO", "POWERGRID", "HDFC", "LT",
    "BAJAJ-AUTO", "KOTAKBANK", "ASHOKLEY", "GRASIM", "TATASTEEL", "INDUSINDBK",
    "HEROMOTOCO", "HINDUNILVR"
]

#Creating Dataframe for Stock Futures for Current Month Time Series
fut = get_history(symbol=stks[0],
                  start=date(2017, 1, 1),
                  end=date(2017, 12, 31),
                  index=False,
                  futures=True,
                  expiry_date=date(2017, 1, 25))
month = 1
for i in [23, 30, 27, 25, 29, 27, 31]:
    month += 1
    numb = get_history(symbol=stks[0],
                       start=date(2017, month, 1),
                       end=date(2017, 12, 31),
                       index=False,
                       futures=True,
                       expiry_date=date(2017, month, i))
    fut = fut.append(numb)  # DataFrame.append was removed in pandas 2.0; use pd.concat([fut, numb]) there
for j in stks[1:]:
    month = 1
    fut = fut.append(
Example No. 30
nf_calls=[]
nf_puts=[]


#nf_calls[['VolumeCalls']]=np.nan
#nf_puts[['VolumeCalls']]=np.nan
i = min_available_strikes = 4850
max_available_strike = 9400
nf_opt_CE = nf_opt_PE = pd.DataFrame()

# Iterate over the strike range; a 50-point step is assumed here. The original
# `while i in range(...)` never incremented i and would have looped forever.
for i in range(min_available_strikes, max_available_strike + 1, 50):
    temp_CE = get_history(symbol="NIFTY",
                         start=date(2016,2,1), 
                         end=date(2016,4,24),
                         index=True,
                         option_type="CE",
                         strike_price=i,
                         expiry_date=date(2016,4,28))
    
    #print(nf_opt_CE.head())
    #if nf_opt_CE['Number of Contracts'].values >0 :
    '''if nf_opt_CE.empty :
        nf_opt_CE.append(0)
    '''
    
    temp_PE = get_history(symbol="NIFTY",
                         start=date(2016,2,1), 
                         end=date(2016,4,22),
                         index=True,
                         option_type="PE",
Example No. 31
def main():
    # Importing Historical Stock Price Data using nsepy's get_history function
    symbol = input("Enter Stock Symbol:")
    n = int(input(
        "Enter number of days for which you want prediction:"))  # 'cipla', 2
    start = date(2015, 1, 1)
    end = date.today()
    df = get_history(symbol=symbol, start=start, end=end)
    df['Date'] = df.index

    # Plotting the Close price data
    plt.figure(figsize=(18, 9))
    plt.title('Close Price History')
    plt.xlabel('Date', fontsize=18)
    plt.ylabel('Close Price', fontsize=18)
    plt.fill_between(df['Date'], df['Close'], color="skyblue", alpha=0.2)
    plt.plot(df['Date'], df['Close'], color="skyblue", alpha=0.6)
    plt.show()

    # Create a new dataframe to store the close prices
    data = df.filter(['Close'])
    # Converting the dataframe to a numpy array
    dataset = data.values
    # divide the dataset into training data and testing data
    training_data_len = math.ceil(len(dataset) * 0.75)

    # Scale data to be values between 0 and 1
    scaler = MinMaxScaler(feature_range=(0, 1))
    scaled_data = scaler.fit_transform(dataset)

    # Creating the training data set from scaled dataset
    prediction_days = 30
    train_data = scaled_data[0:training_data_len, :]
    # Split the data into x_train and y_train
    x_train = []
    y_train = []
    for i in range(prediction_days, len(train_data)):
        x_train.append(train_data[i - prediction_days:i, 0])
        y_train.append(train_data[i, 0])
    x_train, y_train = np.array(x_train), np.array(y_train)
    x_train = np.reshape(x_train, (x_train.shape[0], x_train.shape[1], 1))

    # Build the model
    model = Sequential()
    model.add(
        LSTM(units=50,
             return_sequences=True,
             input_shape=(x_train.shape[1], 1)))
    model.add(LSTM(units=50, return_sequences=False))
    model.add(Dense(units=25))
    model.add(Dense(units=1))

    # Compile the model
    model.compile(optimizer='adam', loss='mean_squared_error')

    # Train the model
    model.fit(x_train, y_train, batch_size=32, epochs=100)

    # Save the model
    model.save(symbol + '.model')

    # Test Data set
    test_data = scaled_data[training_data_len - prediction_days:, :]
    # Create the x_test and y_test data sets
    x_test = []
    y_test = dataset[training_data_len:, :]
    for i in range(prediction_days, len(test_data)):
        x_test.append(test_data[i - prediction_days:i, 0])

    # Convert x_test to a numpy array
    x_test = np.array(x_test)
    # Reshape the data into the shape accepted by the LSTM model
    x_test = np.reshape(x_test, (x_test.shape[0], x_test.shape[1], 1))

    # Getting the model's predictions
    predictions = model.predict(x_test)
    predictions = scaler.inverse_transform(predictions)
    print(len(predictions))
    # Calculate RMSE Value
    rmse = np.sqrt(np.mean(((predictions - y_test)**2)))
    print("RMSE:", rmse)

    train = data[:training_data_len]
    valid = data[training_data_len:]
    valid['Predictions'] = predictions

    # Visualizing the data
    plt.figure(figsize=(18, 9))
    plt.title('Predicted Stock price and Real Stock Price')
    plt.xlabel('Date', fontsize=18)
    plt.ylabel('Close Price', fontsize=18)
    plt.plot(train['Close'])
    plt.plot(valid[['Close', 'Predictions']])
    plt.legend(['Train', 'Val', 'Predictions'], loc='lower right')
    plt.show()

    valid[-5:]

    # Next Day Price Predcition
    newdf = df.filter(['Close'])
    # newdf['Predictions']

    # Leapfrog
    for i in range(n):
        last_30_days = newdf[-30:].values
        last_30_days_scaled = scaler.transform(last_30_days)
        print("last_30_days:\n")
        print(last_30_days)

        x_test = []
        x_test.append(last_30_days_scaled)
        x_test = np.array(x_test)
        x_test = np.reshape(x_test, (x_test.shape[0], x_test.shape[1], 1))

        pred_price = model.predict(x_test)
        pred_price = scaler.inverse_transform(pred_price)

        print("Predicted Price:{}".format(pred_price))

        last_date = date.today()
        next_date = last_date + timedelta(days=i)

        newdf.loc[next_date, 'Close'] = pred_price

    print(newdf.dtypes)
    print("Next {} day predictions are: {}".format(n, newdf[-(n * -1):]))
    newdf2 = newdf[(n * -1):].Close.astype(int)
    print("newdf2:", newdf2)
    print("newdf2[1]:", newdf2[1])

    for i in range(n):
        last_date = date.today()
        next_date = last_date + timedelta(days=i)
        ''' stock_pred_id = symbol + '_' + str(next_date.strftime('%d%m%Y'))   #'cipla_18032021' # (stock_symbol + _ + ddmmyy)
        print("stock_pred_id:", stock_pred_id) '''
        num = str(i)

        date_of_pred = str(next_date.strftime('%Y-%m-%d'))

        label = next_date.strftime('%b %d, %Y')
        data_db1 = {
            'stocks': {
                symbol: {
                    'historical': {
                        num: {
                            'close': int(newdf2[i]),
                        }
                    }
                }
            }
        }
        '''if i == 0:
            create_db(data_db1)
        else:'''
        add_new_data(int(newdf2[i]), symbol, num, date_of_pred,
                     label)  # symbol, stock_pred_id
Example No. 32
        high = (self.df['close'] > self.df['close_s1']) & (self.df['close'] > self.df['close_s2'])
        # high = self.df['close'] > self.df['close_s2']
        self.df.loc[high, 'close_l'] = self.df['close']
        self.df.loc[high, 'trend'] = 1


if len(sys.argv) > 1:
    fname = sys.argv[1]
    print('Reading local file {}'.format(fname))
    df = pd.read_csv(sys.argv[1])
else:
    symbol='HDFCLIFE'
    print('Downloading {} data from nsepy'.format(symbol))
    df = nsepy.get_history(
        symbol=symbol,
        start=dt.date(2017,1,1),
        end=dt.date(2018,1,19)
    )
    if df.empty:
        print('No data is received from nsepy. Exiting...')
        sys.exit()


df.reset_index(inplace=True)
df.columns = [i.lower() for i in df.columns]

lb = LineBreak(df)
lb.LINE_NUMBER = 2
data = lb.get_chart_data()
print(data.tail(38))
Example No. 33
def fetch(keyword, stockSymbol):
    twitterData = twitter_data.TwitterData('2020-2-18')
    tweets = twitterData.getTwitterData(keyword)
    #Twitter data fetched

    keyword2 = keyword

    historical_data = get_history(symbol=stockSymbol,
                                  start=datetime.date(2020, 2, 18),
                                  end=datetime.date(2020, 5, 26))
    data = pd.DataFrame(historical_data,
                        columns=['Open', 'Close', 'High', 'Low'])
    data.reset_index(level=0, inplace=True)
    #print(data)

    open_price = {}
    close_price = {}
    high_price = {}
    low_price = {}

    for index, row in data.iterrows():
        date = row['Date']
        open_price.update({date: row['Open']})
        close_price.update({date: row['Close']})
        high_price.update({date: row['High']})
        low_price.update({date: row['Low']})
    #Stock data fetched

    for t in tweets.items():
        for value in t[1]:
            tweet_s.append(value)

    url = urlopen('http://www2.compute.dtu.dk/~faan/data/AFINN.zip')
    zipfile = ZipFile(StringIO(url.read()))
    afinn_file = zipfile.open('AFINN/AFINN-111.txt')

    for line in afinn_file:
        parts = line.strip().split()
        if len(parts) == 2:
            afinn[parts[0]] = int(parts[1])

    sentiment_analyzer()
    inputTweets = csv.reader(open('Data/Tweets.csv', 'rb'), delimiter=',')
    stopWords = getListOfStopWords()
    count = 0
    featureList = []
    list_tweet = []
    labelList = []
    dates = []
    tweets = []
    date_split = []

    for row in inputTweets:
        #print(row)
        if len(row) == 4:
            list_tweet.append(row)
            sentiment = row[0]
            date = row[1]
            text = row[2]

            date_split.append(date)
            dates.append(date)
            labelList.append(sentiment)

            processedText = processTweetText(text)
            featureVector = getFeatureVector(processedText, stopWords)
            featureList.extend(featureVector)
            tweets.append((featureVector, sentiment))
            #print(tweets)
    #print(featureList)

    result = getFeatureVectorAndLabels(tweets, featureList)
    #return result
    #new code

    data2 = open('newfile.txt', 'r')
    files = np.loadtxt(data2, dtype=str, delimiter=',')

    #Now we will split the data
    inp_data2 = []
    inp_data2 = np.array(files[:, 0:-1], dtype='float')
    target2 = np.array(files[:, -1], dtype='int')

    X = np.array(inp_data2)
    y = np.array(target2)
    best_params_ = svc_param_selection(X, y, 6)
    svc_RBF = svm.SVC(kernel='rbf', C=10, gamma=0.01).fit(X, y)
    #print("accuracy of RBF Kernel with gamma=0.01 is ", svc_RBF.score(X,y))
    #return svc_RBF.score(X,y)
    #accuracy score
    #checkpoint 2

    #print "Preparing dataset for stock prediction using stock data and tweet sentiment...."
    date_tweet_details = {}
    file = open("stockpredict.txt", "w")
    totalPositiveCount = 0
    totalNeutralCount = 0
    totalNegativeCount = 0
    myList = []
    final = []
    for dateVal in np.unique(date_split):
        date_totalCount = 0
        date_PosCount = 0
        date_NegCount = 0
        date_NutCount = 0
        total_sentiment_score = 0

        for row in list_tweet:
            sentiment = row[0]
            temp_date = row[1]
            sentiment_score = row[3]
            if (temp_date == dateVal):
                total_sentiment_score += float(sentiment_score)
                date_totalCount += 1
                if (sentiment == 'positive'):
                    date_PosCount += 1
                elif (sentiment == 'negative'):
                    date_NegCount += 1
                elif (sentiment == 'neutral'):
                    date_NutCount += 1

        s = str(date_totalCount) + " " + str(date_PosCount) + " " + str(
            date_NegCount) + " " + str(date_NutCount)
        date_tweet_details.update({dateVal: s})

        totalPositiveCount += date_PosCount
        totalNeutralCount += date_NutCount
        totalNegativeCount += date_NegCount

        dateVal = dateVal.strip()
        day = datetime.datetime.strptime(dateVal, '%Y-%m-%d').strftime('%A')
        #print dateVal
        #print day
        closing_price = 0.
        opening_price = 0.
        if day == 'Saturday':
            update_date = dateVal.split("-")
            if len(str((int(update_date[2]) - 1))) == 1:
                dateVal = update_date[0] + "-" + update_date[1] + "-0" + str(
                    (int(update_date[2]) - 1))
            else:
                dateVal = update_date[0] + "-" + update_date[1] + "-" + str(
                    (int(update_date[2]) - 1))

            dt = parser.parse(dateVal)
            datetime_obj = dt.date()
            opening_price = open_price[datetime_obj]
            closing_price = close_price[datetime_obj]
        elif day == 'Sunday':
            update_date = dateVal.split("-")
            if len(str((int(update_date[2]) - 2))) == 1:
                dateVal = update_date[0] + "-" + update_date[1] + "-0" + str(
                    (int(update_date[2]) - 2))
            else:
                dateVal = update_date[0] + "-" + update_date[1] + "-" + str(
                    (int(update_date[2]) - 2))

            dt = parser.parse(dateVal)
            datetime_obj = dt.date()
            opening_price = open_price[datetime_obj]
            closing_price = close_price[datetime_obj]
        else:
            dt = parser.parse(dateVal)
            datetime_obj = dt.date()
            opening_price = open_price[datetime_obj]
            closing_price = close_price[datetime_obj]

        #print dateVal
        #print "Total tweets = ", date_totalCount, " Positive tweets = ", date_PosCount, " Negative tweets = ", date_NegCount
        #print "Total sentiment score = ", total_sentiment_score
        market_status = 0
        if (float(closing_price) - float(opening_price)) > 0:
            market_status = 1
        else:
            market_status = -1
        file.write(
            str(date_PosCount) + "," + str(date_NegCount) + "," +
            str(date_NutCount) + "," + str(date_totalCount) + "," +
            str(market_status) + "\n")
        #print " Total Tweet For date =",dateVal ," Count =" , date_totalCount
        #print " Positive Tweet For date =",dateVal ," Count =" , date_PosCount
        #print " Negative Tweet For date =",dateVal ," Count =" , date_NegCount
        #print " Neutral Tweet For date =",dateVal ," Count =" , date_NutCount
    file.close()
    #print "Read from text file and prepare data matrix & target matrix...."

    data_Stock = open('stockpredict.txt', 'r')
    inp_dataStock = []
    stockfiles = np.loadtxt(data_Stock, delimiter=',')
    inp_dataStock = np.array(stockfiles[:, 0:-1], dtype='float')
    stock_Y = stockfiles[:, -1]

    X_stock = np.array(inp_dataStock)
    y_stock = np.array(stock_Y)
    #best_params_1 = svc_param_selection_1(X_stock,stock_Y,4)

    svc_RBF = svm.SVC(kernel='rbf', C=10, gamma=0.01).fit(X_stock, y_stock)

    #print("accuracy of RBF Kernel with gamma=0.01 is ", svc_RBF.score(X,y))
    #return svc_RBF.score(X_stock,y_stock)
    #checkpoint3
    dates = []
    prices = []

    for index, row in data.iterrows():
        date = row['Date']
        int_date = date.strftime('%Y%m%d')
        #dates.append(int(int_date.split('-')[2]))
        dates.append(int_date)
        prices.append(float(row['Close']))

    #print dates
    #print prices
    predicted_price = predict_price(dates, prices, 20200526)
    bhavishya = str(predicted_price)

    final.append(bhavishya)

    #print "\nThe stock close price for 25th May will be:"
    #print "RBF kernel: Rs.", str(predicted_price[0])'''
    myList.append([totalPositiveCount, totalNegativeCount, totalNeutralCount])
    #maximum = myList.index(max(myList))

    if totalPositiveCount > totalNegativeCount and totalPositiveCount > totalNeutralCount:
        final.append('Uptrend')
        return final
    elif totalNegativeCount > totalPositiveCount and totalNegativeCount > totalNeutralCount:
        final.append('Downtrend')
        return final
    else:
        final.append('Neutral')
        return final
Example No. 34
from nsepy import get_history
from datetime import date

from nsepy.commons import StrDate
import talib
from utils.AlgorithmHelper import MFI

v = get_history(symbol="SBIN", start=date(2017, 1, 1), end=date(2017, 11, 19))
print(MFI(v, 14))
# talib.MFI needs the four input series (as doubles), not a bare call:
print(talib.MFI(v['High'], v['Low'], v['Close'], v['Volume'].astype(float), timeperiod=14))
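For reference, a plain-pandas version of the 14-period Money Flow Index (the utils.AlgorithmHelper implementation is not shown here; this is the textbook formula):

def mfi_pandas(df, period=14):
    # Typical price, raw money flow, then the positive/negative flow ratio
    tp = (df['High'] + df['Low'] + df['Close']) / 3
    raw_flow = tp * df['Volume']
    direction = tp.diff()
    pos_flow = raw_flow.where(direction > 0, 0.0).rolling(period).sum()
    neg_flow = raw_flow.where(direction < 0, 0.0).rolling(period).sum()
    return 100 - 100 / (1 + pos_flow / neg_flow)

print(mfi_pandas(v, 14).tail())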
Example No. 35
 def getData(self, ticker):
     s = date.today() - timedelta(days=170)
     e = date.today()
     data = get_history(symbol=ticker, start=s, end=e)
     data100 = pd.DataFrame(data[-100:])
     return data100