Beispiel #1
0
    def send_head(self):
        """Common code for GET and HEAD commands.

        This sends the response code and MIME headers.

        Return value is either a file object (which has to be copied
        to the outputfile by the caller unless the command was HEAD,
        and must be closed by the caller under all circumstances), or
        None, in which case the caller has nothing further to do.

        """
        # NOTE(review): Python 2 syntax (print statements); this handler
        # will not run under Python 3 without modification.
        print self.path
        request_path = self.path[1:]  # strip the leading '/'

        # Everything is served as a generic binary stream; the payload
        # written below is actually CSV text.
        ctype = 'application/octet-stream'
        f = StringIO()

        # Query-string parameters (single-letter keys; raises KeyError if
        # any is missing):
        #   s = ticker symbol, a/b/c = begin month/day/year,
        #   d/e/f = end month/day/year,
        #   g = request type ('d' = daily prices, else corporate actions).
        params = parse_qs(urlparse(request_path).query)
        stock = params['s'][0]
        month_begin = params['a'][0]
        day_begin = params['b'][0]
        year_begin = params['c'][0]
        month_end = params['d'][0]
        day_end = params['e'][0]
        year_end = params['f'][0]
        type = params['g'][0]  # NOTE(review): shadows the builtin `type`

        yf.pdr_override()  # route pandas_datareader through fix_yahoo_finance

        # Build "YYYY-M-D" date strings for the download call.
        data_begin_str = year_begin + "-" + month_begin +"-" + day_begin
        date_end_str = year_end + "-" + month_end +"-" + day_end
        if(type == 'd'):
            action_to_request = None  # plain OHLCV download
        else:
            action_to_request = 'only'  # dividends/splits only
        print stock, data_begin_str, date_end_str, action_to_request
        pandas_data = yf.download(stock, start = data_begin_str, end = date_end_str, actions = action_to_request)
        print pandas_data
        # Serialize as CSV with a column set matching the request type.
        if('d' == type):
            data = pandas_data.to_csv(columns = ['Open','High','Low','Close','Volume','Adj Close'])
        else:
            data = pandas_data.to_csv(columns = ['action','value'])
        f.write(data)
        # Headers are only sent after the download succeeded.
        self.send_response(200)
        length = f.tell()
        f.seek(0)
        self.send_header("Content-type", ctype)
        self.send_header("Content-Length", str(length))
        self.end_headers()
        return f
Beispiel #2
0
 def downloader(self):
     """Backfill currency quotes once, then poll in an hourly loop.

     NOTE(review): Python 2 print statements. Also the loop below calls
     self._downloader() while this method is named `downloader` --
     presumably a sibling defined elsewhere in the class; verify.
     """
     # Fixed starting point for the historical backfill.
     startDate = datetime.date(2017,1,1)
     self.last_downloaded = startDate
     yf.pdr_override()  # route pandas_datareader through fix_yahoo_finance
     try:
         print 'ILS'
         self.download_currency_quotes(startDate)
         print 'EUR'
         self.download_currency_quotes(startDate, 'EUR')
         self.last_downloaded = datetime.date.today()
     except:
         # NOTE(review): bare except swallows everything (including
         # KeyboardInterrupt); treated as best-effort, retried next cycle.
         print 'not downloaded at: ' + str(datetime.datetime.now()) #try again later
     while True:
         self._downloader()
         time.sleep(60*60)  # one hour between polls
def main():
    """Crawl the newest prices for every stock id, then record a progress row.

    Relies on module-level helpers and config defined elsewhere in this
    file (take_stock_id_by_sql, crawler_new_stock_price,
    save_new_craw_process, stock_sql, host, user, password).
    """
    # FinancialKey.creat_StockPriceProcess_file()
    yf.pdr_override()  # route pandas_datareader through fix_yahoo_finance
    stock_id = take_stock_id_by_sql()
    #-----------------------------------------------
    # FIX: replaced a manual `i = 1 / i = i + 1` counter with enumerate, and
    # renamed the local previously called `self` (misleading outside a method).
    for i, stock in enumerate(stock_id['stock_cid'], start=1):
        print(str(i) + '/' + str(len(stock_id)) + ' : ' + stock)
        crawler = crawler_new_stock_price(stock, stock_id)
        crawler.main()
        save_new_craw_process(stock)
        # stock='0053'

    #------------------------------------------------------
    # Record when this crawl finished in the StockPriceProcess table.
    text = 'insert into StockPriceProcess (name,stockdate,time) values(%s,%s,%s)'
    now = datetime.datetime.now()
    today = now.strftime("%Y-%m-%d")
    # Drop fractional seconds from the timestamp; FIX: raw string for the
    # regex so '\.' is not an invalid escape sequence.
    finished_at = re.split(r'\.', str(now))[0]
    value = ('StockPrice', today, finished_at)

    stock_sql.Update2Sql(host, user, password,
                         'python', text, value)
Beispiel #4
0
import pandas as pd
import pandas_datareader as pdr
import matplotlib.pyplot as plt
from fix_yahoo_finance import pdr_override

# Patch pandas_datareader so Yahoo downloads work again.
pdr_override()

# Study window shared by all tasks below.
START_DATE = '2014-06-01'
END_DATE = '2016-06-13'

# a = pdr.get_data_yahoo('AAL', START_DATE, END_DATE)
# print(a['Adj Close'])
# BUG FIX: removed a stray argument-less `pd.concat()` call that raised
# TypeError here before any task could run.

print('------- TASK 1 ------\n')
# LFL replace to ADR
symbols = ['AAL', 'ALK', 'AVH', 'CEA', 'ZNH', 'VLRS', 'CPA', 'DAL', 'GOL', 'LUV', 'UAL']
all_data = {symbol : pdr.get_data_yahoo(symbol, START_DATE, END_DATE) for symbol in symbols}
print(all_data['AAL'].head(2))
print('---------------------\n')

print('------- TASK 2 ------\n')
# Load locally-cached Southwest Airlines data.
luvdf = pd.read_csv('LUV.csv', index_col='Date', parse_dates=True)
print(luvdf.head(2))
print('---------------------\n')

print('------- TASK 3 ------\n')
# Slice the local data down to the study window.
luvdf = luvdf[START_DATE : END_DATE]
print('---------------------\n')
Beispiel #5
0
def yahoo_finance_bridge():
    """
    Work around broken Yahoo Finance downloads by monkey-patching
    pandas_datareader (via fix_yahoo_finance's pdr_override).
    :return: None
    """
    pdr_override()
Beispiel #6
0
import pandas as pd
import numpy as np
# libraries to load underlying daily time series
from pandas_datareader import data as pdr
import warnings
warnings.filterwarnings("ignore")
warnings.simplefilter("ignore", category=PendingDeprecationWarning)
with warnings.catch_warnings():
    warnings.filterwarnings("ignore",category=DeprecationWarning)
    import fix_yahoo_finance as yf #https://github.com/ranaroussi/fix-yahoo-finance
    yf.pdr_override() # fix


# TIPS: get cell value: #data.loc[data.index == '2017-06-29'].iloc[0]['Volatility']

class HelperFunctions(object):
    """Small formatting/summary helpers for strategy result dicts."""

    def cleanParams(self, a):
        """Return the 4-item params list with both offset lists formatted
        as strings rounded to two decimals; the first two entries pass
        through unchanged."""
        near_offsets = [str(np.round(value, 2)) for value in a[2]]
        far_offsets = [str(np.round(value, 2)) for value in a[3]]
        return [a[0], a[1], near_offsets, far_offsets]

    def printStrat(self, strategies, mode="print"):
        """Collect summary columns from a {sim_id: stats-dict} mapping."""
        simid = []
        sim_pnl = []
        wins = []
        losses = []
        trades = []
        params = []
        daysForward = []
        for sim_id in strategies.keys():
            simid.append(sim_id)
            sim_pnl.append(strategies[sim_id].get("Total PNL"))
            wins.append(strategies[sim_id].get("Win Trades"))
            losses.append(strategies[sim_id].get("Lose Trades"))
Beispiel #7
0
def mainfunc():
    """Download and print daily 0700.HK data between the module-level
    ini_time and end_time."""
    yf.pdr_override() # fix yahoo finance api
    frame = pdr.get_data_yahoo('0700.HK', start=ini_time, end=end_time)
    print(frame)
def crawler_history_stock_price(stock_info):
    """Download the full price history of every stock in `stock_info` and
    upload it to the StockPrice MySQL database.

    For each id the .TW (TWSE) listing is tried first; the .TWO (OTC)
    listing is used as a fallback when no rows come back.

    :param stock_info: DataFrame-like with a `stock_id` column.
    :return: DataFrame logging each ticker and the number of rows fetched.
    """
    def upload_stock_price2sql(data, dataset_name):
        """Insert one stock's rows into its own `_<ticker>` table."""
        # Table names cannot contain '.', e.g. '_2330.TW' -> '_2330_TW'.
        dataset_name = dataset_name.replace('.', '_')

        conn = pymysql.connect(host=host,  # SQL IP
                               port=3306,
                               user=user,
                               password=password,
                               database='StockPrice',
                               charset="utf8")

        # FIX: reuse a single cursor instead of opening a new one per row.
        cursor = conn.cursor()
        for j in range(len(data)):
            cursor.execute(
                'insert into ' + dataset_name +
                '( Date ,stock_id  ,Open ,High ,Low ,Close ,' +
                ' Adj_Close , Volume )' + ' values(%s,%s,%s,%s,%s,%s,%s,%s)',
                (data['Date'][j], data['stock_id'][j],
                 float(data['Open'][j]),
                 float(data['High'][j]),
                 float(data['Low'][j]),
                 float(data['Close'][j]),
                 float(data['Adj Close'][j]),
                 int(data['Volume'][j])
                 ))

        conn.commit()
        conn.close()
    #----------------------------------------------------------------
    yf.pdr_override()  # <== that's all it takes :-)
    stock_id_log = []
    len_data_log = []

    # BUG FIX: the end date was previously built with `now.day + 1`, which
    # produces an invalid date (e.g. '2020-1-32') on the last day of any
    # month; use timedelta arithmetic instead.
    tomorrow = datetime.datetime.now() + datetime.timedelta(days=1)
    end = str(tomorrow.year) + '-' + str(tomorrow.month) + '-' + str(tomorrow.day)

    for i in range(len(stock_info)):
        print(str(i) + '/' + str(len(stock_info)))
        # stock_id = '1593'
        stock_id = str(stock_info.stock_id[i]) + '.TW'
        stock_id2 = str(stock_info.stock_id[i]) + '.TWO'
        #print(stock_info.stock_id[i])
        bo = 1
        while(bo):
            data = pdr.get_data_yahoo(stock_id, start='1900-1-10', end=end)
            data['stock_id'] = stock_id
            dataset_name = '_' + stock_id
            if len(data) == 0:
                # Fall back to the OTC listing.
                data = pdr.get_data_yahoo(stock_id2, start='1900-1-10', end=end)
                data['stock_id'] = stock_id2
                dataset_name = '_' + stock_id2
            if len(data) != 0:
                bo = 0
            #print(stock_id2)
            #print('len(data) = ' + str( len(data) ) )

        stock_id_log.append(stock_id)
        len_data_log.append(len(data))

        # Move the DatetimeIndex into a plain 'Date' column of 'YYYY-MM-DD'
        # strings and re-index by row number for the positional access above.
        data['Date'] = data.index
        data.index = range(len(data))
        #data['stock_name'] = stock_name
        data.Date = np.array([str(data.Date[k]).split(' ')[0] for k in range(len(data))])
        upload_stock_price2sql(data, dataset_name)

    stock_log = pd.DataFrame({'stock_id_log': stock_id_log,
                              'len_data_log': len_data_log})
    return stock_log
Beispiel #9
0
    def calcStock(self):
        """Train a small LSTM regressor on this company's daily Yahoo price
        history and predict the next closing price.

        Uses TensorFlow 1.x graph-mode APIs (placeholders + Session).
        Side effects: downloads price data and writes ./STOCK.csv.

        :return: (testY, test_predict, train_error_summary,
                  test_error_summary, price_predict[0])
        """
        tf.reset_default_graph()
        tf.set_random_seed(777)  # reproducible weight initialisation

        def data_standardization(x):
            # z-score normalisation (kept for reference; unused below)
            x_np = np.asarray(x)
            return (x_np - x_np.mean()) / x_np.std()


        def min_max_scaling(x):
            # scale into [0, 1); the epsilon guards against a zero range
            x_np = np.asarray(x)
            return (x_np - x_np.min()) / (x_np.max() - x_np.min() + 1e-7)

        def reverse_min_max_scaling(org_x, x):
            # invert min_max_scaling using the original array's range
            org_x_np = np.asarray(org_x)
            x_np = np.asarray(x)
            return (x_np * (org_x_np.max() - org_x_np.min() + 1e-7)) + org_x_np.min()


        # network / training hyper-parameters
        input_data_column_cnt = 6
        output_data_column_cnt = 1

        seq_length = 28
        rnn_cell_hidden_dim = 20
        forget_bias = 1.0
        num_stacked_layers = 1
        keep_prob = 1.0

        epoch_num = 50
        learning_rate = 0.01

        yf.pdr_override()
        start_date = '1996-05-06'
        print(start_date)
        print(self.comCode)
        # round-trip through CSV so the euc-kr read path below stays unchanged
        df = data.get_data_yahoo(self.comCode, start_date)
        df.to_csv('./STOCK.csv', mode='w')
        stock_file_name = 'STOCK.csv'
        encoding = 'euc-kr'
        names = ['Date','Open','High','Low','Close','Adj Close','Volume']

        raw_dataframe = pd.read_csv(stock_file_name, names=names, encoding=encoding)
        del raw_dataframe['Date']
        # BUG FIX: np.float was removed in NumPy 1.24 -- use the builtin float.
        stock_info = raw_dataframe.values[1:].astype(float)

        # normalise OHLC prices and volume separately (different scales)
        price = stock_info[:,:-1]
        norm_price = min_max_scaling(price)

        volume = stock_info[:,-1:]
        norm_volume = min_max_scaling(volume)

        x = np.concatenate((norm_price, norm_volume), axis=1)
        y = x[:, [-2]]  # target column

        dataX = []
        dataY = []

        # sliding windows: seq_length days in, the following day's value out
        for i in range(0, len(y) - seq_length):
            _x = x[i : i+seq_length]
            _y = y[i + seq_length]
            # BUG FIX: was `if i is 0:` -- identity comparison against an int
            # literal (SyntaxWarning on 3.8+); use equality.
            if i == 0:
                print(_x, "->", _y)
            dataX.append(_x)
            dataY.append(_y)

        # 70/30 chronological train/test split
        train_size = int(len(dataY) * 0.7)
        test_size = len(dataY) - train_size

        trainX = np.array(dataX[0:train_size])
        trainY = np.array(dataY[0:train_size])

        testX = np.array(dataX[train_size:len(dataX)])
        testY = np.array(dataY[train_size:len(dataY)])

        X = tf.placeholder(tf.float32, [None, seq_length, input_data_column_cnt])
        Y = tf.placeholder(tf.float32, [None, 1])

        # separate placeholders so RMSE can be evaluated on arbitrary arrays
        targets = tf.placeholder(tf.float32, [None, 1])
        predictions = tf.placeholder(tf.float32, [None, 1])

        def lstm_cell():
            # one LSTM cell, optionally wrapped with dropout
            cell = tf.contrib.rnn.BasicLSTMCell(num_units=rnn_cell_hidden_dim,
                                        forget_bias=forget_bias, state_is_tuple=True, activation=tf.nn.softsign)
            if keep_prob < 1.0:
                cell = tf.contrib.rnn.DropoutWrapper(cell, output_keep_prob=keep_prob)
            return cell

        stackedRNNs = [lstm_cell() for _ in range(num_stacked_layers)]
        multi_cells = tf.contrib.rnn.MultiRNNCell(stackedRNNs, state_is_tuple=True) if num_stacked_layers > 1 else lstm_cell()

        hypothesis, _states = tf.nn.dynamic_rnn(multi_cells, X, dtype=tf.float32)
        print("hypothesis: ", hypothesis)

        # regress the last time-step's hidden state onto one output value
        hypothesis = tf.contrib.layers.fully_connected(hypothesis[:, -1], output_data_column_cnt, activation_fn=tf.identity)

        loss = tf.reduce_sum(tf.square(hypothesis - Y))
        optimizer = tf.train.AdamOptimizer(learning_rate)

        train = optimizer.minimize(loss)

        rmse = tf.sqrt(tf.reduce_mean(tf.squared_difference(targets, predictions)))

        train_error_summary = []
        test_error_summary = []
        test_predict = ''

        sess = tf.Session()
        sess.run(tf.global_variables_initializer())

        for epoch in range(epoch_num):
            _, _loss = sess.run([train, loss], feed_dict={X: trainX, Y: trainY})
            # log errors every 100 epochs and on the final epoch
            if ((epoch+1) % 100 == 0) or (epoch == epoch_num-1):
                train_predict = sess.run(hypothesis, feed_dict={X: trainX})
                train_error = sess.run(rmse, feed_dict={targets: trainY, predictions: train_predict})
                train_error_summary.append(train_error)

                test_predict = sess.run(hypothesis, feed_dict={X: testX})
                test_error = sess.run(rmse, feed_dict={targets: testY, predictions: test_predict})
                test_error_summary.append(test_error)

        # predict the next value from the most recent seq_length days
        recent_data = np.array([x[len(x)-seq_length : ]])
        print("recent_data.shape:", recent_data.shape)
        print("recent_data:", recent_data)

        price_predict = sess.run(hypothesis, feed_dict={X: recent_data})

        print("test_predict", test_predict[0])

        print("--------------------------------------------------")
        for i in range(10):
            print(test_predict[0])
        print("--------------------------------------------------")

        # map normalised predictions back to the original price scale
        price_predict = reverse_min_max_scaling(price,price_predict)
        test_predict = reverse_min_max_scaling(price,test_predict)
        testY = reverse_min_max_scaling(price,testY)
        print("Tomorrow's stock price", price_predict[0])

        return testY, test_predict, train_error_summary, test_error_summary, price_predict[0]
Beispiel #10
0
# 4. More stock manipulations
# resample()

import os
os.getcwd()
# NOTE(review): hard-coded, machine-specific working directory.
os.chdir(
    'C:\\Users\\sxw17\\Desktop\\python learning\\Python for Finance\\Python_Programming_for_Finance'
)

import datetime as dt
import matplotlib.pyplot as plt
from matplotlib import style
import pandas as pd

import fix_yahoo_finance as fy
fy.pdr_override()  # patch pandas_datareader's Yahoo backend

from pandas.api.types import is_list_like

# NOTE(review): duplicate pandas import; the assignment below works around
# older pandas_datareader versions expecting pd.core.common.is_list_like.
import pandas as pd
pd.core.common.is_list_like = pd.api.types.is_list_like

import pandas_datareader.data as web

style.use('ggplot')

# Study window: 2000-01-01 .. 2019-07-05.
start = dt.datetime(2000, 1, 1)
end = dt.datetime(2019, 7, 5)

df = web.DataReader('TSLA', 'yahoo', start, end)
print(df.head())
Beispiel #11
0
def load_yhoo_data(symbol, symbol_id, vendor_id, conn):
    """
    This will load stock data (date+OHLCV) and additional info to our daily_data table.
    args:
        symbol: stock ticker, type string.
        symbol_id: stock id referenced in symbol(id) column, type integer.
        vendor_id: data vendor id referenced in data_vendor(id) column, type integer.
        conn: a Postgres DB connection object
    return:
        None
    raises:
        Exception: when the Yahoo download fails; the symbol is also
        recorded in MASTER_LIST_FAILED_SYMBOLS.
    """

    cur = conn.cursor()
    # generic start date should pull all data for a given symbol
    start_dt = datetime.datetime(2004, 12, 30)
    end_dt = datetime.datetime(2017, 12, 1)

    yf.pdr_override()  # patch pandas_datareader's Yahoo backend

    try:
        data = yf.download(symbol, start=start_dt, end=end_dt)
    except Exception:
        # BUG FIX: was a bare `except:`, which also swallowed
        # KeyboardInterrupt/SystemExit; record the failure and re-raise.
        MASTER_LIST_FAILED_SYMBOLS.append(symbol)
        raise Exception('Failed to load {}'.format(symbol))

    data['Date'] = data.index

    # create new dataframe matching our table schema
    # and re-arrange our dataframe to match our database table
    columns_table_order = [
        'data_vendor_id', 'stock_id', 'created_date', 'last_updated_date',
        'date_price', 'open_price', 'high_price', 'low_price', 'close_price',
        'adj_close_price', 'volume'
    ]

    newDF = pd.DataFrame()
    newDF['date_price'] = data['Date']
    newDF['open_price'] = data['Open']
    newDF['high_price'] = data['High']
    newDF['low_price'] = data['Low']
    newDF['close_price'] = data['Close']
    newDF['adj_close_price'] = data['Adj Close']
    newDF['volume'] = data['Volume']
    newDF['stock_id'] = symbol_id
    newDF['data_vendor_id'] = vendor_id
    newDF['created_date'] = datetime.datetime.utcnow()
    newDF['last_updated_date'] = datetime.datetime.utcnow()
    newDF = newDF[columns_table_order]

    # ensure our data is sorted by date
    newDF = newDF.sort_values(by=['date_price'], ascending=True)

    # convert our dataframe to a list
    list_of_lists = newDF.values.tolist()
    # convert our list to a list of tuples
    tuples_mkt_data = [tuple(x) for x in list_of_lists]

    # WRITE DATA TO DB
    insert_query = """
                    INSERT INTO daily_data (data_vendor_id, stock_id, created_date,
                    last_updated_date, date_price, open_price, high_price, low_price, close_price, 
                    adj_close_price, volume) 
                    VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)
                    """
    cur.executemany(insert_query, tuples_mkt_data)
    conn.commit()
    # FIX: release the cursor; the connection itself is owned by the caller.
    cur.close()
    print('{} complete!'.format(symbol))
def create_shifted_orderbook_more_feature(ticker,
                                          start_date,
                                          end_date,
                                          lag_period=5,
                                          pred_period=7):
    """
    Shifts order book data. Determines stock movement direction. Incorporates technical indicators as training features.
    :param ticker: stock ticker
    :param start_date: start date for stock data collection period
    :param end_date: end date for stock data collection period
    :param lag_period: number of previous prices trained on
    :param pred_period: number of days forecast
    :return:
            stock_data: the price/volume info for a stock
            stock_returns: percent change between days and lagging percent changes (i.e. previous days' changes)
            stock_lag: stock price and lagging stock price
            stock_movement: stock movement direction (+1: increase, -1: decrease)
    """
    # NOTE(review): Python 2 print statements below -- this function will not
    # run under Python 3 without modification.
    # Retrieve the Nifty data from Yahoo finance:
    format = '%Y-%m-%d'  # Formatting directives (NOTE: shadows builtin `format`)
    start = start_date.strftime(format)
    end = end_date.strftime(format)

    yf.pdr_override()  # <== that's all it takes :-)
    stock_data = pdr.get_data_yahoo(ticker, start=start, end=end)

    # Creates stock lag
    # NOTE(review): the dropna() result is discarded, so this line is a no-op.
    stock_data.dropna()
    stock_lag = pd.DataFrame(data=stock_data, index=stock_data.index)

    stock_returns = pd.DataFrame(data=stock_data, index=stock_data.index)

    # Initializes dataframe values and smooths the closing price data
    stock_data_smooth = stock_data['Adj Close']
    # presumably exponential_smoothing mutates the series in place -- confirm
    exponential_smoothing(
        0.7, stock_data_smooth)  #so the stock_data_smooth is smoothing

    #stock_lag['Volume'] = stock_returns['Volume'] = stock_data['Volume']
    stock_lag[
        "Close"] = stock_data_smooth  #so, now the stock_lag["Close"] is derive from Adj Close + smoothing.
    #print stock_lag["Close"]

    # Sets lagging price data (previous days' price data as feature inputs)
    for i in range(0, lag_period):
        column_label = 'Lag{:d}'.format(i)
        stock_lag[column_label] = stock_lag['Close'].shift(1 + i)

    # EMA- Momentum
    #stock_lag['EMA'] = talib.EMA(close, timeperiod = 30)
    ndays = 30
    name_EWMA = 'EWMA_' + str(ndays)
    stock_lag['EWMA_'] = EWMA(stock_lag, ndays)[name_EWMA]

    # Bollinger Bands
    #stock_lag['upperband'], stock_lag['middleband'], stock_lag['lowerband'] = talib.BBANDS(close, timeperiod=5, nbdevup=2, nbdevdn=2, matype=0)
    aa = BBANDS(stock_lag, ndays=30)
    stock_lag['upperband'] = aa['Upper BollingerBand']
    stock_lag['lowerband'] = aa['Lower BollingerBand']

    # StochK
    #stock_lag['slowk'], stock_lag['slowd'] = talib.STOCH(high, low, close, fastk_period=14, slowk_period=3, slowk_matype=0, slowd_period=3,
    #                     slowd_matype=0)
    n = 30
    name_slowk = 'SO%k'
    name_slowd = 'SO%d_' + str(n)
    stock_lag['slowk'] = STOK(stock_lag)[name_slowk]
    stock_lag['slowd'] = STO(stock_lag, n)[name_slowd]

    # MACD- Momentum
    #macd, macdsignal, stock_lag['macdhist'] = talib.MACD(close, fastperiod=12, slowperiod=26, signalperiod=9)
    n_fast = 12
    n_slow = 26
    name_macd = 'MACD_' + str(n_fast) + '_' + str(n_slow)
    name_macdsignal = 'MACDsign_' + str(n_fast) + '_' + str(n_slow)
    name_macdhist = 'MACDdiff_' + str(n_fast) + '_' + str(n_slow)
    # NOTE(review): MACD(...) is recomputed three times here; macd and
    # macdsignal are never used afterwards.
    macd = MACD(stock_lag, n_fast, n_slow)[name_macd]
    macdsignal = MACD(stock_lag, n_fast, n_slow)[name_macdsignal]
    stock_lag['macdhist'] = MACD(stock_lag, n_fast, n_slow)[name_macdhist]

    # CCI- Momentum
    #stock_lag['CCI'] = talib.CCI(high, low, close)
    stock_lag['CCI'] = CCI(stock_lag, ndays=30)["CCI"]
    #print stock_lag['CCI']

    # # RSI- Momentum
    # #stock_lag['RSI'] = talib.RSI(close, timeperiod=14)
    # ndays = 14
    # name_RSI = 'RSI_' + str(ndays)
    # stock_lag['RSI'] = RSI(stock_lag, n = ndays)[name_RSI]
    # #print stock_lag['RSI']

    # Chaikin- Volume
    #stock_lag['Chaikin'] = talib.ADOSC(high, low, close, volume, fastperiod=3, slowperiod=10)
    stock_lag['Chaikin'] = Chaikin(stock_lag)['Chaikin']
    #print stock_lag['Chaikin']

    stock_returns['Day Returns'] = stock_data['Adj Close'].pct_change() * 100
    # Sets lagging percent change data
    for i in range(0, lag_period):
        column_label = 'Lag{:d}'.format(i)
        stock_returns[column_label] = stock_lag[column_label].pct_change(
        ) * 100

    # Remove NaN's from stock lag
    print "shape of stock_lag before dropna: ", stock_lag.shape[0]
    stock_lag = stock_lag.dropna(axis=0, how='any')
    print "shape of stock_lag before dropna: ", stock_lag.shape[0]

    print "shape of stock_returns before dropna: ", stock_returns.shape[0]
    # Adjusts stock_return data to same length as stock_lag
    stock_returns = stock_returns.tail(stock_lag.shape[0])
    print "shape of stock_returns after dropna: ", stock_returns.shape[0]

    # Determine stock movement direction and lagging movement
    stock_movement = pd.DataFrame(index=stock_returns.index)
    stock_movement['Movement_0'] = np.sign(stock_returns['Day Returns'])
    # NOTE(review): chained-indexing assignment; the first day has no prior
    # return, so its direction is forced to +1.
    stock_movement['Movement_0'][0] = 1
    for i in range(0, pred_period):
        column_label = 'Movement_{:d}'.format(i + 1)
        stock_movement[column_label] = stock_movement['Movement_0'].shift(i +
                                                                          1)

    # Removes NaNs from 'stock_movement' and resizes 'stocks_returns' and 'stock_lag' accordingly
    print "shape of stock_movement before dropna: ", stock_movement.shape[0]
    stock_movement = stock_movement.dropna(axis=0, how='any')
    print "shape of stock_movement after dropna: ", stock_movement.shape[0]

    # Trim both frames to end at stock_movement's last index, then to its length.
    stock_returns = stock_returns[stock_returns.index <= stock_movement.index[
        stock_movement.index.__len__() - 1]]
    stock_returns = stock_returns.tail(stock_movement.shape[0])
    stock_lag = stock_lag[stock_lag.index <= stock_movement.index[
        stock_movement.index.__len__() - 1]]
    stock_lag = stock_lag.tail(stock_movement.shape[0])

    return stock_data, stock_returns, stock_lag, stock_movement
Beispiel #13
0
def plot():
    """Build a GOOG candlestick chart with Bokeh and render it into plot.html."""
    from pandas_datareader import data
    import datetime
    import fix_yahoo_finance as yf
    yf.pdr_override()
    from bokeh.plotting import figure, show, output_file
    from bokeh.embed import components
    from bokeh.resources import CDN

    start = datetime.datetime(2015, 11, 1)
    end = datetime.datetime(2016, 3, 10)

    # FIX: removed a redundant second yf.pdr_override() call here.
    df = data.get_data_yahoo(tickers="GOOG", start=start, end=end)

    # Tag each day with whether it closed above or below its open.
    def inc_dec(c, o):
        if c > o:
            value = "Increase"
        elif c < o:
            value = "Decrease"
        else:
            value = "Equal"
        return value

    df["Status"] = [inc_dec(c, o) for c, o in zip(df.Close, df.Open)]

    # Helper columns for drawing the candle bodies.
    df["Middle"] = (df.Open + df.Close) / 2
    df["Height"] = abs(df.Close - df.Open)

    date_increase = df.index[df.Close > df.Open]
    date_decrease = df.index[df.Close < df.Open]

    p = figure(x_axis_type="datetime", width=1000, height=300)
    p.title.text = "Candlestick Chart"
    p.grid.grid_line_alpha = 0.3

    # Candle width: half a day, in milliseconds.
    hours_12 = 12 * 60 * 60 * 1000

    # High/low wicks.
    p.segment(df.index, df.High, df.index, df.Low, color="Black")

    # Days that closed higher.
    p.rect(df.index[df.Status == "Increase"],
           df.Middle[df.Status == "Increase"],
           hours_12,
           df.Height[df.Status == "Increase"],
           fill_color='green',
           line_color="black")

    # Days that closed lower.
    p.rect(df.index[df.Status == "Decrease"],
           df.Middle[df.Status == "Decrease"],
           hours_12,
           df.Height[df.Status == "Decrease"],
           fill_color='red',
           line_color="black")

    script1, div1 = components(p)
    cdn_js = CDN.js_files[0]
    cdn_css = CDN.css_files[0]
    return render_template("plot.html",
                           script1=script1,
                           div1=div1,
                           cdn_css=cdn_css,
                           cdn_js=cdn_js)
# In[ ]:

#Initialize and set debugging level to `debug` to track progress.

# In[ ]:

# Notebook-exported setup cell: inline plotting plus data/algo imports.
get_ipython().magic('matplotlib inline')

import numpy as np
import pandas as pd
from pandas_datareader import data as pdr
# data reader now seperated to new package. pip install pandas-datareader
#from pandas.io.data import DataReader
import fix_yahoo_finance as yf
yf.pdr_override()  # <== that's all it takes :-)
from datetime import datetime
import six
import universal as up
from universal import tools
from universal import algos
import logging
# we would like to see algos progress
logging.basicConfig(format='%(asctime)s %(message)s', level=logging.DEBUG)

import matplotlib as mpl
import matplotlib.pyplot as plt


# we need 14 colors for the plot
#n_lines = 14
Beispiel #15
0
import csv
import pandas as pd
pd.core.common.is_list_like = pd.api.types.is_list_like
from pandas_datareader import data
import fix_yahoo_finance
fix_yahoo_finance.pdr_override()

chdata = data.get_data_yahoo(
    # tickers list (single tickers accepts a string as well)
    tickers="SPY",

    # start date (YYYY-MM-DD / datetime.datetime object)
    # (optional, defaults is 1950-01-01)
    start="2017-01-01",

    # end date (YYYY-MM-DD / datetime.datetime object)
    # (optional, defaults is Today)
    end="2017-12-31",

    # return a multi-index dataframe
    # (optional, default is Panel, which is deprecated)
    as_panel=False,

    # group by ticker (to access via data['SPY'])
    # (optional, default is 'column')
    group_by='ticker',

    # adjust all OHLC automatically
    # (optional, default is False)
    auto_adjust=True,
import pandas as pd
from pandas_datareader import data, wb
import fix_yahoo_finance as yf
yf.pdr_override()
import datetime
import numpy as np
from datetime import timedelta
from sqlalchemy import create_engine


#To get data:


ticker = ['IBM',
'ICE',
'IDXX',
'IFF',
'ILMN',
'INCY',
'INFO',
'INTC',
'INTU',
'IP',
'IPG',
'IQV',
'IR',
'IRM',
'ISRG',
'IT',
'ITW',
'IVZ',
Beispiel #17
0
import pandas_datareader.data as pdr
import fix_yahoo_finance as fix
import numpy as np
fix.pdr_override()


def back_test(strategy, seq_len, ticker, start_date, end_date, dim):
    """
    A simple back test for a given date period
    :param strategy: the chosen strategy. Note to have already formed the model, and fitted with training data.
    :param seq_len: length of the days used for prediction
    :param ticker: company ticker
    :param start_date: starting date
    :type start_date: "YYYY-mm-dd"
    :param end_date: ending date
    :type end_date: "YYYY-mm-dd"
    :param dim: dimension required for strategy: 3dim for LSTM and 2dim for MLP
    :type dim: tuple
    :return: Percentage errors array that gives the errors for every test in the given date range
    """
    data = pdr.get_data_yahoo(ticker, start_date, end_date)
    stock_data = data["Adj Close"]
    errors = []
    for i in range((len(stock_data)//10)*10 - seq_len - 1):
        # NOTE(review): two-axis .iloc on what looks like a Series -- this
        # only works if "Adj Close" selects a DataFrame (multi-ticker
        # download); verify against the caller.
        x = np.array(stock_data.iloc[i: i + seq_len, 1]).reshape(dim) / 200
        y = np.array(stock_data.iloc[i + seq_len + 1, 1]) / 200
        predict = strategy.predict(x)
        # retry while the model returns an exactly-zero prediction
        while predict == 0:
            predict = strategy.predict(x)
        error = (predict - y) / 100
        errors.append(error)
    # BUG FIX: the docstring promises the errors array, but the function
    # previously fell off the end and returned None.
    return errors
 def __init__(self, stock, stock_id):
     """Patch pandas_datareader, then store the ticker (as a string) and its id."""
     yf.pdr_override()
     self.stock = str(stock)
     self.stock_id = stock_id
Beispiel #19
0
import pandas as pd
# Work around older pandas_datareader expecting pd.core.common.is_list_like.
pd.core.common.is_list_like = pd.api.types.is_list_like
import fix_yahoo_finance as fyf
from pandas_datareader import data as pdr
import numpy as np
import datetime
import xlrd
fyf.pdr_override()  # patch pandas_datareader's Yahoo backend
# We will look at stock prices over the past year, starting at January 1, 2016
#startd = datetime.datetime(2016,1,1)
#endd = datetime.date.today()

#BABA = pdr.get_data_yahoo("BABA", start=startd, end=endd)
#BABA = pdr.get_data_yahoo("BABA", start=endd, end=endd)
#BABA_np = np.array(BABA)

#type(BABA)
#print(BABA_np)
# Load the IPO spreadsheet (xlrd backend) and inspect its shape.
df = pd.read_excel("ipo.xlsx")
df_np = np.array(df)
print(df_np.shape)
print(df_np[1])
Beispiel #20
0
import datetime
import time

from pandas_datareader import data as pdr
import fix_yahoo_finance as yf
yf.pdr_override() # <== that's all it takes :-)

# Download window: today back 5 * 365 days.
end_date = datetime.datetime.now().strftime('%Y-%m-%d')
start_date = (datetime.datetime.now()- datetime.timedelta(days=5*365)).strftime('%Y-%m-%d')
# data covering the last n years
SID = "HYG"  # ticker to download
out_file = "HYG.csv"

def true_val(current_date, current_val, dividend_dataframe):
    """Return `current_val` with every dividend paid on or before
    `current_date` added back (dividend-adjusted value)."""
    adjusted = current_val
    for row, payout_date in enumerate(dividend_dataframe.index.values):
        if payout_date <= current_date:
            adjusted = adjusted + dividend_dataframe.iloc[row][0]
    return adjusted

# Dividend history (actions='only' requests dividend/split rows)
dividend_dataframe = pdr.get_data_yahoo(SID, start=start_date, end=end_date,actions='only')
print(dividend_dataframe)
time.sleep(5)  # pause between Yahoo requests

# Daily OHLC bars
data = pdr.get_data_yahoo(SID, start=start_date, end=end_date)

for idx, current_date  in enumerate(data.index.values):
Beispiel #21
0
#!/usr/bin/env python

import fix_yahoo_finance as y
import matplotlib.pyplot as plt
from pandas_datareader import data as pdr

y.pdr_override()


class Company:
    """Minimal record type holding a company's name."""

    def __init__(self, name):
        # Keep whatever identifier the caller supplies.
        self.name = name


def sort_data(data):
    data = data.split('\n')
    data_dict = {}
    dates = []
    open_prices = []
    high_prices = []
    low_prices = []
    close_prices = []
    adj_close_prices = []
    vol_prices = []
    for i in data[1:]:
        l = i.split(',')
        if len(l) != 7:
            continue
        dates.append(l[0])
        open_prices.append(float(l[1]))
        high_prices.append(float(l[2]))
Beispiel #22
0
import argh
import pandas as pd
import time
import glob
import os
import mylib.io
import pyarrow as pa
import pyarrow.parquet as pq
import shutil
import dask.bag as db
from joblib import Memory


from pandas_datareader import data as pdr
import fix_yahoo_finance as yf
yf.pdr_override()

_mydir = os.path.dirname(os.path.realpath(__file__))

# Disk-backed joblib cache stored next to this module.
cachedir = os.path.join(_mydir, 'joblib_cache')
memory = Memory(cachedir, verbose=1)

# WARNING: persistant state here, clear if worried
# Rate limit: at most one Yahoo call per _period_seconds.
_period_seconds = 1
# NOTE(review): sleep_and_retry/limits look like `ratelimit` decorators, but
# no import for them is visible in this chunk -- confirm they are imported
# elsewhere in the file.
@memory.cache
@sleep_and_retry
@limits(calls=1, period=_period_seconds)
def _get_data_yahoo(names, start, end):
    return pdr.get_data_yahoo(names, start=start, end=end)

def get_data_yahoo(names, start, end):
Beispiel #23
0
from django.http import HttpResponse, FileResponse
from django.shortcuts import render
from pandas_datareader import data as pdr
# pip install multitasking
# pip install fix_yahoo_finance
import fix_yahoo_finance as yf
import tushare as ts
import pandas as pd
import numpy as np
from pandas_datareader import data, wb
import matplotlib.pyplot as plt
from sklearn.svm import SVR

from flower.models import AllStockBasic, RealTimePens, HISData

yf.pdr_override()  # this call is required to patch pandas_datareader
# Data file locations
PATH = r"flower/flowerData/stock/"
FILENAME = r"stock.csv"
FILENAME2 = r"wu.csv"
FILENAME3 = r"wu1.csv"


def index(request):
    """Render the stock landing page."""
    return render(request, "stock.html")


def showStock(request):
    """Render every RealTimePens record into the ajax stock partial."""
    pens = RealTimePens.objects.all()
    return render(request, "ajax/ajax_stock.html", {"data": pens})