Example #1
def hyouka(myStrategy):
    kabu = pd.read_csv("./kabu_data.csv")
    kabu["Date"] = pd.to_datetime(kabu["Date"])
    code_number = kabu.銘柄コード.unique()[:-1]
    kabu = kabu.set_index(["銘柄コード", "Date"])

    cerebro = bt.Cerebro()  # instantiate the Cerebro engine
    cerebro.addstrategy(myStrategy)  # add the strategy
    for i in range(len(code_number)):
        data = btfeed.PandasData(
            dataname=kabu.loc[code_number[i]][::-1])  # convert the data to Cerebro's format
        cerebro.adddata(data)  # add the data to the Cerebro engine
    cerebro.broker.setcash(1000000.0)  # set the starting cash
    cerebro.broker.setcommission(commission=0.0005)  # set the commission (spread) to 0.05%
    cerebro.addsizer(
        bt.sizers.PercentSizer,
        percents=50)  # default trade size (when buy/sell gives none) as a percentage of cash
    startcash = cerebro.broker.getvalue()  # cash at the start
    cerebro.broker.set_coc(True)  # fill at the close of the bar the order is placed on

    import backtrader.analyzers as btanalyzers  # analyzers for evaluating the backtest
    cerebro.addanalyzer(btanalyzers.DrawDown, _name='myDrawDown')  # drawdown
    cerebro.addanalyzer(btanalyzers.SQN, _name='mySQN')  # SQN
    cerebro.addanalyzer(btanalyzers.TradeAnalyzer,
                        _name='myTradeAnalyzer')  # trade results (wins/losses, etc.)

    thestrats = cerebro.run()  # run the backtest
    thestrat = thestrats[0]  # get the analysis results
    return startcash + thestrat.analyzers.myTradeAnalyzer.get_analysis(
    ).pnl.net.total
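A minimal way to exercise hyouka, assuming pandas/backtrader are imported at module level as above and kabu_data.csv is present; the crossover strategy below is a hypothetical illustration, not part of the original:

class SmaCross(bt.Strategy):  # hypothetical example strategy
    def __init__(self):
        sma_fast = bt.indicators.SMA(period=10)
        sma_slow = bt.indicators.SMA(period=30)
        self.crossover = bt.indicators.CrossOver(sma_fast, sma_slow)

    def next(self):
        if not self.position and self.crossover > 0:
            self.buy()
        elif self.position and self.crossover < 0:
            self.close()

final_value = hyouka(SmaCross)  # starting cash plus net profit/loss
print(final_value)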
Example #2
    def GetBbgClose(self, ticker, start_date, end_date):
        tmp_df = gdf.bbgDataWrapper().getBloombergData(ticker=ticker,
                                                       fields=['PX_LAST'],
                                                       start_date=start_date,
                                                       end_date=end_date)
        tmp_df.columns = ['CLOSE']

        return bt.PandasData(dataname=tmp_df)
Example #3
def getData(args):
    ''' get the symbol data '''
    modpath = os.path.dirname(os.path.abspath(sys.argv[0]))
    symbolDates = os.listdir(os.path.join(modpath, 'data\\2013-2016'))
    symbolBuffer = modpath + "\\data\\" + args.symbol + ".csv"
    dataframe = pd.DataFrame()
    if os.path.exists(symbolBuffer):
        skiprows = 1 if args.noheaders else 0
        header = 0
        df = pd.read_csv(
            symbolBuffer,
            skiprows=skiprows,
            header=header,
            parse_dates=True,
            index_col=0,
            names=['date', 'symbol', 'open', 'high', 'low', 'close', 'volume'])
        dataframe = df
    else:
        for i, symbolDate in enumerate(symbolDates):
            datapath = os.path.join(modpath, 'data\\2013-2016', symbolDate)
            skiprows = 1 if args.noheaders else 0
            header = None if args.noheaders else 0
            df = pd.read_csv(datapath,
                             skiprows=skiprows,
                             header=header,
                             parse_dates=True,
                             index_col=1,
                             names=[
                                 'symbol', 'date', 'open', 'high', 'low',
                                 'close', 'volume'
                             ])
            df = df[df['symbol'] == args.symbol]
            if dataframe.empty:
                dataframe = df
            else:
                dataframe = pd.concat([dataframe, df])

    if not os.path.exists(symbolBuffer):
        dataframe.to_csv(
            modpath + "\\data\\" + args.symbol + ".csv",
            columns=['symbol', 'open', 'high', 'low', 'close', 'volume'],
            header=False if args.noheaders else True)
    dataframe = dataframe.drop('symbol', axis=1)
    if not args.noprint:
        print('--------------------------------------------------')
        print(dataframe)
        print('--------------------------------------------------')
    data = btfeeds.PandasData(dataname=dataframe)
    return data
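For reference, a sketch of how getData might be called; the symbol value and flags are illustrative assumptions, not from the original:

import argparse
import backtrader as bt

cerebro = bt.Cerebro()
args = argparse.Namespace(symbol='AAPL',  # placeholder symbol, not from the original
                          noheaders=False,
                          noprint=True)
cerebro.adddata(getData(args))  # getData returns a btfeeds.PandasData feed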
Example #4
def main():
    dates = [
        '2021-04-05',
        '2021-04-10'
    ]
    for date in dates:
        starting_date = date
        df = pd.read_csv(f'./data/stockdweebs/weekly_10/{starting_date}.csv', index_col=['date'], parse_dates=True)

        start = starting_date
        end = dt.today().strftime('%Y-%m-%d')

        if df is None:
            print('Unable to retrieve weekly picks')
            sys.exit()

        for ticker in df['ticker']:
            with st.beta_expander(ticker, expanded=True):
                try:
                    data = get_data(ticker, start, end)
                    st.write(data.head())

                    if not df[['buy_zones', 'take_profit', 'cut_losses']].isna().values.any():
                        if not df[['buy_now']].isna().values.any():
                            entry_price = data.loc[starting_date, ['close']][0]
                            x = df['ticker'] == ticker
                            df.loc[x, ['buy_zones']] = df[x]['buy_zones'] + '|' + str(entry_price)

                        cerebro = bt.Cerebro()
                        cerebro.broker.setcash(cash)
                        datafeed = btfeeds.PandasData(dataname=data)
                        cerebro.adddata(datafeed, name=ticker)
                        cerebro.addstrategy(StockDweebStrategy, securities=df)
                        cer = cerebro.run()[0]
                        # Get final portfolio Value
                        portvalue = cerebro.broker.getvalue()
                        pnl = portvalue - cash
                        print(f'Final portfolio value: {portvalue}')
                        print('P/L: ${}'.format(pnl))

                        # Finally plot the end results
                        x = cerebro.plot(style='bar', width=30, height=50, tight=False, volume=False)[0][0]
                        st.write(x)

                except Exception:
                    e = sys.exc_info()[0]
                    print(f'No data for: {ticker} : {e}')
Example #5
def benchmark_data(exchange, market_type, res, data_len):

    try:
        connection = db.prod_db_conn()
        c = connection.cursor()

        # Get the name of the benchmark data for the exchange's sectors
        c.execute('''select benchmark from benchmark_master 
                  where exchange = '{ex}' '''.format(ex=exchange.lower()))

        benchmark_id = c.fetchall()[0][0]

        # Fetch the required number of candles from the DB
        df = pd.read_sql_query('''Select * from (
                                    select datetime,open,high,low,close,volume
                                    from ohlcv_index_{mt}_{r}
                                    where scrip_code = '{bid}'
                                    ORDER BY datetime DESC limit {c} ) as c_d
                                    order by datetime ASC
                               '''.format(bid=benchmark_id,
                                          mt=market_type,
                                          r=str(res),
                                          c=data_len),
                               con=connection)

        # Create a Data Feed
        data = btfeeds.PandasData(dataname=df,
                                  timeframe=bt.TimeFrame.Days,
                                  compression=1,
                                  datetime=0,
                                  high=2,
                                  low=3,
                                  open=1,
                                  close=4,
                                  volume=5,
                                  openinterest=-1)
        benchmark_name = benchmark_id.split('.')[1]

        return data, benchmark_name

    except Exception as e:
        logging.exception(str(e))

    finally:
        dao.close_db_connection(connection)
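A possible call site for benchmark_data; the argument values are placeholders, since the real exchange/market/resolution codes depend on the project's database:

import backtrader as bt

cerebro = bt.Cerebro()
data, benchmark_name = benchmark_data('nse', 'cash', 'd', 500)  # placeholder arguments
cerebro.adddata(data, name=benchmark_name)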
Example #6
def get_data(args):
    ''' get the price data for a given symbol '''
    modpath = os.path.dirname(os.path.abspath(sys.argv[0]))
    symbol_dates = os.listdir(os.path.join(modpath, 'data\\2013-2016'))
    symbol_buffer = modpath + "\\data\\" + args.symbol + ".pkl"

    # load the dataframe from preload or parse out
    dataframe = pd.DataFrame()
    if os.path.exists(symbol_buffer):
        skiprows = 1 if args.noheaders else 0
        header = 0
        frame = pd.read_pickle(symbol_buffer)
        dataframe = frame
    else:
        for i, symbol_date in enumerate(symbol_dates):
            datapath = os.path.join(modpath, 'data\\2013-2016', symbol_date)
            skiprows = 1 if args.noheaders else 0
            header = None if args.noheaders else 0
            frame = pd.read_csv(datapath,
                                skiprows=skiprows,
                                header=header,
                                parse_dates=True,
                                index_col=1,
                                names=[
                                    'symbol', 'date', 'open', 'high', 'low',
                                    'close', 'volume'
                                ])
            frame = frame[frame['symbol'] == args.symbol]
            if dataframe.empty:
                dataframe = frame
            else:
                dataframe = pd.concat([dataframe, frame])

    # save out the dataframe for fast rerun
    if not os.path.exists(symbol_buffer):
        dataframe.to_pickle(modpath + "\\data\\" + args.symbol + ".pkl")
    dataframe = dataframe.drop('symbol', axis=1)
    if not args.noprint:
        print('--------------------------------------------------')
        print(dataframe)
        print('--------------------------------------------------')
    data = btfeeds.PandasData(dataname=dataframe)
    return data
Example #7
def getdata(index, noheaders=True):

    datapath = os.path.join(modpath, dataspath, datafiles[index])

    # Simulate the header row isn't there if noheaders requested
    skiprows = 1 if noheaders else 0
    header = None if noheaders else 0

    dataframe = pandas.read_csv(datapath,
                                skiprows=skiprows,
                                header=header,
                                parse_dates=True,
                                index_col=0)

    # Pass it to the backtrader datafeed (the caller adds it to cerebro).
    # Headers are in upper case, hence nocase=True.
    if index:
        data = PandasDataOptix(dataname=dataframe, nocase=True)
    else:
        data = btfeeds.PandasData(dataname=dataframe, nocase=True)
    return data
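PandasDataOptix is not defined in this snippet; in backtrader's "optix" sample it is a PandasData subclass that declares extra lines, roughly like this:

class PandasDataOptix(btfeeds.PandasData):
    # extra lines on top of the standard OHLCV set
    lines = ('optix_close', 'optix_pess', 'optix_opt',)
    # -1 asks backtrader to autodetect each column by (case-insensitive) name
    params = (('optix_close', -1),
              ('optix_pess', -1),
              ('optix_opt', -1),)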
Example #8
dir_name = 'data'  # folder name for the historical data

input_csv = os.path.join(os.getcwd(), dir_name, 'GOOG.csv')  # full path to the CSV file

df = pd.read_csv(input_csv,
                 skiprows=1,
                 names=("Date", "Open", "High", "Low", "Close", "Adj Close",
                        "Volume"))  # read the CSV file into a pandas DataFrame

# convert the date column to datetime, use it as the index, and drop the original column
df = df.set_index(pd.to_datetime(df['Date'])).drop('Date', axis=1)

import backtrader as bt  # Backtrader
import backtrader.feeds as btfeed  # data feed conversion

data = btfeed.PandasData(dataname=df)  # convert the pandas data into Backtrader's format


class myStrategy(bt.Strategy):  # strategy

    n1 = 20  # period of the shorter SMA (simple moving average) on the close
    n2 = 60  # period of the longer SMA (simple moving average) on the close

    def log(self, txt, dt=None, doprint=False):  # logging helper
        if doprint:
            print('{0:%Y-%m-%d %H:%M:%S}, {1}'.format(
                dt or self.datas[0].datetime.datetime(0), txt))

    def __init__(self):  # setup
        self.sma1 = bt.indicators.SMA(self.data.close,
                                      period=self.n1)  # add an SMA (simple moving average) indicator
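        # (the original snippet is cut off here; what follows is an assumed
        #  sketch of how a two-SMA crossover strategy typically continues)
        self.sma2 = bt.indicators.SMA(self.data.close,
                                      period=self.n2)  # the longer SMA
        self.crossover = bt.indicators.CrossOver(self.sma1, self.sma2)

    def next(self):  # called on every new bar
        if not self.position and self.crossover > 0:
            self.buy()  # fast SMA crossed above slow: enter long
        elif self.position and self.crossover < 0:
            self.close()  # fast SMA crossed below slow: exit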
Example #9
def data_feed(scrip_code, ex, m_t, t_f, cerebro, min_data):

    try:
        connection = db.prod_db_conn()
        c = connection.cursor()

        print(
            f'Fetching data for {scrip_code} scrip(s) from exchange {ex}, timeframe {t_f} & market {m_t}'
        )
        table_name = su.get_table_name(t_f, ex, m_t)

        if scrip_code == 'all':  ## For Ticker list Handling

            try:
                p_s_f_amt = ap.penny_stock_filter[ex]
            except KeyError as e:
                print('Exchange', e,
                      'does not exist in Application Properties')
                print(
                    'Ensure a penny-stock filter amount is paired with the exchange in Application Properties'
                )

            # Fetch the required number of candles from the DB
            c.execute(
                '''with scrips_with_close_filter AS (select scdtc.scrip_code from (select distinct on (scrip_code) scrip_code,datetime,close
                        FROM {t_n}
                        order by scrip_code, datetime DESC) as scdtc
                        inner join scrip_master sm on scdtc.scrip_code = sm.scrip_code
                        where scdtc.close > {sf} and sm.isactive in ('t' , 'y'))
                                              
                        SELECT scrip_code, array_agg(Array[datetime::text,open::text,high::text,low::text,close::text,volume::text])
                        FROM  (SELECT n.datetime,n.open,n.high,n.low,n.close,n.volume,n.scrip_code,RANK () OVER (PARTITION BY n.scrip_code ORDER BY n.scrip_code,datetime DESC) sc_rank
                        FROM {t_n} as n 
                        inner join scrips_with_close_filter cf on n.scrip_code = cf.scrip_code
                         ORDER BY n.scrip_code, n.datetime ASC) as ol
                         where ol.sc_rank <= {candles}
                        GROUP BY ol.scrip_code;'''.format(t_n=table_name,
                                                          sf=p_s_f_amt,
                                                          candles=min_data))

            scrip_data_list = c.fetchall()

            for scrip in scrip_data_list:
                try:
                    scrip_name = scrip[0]
                    print(scrip_name)
                    df = pd.DataFrame.from_records(scrip[1],
                                                   columns=[
                                                       'datetime', 'open',
                                                       'high', 'low', 'close',
                                                       'volume'
                                                   ])
                    df2 = df.astype({
                        'datetime': 'datetime64[ns]',
                        'open': 'float64',
                        'high': 'float64',
                        'low': 'float64',
                        'close': 'float64',
                        'volume': 'float64'
                    })
                    # print(df2)

                    if len(df2) < min_data:
                        print('Minimum candles not present, skipping',
                              scrip_name)
                        continue

                    # Create a Data Feed
                    data = btfeeds.PandasData(dataname=df2,
                                              timeframe=bt.TimeFrame.Days,
                                              compression=1,
                                              datetime=0,
                                              high=2,
                                              low=3,
                                              open=1,
                                              close=4,
                                              volume=5,
                                              openinterest=-1)

                    # cerebro.resampledata(data, timeframe=bt.TimeFrame.Weeks,
                    #                         compression=1, name=scrip_name)

                    cerebro.adddata(data, name=scrip_name)

                except Exception as e:
                    # err.error_log(str(e),data_feed.__name__,'bt_run')
                    logging.exception(str(e))

        elif isinstance(scrip_code, tuple):

            for scrip in scrip_code:
                try:
                    df = pd.read_sql_query('''SELECT * FROM (
                                            SELECT datetime,open,high,low,close,volume
                                            FROM "{t_n}"
                                            WHERE scrip_code = '{sc}'
                                            ORDER BY datetime DESC LIMIT {c} ) AS c_d
                                            ORDER BY datetime ASC
                                            '''.format(t_n=table_name,
                                                       sc=scrip,
                                                       c=min_data),
                                           con=connection)

                    if len(df) < min_data:
                        print('Minimum candles not present')
                        continue

                    # Create a Data Feed
                    data = btfeeds.PandasData(dataname=df,
                                              timeframe=bt.TimeFrame.Days,
                                              compression=1,
                                              datetime=0,
                                              high=2,
                                              low=3,
                                              open=1,
                                              close=4,
                                              volume=5,
                                              openinterest=-1)

                    cerebro.adddata(data, name=scrip)

                except Exception as e:
                    # err.error_log(str(e),data_feed.__name__,'bt_run')
                    logging.exception(str(e))

        else:  ## For Single Ticker Handling
            df = pd.read_sql_query('''SELECT * FROM (
                                   SELECT datetime,open,high,low,close,volume
                                   FROM "{t_n}"
                                   WHERE scrip_code = '{sc}'
                                   ORDER BY datetime DESC LIMIT {c} ) AS c_d
                                   ORDER BY datetime ASC
                                   '''.format(t_n=table_name,
                                              sc=scrip_code,
                                              c=min_data),
                                   con=connection)

            # print(df)
            if len(df) < min_data:
                print('Minimum candles not present')
                return
            # Create a Data Feed
            data = btfeeds.PandasData(dataname=df,
                                      timeframe=bt.TimeFrame.Days,
                                      compression=1,
                                      datetime=0,
                                      high=2,
                                      low=3,
                                      open=1,
                                      close=4,
                                      volume=5,
                                      openinterest=-1)

            cerebro.adddata(data, name=scrip_code)

        ## Add Benchmark Data to Cerebro

        b_data, benchmark_name = benchmark_data(ex, m_t, t_f, min_data)
        cerebro.adddata(b_data, name=benchmark_name)
        print('Benchmark Added')
        return cerebro

    except Exception as e:
        logging.exception(str(e))

    finally:
        dao.close_db_connection(connection)
Example #10
import backtrader as bt
import backtrader.feeds as btfeeds
import pandas as pd
from strategies import Basic_RSI, BuyAndHold_Target

start_cash = 10000

cerebro = bt.Cerebro()

df = pd.read_csv('data\\ETHUSDT.csv')

df.index = pd.to_datetime(df['Datetime'], unit='s')

data = btfeeds.PandasData(dataname=df)

cerebro.adddata(data)

cerebro.broker.set_cash(start_cash)

cerebro.addstrategy(BuyAndHold_Target)

print('Start:   $ {}'.format(round(cerebro.broker.get_value())))

cerebro.run()

print('End:     $ {}'.format(round(cerebro.broker.getvalue())))
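
# a P/L line could be added as well (a small addition, not in the original):
print('P/L:     $ {}'.format(round(cerebro.broker.getvalue() - start_cash)))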
Example #11
    cerebro.optstrategy(strategy, period=range(1, 40), devfactor=range(1, 40))

    # one_year = dt.timedelta(days=365)
    # days_100 = dt.timedelta(days=100)
    # days_150 = dt.timedelta(days=150)
    # days_30  = dt.timedelta(days=30)
    # month_6  = dt.timedelta(days=180)
    # start = dt.datetime.now() - days_100
    # pipeline = CoinbasePipeline('BTC-USD',start=start, granularity=3600)
    # dataframe = pipeline.get_data()

    dataframe = pd.read_csv("./hist-data/ETH-BTC-100d-1hr-12-16.csv",
                            index_col="datetime",
                            parse_dates=['datetime'])

    data = feeds.PandasData(dataname=dataframe)
    cerebro.adddata(data)
    cerebro.broker.setcash(startcash)
    cerebro.broker.setcommission(commission=0.005)
    SharpeRatioDay = bt.analyzers.SharpeRatio
    cerebro.addanalyzer(SharpeRatioDay,
                        _name='mysharpe',
                        timeframe=bt.TimeFrame.Days)
    cerebro.addanalyzer(btanalyzers.AnnualReturn, _name='areturn')
    cerebro.addanalyzer(btanalyzers.DrawDown, _name='ddown')

    opt_runs = cerebro.run()

    # Generate results list
    final_results_list = []
    for run in opt_runs:
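        # hypothetical continuation (the original is cut off here): collect the
        # parameters and PnL of each run; assumes bt.Cerebro(optreturn=False)
        # so that cerebro.run() returns full strategy objects
        for strategy in run:
            pnl = round(strategy.broker.get_value() - startcash, 2)
            final_results_list.append(
                [strategy.params.period, strategy.params.devfactor, pnl])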
Example #12
    # Add a strategy
    cerebro.addstrategy(TestStrategy)

    # Datas are in a subfolder of the samples. Need to find where the script is
    # because it could have been called from anywhere
    # Create a Data Feed
    dataframe = pd.read_csv(
        '/Users/m4punk/Documents/Coding/python/QuantTrading/Quant-Strategies/datas/twtr.csv',
        parse_dates=True,
        index_col=0,
        na_values=['-'])

    data = btfeeds.PandasData(dataname=dataframe,
                              open=0,
                              high=1,
                              low=2,
                              close=3,
                              volume=4,
                              openinterest=None)

    # Add a FixedSize sizer according to the stake
    cerebro.addsizer(bt.sizers.FixedSize, stake=100)

    # Add the Data Feed to Cerebro
    cerebro.adddata(data)

    # Set Cash
    cerebro.broker.setcash(10000.0)

    # Set the commission - 0.1% ... divide by 100 to remove the %
    cerebro.broker.setcommission(commission=0.001)
Example #13
# drop all the indicator columns we don't need
data = d.drop(columns=[
    'ema-50', 'tema-50', 'dema-50', 'zlema-50', 'hma-50', 'macd', 'rsi-14',
    'stochrsi-14', 'cci-20', 'mfi-14', 'mass-10', 'mom-10', 'sma-50',
    'vwma-50', 'wma-50', 'kama-50', 'trima-50', 'bbands-sma-20-2', 'atr-14',
    'ultosc-7-14-28', 'obv', 'ao'
])

# if the data is 1-hour data, it can be consolidated to a 3-hour interval with this command
#data = data.resample('3H').ohlc()
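# note: .ohlc() would expand each existing column into its own open/high/low/close;
# for a frame that is already OHLCV, an aggregation map is the usual approach
# (a sketch, assuming lowercase column names):
# data = data.resample('3H').agg({'open': 'first', 'high': 'max',
#                                 'low': 'min', 'close': 'last',
#                                 'volume': 'sum'})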

# initialize the backtrader data feed from the pandas DataFrame
# (-1 lets backtrader autodetect each column by name; None marks it as absent)
data_feed = btfeeds.PandasData(dataname=data,
                               datetime=-1,
                               open=-1,
                               high=-1,
                               low=-1,
                               close=-1,
                               volume=-1,
                               openinterest=None)
cerebro.adddata(data_feed)

# quandl is useful for getting stock data
# import quandl
# quandl_data = quandl_data.reset_index()
# quandl_data.drop(columns=["Bid", "Ask", "VWAP"], inplace=True)
# quandl_data.rename(columns={"Date":"datetime", "High":"high","Low":"low","Last":"close", "Volume":"volume"})
# quandl_data["open"] = quandl_data['close'].shift(-1)
# print(quandl_data)

# Analyzers
cerebro.addanalyzer(btanalyzers.SharpeRatio, _name='sharpe_ratio')
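
# the example stops before running; fetching the analyzer results afterwards
# would look roughly like this (a sketch, not part of the original):
results = cerebro.run()
sharpe = results[0].analyzers.sharpe_ratio.get_analysis()
print('Sharpe ratio:', sharpe.get('sharperatio'))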
Example #14
to_datetime = '2020-04-01 00:00:00'

if __name__ == '__main__':
    # Create a cerebro entity
    cerebro = bt.Cerebro()

    # Add a strategy
    cerebro.addstrategy(IchimokuStrat)

    # Feed data
    data = pd.read_csv(os.path.join(datadir, datafile),
                       index_col='datetime',
                       parse_dates=True)
    data = data.loc[(data.index >= pd.to_datetime(from_datetime))
                    & (data.index <= pd.to_datetime(to_datetime))]
    datafeed = btfeeds.PandasData(dataname=data)

    # Add the Data Feed to Cerebro
    cerebro.adddata(datafeed)

    # cerebro.resampledata(datafeed, timeframe=bt.TimeFrame.Weeks, compression=1)

    # Set our desired cash start
    cerebro.broker.setcash(100000)

    # Add a PercentSizer so each position uses 99% of available cash
    cerebro.addsizer(bt.sizers.PercentSizer, percents=99)

    # Set the commission
    cerebro.broker.setcommission(commission=0.001)
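
    # the snippet ends before executing; the customary closing steps
    # (an assumption, mirroring the other examples on this page) would be:
    cerebro.run()
    print('Final portfolio value: {}'.format(cerebro.broker.getvalue()))
    cerebro.plot()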