Code Example #1
    def test_incremental_transform(self, data_frame, exchange):
        difference_all = FractionalDifference(difference_order=0.5,
                                              inplace=True)

        feature_pipeline = FeaturePipeline(steps=[difference_all])

        transformed_frame = feature_pipeline.transform(
            data_frame, exchange.generated_space)

        expected_data_frame = pd.DataFrame([{
            'open': -26.20469322,
            'low': -46.15180724,
            'high': 33.63664884,
            'close': 13.68953482,
        }, {
            'open': 134.53651465,
            'low': 118.24976426,
            'high': 183.39676584,
            'close': 167.11001545,
        }])

        assert np.allclose(expected_data_frame.values,
                           transformed_frame.values)

        next_frame = pd.DataFrame([{
            'open': 200,
            'low': 150,
            'high': 350,
            'close': 300,
        }, {
            'open': 300,
            'low': 250,
            'high': 450,
            'close': 400,
        }])

        transformed_frame = feature_pipeline.transform(
            next_frame, exchange.generated_space)

        expected_data_frame = pd.DataFrame([{
            'open': 127.785105,
            'low': 87.031409,
            'high': 250.046192,
            'close': 209.292496,
        }, {
            'open': 185.484853,
            'low': 166.817514,
            'high': 241.486873,
            'close': 222.819533,
        }])

        assert np.allclose(expected_data_frame.values,
                           transformed_frame.values)

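The test above relies on the pipeline being stateful: FractionalDifference with inplace=True differences each new frame against the history accumulated from earlier transform() calls. A minimal standalone sketch of that pattern, assuming the tensortrade 0.x feature API (the one-argument transform(frame) form follows Code Example #4; the OHLC values are illustrative):

import pandas as pd

from tensortrade.features import FeaturePipeline
from tensortrade.features.stationarity import FractionalDifference

pipeline = FeaturePipeline(steps=[FractionalDifference(difference_order=0.5, inplace=True)])

first_frame = pd.DataFrame([{'open': 100.0, 'low': 80.0, 'high': 160.0, 'close': 140.0}])
next_frame = pd.DataFrame([{'open': 200.0, 'low': 150.0, 'high': 350.0, 'close': 300.0}])

# The second call is differenced against the history kept from the first call,
# mirroring the incremental behaviour asserted in the test above.
print(pipeline.transform(first_frame))
print(pipeline.transform(next_frame))
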
Code Example #2
    def test_transform_space(self, data_frame, exchange):
        difference_all = FractionalDifference(difference_order=0.5, inplace=False)

        feature_pipeline = FeaturePipeline(steps=[difference_all])

        low = np.array([1E-3, ] * 4 + [1E-3, ])
        high = np.array([1E3, ] * 4 + [1E3, ])

        input_space = Box(low=low, high=high, dtype=np.float16)

        transformed_space = feature_pipeline.transform_space(
            input_space, exchange.generated_columns)

        assert transformed_space != input_space
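
The check above only asserts that the transformed space differs from the input. A rough companion sketch, assuming the second argument to transform_space() is the list of column names (as exchange.generated_columns suggests) and that the gym Box space is available:

import numpy as np
from gym.spaces import Box

from tensortrade.features import FeaturePipeline
from tensortrade.features.stationarity import FractionalDifference

columns = ['open', 'high', 'low', 'close', 'volume']

input_space = Box(low=np.array([1E-3] * len(columns)),
                  high=np.array([1E3] * len(columns)),
                  dtype=np.float16)

pipeline = FeaturePipeline(steps=[FractionalDifference(difference_order=0.5, inplace=False)])

# As in the test, the transformed space is expected to differ from the input space.
transformed_space = pipeline.transform_space(input_space, columns)
assert transformed_space != input_space
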
Code Example #3
def test_injects_feature_pipeline_with_context():

    config = {'features': {'shape': (90, 70)}}

    with TradingContext(**config):

        steps = list(repeat(Identity(), 5))
        pipeline = FeaturePipeline(steps)
        assert hasattr(pipeline.context, 'shape')
        assert pipeline.context.shape == (90, 70)
Code Example #4
    def test_full_ta_min_max_pipeline(self, data_frame, reference_frame):
        ta_indicator = TAlibIndicator(
            indicators=["BBAND", "RSI", "EMA", "SMA"])
        min_max = MinMaxNormalizer()
        feature_pipeline = FeaturePipeline([ta_indicator, min_max])

        columns = reference_frame.columns
        col_1 = columns[0]

        reference_frame = reference_frame.drop(columns=[col_1])
        transformed_frame = feature_pipeline.transform(data_frame)

        transformed_frame = transformed_frame.dropna()
        reference_frame = reference_frame.dropna()
        # Round to 10 decimal places before comparing
        significance = 10
        transformed_frame = transformed_frame.round(significance)
        reference_frame = reference_frame.round(significance)

        bb_middle1 = reference_frame.bb_middle.values
        bb_middle2 = transformed_frame.bb_middle.values

        is_valid = (bb_middle1 == bb_middle2).all()
        assert is_valid
Code Example #5
File: btc_simulate.py  Project: luyh/tensortrade
def get_env(file_path):
    df = load_data(file_path)

    normalize = MinMaxNormalizer(inplace=True)
    difference = FractionalDifference(difference_order=0.6, inplace=True)
    feature_pipeline = FeaturePipeline(steps=[normalize, difference])

    reward_strategy = SimpleProfitStrategy()
    action_strategy = DiscreteActionStrategy(n_actions=20, instrument_symbol='BTC/USDT')

    exchange = SimulatedExchange(base_instrument='USDT',
                                 should_pretransform_obs=True,
                                 feature_pipeline=feature_pipeline
                                 )
    exchange.data_frame = df[:STEP]
    environment = TradingEnvironment(exchange=exchange,
                                     action_strategy=action_strategy,
                                     reward_strategy=reward_strategy,
                                     feature_pipeline=feature_pipeline)

    return environment
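
A hedged usage sketch for the helper above: the returned environment can be driven with a plain gym-style loop, assuming the tensortrade 0.x TradingEnvironment exposes the usual reset()/step() interface (the CSV path is hypothetical and must be readable by load_data):

environment = get_env('./data/Coinbase_BTCUSD_1h.csv')  # hypothetical path

observation = environment.reset()
done = False

while not done:
    # Sample a random action purely for illustration; a trained agent would act here.
    action = environment.action_space.sample()
    observation, reward, done, info = environment.step(action)
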
Code Example #6
    def handle(self, *args, **kwargs):
        print(
            'Start Time is : ',
            datetime.datetime.now(pytz.timezone('Asia/Tehran')).strftime(
                "%d %b %Y %H:%M:%S Tehran"))
        # print('Start Time is : ', strftime("%Y-%m-%d %H:%M:%S", localtime()))
        start_time = int(time.time())

        train = False  # set to True to train the agent, False to evaluate it
        iteration = 1
        n_step_multiply = 100
        window_size = 5
        n_step = 128
        gamma = 0.995
        learning_rate = 2.5E-4
        lam = 0.9
        ent_coef = 0.1
        leverage = 25
        initial_balance = 100
        max_allowed_amount = 5000
        stop_loss_percent = 60
        n_splitt = 5
        end_candle = 300
        render_mode = 'chart2'

        # df = pd.read_csv('./tensortrade/data/Coinbase_BTCUSD_d.csv')[0:end_candle]
        # candles = BitmxMinCandle.objects.all().order_by('id')[99:end_candle] #100000
        # candles = MinCandle.objects.all().order_by('id')[99:end_candle] #134000
        # candles = Min3Candle.objects.all().order_by('id')[99:end_candle] #48000
        candles = Min5Candle.objects.all().order_by('id')[99:
                                                          end_candle]  #28800
        # candles = HourlyCandle.objects.all().order_by('id')[0:end_candle] #22500
        df = pd.DataFrame(list(candles.values()))
        df = df.drop(['id', 'coin_marketcap', 'marketcap', 'symbol', 'date'],
                     axis=1)
        df['open'] = df['open'].astype('float64')
        df['close'] = df['close'].astype('float64')
        df['low'] = df['low'].astype('float64')
        df['high'] = df['high'].astype('float64')
        df['volume'] = df['volume'].astype('float64')
        price_columns = ["close"]
        volume_column = ["volume"]
        normalized_price = MinMaxNormalizer(columns=price_columns,
                                            feature_min=1E-6,
                                            feature_max=1,
                                            input_min=1E-6,
                                            input_max=1E6)
        normalized_volume = MinMaxNormalizer(columns=volume_column,
                                             feature_min=1E-9,
                                             feature_max=1,
                                             input_min=1E-9,
                                             input_max=1E9)
        # difference_all = FractionalDifference(difference_order=0.6)
        # ema = TAlibIndicator(indicators=[['EMA', {'args':['close'], 'params':{'timeperiod':14}}]])
        # sma50 = TAlibIndicator(indicators=[['SMA', {'args':['close'], 'params':{'timeperiod':50}}]])
        # sma100 = TAlibIndicator(indicators=[['SMA', {'args':['close'], 'params':{'timeperiod':100}}]])
        # sma200 = TAlibIndicator(indicators=[['SMA', {'args':['close'], 'params':{'timeperiod':200}}]])
        rsi = TAlibIndicator(indicators=[[
            'RSI', {
                'args': ['close'],
                'params': {
                    'timeperiod': 14
                }
            }
        ]])
        macd = TAlibIndicator(indicators=[[
            'MACD', {
                'args': ['close'],
                'params': {
                    'fastperiod': 5,
                    'slowperiod': 20,
                    'signalperiod': 30
                }
            }
        ]])
        stochastic = TAlibIndicator(indicators=[[
            'STOCH', {
                'args': ['high', 'low', 'close'],
                'params': {
                    'fastk_period': 5,
                    'slowk_period': 3,
                    'slowd_period': 2
                }
            }
        ]])
        cci = TAlibIndicator(indicators=[[
            'CCI', {
                'args': ['high', 'low', 'close'],
                'params': {
                    'timeperiod': 20
                }
            }
        ]])
        feature_pipeline = FeaturePipeline(
            steps=[normalized_volume, normalized_price])
        exchange = BitmexExchange3(data_frame=df,
                                   base_instrument='USDT',
                                   window_size=window_size,
                                   initial_balance=initial_balance,
                                   commission_percent=0.60,
                                   leverage=leverage,
                                   stop_loss_percent=stop_loss_percent)
        action_scheme = DiscreteActionsPlus(
            n_actions=4,
            instrument='BTC/USDT',
            max_allowed_amount=max_allowed_amount)
        # action_scheme = CustomeDiscreteActions(n_splitt=n_splitt, instrument='BTC/USDT', max_allowed_amount=max_allowed_amount)
        reward_scheme = BitmexProfit()
        # reward_scheme = AdvancedProfit()
        environment = BitmexEnvironment(exchange=exchange,
                                        action_scheme=action_scheme,
                                        reward_scheme=reward_scheme,
                                        feature_pipeline=feature_pipeline)
        model_kwargs = {
            'learning_rate': learning_rate,
            'nminibatches': 1,
            'gamma': gamma,
            'lam': lam
        }
        # net_arch = [1024, 'lstm', dict(vf=[256, 64], pi=[64])]
        # net_arch = [1024, 'lstm', 512, 128]
        net_arch = [128, 'lstm', 512, 64]
        # net_arch = [1024, 256]
        policy_kwargs = {
            'net_arch': net_arch,
            # 'feature_extraction': 'mlp',
            'act_fun': tf.nn.relu,
            # 'n_env': 32,
        }
        strategy = BitmexTradingStrategySBL(environment=environment,
                                            model=DQN,
                                            policy='MlpPolicy')

        # custom_objects = {'learning_rate': learning_rate, 'nminibatches': 1, 'gamma': gamma, 'lam': lam, 'n_steps': n_step, 'ent_coef': ent_coef}
        custom_objects = {}
        if train:
            for i in range(iteration):
                print('*-------------- iteration =', i + 1, ' --------------*')
                try:
                    strategy.restore_agent(path="./tensortrade/agents/train_4",
                                           custom_objects=custom_objects)
                    print(
                        'Agent Loaded ',
                        datetime.datetime.now(
                            pytz.timezone('Asia/Tehran')).strftime("%H:%M:%S"))
                    # time.sleep(3)
                except Exception:
                    print('Loading Failed: Agent does not exist')
                    print(
                        'New Agent created ',
                        datetime.datetime.now(
                            pytz.timezone('Asia/Tehran')).strftime("%H:%M:%S"))
                    # time.sleep(3)
                print('Training ...')
                strategy.train(steps=round(len(df) * n_step_multiply))
                strategy.save_agent(path="./tensortrade/agents/train_4")
                print(
                    'Agent Saved ',
                    datetime.datetime.now(
                        pytz.timezone('Asia/Tehran')).strftime("%H:%M:%S"))
                print('Elapsed Time is : ',
                      round((int(time.time()) - start_time) / 60, 2),
                      'Minutes')
        else:
            strategy.restore_agent(path="./tensortrade/agents/train_4")
            performance = strategy.test(steps=round(len(df) - window_size - 2),
                                        render_mode=render_mode)
            performance.balance.plot(label='Balance', color='green')
            print(performance)
            plt.xlabel('Steps (' + str(round(len(df) - window_size)) + ')')
            plt.ylabel('Balance (XBT)')
            plt.legend()
            plt.show()

        print('Elapsed Time is : ',
              round((int(time.time()) - start_time) / 60, 2), 'Minutes')
Code Example #7
    def handle(self, *args, **kwargs):
        print('Start Time is : ', datetime.datetime.now(pytz.timezone('Asia/Tehran')).strftime("%d %b %Y %H:%M:%S Tehran"))
        # print('Start Time is : ', strftime("%Y-%m-%d %H:%M:%S", localtime()))
        start_time = int(time.time())

        train = False  # set to True to train the agent, False to evaluate it
        iteration = 500
        n_step_multiply = 2
        window_size = 50
        n_step = 16
        gamma = 0.995
        learning_rate = 2.5E-4
        lam = 0.9
        ent_coef = 0.1
        leverage = 10
        initial_balance = 100
        max_allowed_amount = 5000
        stop_loss_percent = 60
        n_splitt = 5
        end_candle = 7200
        render_mode = 'chart2'

        # df = pd.read_csv('./tensortrade/data/Coinbase_BTCUSD_d.csv')[0:end_candle]
        # candles = BitmxMinCandle.objects.all().order_by('id')[99:end_candle] #100000
        # candles = MinCandle.objects.all().order_by('id')[99:end_candle] #134000
        # candles = Min3Candle.objects.all().order_by('id')[99:end_candle] #48000
        # candles = Min5Candle.objects.all().order_by('id')[99:end_candle] #28800
        candles = HourlyCandle.objects.all().order_by('id')[0:end_candle] #22500
        df = pd.DataFrame(list(candles.values()))
        df = df.drop(['id', 'coin_marketcap', 'marketcap', 'symbol', 'date'], axis=1)
        df['open'] = df['open'].astype('float64')
        df['close'] = df['close'].astype('float64')
        df['low'] = df['low'].astype('float64')
        df['high'] = df['high'].astype('float64')
        df['volume'] = df['volume'].astype('float64')
        price_columns = ["open", "high", "low", "close"]
        volume_column = ["volume"]
        normalized_price = MinMaxNormalizer(columns=price_columns, feature_min=1E-6, feature_max=1, input_min=1E-6, input_max=1E6)
        normalized_volume = MinMaxNormalizer(columns=volume_column, feature_min=1E-9, feature_max=1, input_min=1E-9, input_max=1E9)
        # difference_all = FractionalDifference(difference_order=0.6)
        # ema = TAlibIndicator(indicators=[['EMA', {'args':['close'], 'params':{'timeperiod':14}}]])
        # sma50 = TAlibIndicator(indicators=[['SMA', {'args':['close'], 'params':{'timeperiod':50}}]])
        # sma100 = TAlibIndicator(indicators=[['SMA', {'args':['close'], 'params':{'timeperiod':100}}]])
        # sma200 = TAlibIndicator(indicators=[['SMA', {'args':['close'], 'params':{'timeperiod':200}}]])
        rsi = TAlibIndicator(indicators=[['RSI', {'args':['close'], 'params':{'timeperiod':14}}]])
        macd = TAlibIndicator(indicators=[['MACD', {'args':['close'], 'params':{'fastperiod':5,'slowperiod':20,'signalperiod':30}}]])
        stochastic = TAlibIndicator(indicators=[['STOCH', {'args':['high', 'low', 'close'], 'params':{'fastk_period':5,'slowk_period':3,'slowd_period':2}}]])
        cci = TAlibIndicator(indicators=[['CCI', {'args':['high', 'low', 'close'], 'params':{'timeperiod':20}}]])
        feature_pipeline = FeaturePipeline(steps=[normalized_volume, macd, stochastic, cci, rsi])
        exchange = BitmexExchange2(data_frame=df, base_instrument='USDT', window_size=window_size, initial_balance=initial_balance, commission_percent=0.60, leverage=leverage, stop_loss_percent=stop_loss_percent)
        # action_scheme = DiscreteActions(n_actions=3, instrument='BTC/USDT', max_allowed_amount=max_allowed_amount)
        action_scheme = CustomeDiscreteActions(n_splitt=n_splitt, instrument='BTC/USDT', max_allowed_amount=max_allowed_amount)
        reward_scheme = BitmexProfit()
        # reward_scheme = AdvancedProfit()
        environment = BitmexEnvironment(exchange=exchange, action_scheme=action_scheme, reward_scheme=reward_scheme, feature_pipeline=feature_pipeline)
        model_kwargs = {'learning_rate': learning_rate, 'nminibatches': 1, 'gamma': gamma, 'lam': lam, 'noptepochs': 4, 'n_steps': n_step, 'ent_coef': ent_coef,
                        # 'cliprange_vf': -1,
                        # 'tensorboard_log': "./tensortrade/logs/"
                        }
        # net_arch = [1024, 'lstm', dict(vf=[256, 64], pi=[64])]
        # net_arch = [1024, 'lstm', 512, 128]
        net_arch = [256, 'lstm', 512, 64]
        # net_arch = [1024, 256]
        policy_kwargs = {'net_arch': net_arch,
                         # 'feature_extraction': 'mlp',
                         'act_fun': tf.nn.relu,
                         # 'n_env': 32,
                         }
        strategy = BitmexTradingStrategySBL(environment=environment, model=PPO2, policy='MlpLstmPolicy', model_kwargs=model_kwargs, policy_kwargs=policy_kwargs)

        custom_objects = {'learning_rate': learning_rate, 'nminibatches': 1, 'gamma': gamma, 'lam': lam, 'n_steps': n_step, 'ent_coef': ent_coef}
        # custom_objects = {}
        if train:
            for i in range(iteration):
                print('*-------------- iteration =', i+1, ' --------------*')
                try:
                    strategy.restore_agent(path="./tensortrade/agents/train_2", custom_objects=custom_objects)
                    print('Agent Loaded ', datetime.datetime.now(pytz.timezone('Asia/Tehran')).strftime("%H:%M:%S"))
                    # time.sleep(3)
                except Exception:
                    print('Loading Failed: Agent does not exist')
                    print('New Agent created ', datetime.datetime.now(pytz.timezone('Asia/Tehran')).strftime("%H:%M:%S"))
                    # time.sleep(3)
                print('Training ...')
                strategy.train(steps=round(len(df) * n_step_multiply))
                strategy.save_agent(path="./tensortrade/agents/train_2")
                print('Agent Saved ', datetime.datetime.now(pytz.timezone('Asia/Tehran')).strftime("%H:%M:%S"))
                print('Elapsed Time is : ', round((int(time.time())-start_time)/60, 2), 'Minutes')
        else:
            strategy.restore_agent(path="./tensortrade/agents/train_2")
            performance = strategy.test(steps=round(len(df) - window_size - 2), render_mode=render_mode)
            performance.balance.plot(label='Balance', color='green')
            print(performance)
            plt.xlabel('Steps ('+str(round(len(df) - window_size))+')')
            plt.ylabel('Balance (XBT)')
            plt.legend()
            plt.show()

        print('Elapsed Time is : ', round((int(time.time())-start_time)/60, 2), 'Minutes')

        # tensorboard --logdir /home/coinstealer/www/binanceweb/tensortrade/logs/
        # performances = []
        # iteration = 10
        # for i in range(iteration):
        #     print('-------------- iteration >', i, '    ', datetime.datetime.now(pytz.timezone('Asia/Tehran')).strftime("%d %b %Y %H:%M:%S Tehran"), ' --------------')
        #
        #     # performances.append(strategy.run(steps=round(len(df)*0.96), render_mode='chart2'))# steps=100 episodes=1 'render.modes': ['log', 'chart']
        #     performances.append(
        #         strategy.run(steps=round(len(df) * 0.96)))  # steps=100 episodes=1 'render.modes': ['log', 'chart']
        #     if i == 0:
        #         performances[i].balance.plot(label=i, color='blue')
        #     elif i == max(range(iteration)):
        #         performances[i].balance.plot(label=i, color='green')
        #     elif i % int(iteration / 10) == 0:
        #         performances[i].balance.plot(label=i, color=(0.9, 0.3, 0.5, i / iteration))

        # print(performances[0])


        # agent = model(policy, environment, model_kwargs=params)

        # layer1 = Dense(name='layer1', size=100, input_spec={'shape': (6,), 'type': 'float'})
        # layer2 = Dense(name='layer2', size=100, input_spec={'shape': (100,), 'type': 'float'})
        # network_spec = LayeredNetwork(name='net1', layers=[layer1, layer2], inputs_spec={'shape': (7,), 'type': 'float'})
        # agent_spec = {"type": "ppo", "discount": 0.995, "likelihood_ratio_clipping": 0.2, 'network': network_spec}

        # agent_spec = {
        #     "policy": {
        #         "network": {
        #             "type": "layered",
        #             "inputs_spec": {'shape': (7,), 'type': 'float'},
        #             "layers": [
        #                 {"type": 'dense', "size": 32, 'input_spec': {'shape': (6,), 'type': 'float'}},
        #                 {"type": 'dense', "size": 32, 'input_spec': {'shape': (6,), 'type': 'float'}},
        #                 # {"type": 'softmax'},
        #             ],
        #         }
        #     },
        #     "update": 64,
        #     "memory": {"type": Replay, "capacity": 10},
        #     "objective": "policy_gradient",
        #     "reward_estimation": {
        #         "horizon": 20
        #     }
        # }

        # agent_spec = {
        #     "policy": {
        #         "network": {
        #             "type": "auto",
        #             "size": 256,
        #             "depth": 2,
        #             # "internal_rnn": True,
        #         }
        #     },
        #     "update": 64,
        #     "memory": {"type": Replay, "capacity": 200},
        #     "objective": "policy_gradient",
        #     "reward_estimation": {
        #         "horizon": 20
        #     }
        # }
Code Example #8
import pandas as pd
import tensorflow

assert tensorflow.__version__ == '1.13.1'

from tensortrade.strategies import TensorforceTradingStrategy
from tensortrade.rewards import DirectProfitStrategy
from tensortrade.actions import FutureActionStrategy
reward_strategy = DirectProfitStrategy()
action_strategy = FutureActionStrategy()

#%% Feature Pipeline
from tensortrade.features.stationarity import FractionalDifference
from tensortrade.features.scalers import MinMaxNormalizer
from tensortrade.features import FeaturePipeline

normalize = MinMaxNormalizer(inplace=True)
difference = FractionalDifference(difference_order=0.6, inplace=True)
feature_pipeline = FeaturePipeline(steps=[])  # note: normalize and difference above are defined but not added to the steps

#%% Data Input
from tensortrade.exchanges.simulated.future_exchange import FutureExchange
data = pd.read_csv('TA.csv', index_col=0)

data = data[data.index % 30 == 0]
data = data.reset_index(drop=True)

exchange = FutureExchange(data,
                          base_instrument='RMB',
                          exclude_close=True,
                          initial_balance=10000,
                          should_pretransform_obs=False)

#%% Environment Setup
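
The excerpt stops at the Environment Setup cell. A possible continuation, modeled on Code Examples #5 and #11 and assuming the same tensortrade 0.x TradingEnvironment constructor:

from tensortrade.environments import TradingEnvironment

environment = TradingEnvironment(exchange=exchange,
                                 action_strategy=action_strategy,
                                 reward_strategy=reward_strategy,
                                 feature_pipeline=feature_pipeline)
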
Code Example #9
    def create_unity_environment(worker_id: int):
        window_size = 10
        leverage = 25
        initial_balance = 100
        max_allowed_amount = 5000
        stop_loss_percent = 60
        n_splitt = 5
        env_seed = seed
        if not env_seed:
            env_seed = seed_pool[worker_id % len(seed_pool)]
        start_candle = 99
        end_candle = start_candle + n_steps + 1

        # df = pd.read_csv('./tensortrade/data/Coinbase_BTCUSD_d.csv')[start_candle:end_candle]
        # candles = BitmxMinCandle.objects.all().order_by('id')[start_candle:end_candle] #221000 (100K-150K Deleted)
        candles = MinCandle.objects.all().order_by('id')[start_candle:
                                                         end_candle]  # 204000
        # candles = Min3Candle.objects.all().order_by('id')[start_candle:end_candle] #68000
        # candles = Min5Candle.objects.all().order_by('id')[start_candle:end_candle] #41000
        # candles = HourlyCandle.objects.all().order_by('id')[start_candle:end_candle] #23900
        df = pd.DataFrame(list(candles.values()))
        df = df.drop(['id', 'coin_marketcap', 'marketcap', 'symbol', 'date'],
                     axis=1)
        df['open'] = df['open'].astype('float64')
        df['close'] = df['close'].astype('float64')
        df['low'] = df['low'].astype('float64')
        df['high'] = df['high'].astype('float64')
        df['volume'] = df['volume'].astype('float64')
        price_columns = ["open", "high", "low", "close"]
        volume_column = ["volume"]
        normalized_price = MinMaxNormalizer(columns=price_columns,
                                            feature_min=1E-6,
                                            feature_max=1,
                                            input_min=1E-6,
                                            input_max=1E6)
        normalized_volume = MinMaxNormalizer(columns=volume_column,
                                             feature_min=1E-9,
                                             feature_max=1,
                                             input_min=1E-9,
                                             input_max=1E9)
        # difference_all = FractionalDifference(difference_order=0.6)
        ema = TAlibIndicator(indicators=[[
            'EMA', {
                'args': ['close'],
                'params': {
                    'timeperiod': 14
                }
            }
        ]])
        # sma50 = TAlibIndicator(indicators=[['SMA', {'args':['close'], 'params':{'timeperiod':50}}]])
        sma100 = TAlibIndicator(indicators=[[
            'SMA', {
                'args': ['close'],
                'params': {
                    'timeperiod': 100
                }
            }
        ]])
        # sma200 = TAlibIndicator(indicators=[['SMA', {'args':['close'], 'params':{'timeperiod':200}}]])
        rsi = TAlibIndicator(indicators=[[
            'RSI', {
                'args': ['close'],
                'params': {
                    'timeperiod': 14
                }
            }
        ]])
        macd = TAlibIndicator(indicators=[[
            'MACD', {
                'args': ['close'],
                'params': {
                    'fastperiod': 5,
                    'slowperiod': 20,
                    'signalperiod': 30
                }
            }
        ]])
        stochastic = TAlibIndicator(indicators=[[
            'STOCH', {
                'args': ['high', 'low', 'close'],
                'params': {
                    'fastk_period': 5,
                    'slowk_period': 3,
                    'slowd_period': 2
                }
            }
        ]])
        cci = TAlibIndicator(indicators=[[
            'CCI', {
                'args': ['high', 'low', 'close'],
                'params': {
                    'timeperiod': 20
                }
            }
        ]])
        feature_pipeline = FeaturePipeline(
            steps=[normalized_price, normalized_volume, macd, stochastic])
        exchange = BitmexExchange(data_frame=df,
                                  base_instrument='USDT',
                                  window_size=window_size,
                                  initial_balance=initial_balance,
                                  commission_percent=0.60,
                                  leverage=leverage,
                                  stop_loss_percent=stop_loss_percent)
        # action_scheme = DiscreteActions(n_actions=3, instrument='BTC/USDT', max_allowed_amount=max_allowed_amount)
        action_scheme = DiscreteActionsPlus(
            n_actions=4,
            instrument='BTC/USDT',
            max_allowed_amount=max_allowed_amount)
        # action_scheme = CustomeDiscreteActions(n_splitt=n_splitt, instrument='BTC/USDT', max_allowed_amount=max_allowed_amount)
        reward_scheme = BitmexProfit()
        # reward_scheme = AdvancedProfit()
        environment = BitmexEnvironment(exchange=exchange,
                                        action_scheme=action_scheme,
                                        reward_scheme=reward_scheme,
                                        feature_pipeline=feature_pipeline)
        return environment
Code Example #10
exchange = SimulatedExchange(data_frame=df, base_instrument='USD')

# Feature Pipelines

from tensortrade.features import FeaturePipeline
from tensortrade.features.scalers import MinMaxNormalizer
from tensortrade.features.stationarity import FractionalDifference
from tensortrade.features.indicators import SimpleMovingAverage

price_columns = ['open', 'high', 'low', 'close']

normalize_price = MinMaxNormalizer(price_columns)
moving_averages = SimpleMovingAverage(price_columns)
difference_all = FractionalDifference(difference_order=0.6)

feature_pipeline = FeaturePipeline(
    steps=[normalize_price, moving_averages, difference_all])

exchange.feature_pipeline = feature_pipeline

# Action Strategies

from tensortrade.actions import DiscreteActionStrategy

action_strategy = DiscreteActionStrategy(n_actions=20, instrument_symbol='BTC')

# Reward Strategies

from tensortrade.rewards import SimpleProfitStrategy

reward_strategy = SimpleProfitStrategy()
Code Example #11
from tensortrade.exchanges.simulated import FBMExchange
from tensortrade.features.scalers import MinMaxNormalizer
from tensortrade.features.stationarity import FractionalDifference
from tensortrade.features import FeaturePipeline
from tensortrade.rewards import SimpleProfitStrategy
from tensortrade.actions import DiscreteActionStrategy
from tensortrade.environments import TradingEnvironment

# creating an environment

normalize_price = MinMaxNormalizer(['open', 'high', 'low', 'close'])
difference = FractionalDifference(difference_order=0.6)
feature_pipeline = FeaturePipeline(steps=[normalize_price, difference])

exchange = FBMExchange(timeframe='1h',
                       base_instrument='BTC',
                       feature_pipeline=feature_pipeline)

reward_strategy = SimpleProfitStrategy()

action_strategy = DiscreteActionStrategy(n_actions=20,
                                         instrument_symbol='ETH/BTC')

environment = TradingEnvironment(exchange=exchange,
                                 action_strategy=action_strategy,
                                 reward_strategy=reward_strategy,
                                 feature_pipeline=feature_pipeline)

# defining the agent

from stable_baselines.common.policies import MlpLnLstmPolicy
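
The excerpt ends just after importing the policy. A hedged sketch of how the agent definition might continue, assuming the StableBaselinesTradingStrategy wrapper from the same tensortrade 0.x releases (hyperparameters are illustrative; Code Examples #6 and #7 use an equivalent wrapper with the same keyword arguments):

from stable_baselines import PPO2

from tensortrade.strategies import StableBaselinesTradingStrategy

strategy = StableBaselinesTradingStrategy(environment=environment,
                                          model=PPO2,
                                          policy=MlpLnLstmPolicy,
                                          model_kwargs={'learning_rate': 1E-5, 'nminibatches': 1})

# performance = strategy.run(steps=10000)  # as in the commented-out runs in Code Example #7
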
Code Example #12
import pandas as pd
import tensortrade.slippage as slippage
import copy
import pytest

from tensortrade import TradingContext
from tensortrade.trades import Trade, TradeType
from tensortrade.exchanges.simulated import StochasticExchange

from tensortrade.features import FeaturePipeline
from tensortrade.features.indicators import TAlibIndicator
from tensortrade.features.scalers import MinMaxNormalizer

ta_indicator = TAlibIndicator(indicators=["BBAND", "RSI", "EMA", "SMA"])
min_max = MinMaxNormalizer()
feature_pipeline = FeaturePipeline([ta_indicator, min_max])

config = {
    'base_instrument': 'EURO',
    'instruments': 'ETH',
    'exchanges': {
        'credentials': {
            'api_key': '48hg34wydghi7ef',
            'api_secret_key': '0984hgoe8d7htg'
        }
    },
    "feature_pipeline": feature_pipeline
}


@pytest.fixture(scope="module")
Code Example #13
                             base_instrument='USD',
                             window_size=5,
                             should_pretransform_obs=True)
# -------------------------- Feature Pipelines ------------------------#
price_columns = ["open", "high", "low", "close"]
volume_column = ["volume"]
normalized_price = MinMaxNormalizer(price_columns)
normalized_volume = MinMaxNormalizer(volume_column)
sma = SimpleMovingAverage(columns=price_columns, window_size=50)
indicators = TAlibIndicator(
    indicators=["EMA", "RSI", "CCI", "Stochastic", "MACD"],
    lows=[30, -100, 20],
    highs=[70, 100, 80])
difference_all = FractionalDifference(difference_order=0.6)
feature_pipeline = FeaturePipeline(steps=[
    normalized_price, sma, difference_all, normalized_volume, indicators
])
exchange.feature_pipeline = feature_pipeline
# -------------------------- Action Schemes ------------------------#
action_scheme = DiscreteActions(n_actions=20, instrument='BTC/USD')
# -------------------------- Reward Schemes ------------------------#
reward_scheme = SimpleProfit()
# -------------------------- Live Exchange ------------------------#
# import ccxt
# from tensortrade.exchanges.live import CCXTExchange
# coinbase = ccxt.coinbasepro()
# exchange = CCXTExchange(exchange=coinbase, base_instrument='USD')
# -------------------------- Simulated Exchange ------------------------#
# df = pd.read_csv('./data/Coinbase_BTCUSD_1h.csv')
# exchange = SimulatedExchange(data_frame=df, base_instrument='USD',feature_pipeline=feature_pipeline)