Exemplo n.º 1
0
def test_smoke():
    """Smoke-run a two-exchange portfolio with random actions.

    Dumps the portfolio ledger to the clipboard (twice, in different
    orderings) for manual inspection, then fails on purpose so pytest
    surfaces the captured output.
    """
    cdd = CryptoDataDownload()

    # Hourly candles for both exchanges.
    cb_btc = cdd.fetch("Coinbase", "USD", "BTC", "1h")
    cb_eth = cdd.fetch("Coinbase", "USD", "ETH", "1h")

    bs_btc = cdd.fetch("Bitstamp", "USD", "BTC", "1h")
    bs_eth = cdd.fetch("Bitstamp", "USD", "ETH", "1h")
    bs_ltc = cdd.fetch("Bitstamp", "USD", "LTC", "1h")

    # Truncate every price stream to the length of the Coinbase BTC series.
    steps = len(cb_btc)

    coinbase = Exchange("coinbase", service=execute_order)(
        Stream("USD-BTC", list(cb_btc['close'][-steps:])),
        Stream("USD-ETH", list(cb_eth['close'][-steps:])))

    bitstamp = Exchange("bitstamp", service=execute_order)(
        Stream("USD-BTC", list(bs_btc['close'][-steps:])),
        Stream("USD-ETH", list(bs_eth['close'][-steps:])),
        Stream("USD-LTC", list(bs_ltc['close'][-steps:])))

    portfolio = Portfolio(USD, [
        Wallet(coinbase, 200000 * USD),
        Wallet(coinbase, 0 * BTC),
        Wallet(bitstamp, 10000 * USD),
        Wallet(bitstamp, 2 * BTC),
        Wallet(bitstamp, 20 * ETH),
        Wallet(bitstamp, 30 * LTC),
    ])

    action_scheme = ManagedRiskOrders(
        durations=[4, 6, 8, 10],
        stop_loss_percentages=[0.01, 0.003, 0.3],
        take_profit_percentages=[0.01, 0.003, 0.3],
        trade_sizes=[0.99999999999999])

    env = TradingEnvironment(action_scheme=action_scheme,
                             reward_scheme="simple",
                             portfolio=portfolio)

    # Drive the environment to completion with random actions.
    done, n_steps = False, 0
    while not done:
        obs, reward, done, info = env.step(env.action_space.sample())
        n_steps += 1

    # Copy the step/poid-sorted ledger to the clipboard for inspection.
    portfolio.ledger.as_frame().sort_values(["step",
                                             "poid"]).to_clipboard(index=False)
    df = portfolio.ledger.as_frame()

    # Re-group the ledger rows by order id and copy again (overwrites the
    # previous clipboard contents).
    frames = [df.loc[df.poid == poid, :] for poid in df.poid.unique()]
    pd.concat(frames, ignore_index=True, axis=0).to_clipboard(index=False)

    # Deliberate failure — presumably so the run output is always shown.
    pytest.fail("Failed.")
def test_runs_with__external_feed_only(portfolio):
    """Run a full episode whose observations come only from an external feed."""
    # Last 100 daily rows of the combined (BTC, ETH) Coinbase data set.
    frame = pd.read_csv("tests/data/input/coinbase_(BTC,ETH)USD_d.csv").tail(100)
    frame = frame.rename({"Unnamed: 0": "date"}, axis=1).set_index("date")

    # Split the columns by symbol prefix.
    coinbase_btc = frame.loc[:, [c.startswith("BTC") for c in frame.columns]]
    coinbase_eth = frame.loc[:, [c.startswith("ETH") for c in frame.columns]]

    # Enrich each symbol's frame with the full `ta` indicator set.
    for prefix, data in (("BTC", coinbase_btc), ("ETH", coinbase_eth)):
        ta.add_all_ta_features(
            data,
            colprefix=prefix + ":",
            **{k: prefix + ":" + k
               for k in ['open', 'high', 'low', 'close', 'volume']}
        )

    # One Stream node per column, BTC columns first, then ETH.
    nodes = []
    with Module("coinbase") as coinbase:
        for data in (coinbase_btc, coinbase_eth):
            for name in data.columns:
                nodes.append(Stream(name, list(data[name])))

    feed = DataFeed()(coinbase)

    env = TradingEnvironment(
        portfolio=portfolio,
        action_scheme=ManagedRiskOrders(),
        reward_scheme=SimpleProfit(),
        feed=feed,
        window_size=50,
        use_internal=False,
        enable_logger=False
    )

    obs = env.reset()
    done = False
    while not done:
        obs, reward, done, info = env.step(env.action_space.sample())

    # Final observation window spans every external feature column.
    total_features = coinbase_btc.shape[1] + coinbase_eth.shape[1]
    assert obs.shape == (50, total_features)
Exemplo n.º 3
0
def make_env(exchange: str, action: str, reward: str):
    """Build a TradingEnvironment with mocked portfolio and feed.

    ``exchange`` is accepted for signature compatibility but not used here.
    """
    return TradingEnvironment(portfolio=mock.Mock(),
                              action_scheme=action,
                              reward_scheme=reward,
                              feed=mock.Mock())
Exemplo n.º 4
0
def test_init_multiple_exchanges(portfolio):
    """A freshly reset environment exposes a (50, 32) observation window."""
    env = TradingEnvironment(portfolio=portfolio,
                             action_scheme=ManagedRiskOrders(),
                             reward_scheme=SimpleProfit(),
                             window_size=50,
                             enable_logger=False)

    first_obs = env.reset()

    expected_shape = (50, 32)
    assert first_obs.shape == expected_shape
    assert env.observation_space.shape == expected_shape
Exemplo n.º 5
0
def test_runs_with_only_internal_data_feed(portfolio):
    """A full random-action episode on the internal feed yields (50, 32) obs."""
    env = TradingEnvironment(portfolio=portfolio,
                             action_scheme=ManagedRiskOrders(),
                             reward_scheme=SimpleProfit(),
                             window_size=50,
                             enable_logger=False)

    obs = env.reset()
    done = False
    while not done:
        obs, reward, done, info = env.step(env.action_space.sample())

    assert obs.shape == (50, 32)
Exemplo n.º 6
0
def create_env():
    """Build a TradingEnvironment backed by live CryptoDataDownload CSVs."""

    def fetch_data(exchange_name, symbol, timeframe):
        # Download one OHLCV csv and normalise its columns to "<SYM>:<name>".
        url = "https://www.cryptodatadownload.com/cdd/"
        filename = "{}_{}USD_{}.csv".format(exchange_name, symbol, timeframe)
        volume_column = "Volume {}".format(symbol)
        new_volume_column = "Volume_{}".format(symbol)

        frame = pd.read_csv(url + filename, skiprows=1)
        frame = frame[::-1]  # reverse row order (source files appear newest-first)
        frame = frame.drop(["Symbol"], axis=1)
        frame = frame.rename({"Volume USD": "volume",
                              volume_column: new_volume_column}, axis=1)
        frame = frame.set_index("Date")
        frame.columns = [symbol + ":" + c.lower() for c in frame.columns]
        return frame

    # Only used if pandas gives a SSLError
    ssl._create_default_https_context = ssl._create_unverified_context

    coinbase_data = pd.concat(
        [fetch_data("Coinbase", "BTC", "1h"),
         fetch_data("Coinbase", "ETH", "1h")],
        axis=1)

    # Price streams used for order execution.
    coinbase = Exchange("coinbase", service=execute_order)(
        Stream("USD-BTC", list(coinbase_data['BTC:close'])),
        Stream("USD-ETH", list(coinbase_data['ETH:close'])))

    # Observation feed: every column becomes a stream under the
    # "coinbase" namespace.
    with Module("coinbase") as coinbase_ns:
        nodes = [Stream(col, list(coinbase_data[col]))
                 for col in coinbase_data.columns]

    feed = DataFeed([coinbase_ns])

    portfolio = Portfolio(USD, [
        Wallet(coinbase, 10000 * USD),
        Wallet(coinbase, 10 * BTC),
        Wallet(coinbase, 5 * ETH),
    ])

    return TradingEnvironment(feed=feed,
                              portfolio=portfolio,
                              action_scheme='managed-risk',
                              reward_scheme='risk-adjusted',
                              window_size=20)
def trading(agent_path, n_step=len(test)):
    """Replay a restored DQN agent against the held-out ``test`` data.

    Parameters
    ----------
    agent_path :
        Accepted for interface compatibility; the agent is restored from
        its default location via ``agent.restore()``.
    n_step : int
        Maximum number of environment steps. Defaults to ``len(test)``,
        captured once at definition time from the module-level ``test``
        DataFrame.
    """

    from tensortrade.exchanges import Exchange
    from tensortrade.exchanges.services.execution.simulated import execute_order
    from tensortrade.data import Stream, DataFeed, Module
    from tensortrade.instruments import USD, BTC
    from tensortrade.wallets import Wallet, Portfolio

    # Execution price stream comes from the module-level ``price_history``.
    coinbase = Exchange("coinbase", service=execute_order)(
        Stream("USD-BTC", price_history['close'].tolist())
    )

    portfolio = Portfolio(USD, [
        Wallet(coinbase, 10000 * USD),
        Wallet(coinbase, 10 * BTC),
    ])

    # Observation feed built from every column of the ``test`` DataFrame.
    with Module("coinbase") as coinbase_ns:
        nodes = [Stream(name, test[name].tolist()) for name in test.columns]

    feed = DataFeed([coinbase_ns])

    from tensortrade.environments import TradingEnvironment

    env = TradingEnvironment(
        feed=feed,
        portfolio=portfolio,
        action_scheme='managed-risk',
        reward_scheme='risk-adjusted',
        window_size=20
    )

    agent = DQNAgent(env)
    agent.restore()

    # Prime the loop with an initial step.
    # NOTE(review): ``action`` is computed via ``get_action`` and then
    # immediately overwritten by a random sample — confirm which was intended.
    action = agent.env.action_space.sample()
    state, reward, done, info = agent.env.step(action)
    action = agent.get_action(state)
    action = agent.env.action_space.sample()

    step = 0
    # BUG FIX: the loop previously referenced an undefined name ``n_steps``
    # (NameError at runtime); the parameter is ``n_step``.
    while not done and (step < n_step):
        state, reward, done, info = agent.env.step(action)
        action = agent.get_action(state)
        print('step:', step, '-- action:', action)
        step += 1

    # Plot overall performance and net worth over the run.
    env.portfolio.performance.plot(figsize=(16, 10))
    env.portfolio.performance.net_worth.plot(figsize=(16, 10))
Exemplo n.º 8
0
def main():
    """Continuously step a live CCXT-backed environment and render it."""
    pair = TradingPair(USD, BTC)
    scheme = DynamicOrders(pair)
    exchange = CCXTExchange('bitmex', observation_pairs=[pair],
                            timeframe='1m')
    funded_wallet = Wallet(exchange, .01 * BTC)
    portfolio = Portfolio(USD, wallets=[funded_wallet])
    env = TradingEnvironment(portfolio, exchange, scheme, 'simple')

    # Hold (action 0) forever, redrawing with a short sleep between frames.
    while True:
        env.step(0)
        env.render('human')
        time.sleep(.02)
Exemplo n.º 9
0
def env():
    """Fixture: a 14-step-window environment over a simulated FBM exchange."""
    # Shared configuration injected into every component built inside the
    # TradingContext below.
    context = {
        "base_instrument": USD,
        "actions": {
            "pairs": [USD / BTC],
            "stop_loss_percentages": [0.02, 0.04, 0.06],
            "take_profit_percentages": [0.01, 0.02, 0.03],
            "trade_sizes": 10,
            "trade_side": TradeSide.BUY,
            "trade_type": TradeType.MARKET,
            "order_listener": None,
        },
        "rewards": {
            "return_algorithm": "sharpe",
            "risk_free_rate": 0,
            "target_returns": 0,
        },
        "exchanges": {
            "model_type": "FBM",
            "hurst": 0.61,
            "timeframe": "1d",
            "base_price": 7500,
            "base_volume": 12000,
        },
    }

    with TradingContext(**context):
        # Components pick up their settings from the active context.
        action_scheme = ManagedRiskOrders()
        reward_scheme = RiskAdjustedReturns()
        exchange = Exchange()

        wallets = [Wallet(exchange, 100000 * USD),
                   Wallet(exchange, 0 * BTC)]
        environment = TradingEnvironment(
            portfolio=Portfolio(USD, wallets),
            action_scheme=action_scheme,
            reward_scheme=reward_scheme,
            window_size=14,
            enable_logger=False,
        )

    return environment
Exemplo n.º 10
0
def test_injects_trading_strategy_with_context():
    """Context values propagate into the strategy's environment components."""
    with TradingContext(**config):

        env = TradingEnvironment(exchange='simulated',
                                 action_scheme='discrete',
                                 reward_scheme='simple')
        strategy = ConcreteTradingStrategy(environment=env)

        # Exchange picks up the credentials from the shared config.
        exchange_ctx = strategy.environment.exchange.context
        assert hasattr(exchange_ctx, 'credentials')
        assert exchange_ctx.credentials == config['exchanges']['credentials']

        # Action scheme receives its action-count setting.
        action_ctx = strategy.environment.action_scheme.context
        assert hasattr(action_ctx, 'n_actions')
        assert action_ctx.n_actions == 24

        print(strategy.environment.reward_scheme.context.data)
        # Reward scheme receives its amount setting.
        reward_ctx = strategy.environment.reward_scheme.context
        assert hasattr(reward_ctx, 'amount')
        assert reward_ctx.amount == 100
Exemplo n.º 11
0
def get_env(file_path):
    """Build a simulated-exchange environment from the data at ``file_path``."""
    df = load_data(file_path)

    # Pre-transform features: min-max scale, then fractionally difference.
    pipeline = FeaturePipeline(steps=[
        MinMaxNormalizer(inplace=True),
        FractionalDifference(difference_order=0.6, inplace=True),
    ])

    exchange = SimulatedExchange(base_instrument='USDT',
                                 should_pretransform_obs=True,
                                 feature_pipeline=pipeline)
    # Limit the simulation to the first STEP rows (module-level constant).
    exchange.data_frame = df[:STEP]

    return TradingEnvironment(
        exchange=exchange,
        action_strategy=DiscreteActionStrategy(n_actions=20,
                                               instrument_symbol='BTC/USDT'),
        reward_strategy=SimpleProfitStrategy(),
        feature_pipeline=pipeline)
Exemplo n.º 12
0
def main():
    """Benchmark render speed while stepping a simulated environment."""
    frame = load_dataframe()

    exchange = SimulatedExchange(frame)
    usd_wallet = Wallet(exchange, 0 * USD)
    btc_wallet = Wallet(exchange, .01 * BTC)
    portfolio = Portfolio(BTC, wallets=[usd_wallet, btc_wallet])
    pair = TradingPair(USD, BTC)
    env = TradingEnvironment(portfolio, exchange, DynamicOrders(pair),
                             'simple', window_size=20)

    render_times = []
    for _ in range(300):
        # Mostly hold; ~10% of steps take a random action in [1, 20).
        action = int(random.uniform(1, 20)) if random.random() > .9 else 0
        env.step(action)

        # Time only the render call, over a sliding window of 120 frames.
        start = time.time()
        env.render('human')
        render_times.append(time.time() - start)
        if len(render_times) > 120:
            render_times.pop(0)
        print(f'FPS: {1 / np.mean(render_times):.1f}', end='\r')
def make_env(exchange: str, action: str, reward: str):
    """Assemble a TradingEnvironment from component identifier strings."""
    settings = dict(exchange=exchange,
                    action_strategy=action,
                    reward_strategy=reward)
    return TradingEnvironment(**settings)
]

# Tensorforce PPO agent specification; ``network_spec`` must be defined
# earlier in the script. Values are passed through to Agent creation —
# see the Tensorforce agent documentation for the exact semantics.
agent_spec = {
    "type": "ppo",
    "learning_rate": 0.0003,
    "discount": 1.0,  # no discounting of future rewards
    "likelihood_ratio_clipping": 0.2,
    "estimate_terminal": False,
    "max_episode_timesteps": 200000,
    "network": network_spec,
    "batch_size": 10,
    "update_frequency": 10
}

# Wire the previously constructed exchange, strategies and feature pipeline
# into an environment, then wrap it in a Tensorforce-backed trading strategy.
# NOTE(review): ``exchange``, ``action_strategy``, ``reward_strategy`` and
# ``feature_pipeline`` must be defined earlier in the script.
environment = TradingEnvironment(exchange=exchange,
                                 action_strategy=action_strategy,
                                 reward_strategy=reward_strategy,
                                 feature_pipeline=feature_pipeline)

strategy = TensorforceTradingStrategy(environment=environment,
                                      agent_spec=agent_spec,
                                      save_best_agent=False)
#%%Start Over

performance = strategy.run(episodes=1000, evaluation=False)
#manually store agent
#strategy.save_agent(directory = 'test/', filename = '01')

#%% Restore and Continue
'''
strategy.restore_agent(directory = 'a/', filename = 'best-model')
performance = strategy.run(episodes=(strategy._runner.agent.episodes + 20), evaluation=False)
'''
# Portfolio funded on the previously created exchange.
# NOTE(review): ``coinbase``, ``data`` and ``path`` must be defined earlier
# in the script — they are not created here.
portfolio = Portfolio(USD, [
    Wallet(coinbase, 10000 * USD), 
    Wallet(coinbase, 10 * BTC),
])

# One observation stream per column of the ``data`` DataFrame.
with Module("coinbase") as coinbase_ns:
    nodes = [Stream(name, data[name].tolist()) for name in data.columns]

feed = DataFeed([coinbase_ns])

# Initialize the trading environment
from tensortrade.environments import TradingEnvironment
env_1 = TradingEnvironment(
    feed=feed,
    portfolio=portfolio,
    action_scheme='managed-risk',
    reward_scheme='risk-adjusted',
    window_size=4 
)

# Load the agent from the external (pre-trained) files
# Training parameters: window_size=4, n_episodes=7, n_steps=2000
from tensortrade.agents import A2CAgent

agent = A2CAgent(env_1)
# Restore actor/critic weights from local hdf5 checkpoints.
agent.restore(path=path,
              actor_filename=r'C:\Users\ketap\Downloads\drive-download-20200322T222027Z-001\A2C_actor_7.hdf5',
              critic_filename=r'C:\Users\ketap\Downloads\drive-download-20200322T222027Z-001\A2C_critic_7.hdf5')

# Make agent take a set of actions
action = agent.env.action_space.sample()
Exemplo n.º 16
0
reward_scheme = SimpleProfit()
# -------------------------- Live Exchange ------------------------#
# import ccxt
# from tensortrade.exchanges.live import CCXTExchange
# coinbase = ccxt.coinbasepro()
# exchange = CCXTExchange(exchange=coinbase, base_instrument='USD')
# -------------------------- Simulated Exchange ------------------------#
# df = pd.read_csv('./data/Coinbase_BTCUSD_1h.csv')
# exchange = SimulatedExchange(data_frame=df, base_instrument='USD',feature_pipeline=feature_pipeline)

# from tensortrade.exchanges.simulated import FBMExchange
# exchange = FBMExchange(base_instrument='BTC', timeframe='1h', feature_pipeline=feature_pipeline)
# #################### Creating an Environment ######################

# NOTE(review): ``exchange``, ``action_scheme`` and ``feature_pipeline``
# must be defined earlier in the script (one of the commented exchange
# variants above, uncommented).
environment = TradingEnvironment(exchange=exchange,
                                 action_scheme=action_scheme,
                                 reward_scheme=reward_scheme,
                                 feature_pipeline=feature_pipeline)
# #################### Learning Agents ######################
params = {"learning_rate": 1e-5, 'nminibatches': 1}
# agent = model(policy, environment, model_kwargs=params)
# #################### Training a Strategy ######################
strategy = StableBaselinesTradingStrategy(environment=environment,
                                          model=PPO2,
                                          policy='MlpLnLstmPolicy',
                                          model_kwargs=params)
# 'render.modes': ['human', 'rgb_array']
# episodes=1
# steps=100
# Run five rendered episodes and print the resulting performance frame.
performance = strategy.run(episodes=5, render_mode='human')
print(performance[:])
# performance.balance.plot()
Exemplo n.º 17
0
# Silence warnings for the demo run.
# NOTE(review): ``warn`` must be defined earlier (presumably a no-op
# replacement for warnings.warn) — confirm against the full script.
warnings.warn = warn
warnings.simplefilter(action='ignore', category=FutureWarning)

# Make the repository root importable when run from a notebook directory.
sys.path.append(os.path.dirname(os.path.abspath('')))

from tensortrade.environments import TradingEnvironment
from tensortrade.exchanges.simulated import FBMExchange
from tensortrade.actions import DiscreteActionStrategy
from tensortrade.rewards import SimpleProfitStrategy

# Default-configured fractional-Brownian-motion exchange and strategies.
exchange = FBMExchange()
action_strategy = DiscreteActionStrategy()
reward_strategy = SimpleProfitStrategy()

env = TradingEnvironment(exchange=exchange,
                         action_strategy=action_strategy,
                         reward_strategy=reward_strategy)

obs = env.reset()
# Initial thresholds — presumably sentinels meaning "no sell target yet"
# and "stop-loss disabled"; confirm against the trading loop below.
sell_price = 1e9
stop_price = -1

print('Initial portfolio: ', exchange.portfolio)
for i in range(1000):
    action = 0 if obs['close'] < sell_price else 18
    action = 19 if obs['close'] < stop_price else action

    if i == 0 or portfolio['BTC'] == 0:
        action = 16
        sell_price = obs['close'] + (obs['close'] / 50)