def test_groupement_portfolio(self):
    """Transactions on the same ticker are grouped into a single position.

    Removed a no-op ``assertTrue(True)`` and switched ``assertTrue(a == b)``
    to ``assertEqual(a, b)`` so failures report both values.
    """
    wallet = Wallet(initial_balance)
    wallet.push(Transaction('SPCE', 'buy', 10, 10.00))
    wallet.push(Transaction('TSLA', 'buy', 2, 100.00))
    # Kept as a variable because the same sell is pushed again below.
    transaction3 = Transaction('TSLA', 'sell', 1, 200.00)
    wallet.push(transaction3)
    df = wallet._ledger.as_frame()
    # First two ledger rows appear in insertion order: SPCE then TSLA.
    self.assertEqual(df['ticker_name'].values[0], 'SPCE')
    self.assertEqual(df['ticker_name'].values[1], 'TSLA')
    # Positions are netted: 10 SPCE bought; 2 TSLA bought minus 1 sold.
    self.assertEqual(wallet._portfolio._portfolio[0].quantity, 10)
    self.assertEqual(wallet._portfolio._portfolio[1].quantity, 1)
    # Selling the remaining TSLA share removes that position entirely,
    # leaving only the SPCE position.
    wallet.push(transaction3)
    self.assertEqual(len(wallet._portfolio._portfolio), 1)
def __init__(self,
             tickers: "Text | List[Text]" = None,
             initial_balance=None,
             broker: Broker = None,
             wallet: Wallet = None,
             env: Environment = None,
             data_streamer: Data_Streamer = None,
             neural_network: Neural_Network = None,
             reward_strategy: Reward_Strategy = None,
             action_strategy: Action_Strategy = None,
             load_name=None):
    """Assemble the trading components, creating defaults where omitted.

    Note: the original annotation ``Text or List[Text]`` evaluated to just
    ``Text`` at definition time (``or`` short-circuits on the truthy type
    object); a string annotation expresses the union without requiring a
    ``typing.Union`` import.

    Args:
        tickers: A single symbol or a list of symbols.
        initial_balance: Starting cash for the default wallet.
        broker: Broker to trade through; a ``Fake_Broker`` backed by a
            fresh ``Wallet`` is created when none is supplied.
        wallet: Wallet to use; defaults to the broker's own wallet.
        env: Pre-built environment; one is assembled from the other
            components when omitted.
        data_streamer: Market-data source; built from ``tickers`` if absent.
        neural_network: Model to use; defaults to ``Deep_Q_Learning`` sized
            from the environment's observation/action spaces.
        reward_strategy: Reward computation; defaults to the simple one.
        action_strategy: Action mapping; defaults to the simple one.
        load_name: Checkpoint name forwarded to the default network.
    """
    # Normalise a single ticker symbol into a one-element list.
    self._tickers = tickers if isinstance(tickers, list) else [tickers]
    self._initial_balance = initial_balance
    # Fall back to a simulated broker backed by a fresh wallet.
    self.broker = broker if isinstance(broker, Broker) else Fake_Broker(
        Wallet(self._initial_balance))
    self.data_streamer = data_streamer or Data_Streamer(tickers)
    self.wallet = wallet or self.broker.wallet
    self.action_strategy = action_strategy or Simple_Action_Strategy()
    self.reward_strategy = reward_strategy or Simple_Reward_Strategy()
    self.env = env or Environment(data_streamer=self.data_streamer,
                                  broker=self.broker,
                                  action_strategy=self.action_strategy,
                                  reward_strategy=self.reward_strategy)
    # Default network is sized from the environment's spaces.
    self.neural_network = neural_network if isinstance(
        neural_network, Neural_Network) else Deep_Q_Learning(
            input_shape=self.env.observation_space.shape,
            output_size=self.env.action_space.n,
            load_name=load_name)
def test_reset(self):
    """reset() restores the initial balance and empties portfolio and ledger.

    Uses ``assertEqual`` instead of ``assertTrue(len(x) == 0)`` so a failure
    reports the actual length.
    """
    wallet = Wallet(initial_balance)
    wallet.push(Transaction('SPCE', 'buy', 10, 10.14))
    wallet.push(Transaction('TSLA', 'buy', 2, 100.14))
    wallet.reset()
    self.assertEqual(wallet.balance, initial_balance)
    self.assertEqual(len(wallet._portfolio), 0)
    self.assertEqual(len(wallet._ledger), 0)
def test_reset(self):
    """After reset(), each ticker's step counter starts back at zero.

    Drops the unused loop index and the unused ``state`` binding from the
    original.
    """
    tickers = ['SPCE', 'TSLA', 'AAPL']
    initial_balance = 1000
    broker = Fake_Broker(Wallet(initial_balance))
    data_streamer = Data_Streamer(tickers)
    env = Environment(data_streamer, broker)
    for ticker_name in tickers:
        # Advance the environment a few steps, then rewind it.
        for _ in range(10):
            env.step(2, ticker_name)
        env.reset(ticker_name)
        self.assertEqual(env.iter[ticker_name], 0)
    del data_streamer
    del env
def test_capacity(self):
    """The state returned by reset() has exactly ``history_capacity`` entries.

    The original rebound ``state`` to the result of ``env.step`` without
    ever using it; the return value is now discarded explicitly.
    """
    ticker = 'SPCE'
    initial_balance = 1000
    broker = Fake_Broker(Wallet(initial_balance))
    data_streamer = Data_Streamer(ticker)
    for history_capacity in [5, 10, 15, 20]:
        env = Environment(data_streamer, broker,
                          history_capacity=history_capacity)
        state = env.reset(ticker)
        self.assertEqual(len(state), history_capacity)
        # One step just to exercise stepping at this capacity.
        env.step(0, ticker)
        data_streamer.reset()
        del env
def __init__(self,
             tickers: "Text | List[Text]" = None,
             initial_balance=None,
             broker: Broker = None,
             wallet: Wallet = None,
             env: Environment = None,
             data_streamer: Data_Streamer = None,
             neural_network: Neural_Network = None,
             reward_strategy: Reward_Strategy = None,
             action_strategy: Action_Strategy = None,
             load_name=None,
             hyperparams: Dict = None,
             **kwargs):
    """Assemble the trading components, creating defaults where omitted.

    Note: the original annotation ``Text or List[Text]`` evaluated to just
    ``Text`` at definition time (``or`` short-circuits on the truthy type
    object); a string annotation expresses the union without requiring a
    ``typing.Union`` import.

    Args:
        tickers: A single symbol or a list of symbols.
        initial_balance: Starting cash for the default wallet.
        broker: Broker to trade through; defaults to a ``Fake_Broker``.
        wallet: Wallet to use; defaults to the broker's own wallet.
        env: Pre-built environment; assembled from the other components
            when omitted.
        data_streamer: Market-data source; built from ``tickers`` if absent.
        neural_network: Model to use (stored as-is, may be ``None``).
        reward_strategy: Reward computation; defaults to the simple one.
        action_strategy: Action mapping; defaults to the simple one.
        load_name: Checkpoint name (currently unused in this body).
        hyperparams: Free-form hyperparameter mapping, stored verbatim.
        **kwargs: ``history_capacity`` (default 30), ``random`` (default
            False), ``features_function`` (default ``'basic_features'``),
            ``renderer`` (default ``Basic_Plot()``).
    """
    self.hyperparams = hyperparams
    self._history_capacity = kwargs.get('history_capacity', 30)
    self._random = kwargs.get('random', False)
    self.features_function = kwargs.get('features_function',
                                        'basic_features')
    # Normalise a single ticker symbol into a one-element list.
    self._tickers = tickers if isinstance(tickers, list) else [tickers]
    # Fall back to a simulated broker backed by a fresh wallet.
    self.broker = broker if isinstance(broker, Broker) else Fake_Broker(
        Wallet(initial_balance))
    self.data_streamer = data_streamer or Data_Streamer(
        tickers,
        random=self._random,
        history_capacity=self._history_capacity,
        features_function=self.features_function)
    self.wallet = wallet or self.broker.wallet
    self.action_strategy = action_strategy or Simple_Action_Strategy()
    self.reward_strategy = reward_strategy or Simple_Reward_Strategy()
    # NOTE(review): dict.get eagerly builds the Basic_Plot() default even
    # when a renderer is supplied — harmless if construction is cheap.
    self.renderer = kwargs.get('renderer', Basic_Plot())
    self.env = env or Environment(data_streamer=self.data_streamer,
                                  broker=self.broker,
                                  action_strategy=self.action_strategy,
                                  reward_strategy=self.reward_strategy,
                                  renderer=self.renderer,
                                  history_capacity=self._history_capacity)
    self.neural_network = neural_network
def test_balances(self):
    """Total balance always equals free + locked; selling cannot lose money
    here because every sell price exceeds every buy price.

    Cleanups vs. the original: removed a commented-out ``print``, removed a
    duplicate free+locked assertion, replaced a manual index counter with
    ``enumerate``, and used assertEqual/assertGreaterEqual for diagnostics.
    """
    wallet = Wallet(initial_balance)
    tickers = ['SPCE', 'TSLA', 'BCART', 'AA', 'AAA']
    for t in tickers:
        buy_price = random.uniform(0.00, 25.00)
        wallet.push(Transaction(t, 'buy', 10, buy_price))
        # The invariant must hold after every single transaction.
        self.assertEqual(wallet.balance,
                         wallet.free_balance + wallet.locked_balance)
    self.assertEqual(len(wallet._ledger._transactions), len(tickers))
    # Sell half of each position at a strictly higher price: 5 shares must
    # remain per position and the total balance can only have grown.
    for i, t in enumerate(tickers):
        sell_price = random.uniform(25.00, 50.00)
        wallet.push(Transaction(t, 'sell', 5, sell_price))
        self.assertEqual(wallet._portfolio._portfolio[i].quantity, 5)
    self.assertGreaterEqual(wallet.balance, initial_balance)
def test_profit(self):
    """A matched BUY/SELL pair yields the same +1 balance change whether or
    not an extra open buy position remains in the wallet."""
    w = Wallet(initial_balance)
    w.push(BUY_tr)
    w.push(SELL_tr)
    self.assertEqual(w.balance, initial_balance + 1)
    # Start over and add an extra, unsold buy before the same pair.
    w.reset()
    for tr in (BUY_tr, BUY_tr, SELL_tr):
        w.push(tr)
    self.assertEqual(w.balance, initial_balance + 1)
def test_initialisation(self):
    """A freshly created wallet reports its full starting balance."""
    w = Wallet(initial_balance)
    for observed in (w.initial_balance, w.balance):
        self.assertEqual(observed, initial_balance)