Example #1
    def __init__(self,
                 df,
                 initial_balance=10000,
                 commission=0.0025,
                 reward_func='sortino',
                 **kwargs):
        super(BitcoinTradingEnv, self).__init__()

        self.initial_balance = initial_balance
        self.commission = commission
        self.reward_func = reward_func

        self.df = df.fillna(method='bfill').reset_index()
        self.stationary_df = log_and_difference(
            self.df,
            ['Open', 'High', 'Low', 'Close', 'Volume BTC', 'Volume USD'])

        benchmarks = kwargs.get('benchmarks', [])
        self.benchmarks = [
            {
                'label': 'Buy and HODL',
                'values': buy_and_hodl(self.df['Close'], initial_balance, commission)
            },
            {
                'label': 'RSI Divergence',
                'values': rsi_divergence(self.df['Close'], initial_balance, commission)
            },
            {
                'label': 'SMA Crossover',
                'values': sma_crossover(self.df['Close'], initial_balance, commission)
            },
            *benchmarks,
        ]

        self.forecast_len = kwargs.get('forecast_len', 10)
        self.confidence_interval = kwargs.get('confidence_interval', 0.95)
        self.obs_shape = (1, 5 + len(self.df.columns) - 2 +
                          (self.forecast_len * 3))

        # Actions of the format Buy 1/4, Sell 3/4, Hold (amount ignored), etc.
        self.action_space = spaces.Discrete(12)

        # Observes the price action, indicators, account action, price forecasts
        self.observation_space = spaces.Box(low=0,
                                            high=1,
                                            shape=self.obs_shape,
                                            dtype=np.float16)
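The comment above pairs each of the 12 discrete actions with an action type and a fractional amount. A minimal decoding sketch, assuming 3 action types (buy, sell, hold) combined with 4 amount fractions; this convention is an assumption for illustration, and the project's own step() logic may differ:

# Hypothetical decoding of the Discrete(12) action space (3 types x 4 amounts).
# The source project's step() may use a different convention.
def decode_action(action: int):
    action_type = action // 4          # 0 = buy, 1 = sell, 2 = hold
    amount = (action % 4 + 1) / 4      # 1/4, 1/2, 3/4 or 4/4
    return action_type, amount

assert decode_action(0) == (0, 0.25)   # Buy 1/4
assert decode_action(6) == (1, 0.75)   # Sell 3/4
assert decode_action(11) == (2, 1.0)   # Hold (amount ignored)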
Example #2
    def __init__(self,
                 df,
                 reward_func='sortino',
                 initialFunds=10000,
                 commission=0.0025,
                 forecastLength=10,
                 confidenceInterval=0.95,
                 scaler=preprocessing.MinMaxScaler()):
        """
        :param df:
        :param reward_func:   Risk-adjusted return metric to be used.   # Obligatory argument for gym.Env
        :param initialFunds: Initial resources the agent holds in USD.
        :param commission: Commission the "middleman" takes per transaction.
        :param forecastLength:
        :param confidenceInterval:
        :param scaler: Function to rescale data, default=minMaxScaler to normalize
        """

        # Set constant values, and functions to use throughout
        self.initialFunds = initialFunds
        self.commission = commission
        self.reward_func = reward_func  # ['sortino', 'calmar', 'omega', 'dummy']
        self.forecastLength = forecastLength
        self.confidenceInterval = confidenceInterval
        self.scaler = scaler
        self.strategies = {
            'sortino': sortino_ratio,
            'calmar': calmar_ratio,
            'omega': omega_ratio
        }

        # Prepare datasets
        # Back-fill NaNs with the next non-NaN value and reset the index
        self.df = df.fillna(method='bfill').reset_index()
        self.stacionaryDf = log_and_difference(
            self.df, ['Open', 'High', 'Low', 'Close'])

        # Agent spaces definition
        # fixme - the current model on GitHub still uses spaces.Discrete(12)
        self.action_space = spaces.MultiDiscrete([3, 10])
        self.obsShape = (1, 5 + len(self.df.columns) - 2 +
                         (self.forecastLength * 3))  # fixme - why the 5 -2 ...
        # print(self.obsShape)

        # Observes the OHLCV values, net worth, and trade history.
        # A Box in R^n with an identical bound for each dimension
        self.observation_space = spaces.Box(low=0,
                                            high=1,
                                            shape=self.obsShape,
                                            dtype=np.float16)
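Example #2 replaces the flat Discrete(12) space with MultiDiscrete([3, 10]), separating the action type from the amount. A minimal sketch of how such an action could be interpreted, assuming the first component selects buy/sell/hold and the second selects tenths of the available funds; this mapping is an assumption for illustration, not the project's confirmed logic:

# Hypothetical interpretation of a MultiDiscrete([3, 10]) action.
# action[0] in {0, 1, 2} -> buy / sell / hold
# action[1] in {0, ..., 9} -> mapped here to fractions 1/10 .. 10/10
def interpret_action(action):
    action_type = action[0]
    fraction = (action[1] + 1) / 10
    return action_type, fraction

print(interpret_action([0, 4]))  # (0, 0.5) -> buy with half the available funds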
Example #3
    def __init__(self,
                 data: pd.DataFrame,
                 feature_columns: List[str],
                 initial_balance: float = 10000,
                 commission: float = 0.0025,
                 reward_function: str = 'sortino',
                 returns_lookback: int = 100,
                 trade_on_open: bool = True) -> None:
        """
        :param data: pandas DataFrame, containing data for the simulation,
                     'open', 'high', 'low', 'close', 'volume' columns should be present
        :param feature_columns: names of the DataFrame columns used as observation features
        :param initial_balance: initial trading balance
        :param commission: commission to be applied on trading
        :param reward_function: type of reward function, calmar, sortino and omega allowed
        :param returns_lookback: last values in portfolio to be used when computing the reward
        :param trade_on_open: use the next bar's open price when opening/closing positions
        """
        super(TradingEnv, self).__init__()
        self._logger = get_logger(self.__class__.__name__)

        self._feature_cols = feature_columns

        self._check_initial_data(data, cols=self._feature_cols)

        self._data = data
        self._scaled_data = log_and_difference(data, feature_columns)
        self._initial_balance = initial_balance
        self._commission = commission
        self._reward_function = reward_function
        self._returns_lookback = returns_lookback
        self._trade_on_open = trade_on_open

        # state and action spaces
        self._obs_shape = (1, len(self._feature_cols))  # todo: add account info here
        self.observation_space = spaces.Box(low=0,
                                            high=1,
                                            shape=self._obs_shape,
                                            dtype=np.float32)
        self.action_space = spaces.Discrete(n=3)

        # placeholders
        self.current_step = 0
        self.cash = self._initial_balance
        self.position = None
        self.position_history = [None]
        self.portfolio = []

        self.viewer = None
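A minimal usage sketch for Example #3, assuming the TradingEnv class above is importable, implements the standard gym reset()/step() API, and that a CSV with 'open', 'high', 'low', 'close', 'volume' columns exists; the file name and feature list below are placeholders, not part of the original project:

import pandas as pd

# 'ohlcv.csv' and the feature list are illustrative placeholders.
data = pd.read_csv('ohlcv.csv')
env = TradingEnv(data,
                 feature_columns=['open', 'high', 'low', 'close', 'volume'],
                 initial_balance=10000,
                 commission=0.0025,
                 reward_function='sortino')

obs = env.reset()
obs, reward, done, info = env.step(env.action_space.sample())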
Example #4
    def __init__(self, df, initial_balance=10000, commission=0.0003, reward_func='sortino', **kwargs):
        super(BitcoinTradingEnv, self).__init__()

        self.initial_balance = initial_balance
        self.commission = commission
        self.reward_func = reward_func

        self.df = df.fillna(method='bfill')
        self.df = add_indicators(self.df.reset_index())
        self.stationary_df = log_and_difference(
            self.df, ['Open', 'High', 'Low', 'Close', 'Volume BTC', 'Volume USD'])

        self.n_forecasts = kwargs.get('n_forecasts', 10)
        self.confidence_interval = kwargs.get('confidence_interval', 0.95)
        self.obs_shape = (1, 5 + len(self.df.columns) -
                          2 + (self.n_forecasts * 3))

        # Actions of the format Buy 1/4, Sell 3/4, Hold (amount ignored), etc.
        self.action_space = spaces.Discrete(12)

        # Observes the price action, indicators, account action, price forecasts
        self.observation_space = spaces.Box(
            low=0, high=1, shape=self.obs_shape, dtype=np.float16)
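All four examples stationarize the raw DataFrame with log_and_difference, whose definition is not shown. A plausible stand-in, assuming the helper log-transforms the listed columns and then takes first differences so the series become roughly stationary; the projects' actual implementations may handle zeros and the first row differently:

import numpy as np
import pandas as pd

def log_and_difference(df: pd.DataFrame, columns) -> pd.DataFrame:
    # Assumed behaviour: log-transform each listed column, then first-difference it.
    out = df.copy()
    for col in columns:
        out[col] = np.log(out[col].replace(0, np.nan)).diff()
    return out.fillna(0)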