def __init__(self,
             portfolio: Union[Portfolio, str],
             action_scheme: Union[ActionScheme, str],
             reward_scheme: Union[RewardScheme, str],
             feed: DataFeed = None,
             window_size: int = 1,
             use_internal: bool = True,
             **kwargs):
    """
    Arguments:
        portfolio: The `Portfolio` of wallets used to submit and execute orders from.
        action_scheme: The component for transforming an action into an `Order` at each timestep.
        reward_scheme: The component for determining the reward at each timestep.
        feed (optional): The pipeline of features to pass the observations through.
        kwargs (optional): Additional arguments for tuning the environments, logging, etc.
    """
    super().__init__()

    self.portfolio = portfolio
    self.action_scheme = action_scheme
    self.reward_scheme = reward_scheme
    self.feed = feed
    self.window_size = window_size
    self.use_internal = use_internal

    if self.feed:
        self._external_keys = self.feed.next().keys()
        self.feed.reset()

    self.history = ObservationHistory(window_size=window_size)

    self._broker = Broker(exchanges=self.portfolio.exchanges)
    self.clock = Clock()
    self.action_space = None
    self.observation_space = None
    self.viewer = None

    self._enable_logger = kwargs.get('enable_logger', False)
    self._observation_dtype = kwargs.get('dtype', np.float32)
    self._observation_lows = kwargs.get('observation_lows', 0)
    self._observation_highs = kwargs.get('observation_highs', 1)

    if self._enable_logger:
        self.logger = logging.getLogger(kwargs.get('logger_name', __name__))
        self.logger.setLevel(kwargs.get('log_level', logging.DEBUG))

    logging.getLogger('tensorflow').disabled = kwargs.get('disable_tensorflow_logger', True)

    self.compile()
def __init__(self,
             portfolio: Union[Portfolio, str],
             action_scheme: Union[ActionScheme, str],
             reward_scheme: Union[RewardScheme, str],
             feed: DataFeed = None,
             window_size: int = 1,
             use_internal: bool = True,
             renderers: Union[str, List[str], List['BaseRenderer']] = 'screenlog',
             **kwargs):
    """
    Arguments:
        portfolio: The `Portfolio` of wallets used to submit and execute orders from.
        action_scheme: The component for transforming an action into an `Order` at each timestep.
        reward_scheme: The component for determining the reward at each timestep.
        feed (optional): The pipeline of features to pass the observations through.
        renderers (optional): A single renderer or a list of renderers for output, by name or
            as objects. String values: 'screenlog', 'filelog', or 'plotly'. None for no rendering.
        price_history (optional): OHLCV price history feed used for rendering the chart.
            Required if the 'plotly' renderer is used.
        kwargs (optional): Additional arguments for tuning the environments, logging, etc.
    """
    super().__init__()

    self.portfolio = portfolio
    self.action_scheme = action_scheme
    self.reward_scheme = reward_scheme
    self.feed = feed
    self.window_size = window_size
    self.use_internal = use_internal
    self._price_history: pd.DataFrame = kwargs.get('price_history', None)

    if self.feed:
        self._external_keys = self.feed.next().keys()
        self.feed.reset()

    self.history = ObservationHistory(window_size=window_size)

    self._broker = Broker(exchanges=self.portfolio.exchanges)
    self.clock = Clock()
    self.action_space = None
    self.observation_space = None

    if not renderers:
        renderers = []
    elif not isinstance(renderers, list):
        renderers = [renderers]

    self._renderers = []
    for renderer in renderers:
        if isinstance(renderer, str):
            renderer = get(renderer)
        self._renderers.append(renderer)

    self._enable_logger = kwargs.get('enable_logger', False)
    self._observation_dtype = kwargs.get('dtype', np.float32)
    self._observation_lows = kwargs.get('observation_lows', -np.iinfo(np.int64).max)
    self._observation_highs = kwargs.get('observation_highs', np.iinfo(np.int64).max)
    self._max_allowed_loss = kwargs.get('max_allowed_loss', 0.1)

    if self._enable_logger:
        self.logger = logging.getLogger(kwargs.get('logger_name', __name__))
        self.logger.setLevel(kwargs.get('log_level', logging.DEBUG))

    self._max_episodes = None
    self._max_steps = None

    logging.getLogger('tensorflow').disabled = kwargs.get('disable_tensorflow_logger', True)

    self.compile()
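# A minimal usage sketch for the constructor above. It assumes a `Portfolio`,
# an `ActionScheme`, a `RewardScheme`, a `DataFeed`, and an OHLCV `price_history`
# DataFrame have already been built elsewhere, and that `TradingEnvironment` is
# the class this `__init__` belongs to; only arguments visible in the signature
# and kwargs handling above are passed.
def make_env(portfolio, action_scheme, reward_scheme, feed, price_history):
    env = TradingEnvironment(
        portfolio=portfolio,
        action_scheme=action_scheme,
        reward_scheme=reward_scheme,
        feed=feed,
        window_size=20,                     # stack the last 20 observations
        renderers=['screenlog', 'plotly'],  # names resolved to renderer objects via `get(...)`
        price_history=price_history,        # required by the 'plotly' renderer
        max_allowed_loss=0.05,              # overrides the 0.1 default read from kwargs
        enable_logger=True,
    )
    return env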
def test_basic_clock_increment():
    clock = Clock()

    clock.increment()

    assert clock.step == 1
def test_basic_clock_init():
    clock = Clock()

    assert clock
    assert clock.start == 0
    assert clock.step == 0
def __init__(self,
             portfolio: Union[Portfolio, str],
             action_scheme: Union[ActionScheme, str],
             reward_scheme: Union[RewardScheme, str],
             feed: DataFeed = None,
             window_size: int = 1,
             use_internal: bool = True,
             renderer: Union[str, List['AbstractRenderer']] = 'human',
             **kwargs):
    """
    Arguments:
        portfolio: The `Portfolio` of wallets used to submit and execute orders from.
        action_scheme: The component for transforming an action into an `Order` at each timestep.
        reward_scheme: The component for determining the reward at each timestep.
        feed (optional): The pipeline of features to pass the observations through.
        renderer (optional): Rendering mode, 'human' or 'log', or a list of renderer objects.
            None for no rendering.
        chart_height (optional): int, the chart height for 'human' mode.
        price_history (optional): OHLCV price history feed used for rendering the chart.
            Required if renderer is 'human'.
        kwargs (optional): Additional arguments for tuning the environments, logging, etc.
    """
    super().__init__()

    self.portfolio = portfolio
    self.action_scheme = action_scheme
    self.reward_scheme = reward_scheme
    self.feed = feed
    self.window_size = window_size
    self.use_internal = use_internal
    self._price_history: pd.DataFrame = kwargs.get('price_history', None)

    if self.feed:
        self._external_keys = self.feed.next().keys()
        self.feed.reset()

    self.history = ObservationHistory(window_size=window_size)

    self._broker = Broker(exchanges=self.portfolio.exchanges)
    self.clock = Clock()
    self.action_space = None
    self.observation_space = None

    if renderer == 'human':
        self._renderer = [PlotlyTradingChart()]
    else:
        self._renderer = renderer if renderer else []

    self._enable_logger = kwargs.get('enable_logger', False)
    self._observation_dtype = kwargs.get('dtype', np.float32)
    self._observation_lows = kwargs.get('observation_lows', -np.iinfo(np.int32).max)
    self._observation_highs = kwargs.get('observation_highs', np.iinfo(np.int32).max)
    self._max_allowed_loss = kwargs.get('max_allowed_loss', 0.1)

    if self._enable_logger:
        self.logger = logging.getLogger(kwargs.get('logger_name', __name__))
        self.logger.setLevel(kwargs.get('log_level', logging.DEBUG))

    self._max_episodes = None
    self._max_steps = None

    logging.getLogger('tensorflow').disabled = kwargs.get('disable_tensorflow_logger', True)

    self.compile()
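# A minimal usage sketch for the 'human'-mode variant above. It assumes
# `portfolio`, `action_scheme`, `reward_scheme`, `feed`, and an OHLCV
# `price_history` DataFrame already exist, and that `TradingEnvironment` is the
# class this `__init__` belongs to; only arguments visible in the signature,
# docstring, and kwargs handling above are passed.
def make_human_rendered_env(portfolio, action_scheme, reward_scheme, feed, price_history):
    env = TradingEnvironment(
        portfolio=portfolio,
        action_scheme=action_scheme,
        reward_scheme=reward_scheme,
        feed=feed,
        window_size=10,
        renderer='human',             # selects the [PlotlyTradingChart()] branch above
        price_history=price_history,  # required for the 'human' chart
        chart_height=600,             # kwarg documented in the docstring for 'human' mode
        observation_lows=-1.0,        # override the int32-based default bounds
        observation_highs=1.0,
    )
    return env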