Example 1
def fork_sim_env_visuals() -> 'ExecEnv':
    """
    Returns an execution environment of type VISUALS_GENERATION that can be used by the calling thread.

    The environment is created lazily on first call; on later calls its
    databases are wiped and it is forked onto the calling thread.
    """
    from tc2.env.ExecEnv import ExecEnv
    from tc2.env.EnvType import EnvType
    from tc2.env.TimeEnv import TimeEnv
    from tc2.data.data_storage.redis.RedisManager import RedisManager
    from tc2.data.data_storage.mongo.MongoManager import MongoManager
    from tc2.data.stock_data_collection.PolygonDataCollector import PolygonDataCollector

    if shared.sim_env_visuals is not None:
        # Environment already exists: wipe its databases and attach it
        # to the calling thread before handing it back.
        shared.sim_env_visuals.reset_dbs()
        shared.sim_env_visuals.fork_new_thread(creator_env=shared.sim_env_visuals)
        return shared.sim_env_visuals

    # First call: build the shared environment from scratch.
    shared.sim_env_visuals = ExecEnv(shared.program.logfeed_program,
                                     shared.program.logfeed_visuals)
    env_clock = TimeEnv(datetime.now())
    shared.sim_env_visuals.setup_first_time(
        env_type=EnvType.VISUAL_GENERATION,
        time=env_clock,
        data_collector=PolygonDataCollector(
            logfeed_program=shared.program.logfeed_program,
            logfeed_process=shared.program.logfeed_visuals,
            time_env=env_clock),
        mongo=MongoManager(shared.program.logfeed_visuals,
                           EnvType.VISUAL_GENERATION),
        redis=RedisManager(shared.program.logfeed_visuals,
                           EnvType.VISUAL_GENERATION))
    return shared.sim_env_visuals
Example 2
def fork_sim_env_health() -> 'ExecEnv':
    """
    Returns an execution environment of type HEALTH_CHECKING that can be used by the calling thread.

    On the first call the shared environment is created and set up; on
    subsequent calls its databases are wiped and it is forked onto the
    calling thread.
    """
    from tc2.env.ExecEnv import ExecEnv
    from tc2.env.EnvType import EnvType
    from tc2.env.TimeEnv import TimeEnv
    from tc2.data.data_storage.redis.RedisManager import RedisManager
    from tc2.data.data_storage.mongo.MongoManager import MongoManager
    from tc2.data.stock_data_collection.PolygonDataCollector import PolygonDataCollector

    if shared.sim_env_health is None:
        # Health checks run without log feeds, hence the None arguments.
        shared.sim_env_health = ExecEnv(None, None)
        sim_time = TimeEnv(datetime.now())
        shared.sim_env_health.setup_first_time(
            env_type=EnvType.HEALTH_CHECKING,
            time=sim_time,
            data_collector=PolygonDataCollector(logfeed_program=None,
                                                logfeed_process=None,
                                                time_env=sim_time),
            mongo=MongoManager(None, EnvType.HEALTH_CHECKING),
            redis=RedisManager(None, EnvType.HEALTH_CHECKING))
        return shared.sim_env_health

    # Wipe databases.
    # BUG FIX: this previously called shared.sim_env_visuals.reset_dbs()
    # (copy-paste from fork_sim_env_visuals), wiping the wrong environment's
    # databases and leaving the health env's stale.
    shared.sim_env_health.reset_dbs()

    shared.sim_env_health.fork_new_thread(creator_env=shared.sim_env_health)
    return shared.sim_env_health
Example 3
    def __init__(self, logfeed_program: LogFeed, logfeed_process: LogFeed,
                 time_env: TimeEnv) -> None:
        """Initialize the collector with its log feeds and time environment."""
        super().__init__(
            logfeed_program=logfeed_program,
            logfeed_process=logfeed_process,
            time_env=time_env,
        )

        # Rate-limiting state: the earliest moment the next API call may be
        # issued, and the current wait imposed by rate limiting
        # (presumably seconds — confirm against the request loop).
        self.next_api_call = time_env.now()
        self.rate_limit_wait = 0
Example 4
    def load_pre_reqs(self) -> None:
        """
        Creates the program's log feeds, the live time environment, the
        database managers, and the polygon.io data collector; sets up the
        live execution environment; and exports broker credentials as
        environment variables.
        """
        # Initialize log feeds.
        self.logfeed_data = LogFeed(LogCategory.DATA)
        self.logfeed_data.log(LogLevel.ERROR,
                              '.             ...PROGRAM RESTARTED...')
        self.logfeed_trading = LogFeed(LogCategory.LIVE_TRADING)
        self.logfeed_trading.log(LogLevel.ERROR,
                                 '.             ...PROGRAM RESTARTED...')
        self.logfeed_optimization = LogFeed(LogCategory.OPTIMIZATION)
        # CONSISTENCY FIX: restored the leading '.' so this restart marker
        # matches the one logged by every other feed.
        self.logfeed_optimization.log(LogLevel.ERROR,
                                      '.             ...PROGRAM RESTARTED...')
        self.logfeed_visuals = LogFeed(LogCategory.VISUALS)
        self.logfeed_visuals.log(LogLevel.ERROR,
                                 '.             ...PROGRAM RESTARTED...')
        self.logfeed_api = LogFeed(LogCategory.API)
        self.logfeed_api.log(LogLevel.ERROR,
                             '.             ...PROGRAM RESTARTED...')

        # Create time environment for live data collection and trading.
        live_time_env = TimeEnv(datetime.now())

        # Create database managers but don't initialize connections.
        live_redis = RedisManager(self.logfeed_program, EnvType.LIVE)
        live_mongo = MongoManager(self.logfeed_program, EnvType.LIVE)

        # Initialize collector manager to access polygon.io.
        live_data_collector = PolygonDataCollector(
            logfeed_program=self.logfeed_program,
            logfeed_process=self.logfeed_data,
            time_env=live_time_env)

        # Initialize the live execution environment with program logs.
        self.live_env = ExecEnv(logfeed_program=self.logfeed_program,
                                logfeed_process=self.logfeed_program)

        # Setup the live execution environment with live time & data variables.
        self.live_env.setup_first_time(env_type=EnvType.LIVE,
                                       time=live_time_env,
                                       data_collector=live_data_collector,
                                       mongo=live_mongo,
                                       redis=live_redis)

        # Set Alpaca credentials as environment variables so we don't have to pass them around.
        # Idiom fix: the comparison is already a bool; no ternary needed.
        live_trading = Settings.get_endpoint(self.live_env) == BrokerEndpoint.LIVE
        os.environ['APCA_API_BASE_URL'] = 'https://api.alpaca.markets' \
            if live_trading else 'https://paper-api.alpaca.markets'
        os.environ['APCA_API_KEY_ID'] = self.live_env.get_setting('alpaca.live_key_id') \
            if live_trading else self.live_env.get_setting('alpaca.paper_key_id')
        os.environ['APCA_API_SECRET_KEY'] = self.live_env.get_setting('alpaca.live_secret_key') \
            if live_trading else self.live_env.get_setting('alpaca.paper_secret_key')
        # NOTE(review): polygon access always uses the live Alpaca key, even
        # when paper trading — looks intentional, but confirm.
        os.environ['POLYGON_KEY_ID'] = self.live_env.get_setting(
            'alpaca.live_key_id')
Example 5
    def run(self) -> None:
        """Debug task: replay LongShortStrategy on a fixed historical day."""
        self.program.logfeed_program.log(
            LogLevel.INFO, 'Debug task setting up simulation environment')

        # The historical day to simulate.
        target_day = date(year=2020, month=3, day=10)

        # Clone the live environment so it can run on this thread.
        live_clone = ExecEnv(self.program.logfeed_program,
                             self.program.logfeed_program, self.program.live_env)
        live_clone.fork_new_thread()

        # Assemble the simulated environment: clock, collector, databases.
        sim_clock = TimeEnv(
            datetime.combine(target_day, time(hour=11, minute=3, second=40)))
        sim_collector = PolygonDataCollector(
            logfeed_program=self.program.logfeed_program,
            logfeed_process=self.program.logfeed_program,
            time_env=sim_clock)
        sim_redis = RedisManager(self.program.logfeed_program,
                                 EnvType.STARTUP_DEBUG_1)
        sim_mongo = MongoManager(self.program.logfeed_program,
                                 EnvType.STARTUP_DEBUG_1)
        sim_env = ExecEnv(self.program.logfeed_program,
                          self.program.logfeed_program)
        sim_env.setup_first_time(env_type=EnvType.STARTUP_DEBUG_1,
                                 time=sim_clock,
                                 data_collector=sim_collector,
                                 mongo=sim_mongo,
                                 redis=sim_redis)

        # Place the strategy inside the simulated environment.
        strategy = LongShortStrategy(env=sim_env,
                                     symbols=['SPY', 'SPXL', 'SPXS'])

        # Simulate it so its output lands in logfeed_optimization.
        self.program.logfeed_program.log(
            LogLevel.INFO, 'Creating StrategySimulator for debug task')
        simulator = StrategySimulator(strategy,
                                      live_clone,
                                      all_symbols=['SPY', 'SPXL', 'SPXS'])
        self.program.logfeed_program.log(
            LogLevel.INFO, 'Running simulation of LongShortStrategy')
        simulator.run(warmup_days=2)
        self.program.logfeed_program.log(
            LogLevel.INFO,
            f'Completed LongShortStrategy simulation. Results: {strategy.run_info.to_json()}')
    def __init__(self, logfeed_program: LogFeed, logfeed_process: LogFeed, time_env: TimeEnv) -> None:
        """Initialize the collector plus its rate-limit and profiling state."""
        super().__init__(logfeed_program=logfeed_program,
                         logfeed_process=logfeed_process,
                         time_env=time_env)

        # Rate-limiting state: earliest moment the next API call may go out,
        # and the current rate-limit wait (presumably seconds — confirm).
        self.next_api_call = time_env.now()
        self.rate_limit_wait = 0

        # Per-stage timing samples plus collected debug messages.
        (self.timings_total,
         self.timings_basket,
         self.timings_fetch,
         self.timings_parse,
         self.timings_avg_block1,
         self.timings_avg_block2,
         self.timings_avg_block3,
         self.debug_msgs) = ([] for _ in range(8))
Example 7
    def optimize_strategy(self, strategy: AbstractStrategy,
                          symbol: str) -> None:
        """
        Runs simulations from START_DATE thru two days ago.
        Tries hundreds of model scoring systems and picks the highest performing one.

        The strategy is cloned into an OPTIMIZATION environment, warmup data
        is copied in from the live environment, and each remaining day on
        file is evaluated in turn, merging results into one evaluation.
        """
        self.info_process(
            f'Optimizing {strategy.__class__.__name__}\'s weights using symbol: {symbol}'
        )
        end_date = self.time().now() - timedelta(days=2)
        dates_on_file = self.mongo().get_dates_on_file(symbol, START_DATE,
                                                       end_date)
        start_index = OPTIMIZATION_WARMUP_DAYS
        # Need warmup days plus at least one evaluable day (the final two
        # days are excluded below).
        if len(dates_on_file) < start_index + 3:
            self.warn_process(
                f'Insufficient historical data ({len(dates_on_file)} days) for {symbol}'
            )
            return
        evaluation = None

        # Initialize objects that make up a kind of container for this evaluation
        sim_time_env = TimeEnv(
            datetime.combine(dates_on_file[start_index - 1], OPEN_TIME))
        sim_env = ExecEnv(self.logfeed_program, self.logfeed_process)
        sim_env.setup_first_time(env_type=EnvType.OPTIMIZATION,
                                 time=sim_time_env,
                                 data_collector=PolygonDataCollector(
                                     logfeed_program=self.logfeed_program,
                                     logfeed_process=self.logfeed_process,
                                     time_env=sim_time_env),
                                 mongo=MongoManager(self.logfeed_program,
                                                    EnvType.OPTIMIZATION),
                                 redis=RedisManager(self.logfeed_process,
                                                    EnvType.OPTIMIZATION))

        # Create a ModelFeeder for the simulated environment
        sim_model_feeder = ModelFeeder(sim_env)

        # Place the strategy in the simulated environment
        strategy = self._clone_strategy(strategy, sim_env)

        # Copy data we need from live environment into simulated environment
        data_copy_error = candle_util.init_simulation_data(
            live_env=self,
            sim_env=sim_env,
            symbols=[strategy.get_symbol()],
            days=start_index - 2,
            end_date=dates_on_file[start_index - 1],
            model_feeder=sim_model_feeder)
        if data_copy_error is not None:
            self.warn_process(data_copy_error)
            return

        for day_to_eval in dates_on_file[start_index:len(dates_on_file) - 2]:
            # Cancel simulations part-way through if a stop has been requested
            if not self.running:
                return

            # Copy day's data into the simulated environment but don't train analysis models.
            # BUG FIX: end_date previously stayed pinned at the warmup day
            # (dates_on_file[start_index - 1]) for every iteration, so the
            # copied data never advanced with the day under evaluation.
            data_copy_error = candle_util.init_simulation_data(
                live_env=self,
                sim_env=sim_env,
                symbols=[strategy.get_symbol()],
                days=2,
                end_date=day_to_eval,
                model_feeder=sim_model_feeder,
                skip_last_day_training=True)
            if data_copy_error is not None:
                self.warn_process(data_copy_error)
                self.warn_process(
                    f'Optimization of {strategy.__class__.__name__} on '
                    f'{symbol} failed because the program is missing data on {day_to_eval:%Y-%m-%d}'
                )
                # BUG FIX: previously fell through and evaluated the day
                # despite the missing data; skip it instead.
                continue

            # Move the perspective to the historical day
            sim_env.time().set_moment(
                datetime.combine(day_to_eval,
                                 strategy.times_active().get_start_time()))

            # Create a new strategy for this run
            strategy = self._clone_strategy(strategy, sim_env)

            # Run evaluation on the day
            # TODO Change this to run an optimization simulation
            next_evaluation = StrategyEvaluator(strategy).evaluate()

            # Merge the results with all the evaluations from previous days
            if evaluation is None:
                evaluation = next_evaluation
                evaluation._calculate_metrics()
            else:
                evaluation.combine(next_evaluation)

        # Print results after evaluating each day
        if evaluation is not None:
            self.warn_process(
                'Evaluation results of {0} for {1}:\n\t total days = {2}, viable days: {3}, pct days entered = {4}%, '
                'avg profit = {5}, \n\tmedian profit = {6}, win ratio = {7}, entry-attempt ratio = {8}'
                .format(strategy.__class__.__name__, symbol,
                        evaluation.days_evaluated, evaluation.days_viable,
                        (100 * evaluation.days_entered /
                         evaluation.days_evaluated), evaluation.avg_profit,
                        evaluation.med_profit, evaluation.win_ratio,
                        evaluation.entry_ratio))

        return