Code Example #1
File: Settings.py  Project: maxilie/TC2_public
 def get_strategy_max_purchase_pct(cls, env: ExecEnv, strategy_id: str) -> float:
     """
     Returns the maximum percentage of our account balance to use on the given strategy.
     """
     setting_str = env.get_setting('max_purchase_pct.' + strategy_id)
     if setting_str == '':
         env.warn_main(f'Maximum purchase percent for {strategy_id} not set. Using default value of '
                       f'{100 * cls.DEFAULT_STRATEGY_MAX_PURCHASE_PCT:.0f}%')
         return cls.DEFAULT_STRATEGY_MAX_PURCHASE_PCT
     else:
         return float(setting_str)
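
A minimal usage sketch, assuming this getter is a classmethod (as its cls parameter suggests) and that env is a configured ExecEnv; the strategy id and balance_usd are hypothetical:

    max_pct = Settings.get_strategy_max_purchase_pct(env, 'my_strategy')
    purchase_limit_usd = balance_usd * max_pct  # cap spending at max_pct of the balance
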
Code Example #2
File: Settings.py  Project: maxilie/TC2_public
 def get_strategy_max_purchase_usd(cls, env: ExecEnv, strategy_id: str) -> float:
     """
     Returns the maximum amount of money to use on the given strategy.
     """
     setting_str = env.get_setting('max_purchase_usd.' + strategy_id)
     if setting_str == '':
         env.warn_main(f'Maximum purchase amount for {strategy_id} not set. '
                       f'Using default value of ${cls.DEFAULT_STRATEGY_MAX_PURCHASE_USD}')
         return cls.DEFAULT_STRATEGY_MAX_PURCHASE_USD
     else:
         return float(setting_str)
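
Taken together with Example #1, a hedged sketch of how a caller might enforce both caps; everything except the two Settings getters is hypothetical:

    pct_cap_usd = balance_usd * Settings.get_strategy_max_purchase_pct(env, strategy_id)
    usd_cap = Settings.get_strategy_max_purchase_usd(env, strategy_id)
    purchase_limit = min(pct_cap_usd, usd_cap)  # respect whichever cap is tighter
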
Code Example #3
File: TC2Program.py  Project: maxilie/TC2_public
    def load_pre_reqs(self) -> None:
        # Initialize log feeds.
        self.logfeed_data = LogFeed(LogCategory.DATA)
        self.logfeed_data.log(LogLevel.ERROR,
                              '.             ...PROGRAM RESTARTED...')
        self.logfeed_trading = LogFeed(LogCategory.LIVE_TRADING)
        self.logfeed_trading.log(LogLevel.ERROR,
                                 '.             ...PROGRAM RESTARTED...')
        self.logfeed_optimization = LogFeed(LogCategory.OPTIMIZATION)
        self.logfeed_optimization.log(LogLevel.ERROR,
                                      '.             ...PROGRAM RESTARTED...')
        self.logfeed_visuals = LogFeed(LogCategory.VISUALS)
        self.logfeed_visuals.log(LogLevel.ERROR,
                                 '.             ...PROGRAM RESTARTED...')
        self.logfeed_api = LogFeed(LogCategory.API)
        self.logfeed_api.log(LogLevel.ERROR,
                             '.             ...PROGRAM RESTARTED...')

        # Create time environment for live data collection and trading.
        live_time_env = TimeEnv(datetime.now())

        # Create database managers but don't initialize connections.
        live_redis = RedisManager(self.logfeed_program, EnvType.LIVE)
        live_mongo = MongoManager(self.logfeed_program, EnvType.LIVE)

        # Initialize collector manager to access polygon.io.
        live_data_collector = PolygonDataCollector(
            logfeed_program=self.logfeed_program,
            logfeed_process=self.logfeed_data,
            time_env=live_time_env)

        # Initialize the live execution environment with program logs.
        self.live_env = ExecEnv(logfeed_program=self.logfeed_program,
                                logfeed_process=self.logfeed_program)

        # Setup the live execution environment with live time & data variables.
        self.live_env.setup_first_time(env_type=EnvType.LIVE,
                                       time=live_time_env,
                                       data_collector=live_data_collector,
                                       mongo=live_mongo,
                                       redis=live_redis)

        # Set Alpaca credentials as environment variables so we don't have to pass them around.
        live_trading = Settings.get_endpoint(self.live_env) == BrokerEndpoint.LIVE
        os.environ['APCA_API_BASE_URL'] = 'https://api.alpaca.markets' \
            if live_trading else 'https://paper-api.alpaca.markets'
        os.environ['APCA_API_KEY_ID'] = self.live_env.get_setting('alpaca.live_key_id') \
            if live_trading else self.live_env.get_setting('alpaca.paper_key_id')
        os.environ['APCA_API_SECRET_KEY'] = self.live_env.get_setting('alpaca.live_secret_key') \
            if live_trading else self.live_env.get_setting('alpaca.paper_secret_key')
        # Note: Polygon access uses the Alpaca live key ID regardless of endpoint.
        os.environ['POLYGON_KEY_ID'] = self.live_env.get_setting(
            'alpaca.live_key_id')
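
Because the credentials are exported as environment variables, downstream Alpaca and Polygon clients can read them without a settings object being passed around. A minimal sketch using only the standard library and the variable names set above:

    import os

    base_url = os.environ['APCA_API_BASE_URL']   # live or paper endpoint
    key_id = os.environ['APCA_API_KEY_ID']
    secret_key = os.environ['APCA_API_SECRET_KEY']
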
Code Example #4
def fork_live_env(logfeed_process: Optional['LogFeed'] = None) -> 'ExecEnv':
    """
    Returns an execution environment that outputs its logs to the API logfeed and can be used by the calling thread.
    """
    from tc2.env.ExecEnv import ExecEnv

    if not logfeed_process:
        logfeed_process = shared.program.logfeed_api

    live_env = ExecEnv(shared.program.logfeed_program,
                       logfeed_process,
                       creator_env=shared.program.live_env)
    live_env.fork_new_thread()
    return live_env
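
A hedged usage sketch from an API handler thread; the handler itself is hypothetical, while fork_live_env and Settings.get_symbols come from this project:

    def handle_symbols_request() -> list:
        env = fork_live_env()  # each thread must fork its own environment
        return Settings.get_symbols(env)
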
Code Example #5
def fork_sim_env_visuals() -> 'ExecEnv':
    """
    Returns an execution environment of type VISUAL_GENERATION that can be used by the calling thread.
    """
    from tc2.env.ExecEnv import ExecEnv
    from tc2.env.EnvType import EnvType
    from tc2.env.TimeEnv import TimeEnv
    from tc2.data.data_storage.redis.RedisManager import RedisManager
    from tc2.data.data_storage.mongo.MongoManager import MongoManager
    from tc2.data.stock_data_collection.PolygonDataCollector import PolygonDataCollector

    if shared.sim_env_visuals is None:
        shared.sim_env_visuals = ExecEnv(shared.program.logfeed_program,
                                         shared.program.logfeed_visuals)
        sim_time = TimeEnv(datetime.now())
        shared.sim_env_visuals.setup_first_time(
            env_type=EnvType.VISUAL_GENERATION,
            time=sim_time,
            data_collector=PolygonDataCollector(
                logfeed_program=shared.program.logfeed_program,
                logfeed_process=shared.program.logfeed_visuals,
                time_env=sim_time),
            mongo=MongoManager(shared.program.logfeed_visuals,
                               EnvType.VISUAL_GENERATION),
            redis=RedisManager(shared.program.logfeed_visuals,
                               EnvType.VISUAL_GENERATION))
        return shared.sim_env_visuals

    # Wipe databases
    shared.sim_env_visuals.reset_dbs()

    shared.sim_env_visuals.fork_new_thread(creator_env=shared.sim_env_visuals)
    return shared.sim_env_visuals
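
Design note: the environment is created lazily on the first call and reused afterwards; later calls wipe the simulation databases with reset_dbs() and re-bind the environment to the calling thread with fork_new_thread(). A hedged usage sketch, assuming generate_data (see Example #8) is a classmethod and the symbol is hypothetical:

    sim_env = fork_sim_env_visuals()  # fresh on first call, wiped on later calls
    data = SwingSetupData.generate_data(fork_live_env(), sim_env, symbol='SPY')
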
Code Example #6
def fork_sim_env_health() -> 'ExecEnv':
    """
    Returns an execution environment of type HEALTH_CHECKING that can be used by the calling thread.
    """
    from tc2.env.ExecEnv import ExecEnv
    from tc2.env.EnvType import EnvType
    from tc2.env.TimeEnv import TimeEnv
    from tc2.data.data_storage.redis.RedisManager import RedisManager
    from tc2.data.data_storage.mongo.MongoManager import MongoManager
    from tc2.data.stock_data_collection.PolygonDataCollector import PolygonDataCollector

    if shared.sim_env_health is None:
        shared.sim_env_health = ExecEnv(None, None)
        sim_time = TimeEnv(datetime.now())
        shared.sim_env_health.setup_first_time(
            env_type=EnvType.HEALTH_CHECKING,
            time=sim_time,
            data_collector=PolygonDataCollector(logfeed_program=None,
                                                logfeed_process=None,
                                                time_env=sim_time),
            mongo=MongoManager(None, EnvType.HEALTH_CHECKING),
            redis=RedisManager(None, EnvType.HEALTH_CHECKING))
        return shared.sim_env_health

    # Wipe databases
    shared.sim_env_health.reset_dbs()

    shared.sim_env_health.fork_new_thread(creator_env=shared.sim_env_health)
    return shared.sim_env_health
Code Example #7
    def generate_data(cls, live_env: ExecEnv, sim_env: ExecEnv,
                      **kwargs) -> 'RunHistoryData':
        """
        Compiles the program's trade history into a json string usable by the visualization script.
        :keyword: paper
        """

        # Extract parameters
        paper: bool = kwargs['paper']

        # Load entire run history for the endpoint (live or paper)
        runs = live_env.redis().get_live_run_history(
            strategies=DAY_STRATEGY_IDS, paper=paper)

        # Return the trade data in a neat object
        return RunHistoryData(runs_data=[run.to_json() for run in runs],
                              last_updated=live_env.time().now())
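
A hedged invocation sketch, assuming generate_data is exposed as a classmethod (its cls parameter suggests so) and that live_env and sim_env were forked as in the examples above; paper=True selects the paper endpoint's run history:

    history = RunHistoryData.generate_data(live_env, sim_env, paper=True)
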
Code Example #8
File: SwingSetupData.py  Project: maxilie/TC2_public
    def generate_data(cls, live_env: ExecEnv, sim_env: ExecEnv,
                      **kwargs) -> 'SwingSetupData':
        """
        Compiles the symbol's price data into a json string usable by the graphing script.
        :keyword: symbol
        """

        # Extract parameters
        symbol: str = kwargs['symbol']

        # Get a list of dates with data on file
        dates_on_file = live_env.mongo().get_dates_on_file(
            symbol, START_DATE,
            live_env.time().now())
        if len(dates_on_file) < 30:
            live_env.warn_process(
                f'Couldn\'t generate SwingSetupData for {symbol} because it only has '
                f'{len(dates_on_file)} days of price data stored in mongo')
            return SwingSetupData._blank_swing_setup_data(
                symbol,
                live_env.time().now())

        swing_viable_days: List[SwingViableDay] = []

        # Create a SwingStrategy so we can test viability
        strategy = SwingStrategy(env=sim_env, symbols=[symbol])

        for day_date in dates_on_file:
            # TODO Copy day_date's data from the live environment into the simulation environment

            # TODO Test viability of SwingStrategy on day_date

            # TODO Feed models on day_date
            pass

        # Load all daily aggregate candles for the symbol
        daily_candles = [
            live_env.mongo().load_aggregate_candle(day_date)
            for day_date in dates_on_file
        ]

        # Return the price graph data in a neat object
        return SwingSetupData(symbol=symbol,
                              daily_candles=[
                                  daily_candle.to_json()
                                  for daily_candle in daily_candles
                                  if daily_candle is not None
                              ],
                              viable_days=[
                                  viable_day.to_json()
                                  for viable_day in swing_viable_days
                              ],
                              last_updated=live_env.time().now())
Code Example #9
    def run(self) -> None:
        self.program.logfeed_program.log(
            LogLevel.INFO, 'Debug task setting up simulation environment')

        # Set simulation parameters.
        day_date = date(year=2020, month=3, day=10)

        # Clone live environment so it can run on this thread.
        live_env = ExecEnv(self.program.logfeed_program,
                           self.program.logfeed_program, self.program.live_env)
        live_env.fork_new_thread()

        # Initialize simulation environment.
        sim_time_env = TimeEnv(
            datetime.combine(day_date, time(hour=11, minute=3, second=40)))
        sim_data_collector = PolygonDataCollector(
            logfeed_program=self.program.logfeed_program,
            logfeed_process=self.program.logfeed_program,
            time_env=sim_time_env)
        sim_redis = RedisManager(self.program.logfeed_program,
                                 EnvType.STARTUP_DEBUG_1)
        sim_mongo = MongoManager(self.program.logfeed_program,
                                 EnvType.STARTUP_DEBUG_1)
        sim_env = ExecEnv(self.program.logfeed_program,
                          self.program.logfeed_program)
        sim_env.setup_first_time(env_type=EnvType.STARTUP_DEBUG_1,
                                 time=sim_time_env,
                                 data_collector=sim_data_collector,
                                 mongo=sim_mongo,
                                 redis=sim_redis)

        # Place the strategy in a simulated environment.
        strategy = LongShortStrategy(env=sim_env,
                                     symbols=['SPY', 'SPXL', 'SPXS'])

        # Simulate the strategy so its output gets printed to logfeed_optimization.
        self.program.logfeed_program.log(
            LogLevel.INFO, 'Creating StrategySimulator for debug task')
        simulator = StrategySimulator(strategy,
                                      live_env,
                                      all_symbols=['SPY', 'SPXL', 'SPXS'])
        self.program.logfeed_program.log(
            LogLevel.INFO, 'Running simulation of LongShortStrategy')
        simulator.run(warmup_days=2)
        self.program.logfeed_program.log(
            LogLevel.INFO, f'Completed LongShortStrategy simulation. '
            f'Results: {strategy.run_info.to_json()}')
Code Example #10
File: TC2Program.py  Project: maxilie/TC2_public
class TC2Program(Loggable):
    """
    The backend program, which is initialized by django startup code.
    """

    # Live execution environment.
    live_env: ExecEnv

    # Log feeds.
    logfeed_data: LogFeed
    logfeed_trading: LogFeed
    logfeed_optimization: LogFeed
    logfeed_api: LogFeed
    logfeed_visuals: LogFeed

    # Logic loops (running inside threads).
    strategy_optimizer: StrategyOptimizer
    live_day_trader: LiveTrader
    live_swing_trader: LiveTrader
    daily_collector: DailyCollector
    visuals_refresher: VisualsRefresher
    health_checks_refresher: HealthChecksRefresher

    # Threads (containing logic loops).
    day_trading_process: Process
    swing_trading_process: Process
    optimizations_process: Process
    collection_process: Process

    def __init__(self, logfeed_program):
        super().__init__(logfeed_program, logfeed_program)

    def start_program(self) -> None:
        """
        Loads settings and runs program processes in their own threads.
        This can take several seconds to complete.
        """

        # Log startup.
        self.warn_main('.........')
        self.warn_main('........')
        self.warn_main('.......')
        self.warn_main('......')
        self.warn_main('.....')
        self.warn_main('....')
        self.warn_main('...')
        self.warn_main('..')
        self.warn_main('.')
        self.warn_main('')
        self.warn_main('Program starting...')
        self.warn_main('')
        self.warn_main('.')
        self.warn_main('..')
        self.warn_main('...')
        self.warn_main('....')
        self.warn_main('.....')
        self.warn_main('......')
        self.warn_main('.......')
        self.warn_main('........')
        self.warn_main('.........')

        # Load pre-reqs first.
        try:
            self.info_main('Loading settings from config...')
            self.load_pre_reqs()
            self.info_main('Loaded settings')
        except Exception:
            self.error_main('Failed to load program essentials:')
            self.warn_main(traceback.format_exc())
            self.shutdown()
            return

        # Connect to market data and brokerage account data.
        try:
            self.info_main('Connecting to live data streams')
            self.init_account_data_streams()
            livestream_updates = AccountDataStream._livestream_updates
            self.info_main('Connected to alpaca and polygon streams')
        except Exception:
            self.error_main('Failed to connect to data streams:')
            self.warn_main(traceback.format_exc())
            self.shutdown()
            return

        # Mark data as loading and start a thread to get data and models up to date.
        try:
            self.perform_data_catchup()
        except Exception:
            self.error_main('Failed to start data catch-up task:')
            self.warn_main(traceback.format_exc())
            self.shutdown()
            return

        # Run data collection in its own core, if possible - otherwise, in its own thread.
        try:
            self.info_main('Starting data collection process')
            self.start_daily_collection(livestream_updates)
            self.info_main('Started data collection process')
        except Exception:
            self.error_main('FAILED TO START DATA COLLECTION:')
            self.warn_main(traceback.format_exc())
            self.shutdown()

        # Run live trading in its own core, if possible - otherwise, in its own thread.
        try:
            self.info_main('Starting trading process')
            self.start_live_trading(livestream_updates)
            self.info_main('Started trading process')
        except Exception:
            self.error_main('Failed to start trading logic:')
            self.warn_main(traceback.format_exc())
            self.shutdown()

        # Run strategy optimization in its own core, if possible - otherwise, in its own thread.
        try:
            self.info_main('Starting simulation and evaluation process')
            self.start_strategy_optimization()
            self.info_main('Started simulation and evaluation process')
        except Exception:
            self.error_main(
                'Failed to start strategy parameter optimization logic:')
            self.warn_main(traceback.format_exc())
            self.shutdown()

        # Init manager class and refresher thread for visualization.
        try:
            self.info_main(
                'Initializing visuals (graphs, charts, etc.) generation components'
            )
            self.init_visualization()
            self.info_main('Initialized visualization components')
        except Exception:
            self.error_main('Failed to initialize visualization components:')
            self.warn_main(traceback.format_exc())
            self.shutdown()

        # Init manager class and refresher thread for health checks.
        try:
            self.info_main('Initializing health checker')
            self.init_health_checks()
            self.info_main('Initialized health checker')
        except Exception:
            self.error_main('Failed to initialize health checker')
            self.warn_main(traceback.format_exc())
            self.shutdown()

        self.info_main('Started successfully!')

    def load_pre_reqs(self) -> None:
        # Initialize log feeds.
        self.logfeed_data = LogFeed(LogCategory.DATA)
        self.logfeed_data.log(LogLevel.ERROR,
                              '.             ...PROGRAM RESTARTED...')
        self.logfeed_trading = LogFeed(LogCategory.LIVE_TRADING)
        self.logfeed_trading.log(LogLevel.ERROR,
                                 '.             ...PROGRAM RESTARTED...')
        self.logfeed_optimization = LogFeed(LogCategory.OPTIMIZATION)
        self.logfeed_optimization.log(LogLevel.ERROR,
                                      '.             ...PROGRAM RESTARTED...')
        self.logfeed_visuals = LogFeed(LogCategory.VISUALS)
        self.logfeed_visuals.log(LogLevel.ERROR,
                                 '.             ...PROGRAM RESTARTED...')
        self.logfeed_api = LogFeed(LogCategory.API)
        self.logfeed_api.log(LogLevel.ERROR,
                             '.             ...PROGRAM RESTARTED...')

        # Create time environment for live data collection and trading.
        live_time_env = TimeEnv(datetime.now())

        # Create database managers but don't initialize connections.
        live_redis = RedisManager(self.logfeed_program, EnvType.LIVE)
        live_mongo = MongoManager(self.logfeed_program, EnvType.LIVE)

        # Initialize collector manager to access polygon.io.
        live_data_collector = PolygonDataCollector(
            logfeed_program=self.logfeed_program,
            logfeed_process=self.logfeed_data,
            time_env=live_time_env)

        # Initialize the live execution environment with program logs.
        self.live_env = ExecEnv(logfeed_program=self.logfeed_program,
                                logfeed_process=self.logfeed_program)

        # Setup the live execution environment with live time & data variables.
        self.live_env.setup_first_time(env_type=EnvType.LIVE,
                                       time=live_time_env,
                                       data_collector=live_data_collector,
                                       mongo=live_mongo,
                                       redis=live_redis)

        # Set Alpaca credentials as environment variables so we don't have to pass them around.
        live_trading = Settings.get_endpoint(self.live_env) == BrokerEndpoint.LIVE
        os.environ['APCA_API_BASE_URL'] = 'https://api.alpaca.markets' \
            if live_trading else 'https://paper-api.alpaca.markets'
        os.environ['APCA_API_KEY_ID'] = self.live_env.get_setting('alpaca.live_key_id') \
            if live_trading else self.live_env.get_setting('alpaca.paper_key_id')
        os.environ['APCA_API_SECRET_KEY'] = self.live_env.get_setting('alpaca.live_secret_key') \
            if live_trading else self.live_env.get_setting('alpaca.paper_secret_key')
        # Note: Polygon access uses the Alpaca live key ID regardless of endpoint.
        os.environ['POLYGON_KEY_ID'] = self.live_env.get_setting(
            'alpaca.live_key_id')

    def init_account_data_streams(self) -> None:
        AccountDataStream.connect_to_streams(symbols=Settings.get_symbols(
            self.live_env),
                                             logfeed_data=self.logfeed_data)

    def start_daily_collection(
            self, livestream_updates: 'multiprocessing list') -> None:
        """
        Starts a multiprocessing.Process: like a Thread, but one that can be scheduled on its own core.
        Schedules data collection to run (and trigger model feeding) after markets close every day.
        """

        # Daily collector logic loop to schedule daily collection and model feeding.
        self.daily_collector = DailyCollector(self.live_env, self.logfeed_data)

        self.collection_process = Process(
            target=self.daily_collector.start_collection_loop,
            args=(livestream_updates, ))
        self.collection_process.start()

    def perform_data_catchup(self) -> None:
        """
        Fetches historical data off-thread from Polygon.io, if any is missing.
        """

        # Train models on any data that was missed while the bot was offline.
        catch_up_days = 3

        # Retrain models if the bot has insufficient warm-up data.
        warm_up_days = 27

        def catch_up():
            self.info_main(
                'Trading and simulation disabled while checking for missing recent data...'
            )
            catch_up_start_moment = pytime.monotonic()

            # Fork data_env for the new thread.
            catch_up_env = ExecEnv(self.logfeed_program,
                                   self.logfeed_data,
                                   creator_env=self.live_env)
            catch_up_env.fork_new_thread()
            catch_up_env.info_process(
                'Performing catch-up task: checking for missing recent data')

            # Fork model feeder for the new thread.
            catch_up_model_feeder = ModelFeeder(catch_up_env)

            # Reset models and go back 31 days if missing [t-31, t-4].
            # OR go back 4 days if only missing at most [t-4, t-1].

            # Start at t-31 days.
            day_date = catch_up_env.time().now().date()
            while not catch_up_env.time().is_mkt_day(day_date):
                day_date = catch_up_env.time().get_prev_mkt_day(day_date)
            for _ in range(warm_up_days + catch_up_days + 1):
                day_date = catch_up_env.time().get_prev_mkt_day(day_date)

            # Check that each day [t-31, t-4] has valid data.
            symbols_reset = []
            for _ in range(warm_up_days):
                # Check the next day.
                day_date = catch_up_env.time().get_next_mkt_day(day_date)

                for symbol in Settings.get_symbols(catch_up_env):
                    # Only check the symbol if it hasn't been reset.
                    if symbol in symbols_reset:
                        continue

                    # Load the day's data and validate it.
                    day_data = catch_up_env.mongo().load_symbol_day(
                        symbol, day_date)
                    if not SymbolDay.validate_candles(day_data.candles):
                        catch_up_env.info_process(
                            '{} missing price data on {}. Resetting its model data'
                            .format(symbol, day_date))
                        catch_up_model_feeder.reset_models([symbol])
                        symbols_reset.append(symbol)

            # Go back to the latest potential missing day.
            day_date = catch_up_env.time().now().date()
            while not catch_up_env.time().is_mkt_day(day_date):
                day_date = catch_up_env.time().get_prev_mkt_day(day_date)
            days_back = (warm_up_days + catch_up_days + 1) if symbols_reset else (catch_up_days + 1)
            for _ in range(days_back):
                day_date = catch_up_env.time().get_prev_mkt_day(day_date)

            # Use price data to train models.
            days_to_train = (warm_up_days + catch_up_days) if symbols_reset else catch_up_days
            for _ in range(days_to_train):

                # Go through each reset symbol.
                for symbol in symbols_reset:

                    # Load mongo price data if present.
                    start_instant = pytime.monotonic()
                    day_data = catch_up_env.mongo().load_symbol_day(
                        symbol, day_date)

                    # Collect polygon-rest price data if necessary.
                    if not SymbolDay.validate_candles(day_data.candles):
                        try:
                            day_data = catch_up_env.data_collector(
                            ).collect_candles_for_day(day_date, symbol)
                        except Exception:
                            catch_up_env.error_process(
                                'Error collecting polygon-rest data:')
                            catch_up_env.warn_process(traceback.format_exc())
                    collection_time = pytime.monotonic() - start_instant

                    # Validate data.
                    validation_debugger = []
                    if day_data is not None and SymbolDay.validate_candles(
                            day_data.candles,
                            debug_output=validation_debugger):
                        # Save data
                        catch_up_env.redis().reset_day_difficulty(
                            symbol, day_date)
                        catch_up_env.mongo().save_symbol_day(day_data)

                        # Use data to train models for symbol on day.
                        start_instant = pytime.monotonic()
                        catch_up_model_feeder.train_models(symbol=symbol,
                                                           day_date=day_date,
                                                           day_data=day_data,
                                                           stable=True)
                        train_time = pytime.monotonic() - start_instant
                        catch_up_env.info_process(
                            f'Catch-up for {symbol} on {day_date:%m-%d-%Y}: collection took '
                            f'{collection_time:.2f}s;  training took {train_time:.2f}s'
                        )
                    else:
                        catch_up_env.redis().incr_day_difficulty(
                            symbol, day_date)
                        catch_up_env.warn_process(
                            f'Couldn\'t collect catch-up data for {symbol} on {day_date}: '
                            f'{"null" if day_date is None else len(day_data.candles)} candles'
                        )
                        catch_up_env.warn_process(
                            '\n'.join(validation_debugger))

                # Move to the next day.
                day_date = catch_up_env.time().get_next_mkt_day(day_date)

            # Determine whether or not we have yesterday's cached data for at least one symbol.
            unstable_data_present = False
            while not catch_up_env.time().is_mkt_day(day_date):
                day_date = catch_up_env.time().get_prev_mkt_day(day_date)
            for symbol in Settings.get_symbols(catch_up_env):
                unstable_data = catch_up_env.redis().get_cached_candles(
                    symbol, day_date)
                if unstable_data is not None and SymbolDay.validate_candles(
                        unstable_data):
                    unstable_data_present = True
                    break

            if unstable_data_present:
                msg = f'Valid cached redis data on {day_date:%B %d} found. ' \
                      f'Models and strategies should function normally'
                catch_up_env.info_main(msg)
                catch_up_env.info_process(msg)
            else:
                msg = f'No valid redis data cached on {day_date:%b %d}. Models that need yesterday\'s data will ' \
                      f'fail, causing some strategies to fail.'
                catch_up_env.warn_main(msg)
                catch_up_env.warn_process(msg)

            # Allow processes to resume now that data_collector is not busy.
            catch_up_env.mark_data_as_loaded()
            msg = f'Trading and strategy optimization enabled (catch up task took ' \
                  f'{(pytime.monotonic() - catch_up_start_moment) / 3600:.2f} hrs)'
            catch_up_env.info_main(msg)
            catch_up_env.info_process(msg)

        data_load_thread = Thread(target=catch_up)
        data_load_thread.start()

    def start_live_trading(self,
                           livestream_updates: 'multiprocessing list') -> None:
        """
        Runs live day- and swing-trading in their own Processes.
        """
        self.live_day_trader = LiveTrader(creator_env=self.live_env,
                                          logfeed_trading=self.logfeed_trading,
                                          day_trader=True)
        self.day_trading_process = Process(target=self.live_day_trader.start,
                                           args=(livestream_updates, ))
        self.day_trading_process.start()

        self.live_swing_trader = LiveTrader(
            creator_env=self.live_env,
            logfeed_trading=self.logfeed_trading,
            day_trader=False)
        self.swing_trading_process = Process(
            target=self.live_swing_trader.start, args=(livestream_updates, ))
        self.swing_trading_process.start()

    def start_strategy_optimization(self) -> None:
        """
        Runs strategy optimization in its own Process.
        """
        self.strategy_optimizer = StrategyOptimizer(
            creator_env=self.live_env,
            logfeed_optimization=self.logfeed_optimization)
        self.optimizations_process = Process(
            target=self.strategy_optimizer.start)
        self.optimizations_process.start()

    def init_visualization(self) -> None:
        """
        Schedules visuals to update continuously.
        The user can also update visuals manually using the webpanel.
        """

        # Schedule visuals to continuously update in the background.
        self.visuals_refresher = VisualsRefresher(
            logfeed_program=self.logfeed_program,
            logfeed_process=self.logfeed_visuals,
            symbols=Settings.get_symbols(self.live_env),
            live_time_env=self.live_env.time())
        self.visuals_refresher.start()

    def init_health_checks(self) -> None:
        """
        Schedules health checks (e.g. data checks, analysis model checks) to run at night.
        The user can also run checks manually using the webpanel.
        """

        # Schedule health checks to run every night.
        self.health_checks_refresher = HealthChecksRefresher(
            logfeed_program=self.logfeed_program,
            logfeed_process=self.logfeed_program,
            symbols=Settings.get_symbols(self.live_env),
            live_time_env=self.live_env.time())
        self.health_checks_refresher.start()

    def shutdown(self) -> None:
        self.info_main('Shutting down...')

        try:
            # Stop thread that runs health checks.
            self.health_checks_refresher.stop()
        except Exception:
            traceback.print_exc()

        try:
            # Stop thread that generates visuals.
            self.visuals_refresher.stop()
        except Exception:
            traceback.print_exc()

        try:
            # Stop collection process.
            self.daily_collector.stop()
            self.collection_process.terminate()
        except Exception:
            traceback.print_exc()

        try:
            # Close account/market websocket connections.
            AccountDataStream.shutdown()
        except Exception:
            traceback.print_exc()

        try:
            # Stop evaluations process.
            self.strategy_optimizer.stop()
            self.optimizations_process.terminate()
        except Exception:
            traceback.print_exc()

        try:
            # Stop day trading process.
            self.live_day_trader.stop()
            self.day_trading_process.terminate()
        except Exception:
            traceback.print_exc()

        try:
            # Stop swing trading process.
            self.live_swing_trader.stop()
            self.swing_trading_process.terminate()
        except Exception:
            traceback.print_exc()

        self.info_main('Shutdown complete')
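
start_daily_collection, start_live_trading, and start_strategy_optimization all follow the same multiprocessing lifecycle that shutdown() later unwinds: wrap a loop in a Process, start it, and terminate it on shutdown. A generic, self-contained sketch of that pattern (illustrative only, not project code):

    from multiprocessing import Process
    import time

    def collection_loop() -> None:
        while True:
            time.sleep(1)  # placeholder for scheduled work

    if __name__ == '__main__':
        process = Process(target=collection_loop)
        process.start()      # may be scheduled on its own core
        time.sleep(5)        # the program does other work meanwhile
        process.terminate()  # forceful stop, as in shutdown() above
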
Code Example #11
File: TC2Program.py  Project: maxilie/TC2_public
        def catch_up():
            self.info_main(
                'Trading and simulation disabled while checking for missing recent data...'
            )
            catch_up_start_moment = pytime.monotonic()

            # Fork data_env for the new thread.
            catch_up_env = ExecEnv(self.logfeed_program,
                                   self.logfeed_data,
                                   creator_env=self.live_env)
            catch_up_env.fork_new_thread()
            catch_up_env.info_process(
                'Performing catch-up task: checking for missing recent data')

            # Fork model feeder for the new thread.
            catch_up_model_feeder = ModelFeeder(catch_up_env)

            # Reset models and go back 31 days if missing [t-31, t-4].
            # OR go back 4 days if only missing at most [t-4, t-1].

            # Start at t-31 days.
            day_date = catch_up_env.time().now().date()
            while not catch_up_env.time().is_mkt_day(day_date):
                day_date = catch_up_env.time().get_prev_mkt_day(day_date)
            for _ in range(warm_up_days + catch_up_days + 1):
                day_date = catch_up_env.time().get_prev_mkt_day(day_date)

            # Check that each day [t-31, t-4] has valid data.
            symbols_reset = []
            for _ in range(warm_up_days):
                # Check the next day.
                day_date = catch_up_env.time().get_next_mkt_day(day_date)

                for symbol in Settings.get_symbols(catch_up_env):
                    # Only check the symbol if it hasn't been reset.
                    if symbol in symbols_reset:
                        continue

                    # Load the day's data and validate it.
                    day_data = catch_up_env.mongo().load_symbol_day(
                        symbol, day_date)
                    if not SymbolDay.validate_candles(day_data.candles):
                        catch_up_env.info_process(
                            '{} missing price data on {}. Resetting its model data'
                            .format(symbol, day_date))
                        catch_up_model_feeder.reset_models([symbol])
                        symbols_reset.append(symbol)

            # Go back to the latest potential missing day.
            day_date = catch_up_env.time().now().date()
            while not catch_up_env.time().is_mkt_day(day_date):
                day_date = catch_up_env.time().get_prev_mkt_day(day_date)
            days_back = (warm_up_days + catch_up_days + 1) if symbols_reset else (catch_up_days + 1)
            for _ in range(days_back):
                day_date = catch_up_env.time().get_prev_mkt_day(day_date)

            # Use price data to train models.
            days_to_train = (warm_up_days + catch_up_days) if symbols_reset else catch_up_days
            for _ in range(days_to_train):

                # Go through each reset symbol.
                for symbol in symbols_reset:

                    # Load mongo price data if present.
                    start_instant = pytime.monotonic()
                    day_data = catch_up_env.mongo().load_symbol_day(
                        symbol, day_date)

                    # Collect polygon-rest price data if necessary.
                    if not SymbolDay.validate_candles(day_data.candles):
                        try:
                            day_data = catch_up_env.data_collector(
                            ).collect_candles_for_day(day_date, symbol)
                        except Exception:
                            catch_up_env.error_process(
                                'Error collecting polygon-rest data:')
                            catch_up_env.warn_process(traceback.format_exc())
                    collection_time = pytime.monotonic() - start_instant

                    # Validate data.
                    validation_debugger = []
                    if day_data is not None and SymbolDay.validate_candles(
                            day_data.candles,
                            debug_output=validation_debugger):
                        # Save data
                        catch_up_env.redis().reset_day_difficulty(
                            symbol, day_date)
                        catch_up_env.mongo().save_symbol_day(day_data)

                        # Use data to train models for symbol on day.
                        start_instant = pytime.monotonic()
                        catch_up_model_feeder.train_models(symbol=symbol,
                                                           day_date=day_date,
                                                           day_data=day_data,
                                                           stable=True)
                        train_time = pytime.monotonic() - start_instant
                        catch_up_env.info_process(
                            f'Catch-up for {symbol} on {day_date:%m-%d-%Y}: collection took '
                            f'{collection_time:.2f}s;  training took {train_time:.2f}s'
                        )
                    else:
                        catch_up_env.redis().incr_day_difficulty(
                            symbol, day_date)
                        catch_up_env.warn_process(
                            f'Couldn\'t collect catch-up data for {symbol} on {day_date}: '
                            f'{"null" if day_date is None else len(day_data.candles)} candles'
                        )
                        catch_up_env.warn_process(
                            '\n'.join(validation_debugger))

                # Move to the next day.
                day_date = catch_up_env.time().get_next_mkt_day(day_date)

            # Determine whether or not we have yesterday's cached data for at least one symbol.
            unstable_data_present = False
            while not catch_up_env.time().is_mkt_day(day_date):
                day_date = catch_up_env.time().get_prev_mkt_day(day_date)
            for symbol in Settings.get_symbols(catch_up_env):
                unstable_data = catch_up_env.redis().get_cached_candles(
                    symbol, day_date)
                if unstable_data is not None and SymbolDay.validate_candles(
                        unstable_data):
                    unstable_data_present = True
                    break

            if unstable_data_present:
                msg = f'Valid cached redis data on {day_date:%B %d} found. ' \
                      f'Models and strategies should function normally'
                catch_up_env.info_main(msg)
                catch_up_env.info_process(msg)
            else:
                msg = f'No valid redis data cached on {day_date:%b %d}. Models that need yesterday\'s data will ' \
                      f'fail, causing some strategies to fail.'
                catch_up_env.warn_main(msg)
                catch_up_env.warn_process(msg)

            # Allow processes to resume now that data_collector is not busy.
            catch_up_env.mark_data_as_loaded()
            msg = f'Trading and strategy optimization enabled (catch up task took ' \
                  f'{(pytime.monotonic() - catch_up_start_moment) / 3600:.2f} hrs)'
            catch_up_env.info_main(msg)
            catch_up_env.info_process(msg)
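
The snippet uses the same idiom three times: snap back to the most recent market day, then step back a fixed number of market days. A hedged helper sketch, assuming the TimeEnv-style API used above:

    def rewind_mkt_days(time_env, day_date, n: int):
        """Return the market day n market days before day_date."""
        while not time_env.is_mkt_day(day_date):
            day_date = time_env.get_prev_mkt_day(day_date)
        for _ in range(n):
            day_date = time_env.get_prev_mkt_day(day_date)
        return day_date
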
Code Example #12
    def run(self) -> None:
        # Set the symbols and dates we need data for.
        symbols = ['SPY', 'SPXL', 'SPXS']
        start_date = date(year=2020, month=4, day=1)
        days_to_dump = 5

        # Clone live environment so it can run on this thread.
        live_env = ExecEnv(self.program.logfeed_program,
                           self.program.logfeed_program, self.program.live_env)
        live_env.fork_new_thread()
        data_collector = PolygonDataCollector(self.program.logfeed_program,
                                              self.program.logfeed_program,
                                              live_env.time())

        # Go through each symbol.
        for symbol in symbols:

            # Go through the first 5 market days starting with start_date.
            day_date = start_date - timedelta(days=1)
            for i in range(days_to_dump):

                # Get the next market day.
                day_date = live_env.time().get_next_mkt_day(day_date)

                # Load price data.
                print(f'Fetching {symbol} data for {day_date:%m-%d-%Y}')
                day_data = live_env.mongo().load_symbol_day(symbol, day_date)

                # Get fresh data from polygon.io, if necessary.
                if not SymbolDay.validate_candles(day_data.candles):
                    try:
                        day_data = data_collector.collect_candles_for_day(
                            day_date, symbol)
                    except Exception:
                        live_env.error_process(
                            'Error collecting polygon-rest data:')
                        live_env.warn_process(traceback.format_exc())

                # Validate the data.
                if day_data is None or not SymbolDay.validate_candles(
                        day_data.candles):
                    print(
                        f'COULD NOT COMPILE DEBUG PRICE DATA FOR {symbol} ON {day_date:%m-%d-%Y}'
                    )
                    continue

                # Convert the data into json.
                data_dict = day_data.to_json()

                # Dump the data into a text file.
                if not os.path.exists('debug_data'):
                    os.mkdir('debug_data')
                with open(f'debug_data/{symbol}_{day_date:%m-%d-%Y}.txt',
                          'w+') as f:
                    f.write(json.dumps(data_dict))
                print(
                    f'Dumped data to TC2_data/debug_data/{symbol}_{day_date:%m-%d-%Y}'
                )
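
Reading a dumped day back is the mirror image of the write above. A minimal sketch; the path assumes the naming scheme used by this task:

    import json

    with open('debug_data/SPY_04-01-2020.txt') as f:
        day_json = json.load(f)
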
Code Example #13
    def generate_data(cls, live_env: ExecEnv, sim_env: ExecEnv,
                      **kwargs) -> 'PriceGraphData':
        """
        Compiles the symbol's price data into a json string usable by the graphing script.
        :keyword: symbol
        """

        # Extract parameters
        symbol: str = kwargs['symbol']

        # Format price data so it can be made into a graph
        json_array = []
        day_date = START_DATE
        end_date = (datetime.today() - timedelta(days=1)).date()
        valid_days = 0
        invalid_days = 0
        while day_date <= end_date:
            day_date = day_date + timedelta(days=1)

            # Skip days on which markets are closed
            if not live_env.time().is_open(
                    datetime.combine(day_date, OPEN_TIME)):
                continue

            # Denote missing data with price=0, valid_minutes=0
            day_data = live_env.mongo().load_symbol_day(symbol, day_date)
            if day_data is None or len(day_data.candles) == 0:
                invalid_days += 1
                json_array.append({
                    "date": f"{day_date.month}/{day_date.day}/{day_date.year}",
                    "price": "0",
                    "valid_minutes": "0"
                })
                continue

            valid_days += 1

            # Find the open price and the number of minutes with at least MIN_CANDLES_PER_MIN candles
            open_price = day_data.candles[0].open
            valid_mins = 0
            candles_in_min = 0
            last_min = day_data.candles[0].moment.replace(
                second=0) - timedelta(minutes=1)
            for candle in day_data.candles:
                if candle.moment.replace(
                        second=0) >= last_min + timedelta(seconds=1):
                    if candles_in_min >= MIN_CANDLES_PER_MIN:
                        valid_mins += 1
                    last_min = candle.moment.replace(second=0)
                    candles_in_min = 0
                else:
                    candles_in_min += 1

            # Create a json object (data point) corresponding to the day
            json_array.append({
                "date": f"{day_date.month}/{day_date.day}/{day_date.year}",
                "price": str(open_price),
                "valid_minutes": str(valid_mins)
            })

        # Return the price graph data in a neat object
        return PriceGraphData(symbol=symbol,
                              valid_days=valid_days,
                              total_days=valid_days + invalid_days,
                              data=json.dumps(json_array),
                              last_updated=live_env.time().now())
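
The valid-minute count above keeps a running tally while scanning candles in order. An equivalent, more idiomatic sketch using collections.Counter; note that, unlike the loop above, this version also counts the first candle of each minute:

    from collections import Counter

    minute_counts = Counter(c.moment.replace(second=0) for c in day_data.candles)
    valid_mins = sum(1 for n in minute_counts.values() if n >= MIN_CANDLES_PER_MIN)
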
Code Example #14
    def optimize_strategy(self, strategy: AbstractStrategy,
                          symbol: str) -> None:
        """
        Runs simulations from START_DATE through two days ago.
        Tries hundreds of model scoring systems and picks the highest-performing one.
        """
        self.info_process(
            f'Optimizing {strategy.__class__.__name__}\'s weights using symbol: {symbol}'
        )
        end_date = self.time().now() - timedelta(days=2)
        dates_on_file = self.mongo().get_dates_on_file(symbol, START_DATE,
                                                       end_date)
        start_index = OPTIMIZATION_WARMUP_DAYS
        if len(dates_on_file) < start_index + 3:
            self.warn_process(
                f'Insufficient historical data ({len(dates_on_file)} days) for {symbol}'
            )
            return
        evaluation = None

        # Initialize the objects that make up the simulated environment for this evaluation
        sim_time_env = TimeEnv(
            datetime.combine(dates_on_file[start_index - 1], OPEN_TIME))
        sim_env = ExecEnv(self.logfeed_program, self.logfeed_process)
        sim_env.setup_first_time(env_type=EnvType.OPTIMIZATION,
                                 time=sim_time_env,
                                 data_collector=PolygonDataCollector(
                                     logfeed_program=self.logfeed_program,
                                     logfeed_process=self.logfeed_process,
                                     time_env=sim_time_env),
                                 mongo=MongoManager(self.logfeed_program,
                                                    EnvType.OPTIMIZATION),
                                 redis=RedisManager(self.logfeed_process,
                                                    EnvType.OPTIMIZATION))

        # Create a ModelFeeder for the simulated environment
        sim_model_feeder = ModelFeeder(sim_env)

        # Place the strategy in the simulated environment
        strategy = self._clone_strategy(strategy, sim_env)

        # Copy data we need from live environment into simulated environment
        data_copy_error = candle_util.init_simulation_data(
            live_env=self,
            sim_env=sim_env,
            symbols=[strategy.get_symbol()],
            days=start_index - 2,
            end_date=dates_on_file[start_index - 1],
            model_feeder=sim_model_feeder)
        if data_copy_error is not None:
            self.warn_process(data_copy_error)
            return

        for day_to_eval in dates_on_file[start_index:len(dates_on_file) - 2]:
            # Cancel simulations part-way through if a stop has been requested
            if not self.running:
                return

            # Copy day's data into the simulated environment but don't train analysis models
            data_copy_error = candle_util.init_simulation_data(
                live_env=self,
                sim_env=sim_env,
                symbols=[strategy.get_symbol()],
                days=2,
                end_date=day_to_eval,
                model_feeder=sim_model_feeder,
                skip_last_day_training=True)
            if data_copy_error is not None:
                self.warn_process(data_copy_error)
                self.warn_process(
                    f'Optimization of {strategy.__class__.__name__} on '
                    f'{symbol} failed because the program is missing data on {day_to_eval:%Y-%m-%d}'
                )
                continue

            # Move the perspective to the historical day
            sim_env.time().set_moment(
                datetime.combine(day_to_eval,
                                 strategy.times_active().get_start_time()))

            # Create a new strategy for this run
            strategy = self._clone_strategy(strategy, sim_env)

            # Run evaluation on the day
            # TODO Change this to run an optimization simulation
            next_evaluation = StrategyEvaluator(strategy).evaluate()

            # Merge the results with all the evaluations from previous days
            if evaluation is None:
                evaluation = next_evaluation
                evaluation._calculate_metrics()
            else:
                evaluation.combine(next_evaluation)

        # Print results after evaluating each day
        if evaluation is not None:
            self.warn_process(
                'Evaluation results of {0} for {1}:\n\t total days = {2}, viable days: {3}, pct days entered = {4}%, '
                'avg profit = {5}, \n\tmedian profit = {6}, win ratio = {7}, entry-attempt ratio = {8}'
                .format(strategy.__class__.__name__, symbol,
                        evaluation.days_evaluated, evaluation.days_viable,
                        (100 * evaluation.days_entered /
                         evaluation.days_evaluated), evaluation.avg_profit,
                        evaluation.med_profit, evaluation.win_ratio,
                        evaluation.entry_ratio))

        return
Code Example #15
    def generate_data(cls, live_env: ExecEnv, sim_env: ExecEnv,
                      **kwargs) -> 'Breakout1SetupData':
        """
        Compiles the symbol's price data into a json string usable by the graphing script.
        :keyword: symbol
        """

        # Extract parameters
        symbol: str = kwargs['symbol']
        check_moment: datetime = kwargs['check_moment']

        live_env.info_process(
            f'Generating breakout1 setup visual for {symbol} '
            f'at {check_moment.strftime(DATE_TIME_FORMAT)}')

        # Set simulated environment's time to check_moment
        sim_env.time().set_moment(check_moment)

        # Copy data we need from live environment into simulated environment
        data_copy_error = candle_util.init_simulation_data(
            live_env=live_env,
            sim_env=sim_env,
            symbols=[symbol],
            days=9,
            end_date=check_moment.date(),
            model_feeder=ModelFeeder(sim_env),
            skip_last_day_training=True)
        if data_copy_error is not None:
            live_env.warn_process(data_copy_error)
            return Breakout1SetupData._blank_breakout1_setup_data(
                symbol=symbol,
                check_moment=check_moment,
                last_updated=live_env.time().now())

        # Create a Breakout1Model so we can test viability
        model = Breakout1Model(env=sim_env,
                               model_type=AnalysisModelType.BREAKOUT1_MODEL)
        model_data = model.calculate_output(symbol)

        # Return the price graph data in a neat object
        day_minute_candles = aggregate_minute_candles(
            sim_env.mongo().load_symbol_day(symbol=symbol,
                                            day=check_moment.date()).candles)
        live_env.info_process('Generated breakout1 setup visual')
        return Breakout1SetupData(
            symbol=symbol,
            check_moment=check_moment,
            day_data=[candle.to_json() for candle in day_minute_candles],
            model_data=model_data.to_json(),
            last_updated=live_env.time().now())
Code Example #16
File: Settings.py  Project: maxilie/TC2_public
 def get_endpoint(cls, env: ExecEnv) -> BrokerEndpoint:
     """
     Returns the alpaca endpoint being used for live trading: LIVE or PAPER.
     """
     return BrokerEndpoint.LIVE if env.get_setting('alpaca.endpoint').lower() == 'live' else BrokerEndpoint.PAPER
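
A hedged usage sketch, mirroring how load_pre_reqs() in Example #3 picks the Alpaca base URL:

    endpoint = Settings.get_endpoint(env)
    base_url = ('https://api.alpaca.markets' if endpoint == BrokerEndpoint.LIVE
                else 'https://paper-api.alpaca.markets')
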
Code Example #17
File: Settings.py  Project: maxilie/TC2_public
 def get_symbols(cls, env: ExecEnv) -> List[str]:
     """
     Returns the list of symbols being watched by the program.
     """
     return [symbol.upper().strip() for symbol in env.get_setting('symbols').split(',')]
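
The split/strip/upper pipeline tolerates messy stored values. A worked example with a hypothetical setting string:

    raw = 'spy, spxl ,SPXS'  # hypothetical value of env.get_setting('symbols')
    symbols = [s.upper().strip() for s in raw.split(',')]
    assert symbols == ['SPY', 'SPXL', 'SPXS']
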
Code Example #18
    def run(self) -> None:
        # Set data parameters.
        start_date = date(year=2002, month=1, day=1)
        end_date = self.program.live_env.time().now().date() - timedelta(
            days=1)

        # Clone live environment so it can run on this thread.
        live_env = ExecEnv(self.program.logfeed_program,
                           self.program.logfeed_program, self.program.live_env)
        live_env.fork_new_thread()
        data_collector = PolygonDataCollector(self.program.logfeed_program,
                                              self.program.logfeed_program,
                                              live_env.time())

        # Clear the data file.
        filename = 'debug_data/spy_ai_data.txt'
        try:
            if not os.path.exists('debug_data'):
                os.mkdir('debug_data')
            with open(filename, 'w+') as file:
                file.write('')
            os.remove(filename)
        except Exception:
            print(f'Error deleting file: "{filename}"')

        # Go through the data we have on file.
        day_date = start_date - timedelta(days=1)
        while day_date < end_date:

            # Get the next market day.
            day_date = self.program.live_env.time().get_next_mkt_day(day_date)

            # Load price data.
            print(f'Fetching SPY data for {day_date:%m-%d-%Y}')
            day_data = live_env.mongo().load_symbol_day('SPY', day_date)

            # Get fresh data from polygon.io, if necessary.
            if not SymbolDay.validate_candles(day_data.candles):
                try:
                    day_data = data_collector.collect_candles_for_day(
                        day_date, 'SPY')
                except Exception:
                    live_env.error_process(
                        'Error collecting polygon-rest data:')
                    live_env.warn_process(traceback.format_exc())

            # Validate the data.
            if day_data is None or not SymbolDay.validate_candles(
                    day_data.candles):
                print(
                    f'COULD NOT COMPILE PRICE DATA FOR SPY ON {day_date:%m-%d-%Y}'
                )
                continue

            # TODO Convert candles into sentences.

            # Convert the data into JSON.
            data_dict = day_data.to_json()

            # Append the data to the text file, one JSON object per line.
            with open(filename, 'a+') as f:
                f.write(json.dumps(data_dict) + '\n')

        print(f'Dumped data to TC2_data/{filename}')
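With the one-object-per-line append above, the dump can be read back as JSON Lines (a sketch, assuming the file was produced by the run above):

import json

days = []
with open('debug_data/spy_ai_data.txt') as f:
    for line in f:
        if line.strip():
            days.append(json.loads(line))
print(f'Loaded {len(days)} days of SPY data')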
Code example #19
File: DaySpreadData.py  Project: maxilie/TC2_public
    def generate_data(cls, live_env: ExecEnv, sim_env: ExecEnv,
                      **kwargs) -> 'AbstractVisualizationData':
        """
        Compiles the symbol's price data into a json string usable by the visualization script.
        :keyword: symbol: str
        """

        # Extract parameters
        symbol: str = kwargs['symbol']

        # Format price data so it can be made into a graph
        day_date = (datetime.today() - timedelta(days=1)).date()
        pct_spreads = []
        while len(pct_spreads) < 31:
            day_date = day_date - timedelta(days=1)

            # Skip days on which markets are closed
            if not live_env.time().is_open(
                    datetime.combine(day_date, OPEN_TIME)):
                continue

            # Load data for the day
            day_data = live_env.mongo().load_symbol_day(symbol, day_date)

            # Calculate the day's price spread (pct difference between the highest and lowest candle opens)
            highest_price = max([candle.open for candle in day_data.candles])
            lowest_price = min([candle.open for candle in day_data.candles])
            pct_spreads.append(100 * (highest_price - lowest_price) /
                               lowest_price)

        # Calculate median pct_spread
        median_spread = 0 if len(pct_spreads) == 0 else statistics.median(
            pct_spreads)

        # Sum up frequencies of spreads in each bin
        bins_dict = {
            '<0.4%': 0,
            '0.4% - 0.8%': 0,
            '0.8% - 1.2%': 0,
            '1.2% - 1.6%': 0,
            '1.6% - 2.1%': 0,
            '>2.1%': 0
        }
        for spread in pct_spreads:
            if spread < 0.4:
                bins_dict['<0.4%'] += 1
            elif spread < 0.8:
                bins_dict['0.4% - 0.8%'] += 1
            elif spread < 1.2:
                bins_dict['0.8% - 1.2%'] += 1
            elif spread < 1.6:
                bins_dict['1.2% - 1.6%'] += 1
            elif spread < 2.1:
                bins_dict['1.6% - 2.1%'] += 1
            else:
                bins_dict['>2.1%'] += 1

        # Add json object to data array for each bin
        data = []
        for bin_name, frequency in bins_dict.items():
            data.append({'name': bin_name, 'frequency': str(frequency)})

        # Classify the symbol as volatile or not, based on its median spread percentage
        volatility_str = 'volatile' if median_spread >= 0.8 else 'not volatile'

        # Return the price graph data in a neat object
        return DaySpreadData(symbol, f'{median_spread:.1f}', volatility_str,
                             json.dumps(data))
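The if/elif chain that fills bins_dict is equivalent to a sorted-edge lookup; a compact alternative sketch using bisect (bin edges copied from the code above, helper name hypothetical):

from bisect import bisect_right
from collections import Counter

EDGES = [0.4, 0.8, 1.2, 1.6, 2.1]
LABELS = ['<0.4%', '0.4% - 0.8%', '0.8% - 1.2%', '1.2% - 1.6%', '1.6% - 2.1%', '>2.1%']

def bin_spreads(pct_spreads):
    # bisect_right returns the index of the first edge greater than the spread,
    # which is exactly the bin index into LABELS.
    counts = Counter(LABELS[bisect_right(EDGES, spread)] for spread in pct_spreads)
    return {label: counts.get(label, 0) for label in LABELS}

assert bin_spreads([0.1, 0.5, 2.5]) == {
    '<0.4%': 1, '0.4% - 0.8%': 1, '0.8% - 1.2%': 0,
    '1.2% - 1.6%': 0, '1.6% - 2.1%': 0, '>2.1%': 1}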
Code example #20
    def run(self) -> None:
        # Clone live environment, connecting this thread to real data.
        live_env = ExecEnv(self.program.logfeed_optimization,
                           self.program.logfeed_optimization,
                           self.program.live_env)
        live_env.fork_new_thread()

        # Experiment settings.
        MAX_TRIALS_PER_DAY = 250  # max number of periods to evaluate per historical day
        EVAL_PERIOD_LEN = 3 * 60  # number of seconds over which to track profits
        EVAL_FLOOR_PERIOD_LEN = 7 * 60  # number of seconds over which to track killswitch floor

        # Load dates on which we have all the needed data.
        experiment_start_date = date(2018, 6, 1)
        spy_dates = live_env.mongo().get_dates_on_file(
            symbol='SPY',
            start_date=experiment_start_date,
            end_date=live_env.time().now().date())
        spxl_dates = live_env.mongo().get_dates_on_file(
            symbol='SPXL',
            start_date=experiment_start_date,
            end_date=live_env.time().now().date())
        spxl_dates = [
            day_date for day_date in spxl_dates if day_date in spy_dates
        ]  # narrow spxl to spy dates
        spy_dates = [
            day_date for day_date in spy_dates if day_date in spxl_dates
        ]  # narrow spy to spxl dates
        spxs_dates = live_env.mongo().get_dates_on_file(
            symbol='SPXS',
            start_date=experiment_start_date,
            end_date=live_env.time().now().date())
        spxs_dates = [
            day_date for day_date in spxs_dates if day_date in spy_dates
        ]  # narrow spxs to the shared spy/spxl dates
        spy_dates = [
            day_date for day_date in spy_dates if day_date in spxs_dates
        ]  # narrow spy to dates also on file for spxs
        spxl_dates = [
            day_date for day_date in spxl_dates if day_date in spy_dates
        ]  # narrow spxl to the final shared date set
        assert len(spy_dates) == len(spxl_dates) == len(spxs_dates)

        # Init statistics on the experiment.
        spxl_blr_setup_vals = []
        spxs_blr_setup_vals = []
        spxl_blr_10_vals = []
        spxs_blr_10_vals = []
        spxl_blr_25_vals = []
        spxs_blr_25_vals = []
        spxl_profits = []
        spxl_floors = []
        spxs_profits = []
        spxs_floors = []
        oscillation_model = OscillationModel(live_env,
                                             AnalysisModelType.OSCILLATION)
        trend_model = LSFavorModel(live_env, AnalysisModelType.LS_FAVOR)

        # Simulate the days on which SPY, SPXL, and SPXS jointly have data.
        live_env.info_process(
            f'Beginning BLR simulations over {len(spxs_dates)} dates')
        for day_date in spxs_dates:
            # Load data for experiment.
            live_env.info_process(
                f'Running trials on {day_date:%m-%d-%Y} (successful trials: {len(spxl_profits)})'
            )
            spy_data = live_env.mongo().load_symbol_day(symbol='SPY',
                                                        day=day_date)
            spxl_data = live_env.mongo().load_symbol_day(symbol='SPXL',
                                                         day=day_date)
            spxs_data = live_env.mongo().load_symbol_day(symbol='SPXS',
                                                         day=day_date)

            # Validate data.
            data_is_valid = True
            for day_data in [spy_data, spxl_data, spxs_data]:
                if not SymbolDay.validate_candles(day_data.candles):
                    data_is_valid = False
                    break
            if not data_is_valid:
                live_env.info_process(f'Invalid data on {day_date:%m-%d-%Y}')
                continue

            # Init time windows variables.
            start_moment = datetime.combine(
                day_date, OPEN_TIME) + timedelta(seconds=int(30 * 60))
            end_moment = datetime.combine(day_date, CLOSE_TIME) - timedelta(
                seconds=int(EVAL_PERIOD_LEN + 15 * 60))

            # Go thru time windows on each day.
            day_trials = 0
            while start_moment < end_moment and day_trials < MAX_TRIALS_PER_DAY:

                try:
                    # Move to the next time window.
                    start_moment += timedelta(seconds=random.randint(30, 120))
                    blr_setup_period = ContinuousTimeInterval(
                        (start_moment - timedelta(seconds=3 * 60)).time(),
                        start_moment.time())
                    blr_10_period = ContinuousTimeInterval(
                        (start_moment - timedelta(seconds=10 * 60)).time(),
                        start_moment.time())
                    blr_25_period = ContinuousTimeInterval(
                        (start_moment - timedelta(seconds=25 * 60)).time(),
                        start_moment.time())
                    eval_period = ContinuousTimeInterval(
                        start_moment.time(),
                        (start_moment +
                         timedelta(seconds=EVAL_PERIOD_LEN)).time())
                    eval_floor_period = ContinuousTimeInterval(
                        start_moment.time(),
                        (start_moment +
                         timedelta(seconds=EVAL_FLOOR_PERIOD_LEN)).time())

                    # Ignore non-oscillatory periods.
                    oscillation_val = oscillation_model.get_oscillation_val(
                        candles_in_period(blr_setup_period, spy_data.candles,
                                          spy_data.day_date))
                    if oscillation_val < 0.6:
                        continue

                    # Calculate BLR trendline indicators.
                    spxl_blr_setup_val = trend_model.get_blr_strength(
                        BoundedLinearRegressions(
                            candles_in_period(blr_setup_period,
                                              spxl_data.candles,
                                              spxl_data.day_date)))
                    spxs_blr_setup_val = trend_model.get_blr_strength(
                        BoundedLinearRegressions(
                            candles_in_period(blr_setup_period,
                                              spxs_data.candles,
                                              spxs_data.day_date)))
                    spxl_blr_10_val = trend_model.get_blr_strength(
                        BoundedLinearRegressions(
                            candles_in_period(blr_10_period, spxl_data.candles,
                                              spxl_data.day_date)))
                    spxs_blr_10_val = trend_model.get_blr_strength(
                        BoundedLinearRegressions(
                            candles_in_period(blr_10_period, spxs_data.candles,
                                              spxs_data.day_date)))
                    spxl_blr_25_val = trend_model.get_blr_strength(
                        BoundedLinearRegressions(
                            candles_in_period(blr_25_period, spxl_data.candles,
                                              spxl_data.day_date)))
                    spxs_blr_25_val = trend_model.get_blr_strength(
                        BoundedLinearRegressions(
                            candles_in_period(blr_25_period, spxs_data.candles,
                                              spxs_data.day_date)))

                    # Calculate maximum profits during evaluation period.
                    spxl_buy_price = candles_in_period(
                        blr_setup_period, spxl_data.candles,
                        spxl_data.day_date)[-1].close
                    spxs_buy_price = candles_in_period(
                        blr_setup_period, spxs_data.candles,
                        spxs_data.day_date)[-1].close
                    spxl_eval_candles = candles_in_period(
                        eval_period, spxl_data.candles, spxl_data.day_date)
                    spxs_eval_candles = candles_in_period(
                        eval_period, spxs_data.candles, spxs_data.day_date)
                    spxl_eval_floor_candles = candles_in_period(
                        eval_floor_period, spxl_data.candles,
                        spxl_data.day_date)
                    spxs_eval_floor_candles = candles_in_period(
                        eval_floor_period, spxs_data.candles,
                        spxs_data.day_date)
                    spxl_profit_pct = (max([
                        candle.high * 0.3 + candle.open * 0.7
                        for candle in spxl_eval_candles
                    ]) - spxl_buy_price) / spxl_buy_price
                    spxs_profit_pct = (max([
                        candle.high * 0.3 + candle.open * 0.7
                        for candle in spxs_eval_candles
                    ]) - spxs_buy_price) / spxs_buy_price
                    spxl_floor_pct = (spxl_buy_price - min([
                        candle.low * 0.3 + candle.open * 0.7
                        for candle in spxl_eval_floor_candles
                    ])) / spxl_buy_price
                    spxs_floor_pct = (spxs_buy_price - min([
                        candle.low * 0.3 + candle.open * 0.7
                        for candle in spxs_eval_floor_candles
                    ])) / spxs_buy_price

                    # Record trial stats.
                    spxl_blr_setup_vals.append(spxl_blr_setup_val)
                    spxs_blr_setup_vals.append(spxs_blr_setup_val)
                    spxl_blr_10_vals.append(spxl_blr_10_val)
                    spxs_blr_10_vals.append(spxs_blr_10_val)
                    spxl_blr_25_vals.append(spxl_blr_25_val)
                    spxs_blr_25_vals.append(spxs_blr_25_val)
                    spxl_profits.append(spxl_profit_pct)
                    spxl_floors.append(spxl_floor_pct)
                    spxs_profits.append(spxs_profit_pct)
                    spxs_floors.append(spxs_floor_pct)
                    day_trials += 1

                    # Print experiment stats every 100 trials.
                    if len(spxl_blr_setup_vals
                           ) > 0 and len(spxl_blr_setup_vals) % 100 != 0:
                        continue

                    live_env.info_process('\n\n')

                    def print_immediate_profit(val_lists, profits_list,
                                               threshold, symbol, trend_name):
                        # Get indices corresponding to vals that are above all thresholds.
                        indices = [i for i in range(len(val_lists[0]))]
                        for j in range(len(val_lists)):
                            indices = [
                                i for i in indices
                                if val_lists[j][i] >= threshold
                            ]

                        if len(indices) > 3:
                            profits = [profits_list[i] for i in indices]
                            profit_mean, profit_med, profit_stdev = (
                                mean(profits), median(profits), stdev(profits))
                            immediate_profit = profit_med
                            live_env.info_process(
                                f'Immediate {symbol} profit (< 3 mins) when {trend_name} strength >= '
                                f'{100 * threshold}%: '
                                f'{100 * immediate_profit:.2f}% (n={len(profits)})'
                            )

                    def print_profit_ratio(val_lists, spxl_profits_list,
                                           spxs_profits_list, threshold,
                                           trend_name):
                        # Get indices corresponding to vals that are above all thresholds.
                        indices = [i for i in range(len(val_lists[0]))]
                        for j in range(len(val_lists)):
                            indices = [
                                i for i in indices
                                if val_lists[j][i] >= threshold
                            ]

                        if len(indices) > 3:
                            profit_ratios = [
                                spxl_profits_list[i] /
                                max(0.0002, spxs_profits_list[i])
                                for i in indices
                            ]
                            ratios_mean, ratios_med, ratios_stdev = (
                                mean(profit_ratios), median(profit_ratios),
                                stdev(profit_ratios))
                            live_env.info_process(
                                f'Immediate profit ratio (SPXL:SPXS) when {trend_name} strength >= '
                                f'{100 * threshold}%: '
                                f'{ratios_med:.2f}:1 (n={len(profit_ratios)})')

                    # TODO NEXT: Implement a -1.65% killswitch in the strategy.

                    # TODO NEXT: What pct of oscillation range is expected profit?

                    def print_killswitch_floor(val_lists, floors_list,
                                               threshold, symbol, trend_name):
                        # Get indices corresponding to vals that are above all thresholds.
                        indices = [i for i in range(len(val_lists[0]))]
                        for j in range(len(val_lists)):
                            indices = [
                                i for i in indices
                                if val_lists[j][i] >= threshold
                            ]

                        if len(indices) > 3:
                            floors = [-floors_list[i] for i in indices]
                            floor_mean, floor_med, floor_stdev = (
                                mean(floors), median(floors), stdev(floors))
                            killswitch_floor = floor_med - 1.5 * floor_stdev
                            live_env.info_process(
                                f'{symbol} killswitch activation (-1.5 stdev floor) when {trend_name} strength >= '
                                f'{100 * threshold}%: '
                                f'{100 * killswitch_floor:.2f}% (n={len(floors)})'
                            )

                    """
                    # Print immediate profits when BLR strength >= 70%.
                    print_immediate_profit([spxl_blr_6_vals], spxl_profits, 0.7, 'SPXL', 'BLR-6')
                    print_immediate_profit([spxs_blr_6_vals], spxs_profits, 0.7, 'SPXS', 'BLR-6')
                    print_immediate_profit([spxl_blr_10_vals], spxl_profits, 0.7, 'SPXL', 'BLR-10')
                    print_immediate_profit([spxs_blr_10_vals], spxs_profits, 0.7, 'SPXS', 'BLR-10')
                    print_immediate_profit([spxl_blr_25_vals], spxl_profits, 0.7, 'SPXL', 'BLR-25')
                    print_immediate_profit([spxs_blr_25_vals], spxs_profits, 0.7, 'SPXS', 'BLR-25')

                    # Print immediate profits when BLR strength >= 85%.
                    print_immediate_profit([spxl_blr_6_vals], spxl_profits, 0.85, 'SPXL', 'BLR-6')
                    print_immediate_profit([spxs_blr_6_vals], spxs_profits, 0.85, 'SPXS', 'BLR-6')
                    print_immediate_profit([spxl_blr_10_vals], spxl_profits, 0.85, 'SPXL', 'BLR-10')
                    print_immediate_profit([spxs_blr_10_vals], spxs_profits, 0.85, 'SPXS', 'BLR-10')
                    print_immediate_profit([spxl_blr_25_vals], spxl_profits, 0.85, 'SPXL', 'BLR-25')
                    print_immediate_profit([spxs_blr_25_vals], spxs_profits, 0.85, 'SPXS', 'BLR-25')

                    # Print immediate profits when BLR strength >= 95%.
                    print_immediate_profit([spxl_blr_6_vals], spxl_profits, 0.95, 'SPXL', 'BLR-6')
                    print_immediate_profit([spxs_blr_6_vals], spxs_profits, 0.95, 'SPXS', 'BLR-6')
                    print_immediate_profit([spxl_blr_10_vals], spxl_profits, 0.95, 'SPXL', 'BLR-10')
                    print_immediate_profit([spxs_blr_10_vals], spxs_profits, 0.95, 'SPXS', 'BLR-10')
                    print_immediate_profit([spxl_blr_25_vals], spxl_profits, 0.95, 'SPXL', 'BLR-25')
                    print_immediate_profit([spxs_blr_25_vals], spxs_profits, 0.95, 'SPXS', 'BLR-25')

                    # Print SPXL immediate profit when second 2 BLR strengths >= 90%.
                    print_immediate_profit([spxl_blr_10_vals, spxl_blr_25_vals], spxl_profits,
                                           0.9, 'SPXL', 'BLR-10-25')

                    # Print SPXL immediate profit when all BLR strengths >= 30%.
                    print_immediate_profit([spxl_blr_6_vals, spxl_blr_10_vals, spxl_blr_25_vals], spxl_profits,
                                           0.3, 'SPXL', 'BLR-6-10-25')
                    """

                    # Print SPXL:SPXS profit ratio when BLR strength >= 60%.
                    print_profit_ratio([spxl_blr_setup_vals], spxl_profits,
                                       spxs_profits, 0.6, 'BLR-3')
                    print_profit_ratio([spxl_blr_10_vals], spxl_profits,
                                       spxs_profits, 0.6, 'BLR-10')
                    print_profit_ratio([spxl_blr_25_vals], spxl_profits,
                                       spxs_profits, 0.6, 'BLR-25')

                    # Print SPXL:SPXS profit ratio when BLR strength >= 85%.
                    print_profit_ratio([spxl_blr_setup_vals], spxl_profits,
                                       spxs_profits, 0.85, 'BLR-3')
                    print_profit_ratio([spxl_blr_10_vals], spxl_profits,
                                       spxs_profits, 0.85, 'BLR-10')
                    print_profit_ratio([spxl_blr_25_vals], spxl_profits,
                                       spxs_profits, 0.85, 'BLR-25')

                    # Print SPXL:SPXS profit ratio when BLR strength >= 95%.
                    print_profit_ratio([spxl_blr_setup_vals], spxl_profits,
                                       spxs_profits, 0.95, 'BLR-3')
                    print_profit_ratio([spxl_blr_10_vals], spxl_profits,
                                       spxs_profits, 0.95, 'BLR-10')
                    print_profit_ratio([spxl_blr_25_vals], spxl_profits,
                                       spxs_profits, 0.95, 'BLR-25')

                    # Print SPXL:SPXS profit ratio when long BLR strengths >= 60%.
                    print_profit_ratio([spxl_blr_10_vals, spxl_blr_25_vals],
                                       spxl_profits, spxs_profits, 0.6,
                                       'BLR-10-25')

                    # Print expected min profit when osc_val >= 60%.
                    print_immediate_profit([spxl_blr_setup_vals], [
                        min(spxl_profits[i], spxs_profits[i])
                        for i in range(len(spxl_profits))
                    ], 0, '', 'oscillating... N/A')

                    # Print killswitch floor when osc_val >= 60%.
                    print_killswitch_floor([spxl_blr_setup_vals], [
                        max(spxl_floors[i], spxs_floors[i])
                        for i in range(len(spxl_floors))
                    ], 0, '', 'oscillating... N/A')

                except Exception as e:
                    # live_env.warn_process(f'BLR Experiment error: {traceback.format_exc()}')
                    continue
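The first TODO above calls for a -1.65% killswitch; a minimal sketch of what that check might look like (function name and wiring are hypothetical, not part of the repo):

KILLSWITCH_FLOOR_PCT = -1.65  # hypothetical threshold taken from the TODO above

def killswitch_triggered(buy_price: float, current_price: float,
                         floor_pct: float = KILLSWITCH_FLOOR_PCT) -> bool:
    """Return True when the position's unrealized loss breaches the floor."""
    change_pct = 100 * (current_price - buy_price) / buy_price
    return change_pct <= floor_pct

assert killswitch_triggered(100.0, 98.0)      # -2.00% breaches -1.65%
assert not killswitch_triggered(100.0, 99.0)  # -1.00% does not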