Example #1
    def __init__(self, model_weights: Dict[AnalysisModelType, float], logfeed_process: LogFeed):
        self.model_weights = model_weights

        # If the scoring system has no models, leave it blank
        if len(model_weights.keys()) == 0:
            return

        weights_total = sum(model_weights.values())
        if abs(weights_total - 1) > 0.01 and weights_total > 0.0001:
            logfeed_process.log(LogLevel.WARNING, 'ModelWeightingSystem weights do not sum up to 1!')
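
A minimal usage sketch of the check above, assuming hypothetical AnalysisModelType members (MOMENTUM, VOLUME) and an existing LogFeed instance named logfeed_process:

# Hypothetical model types; the real AnalysisModelType members may differ.
weights = {AnalysisModelType.MOMENTUM: 0.7, AnalysisModelType.VOLUME: 0.2}
system = ModelWeightingSystem(weights, logfeed_process)
# 0.7 + 0.2 = 0.9 is non-zero and differs from 1 by more than 0.01,
# so the constructor logs a WARNING to logfeed_process.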
Example #2
    def load_pre_reqs(self) -> None:
        # Initialize log feeds.
        self.logfeed_data = LogFeed(LogCategory.DATA)
        self.logfeed_data.log(LogLevel.ERROR,
                              '.             ...PROGRAM RESTARTED...')
        self.logfeed_trading = LogFeed(LogCategory.LIVE_TRADING)
        self.logfeed_trading.log(LogLevel.ERROR,
                                 '.             ...PROGRAM RESTARTED...')
        self.logfeed_optimization = LogFeed(LogCategory.OPTIMIZATION)
        self.logfeed_optimization.log(LogLevel.ERROR,
                                      '.             ...PROGRAM RESTARTED...')
        self.logfeed_visuals = LogFeed(LogCategory.VISUALS)
        self.logfeed_visuals.log(LogLevel.ERROR,
                                 '.             ...PROGRAM RESTARTED...')
        self.logfeed_api = LogFeed(LogCategory.API)
        self.logfeed_api.log(LogLevel.ERROR,
                             '.             ...PROGRAM RESTARTED...')

        # Create time environment for live data collection and trading.
        live_time_env = TimeEnv(datetime.now())

        # Create database managers but don't initialize connections.
        live_redis = RedisManager(self.logfeed_program, EnvType.LIVE)
        live_mongo = MongoManager(self.logfeed_program, EnvType.LIVE)

        # Initialize collector manager to access polygon.io.
        live_data_collector = PolygonDataCollector(
            logfeed_program=self.logfeed_program,
            logfeed_process=self.logfeed_data,
            time_env=live_time_env)

        # Initialize the live execution environment with program logs.
        self.live_env = ExecEnv(logfeed_program=self.logfeed_program,
                                logfeed_process=self.logfeed_program)

        # Setup the live execution environment with live time & data variables.
        self.live_env.setup_first_time(env_type=EnvType.LIVE,
                                       time=live_time_env,
                                       data_collector=live_data_collector,
                                       mongo=live_mongo,
                                       redis=live_redis)

        # Set Alpaca credentials as environment variables so we don't have to pass them around.
        live_trading = Settings.get_endpoint(self.live_env) == BrokerEndpoint.LIVE
        os.environ['APCA_API_BASE_URL'] = 'https://api.alpaca.markets' \
            if live_trading else 'https://paper-api.alpaca.markets'
        os.environ['APCA_API_KEY_ID'] = self.live_env.get_setting('alpaca.live_key_id') \
            if live_trading else self.live_env.get_setting('alpaca.paper_key_id')
        os.environ['APCA_API_SECRET_KEY'] = self.live_env.get_setting('alpaca.live_secret_key') \
            if live_trading else self.live_env.get_setting('alpaca.paper_secret_key')
        os.environ['POLYGON_KEY_ID'] = self.live_env.get_setting(
            'alpaca.live_key_id')
Example #3
    def _on_account_update(cls, raw_msg, logfeed_data: LogFeed) -> None:
        """
        Adds an update signal to the queues, which are processed by AbstractAccount instances on other threads.
        """
        # Extract the account info payload from the raw message
        data = raw_msg.account

        # Check that the account is active
        if data['status'].lower() != 'active':
            logfeed_data.log(
                LogLevel.WARNING,
                'Alpaca account status is "{0}"'.format(data['status']))

        # Convert account info json into an AccountInfo object
        acct_info = AccountInfo(data['id'], float(data['cash']),
                                float(data['cash_withdrawable']))

        # Add the account update to the data queue
        cls._queue_update(moment=datetime.now(),
                          update_type=StreamUpdateType.ACCT_INFO,
                          acct_info=acct_info.to_json())
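
For reference, a sketch of the account payload this handler expects (field names are taken from the code above; AccountInfo and _queue_update are assumed to exist elsewhere):

# Hypothetical raw message whose .account attribute carries the fields read above.
sample_account = {
    'id': 'hypothetical-account-id',
    'status': 'ACTIVE',
    'cash': '2500.00',
    'cash_withdrawable': '2000.00',
}
# With status 'ACTIVE' no warning is logged; any other status triggers a WARNING.
# The handler then builds AccountInfo('hypothetical-account-id', 2500.0, 2000.0)
# and queues it as a StreamUpdateType.ACCT_INFO update.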
Example #4
def evenly_distribute_weights(pass_fail_models: List[AnalysisModelType],
                              min_model_weights: Dict[AnalysisModelType, float],
                              max_model_weights: Dict[AnalysisModelType, float],
                              logfeed_process: LogFeed) -> ModelWeightingSystem:
    """Returns the scoring system which assigns weights evenly among models, within the allowed mins and maxes."""

    # Ensure min_model_weights has same number of models as max_model_weights
    if len(min_model_weights) != len(max_model_weights):
        logfeed_process.log(LogLevel.ERROR,
                            'Tried to generate a weight combination with mismatched model ranges '
                            '(min and max dicts have different number of models)')
        return ModelWeightingSystem(dict.fromkeys(pass_fail_models, 0), logfeed_process)

    # If the scoring system has no models, leave it blank
    if len(max_model_weights.keys()) == 0:
        return ModelWeightingSystem(dict.fromkeys(pass_fail_models, 0), logfeed_process)

    # Ensure at least one combination of weights sums to 1
    if sum(max_model_weights.values()) < 0.999:
        logfeed_process.log(LogLevel.ERROR,
                            'Tried to generate a weight combination but the weight ranges do not allow for any '
                            'combinations that add up to 1.')
        return ModelWeightingSystem(dict.fromkeys(pass_fail_models, 0), logfeed_process)

    # First, make each model's weight the midpoint between its min and max allowed value
    model_weights = {}
    for model_type, min_weight in min_model_weights.items():
        # Init the weight as the average of its min and max allowed value
        max_weight = max_model_weights[model_type]
        model_weights[model_type] = (min_weight + max_weight) / 2.0

    # Second, raise or lower weights until their sum is 1
    diff = _one_minus(model_weights)
    safety = 0
    while abs(diff) > 0.0001 and safety < 999:
        safety += 1
        incr = max(0.001, abs(diff) / len(model_weights))
        for model_type in model_weights:
            # Lower model's weight if weights are too high
            if diff < 0 and model_weights[model_type] - incr >= min_model_weights[model_type]:
                model_weights[model_type] -= incr
            # Raise model's weight if weights are too low
            if diff > 0 and model_weights[model_type] + incr <= max_model_weights[model_type]:
                model_weights[model_type] += incr
        # Calculate new diff
        diff = _one_minus(model_weights)
    if safety >= 999:
        logfeed_process.log(LogLevel.WARNING, 'Initialization of strategy\'s analysis model weights incomplete! '
                                              'Could not find an even combination that sums to one!')

    # Finally, add in pass/fail models
    for model_type in pass_fail_models:
        model_weights[model_type] = 0

    return ModelWeightingSystem(model_weights, logfeed_process)
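
A small worked sketch of the midpoint-then-adjust logic above, using two hypothetical model types (the real AnalysisModelType members may differ):

# Hypothetical ranges: model A may weigh 0.2-0.6, model B may weigh 0.3-0.7.
min_weights = {AnalysisModelType.A: 0.2, AnalysisModelType.B: 0.3}
max_weights = {AnalysisModelType.A: 0.6, AnalysisModelType.B: 0.7}

# Midpoints are 0.4 and 0.5 (sum 0.9), so the loop raises each weight by
# roughly 0.1 / 2 = 0.05, ending near {A: 0.45, B: 0.55}, which sums to 1.
system = evenly_distribute_weights(pass_fail_models=[],
                                   min_model_weights=min_weights,
                                   max_model_weights=max_weights,
                                   logfeed_process=logfeed_process)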
Example #5
    def ready(self):
        """
        Called when the Django backend starts.
        Starts a TC2Program.
        """

        # Create a new TC2Program object.
        from tc2.TC2Program import TC2Program
        from tc2.log.LogFeed import LogFeed
        from tc2.log.LogFeed import LogCategory

        if shared.program is not None:
            print('DJANGO RE-BOOTED BUT PROGRAM IS ALREADY RUNNING')
            return

        shared.program = TC2Program(LogFeed(LogCategory.PROGRAM))

        shared.program_starting.value = True

        # Start the program in a separate thread.
        def start_logic():

            # Set environment's timezone to New York so logs are consistent.
            os.environ['TZ'] = 'America/New_York'
            pytime.tzset()

            # Start the program.
            shared.program.start_program()
            shared.program_starting.value = False
            print('Started program with pid {}'.format(os.getpid()))

            # STARTUP TASKS (single-run): Run each task once in another thread.
            try:
                print('Running startup debug task(s) in another thread')
                shared.program.info_main(
                    'Running startup debug task(s) in another thread')
                task = DumpAIDataTask(shared.program)
                debug_thread_2 = Thread(target=task.run)
                debug_thread_2.start()
            except Exception:
                shared.program.error_main('Error running startup debug tasks:')
                shared.program.warn_main(traceback.format_exc())

        program_thread = Thread(target=start_logic)
        program_thread.start()
Example #6
def update(request):
    """Stops the program, pulls latest code from GitHub, and restarts."""
    try:
        # Ignore the request if the program is already being updated
        if shared.program_starting.value:
            return Response('Program already starting/stopping/updating')
        else:
            shared.program_starting.value = True

        # Stop TC2Program
        if shared.program is not None:
            shared.program.shutdown()
            print('Program shutdown from endpoint: /api/update')
            time.sleep(0.2)

        # Remove old code files
        import shutil
        try:
            shutil.rmtree('/tc2', ignore_errors=True)
            shutil.rmtree('/tmp_update_cache', ignore_errors=True)
            os.remove('/config.properties')
        except OSError:
            pass

        # Fetch new code files from GitHub
        os.mkdir('/tmp_update_cache')
        os.system('git clone https://maxilie:[email protected]/maxilie/TC2 '
                  '/tmp_update_cache')

        # Copy over new code files
        copytree('/tmp_update_cache/backend/tc2', '/tc2')
        shutil.move('/tmp_update_cache/backend/config.properties', '/config.properties')

        # Reload the python modules
        import tc2
        reload_package(tc2)

        # Create a new TC2Program object
        from tc2.TC2Program import TC2Program
        from tc2.log.LogFeed import LogFeed
        from tc2.log.LogFeed import LogCategory
        program: TC2Program = TC2Program(LogFeed(LogCategory.PROGRAM))

        # Save the new program in the django app
        apps.program = program

        # Start the program in a separate thread so as not to block the django view
        def start_logic():
            # Set environment's timezone to New York so logs are consistent
            os.environ['TZ'] = 'America/New_York'
            pytime.tzset()
            # Start the program
            program.start_program()
            shared.program_starting.value = False
            print('Started program with pid {}'.format(os.getpid()))

        init_thread = Thread(target=start_logic)
        init_thread.start()
    except Exception:
        api_util.log_stacktrace('updating the program', traceback.format_exc())
        shared.program_starting.value = False
        return Response('Error updating the program!')

    return Response('Successfully updated and restarted the program')
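
The view above calls reload_package(tc2), a helper whose definition is not shown here. A rough sketch of how such a recursive reload is commonly written with importlib, offered only as an assumption about its behavior:

import importlib
import sys
import types


def reload_package(package: types.ModuleType) -> None:
    """Reload a package and every already-imported submodule of it (assumed behavior)."""
    prefix = package.__name__ + '.'
    # Reload submodules first so the parent package picks up fresh definitions.
    for name, module in sorted(sys.modules.items()):
        if name.startswith(prefix) and isinstance(module, types.ModuleType):
            importlib.reload(module)
    importlib.reload(package)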
Example #7
class TC2Program(Loggable):
    """
    The backend program, which is initialized by django startup code.
    """

    # Live execution environment.
    live_env: ExecEnv

    # Log feeds.
    logfeed_data: LogFeed
    logfeed_trading: LogFeed
    logfeed_optimization: LogFeed
    logfeed_api: LogFeed
    logfeed_visuals: LogFeed

    # Logic loops (running inside threads).
    strategy_optimizer: StrategyOptimizer
    live_day_trader: LiveTrader
    live_swing_trader: LiveTrader
    daily_collector: DailyCollector
    visuals_refresher: VisualsRefresher
    health_checks_refresher: HealthChecksRefresher

    # Processes (containing logic loops).
    day_trading_process: Process
    swing_trading_process: Process
    optimizations_process: Process
    collection_process: Process

    def __init__(self, logfeed_program):
        super().__init__(logfeed_program, logfeed_program)

    def start_program(self) -> None:
        """
        Loads settings and runs program processes in their own threads.
        This can take several seconds to complete.
        """

        # Log startup.
        self.warn_main('.........')
        self.warn_main('........')
        self.warn_main('.......')
        self.warn_main('......')
        self.warn_main('.....')
        self.warn_main('....')
        self.warn_main('...')
        self.warn_main('..')
        self.warn_main('.')
        self.warn_main('')
        self.warn_main('Program starting...')
        self.warn_main('')
        self.warn_main('.')
        self.warn_main('..')
        self.warn_main('...')
        self.warn_main('....')
        self.warn_main('.....')
        self.warn_main('......')
        self.warn_main('.......')
        self.warn_main('........')
        self.warn_main('.........')

        # Load pre-reqs first.
        try:
            self.info_main('Loading settings from config...')
            self.load_pre_reqs()
            self.info_main('Loaded settings')
        except Exception:
            self.error_main('Failed to load program essentials:')
            self.warn_main(traceback.format_exc())
            self.shutdown()
            return

        # Connect to market data and brokerage account data.
        try:
            self.info_main('Connecting to live data streams')
            self.init_account_data_streams()
            livestream_updates = AccountDataStream._livestream_updates
            self.info_main('Connected to alpaca and polygon streams')
        except Exception:
            self.error_main('Failed to connect to data streams:')
            self.warn_main(traceback.format_exc())
            self.shutdown()
            return

        # Mark data as loading and start a thread to get data and models up to date.
        try:
            self.perform_data_catchup()
        except Exception:
            self.error_main('Failed to start data catch-up task:')
            self.warn_main(traceback.format_exc())
            self.shutdown()
            return

        # Run data collection in its own core, if possible - otherwise, in its own thread.
        try:
            self.info_main('Starting data collection process')
            self.start_daily_collection(livestream_updates)
            self.info_main('Started data collection process')
        except Exception:
            self.error_main('FAILED TO START DATA COLLECTION:')
            self.warn_main(traceback.format_exc())
            self.shutdown()

        # Run live trading in its own core, if possible - otherwise, in its own thread.
        try:
            self.info_main('Starting trading process')
            self.start_live_trading(livestream_updates)
            self.info_main('Started trading process')
        except Exception:
            self.error_main('Failed to start trading logic:')
            self.warn_main(traceback.format_exc())
            self.shutdown()

        # Run strategy optimization in its own core, if possible - otherwise, in its own thread.
        try:
            self.info_main('Starting simulation and evaluation process')
            self.start_strategy_optimization()
            self.info_main('Started simulation and evaluation process')
        except Exception:
            self.error_main(
                'Failed to start strategy parameter optimization logic:')
            self.warn_main(traceback.format_exc())
            self.shutdown()

        # Init manager class and refresher thread for visualization.
        try:
            self.info_main(
                'Initializing visuals (graphs, charts, etc.) generation components'
            )
            self.init_visualization()
            self.info_main('Initialized visualization components')
        except Exception:
            self.error_main('Failed to initialize visualization components:')
            self.warn_main(traceback.format_exc())
            self.shutdown()

        # Init manager class and refresher thread for health checks.
        try:
            self.info_main('Initializing health checker')
            self.init_health_checks()
            self.info_main('Initialized health checker')
        except Exception:
            self.error_main('Failed to initialize health checker')
            self.warn_main(traceback.format_exc())
            self.shutdown()

        self.info_main('Started successfully!')

    def load_pre_reqs(self) -> None:
        # Initialize log feeds.
        self.logfeed_data = LogFeed(LogCategory.DATA)
        self.logfeed_data.log(LogLevel.ERROR,
                              '.             ...PROGRAM RESTARTED...')
        self.logfeed_trading = LogFeed(LogCategory.LIVE_TRADING)
        self.logfeed_trading.log(LogLevel.ERROR,
                                 '.             ...PROGRAM RESTARTED...')
        self.logfeed_optimization = LogFeed(LogCategory.OPTIMIZATION)
        self.logfeed_optimization.log(LogLevel.ERROR,
                                      '.             ...PROGRAM RESTARTED...')
        self.logfeed_visuals = LogFeed(LogCategory.VISUALS)
        self.logfeed_visuals.log(LogLevel.ERROR,
                                 '.             ...PROGRAM RESTARTED...')
        self.logfeed_api = LogFeed(LogCategory.API)
        self.logfeed_api.log(LogLevel.ERROR,
                             '.             ...PROGRAM RESTARTED...')

        # Create time environment for live data collection and trading.
        live_time_env = TimeEnv(datetime.now())

        # Create database managers but don't initialize connections.
        live_redis = RedisManager(self.logfeed_program, EnvType.LIVE)
        live_mongo = MongoManager(self.logfeed_program, EnvType.LIVE)

        # Initialize collector manager to access polygon.io.
        live_data_collector = PolygonDataCollector(
            logfeed_program=self.logfeed_program,
            logfeed_process=self.logfeed_data,
            time_env=live_time_env)

        # Initialize the live execution environment with program logs.
        self.live_env = ExecEnv(logfeed_program=self.logfeed_program,
                                logfeed_process=self.logfeed_program)

        # Setup the live execution environment with live time & data variables.
        self.live_env.setup_first_time(env_type=EnvType.LIVE,
                                       time=live_time_env,
                                       data_collector=live_data_collector,
                                       mongo=live_mongo,
                                       redis=live_redis)

        # Set Alpaca credentials as environment variables so we don't have to pass them around.
        live_trading = Settings.get_endpoint(self.live_env) == BrokerEndpoint.LIVE
        os.environ['APCA_API_BASE_URL'] = 'https://api.alpaca.markets' \
            if live_trading else 'https://paper-api.alpaca.markets'
        os.environ['APCA_API_KEY_ID'] = self.live_env.get_setting('alpaca.live_key_id') \
            if live_trading else self.live_env.get_setting('alpaca.paper_key_id')
        os.environ['APCA_API_SECRET_KEY'] = self.live_env.get_setting('alpaca.live_secret_key') \
            if live_trading else self.live_env.get_setting('alpaca.paper_secret_key')
        os.environ['POLYGON_KEY_ID'] = self.live_env.get_setting(
            'alpaca.live_key_id')

    def init_account_data_streams(self) -> None:
        AccountDataStream.connect_to_streams(
            symbols=Settings.get_symbols(self.live_env),
            logfeed_data=self.logfeed_data)

    def start_daily_collection(
            self, livestream_updates: 'multiprocessing list') -> None:
        """
        Starts a multiprocessing.Process which, unlike a Thread, can run on its own core.
        Schedules data collection to run (and trigger model feeding) after markets close every day.
        """

        # Daily collector logic loop to schedule daily collection and model feeding.
        self.daily_collector = DailyCollector(self.live_env, self.logfeed_data)

        self.collection_process = Process(
            target=self.daily_collector.start_collection_loop,
            args=(livestream_updates, ))
        self.collection_process.start()

    def perform_data_catchup(self) -> None:
        """
        Fetches historical data off-thread from Polygon.io, if any is missing.
        """

        # Train models on any data that was missed while the bot was offline.
        catch_up_days = 3

        # Retrain models if the bot has insufficient warm-up data.
        warm_up_days = 27

        def catch_up():
            self.info_main(
                'Trading and simulation disabled while checking for missing recent data...'
            )
            catch_up_start_moment = pytime.monotonic()

            # Fork data_env for the new thread.
            catch_up_env = ExecEnv(self.logfeed_program,
                                   self.logfeed_data,
                                   creator_env=self.live_env)
            catch_up_env.fork_new_thread()
            catch_up_env.info_process(
                'Performing catch-up task: checking for missing recent data')

            # Fork model feeder for the new thread.
            catch_up_model_feeder = ModelFeeder(catch_up_env)

            # Reset models and go back 31 days if missing [t-31, t-4].
            # OR go back 4 days if only missing at most [t-4, t-1].

            # Start at t-31 days.
            day_date = catch_up_env.time().now().date()
            while not catch_up_env.time().is_mkt_day(day_date):
                day_date = catch_up_env.time().get_prev_mkt_day(day_date)
            for _ in range(warm_up_days + catch_up_days + 1):
                day_date = catch_up_env.time().get_prev_mkt_day(day_date)

            # Check that each day [t-31, t-4] has valid data.
            symbols_reset = []
            for _ in range(warm_up_days):
                # Check the next day.
                day_date = catch_up_env.time().get_next_mkt_day(day_date)

                for symbol in Settings.get_symbols(catch_up_env):
                    # Only check the symbol if it hasn't been reset.
                    if symbol in symbols_reset:
                        continue

                    # Load the day's data and validate it.
                    day_data = catch_up_env.mongo().load_symbol_day(
                        symbol, day_date)
                    if not SymbolDay.validate_candles(day_data.candles):
                        catch_up_env.info_process(
                            '{} missing price data on {}. Resetting its model data'
                            .format(symbol, day_date))
                        catch_up_model_feeder.reset_models([symbol])
                        symbols_reset.append(symbol)

            # Go back to the latest potential missing day.
            day_date = catch_up_env.time().now().date()
            while not catch_up_env.time().is_mkt_day(day_date):
                day_date = catch_up_env.time().get_prev_mkt_day(day_date)
            for _ in range((warm_up_days + catch_up_days + 1)
                           if len(symbols_reset) != 0
                           else (catch_up_days + 1)):
                day_date = catch_up_env.time().get_prev_mkt_day(day_date)

            # Use price data to train models.
            for _ in range(warm_up_days + catch_up_days
                           if len(symbols_reset) != 0 else catch_up_days):

                # Go through each reset symbol.
                for symbol in symbols_reset:

                    # Load mongo price data if present.
                    start_instant = pytime.monotonic()
                    day_data = catch_up_env.mongo().load_symbol_day(
                        symbol, day_date)

                    # Collect polygon-rest price data if necessary.
                    if not SymbolDay.validate_candles(day_data.candles):
                        try:
                            day_data = catch_up_env.data_collector(
                            ).collect_candles_for_day(day_date, symbol)
                        except Exception as e:
                            catch_up_env.error_process(
                                'Error collecting polygon-rest data:')
                            catch_up_env.warn_process(traceback.format_exc())
                    collection_time = pytime.monotonic() - start_instant

                    # Validate data.
                    validation_debugger = []
                    if day_data is not None and SymbolDay.validate_candles(
                            day_data.candles,
                            debug_output=validation_debugger):
                        # Save data
                        catch_up_env.redis().reset_day_difficulty(
                            symbol, day_date)
                        catch_up_env.mongo().save_symbol_day(day_data)

                        # Use data to train models for symbol on day.
                        start_instant = pytime.monotonic()
                        catch_up_model_feeder.train_models(symbol=symbol,
                                                           day_date=day_date,
                                                           day_data=day_data,
                                                           stable=True)
                        train_time = pytime.monotonic() - start_instant
                        catch_up_env.info_process(
                            f'Catch-up for {symbol} on {day_date:%m-%d-%Y}: collection took '
                            f'{collection_time:.2f}s;  training took {train_time:.2f}s'
                        )
                    else:
                        catch_up_env.redis().incr_day_difficulty(
                            symbol, day_date)
                        catch_up_env.warn_process(
                            f'Couldn\'t collect catch-up data for {symbol} on {day_date}: '
                            f'{"null" if day_date is None else len(day_data.candles)} candles'
                        )
                        catch_up_env.warn_process(
                            '\n'.join(validation_debugger))

                # Move to the next day.
                day_date = catch_up_env.time().get_next_mkt_day(day_date)

            # Determine whether or not we have yesterday's cached data for at least one symbol.
            unstable_data_present = False
            while not catch_up_env.time().is_mkt_day(day_date):
                day_date = catch_up_env.time().get_prev_mkt_day(day_date)
            for symbol in Settings.get_symbols(catch_up_env):
                unstable_data = catch_up_env.redis().get_cached_candles(
                    symbol, day_date)
                if unstable_data is not None and SymbolDay.validate_candles(
                        unstable_data):
                    unstable_data_present = True
                    break

            if unstable_data_present:
                msg = f'Valid cached redis data on {day_date:%B %d} found. ' \
                      f'Models and strategies should function normally'
                catch_up_env.info_main(msg)
                catch_up_env.info_process(msg)
            else:
                msg = f'No valid redis data cached on {day_date:%b %d}. Models that need yesterday\'s data will ' \
                      f'fail, causing some strategies to fail.'
                catch_up_env.warn_main(msg)
                catch_up_env.warn_process(msg)

            # Allow processes to resume now that data_collector is not busy.
            catch_up_env.mark_data_as_loaded()
            msg = f'Trading and strategy optimization enabled (catch up task took ' \
                  f'{(pytime.monotonic() - catch_up_start_moment) / 3600:.2f} hrs)'
            catch_up_env.info_main(msg)
            catch_up_env.info_process(msg)

        data_load_thread = Thread(target=catch_up)
        data_load_thread.start()

    def start_live_trading(self,
                           livestream_updates: 'multiprocessing list') -> None:
        """
        Runs live day- and swing-trading in their own Processes.
        """
        self.live_day_trader = LiveTrader(creator_env=self.live_env,
                                          logfeed_trading=self.logfeed_trading,
                                          day_trader=True)
        self.day_trading_process = Process(target=self.live_day_trader.start,
                                           args=(livestream_updates, ))
        self.day_trading_process.start()

        self.live_swing_trader = LiveTrader(
            creator_env=self.live_env,
            logfeed_trading=self.logfeed_trading,
            day_trader=False)
        self.swing_trading_process = Process(
            target=self.live_swing_trader.start, args=(livestream_updates, ))
        self.swing_trading_process.start()

    def start_strategy_optimization(self) -> None:
        """
        Runs strategy optimization in its own Process.
        """
        self.strategy_optimizer = StrategyOptimizer(
            creator_env=self.live_env,
            logfeed_optimization=self.logfeed_optimization)
        self.optimizations_process = Process(
            target=self.strategy_optimizer.start)
        self.optimizations_process.start()

    def init_visualization(self) -> None:
        """
        Schedules visuals to update continuously.
        The user can also update visuals manually using the webpanel.
        """

        # Schedule visuals to continuously update in the background.
        self.visuals_refresher = VisualsRefresher(
            logfeed_program=self.logfeed_program,
            logfeed_process=self.logfeed_visuals,
            symbols=Settings.get_symbols(self.live_env),
            live_time_env=self.live_env.time())
        self.visuals_refresher.start()

    def init_health_checks(self) -> None:
        """
        Schedules health checks (e.g. data checks, analysis model checks) to run at night.
        The user can also run checks manually using the webpanel.
        """

        # Schedule health checks to run every night.
        self.health_checks_refresher = HealthChecksRefresher(
            logfeed_program=self.logfeed_program,
            logfeed_process=self.logfeed_program,
            symbols=Settings.get_symbols(self.live_env),
            live_time_env=self.live_env.time())
        self.health_checks_refresher.start()

    def shutdown(self) -> None:
        self.info_main('Shutting down...')

        try:
            # Stop thread that runs health checks.
            self.health_checks_refresher.stop()
        except Exception:
            traceback.print_exc()

        try:
            # Stop thread that generates visuals.
            self.visuals_refresher.stop()
        except Exception:
            traceback.print_exc()

        try:
            # Stop collection process.
            self.daily_collector.stop()
            self.collection_process.terminate()
        except Exception:
            traceback.print_exc()

        try:
            # Close account/market websocket connections.
            AccountDataStream.shutdown()
        except Exception:
            traceback.print_exc()

        try:
            # Stop evaluations process.
            self.strategy_optimizer.stop()
            self.optimizations_process.terminate()
        except Exception:
            traceback.print_exc()

        try:
            # Stop day trading process.
            self.live_day_trader.stop()
            self.day_trading_process.terminate()
        except Exception:
            traceback.print_exc()

        try:
            # Stop swing trading process.
            self.live_swing_trader.stop()
            self.swing_trading_process.terminate()
        except Exception:
            traceback.print_exc()

        self.info_main('Shutdown complete')
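
For reference, the Django hook in Example #5 drives this class roughly as follows (a condensed sketch; start_program() takes several seconds and shutdown() stops the child processes and stream connections):

program = TC2Program(LogFeed(LogCategory.PROGRAM))
program.start_program()   # loads settings, connects streams, spawns worker processes
# ... later, e.g. from the /api/update endpoint ...
program.shutdown()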
Example #8
def get_next_weights(pass_fail_models: List[AnalysisModelType],
                     min_model_weights: Dict[AnalysisModelType, float],
                     max_model_weights: Dict[AnalysisModelType, float],
                     logfeed_process: LogFeed,
                     combination_number: int,
                     resolution: float = 0.025) -> Optional[ModelWeightingSystem]:
    """
    Returns the combination_number-th combination of weights (starting at 1),
     or None once all combinations have been generated.
    """

    # Init each model weight to its min allowable value
    model_weights = {}
    for model_type, min_weight in min_model_weights.items():
        model_weights[model_type] = min_weight

    # Create a list of model_type's
    models = list(model_weights.keys())

    # Count the combinations generated so far
    iterations = 0

    # Ensure at least one combination of weights sums to 1
    if sum(max_model_weights.values()) < 1:
        logfeed_process.log(LogLevel.WARNING,
                            'Tried to generate a weight combination but the weight ranges do not allow for any '
                            'combinations that add up to 1.')
        return None

    # Handle edge case of zero non-pass/fail models
    if len(models) == 0:
        logfeed_process.log(LogLevel.WARNING,
                            'Tried to generate a weight combination for a strategy that only uses pass/fail models')
        return None

    # Handle edge case of only one model
    elif len(models) == 1:
        model_weights[models[0]] = 1.0
        iterations += 1

    # Handle edge case of only two models
    elif len(models) == 2:
        # Call first model 'mover_1' and second model 'mover_2'
        mover_1 = models[0]
        mover_2 = models[1]

        # Generate combinations by raising mover_1 while lowering mover_2
        model_weights[mover_1] = min_model_weights[mover_1]
        model_weights[mover_2] = max_model_weights[mover_2]
        while model_weights[mover_2] > min_model_weights[mover_2] + 0.00001 \
                or model_weights[mover_1] < max_model_weights[mover_1] - 0.00001:
            # Stop when combination_number is reached
            if iterations == combination_number:
                break

            moved = False

            # Raise mover_1 if doing so will not raise mover_1 above its max
            if model_weights[mover_1] < max_model_weights[mover_1] - 0.00001:
                model_weights[mover_1] += min(resolution, max_model_weights[mover_1] - model_weights[mover_1])
                moved = True

            # Lower mover_2 if doing so will not lower mover_2 below its min or the sum of weights below 1
            if model_weights[mover_2] > min_model_weights[mover_2] + 0.00001 and \
                    sum(model_weights.values()) >= 1 + resolution:
                model_weights[mover_2] -= min(resolution, model_weights[mover_2] - min_model_weights[mover_2])
                moved = True

            if not moved:
                break

            # Count this combination if the sum of weights is 1
            if abs(sum(model_weights.values()) - 1) < 0.0001:
                iterations += 1

        if iterations < combination_number:
            # Generate combinations by raising mover_2 while lowering mover_1
            model_weights[mover_1] = max_model_weights[mover_1]
            model_weights[mover_2] = min_model_weights[mover_2]
            while model_weights[mover_1] > min_model_weights[mover_1] + 0.00001 \
                    or model_weights[mover_2] < max_model_weights[mover_2] - 0.00001:
                # Stop when combination_number is reached
                if iterations == combination_number:
                    break

                moved = False

                # Raise mover_2 if doing so will not raise mover_2 above its max
                if model_weights[mover_2] < max_model_weights[mover_2] - 0.00001:
                    model_weights[mover_2] += min(resolution, max_model_weights[mover_2] - model_weights[mover_2])
                    moved = True

                # Lower mover_1 if doing so will not lower mover_1 below its min or the sum of weights below 1
                if model_weights[mover_1] > min_model_weights[mover_1] + 0.00001 and \
                        sum(model_weights.values()) >= 1 + resolution:
                    model_weights[mover_1] -= min(resolution, model_weights[mover_1] - min_model_weights[mover_1])
                    moved = True

                if not moved:
                    break

                # Count this combination if the sum of weights is 1
                if abs(sum(model_weights.values()) - 1) < 0.0001:
                    iterations += 1

    # Handle general case of three or more models
    else:
        # Loop A: go thru each pivot
        for pivot_index in range(len(models)):
            # Stop when combination_number is reached
            if iterations == combination_number:
                break

            # Set all weights to their min
            pivot = models[pivot_index]
            set_mins(model_weights, min_model_weights)

            # Loop B: go thru each possible value of the pivot
            pivot_min = model_weights[pivot]
            pivot_max = max_model_weights[pivot]
            for pivot_weight in numpy.linspace(start=pivot_min,
                                               stop=pivot_max,
                                               num=int(math.ceil((pivot_max - pivot_min) / resolution))):
                # Stop when combination_number is reached
                if iterations == combination_number:
                    break

                # Increment pivot weight
                model_weights[pivot] = pivot_weight

                # Init mover_2 as the right-most model that isn't the pivot
                mover_2_index = len(models) - 1 if pivot_index != len(models) - 1 else len(models) - 2

                # Loop C: go thru each possible mover_2
                while mover_2_index > 0:
                    # Stop when combination_number is reached
                    if iterations == combination_number:
                        break

                    # Init mover_1 as the left-most model that isn't the pivot
                    mover_1_index = 0 if pivot_index != 0 else 1

                    # Loop D: go thru each possible mover_1
                    while mover_1_index < mover_2_index:
                        # Stop when combination_number is reached
                        if iterations == combination_number:
                            break

                        # Reset every weight except the pivot to its min
                        for model_type in models:
                            if model_type != pivot:
                                model_weights[model_type] = min_model_weights[model_type]

                        # Set mover_1 weight to its min and mover_2 weight to the max it can be
                        mover_1 = models[mover_1_index]
                        model_weights[mover_1] = min_model_weights[mover_1]
                        mover_2 = models[mover_2_index]
                        set_logical_max(mover_2, model_weights, min_model_weights[mover_2], max_model_weights[mover_2])

                        # Loop E: go thru each possible mover weights combination
                        while model_weights[mover_2] > min_model_weights[mover_2] + 0.00001 \
                                and model_weights[mover_1] < max_model_weights[mover_1] - 0.00001:
                            iterations += 1
                            # Stop when combination_number is reached
                            if iterations == combination_number:
                                break

                            # Increment mover_1 and decrement mover_2
                            model_weights[mover_1] += min(resolution,
                                                          max_model_weights[mover_1] - model_weights[mover_1])
                            model_weights[mover_2] -= min(resolution,
                                                          model_weights[mover_2] - min_model_weights[mover_2])

                        # Move to next mover_1
                        mover_1_index += 1
                        if mover_1_index == pivot_index:
                            mover_1_index += 1

                    # Move to next mover_2
                    mover_2_index -= 1
                    if mover_2_index == pivot_index:
                        mover_2_index -= 1

    # Add in pass/fail models and return the weight combination we just generated
    if iterations == combination_number:
        for model_type in pass_fail_models:
            model_weights[model_type] = 0
        return ModelWeightingSystem(model_weights, logfeed_process)

    # Return None once all combinations have been generated
    return None
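
A sketch of how this generator is typically driven: request combination 1, 2, 3, ... until None signals that every combination summing to 1 has been produced (min_weights and max_weights are hypothetical Dict[AnalysisModelType, float] ranges defined elsewhere):

combination_number = 1
while True:
    system = get_next_weights(pass_fail_models=[],
                              min_model_weights=min_weights,
                              max_model_weights=max_weights,
                              logfeed_process=logfeed_process,
                              combination_number=combination_number,
                              resolution=0.05)
    if system is None:
        break  # all combinations have been generated
    # ...simulate/evaluate the candidate ModelWeightingSystem here...
    combination_number += 1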
Example #9
    def from_alpaca_api(data: 'Alpaca Order Entity',
                        logfeed_process: LogFeed) -> Optional['Order']:
        """
        Returns an Order object made from an Alpaca API response.
        See https://docs.alpaca.markets/api-documentation/api-v2/orders/#order-entity.
        """

        # Convert order entity to raw dict, if not already done.
        try:
            data = data._raw
        except Exception as ignored:
            pass

        # Decode order type.
        try:
            order_type = ORDER_TYPES[data['type']][data['side']] \
                if data['type'] in ORDER_TYPES else OrderType.UNSUPPORTED
            if order_type is OrderType.UNSUPPORTED:
                logfeed_process.log(LogLevel.WARNING, f'Couldn\'t decode order: unknown type "{data["type"]}"')
                return None

            # Decode order status.
            st = data['status']
            if st == 'filled' \
                    or st == 'stopped' \
                    or st == 'calculated':
                order_status = OrderStatus.FILLED
            elif st == 'new' \
                    or st == 'partially_filled' \
                    or st == 'done_for_day' \
                    or st == 'accepted' \
                    or st == 'pending_new' \
                    or st == 'accepted_for_bidding':
                order_status = OrderStatus.OPEN
            else:
                order_status = OrderStatus.CANCELED

            # Decode order price.
            try:
                order_price = float(data[ORDER_PRICES[order_type]])
                if data['filled_avg_price'] is not None and data['filled_avg_price'] != '':
                    order_price = float(data['filled_avg_price'])
            except Exception as e:
                order_price = 0.0
                logfeed_process.log(LogLevel.INFO, 'Error decoding order price from {0} \t Error: {1}'
                                    .format(data, traceback.format_exc()))

            return Order(order_type=order_type,
                         status=order_status,
                         symbol=data['symbol'].upper(),
                         price=order_price,
                         qty=int(data['qty']),
                         order_id=data['id'],
                         moment=pd.Timestamp(data['created_at']).to_pydatetime())
        except Exception:
            print('ERROR DECODING ORDER:')
            print(f'{data}')
            print('Raw order:')
            try:
                print(f'{data._raw}')
            except Exception:
                print('None')
            traceback.print_exc()
            return None
Пример #10
0
    def connect_to_streams(cls, symbols: List[str],
                           logfeed_data: LogFeed) -> None:
        """
        Starts a thread that listens for alpaca and polygon data streams.
        """

        # Initialize multi-threaded access to data updates.
        if cls._livestream_updates is not None:
            logfeed_data.log(
                LogLevel.ERROR,
                'Tried to connect to account data streams twice!')
            print('Tried to connect to account data streams twice!')
            return
        cls._livestream_updates = multiprocessing.Manager().list()
        cls._queue_initially_filled = multiprocessing.Value(c_bool, False)

        # Define off-thread logic for handling stream messages.
        def init_streams():

            while cls.running:
                try:
                    # Create a new event loop for this thread.
                    loop = asyncio.new_event_loop()
                    asyncio.set_event_loop(loop)

                    # From alpaca-trade-api.
                    cls.alpaca_stream = StreamConn(data_stream='polygon')

                    @cls.alpaca_stream.on(r'^trade_updates$')
                    async def on_trade_updates(conn, channel, data):
                        # logfeed_data.log(LogLevel.DEBUG, 'Alpaca raw trade update: {0}'.format(data))
                        try:
                            print('receiving order from ws')
                            order = Order.from_alpaca_api(
                                data.order, logfeed_data)
                            if order is None:
                                logfeed_data.log(
                                    LogLevel.WARNING,
                                    'Data stream could not decode an order. Ignoring it'
                                )
                                print(
                                    'Data stream could not decode an order. Ignoring it'
                                )
                                cls._queue_update(
                                    moment=datetime.now(),
                                    update_type=StreamUpdateType.STARTED_UP)
                            else:
                                cls._on_trade_update(order)
                        except Exception as e:
                            logfeed_data.log(
                                LogLevel.ERROR,
                                'Error handling alpaca trade update:')
                            logfeed_data.log(LogLevel.WARNING,
                                             traceback.format_exc())
                            traceback.print_exc()

                    @cls.alpaca_stream.on(r'^account_updates$')
                    async def on_account_updates(conn, channel, msg):
                        logfeed_data.log(
                            LogLevel.DEBUG,
                            'Alpaca raw account update: {0}'.format(msg))
                        print('Alpaca raw account update: {0}'.format(msg))
                        try:
                            print('receiving acct update from ws')
                            cls._on_account_update(msg, logfeed_data)
                        except Exception as e:
                            logfeed_data.log(
                                LogLevel.ERROR,
                                'Error handling alpaca account update: ')
                            logfeed_data.log(LogLevel.WARNING,
                                             traceback.format_exc())
                            traceback.print_exc()

                    @cls.alpaca_stream.on(r'^status$')
                    async def on_status(conn, channel, msg):
                        try:
                            cls._on_status_update(msg.message, logfeed_data)
                        except Exception as e:
                            logfeed_data.log(
                                LogLevel.ERROR,
                                'Error handling polygon status update:')
                            logfeed_data.log(LogLevel.WARNING,
                                             traceback.format_exc())
                            traceback.print_exc()

                    @cls.alpaca_stream.on(r'^A$')
                    async def on_second_bars(conn, channel, data):
                        # start_queue = pytime.monotonic()
                        try:
                            # print(str(data))
                            # print(f'is {data.start:%Y/%m/%d_%H:%M:%S}')
                            # print(f'at {datetime.now():%Y/%m/%d_%H:%M:%S}')
                            cls._on_data_update(data)
                        except Exception as e:
                            logfeed_data.log(
                                LogLevel.ERROR,
                                'Error handling polygon candle update:')
                            logfeed_data.log(LogLevel.WARNING,
                                             traceback.format_exc())
                            traceback.print_exc()
                        # queue_time_ms = (pytime.monotonic() - start_queue) * 1000
                        # moment = datetime.strptime(data.start.strftime(DATE_TIME_FORMAT), DATE_TIME_FORMAT)
                        """
                        try:
                            if queue_time_ms > 80:
                                print(f'took {queue_time_ms:.0f}ms to queue {data.symbol} {moment:%M:%S} candle')
                            else:
                                print(f'queued {data.symbol} {moment:%M:%S} candle at {datetime.now():%M:%S}')
                        except Exception as e:
                            traceback.print_exc()
                        """

                    # Subscribe to alpaca and polygon streams
                    channels_to_stream = ['trade_updates', 'account_updates']
                    channels_to_stream.extend(f'A.{symbol}'
                                              for symbol in symbols)

                    logfeed_data.log(
                        LogLevel.INFO,
                        'Subscribing to polygon and alpaca streams')
                    cls.alpaca_stream.run(channels_to_stream)
                except Exception as e:
                    logfeed_data.log(
                        LogLevel.ERROR,
                        'Polygon and alpaca streams disconnected unexpectedly')
                    logfeed_data.log(LogLevel.WARNING, traceback.format_exc())
                    pytime.sleep(2)
                    logfeed_data.log(LogLevel.INFO,
                                     'Attempting to re-connect data streams')

        # Connect to the streams in another thread
        cls.streams_thread = Thread(target=init_streams)
        cls.streams_thread.start()
Example #11
    def _on_status_update(cls, event: str, logfeed_data: LogFeed) -> None:
        """
        Logs messages received from alpaca.markets and polygon.io.
        """

        # Handle routine polygon connection updates: log quietly and queue a startup signal
        if event == 'Connecting to Polygon' or event == 'Connected Successfully':
            logfeed_data.log(LogLevel.DEBUG, event)
            try:
                cls._queue_update(moment=datetime.now(),
                                  update_type=StreamUpdateType.STARTED_UP)
            except Exception as e:
                logfeed_data.log(LogLevel.ERROR,
                                 'Error queuing ws startup update:')
                logfeed_data.log(LogLevel.WARNING, traceback.format_exc())
                traceback.print_exc()

        # Log successful authentication with polygon websocket
        elif event == 'authenticated':
            logfeed_data.log(
                LogLevel.DEBUG,
                'Successfully authenticated Polygon.io live stream (all channels unsubscribed)'
            )

        # Log successful channel subscription messages
        elif event.startswith('subscribed to:'):
            logfeed_data.log(
                LogLevel.DEBUG,
                'Subscribed to Polygon websocket channel: {0}'.format(
                    event.split('to: ')[1]))

        # Log unrecognized polygon status updates
        else:
            logfeed_data.log(
                LogLevel.INFO,
                'Unknown polygon.io status message: {0}'.format(event))