コード例 #1
0
    def run(self) -> None:
        """
        Debug task: dump 5 market days of price data (starting 2020-04-01)
        for SPY, SPXL, and SPXS into json text files under debug_data/.
        """
        # Set symbols and the date range we need data for.
        symbols = ['SPY', 'SPXL', 'SPXS']
        start_date = date(year=2020, month=4, day=1)
        days_to_dump = 5

        # Clone live environment so it can run on this thread.
        live_env = ExecEnv(self.program.logfeed_program,
                           self.program.logfeed_program, self.program.live_env)
        live_env.fork_new_thread()
        data_collector = PolygonDataCollector(self.program.logfeed_program,
                                              self.program.logfeed_program,
                                              live_env.time())

        # Go through each symbol.
        for symbol in symbols:

            # Go through the first days_to_dump market days starting with start_date.
            # Start one calendar day early so get_next_mkt_day() can land on start_date.
            day_date = start_date - timedelta(days=1)
            for _ in range(days_to_dump):

                # Get the next market day.
                day_date = live_env.time().get_next_mkt_day(day_date)

                # Load cached price data from mongo, if present.
                print(f'Fetching {symbol} data for {day_date:%m-%d-%Y}')
                day_data = live_env.mongo().load_symbol_day(symbol, day_date)

                # Get fresh data from polygon.io, if necessary.
                if not SymbolDay.validate_candles(day_data.candles):
                    try:
                        day_data = data_collector.collect_candles_for_day(
                            day_date, symbol)
                    except Exception:
                        # Log and fall through; the validation below skips the day.
                        live_env.error_process(
                            'Error collecting polygon-rest data:')
                        live_env.warn_process(traceback.format_exc())

                # Validate the data (cached or freshly collected).
                if day_data is None or not SymbolDay.validate_candles(
                        day_data.candles):
                    print(
                        f'COULD NOT COMPILE DEBUG PRICE DATA FOR {symbol} ON {day_date:%m-%d-%Y}'
                    )
                    continue

                # Convert the data into json.
                data_dict = day_data.to_json()

                # Dump the data into a text file.
                # makedirs(exist_ok=True) avoids the exists()/mkdir() race.
                os.makedirs('debug_data', exist_ok=True)
                with open(f'debug_data/{symbol}_{day_date:%m-%d-%Y}.txt',
                          'w') as f:
                    f.write(json.dumps(data_dict))
                print(
                    f'Dumped data to TC2_data/debug_data/{symbol}_{day_date:%m-%d-%Y}'
                )
コード例 #2
0
def fork_live_env(logfeed_process: Optional['LogFeed'] = None) -> 'ExecEnv':
    """
    Returns an execution environment that outputs its logs to the API logfeed and can be used by the calling thread.
    """
    from tc2.env.ExecEnv import ExecEnv

    # Fall back to the API logfeed when no process logfeed is given.
    target_logfeed = logfeed_process if logfeed_process else shared.program.logfeed_api

    # Clone the live environment and attach it to the calling thread.
    env = ExecEnv(shared.program.logfeed_program,
                  target_logfeed,
                  creator_env=shared.program.live_env)
    env.fork_new_thread()
    return env
コード例 #3
0
    def run(self) -> None:
        """
        Debug task: builds a STARTUP_DEBUG_1 simulation environment frozen at
        2020-03-10 11:03:40 and runs LongShortStrategy through a
        StrategySimulator with a 2-day warmup.
        """
        self.program.logfeed_program.log(
            LogLevel.INFO, 'Debug task setting up simulation environment')

        # Set simulation parameters.
        day_date = date(year=2020, month=3, day=10)

        # Clone live environment so it can run on this thread.
        live_env = ExecEnv(self.program.logfeed_program,
                           self.program.logfeed_program, self.program.live_env)
        live_env.fork_new_thread()

        # Initialize simulation environment.
        # The simulated clock starts mid-morning on day_date.
        sim_time_env = TimeEnv(
            datetime.combine(day_date, time(hour=11, minute=3, second=40)))
        sim_data_collector = PolygonDataCollector(
            logfeed_program=self.program.logfeed_program,
            logfeed_process=self.program.logfeed_program,
            time_env=sim_time_env)
        # Separate redis/mongo managers keyed to the debug env type so the
        # simulation does not touch live data stores.
        sim_redis = RedisManager(self.program.logfeed_program,
                                 EnvType.STARTUP_DEBUG_1)
        sim_mongo = MongoManager(self.program.logfeed_program,
                                 EnvType.STARTUP_DEBUG_1)
        sim_env = ExecEnv(self.program.logfeed_program,
                          self.program.logfeed_program)
        sim_env.setup_first_time(env_type=EnvType.STARTUP_DEBUG_1,
                                 time=sim_time_env,
                                 data_collector=sim_data_collector,
                                 mongo=sim_mongo,
                                 redis=sim_redis)

        # Place the strategy in a simulated environment.
        strategy = LongShortStrategy(env=sim_env,
                                     symbols=['SPY', 'SPXL', 'SPXS'])

        # Simulate the strategy so its output gets printed to logfeed_optimization.
        # NOTE(review): the simulator receives live_env while the strategy runs
        # in sim_env — presumably the simulator uses live_env only as a real
        # data source; confirm against StrategySimulator.
        self.program.logfeed_program.log(
            LogLevel.INFO, 'Creating StrategySimulator for debug task')
        simulator = StrategySimulator(strategy,
                                      live_env,
                                      all_symbols=['SPY', 'SPXL', 'SPXS'])
        self.program.logfeed_program.log(
            LogLevel.INFO, 'Running simulation of LongShortStrategy')
        simulator.run(warmup_days=2)
        self.program.logfeed_program.log(
            LogLevel.INFO, f'Completed LongShortStrategy simulation. '
            f'Results: {strategy.run_info.to_json()}')
コード例 #4
0
ファイル: TC2Program.py プロジェクト: maxilie/TC2_public
        def catch_up():
            """
            Disables trading/simulation while it back-fills any missing recent
            price data and re-trains models for the affected symbols, then
            re-enables trading via mark_data_as_loaded().
            """
            self.info_main(
                'Trading and simulation disabled while checking for missing recent data...'
            )
            catch_up_start_moment = pytime.monotonic()

            # Fork data_env for the new thread.
            catch_up_env = ExecEnv(self.logfeed_program,
                                   self.logfeed_data,
                                   creator_env=self.live_env)
            catch_up_env.fork_new_thread()
            catch_up_env.info_process(
                'Performing catch-up task: checking for missing recent data')

            # Fork model feeder for the new thread.
            catch_up_model_feeder = ModelFeeder(catch_up_env)

            # Reset models and go back 31 days if missing [t-31, t-4].
            # OR go back 4 days if only missing at most [t-4, t-1].

            # Start at t-31 days: find the most recent market day, then step
            # back warm_up_days + catch_up_days + 1 market days.
            day_date = catch_up_env.time().now().date()
            while not catch_up_env.time().is_mkt_day(day_date):
                day_date = catch_up_env.time().get_prev_mkt_day(day_date)
            for _ in range(warm_up_days + catch_up_days + 1):
                day_date = catch_up_env.time().get_prev_mkt_day(day_date)

            # Check that each day [t-31, t-4] has valid data.
            symbols_reset = []
            for _ in range(warm_up_days):
                # Check the next day.
                day_date = catch_up_env.time().get_next_mkt_day(day_date)

                for symbol in Settings.get_symbols(catch_up_env):
                    # Only check the symbol if it hasn't been reset.
                    if symbol in symbols_reset:
                        continue

                    # Load the day's data and validate it.
                    day_data = catch_up_env.mongo().load_symbol_day(
                        symbol, day_date)
                    if not SymbolDay.validate_candles(day_data.candles):
                        catch_up_env.info_process(
                            '{} missing price data on {}. Resetting its model data'
                            .format(symbol, day_date))
                        catch_up_model_feeder.reset_models([symbol])
                        symbols_reset.append(symbol)

            # Go back to the latest potential missing day.
            day_date = catch_up_env.time().now().date()
            while not catch_up_env.time().is_mkt_day(day_date):
                day_date = catch_up_env.time().get_prev_mkt_day(day_date)
            # Parenthesized for clarity (same precedence as before): go back the
            # full window if any symbol was reset, otherwise only recent days.
            for _ in range((warm_up_days + catch_up_days + 1)
                           if len(symbols_reset) != 0 else (catch_up_days + 1)):
                day_date = catch_up_env.time().get_prev_mkt_day(day_date)

            # Use price data to train models.
            for _ in range((warm_up_days + catch_up_days)
                           if len(symbols_reset) != 0 else catch_up_days):

                # Go through each reset symbol.
                for symbol in symbols_reset:

                    # Load mongo price data if present.
                    start_instant = pytime.monotonic()
                    day_data = catch_up_env.mongo().load_symbol_day(
                        symbol, day_date)

                    # Collect polygon-rest price data if necessary.
                    if not SymbolDay.validate_candles(day_data.candles):
                        try:
                            day_data = catch_up_env.data_collector(
                            ).collect_candles_for_day(day_date, symbol)
                        except Exception:
                            catch_up_env.error_process(
                                'Error collecting polygon-rest data:')
                            catch_up_env.warn_process(traceback.format_exc())
                    collection_time = pytime.monotonic() - start_instant

                    # Validate data.
                    validation_debugger = []
                    if day_data is not None and SymbolDay.validate_candles(
                            day_data.candles,
                            debug_output=validation_debugger):
                        # Save data and clear the day's difficulty counter.
                        catch_up_env.redis().reset_day_difficulty(
                            symbol, day_date)
                        catch_up_env.mongo().save_symbol_day(day_data)

                        # Use data to train models for symbol on day.
                        start_instant = pytime.monotonic()
                        catch_up_model_feeder.train_models(symbol=symbol,
                                                           day_date=day_date,
                                                           day_data=day_data,
                                                           stable=True)
                        train_time = pytime.monotonic() - start_instant
                        catch_up_env.info_process(
                            f'Catch-up for {symbol} on {day_date:%m-%d-%Y}: collection took '
                            f'{collection_time:.2f}s;  training took {train_time:.2f}s'
                        )
                    else:
                        catch_up_env.redis().incr_day_difficulty(
                            symbol, day_date)
                        # BUGFIX: test day_data (not day_date) for None — the
                        # old check could call len() on a None day_data.
                        catch_up_env.warn_process(
                            f'Couldn\'t collect catch-up data for {symbol} on {day_date}: '
                            f'{"null" if day_data is None else len(day_data.candles)} candles'
                        )
                        catch_up_env.warn_process(
                            '\n'.join(validation_debugger))

                # Move to the next day.
                day_date = catch_up_env.time().get_next_mkt_day(day_date)

            # Determine whether or not we have yesterday's cached data for at least one symbol.
            unstable_data_present = False
            while not catch_up_env.time().is_mkt_day(day_date):
                day_date = catch_up_env.time().get_prev_mkt_day(day_date)
            for symbol in Settings.get_symbols(catch_up_env):
                unstable_data = catch_up_env.redis().get_cached_candles(
                    symbol, day_date)
                if unstable_data is not None and SymbolDay.validate_candles(
                        unstable_data):
                    unstable_data_present = True
                    break

            if unstable_data_present:
                msg = f'Valid cached redis data on {day_date:%B %d} found. ' \
                      f'Models and strategies should function normally'
                catch_up_env.info_main(msg)
                catch_up_env.info_process(msg)
            else:
                msg = f'No valid redis data cached on {day_date:%b %d}. Models that need yesterday\'s data will ' \
                      f'fail, causing some strategies to fail.'
                catch_up_env.warn_main(msg)
                catch_up_env.warn_process(msg)

            # Allow processes to resume now that data_collector is not busy.
            catch_up_env.mark_data_as_loaded()
            msg = f'Trading and strategy optimization enabled (catch up task took ' \
                  f'{(pytime.monotonic() - catch_up_start_moment) / 3600:.2f} hrs)'
            catch_up_env.info_main(msg)
            catch_up_env.info_process(msg)
コード例 #5
0
    def run(self) -> None:
        """
        Debug task: compiles every market day of SPY price data from
        2002-01-01 through yesterday into one text file of json records:
        debug_data/spy_ai_data.txt.
        """
        # Set data parameters.
        start_date = date(year=2002, month=1, day=1)
        # BUGFIX: was now().today(), which returns a datetime and makes the
        # date-vs-datetime comparison in the loop below raise TypeError;
        # use .now().date() as elsewhere in the program.
        end_date = self.program.live_env.time().now().date() - timedelta(
            days=1)

        # Clone live environment so it can run on this thread.
        live_env = ExecEnv(self.program.logfeed_program,
                           self.program.logfeed_program, self.program.live_env)
        live_env.fork_new_thread()
        data_collector = PolygonDataCollector(self.program.logfeed_program,
                                              self.program.logfeed_program,
                                              live_env.time())

        # Clear the data file (it is appended to below).
        filename = 'debug_data/spy_ai_data.txt'
        try:
            os.makedirs('debug_data', exist_ok=True)
            if os.path.exists(filename):
                os.remove(filename)
        except Exception:
            # BUGFIX: the message printed a literal "(unknown)"; report the
            # actual filename. Best-effort: continue even if deletion fails.
            print(f'Error deleting file: "{filename}"')

        # Go through the data we have on file.
        day_date = start_date - timedelta(days=1)
        while day_date < end_date:

            # Get the next market day.
            day_date = self.program.live_env.time().get_next_mkt_day(day_date)

            # Load price data.
            print(f'Fetching SPY data for {day_date:%m-%d-%Y}')
            day_data = live_env.mongo().load_symbol_day('SPY', day_date)

            # Get fresh data from polygon.io, if necessary.
            if not SymbolDay.validate_candles(day_data.candles):
                try:
                    day_data = data_collector.collect_candles_for_day(
                        day_date, 'SPY')
                except Exception:
                    live_env.error_process(
                        'Error collecting polygon-rest data:')
                    live_env.warn_process(traceback.format_exc())

            # Validate the data.
            if day_data is None or not SymbolDay.validate_candles(
                    day_data.candles):
                print(
                    f'COULD NOT COMPILE PRICE DATA FOR SPY ON {day_date:%m-%d-%Y}'
                )
                continue

            # Convert the data into json.
            data_dict = day_data.to_json()

            # Append the data to the txt file.
            with open(filename, 'a+') as f:
                f.write(json.dumps(data_dict))

        # BUGFIX: include the output path (was a literal "(unknown)").
        print(f'Dumped data to TC2_data/{filename}')
コード例 #6
0
    def run(self) -> None:
        """
        Experiment task: over every historical day with joint SPY/SPXL/SPXS
        data, samples random intraday windows, computes bounded-linear-
        regression (BLR) trend strengths and follow-on profit/floor
        percentages, and periodically logs aggregate statistics.
        """
        # Clone live environment, connecting this thread to real data.
        live_env = ExecEnv(self.program.logfeed_optimization,
                           self.program.logfeed_optimization,
                           self.program.live_env)
        live_env.fork_new_thread()

        # Experiment settings.
        MAX_TRIALS_PER_DAY = 250  # max number of periods to evaluate per historical day
        EVAL_PERIOD_LEN = 3 * 60  # number of seconds over which to track profits
        EVAL_FLOOR_PERIOD_LEN = 7 * 60  # number of seconds over which to track killswitch floor

        # Load dates on which we have all the needed data.
        # The three lists are repeatedly intersected until they contain the
        # same dates; the assert below checks the intersection converged.
        experiment_start_date = date(2018, 6, 1)
        spy_dates = live_env.mongo().get_dates_on_file(
            symbol='SPY',
            start_date=experiment_start_date,
            end_date=live_env.time().now().date())
        spxl_dates = live_env.mongo().get_dates_on_file(
            symbol='SPXL',
            start_date=experiment_start_date,
            end_date=live_env.time().now().date())
        spxl_dates = [
            day_date for day_date in spxl_dates if day_date in spy_dates
        ]  # narrow spxl to spy dates
        spy_dates = [
            day_date for day_date in spy_dates if day_date in spxl_dates
        ]  # narrow spy to spxl dates
        spxs_dates = live_env.mongo().get_dates_on_file(
            symbol='SPXS',
            start_date=experiment_start_date,
            end_date=live_env.time().now().date())
        spxs_dates = [
            day_date for day_date in spxs_dates if day_date in spy_dates
        ]  # narrow spxs to spy=sxpl dates
        spy_dates = [
            day_date for day_date in spy_dates if day_date in spxs_dates
        ]  # narrow spy to spxs<=sxpl dates
        spxl_dates = [
            day_date for day_date in spxl_dates if day_date in spy_dates
        ]  # narrow spxl to spy<=sxpl dates
        assert len(spy_dates) == len(spxl_dates) == len(spxs_dates)

        # Init statistics on the experiment.
        # Parallel lists: index i across all of these describes trial i.
        spxl_blr_setup_vals = []
        spxs_blr_setup_vals = []
        spxl_blr_10_vals = []
        spxs_blr_10_vals = []
        spxl_blr_25_vals = []
        spxs_blr_25_vals = []
        spxl_profits = []
        spxl_floors = []
        spxs_profits = []
        spxs_floors = []
        oscillation_model = OscillationModel(live_env,
                                             AnalysisModelType.OSCILLATION)
        trend_model = LSFavorModel(live_env, AnalysisModelType.LS_FAVOR)

        # Simulate the days on which SPY, SPXL, and SPXS jointly have data.
        live_env.info_process(
            f'Beginning BLR simulations over {len(spxs_dates)} dates')
        for day_date in spxs_dates:
            # Load data for experiment.
            live_env.info_process(
                f'Running trials on {day_date:%m-%d-%Y} (successful trials: {len(spxl_profits)})'
            )
            spy_data = live_env.mongo().load_symbol_day(symbol='SPY',
                                                        day=day_date)
            spxl_data = live_env.mongo().load_symbol_day(symbol='SPXL',
                                                         day=day_date)
            spxs_data = live_env.mongo().load_symbol_day(symbol='SPXS',
                                                         day=day_date)

            # Validate data.
            data_is_valid = True
            for day_data in [spy_data, spxl_data, spxs_data]:
                if not SymbolDay.validate_candles(day_data.candles):
                    data_is_valid = False
                    break
            if not data_is_valid:
                live_env.info_process(f'Invalid data on {day_date:%m-%d-%Y}')
                continue

            # Init time windows variables.
            # Skip the first 30 mins after the open and stop early enough that
            # the evaluation window fits before the close.
            start_moment = datetime.combine(
                day_date, OPEN_TIME) + timedelta(seconds=int(30 * 60))
            end_moment = datetime.combine(day_date, CLOSE_TIME) - timedelta(
                seconds=int(EVAL_PERIOD_LEN + 15 * 60))

            # Go thru time windows on each day.
            day_trials = 0
            while start_moment < end_moment and day_trials < MAX_TRIALS_PER_DAY:

                try:
                    # Move to the next time window (random 30-120s step).
                    start_moment += timedelta(seconds=random.randint(30, 120))
                    blr_setup_period = ContinuousTimeInterval(
                        (start_moment - timedelta(seconds=3 * 60)).time(),
                        start_moment.time())
                    blr_10_period = ContinuousTimeInterval(
                        (start_moment - timedelta(seconds=10 * 60)).time(),
                        start_moment.time())
                    blr_25_period = ContinuousTimeInterval(
                        (start_moment - timedelta(seconds=25 * 60)).time(),
                        start_moment.time())
                    eval_period = ContinuousTimeInterval(
                        start_moment.time(),
                        (start_moment +
                         timedelta(seconds=EVAL_PERIOD_LEN)).time())
                    eval_floor_period = ContinuousTimeInterval(
                        start_moment.time(),
                        (start_moment +
                         timedelta(seconds=EVAL_FLOOR_PERIOD_LEN)).time())

                    # Ignore non-oscillatory periods.
                    oscillation_val = oscillation_model.get_oscillation_val(
                        candles_in_period(blr_setup_period, spy_data.candles,
                                          spy_data.day_date))
                    if oscillation_val < 0.6:
                        continue

                    # Calculate BLR trendline indicators.
                    spxl_blr_setup_val = trend_model.get_blr_strength(
                        BoundedLinearRegressions(
                            candles_in_period(blr_setup_period,
                                              spxl_data.candles,
                                              spxl_data.day_date)))
                    spxs_blr_setup_val = trend_model.get_blr_strength(
                        BoundedLinearRegressions(
                            candles_in_period(blr_setup_period,
                                              spxs_data.candles,
                                              spxs_data.day_date)))
                    spxl_blr_10_val = trend_model.get_blr_strength(
                        BoundedLinearRegressions(
                            candles_in_period(blr_10_period, spxl_data.candles,
                                              spxl_data.day_date)))
                    spxs_blr_10_val = trend_model.get_blr_strength(
                        BoundedLinearRegressions(
                            candles_in_period(blr_10_period, spxs_data.candles,
                                              spxs_data.day_date)))
                    spxl_blr_25_val = trend_model.get_blr_strength(
                        BoundedLinearRegressions(
                            candles_in_period(blr_25_period, spxl_data.candles,
                                              spxl_data.day_date)))
                    spxs_blr_25_val = trend_model.get_blr_strength(
                        BoundedLinearRegressions(
                            candles_in_period(blr_25_period, spxs_data.candles,
                                              spxs_data.day_date)))

                    # Calculate maximum profits during evaluation period.
                    # Buy price = last close of the setup window; profit/floor
                    # use a 30/70 high-open (or low-open) weighted price.
                    spxl_buy_price = candles_in_period(
                        blr_setup_period, spxl_data.candles,
                        spxl_data.day_date)[-1].close
                    spxs_buy_price = candles_in_period(
                        blr_setup_period, spxs_data.candles,
                        spxs_data.day_date)[-1].close
                    spxl_eval_candles = candles_in_period(
                        eval_period, spxl_data.candles, spxl_data.day_date)
                    spxs_eval_candles = candles_in_period(
                        eval_period, spxs_data.candles, spxs_data.day_date)
                    spxl_eval_floor_candles = candles_in_period(
                        eval_floor_period, spxl_data.candles,
                        spxl_data.day_date)
                    spxs_eval_floor_candles = candles_in_period(
                        eval_floor_period, spxs_data.candles,
                        spxs_data.day_date)
                    spxl_profit_pct = (max([
                        candle.high * 0.3 + candle.open * 0.7
                        for candle in spxl_eval_candles
                    ]) - spxl_buy_price) / spxl_buy_price
                    spxs_profit_pct = (max([
                        candle.high * 0.3 + candle.open * 0.7
                        for candle in spxs_eval_candles
                    ]) - spxs_buy_price) / spxs_buy_price
                    spxl_floor_pct = (spxl_buy_price - min([
                        candle.low * 0.3 + candle.open * 0.7
                        for candle in spxl_eval_floor_candles
                    ])) / spxl_buy_price
                    spxs_floor_pct = (spxs_buy_price - min([
                        candle.low * 0.3 + candle.open * 0.7
                        for candle in spxs_eval_floor_candles
                    ])) / spxs_buy_price

                    # Record trial stats (parallel lists stay in lockstep).
                    spxl_blr_setup_vals.append(spxl_blr_setup_val)
                    spxs_blr_setup_vals.append(spxs_blr_setup_val)
                    spxl_blr_10_vals.append(spxl_blr_10_val)
                    spxs_blr_10_vals.append(spxs_blr_10_val)
                    spxl_blr_25_vals.append(spxl_blr_25_val)
                    spxs_blr_25_vals.append(spxs_blr_25_val)
                    spxl_profits.append(spxl_profit_pct)
                    spxl_floors.append(spxl_floor_pct)
                    spxs_profits.append(spxs_profit_pct)
                    spxs_floors.append(spxs_floor_pct)
                    day_trials += 1

                    # Print experiment stats every 100 trials.
                    if len(spxl_blr_setup_vals
                           ) > 0 and len(spxl_blr_setup_vals) % 100 != 0:
                        continue

                    live_env.info_process('\n\n')

                    def print_immediate_profit(val_lists, profits_list,
                                               threshold, symbol, trend_name):
                        """Log median profit over trials where every list in
                        val_lists is >= threshold (needs > 3 such trials)."""
                        # Get indices corresponding to vals that are above all thresholds.
                        indices = [i for i in range(len(val_lists[0]))]
                        for j in range(len(val_lists)):
                            indices = [
                                i for i in indices
                                if val_lists[j][i] >= threshold
                            ]

                        if len(indices) > 3:
                            profits = [profits_list[i] for i in indices]
                            profit_mean, profit_med, profit_stdev = (
                                mean(profits), median(profits), stdev(profits))
                            immediate_profit = profit_med
                            live_env.info_process(
                                f'Immediate {symbol} profit (< 3 mins) when {trend_name} strength >= '
                                f'{100 * threshold}%: '
                                f'{100 * immediate_profit:.2f}% (n={len(profits)})'
                            )

                    def print_profit_ratio(val_lists, spxl_profits_list,
                                           spxs_profits_list, threshold,
                                           trend_name):
                        """Log the median SPXL:SPXS profit ratio over trials
                        where every list in val_lists is >= threshold."""
                        # Get indices corresponding to vals that are above all thresholds.
                        indices = [i for i in range(len(val_lists[0]))]
                        for j in range(len(val_lists)):
                            indices = [
                                i for i in indices
                                if val_lists[j][i] >= threshold
                            ]

                        if len(indices) > 3:
                            profit_ratios = [
                                spxl_profits_list[i] /
                                max(0.0002, spxs_profits_list[i])
                                for i in indices
                            ]
                            ratios_mean, ratios_med, ratios_stdev = (
                                mean(profit_ratios), median(profit_ratios),
                                stdev(profit_ratios))
                            live_env.info_process(
                                f'Immediate profit ratio (SPXL:SPXS) when {trend_name} strength >= '
                                f'{100 * threshold}%: '
                                f'{ratios_med:.2f}:1 (n={len(profit_ratios)})')

                    # TODO NEXT: Implement a -1.65% killswitch in the strategy.

                    # TODO NEXT: What pct of oscillation range is expected profit?

                    def print_killswitch_floor(val_lists, floors_list,
                                               threshold, symbol, trend_name):
                        """Log the median-minus-1.5-stdev drawdown floor over
                        trials where every list in val_lists is >= threshold."""
                        # Get indices corresponding to vals that are above all thresholds.
                        indices = [i for i in range(len(val_lists[0]))]
                        for j in range(len(val_lists)):
                            indices = [
                                i for i in indices
                                if val_lists[j][i] >= threshold
                            ]

                        if len(indices) > 3:
                            floors = [-floors_list[i] for i in indices]
                            floor_mean, floor_med, floor_stdev = (
                                mean(floors), median(floors), stdev(floors))
                            killswitch_floor = floor_med - 1.5 * floor_stdev
                            live_env.info_process(
                                f'{symbol} killswitch activation (-1.5 stdev floor) when {trend_name} strength >= '
                                f'{100 * threshold}%: '
                                f'{100 * killswitch_floor:.2f}% (n={len(floors)})'
                            )

                    """
                    # Print immediate profits when BLR strength >= 70%.
                    print_immediate_profit([spxl_blr_6_vals], spxl_profits, 0.7, 'SPXL', 'BLR-6')
                    print_immediate_profit([spxs_blr_6_vals], spxs_profits, 0.7, 'SPXS', 'BLR-6')
                    print_immediate_profit([spxl_blr_10_vals], spxl_profits, 0.7, 'SPXL', 'BLR-10')
                    print_immediate_profit([spxs_blr_10_vals], spxs_profits, 0.7, 'SPXS', 'BLR-10')
                    print_immediate_profit([spxl_blr_25_vals], spxl_profits, 0.7, 'SPXL', 'BLR-25')
                    print_immediate_profit([spxs_blr_25_vals], spxs_profits, 0.7, 'SPXS', 'BLR-25')

                    # Print immediate profits when BLR strength >= 85%.
                    print_immediate_profit([spxl_blr_6_vals], spxl_profits, 0.85, 'SPXL', 'BLR-6')
                    print_immediate_profit([spxs_blr_6_vals], spxs_profits, 0.85, 'SPXS', 'BLR-6')
                    print_immediate_profit([spxl_blr_10_vals], spxl_profits, 0.85, 'SPXL', 'BLR-10')
                    print_immediate_profit([spxs_blr_10_vals], spxs_profits, 0.85, 'SPXS', 'BLR-10')
                    print_immediate_profit([spxl_blr_25_vals], spxl_profits, 0.85, 'SPXL', 'BLR-25')
                    print_immediate_profit([spxs_blr_25_vals], spxs_profits, 0.85, 'SPXS', 'BLR-25')

                    # Print immediate profits when BLR strength >= 95%.
                    print_immediate_profit([spxl_blr_6_vals], spxl_profits, 0.95, 'SPXL', 'BLR-6')
                    print_immediate_profit([spxs_blr_6_vals], spxs_profits, 0.95, 'SPXS', 'BLR-6')
                    print_immediate_profit([spxl_blr_10_vals], spxl_profits, 0.95, 'SPXL', 'BLR-10')
                    print_immediate_profit([spxs_blr_10_vals], spxs_profits, 0.95, 'SPXS', 'BLR-10')
                    print_immediate_profit([spxl_blr_25_vals], spxl_profits, 0.95, 'SPXL', 'BLR-25')
                    print_immediate_profit([spxs_blr_25_vals], spxs_profits, 0.95, 'SPXS', 'BLR-25')

                    # Print SPXL immediate profit when second 2 BLR strengths >= 90%.
                    print_immediate_profit([spxl_blr_10_vals, spxl_blr_25_vals], spxl_profits,
                                           0.9, 'SPXL', 'BLR-10-25')

                    # Print SPXL immediate profit when all BLR strengths >= 30%.
                    print_immediate_profit([spxl_blr_6_vals, spxl_blr_10_vals, spxl_blr_25_vals], spxl_profits,
                                           0.3, 'SPXL', 'BLR-6-10-25')
                    """

                    # Print SPXL:SPXS profit ratio when BLR strength >= 60%.
                    print_profit_ratio([spxl_blr_setup_vals], spxl_profits,
                                       spxs_profits, 0.6, 'BLR-3')
                    print_profit_ratio([spxl_blr_10_vals], spxl_profits,
                                       spxs_profits, 0.6, 'BLR-10')
                    print_profit_ratio([spxl_blr_25_vals], spxl_profits,
                                       spxs_profits, 0.6, 'BLR-25')

                    # Print SPXL:SPXS profit ratio when BLR strength >= 85%.
                    print_profit_ratio([spxl_blr_setup_vals], spxl_profits,
                                       spxs_profits, 0.85, 'BLR-3')
                    print_profit_ratio([spxl_blr_10_vals], spxl_profits,
                                       spxs_profits, 0.85, 'BLR-10')
                    print_profit_ratio([spxl_blr_25_vals], spxl_profits,
                                       spxs_profits, 0.85, 'BLR-25')

                    # Print SPXL:SPXS profit ratio when BLR strength >= 95%.
                    print_profit_ratio([spxl_blr_setup_vals], spxl_profits,
                                       spxs_profits, 0.95, 'BLR-3')
                    print_profit_ratio([spxl_blr_10_vals], spxl_profits,
                                       spxs_profits, 0.95, 'BLR-10')
                    print_profit_ratio([spxl_blr_25_vals], spxl_profits,
                                       spxs_profits, 0.95, 'BLR-25')

                    # Print SPXL:SPXS profit ratio when long BLR strengths >= 60%.
                    print_profit_ratio([spxl_blr_10_vals, spxl_blr_25_vals],
                                       spxl_profits, spxs_profits, 0.6,
                                       'BLR-10-25')

                    # Print expected min profit when osc_val >= 60%.
                    print_immediate_profit([spxl_blr_setup_vals], [
                        min(spxl_profits[i], spxs_profits[i])
                        for i in range(len(spxl_profits))
                    ], 0, '', 'oscillating... N/A')

                    # Print killswitch floor when osc_val >= 60%.
                    print_killswitch_floor([spxl_blr_setup_vals], [
                        max(spxl_floors[i], spxs_floors[i])
                        for i in range(len(spxl_floors))
                    ], 0, '', 'oscillating... N/A')

                except Exception as e:
                    # Deliberate best-effort: a failed trial is skipped.
                    # live_env.warn_process(f'BLR Experiment error: {traceback.format_exc()}')
                    continue