# Example #1
def main(dataset: str, target: str, pipeline: str):
    """Export per-day and aggregated SHAP-value CSV's for every symbol.

    For each symbol in SYMBOLS, loads the dataset/model pair identified by
    (dataset, target, pipeline); then, for every test window of the model:
      - computes SHAP values of each day's estimator on its own training data,
      - writes one CSV per (day, class) under data/shap_values/.../daily/,
      - writes one aggregated mean(|SHAP|) CSV per class,
      - writes one CSV with the SHAP expected (base) values per day.

    Args:
        dataset: dataset name to load for each symbol.
        target: target/label column name (also used for feature selection).
        pipeline: pipeline name identifying the trained model.
    """
    ds_service = DatasetService()
    m_service = ModelService()
    for symbol in SYMBOLS:
        print(f"Exporting shap dataframes for symbol {symbol}")
        ds = ds_service.get_dataset(name=dataset, symbol=symbol)
        # Features previously selected via SHAP importances for this target
        fs = DatasetService.get_feature_selection(ds=ds,
                                                  method='importances_shap',
                                                  target=target)
        X_all = ds_service.get_dataset_features(ds=ds, columns=fs.features)
        model = m_service.get_model(pipeline=pipeline,
                                    dataset=dataset,
                                    target=target,
                                    symbol=symbol)
        for t in model.tests:
            os.makedirs(
                f"data/shap_values/{dataset}/{target}/{pipeline}/daily",
                exist_ok=True)
            # "{label}" is kept as a literal placeholder in the path and
            # filled in later with str.format for each class label.
            placeholder = "{label}"
            csv_name = f"data/shap_values/{dataset}/{target}/{pipeline}/shap_training_window_{symbol}_{placeholder}_Wdays{t.window['days']}_.csv"
            day_csv_name = f"data/shap_values/{dataset}/{target}/{pipeline}/daily/shap_training_window_{symbol}_{placeholder}_Wdays{t.window['days']}_"
            print(f"Loading estimators for test {t.window}")
            estimators = ModelService.load_test_estimators(model=model, mt=t)
            # One list of per-day mean(|SHAP|) records per class (SELL/HOLD/BUY)
            shaps = [[], [], []]
            shap_expected = []

            print("Calculating shap values")
            for est in tqdm(estimators):
                training_data = est.train_x.astype(np.float64).fillna(value=0)

                shap_v, shap_exp = get_shap_values(estimator=est.named_steps.c,
                                                   X=training_data,
                                                   X_train=training_data,
                                                   bytes=False)

                # Binary explainers return a single float base value; pad so
                # every record is [time, sell, hold, buy].
                if isinstance(shap_exp, float):
                    shap_expected.append([est.day] + [0, 0, shap_exp])
                else:
                    shap_expected.append([est.day] + [v for v in shap_exp])
                for cls, label in enumerate(["SELL", "HOLD", "BUY"]):
                    df = pd.DataFrame(shap_v[cls],
                                      index=est.train_x.index,
                                      columns=est.train_x.columns)
                    # Save shap values dataframe for each day; the day name is
                    # the ISO timestamp stripped of separators and UTC offset.
                    dayname = est.day.replace('+00:00',
                                              '').replace('T', '').replace(
                                                  ':', '').replace('-', '')
                    day_class_csv_name = day_csv_name.format(
                        label=label) + f"DAY{dayname}.csv"
                    df.to_csv(day_class_csv_name, index_label='time')

                    # Collect per-feature mean(|SHAP|) for the aggregate CSV
                    df_abs_mean = df.abs().mean().to_dict()
                    df_abs_mean['time'] = est.day
                    shaps[cls].append(df_abs_mean)

            # Merge shap values in an unique dataframe and save to csv for each class
            for cls, label in enumerate(["SELL", "HOLD", "BUY"]):
                class_csv_name = csv_name.format(label=label)
                print(
                    f"Exporting dataframe for class {label} -> {class_csv_name}"
                )
                cdf = pd.DataFrame.from_records(shaps[cls])
                cdf.index = pd.to_datetime(cdf.time)
                # Drop the now-redundant 'time' column (kept as the index)
                cdf = cdf[cdf.columns.difference(['time'])]
                cdf.to_csv(class_csv_name, index_label='time')

            expected_csv_name = csv_name.format(label='SHAP_expected')
            print(
                f"Exporting expected values dataframe -> {expected_csv_name}")
            edf = pd.DataFrame(
                shap_expected,
                columns=[
                    "time", "shap_expected_sell", "shap_expected_hold",
                    "shap_expected_buy"
                ],
            )
            edf.to_csv(expected_csv_name, index_label='time')

            print(f"Exported symbol {symbol}.")

        print(f"Plotted {symbol}")
def main(dataset: str, target: str):
    """Render per-class SHAP summary figures from previously exported CSV's.

    For every (pipeline, symbol) pair, loads the trained model's test results
    and the SHAP CSV's produced by the export step, then for each test window
    and each class (SELL/HOLD/BUY) draws a 3-row figure:
      row 0: 7-day rolling precision for the class,
      row 1: per-day mean(|SHAP|) lines for the top-spread features,
      row 2: beeswarm plots at the first/middle/last test day.
    Figures are saved under images/shap-test-final/.

    Args:
        dataset: dataset name to load for each symbol.
        target: target/label column name (also used for feature selection).
    """
    num_shap_plots = 3  # beeswarm subplots per figure (first/middle/last day)
    shap_show_count = 10  # top-N features displayed in each plot

    ds_service = DatasetService()
    m_service = ModelService()
    for pipeline in PIPELINES:
        for symbol in SYMBOLS:
            print(
                f"Plotting shap dataframes for pipeline {pipeline} symbol {symbol}"
            )
            ds = ds_service.get_dataset(name=dataset, symbol=symbol)
            fs = DatasetService.get_feature_selection(
                ds=ds, method='importances_shap', target=target)
            X_all = ds_service.get_dataset_features(ds=ds, columns=fs.features)
            # NOTE(review): y_all is never used below — candidate for removal.
            y_all = ds_service.get_dataset_target(ds=ds, name=target)
            model = m_service.get_model(pipeline=pipeline,
                                        dataset=dataset,
                                        target=target,
                                        symbol=symbol)
            for t in model.tests:
                # "{label}" stays a literal placeholder, filled via str.format
                placeholder = "{label}"
                csv_name = f"data/shap_values/{dataset}/{target}/{pipeline}/shap_training_window_{symbol}_{placeholder}_Wdays{t.window['days']}_.csv"
                expected_csv_name = csv_name.format(label='SHAP_expected')
                print(f"Loading results for test {t.window}")
                results = ModelService.parse_test_results(test=t)
                # NOTE(review): exp_shap_df is loaded but never used below.
                exp_shap_df = pd.read_csv(expected_csv_name,
                                          index_col='time',
                                          parse_dates=True)
                for cls, label in enumerate(["SELL", "HOLD", "BUY"]):
                    class_csv_name = csv_name.format(label=label)
                    cls_shap_df = pd.read_csv(class_csv_name,
                                              index_col='time',
                                              parse_dates=True)
                    # Restrict SHAP rows to this test window
                    cls_shap_df = cls_shap_df.loc[t.test_interval.begin:t.
                                                  test_interval.end]

                    # NOTE(review): x_train and chunk_size are computed but
                    # never used below.
                    x_train = X_all.loc[cls_shap_df.index]
                    chunk_size = int(cls_shap_df.shape[0] / num_shap_plots)

                    fig = plt.figure(constrained_layout=True,
                                     figsize=(100, 50),
                                     dpi=300)  #
                    # 3 rows: precision / shap lines / beeswarm subplots
                    gs = GridSpec(3,
                                  num_shap_plots,
                                  figure=fig,
                                  wspace=1.5,
                                  hspace=0.3)
                    precision_ax = fig.add_subplot(gs[0, :])
                    shap_values_ax = fig.add_subplot(gs[1, :])
                    beeswarms_axs = [
                        fig.add_subplot(gs[2, i])
                        for i in range(num_shap_plots)
                    ]
                    #format_axes(fig)
                    shap_plot_labels = set()
                    # Day names: ISO timestamps stripped of separators/offset,
                    # matching the per-day CSV naming used by the export step.
                    first_shap_day = results.iloc[0]['time'].replace(
                        '+00:00',
                        '').replace('T', '').replace(':', '').replace('-', '')
                    middle_shap_day = results.iloc[int(
                        results.shape[0] / 2)]['time'].replace(
                            '+00:00',
                            '').replace('T', '').replace(':',
                                                         '').replace('-', '')
                    last_shap_day = results.iloc[-1]['time'].replace(
                        '+00:00',
                        '').replace('T', '').replace(':', '').replace('-', '')
                    for idx, dayname in enumerate(
                        [first_shap_day, middle_shap_day, last_shap_day]):
                        day_csv_name = f"data/shap_values/{dataset}/{target}/{pipeline}/daily/shap_training_window_{symbol}_{label}_Wdays{t.window['days']}_DAY{dayname}.csv"

                        # Plot each section's SHAP values
                        cdf_subset = pd.read_csv(day_csv_name,
                                                 index_col='time',
                                                 parse_dates=True)
                        train_subset = X_all.loc[cdf_subset.index]

                        # Get a rank of feature labels based on this section's shap values
                        abs_mean_shap = cdf_subset.abs().mean(axis='index')
                        abs_mean_rank = abs_mean_shap.sort_values(
                            ascending=False)[:shap_show_count]
                        for l in abs_mean_rank.index:
                            # Save labels for features in the top-N
                            shap_plot_labels.add(l)

                        # Plot this section's SHAP values
                        plt.sca(beeswarms_axs[idx])
                        shap.summary_plot(cdf_subset.values,
                                          train_subset,
                                          max_display=shap_show_count,
                                          show=False,
                                          color_bar=False,
                                          sort=True)
                        min_date = cdf_subset.index.min().to_pydatetime()
                        max_date = cdf_subset.index.max().to_pydatetime(
                        ) + timedelta(days=1)
                        min_date_f = min_date.strftime("%Y/%m/%d")
                        max_date_f = max_date.strftime("%Y/%m/%d")
                        beeswarms_axs[idx].set_xlabel(
                            f"SHAP values\nWindow: {min_date_f} - {max_date_f}",
                            fontsize=8)
                        beeswarms_axs[idx].tick_params(axis='y',
                                                       which='major',
                                                       labelsize=6)
                        beeswarms_axs[idx].tick_params(axis='x',
                                                       which='major',
                                                       labelsize=8)

                    # Plot shap values
                    day_csv_name = f"data/shap_values/{dataset}/{target}/{pipeline}/shap_training_window_{symbol}_{label}_Wdays{t.window['days']}_.csv"
                    plot_cls_shap_df = pd.read_csv(day_csv_name,
                                                   index_col='time',
                                                   parse_dates=True)

                    def get_spread(series):
                        # Absolute max-min range of one feature's SHAP series
                        return np.abs(series.max() - series.min())

                    # Rank the union of top-N labels by SHAP spread; keep top-N
                    plot_rank = plot_cls_shap_df[list(shap_plot_labels)].apply(
                        get_spread, axis='index').sort_values(
                            ascending=False)[:shap_show_count]
                    # NOTE(review): the comprehension variable `t` shadows the
                    # test loop variable, but only inside comprehension scope.
                    plot_cls_shap_df['xlabel'] = [
                        t.to_pydatetime().strftime("%Y/%m/%d")
                        for t in plot_cls_shap_df.index
                    ]
                    shap_ax = plot_cls_shap_df.plot(
                        x='xlabel',
                        y=[c for c in plot_rank.index],
                        kind='line',
                        ax=shap_values_ax,
                        legend=False,
                        xlabel='')
                    patches, labels = shap_ax.get_legend_handles_labels()
                    shap_ax.legend(patches,
                                   labels,
                                   loc='center left',
                                   bbox_to_anchor=(1, 0.5),
                                   prop={'size': 6})
                    shap_ax.tick_params(axis='x', which='major', labelsize=8)
                    shap_ax.set_ylabel('mean(|SHAP|)', fontsize=6)
                    #shap_ax.tick_params(labelbottom=False, labelleft=False)

                    # Get Metrics scores dataframe (7-day rolling mean)
                    cri_df = get_metrics_df(results).rolling(
                        7, min_periods=1).mean()
                    cri_df['xlabel'] = [
                        t.to_pydatetime().strftime("%Y/%m/%d")
                        for t in cri_df.index
                    ]
                    cri_ax = cri_df.plot(x='xlabel',
                                         y=f"pre_{cls}",
                                         kind='line',
                                         ax=precision_ax,
                                         legend=False,
                                         xlabel='')
                    patches, labels = cri_ax.get_legend_handles_labels()
                    cri_ax.legend(patches,
                                  labels,
                                  loc='center left',
                                  bbox_to_anchor=(1, 0.5),
                                  prop={'size': 6})
                    cri_ax.set_ylabel('mean(precision)', fontsize=6)
                    cri_ax.tick_params(labelbottom=False, labelleft=True)

                    min_date = cri_df.index.min().to_pydatetime().strftime(
                        "%Y/%m/%d")
                    max_date = cri_df.index.max().to_pydatetime().strftime(
                        "%Y/%m/%d")
                    window = t.window['days']
                    fig.suptitle(
                        f"{symbol}, {pipeline}, W={window}D, Class {label}, From {min_date} to {max_date}"
                    )

                    # fig.show()
                    os.makedirs(f"images/shap-test-final/", exist_ok=True)
                    plt.savefig(
                        f"images/shap-test-final/{pipeline}_W{window}D_{dataset}_{target}_{symbol}_{label}.png",
                        dpi='figure')
                    plt.close()
                    print(f"{label} OK")

            print(f"Exported symbol {symbol}.")
            # # Load day estimator
            # est = load_estimator()

        print(f"Plotted {symbol}")
# Example #3
def main(pipeline: str, dataset: str, symbol: str, window: int):
    """Plot yearly candlestick charts with SELL/HOLD/BUY prediction markers.

    Loads the asset and test for (pipeline, dataset, 'class', symbol, window),
    one-hot encodes the test predictions, anchors the signal markers slightly
    below the daily low, and renders one mplfinance candle+volume figure per
    calendar year in the test interval.

    Args:
        pipeline: pipeline name identifying the trained model.
        dataset: dataset name.
        symbol: traded symbol whose OHLCV data is charted.
        window: training window size in days, used to locate asset and test.
    """
    ds = DatasetService()
    ms = ModelService()
    ts = TradingService()
    ohlcv_ds = ds.get_dataset('ohlcv', symbol=symbol)
    asset = ts.get_asset(pipeline=pipeline,
                         dataset=dataset,
                         target='class',
                         symbol=symbol,
                         window=window,
                         create=False)
    if not asset:
        print(
            f"Asset {pipeline}.{dataset}.class for {symbol} on window {window} not found!"
        )
        return
    test = ms.get_test(pipeline=pipeline,
                       dataset=dataset,
                       target='class',
                       symbol=symbol,
                       window=window)
    if not test:
        # NOTE(review): no `return` here — execution falls through and the
        # `test.test_interval` access below would fail on None. Confirm intent.
        print(
            f"Test {pipeline}.{dataset}.class for {symbol} on window {window} not found!"
        )
    # ohlcv = ohlcv.loc[test.test_interval.begin:test.test_interval.end]
    ohlcv = ds.get_dataset_features(ohlcv_ds,
                                    begin=test.test_interval.begin,
                                    end=test.test_interval.end)
    # iloc[:-1] drops the last row of the parsed results
    test_results = ModelService.parse_test_results(test).iloc[:-1]
    # NOTE(review): enc_label is never used below — candidate for removal.
    enc_label = onehot_target(test_results.label,
                              labels=["is_sell", "is_hold", "is_buy"],
                              fill=False)
    enc_pred = onehot_target(test_results.predicted,
                             labels=["is_sell", "is_hold", "is_buy"],
                             fill=False)

    # Mask predictions with low value minus a certain amount
    signals_level_diff = ohlcv.low * 10 / 100
    signals_level = ohlcv.low - signals_level_diff
    #signals_level = ohlcv.low
    # Replace active (>0) one-hot signals with the anchor price level so the
    # scatter markers plot just below the candles.
    enc_pred.is_sell.mask(enc_pred.is_sell > 0,
                          other=signals_level,
                          inplace=True)
    enc_pred.is_hold.mask(enc_pred.is_hold > 0,
                          other=signals_level,
                          inplace=True)
    enc_pred.is_buy.mask(enc_pred.is_buy > 0,
                         other=signals_level,
                         inplace=True)

    # Get unique years in index to split plots in smaller scale
    unique_years = ohlcv.index.year.unique()
    for year in unique_years:
        year_pred = enc_pred[enc_pred.index.year == year]
        year_ohlcv = ohlcv[ohlcv.index.year == year]

        # Set up xticks
        # NOTE(review): daysToIndex, days and labels are only consumed by the
        # commented-out tick setup below — currently dead code.
        daysToIndex = {
            ts.to_pydatetime(): i
            for i, ts in enumerate(year_ohlcv.index)
        }
        days = [i for i in daysToIndex.values()]
        labels = [
            ts.to_pydatetime().strftime("%Y-%m-%d") for ts in year_ohlcv.index
        ]

        # Setup matplotfinance styles and figure
        s = mpf.make_mpf_style(
            base_mpf_style='binance')  # , rc={'font.size': 6}
        fig = mpf.figure(
            figsize=(16, 8),
            style=s)  # pass in the self defined style to the whole canvas
        fig.suptitle(f"{ohlcv_ds.symbol}, {year}, 1D")

        ax = fig.add_subplot(3, 1, (1, 2))  # main candle stick chart subplot
        av = fig.add_subplot(3, 1, 3, sharex=ax)  # volume candles subplot

        # Setup horizontal grids
        ax.grid(axis='x', color='0.5', linestyle='--')
        av.grid(axis='x', color='0.5', linestyle='--')

        # for a in [ax, av]:
        #     a.set_xticks(ticks=days)
        #     a.set_xticklabels(labels=labels)
        #     a.tick_params(axis='x', labelrotation=90)

        # Overlay plots: faint close-price line plus one scatter per signal
        apds = [
            #     mpf.make_addplot(tcdf)
            # Predictions
            mpf.make_addplot(year_ohlcv.close,
                             ax=ax,
                             type='line',
                             color=(0.5, 0.5, 0.5, 0.05)),
            mpf.make_addplot(year_pred.is_sell,
                             ax=ax,
                             type='scatter',
                             marker='v',
                             color='red'),
            mpf.make_addplot(year_pred.is_hold,
                             ax=ax,
                             type='scatter',
                             marker='_',
                             color='silver'),
            mpf.make_addplot(year_pred.is_buy,
                             ax=ax,
                             type='scatter',
                             marker='^',
                             color='lime'),
        ]

        mpf.plot(
            year_ohlcv,
            type='candle',
            style=s,
            #ylabel='Price ($)',
            ax=ax,
            volume=av,
            #ylabel_lower='Volume',
            show_nontrading=True,
            addplot=apds,
            returnfig=True)
        fig.autofmt_xdate()
        fig.tight_layout()
        plt.show()
        print("Done")
# Example #4
def main(pipeline: str, dataset: str, symbol: str, window: int):
    """Simulate day-by-day margin trading driven by model predictions.

    Replays the test interval of (pipeline, dataset, 'class', symbol, window):
    for each day it manages open long/short positions (stop loss, take profit,
    position age, opposite signal), opens new MARGIN_LONG/MARGIN_SHORT
    positions on BUY/SELL predictions, liquidates everything on the last day,
    and updates the asset's equity plus a buy-and-hold baseline via
    TradingService.

    Args:
        pipeline: pipeline name identifying the trained model.
        dataset: dataset name.
        symbol: traded symbol whose OHLCV data drives the simulation.
        window: training window size in days, used to locate test and asset.
    """
    ds = DatasetService()
    ms = ModelService()
    ts = TradingService()
    ohlcv_ds = ds.get_dataset('ohlcv', symbol=symbol)
    ohlcv = ds.get_dataset_features(
        ohlcv_ds)  # [ohlcv_ds.valid_index_min:ohlcv_ds.valid_index_max]

    # boll = pd.Series(percent_b(ohlcv.close, 21), index=ohlcv.index)
    # Discretized Bollinger %B signal (thresholds 20/80); -1 becomes NaN
    boll = pd.Series(to_discrete_double(percent_b(ohlcv.close, 21), 20, 80),
                     index=ohlcv.index).replace(to_replace=-1, value=np.nan)

    #model = ms.get_model(pipeline, dataset, 'class', symbol)
    _test = ms.get_test(pipeline, dataset, 'class', symbol, window)
    for test in [
            _test
    ]:  # I originally traded all the tests in the model. ToDo: Refactor this.
        # Re-convert classification results from test to a DataFrame
        # NOTE(review): ohlcv_results is never used below.
        ohlcv_results = ohlcv[test.test_interval.begin:test.test_interval.end]
        results = ModelService.parse_test_results(test)

        #results.index = ohlcv_results.index
        # Parse index so it's a DateTimeIndex, because Mongo stores it as a string
        # results.index = pd.to_datetime(results.index)

        asset = ts.get_asset(pipeline=pipeline,
                             dataset=dataset,
                             target='class',
                             symbol=symbol,
                             window=test.window['days'])
        # Now use classification results to trade!
        day_count = results.shape[0]
        cur_day = 0
        # Baseline: precision of the Bollinger signal against the labels
        print(
            "%B_Precision = {}",
            precision_score(results.label,
                            boll.loc[results.index],
                            average='macro',
                            zero_division=0))
        # Amount to buy in coins for buy and hold: $10k divided by first price in test set
        bh_price = ohlcv.close.loc[test.test_interval.begin]
        bh_amount = 10000 / bh_price

        for index, pred in results.iterrows():
            cur_day += 1
            # Get simulation day by converting Pandas' Timestamp to our format
            simulation_day = to_timestamp(index.to_pydatetime())
            # Results dataframe interprets values as float, while they are actually int
            predicted, label = int(pred.predicted), int(pred.label)

            # Grab ohlcv values for current day
            try:
                values = ohlcv.loc[index]
            except KeyError:
                print(f"Day: {index} not in OHLCV index!")
                continue
            try:
                # NOTE(review): `!= np.nan` is always True (NaN != NaN), so
                # boll_sig can still be NaN here — likely meant np.isnan().
                boll_sig = boll.loc[
                    index] if boll.loc[index] != np.nan else None
            except KeyError:
                boll_sig = None
                print(f"Day: {index} not in BOLL index!")
                pass
            # NOTE(review): _index is never used below.
            _index = ohlcv.index.get_loc(index)
            change = TradingService.get_percent_change(values.close,
                                                       values.open)

            # NOTE(review): TARGETS[boll_sig] may fail if boll_sig is
            # None/NaN — verify TARGETS handles those keys.
            print(
                f"Day {cur_day}/{day_count} [{index}] "
                f"[O {values.open} H {values.high} L {values.low} C {values.close}] "
                f"PCT={change}% "
                f"LABEL={TARGETS[label]} BPRED={TARGETS[boll_sig]} PRED={TARGETS[predicted]}"
            )
            # Manage every currently-open position before opening new ones
            open_positions = ts.get_open_positions(asset=asset,
                                                   day=simulation_day)
            for p in open_positions:
                p_age = TradingService.get_position_age(position=p,
                                                        day=simulation_day)
                try:
                    if p.type == 'MARGIN_LONG':
                        # Priority: stop loss > take profit > sell signal >
                        # stale position (> 3 days) > trail stop on BUY
                        if TradingService.check_stop_loss(p, values.low):
                            ts.close_long(asset=asset,
                                          day=simulation_day,
                                          close_price=p.stop_loss,
                                          position=p,
                                          detail='Stop Loss')
                        elif TradingService.check_take_profit(p, values.high):
                            ts.close_long(asset=asset,
                                          day=simulation_day,
                                          close_price=p.take_profit,
                                          position=p,
                                          detail='Take Profit')
                        elif predicted == SELL:
                            ts.close_long(asset=asset,
                                          day=simulation_day,
                                          close_price=values.close,
                                          position=p,
                                          detail='Sell Signal')
                        elif predicted == HOLD and p_age > 86400 * 3:
                            ts.close_long(asset=asset,
                                          day=simulation_day,
                                          close_price=values.close,
                                          position=p,
                                          detail='Age')
                        elif predicted == BUY:
                            if change > 0:
                                ts.update_stop_loss(asset=asset,
                                                    position=p,
                                                    close_price=values.close,
                                                    pct=-0.05)
                    elif p.type == 'MARGIN_SHORT':
                        # Mirror logic for shorts (stop loss on high, take
                        # profit on low, close on BUY signal)
                        if TradingService.check_stop_loss(p, values.high):
                            ts.close_short(asset=asset,
                                           day=simulation_day,
                                           close_price=p.stop_loss,
                                           position=p,
                                           detail='Stop Loss')
                        elif TradingService.check_take_profit(p, values.low):
                            ts.close_short(asset=asset,
                                           day=simulation_day,
                                           close_price=p.take_profit,
                                           position=p,
                                           detail='Take Profit')
                        elif predicted == SELL:
                            # If we had some profit and signal is still SELL, book those by lowering stop loss
                            if change < 0:
                                ts.update_stop_loss(asset=asset,
                                                    position=p,
                                                    close_price=values.close,
                                                    pct=0.05)
                        elif predicted == HOLD and p_age > 86400 * 3:
                            ts.close_short(asset=asset,
                                           day=simulation_day,
                                           close_price=values.close,
                                           position=p,
                                           detail='Age')
                        elif predicted == BUY:
                            ts.close_short(asset=asset,
                                           day=simulation_day,
                                           close_price=values.close,
                                           position=p,
                                           detail='Buy Signal')
                except MessageException as e:
                    print(f"Order handling exception: {e.message}")

            try:
                # If prediction is BUY (price will rise) then open a MARGIN LONG position
                if predicted == BUY:
                    ts.open_long(asset=asset,
                                 day=simulation_day,
                                 close_price=values.close,
                                 size=0.1,
                                 stop_loss=-0.1,
                                 take_profit=0.05)
                # If prediction is SELL (price will drop) open a MARGIN SHORT position
                elif predicted == SELL:
                    ts.open_short(asset=asset,
                                  day=simulation_day,
                                  close_price=values.close,
                                  size=0.1,
                                  stop_loss=0.1,
                                  take_profit=-0.05)
            except MessageException as e:
                print(f"Order placement exception: {e.message}")

            # If this is the last trading day of the period, close all open positions
            if index.timestamp() == results.index[-1].timestamp():
                print("Last trading day reached, liquidating all positions..")
                open_positions = ts.get_open_positions(asset=asset,
                                                       day=simulation_day)
                for p in open_positions:
                    try:
                        if p.type == 'MARGIN_LONG':
                            ts.close_long(asset=asset,
                                          day=simulation_day,
                                          close_price=values.close,
                                          position=p,
                                          detail='Liquidation')
                        elif p.type == 'MARGIN_SHORT':
                            ts.close_short(asset=asset,
                                           day=simulation_day,
                                           close_price=values.close,
                                           position=p,
                                           detail='Liquidation')
                    except MessageException as e:
                        print(f"Order liquidation exception: {e.message}")

            # Update equity value for the asset
            ts.update_equity(asset=asset,
                             day=simulation_day,
                             price=values.close)
            # Update baseline values for the asset
            ts.update_baseline(asset=asset,
                               day=simulation_day,
                               name='buy_and_hold',
                               value=values.close * bh_amount)

        print("Timeframe done.")
def main(dataset: str, target: str, pipeline: str):
    """Load per-class SHAP value CSVs for every symbol/test window of
    (dataset, target, pipeline) and prepare them for plotting.

    NOTE(review): the entire plotting section below is commented out, so at
    the moment this function only loads the exported SHAP CSVs, slices them
    to each test interval and prints progress markers. Several locals
    (hierarchy, hdf, num_shap_plots, shap_show_count, exp_shap_df, results,
    x_train, chunk_size) are consumed only by the disabled code.
    """
    # Feature hierarchy for this dataset/target; only used by the
    # commented-out plotting code below.
    hierarchy = load_hierarchy(f"{dataset}_{target}_feature_hierarchy.yml")
    hdf = pd.DataFrame(hierarchy)

    # Layout knobs for the (disabled) SHAP beeswarm grid.
    num_shap_plots = 3
    shap_show_count = 10

    ds_service = DatasetService()
    m_service = ModelService()
    for symbol in SYMBOLS:
        print(f"Plotting shap dataframes for symbol {symbol}")
        ds = ds_service.get_dataset(name=dataset, symbol=symbol)
        # Features previously selected by SHAP importance for this target.
        fs = DatasetService.get_feature_selection(ds=ds,
                                                  method='importances_shap',
                                                  target=target)
        X_all = ds_service.get_dataset_features(ds=ds, columns=fs.features)
        y_all = ds_service.get_dataset_target(ds=ds, name=target)
        model = m_service.get_model(pipeline=pipeline,
                                    dataset=dataset,
                                    target=target,
                                    symbol=symbol)
        for t in model.tests:
            os.makedirs(
                f"images/shap-test-hierarchy/{dataset}/{target}/{pipeline}/",
                exist_ok=True)
            # "{label}" is substituted below with SHAP_expected / SELL / HOLD / BUY.
            placeholder = "{label}"
            csv_name = f"data/shap_values/{dataset}/{target}/{pipeline}/shap_training_window_{symbol}_{placeholder}_Wdays{t.window['days']}_.csv"
            expected_csv_name = csv_name.format(label='SHAP_expected')
            print(f"Loading results for test {t.window}")
            results = ModelService.parse_test_results(test=t)
            # SHAP expected values exported alongside the per-class frames.
            exp_shap_df = pd.read_csv(expected_csv_name,
                                      index_col='time',
                                      parse_dates=True)
            for cls, label in enumerate(["SELL", "HOLD", "BUY"]):
                class_csv_name = csv_name.format(label=label)
                cls_shap_df = pd.read_csv(class_csv_name,
                                          index_col='time',
                                          parse_dates=True)
                # Restrict the SHAP frame to this test's interval.
                cls_shap_df = cls_shap_df.loc[t.test_interval.begin:t.
                                              test_interval.end]

                x_train = X_all.loc[cls_shap_df.index]
                chunk_size = int(cls_shap_df.shape[0] / num_shap_plots)

                # fig = plt.figure(constrained_layout=True, figsize=(100, 50), dpi=300) #
                # gs = GridSpec(3, num_shap_plots, figure=fig, wspace=1.5, hspace=0.3)
                # precision_ax = fig.add_subplot(gs[0, :])
                # shap_values_ax = fig.add_subplot(gs[1, :])
                # beeswarms_axs = [fig.add_subplot(gs[2, i]) for i in range(num_shap_plots)]
                # #format_axes(fig)
                # shap_plot_labels = set()
                # for idx, start in enumerate(range(0, cls_shap_df.shape[0], chunk_size)):
                #     end = start + chunk_size
                #     left = cls_shap_df.shape[0] - end
                #     if left > 0 and left < chunk_size:
                #         end += left
                #     elif left < 0:
                #         break
                #     # Plot each section's SHAP values
                #     cdf_subset = cls_shap_df.iloc[start:end]
                #     train_subset = x_train.iloc[start:end]
                #
                #     # Get a rank of feature labels based on this section's shap values
                #     abs_mean_shap = cdf_subset.abs().mean(axis='index')
                #     abs_mean_rank = abs_mean_shap.sort_values(ascending=False)[:shap_show_count]
                #     for l in abs_mean_rank.index:
                #         # Save labels for features in the top-N
                #         shap_plot_labels.add(l)
                #
                #     # Plot this section's SHAP values
                #     plt.sca(beeswarms_axs[idx])
                #     shap.summary_plot(
                #         cdf_subset.values,
                #         train_subset,
                #         max_display=shap_show_count,
                #         show=False,
                #         color_bar=False,
                #         sort=True
                #     )
                #     min_date = cdf_subset.index.min().to_pydatetime().strftime("%Y/%m/%d")
                #     max_date = cdf_subset.index.max().to_pydatetime().strftime("%Y/%m/%d")
                #     beeswarms_axs[idx].set_xlabel(f"SHAP values\n{min_date} - {max_date}", fontsize=8)
                #     beeswarms_axs[idx].tick_params(axis='y', which='major', labelsize=6)
                #     beeswarms_axs[idx].tick_params(axis='x', which='major', labelsize=8)

                # # Plot shap values
                # plot_cls_shap_df = cls_shap_df.abs().rolling(7, min_periods=1).mean()
                # def get_spread(series):
                #     return np.abs(series.max() - series.min())
                # plot_rank = plot_cls_shap_df[list(shap_plot_labels)].apply(get_spread, axis='index').sort_values(ascending=False)[:shap_show_count]
                # plot_cls_shap_df['xlabel'] = [t.to_pydatetime().strftime("%Y/%m/%d") for t in plot_cls_shap_df.index]
                # shap_ax = plot_cls_shap_df.plot(
                #     x='xlabel',
                #     y=[c for c in plot_rank.index],
                #     kind='line',
                #     ax=shap_values_ax,
                #     legend=False,
                #     xlabel=''
                # )
                # patches, labels = shap_ax.get_legend_handles_labels()
                # shap_ax.legend(
                #     patches, labels,
                #     loc='center left', bbox_to_anchor=(1, 0.5), prop={'size': 6}
                # )
                # shap_ax.tick_params(axis='x', which='major', labelsize=8)
                # shap_ax.set_ylabel('mean(|SHAP|)', fontsize=6)
                # #shap_ax.tick_params(labelbottom=False, labelleft=False)
                #
                # # Get Metrics scores dataframe
                # cri_df = get_metrics_df(results).rolling(7, min_periods=1).mean()
                # cri_df['xlabel'] = [t.to_pydatetime().strftime("%Y/%m/%d") for t in cri_df.index]
                # cri_ax = cri_df.plot(
                #     x='xlabel',
                #     y=f"pre_{cls}",
                #     kind='line',
                #     ax=precision_ax,
                #     legend=False,
                #     xlabel=''
                # )
                # patches, labels = cri_ax.get_legend_handles_labels()
                # cri_ax.legend(
                #     patches, labels,
                #     loc='center left', bbox_to_anchor=(1, 0.5), prop={'size': 6}
                # )
                # cri_ax.set_ylabel('mean(precision)', fontsize=6)
                # cri_ax.tick_params(labelbottom=False, labelleft=True)
                #
                # min_date = cri_df.index.min().to_pydatetime().strftime("%Y/%m/%d")
                # max_date = cri_df.index.max().to_pydatetime().strftime("%Y/%m/%d")
                # fig.suptitle(f"{pipeline}, {symbol}, class {label} tests from {min_date} to {max_date}")
                #
                # # fig.show()
                # plt.savefig(
                #     f"images/shap-test/{pipeline}_{dataset}_{target}_{symbol}_{label}.png",
                #     dpi='figure'
                # )
                # plt.close()
                print(f"{label} OK")

            print(f"Exported symbol {symbol}.")
            # # Load day estimator
            # est = load_estimator()

        print(f"Plotted {symbol}")
# Example #6
def main(dataset: str, target: str):
    """Export a per-symbol comparison of model test metrics to an Excel file.

    For each symbol, gathers every pipeline's test results plus its
    classification report, flattens them into one row per model, then writes
    one sheet per symbol to "<dataset>_<target>.xlsx" with a coolwarm
    background gradient, sorted by ascending average precision.

    NOTE(review): iterates module-level `symbols` (lowercase) while sibling
    functions use `SYMBOLS` — confirm both names exist at module scope.
    """
    models = ModelService()
    writer = pd.ExcelWriter("{}_{}.xlsx".format(dataset, target))
    for symbol in symbols:
        model_tests = models.compare_models(symbol, dataset, target)
        result = []
        for m in model_tests:
            test = m["tests"]
            report = test["classification_report"] if "classification_report" in test else None
            results = ModelService.parse_test_results(test)

            def metric(key):
                # Missing classification report -> NaN so the sheet still renders.
                return report[key] if report else np.nan

            row = {
                "pipeline": m["pipeline"],
                "window": test['window']['days'],
                "mean_fit_time": results.fit_time.mean(),
                "mean_predict_time": results.predict_time.mean(),
                "support_all": metric("total_support"),
            }
            # Per-class support counts (classes 0=SELL, 1=HOLD, 2=BUY).
            for cls in range(3):
                row[f"support_{cls}"] = metric(f"sup_{cls}")
            # Per-class precision/recall/specificity/f1/geometric-mean/IBA,
            # in the same column order as the original hand-written dict.
            for cls in range(3):
                row[f"precision_{cls}"] = metric(f"pre_{cls}")
                row[f"recall_{cls}"] = metric(f"rec_{cls}")
                row[f"specificity_{cls}"] = metric(f"spe_{cls}")
                row[f"f1-score_{cls}"] = metric(f"f1_{cls}")
                row[f"geometric_mean_{cls}"] = metric(f"geo_{cls}")
                row[f"index_balanced_accuracy_{cls}"] = metric(f"iba_{cls}")
            # Macro averages across the three classes.
            for col, key in [("precision_avg", "avg_pre"),
                             ("recall_avg", "avg_rec"),
                             ("specificity_avg", "avg_spe"),
                             ("f1-score_avg", "avg_f1"),
                             ("geometric_mean_avg", "avg_geo"),
                             ("index_balanced_accuracy_avg", "avg_iba")]:
                row[col] = metric(key)
            result.append(row)
        df = pd.DataFrame(result)
        # Write to XLSX with conditional formatting by coolwarm color map,
        # ordering by ascending average precision.
        df.sort_values(by='precision_avg', ascending=True)\
            .style.background_gradient(cmap=cm.get_cmap('coolwarm')) \
            .format(None, na_rep="-")\
            .to_excel(writer, sheet_name=symbol, index_label="#", float_format="%0.3f")
        # Widen each column to fit its longest value or its header.
        for column in df:
            column_length = max(df[column].astype(str).map(len).max(), len(column))
            col_idx = df.columns.get_loc(column) + 1
            writer.sheets[symbol].set_column(col_idx, col_idx, column_length)
    writer.close()
def main(dataset: str):
    """Render quarterly backtest plots (signals, trades, equity vs. a
    buy-and-hold baseline) for every pipeline/symbol/window of *dataset*.

    Combinations whose test or trading asset is missing are skipped and
    recorded; all skip records are written to "trading_plotly.<dataset>.log"
    at the end.
    """
    ds = DatasetService()
    ms = ModelService()
    ts = TradingService()
    logs = []  # one line per missing test/asset, flushed to the log file below
    for pipeline in PIPELINES:
        for symbol in SYMBOLS:
            for window in WINDOWS:
                print(
                    f"PIPELINE: {pipeline} SYMBOL: {symbol} WINDOW: {window}")
                ohlcv_ds = ds.get_dataset('ohlcv', symbol=symbol)
                test = ms.get_test(pipeline=pipeline,
                                   dataset=dataset,
                                   target='class',
                                   symbol=symbol,
                                   window=window)
                if not test:
                    print(
                        f"Test {pipeline}.{dataset}.class for {symbol} on window {window} not found!"
                    )
                    logs.append(
                        f"MISSING_TEST {pipeline} {dataset} {symbol} class {window} --features importances_shap --parameters gridsearch\n"
                    )
                    continue
                asset = ts.get_asset(pipeline=pipeline,
                                     dataset=dataset,
                                     target='class',
                                     symbol=symbol,
                                     window=window,
                                     create=False)
                if not asset:
                    print(
                        f"Asset {pipeline}.{dataset}.class for {symbol} on window {window} not found!"
                    )
                    logs.append(
                        f"MISSING_ASSET {pipeline} {dataset} {symbol} {window}\n"
                    )
                    continue

                # Time-indexed frames parsed from the stored trading asset.
                equity = TradingService.parse_equity_df(asset=asset)
                buy_and_hold = TradingService.parse_baseline_df(
                    asset=asset, name='buy_and_hold')
                orders = TradingService.parse_orders_df(asset=asset)

                # Map order position_id to numbers so we don't get a mess in the graph
                # NOTE(review): in-place replace on a DataFrame column may emit
                # SettingWithCopyWarning depending on how `orders` was built —
                # confirm parse_orders_df returns an independent frame.
                position_uids = set(orders.position_id.values)
                for i, uid in enumerate(position_uids):
                    orders.position_id.replace(to_replace=uid,
                                               value=i,
                                               inplace=True)

                ohlcv = ds.get_dataset_features(ohlcv_ds,
                                                begin=test.test_interval.begin,
                                                end=test.test_interval.end)
                # Drop the last row: presumably an incomplete final day — confirm.
                test_results = ModelService.parse_test_results(test).iloc[:-1]
                # Mask predictions with low value minus a certain amount
                # (markers are drawn 10% below the daily low).
                signals_level_diff = ohlcv.low * 10 / 100
                signals_level = ohlcv.low - signals_level_diff
                enc_pred = onehot_target(
                    test_results.predicted,
                    labels=["is_sell", "is_hold", "is_buy"],
                    fill=False)
                #  In case of classifier bias (due to input bias) some classes are ignored.
                # In such cases, enc_pred won't contain the ignored classes.
                # Add them back by nan-filling (never selected)
                if hasattr(enc_pred, 'is_sell'):
                    use_idx = enc_pred.is_sell > 0
                    enc_pred.is_sell.mask(
                        use_idx,
                        other=signals_level.loc[enc_pred.index],
                        inplace=True)
                else:
                    enc_pred['is_sell'] = pd.Series(np.nan,
                                                    index=enc_pred.index)
                if hasattr(enc_pred, 'is_hold'):
                    enc_pred.is_hold.mask(
                        enc_pred.is_hold > 0,
                        other=signals_level.loc[enc_pred.index],
                        inplace=True)
                else:
                    enc_pred['is_hold'] = pd.Series(np.nan,
                                                    index=enc_pred.index)
                if hasattr(enc_pred, 'is_buy'):
                    enc_pred.is_buy.mask(
                        enc_pred.is_buy > 0,
                        other=signals_level.loc[enc_pred.index],
                        inplace=True)
                else:
                    enc_pred['is_buy'] = pd.Series(np.nan,
                                                   index=enc_pred.index)

                # Get unique years in index to split plots in smaller scale
                unique_years = ohlcv.index.year.unique()
                for year in unique_years:
                    year_ohlcv = ohlcv[ohlcv.index.year == year]
                    year_pred = enc_pred[enc_pred.index.year == year]
                    year_equity = equity[equity.index.year == year]
                    year_buy_and_hodl = buy_and_hold[buy_and_hold.index.year ==
                                                     year]
                    year_orders = orders[orders.index.year == year]

                    unique_quarters = year_ohlcv.index.quarter.unique()
                    for quarter in unique_quarters:
                        # Slice each frame down to one calendar quarter.
                        q_ohlcv = year_ohlcv[year_ohlcv.index.quarter ==
                                             quarter]
                        q_pred = year_pred[year_pred.index.quarter == quarter]
                        q_equity = year_equity[year_equity.index.quarter ==
                                               quarter]
                        q_orders = year_orders[year_orders.index.quarter ==
                                               quarter]
                        q_buy_and_hodl = year_buy_and_hodl[
                            year_buy_and_hodl.index.quarter == quarter]
                        #f"{ohlcv_ds.symbol}, {year} - Q{quarter}, 1D", 'Trades', 'Equity'
                        img_path = f"images/backtests-final/{pipeline}-{dataset}-class-W{window}/{symbol}/"
                        img_name = f"trades-{year}-Q{quarter}.png"
                        # Skip quarters already rendered on a previous run.
                        if os.path.exists(f"{img_path}/{img_name}"):
                            print(f"[SKIP] File exists {img_path}/{img_name}")
                            continue
                        make_plot(
                            ohlcv=q_ohlcv,
                            orders=q_orders,
                            equity=q_equity,
                            baselines=[('Buy and Hold', q_buy_and_hodl)],
                            pred=q_pred,
                            signals_title=
                            f"{ohlcv_ds.symbol}, {pipeline}, W={window}D, {year} - Q{quarter}, 1D",
                            img_path=img_path,
                            img_name=img_name,
                            bollinger=True)
                        print(
                            f"{year}-Q{quarter} saved to {img_path}{img_name}")
    with open(f"trading_plotly.{dataset}.log", "w") as f:
        f.writelines(logs)
    print("Logs saved")