Example #1
0
    def from_moonshot_csv(cls, filepath_or_buffer,
                          trim_outliers=None,
                          riskfree=0,
                          compound=True,
                          rolling_sharpe_window=200):
        """
        Creates a DailyPerformance instance from a Moonshot backtest results CSV.

        Parameters
        ----------
        filepath_or_buffer : str or file-like object
            filepath or file-like object of the CSV

        trim_outliers: int or float, optional
            discard returns that are more than this many standard deviations from
            the mean. Useful for dealing with data anomalies that cause large
            spikes in plots.

        riskfree : float, optional
            the riskfree rate (default 0)

        compound : bool
             True for compound/geometric returns, False for arithmetic returns (default True)

        rolling_sharpe_window : int, optional
            compute rolling Sharpe over this many periods (default 200)

        Returns
        -------
        DailyPerformance

        Examples
        --------
        Plot cumulative returns:

        >>> perf = DailyPerformance.from_moonshot_csv("backtest_results.csv")
        >>> perf.cum_returns.plot()
        """
        try:
            results = read_moonshot_csv(filepath_or_buffer)
        except ValueError as e:
            # "ValueError: 'Date' is not in list" might mean the user passed
            # a paramscan csv by mistake; any other ValueError is not ours
            # to interpret, so propagate it unchanged
            if "Date" not in repr(e):
                raise
            # if a file-like object was passed, the failed read above already
            # consumed it; rewind so the diagnostic re-read sees the data
            if hasattr(filepath_or_buffer, "seek"):
                filepath_or_buffer.seek(0)
            results = pd.read_csv(filepath_or_buffer)
            if "StrategyOrDate" in results.columns:
                # chain to the original ValueError for a complete traceback
                raise MoonchartError(
                    "this is a parameter scan CSV, please use "
                    "ParamscanTearsheet.from_moonshot_csv") from e
            else:
                raise

        return cls._from_moonshot(
            results, trim_outliers=trim_outliers,
            riskfree=riskfree,
            compound=compound,
            rolling_sharpe_window=rolling_sharpe_window)
    def test_intraday_aggregate(self):
        """read_moonshot_csv should round-trip an intraday aggregate results CSV."""
        expected = INTRADAY_AGGREGATE_RESULTS
        frame = pd.DataFrame.from_dict(expected)
        frame.index.set_names(["Field", "Date", "Time"], inplace=True)
        frame.to_csv("results.csv")

        roundtripped = read_moonshot_csv("results.csv").reset_index()
        # Date is parsed back as datetime64; re-format it as a string so the
        # dict comparison against the original fixture succeeds
        roundtripped.loc[:, "Date"] = roundtripped.Date.dt.strftime("%Y-%m-%d")
        roundtripped = roundtripped.set_index(["Field", "Date", "Time"])

        self.assertDictEqual(roundtripped.to_dict(), expected)
    def test_eod_detailed(self):
        """read_moonshot_csv should round-trip an end-of-day detailed results CSV."""
        expected = EOD_DETAILED_RESULTS
        frame = pd.DataFrame.from_dict(expected)
        frame.index.set_names(["Field", "Date"], inplace=True)
        frame.to_csv("results.csv")

        roundtripped = read_moonshot_csv("results.csv").reset_index()
        # Date is parsed back as datetime64; re-format it as a string so the
        # dict comparison against the original fixture succeeds
        roundtripped.loc[:, "Date"] = roundtripped.Date.dt.strftime("%Y-%m-%d")
        roundtripped = roundtripped.set_index(["Field", "Date"])

        self.assertDictEqual(roundtripped.to_dict(), expected)
    def test_intraday_detailed(self):
        """read_moonshot_csv should round-trip an intraday detailed results CSV."""
        expected = INTRADAY_DETAILED_RESULTS
        frame = pd.DataFrame.from_dict(expected)
        frame.index.set_names(["Field", "Date", "Time"], inplace=True)
        frame.to_csv("results.csv")

        roundtripped = read_moonshot_csv("results.csv").reset_index()
        # Date is parsed back as datetime64; re-format it as a string so the
        # dict comparison against the original fixture succeeds
        roundtripped.loc[:, "Date"] = roundtripped.Date.dt.strftime("%Y-%m-%d")
        roundtripped = roundtripped.set_index(["Field", "Date", "Time"])

        # NaNs come back from the CSV; convert to None to match the fixture
        roundtripped = roundtripped.where(roundtripped.notnull(), None)

        self.assertDictEqual(roundtripped.to_dict(), expected)
    def test_aggregate_intraday_to_daily(self):
        """intraday_to_daily should aggregate intraday aggregate results to daily bars."""
        frame = pd.DataFrame.from_dict(INTRADAY_AGGREGATE_RESULTS)
        frame.index.set_names(["Field", "Date", "Time"], inplace=True)
        frame.to_csv("results.csv")

        daily = intraday_to_daily(read_moonshot_csv("results.csv"))

        daily = daily.reset_index()
        # re-format Date as a string so tuple keys in to_dict() are comparable
        daily.loc[:, "Date"] = daily.Date.dt.strftime("%Y-%m-%d")
        daily = daily.set_index(["Field", "Date"])

        # each field is collapsed per-day with its own aggregation method
        # (noted inline: max / last / sum / mean)
        expected = {
            'fx-revert': {
                # max
                ('AbsExposure', '2018-12-18'): 0.0,
                ('AbsExposure', '2018-12-19'): 1.0,
                # max
                ('AbsWeight', '2018-12-18'): 0.0,
                ('AbsWeight', '2018-12-19'): 1.0,
                # last
                ('Benchmark', '2018-12-18'): 1.13606,
                ('Benchmark', '2018-12-19'): 1.142185,
                # sum
                ('Commission', '2018-12-18'): 0.00022,
                ('Commission', '2018-12-19'): 0.0001,
                # mean
                ('NetExposure', '2018-12-18'): 0.0,
                ('NetExposure', '2018-12-19'): 1.5,
                # sum
                ('Return', '2018-12-18'): 0.0,
                ('Return', '2018-12-19'): 0.0005820400950750315,
                # sum
                ('Slippage', '2018-12-18'): 4e-05,
                ('Slippage', '2018-12-19'): 5.000000000000001e-05,
                # max
                ('TotalHoldings', '2018-12-18'): 0.0,
                ('TotalHoldings', '2018-12-19'): 5.0,
                # sum
                ('Turnover', '2018-12-18'): 0.1,
                ('Turnover', '2018-12-19'): 0.30000000000000004
            }
        }

        self.assertDictEqual(daily.to_dict(), expected)
def from_moonshot_csv(filepath_or_buffer, **kwargs):
    """
    Creates a full tear sheet from a moonshot backtest results CSV.

    Additional kwargs are passed to :class:`pyfolio.create_full_tear_sheet`.

    Parameters
    ----------
    filepath_or_buffer : str or file-like object
        filepath or file-like object of the CSV

    Returns
    -------
    None
    """
    # parse the CSV, then hand off to the DataFrame-based entry point
    return from_moonshot(read_moonshot_csv(filepath_or_buffer), **kwargs)
Example #7
0
    def from_pnl_csv(cls, filepath_or_buffer,
                          trim_outliers=None,
                          riskfree=0,
                          compound=True,
                          rolling_sharpe_window=200):
        """
        Creates a DailyPerformance instance from a PNL CSV.

        Parameters
        ----------
        filepath_or_buffer : str or file-like object
            filepath or file-like object of the CSV

        trim_outliers: int or float, optional
            discard returns that are more than this many standard deviations from the mean

        riskfree : float, optional
            the riskfree rate (default 0)

        compound : bool
             True for compound/geometric returns, False for arithmetic returns (default True)

        rolling_sharpe_window : int, optional
            compute rolling Sharpe over this many periods (default 200)

        Returns
        -------
        DailyPerformance

        Examples
        --------
        Plot cumulative returns:

        >>> perf = DailyPerformance.from_pnl_csv("pnl.csv")
        >>> perf.cum_returns.plot()
        """
        # parse the CSV, then delegate construction to the shared PNL builder
        options = dict(
            trim_outliers=trim_outliers,
            riskfree=riskfree,
            compound=compound,
            rolling_sharpe_window=rolling_sharpe_window,
        )
        return cls._from_pnl(read_moonshot_csv(filepath_or_buffer), **options)
    def test_detailed_intraday_to_daily(self):
        """intraday_to_daily should aggregate intraday detailed results to daily bars per instrument."""
        frame = pd.DataFrame.from_dict(INTRADAY_DETAILED_RESULTS)
        frame.index.set_names(["Field", "Date", "Time"], inplace=True)
        frame.to_csv("results.csv")

        daily = intraday_to_daily(read_moonshot_csv("results.csv"))

        daily = daily.reset_index()
        # re-format Date as a string so tuple keys in to_dict() are comparable
        daily.loc[:, "Date"] = daily.Date.dt.strftime("%Y-%m-%d")
        daily = daily.set_index(["Field", "Date"])

        # NaNs come back from the CSV; convert to None to match the fixture
        daily = daily.where(daily.notnull(), None)

        # each field is collapsed per-day with its own aggregation method
        # (noted inline: max / last / sum / mean)
        expected = {
            'EUR.USD(12087792)': {  # max
                ('AbsExposure', '2018-12-18'): 0.0,
                ('AbsExposure', '2018-12-19'): 0.2,
                # max
                ('AbsWeight', '2018-12-18'): 0.0,
                ('AbsWeight', '2018-12-19'): 0.2,
                # last
                ('Benchmark', '2018-12-18'): 1.13606,
                ('Benchmark', '2018-12-19'): 1.142185,
                # sum
                ('Commission', '2018-12-18'): 1e-05,
                ('Commission', '2018-12-19'): 2e-05,
                # mean
                ('NetExposure', '2018-12-18'): 0.0,
                ('NetExposure', '2018-12-19'): 0.20000000000000004,
                # sum
                ('Return', '2018-12-18'): 0.0,
                ('Return', '2018-12-19'): 0.00035730412741801225,
                # sum
                ('Slippage', '2018-12-18'): 0.0,
                ('Slippage', '2018-12-19'): 4e-05,
                # max
                ('TotalHoldings', '2018-12-18'): 0.0,
                ('TotalHoldings', '2018-12-19'): 2.0,
                # sum
                ('Turnover', '2018-12-18'): 0.0,
                ('Turnover', '2018-12-19'): 0.30000000000000004,
                # mean
                ('Weight', '2018-12-18'): 0.0,
                ('Weight', '2018-12-19'): 0.15
            },
            'GBP.USD(12087797)': {  # max
                ('AbsExposure', '2018-12-18'): 0.0,
                ('AbsExposure', '2018-12-19'): 0.2,
                # max
                ('AbsWeight', '2018-12-18'): 0.0,
                ('AbsWeight', '2018-12-19'): 0.2,
                # last
                ('Benchmark', '2018-12-18'): None,
                ('Benchmark', '2018-12-19'): None,
                # sum
                ('Commission', '2018-12-18'): 0.0,
                ('Commission', '2018-12-19'): 3.0000000000000004e-05,
                # mean
                ('NetExposure', '2018-12-18'): 0.0,
                ('NetExposure', '2018-12-19'): 0.20000000000000004,
                # sum
                ('Return', '2018-12-18'): 0.0,
                ('Return', '2018-12-19'): 0.0007114899464124138,
                # sum
                ('Slippage', '2018-12-18'): 0.0,
                ('Slippage', '2018-12-19'): 3.0000000000000004e-05,
                # max
                ('TotalHoldings', '2018-12-18'): 0.0,
                ('TotalHoldings', '2018-12-19'): 1.0,
                # sum
                ('Turnover', '2018-12-18'): 0.0,
                ('Turnover', '2018-12-19'): 0.0,
                # mean
                ('Weight', '2018-12-18'): 0.0,
                ('Weight', '2018-12-19'): 0.20000000000000004
            }
        }

        self.assertDictEqual(daily.to_dict(), expected)
Example #9
0
    def from_moonshot_csv(cls,
                          filepath_or_buffer,
                          start_date=None,
                          end_date=None,
                          trim_outliers=None,
                          how_to_aggregate=None,
                          riskfree=0,
                          compound=True,
                          rolling_sharpe_window=200):
        """
        Creates a DailyPerformance instance from a Moonshot backtest results CSV.

        Parameters
        ----------
        filepath_or_buffer : str or file-like object
            filepath or file-like object of the CSV

        start_date : str (YYYY-MM-DD), optional
            truncate at this start date (otherwise include entire date range)

        end_date : str (YYYY-MM-DD), optional
            truncate at this end date (otherwise include entire date range)

        trim_outliers: int or float, optional
            discard returns that are more than this many standard deviations from
            the mean. Useful for dealing with data anomalies that cause large
            spikes in plots.

        how_to_aggregate : dict, optional
            a dict of {fieldname: aggregation method} specifying how to aggregate
            fields from intraday to daily. See the docstring for
            `moonchart.utils.intraday_to_daily` for more details.

        riskfree : float, optional
            the riskfree rate (default 0)

        compound : bool
             True for compound/geometric returns, False for arithmetic returns (default True)

        rolling_sharpe_window : int, optional
            compute rolling Sharpe over this many periods (default 200)

        Returns
        -------
        DailyPerformance

        Examples
        --------
        Plot cumulative returns:

        >>> perf = DailyPerformance.from_moonshot_csv("backtest_results.csv")
        >>> perf.cum_returns.plot()
        """
        try:
            results = read_moonshot_csv(filepath_or_buffer)
        except ValueError as e:
            # "ValueError: 'Date' is not in list" might mean the user passed
            # a paramscan csv by mistake; any other ValueError is not ours
            # to interpret, so propagate it unchanged
            if "Date" not in repr(e):
                raise
            # if a file-like object was passed, the failed read above already
            # consumed it; rewind so the diagnostic re-read sees the data
            if hasattr(filepath_or_buffer, "seek"):
                filepath_or_buffer.seek(0)
            results = pd.read_csv(filepath_or_buffer)
            if "StrategyOrDate" in results.columns:
                # chain to the original ValueError for a complete traceback
                raise MoonchartError(
                    "this is a parameter scan CSV, please use "
                    "ParamscanTearsheet.from_moonshot_csv"
                ) from e
            else:
                raise

        return cls._from_moonshot_or_pnl(
            results,
            start_date=start_date,
            end_date=end_date,
            trim_outliers=trim_outliers,
            how_to_aggregate=how_to_aggregate,
            riskfree=riskfree,
            compound=compound,
            rolling_sharpe_window=rolling_sharpe_window)