Ejemplo n.º 1
0
def bt_anz_folio(qx):
    """Render a full pyfolio tear sheet for a Backtrader backtest.

    Takes the first strategy from ``qx.bt_results``, pulls its 'pyfolio'
    analyzer, extracts returns/positions/transactions, converts their
    indexes to UTC, and displays the complete tear-sheet figure.

    NOTE(review): some tear-sheet panels fetch SPY benchmark data over
    the network and may appear to hang; interrupt manually if needed.
    """
    print('\n-----------pyFolio----------')

    # First strategy object produced by the backtest run.
    strategy = qx.bt_results[0]
    pyfolio_analyzer = strategy.analyzers.getbyname('pyfolio')
    returns, positions, transactions, gross_lev = \
        pyfolio_analyzer.get_pf_items()

    # pyfolio expects tz-aware (UTC) indexes on all three frames.
    returns = to_utc(returns)
    positions = to_utc(positions)
    transactions = to_utc(transactions)

    # Build the multi-page ("waterfall") analysis charts; the strategy's
    # own returns double as the benchmark series.
    pf.create_full_tear_sheet(returns,
                              positions=positions,
                              transactions=transactions,
                              benchmark_rets=returns)

    plt.show()
Ejemplo n.º 2
0
class TestGrossLev(TestCase):
    """Tests for ``timeseries.gross_lev`` against a canned fixture."""

    # Absolute directory of this test module; anchors the fixture paths.
    __location__ = os.path.realpath(
        os.path.join(os.getcwd(), os.path.dirname(__file__)))

    # Positions fixture (gzipped CSV) with its index parsed and
    # converted to UTC timestamps.
    test_pos = to_utc(pd.read_csv(
        gzip.open(__location__ + '/test_data/test_pos.csv.gz'),
        index_col=0, parse_dates=True))
    # Expected gross-leverage values, flattened to a UTC-indexed Series.
    test_gross_lev = pd.read_csv(
        gzip.open(
            __location__ + '/test_data/test_gross_lev.csv.gz'),
        index_col=0, parse_dates=True)
    test_gross_lev = to_series(to_utc(test_gross_lev))

    def test_gross_lev_calculation(self):
        """Gross leverage computed from positions matches the fixture.

        Only data from 2004-02-01 onward is compared, and series names
        are ignored (check_names=False).
        """
        assert_series_equal(
            timeseries.gross_lev(self.test_pos)['2004-02-01':],
            self.test_gross_lev['2004-02-01':], check_names=False)
Ejemplo n.º 3
0
    def load_data(self, sid):
        """Load pyfolio fixture data and redraw the returns-view axes.

        Clears every subplot, reads the sample returns/transactions/
        positions CSVs shipped with pyfolio's tests, computes summary
        performance stats, and renders the rolling-returns, returns,
        volatility, Sharpe, and underwater plots.
        """
        print('resultview load ', sid)
        # Wipe any previously drawn plots before repainting.
        for axis in self.fig.axes:
            axis.clear()

        base = os.path.realpath(
            os.path.join(os.getcwd(), os.path.dirname(__file__)))
        base = base + '/../../pyfolio/tests/test_data/'

        def _read(name):
            # Fixture CSVs are gzip-compressed with a datetime index.
            return pd.read_csv(gzip.open(base + name),
                               index_col=0,
                               parse_dates=True)

        rets = to_series(to_utc(_read('test_returns.csv.gz')))
        txns = to_utc(_read('test_txn.csv.gz'))
        poss = to_utc(_read('test_pos.csv.gz'))

        # Summary statistics table for the loaded sample data.
        self.perf_stats = plotting.get_perf_stats(rets,
                                                  positions=poss,
                                                  transactions=txns)

        plotting.plot_rolling_returns(rets, ax=self.ax_rolling_returns)
        self.ax_rolling_returns.set_title('Cumulative returns')
        plotting.plot_returns(rets, ax=self.ax_returns)
        self.ax_returns.set_title('Returns')
        plotting.plot_rolling_volatility(rets,
                                         ax=self.ax_rolling_volatility)
        plotting.plot_rolling_sharpe(rets, ax=self.ax_rolling_sharpe)
        plotting.plot_drawdown_underwater(returns=rets,
                                          ax=self.ax_underwater)

        # Keep tick labels visible on every subplot, then repaint.
        for axis in self.fig.axes:
            plt.setp(axis.get_xticklabels(), visible=True)
        self.draw()
Ejemplo n.º 4
0
    def load_data(self, sid):
        """Load fixture data and redraw the transaction-view axes.

        Clears every subplot, reads the pyfolio sample returns/
        transactions/positions CSVs, infers intraday trading, and
        renders turnover, daily-volume, turnover-histogram, and
        transaction-timing plots.
        """
        print('txnview load ', sid)
        # Wipe any previously drawn plots before repainting.
        for axis in self.fig.axes:
            axis.clear()

        base = os.path.realpath(
            os.path.join(os.getcwd(), os.path.dirname(__file__)))
        base = base + '/../../pyfolio/tests/test_data/'

        def _read(name):
            # Fixture CSVs are gzip-compressed with a datetime index.
            return pd.read_csv(gzip.open(base + name),
                               index_col=0,
                               parse_dates=True)

        rets = to_series(to_utc(_read('test_returns.csv.gz')))
        txns = to_utc(_read('test_txn.csv.gz'))
        poss = to_utc(_read('test_pos.csv.gz'))

        # Let pyfolio decide whether the data is intraday and adjust
        # the positions frame accordingly.
        positions = utils.check_intraday('infer', rets, poss, txns)

        plotting.plot_turnover(rets,
                               txns,
                               positions,
                               ax=self.ax_turnover)
        plotting.plot_daily_volume(rets,
                                   txns,
                                   ax=self.ax_daily_volume)
        try:
            plotting.plot_daily_turnover_hist(txns,
                                              positions,
                                              ax=self.ax_turnover_hist)
        except ValueError:
            # Histogram can fail on degenerate data; warn, don't crash.
            warnings.warn('Unable to generate turnover plot.', UserWarning)
        plotting.plot_txn_time_hist(txns, ax=self.ax_txn_timings)

        # Keep tick labels visible on every subplot, then repaint.
        for axis in self.fig.axes:
            plt.setp(axis.get_xticklabels(), visible=True)
        self.draw()
Ejemplo n.º 5
0
    def load_data(self, sid):
        """Load fixture data and redraw the position-view axes.

        Clears every subplot, reads the pyfolio sample returns/
        transactions/positions CSVs, infers intraday trading, computes
        percent allocations, and renders exposure, holdings, long/short
        holdings, and gross-leverage plots.
        """
        print('posview load ', sid)
        # Wipe any previously drawn plots before repainting.
        for axis in self.fig.axes:
            axis.clear()

        base = os.path.realpath(
            os.path.join(os.getcwd(), os.path.dirname(__file__)))
        base = base + '/../../pyfolio/tests/test_data/'

        def _read(name):
            # Fixture CSVs are gzip-compressed with a datetime index.
            return pd.read_csv(gzip.open(base + name),
                               index_col=0,
                               parse_dates=True)

        rets = to_series(to_utc(_read('test_returns.csv.gz')))
        txns = to_utc(_read('test_txn.csv.gz'))
        poss = to_utc(_read('test_pos.csv.gz'))

        # Let pyfolio decide whether the data is intraday and adjust
        # the positions frame accordingly.
        positions = utils.check_intraday('infer', rets, poss, txns)
        positions_alloc = pos.get_percent_alloc(positions)

        plotting.plot_exposures(rets, positions, ax=self.ax_exposures)
        plotting.plot_holdings(rets,
                               positions_alloc,
                               ax=self.ax_holdings)
        plotting.plot_long_short_holdings(rets,
                                          positions_alloc,
                                          ax=self.ax_long_short_holdings)
        plotting.plot_gross_leverage(rets,
                                     positions,
                                     ax=self.ax_gross_leverage)

        # Keep tick labels visible on every subplot, then repaint.
        for axis in self.fig.axes:
            plt.setp(axis.get_xticklabels(), visible=True)
        self.draw()
Ejemplo n.º 6
0
class PositionsTestCase(TestCase):
    """Smoke tests: each pyfolio tear sheet builds without raising.

    Fixture returns/positions/transactions are loaded once, at class
    definition time, from gzipped CSVs located next to this module.
    Each ``parameterized.expand`` entry is a kwargs dict exercising one
    optional code path of the tear-sheet builder under test.
    """

    # Absolute directory of this test module; anchors the fixture paths.
    __location__ = os.path.realpath(
        os.path.join(os.getcwd(), os.path.dirname(__file__)))

    # Daily returns fixture, flattened to a UTC-indexed Series.
    test_returns = read_csv(gzip.open(__location__ +
                                      '/test_data/test_returns.csv.gz'),
                            index_col=0,
                            parse_dates=True)
    test_returns = to_series(to_utc(test_returns))
    # Transactions fixture (UTC-indexed DataFrame).
    test_txn = to_utc(
        read_csv(gzip.open(__location__ + '/test_data/test_txn.csv.gz'),
                 index_col=0,
                 parse_dates=True))
    # Positions fixture (UTC-indexed DataFrame).
    test_pos = to_utc(
        read_csv(gzip.open(__location__ + '/test_data/test_pos.csv.gz'),
                 index_col=0,
                 parse_dates=True))

    @parameterized.expand([
        ({}, ),
        ({
            'slippage': 1
        }, ),
        ({
            'live_start_date': test_returns.index[-20]
        }, ),
        ({
            'round_trips': True
        }, ),
        ({
            'hide_positions': True
        }, ),
        ({
            'cone_std': 1
        }, ),
        ({
            'bootstrap': True
        }, ),
    ])
    @cleanup
    def test_create_full_tear_sheet_breakdown(self, kwargs):
        """create_full_tear_sheet completes for each kwargs variant."""
        create_full_tear_sheet(self.test_returns,
                               positions=self.test_pos,
                               transactions=self.test_txn,
                               **kwargs)

    @parameterized.expand([
        ({}, ),
        ({
            'slippage': 1
        }, ),
        ({
            'live_start_date': test_returns.index[-20]
        }, ),
    ])
    @cleanup
    def test_create_simple_tear_sheet_breakdown(self, kwargs):
        """create_simple_tear_sheet completes for each kwargs variant."""
        create_simple_tear_sheet(self.test_returns,
                                 positions=self.test_pos,
                                 transactions=self.test_txn,
                                 **kwargs)

    @parameterized.expand([
        ({}, ),
        ({
            'live_start_date': test_returns.index[-20]
        }, ),
        ({
            'cone_std': 1
        }, ),
        ({
            'bootstrap': True
        }, ),
    ])
    @cleanup
    def test_create_returns_tear_sheet_breakdown(self, kwargs):
        """create_returns_tear_sheet completes for each kwargs variant."""
        create_returns_tear_sheet(self.test_returns, **kwargs)

    @parameterized.expand([
        ({}, ),
        ({
            'hide_positions': True
        }, ),
        ({
            'show_and_plot_top_pos': 0
        }, ),
        ({
            'show_and_plot_top_pos': 1
        }, ),
    ])
    @cleanup
    def test_create_position_tear_sheet_breakdown(self, kwargs):
        """create_position_tear_sheet completes for each kwargs variant."""
        create_position_tear_sheet(self.test_returns, self.test_pos, **kwargs)

    @parameterized.expand([
        ({}, ),
        ({
            'unadjusted_returns': test_returns
        }, ),
    ])
    @cleanup
    def test_create_txn_tear_sheet_breakdown(self, kwargs):
        """create_txn_tear_sheet completes for each kwargs variant."""
        create_txn_tear_sheet(self.test_returns, self.test_pos, self.test_txn,
                              **kwargs)

    @parameterized.expand([
        ({}, ),
        ({
            'sector_mappings': {}
        }, ),
    ])
    @cleanup
    def test_create_round_trip_tear_sheet_breakdown(self, kwargs):
        """create_round_trip_tear_sheet completes for each variant."""
        create_round_trip_tear_sheet(self.test_returns, self.test_pos,
                                     self.test_txn, **kwargs)

    @parameterized.expand([
        ({}, ),
        ({
            'legend_loc': 1
        }, ),
    ])
    @cleanup
    def test_create_interesting_times_tear_sheet_breakdown(self, kwargs):
        """create_interesting_times_tear_sheet completes for each variant."""
        create_interesting_times_tear_sheet(self.test_returns, **kwargs)

    @parameterized.expand([
        ({}, ),
        ({
            'stoch_vol': True
        }, ),
    ])
    @cleanup
    def test_create_bayesian_tear_sheet_breakdown(self, kwargs):
        """create_bayesian_tear_sheet completes for each kwargs variant."""
        create_bayesian_tear_sheet(
            self.test_returns,
            live_start_date=self.test_returns.index[-20],
            **kwargs)
Ejemplo n.º 7
0
class PositionsTestCase(TestCase):
    """Unit tests for position allocation, extraction, and exposure
    helpers, plus intraday detection on canned fixture data."""

    # Shared synthetic 20-day daily index for the exposure tests.
    dates = date_range(start='2015-01-01', freq='D', periods=20)

    def test_get_percent_alloc(self):
        """Each row of the result is the row divided by its own sum."""
        raw_data = arange(15, dtype=float).reshape(5, 3)
        # Make the first column negative to test absolute magnitudes.
        raw_data[:, 0] *= -1

        frame = DataFrame(raw_data,
                          index=date_range('01-01-2015', freq='D', periods=5),
                          columns=['A', 'B', 'C'])

        result = get_percent_alloc(frame)
        # Build the expected values row-by-row: row / row.sum().
        expected_raw = zeros_like(raw_data)
        for idx, row in enumerate(raw_data):
            expected_raw[idx] = row / row.sum()

        expected = DataFrame(
            expected_raw,
            index=frame.index,
            columns=frame.columns,
        )

        assert_frame_equal(result, expected)

    def test_extract_pos(self):
        """extract_pos pivots long-format positions into a wide frame.

        Values become amount * last_sale_price per sid, with cash
        appended as its own column.
        """
        # Long-format input: two sids per day, so dates repeat.
        index_dup = [
            Timestamp('2015-06-08', tz='UTC'),
            Timestamp('2015-06-08', tz='UTC'),
            Timestamp('2015-06-09', tz='UTC'),
            Timestamp('2015-06-09', tz='UTC')
        ]
        index = [
            Timestamp('2015-06-08', tz='UTC'),
            Timestamp('2015-06-09', tz='UTC')
        ]

        positions = DataFrame(
            {
                'amount': [100., 200., 300., 400.],
                'last_sale_price': [10., 20., 30., 40.],
                'sid': [1, 2, 1, 2]
            },
            index=index_dup)
        cash = Series([100., 200.], index=index)

        result = extract_pos(positions, cash)

        # Expected market values: amount * price for each sid per day.
        expected = DataFrame(OrderedDict([(1, [100. * 10., 300. * 30.]),
                                          (2, [200. * 20., 400. * 40.]),
                                          ('cash', [100., 200.])]),
                             index=index)
        expected.index.name = 'index'

        assert_frame_equal(result, expected)

    # Cases: dict mapping, Series mapping, and a partial mapping that
    # leaves sid 2 unmapped (expects a warning).
    @parameterized.expand([(DataFrame([[1.0, 2.0, 3.0, 10.0]] * len(dates),
                                      columns=[0, 1, 2, 'cash'],
                                      index=dates), {
                                          0: 'A',
                                          1: 'B',
                                          2: 'A'
                                      },
                            DataFrame([[4.0, 2.0, 10.0]] * len(dates),
                                      columns=['A', 'B', 'cash'],
                                      index=dates), False),
                           (DataFrame([[1.0, 2.0, 3.0, 10.0]] * len(dates),
                                      columns=[0, 1, 2, 'cash'],
                                      index=dates),
                            Series(index=[0, 1, 2], data=['A', 'B', 'A']),
                            DataFrame([[4.0, 2.0, 10.0]] * len(dates),
                                      columns=['A', 'B', 'cash'],
                                      index=dates), False),
                           (DataFrame([[1.0, 2.0, 3.0, 10.0]] * len(dates),
                                      columns=[0, 1, 2, 'cash'],
                                      index=dates), {
                                          0: 'A',
                                          1: 'B'
                                      },
                            DataFrame([[1.0, 2.0, 10.0]] * len(dates),
                                      columns=['A', 'B', 'cash'],
                                      index=dates), True)])
    def test_sector_exposure(self, positions, mapping,
                             expected_sector_exposure, warning_expected):
        """
        Tests sector exposure mapping and rollup.

        """
        with warnings.catch_warnings(record=True) as w:
            result_sector_exposure = get_sector_exposures(positions, mapping)

            assert_frame_equal(result_sector_exposure,
                               expected_sector_exposure)
            # A warning is emitted only when some sids lack a mapping.
            if warning_expected:
                self.assertEqual(len(w), 1)
            else:
                self.assertEqual(len(w), 0)

    # Cases: all-long, mixed long/short, and NaN-containing positions.
    @parameterized.expand([
        (DataFrame([[1.0, 2.0, 3.0, 14.0]] * len(dates),
                   columns=[0, 1, 2, 'cash'],
                   index=dates),
         DataFrame(
             [[0.15, 0.1, nan, nan]] * len(dates),
             columns=['max_long', 'median_long', 'median_short', 'max_short'],
             index=dates)),
        (DataFrame([[1.0, -2.0, -13.0, 15.0]] * len(dates),
                   columns=[0, 1, 2, 'cash'],
                   index=dates),
         DataFrame(
             [[1.0, 1.0, -7.5, -13.0]] * len(dates),
             columns=['max_long', 'median_long', 'median_short', 'max_short'],
             index=dates)),
        (DataFrame([[nan, 2.0, nan, 8.0]] * len(dates),
                   columns=[0, 1, 2, 'cash'],
                   index=dates),
         DataFrame(
             [[0.2, 0.2, nan, nan]] * len(dates),
             columns=['max_long', 'median_long', 'median_short', 'max_short'],
             index=dates))
    ])
    def test_max_median_exposure(self, positions, expected):
        """Max/median long and short concentrations match expectations."""
        alloc_summary = get_max_median_position_concentration(positions)
        assert_frame_equal(expected, alloc_summary)

    # Absolute directory of this test module; anchors the fixture paths.
    __location__ = os.path.realpath(
        os.path.join(os.getcwd(), os.path.dirname(__file__)))

    # Daily returns fixture, flattened to a UTC-indexed Series.
    test_returns = read_csv(gzip.open(__location__ +
                                      '/test_data/test_returns.csv.gz'),
                            index_col=0,
                            parse_dates=True)
    test_returns = to_series(to_utc(test_returns))
    # Transactions fixture (UTC-indexed DataFrame).
    test_txn = to_utc(
        read_csv(gzip.open(__location__ + '/test_data/test_txn.csv.gz'),
                 index_col=0,
                 parse_dates=True))
    # Positions fixture (UTC-indexed DataFrame).
    test_pos = to_utc(
        read_csv(gzip.open(__location__ + '/test_data/test_pos.csv.gz'),
                 index_col=0,
                 parse_dates=True))

    # Weekly-resampled positions with daily transactions should be
    # detected as intraday; the raw daily fixture should not.
    @parameterized.expand([(test_pos, test_txn, False),
                           (test_pos.resample('1W').last(), test_txn, True)])
    def test_detect_intraday(self, positions, transactions, expected):
        """detect_intraday flags weekly positions + daily txns only."""
        detected = detect_intraday(positions, transactions, threshold=0.25)
        assert_equal(detected, expected)

    @parameterized.expand([
        ('infer', test_returns, test_pos, test_txn, test_pos),
        (False, test_returns, test_pos, test_txn, test_pos)
    ])
    def test_check_intraday(self, estimate, returns, positions, transactions,
                            expected):
        """Non-intraday data passes through check_intraday unchanged."""
        detected = check_intraday(estimate, returns, positions, transactions)
        assert_frame_equal(detected, expected)

    @parameterized.expand([(test_returns, test_pos, test_txn, (1506, 8)),
                           (test_returns, test_pos.resample('1W').last(),
                            test_txn, (1819, 8))])
    def test_estimate_intraday(self, returns, positions, transactions,
                               expected):
        """estimate_intraday produces frames of the expected shape."""
        intraday_pos = estimate_intraday(returns, positions, transactions)
        assert_equal(intraday_pos.shape, expected)
Ejemplo n.º 8
0
class RiskTestCase(TestCase):
    """Tests for style/sector/cap/volume exposure computations.

    All inputs and expected outputs are canned CSV fixtures loaded at
    class definition time; indexes are converted to UTC throughout.
    """

    # Absolute directory of this test module; anchors the fixture paths.
    __location__ = os.path.realpath(
        os.path.join(os.getcwd(), os.path.dirname(__file__)))

    # Positions fixture; columns renamed to the sids used by the other
    # fixtures (plus the trailing 'cash' column).
    test_pos = to_utc(
        read_csv(gzip.open(__location__ + '/test_data/test_pos.csv.gz'),
                 index_col=0,
                 parse_dates=True))
    test_pos.columns = [351, 1419, 1787, 25317, 3321, 3951, 4922, 'cash']

    # Transactions fixture.
    test_txn = to_utc(
        read_csv(gzip.open(__location__ + '/test_data/test_txn.csv.gz'),
                 index_col=0,
                 parse_dates=True))
    # Sector exposure fixtures: input sectors plus expected long/short/
    # gross breakdowns.
    test_sectors = to_utc(
        read_csv(__location__ + '/test_data/test_sectors.csv',
                 index_col=0,
                 parse_dates=True))
    expected_sectors_longed = to_utc(
        read_csv(__location__ + '/test_data/expected_sectors_longed.csv',
                 index_col=0,
                 parse_dates=True))
    expected_sectors_shorted = to_utc(
        read_csv(__location__ + '/test_data/expected_sectors_shorted.csv',
                 index_col=0,
                 parse_dates=True))
    expected_sectors_grossed = to_utc(
        read_csv(__location__ + '/test_data/expected_sectors_grossed.csv',
                 index_col=0,
                 parse_dates=True))
    # Market-cap exposure fixtures: input caps plus expected long/short/
    # gross/net breakdowns.
    test_caps = to_utc(
        read_csv(__location__ + '/test_data/test_caps.csv',
                 index_col=0,
                 parse_dates=True))
    expected_caps_longed = to_utc(
        read_csv(__location__ + '/test_data/expected_caps_longed.csv',
                 index_col=0,
                 parse_dates=True))
    expected_caps_shorted = to_utc(
        read_csv(__location__ + '/test_data/expected_caps_shorted.csv',
                 index_col=0,
                 parse_dates=True))
    expected_caps_grossed = to_utc(
        read_csv(__location__ + '/test_data/expected_caps_grossed.csv',
                 index_col=0,
                 parse_dates=True))
    expected_caps_netted = to_utc(
        read_csv(__location__ + '/test_data/expected_caps_netted.csv',
                 index_col=0,
                 parse_dates=True))
    # Volume exposure fixtures: shares held, traded volumes, expected
    # threshold series.
    test_shares_held = to_utc(
        read_csv(__location__ + '/test_data/test_shares_held.csv',
                 index_col=0,
                 parse_dates=True))
    test_volumes = to_utc(
        read_csv(__location__ + '/test_data/test_volumes.csv',
                 index_col=0,
                 parse_dates=True))
    expected_volumes = to_utc(
        read_csv(__location__ + '/test_data/expected_volumes.csv',
                 index_col=0,
                 parse_dates=True))

    # Style-factor fixtures: one DataFrame per style, keyed by name.
    test_dict = {}
    styles = ['LT_MOMENTUM', 'LMCAP', 'VLTY', 'MACDSignal']
    for style in styles:
        df = to_utc(
            read_csv(__location__ + '/test_data/test_{}.csv'.format(style),
                     index_col=0,
                     parse_dates=True))
        test_dict.update({style: df})
    # test_styles = pd.DataFrame()
    # test_styles = test_styles.from_dict(test_dict)
    test_styles = test_dict

    expected_styles = to_utc(
        read_csv(__location__ + '/test_data/expected_styles.csv',
                 index_col=0,
                 parse_dates=True))

    @parameterized.expand([(test_pos, test_styles, expected_styles)])
    def test_compute_style_factor_exposures(self, positions, risk_factor_dict,
                                            expected):
        """Per-style exposures, concatenated, match the fixture frame."""
        style_list = []
        for name, value in risk_factor_dict.items():
            # Fixture columns load as strings; the API expects int sids.
            risk_factor_dict[name].columns = \
                risk_factor_dict[name].columns.astype(int)
            style_list.append(
                compute_style_factor_exposures(positions,
                                               risk_factor_dict[name]))
        expected.columns = expected.columns.astype(int)
        assert_frame_equal(pd.concat(style_list, axis=1), expected)

    @parameterized.expand([(test_pos, test_sectors, expected_sectors_longed,
                            expected_sectors_shorted, expected_sectors_grossed)
                           ])
    def test_compute_sector_exposures(self, positions, sectors,
                                      expected_longed, expected_shorted,
                                      expected_grossed):
        """Long/short/gross sector exposures match the fixtures."""
        # Fixture columns load as strings; the API expects int sids.
        sectors.columns = sectors.columns.astype(int)
        sector_exposures = compute_sector_exposures(positions, sectors)

        expected_longed.columns = expected_longed.columns.astype(int)
        expected_shorted.columns = expected_shorted.columns.astype(int)
        expected_grossed.columns = expected_grossed.columns.astype(int)

        # Result is a tuple of lists: (longed, shorted, grossed).
        assert_frame_equal(pd.concat(sector_exposures[0], axis=1),
                           expected_longed)
        assert_frame_equal(pd.concat(sector_exposures[1], axis=1),
                           expected_shorted)
        assert_frame_equal(pd.concat(sector_exposures[2], axis=1),
                           expected_grossed)

    @parameterized.expand([
        (test_pos, test_caps, expected_caps_longed, expected_caps_shorted,
         expected_caps_grossed, expected_caps_netted)
    ])
    def test_compute_cap_exposures(self, positions, caps, expected_longed,
                                   expected_shorted, expected_grossed,
                                   expected_netted):
        """Long/short/gross/net cap exposures match the fixtures."""
        # Fixture columns load as strings; the API expects int sids.
        caps.columns = caps.columns.astype(int)
        cap_exposures = compute_cap_exposures(positions, caps)

        expected_longed.columns = expected_longed.columns.astype(int)
        expected_shorted.columns = expected_shorted.columns.astype(int)
        expected_grossed.columns = expected_grossed.columns.astype(int)
        expected_netted.columns = expected_netted.columns.astype(int)

        # Result is a tuple of lists: (longed, shorted, grossed, netted).
        assert_frame_equal(pd.concat(cap_exposures[0], axis=1),
                           expected_longed)
        assert_frame_equal(pd.concat(cap_exposures[1], axis=1),
                           expected_shorted)
        assert_frame_equal(pd.concat(cap_exposures[2], axis=1),
                           expected_grossed)
        assert_frame_equal(pd.concat(cap_exposures[3], axis=1),
                           expected_netted)

    @parameterized.expand([(test_shares_held, test_volumes, 0.1,
                            expected_volumes)])
    def test_compute_volume_exposures(self, shares_held, volumes, percentile,
                                      expected):
        """Long/short/gross volume thresholds match the fixture columns."""
        l_thresh, s_thresh, g_thresh = compute_volume_exposures(
            shares_held, volumes, percentile)

        # Expected fixture stores the three series as columns '0'-'2'.
        assert_series_equal(l_thresh, expected['0'], check_names=False)
        assert_series_equal(s_thresh, expected['1'], check_names=False)
        assert_series_equal(g_thresh, expected['2'], check_names=False)
Ejemplo n.º 9
0
# Script section: pull the pyfolio analyzer results from a Backtrader
# run (``anzs`` is the strategy's analyzers collection, defined earlier)
# and render the full pyfolio tear sheet.
print('\n pyFolio专业量化分析图表\n')

# Extract returns, positions, transactions, and gross leverage from the
# 'pyfolio' analyzer.
xpyf = anzs.getbyname('pyfolio')
xret, xpos, xtran, gross_lev = xpyf.get_pf_items()

# pyfolio expects tz-aware (UTC) indexes on all three frames.
xret = to_utc(xret)
xpos = to_utc(xpos)
xtran = to_utc(xtran)

# BUG FIX: the original call was missing its closing parenthesis
# (a SyntaxError from a truncated paste); the call is now terminated.
# The strategy's own returns double as the benchmark series.
pf.create_full_tear_sheet(xret,
                          positions=xpos,
                          transactions=xtran,
                          benchmark_rets=xret)
Ejemplo n.º 10
0
class PositionsTestCase(TestCase):
    """Unit tests for position allocation, extraction, and exposure
    helpers, plus intraday detection on canned fixture data."""

    # Shared synthetic 20-day daily index for the exposure tests.
    dates = date_range(start="2015-01-01", freq="D", periods=20)

    def test_get_percent_alloc(self):
        """Each row of the result is the row divided by its own sum."""
        raw_data = arange(15, dtype=float).reshape(5, 3)
        # Make the first column negative to test absolute magnitudes.
        raw_data[:, 0] *= -1

        frame = DataFrame(
            raw_data,
            index=date_range("01-01-2015", freq="D", periods=5),
            columns=["A", "B", "C"],
        )

        result = get_percent_alloc(frame)
        # Build the expected values row-by-row: row / row.sum().
        expected_raw = zeros_like(raw_data)
        for idx, row in enumerate(raw_data):
            expected_raw[idx] = row / row.sum()

        expected = DataFrame(
            expected_raw,
            index=frame.index,
            columns=frame.columns,
        )

        assert_frame_equal(result, expected)

    def test_extract_pos(self):
        """extract_pos pivots long-format positions into a wide frame.

        Values become amount * last_sale_price per sid, with cash
        appended as its own column.
        """
        # Long-format input: two sids per day, so dates repeat.
        index_dup = [
            Timestamp("2015-06-08", tz="UTC"),
            Timestamp("2015-06-08", tz="UTC"),
            Timestamp("2015-06-09", tz="UTC"),
            Timestamp("2015-06-09", tz="UTC"),
        ]
        index = [
            Timestamp("2015-06-08", tz="UTC"),
            Timestamp("2015-06-09", tz="UTC"),
        ]

        positions = DataFrame(
            {
                "amount": [100.0, 200.0, 300.0, 400.0],
                "last_sale_price": [10.0, 20.0, 30.0, 40.0],
                "sid": [1, 2, 1, 2],
            },
            index=index_dup,
        )
        cash = Series([100.0, 200.0], index=index)

        result = extract_pos(positions, cash)

        # Expected market values: amount * price for each sid per day.
        expected = DataFrame(
            OrderedDict(
                [
                    (1, [100.0 * 10.0, 300.0 * 30.0]),
                    (2, [200.0 * 20.0, 400.0 * 40.0]),
                    ("cash", [100.0, 200.0]),
                ]
            ),
            index=index,
        )
        expected.index.name = "index"
        expected.columns.name = "sid"

        assert_frame_equal(result, expected)

    # Cases: dict mapping, Series mapping, and a partial mapping that
    # leaves sid 2 unmapped (expects a warning).
    @parameterized.expand(
        [
            (
                DataFrame(
                    [[1.0, 2.0, 3.0, 10.0]] * len(dates),
                    columns=[0, 1, 2, "cash"],
                    index=dates,
                ),
                {0: "A", 1: "B", 2: "A"},
                DataFrame(
                    [[4.0, 2.0, 10.0]] * len(dates),
                    columns=["A", "B", "cash"],
                    index=dates,
                ),
                False,
            ),
            (
                DataFrame(
                    [[1.0, 2.0, 3.0, 10.0]] * len(dates),
                    columns=[0, 1, 2, "cash"],
                    index=dates,
                ),
                Series(index=[0, 1, 2], data=["A", "B", "A"]),
                DataFrame(
                    [[4.0, 2.0, 10.0]] * len(dates),
                    columns=["A", "B", "cash"],
                    index=dates,
                ),
                False,
            ),
            (
                DataFrame(
                    [[1.0, 2.0, 3.0, 10.0]] * len(dates),
                    columns=[0, 1, 2, "cash"],
                    index=dates,
                ),
                {0: "A", 1: "B"},
                DataFrame(
                    [[1.0, 2.0, 10.0]] * len(dates),
                    columns=["A", "B", "cash"],
                    index=dates,
                ),
                True,
            ),
        ]
    )
    def test_sector_exposure(
        self, positions, mapping, expected_sector_exposure, warning_expected
    ):
        """
        Tests sector exposure mapping and rollup.

        """
        with warnings.catch_warnings(record=True) as w:
            result_sector_exposure = get_sector_exposures(positions, mapping)

            assert_frame_equal(
                result_sector_exposure, expected_sector_exposure
            )
            # A warning is emitted only when some sids lack a mapping.
            if warning_expected:
                self.assertEqual(len(w), 1)
            else:
                self.assertEqual(len(w), 0)

    # Cases: all-long, mixed long/short, and NaN-containing positions.
    @parameterized.expand(
        [
            (
                DataFrame(
                    [[1.0, 2.0, 3.0, 14.0]] * len(dates),
                    columns=[0, 1, 2, "cash"],
                    index=dates,
                ),
                DataFrame(
                    [[0.15, 0.1, nan, nan]] * len(dates),
                    columns=[
                        "max_long",
                        "median_long",
                        "median_short",
                        "max_short",
                    ],
                    index=dates,
                ),
            ),
            (
                DataFrame(
                    [[1.0, -2.0, -13.0, 15.0]] * len(dates),
                    columns=[0, 1, 2, "cash"],
                    index=dates,
                ),
                DataFrame(
                    [[1.0, 1.0, -7.5, -13.0]] * len(dates),
                    columns=[
                        "max_long",
                        "median_long",
                        "median_short",
                        "max_short",
                    ],
                    index=dates,
                ),
            ),
            (
                DataFrame(
                    [[nan, 2.0, nan, 8.0]] * len(dates),
                    columns=[0, 1, 2, "cash"],
                    index=dates,
                ),
                DataFrame(
                    [[0.2, 0.2, nan, nan]] * len(dates),
                    columns=[
                        "max_long",
                        "median_long",
                        "median_short",
                        "max_short",
                    ],
                    index=dates,
                ),
            ),
        ]
    )
    def test_max_median_exposure(self, positions, expected):
        """Max/median long and short concentrations match expectations."""
        alloc_summary = get_max_median_position_concentration(positions)
        assert_frame_equal(expected, alloc_summary)

    # Absolute directory of this test module; anchors the fixture paths.
    __location__ = os.path.realpath(
        os.path.join(os.getcwd(), os.path.dirname(__file__))
    )

    # Daily returns fixture, flattened to a UTC-indexed Series.
    test_returns = read_csv(
        gzip.open(__location__ + "/test_data/test_returns.csv.gz"),
        index_col=0,
        parse_dates=True,
    )
    test_returns = to_series(to_utc(test_returns))
    # Transactions fixture (UTC-indexed DataFrame).
    test_txn = to_utc(
        read_csv(
            gzip.open(__location__ + "/test_data/test_txn.csv.gz"),
            index_col=0,
            parse_dates=True,
        )
    )
    # Positions fixture (UTC-indexed DataFrame).
    test_pos = to_utc(
        read_csv(
            gzip.open(__location__ + "/test_data/test_pos.csv.gz"),
            index_col=0,
            parse_dates=True,
        )
    )

    # Weekly-resampled positions with daily transactions should be
    # detected as intraday; the raw daily fixture should not.
    @parameterized.expand(
        [
            (test_pos, test_txn, False),
            (test_pos.resample("1W").last(), test_txn, True),
        ]
    )
    def test_detect_intraday(self, positions, transactions, expected):
        """detect_intraday flags weekly positions + daily txns only."""
        detected = detect_intraday(positions, transactions, threshold=0.25)
        assert detected == expected

    @parameterized.expand(
        [
            ("infer", test_returns, test_pos, test_txn, test_pos),
            (False, test_returns, test_pos, test_txn, test_pos),
        ]
    )
    def test_check_intraday(
        self, estimate, returns, positions, transactions, expected
    ):
        """Non-intraday data passes through check_intraday unchanged."""
        detected = check_intraday(estimate, returns, positions, transactions)
        assert_frame_equal(detected, expected)

    @parameterized.expand(
        [
            (test_returns, test_pos, test_txn, (1506, 8)),
            (
                test_returns,
                test_pos.resample("1W").last(),
                test_txn,
                (1819, 8),
            ),
        ]
    )
    def test_estimate_intraday(
        self, returns, positions, transactions, expected
    ):
        """estimate_intraday produces frames of the expected shape."""
        intraday_pos = estimate_intraday(returns, positions, transactions)
        assert intraday_pos.shape == expected
Ejemplo n.º 11
0
class PositionsTestCase(TestCase):
    """Smoke tests for tear-sheet creation on canned fixture data."""

    # Absolute directory of this test module; fixture files live in the
    # test_data/ subdirectory next to it.
    __location__ = os.path.realpath(
        os.path.join(os.getcwd(), os.path.dirname(__file__))
    )

    # Returns fixture: single-column gzipped CSV -> UTC-localized Series.
    test_returns = to_series(
        to_utc(
            read_csv(
                gzip.open(__location__ + "/test_data/test_returns.csv.gz"),
                index_col=0,
                parse_dates=True,
            )
        )
    )
    # Transactions fixture, UTC-localized DataFrame.
    test_txn = to_utc(
        read_csv(
            gzip.open(__location__ + "/test_data/test_txn.csv.gz"),
            index_col=0,
            parse_dates=True,
        )
    )
    # Positions fixture, UTC-localized DataFrame.
    test_pos = to_utc(
        read_csv(
            gzip.open(__location__ + "/test_data/test_pos.csv.gz"),
            index_col=0,
            parse_dates=True,
        )
    )

    @parameterized.expand(
        [
            ({},),
            ({"slippage": 1},),
            ({"live_start_date": test_returns.index[-20]},),
            ({"round_trips": True},),
            ({"hide_positions": True},),
            ({"cone_std": 1},),
            ({"bootstrap": True},),
        ]
    )
    @cleanup
    def test_create_full_tear_sheet_breakdown(self, kwargs):
        """Smoke-test create_full_tear_sheet under each optional kwarg."""
        create_full_tear_sheet(
            self.test_returns,
            positions=self.test_pos,
            transactions=self.test_txn,
            benchmark_rets=self.test_returns,
            **kwargs,
        )

    @parameterized.expand(
        [
            ({},),
            ({"slippage": 1},),
            ({"live_start_date": test_returns.index[-20]},),
        ]
    )
    @cleanup
    def test_create_simple_tear_sheet_breakdown(self, kwargs):
        """Smoke-test create_simple_tear_sheet with slippage / live-start options."""
        create_simple_tear_sheet(
            self.test_returns,
            positions=self.test_pos,
            transactions=self.test_txn,
            **kwargs,
        )

    @parameterized.expand(
        [
            ({},),
            ({"live_start_date": test_returns.index[-20]},),
            ({"cone_std": 1},),
            ({"bootstrap": True},),
        ]
    )
    @cleanup
    def test_create_returns_tear_sheet_breakdown(self, kwargs):
        """Smoke-test create_returns_tear_sheet under each optional kwarg."""
        create_returns_tear_sheet(
            self.test_returns,
            benchmark_rets=self.test_returns,
            **kwargs,
        )

    @parameterized.expand(
        [
            ({},),
            ({"hide_positions": True},),
            ({"show_and_plot_top_pos": 0},),
            ({"show_and_plot_top_pos": 1},),
        ]
    )
    @cleanup
    def test_create_position_tear_sheet_breakdown(self, kwargs):
        """Smoke-test create_position_tear_sheet under each display option."""
        create_position_tear_sheet(
            self.test_returns, self.test_pos, **kwargs
        )

    @parameterized.expand(
        [
            ({},),
            ({"unadjusted_returns": test_returns},),
        ]
    )
    @cleanup
    def test_create_txn_tear_sheet_breakdown(self, kwargs):
        """Smoke-test create_txn_tear_sheet with and without unadjusted returns."""
        create_txn_tear_sheet(
            self.test_returns, self.test_pos, self.test_txn, **kwargs
        )

    @parameterized.expand(
        [
            ({},),
            ({"sector_mappings": {}},),
        ]
    )
    @cleanup
    def test_create_round_trip_tear_sheet_breakdown(self, kwargs):
        """Smoke-test create_round_trip_tear_sheet with optional sector mappings."""
        create_round_trip_tear_sheet(
            self.test_returns, self.test_pos, self.test_txn, **kwargs
        )

    @parameterized.expand(
        [
            ({},),
            ({"legend_loc": 1},),
        ]
    )
    @cleanup
    def test_create_interesting_times_tear_sheet_breakdown(self, kwargs):
        """Smoke-test create_interesting_times_tear_sheet with optional kwargs."""
        create_interesting_times_tear_sheet(
            self.test_returns, self.test_returns, **kwargs
        )