import datetime as dt

import numpy as np
import pandas as pd

# import path assumed from the alpha-mind project layout
from alphamind.portfolio.rankbuilder import rank_build


def benchmark_build_rank_with_group(n_samples: int, n_loops: int,
                                    n_included: int, n_groups: int) -> None:
    print("-" * 60)
    print(
        "Starting  portfolio construction by rank with group-by values benchmarking"
    )
    print(
        "Parameters(n_samples: {0}, n_included: {1}, n_loops: {2}, n_groups: {3})"
        .format(n_samples, n_included, n_loops, n_groups))

    n_portfolio = 10

    x = np.random.randn(n_samples, n_portfolio)
    groups = np.random.randint(n_groups, size=n_samples)

    start = dt.datetime.now()
    for _ in range(n_loops):
        calc_weights = rank_build(x, n_included, groups=groups)
    impl_model_time = dt.datetime.now() - start

    print('{0:20s}: {1}'.format('Implemented model', impl_model_time))

    start = dt.datetime.now()
    for _ in range(n_loops):
        grouped_ordering = pd.DataFrame(-x).groupby(groups).rank()
        exp_weights = np.zeros((len(x), n_portfolio))
        masks = (grouped_ordering <= n_included).values
        for j in range(n_portfolio):
            exp_weights[masks[:, j], j] = 1.
    benchmark_model_time = dt.datetime.now() - start

    np.testing.assert_array_almost_equal(calc_weights, exp_weights)

    print('{0:20s}: {1}'.format('Benchmark model', benchmark_model_time))


def benchmark_build_rank(n_samples: int, n_loops: int,
                         n_included: int) -> None:
    print("-" * 60)
    print("Starting portfolio construction by rank benchmarking")
    print("Parameters(n_samples: {0}, n_included: {1}, n_loops: {2})".format(
        n_samples, n_included, n_loops))

    n_portfolio = 10

    x = np.random.randn(n_samples, n_portfolio)

    start = dt.datetime.now()
    for _ in range(n_loops):
        calc_weights = rank_build(x, n_included)
    impl_model_time = dt.datetime.now() - start

    print('{0:20s}: {1}'.format('Implemented model', impl_model_time))

    start = dt.datetime.now()
    for _ in range(n_loops):
        exp_weights = np.zeros((len(x), n_portfolio))
        chosen_index = (-x).argsort(axis=0).argsort(axis=0) < n_included
        for j in range(n_portfolio):
            exp_weights[chosen_index[:, j], j] = 1.
    benchmark_model_time = dt.datetime.now() - start

    np.testing.assert_array_almost_equal(calc_weights, exp_weights)

    print('{0:20s}: {1}'.format('Benchmark model', benchmark_model_time))
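# A minimal usage sketch (assumed entry point, not part of the original
# snippet): run both benchmarks with small, illustrative parameter values to
# compare the implemented rank builder against the pandas/numpy references.
if __name__ == '__main__':
    benchmark_build_rank(n_samples=3000, n_loops=100, n_included=300)
    benchmark_build_rank_with_group(n_samples=3000, n_loops=100,
                                    n_included=10, n_groups=30)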
Example #3
    def test_rank_build_with_masks(self):
        for n_portfolio in self.n_portfolio:
            x = np.random.randn(self.n_samples, n_portfolio)
            choices = np.random.choice(self.n_samples,
                                       self.n_mask,
                                       replace=False)
            masks = np.full(self.n_samples, True, dtype=bool)
            masks[choices] = False

            calc_weights = rank_build(x, self.n_included, masks=masks)

            expected_weights = np.zeros((len(x), n_portfolio))

            filtered_index = np.arange(len(x))[masks]
            filtered_x = x[masks]
            big_boolean = np.full(x.shape, False, dtype=bool)

            chosen = (-filtered_x).argsort(axis=0).argsort(
                axis=0) < self.n_included
            big_boolean[filtered_index] = chosen

            for j in range(x.shape[1]):
                expected_weights[big_boolean[:, j], j] = 1.

            np.testing.assert_array_almost_equal(calc_weights,
                                                 expected_weights)
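# Illustration (assumed, not from the original test suite): masked-out names
# are excluded before ranking, so they can never enter the portfolio even when
# their scores are the largest.
import numpy as np

x = np.array([5., 4., 3., 2.])
masks = np.array([False, True, True, True])   # first name is not tradable
ranks = np.full(x.shape, x.size, dtype=int)   # sentinel rank for excluded names
ranks[masks] = (-x[masks]).argsort().argsort()
top2 = ranks < 2                              # -> [False, True, True, False]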
Example #4
    def test_rank_build(self):
        for n_portfolio in self.n_portfolio:
            x = np.random.randn(self.n_samples, n_portfolio)

            calc_weights = rank_build(x, self.n_included)

            expected_weights = np.zeros((len(x), n_portfolio))
            chosen = (-x).argsort(axis=0).argsort(axis=0) < self.n_included

            for j in range(x.shape[1]):
                expected_weights[chosen[:, j], j] = 1.

            np.testing.assert_array_almost_equal(calc_weights,
                                                 expected_weights)
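# Illustration (assumed, not from the original tests): the argsort-of-argsort
# idiom yields the zero-based descending rank of each value, so comparing the
# rank against n_included marks the n_included largest entries per column.
import numpy as np

x = np.array([3., 1., 2.])
rank = (-x).argsort().argsort()   # -> array([0, 2, 1])
top2 = rank < 2                   # -> array([ True, False,  True]): picks 3. and 2.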
Example #5
    def test_rank_build_with_group(self):
        n_include = int(self.n_included / self.n_groups)

        for n_portfolio in self.n_portfolio:

            x = np.random.randn(self.n_samples, n_portfolio)
            groups = np.random.randint(self.n_groups, size=self.n_samples)

            calc_weights = rank_build(x, n_include, groups)

            grouped_ordering = pd.DataFrame(-x).groupby(groups).rank()
            expected_weights = np.zeros((len(x), n_portfolio))
            chosen = (grouped_ordering <= n_include).values
            for j in range(x.shape[1]):
                expected_weights[chosen[:, j], j] = 1.

            np.testing.assert_array_almost_equal(calc_weights,
                                                 expected_weights)
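# Illustration (assumed, not from the original tests): pandas' grouped rank is
# 1-based within each group, so `rank <= n_include` keeps the n_include best
# scores inside every group rather than over the whole universe.
import numpy as np
import pandas as pd

x = np.array([3., 1., 2., 5., 4.])
groups = np.array([0, 0, 0, 1, 1])
grouped_rank = pd.DataFrame(-x).groupby(groups).rank()
chosen = (grouped_rank <= 1).values.flatten()  # -> [True, False, False, True, False]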
Example #6
def build_portfolio(er: np.ndarray,
                    builder: str = 'long_short',
                    **kwargs) -> np.ndarray:

    builder = builder.lower()

    if builder == 'ls' or builder == 'long_short':
        return long_short_build(er, **kwargs).flatten()
    elif builder == 'rank':
        return rank_build(er, **kwargs).flatten()
    elif builder == 'percent':
        return percent_build(er, **kwargs).flatten()
    elif builder == 'linear_prog' or builder == 'linear':
        status, _, weight = linear_build(er, **kwargs)
        if status != 'optimal':
            raise ValueError(
                'linear programming optimizer in status: {0}'.format(status))
        else:
            return weight
    else:
        raise ValueError("Unknown building type ({0})".format(builder))
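# A minimal usage sketch (values are illustrative assumptions): dispatch to the
# rank builder and hold the 20 names with the highest expected returns.
er = np.random.randn(500)
weights = build_portfolio(er, builder='rank', use_rank=20)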
Example #7
def er_portfolio_analysis(
        er: np.ndarray,
        industry: np.ndarray,
        dx_return: np.ndarray,
        constraints: Optional[Union[LinearConstraints, Constraints]] = None,
        detail_analysis: bool = True,
        benchmark: Optional[np.ndarray] = None,
        is_tradable: Optional[np.ndarray] = None,
        method: str = 'risk_neutral',
        **kwargs) -> Tuple[pd.DataFrame, Optional[pd.DataFrame]]:
    er = er.flatten()

    def create_constraints(benchmark, **kwargs):
        if 'lbound' in kwargs:
            lbound = kwargs['lbound'].copy()
            del kwargs['lbound']
        else:
            lbound = np.maximum(0., benchmark - 0.01)

        if 'ubound' in kwargs:
            ubound = kwargs['ubound'].copy()
            del kwargs['ubound']
        else:
            ubound = 0.01 + benchmark
        if is_tradable is not None:
            ubound[~is_tradable] = np.minimum(lbound, ubound)[~is_tradable]

        risk_lbound, risk_ubound = constraints.risk_targets()
        cons_exp = constraints.risk_exp
        return lbound, ubound, cons_exp, risk_lbound, risk_ubound

    if method == 'risk_neutral':
        lbound, ubound, cons_exp, risk_lbound, risk_ubound = create_constraints(
            benchmark, **kwargs)

        turn_over_target = kwargs.get('turn_over_target')
        current_position = kwargs.get('current_position')

        status, _, weights = linear_builder(er,
                                            risk_constraints=cons_exp,
                                            lbound=lbound,
                                            ubound=ubound,
                                            risk_target=(risk_lbound,
                                                         risk_ubound),
                                            turn_over_target=turn_over_target,
                                            current_position=current_position)
        if status != 'optimal':
            raise ValueError(
                'linear programming optimizer in status: {0}'.format(status))

    elif method == 'rank':
        weights = rank_build(
            er, use_rank=kwargs['use_rank'],
            masks=is_tradable).flatten() * benchmark.sum() / kwargs['use_rank']
    elif method == 'ls' or method == 'long_short':
        weights = long_short_builder(er).flatten()
    elif method == 'mv' or method == 'mean_variance':
        lbound, ubound, cons_exp, risk_lbound, risk_ubound = create_constraints(
            benchmark, **kwargs)
        cov = kwargs['cov']

        if 'lam' in kwargs:
            lam = kwargs['lam']
        else:
            lam = 1.

        status, _, weights = mean_variance_builder(er,
                                                   cov=cov,
                                                   bm=benchmark,
                                                   lbound=lbound,
                                                   ubound=ubound,
                                                   risk_exposure=cons_exp,
                                                   risk_target=(risk_lbound,
                                                                risk_ubound),
                                                   lam=lam)
        if status != 'optimal':
            raise ValueError(
                'mean variance optimizer in status: {0}'.format(status))

    elif method == 'tv' or method == 'target_vol':
        lbound, ubound, cons_exp, risk_lbound, risk_ubound = create_constraints(
            benchmark, **kwargs)
        cov = kwargs['cov']

        if 'target_vol' in kwargs:
            target_vol = kwargs['target_vol']
        else:
            target_vol = 1.

        status, _, weights = target_vol_builder(er,
                                                cov=cov,
                                                bm=benchmark,
                                                lbound=lbound,
                                                ubound=ubound,
                                                risk_exposure=cons_exp,
                                                risk_target=(risk_lbound,
                                                             risk_ubound),
                                                vol_low=0,
                                                vol_high=target_vol)
        if status != 'optimal':
            raise ValueError(
                'target vol optimizer in status: {0}'.format(status))
    else:
        raise ValueError("Unknown building type ({0})".format(method))

    if detail_analysis:
        analysis = simple_settle(weights, dx_return, industry, benchmark)
    else:
        analysis = None
    return pd.DataFrame({'weight': weights,
                         'industry': industry,
                         'er': er}), analysis
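# A minimal usage sketch (all inputs are randomly generated assumptions): run a
# rank-based construction against an equal-weight benchmark and collect the
# per-industry settlement analysis.
n = 300
er = np.random.randn(n)
industry = np.random.randint(0, 10, n)
dx_return = 0.01 * np.random.randn(n)
positions, analysis = er_portfolio_analysis(er,
                                            industry,
                                            dx_return,
                                            benchmark=np.ones(n) / n,
                                            is_tradable=np.ones(n, dtype=bool),
                                            method='rank',
                                            use_rank=30)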