Example #1
File: hrp.py Project: rca32/finlab
    def __init__(self):
        self.weights = list()
        self.seriated_distances = None
        self.seriated_correlations = None
        self.ordered_indices = None
        self.clusters = None
        self.returns_estimator = ReturnsEstimators()
        self.risk_metrics = RiskMetrics()
        self.risk_estimator = RiskEstimators()
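
The attributes initialised above are populated when allocate() is called. A minimal usage sketch, mirroring the allocate() calls shown in the later examples on this page (stock_prices.csv is a hypothetical price file, and HierarchicalRiskParity / ReturnsEstimators are assumed to be imported from the library):

    # Sketch only: same inputs as the test cases further down this page
    import pandas as pd

    prices = pd.read_csv('stock_prices.csv', parse_dates=True, index_col='Date')
    returns = ReturnsEstimators().calculate_returns(asset_prices=prices)

    hrp = HierarchicalRiskParity()
    hrp.allocate(asset_names=prices.columns, covariance_matrix=returns.cov())
    print(hrp.weights)  # a single row of portfolio weights that sums to 1
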
Example #2
    def test_valuerror_with_no_asset_names(self):
        """
        Test ValueError when not supplying a list of asset names and no other input.
        """

        with self.assertRaises(ValueError):
            cla = CriticalLineAlgorithm()
            expected_returns = ReturnsEstimators().calculate_mean_historical_returns(asset_prices=self.data,
                                                                                     resample_by='W')
            covariance = ReturnsEstimators().calculate_returns(asset_prices=self.data, resample_by='W').cov()
            cla.allocate(expected_asset_returns=expected_returns, covariance_matrix=covariance)
Example #3
    def semi_covariance(returns, price_data=False, threshold_return=0):
        """
        Calculates the Semi-Covariance matrix for a dataframe of asset prices or returns.

        The Semi-Covariance matrix is used to measure the portfolio's downside volatility. Usually the
        threshold return is zero and only the negative deviations are measured. The threshold can be a positive
        number when a required rate of return is assumed. If the threshold is above zero, the output is the
        volatility measure for returns below this threshold.

        If a dataframe of prices is given, it is transformed into a dataframe of returns using
        the calculate_returns method from the ReturnsEstimators class.

        :param returns: (pd.DataFrame) Dataframe where each column is a series of returns or prices for an asset.
        :param price_data: (bool) Flag if prices of assets are used and not returns.
        :param threshold_return: (float) Required return for each period in the frequency of the input data
                                         (If the input data is daily, it's a daily threshold return).
        :return: (np.array) Semi-Covariance matrix.
        """

        # Calculating the series of returns from series of prices
        if price_data:
            # Class with returns calculation function
            ret_est = ReturnsEstimators()

            # Calculating returns
            returns = ret_est.calculate_returns(returns)

        # Boolean mask for returns below the threshold
        lower_returns = returns - threshold_return < 0

        # Calculating the minimum of 0 and returns minus threshold
        min_returns = (returns - threshold_return) * lower_returns

        # Simple covariance matrix
        semi_covariance = returns.cov()

        # Iterating to fill elements
        for row_number in range(semi_covariance.shape[0]):
            for column_number in range(semi_covariance.shape[1]):
                # Series of returns for the element from the row and column
                row_asset = min_returns.iloc[:, row_number]
                column_asset = min_returns.iloc[:, column_number]

                # Series of element-wise products
                covariance_series = row_asset * column_asset

                # Element of the Semi-Covariance matrix
                semi_cov_element = covariance_series.sum() / min_returns.size

                # Inserting the element in the Semi-Covariance matrix
                semi_covariance.iloc[row_number,
                                     column_number] = semi_cov_element

        return semi_covariance
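
The nested loop above averages the products of below-threshold deviations. The same matrix can be computed in one vectorised step; a minimal sketch (not part of the library) that keeps the same min_returns.size normalisation as the loop:

    # Vectorised sketch; assumes `asset_returns` is a pd.DataFrame of asset returns
    import numpy as np
    import pandas as pd

    def semi_covariance_vectorised(asset_returns: pd.DataFrame, threshold_return: float = 0.0) -> pd.DataFrame:
        # Downside deviations: min(r - threshold, 0) for every observation
        min_returns = np.minimum(asset_returns - threshold_return, 0)
        # Average of the outer products, using the same T * N normalisation as the loop above
        return min_returns.T.dot(min_returns) / min_returns.size
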
Example #4
    def shrinked_covariance(returns, price_data=False, shrinkage_type='basic', assume_centered=False,
                            basic_shrinkage=0.1):
        """
        Calculates the Covariance estimator with shrinkage for a dataframe of asset prices or returns.

        This function allows three types of shrinkage - Basic, Ledoit-Wolf and Oracle Approximating Shrinkage.
        It is a wrapper around sklearn's ShrunkCovariance, LedoitWolf and OAS classes. According to the
        scikit-learn User Guide on Covariance estimation:

        "Sometimes, it even occurs that the empirical covariance matrix cannot be inverted for numerical
        reasons. To avoid such an inversion problem, a transformation of the empirical covariance matrix
        has been introduced: the shrinkage. Mathematically, this shrinkage consists in reducing the ratio
        between the smallest and the largest eigenvalues of the empirical covariance matrix".

        Link to the documentation:
        https://scikit-learn.org/stable/modules/covariance.html

        If a dataframe of prices is given, it is transformed into a dataframe of returns using
        the calculate_returns method from the ReturnsEstimators class.

        :param returns: (pd.DataFrame) Dataframe where each column is a series of returns or prices for an asset.
        :param price_data: (bool) Flag if prices of assets are used and not returns. (False by default)
        :param shrinkage_type: (str) Type of shrinkage to use. (``basic`` by default, ``lw``, ``oas``, ``all``)
        :param assume_centered: (bool) Flag for data with mean almost, but not exactly zero.
                                       (Read documentation for chosen shrinkage class, False by default)
        :param basic_shrinkage: (float) Between 0 and 1. Coefficient in the convex combination for basic shrinkage.
                                        (0.1 by default)
        :return: (np.array) Estimated covariance matrix. Tuple of covariance matrices if shrinkage_type = ``all``.
        """

        # Calculating the series of returns from series of prices
        if price_data:
            # Class with returns calculation function
            ret_est = ReturnsEstimators()

            # Calculating returns
            returns = ret_est.calculate_returns(returns)

        # Calculating the covariance matrix for the chosen method
        if shrinkage_type == 'basic':
            cov_matrix = ShrunkCovariance(assume_centered=assume_centered, shrinkage=basic_shrinkage).fit(
                returns).covariance_
        elif shrinkage_type == 'lw':
            cov_matrix = LedoitWolf(assume_centered=assume_centered).fit(returns).covariance_
        elif shrinkage_type == 'oas':
            cov_matrix = OAS(assume_centered=assume_centered).fit(returns).covariance_
        else:
            cov_matrix = (
                ShrunkCovariance(assume_centered=assume_centered, shrinkage=basic_shrinkage).fit(returns).covariance_,
                LedoitWolf(assume_centered=assume_centered).fit(returns).covariance_,
                OAS(assume_centered=assume_centered).fit(returns).covariance_)

        return cov_matrix
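
For reference, the scikit-learn estimators wrapped above can also be called directly. A minimal sketch on a hypothetical dataframe of asset returns:

    # Sketch: direct use of the shrinkage estimators wrapped by shrinked_covariance
    import pandas as pd
    from sklearn.covariance import ShrunkCovariance, LedoitWolf, OAS

    asset_returns = pd.read_csv('stock_returns.csv', index_col=0)  # hypothetical returns file

    basic_cov = ShrunkCovariance(shrinkage=0.1).fit(asset_returns).covariance_
    lw_cov = LedoitWolf().fit(asset_returns).covariance_
    oas_cov = OAS().fit(asset_returns).covariance_
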
Example #5
    def setUp(self):
        """
        Initialize and get the test data
        """

        # Stock prices data to test the Covariance functions
        project_path = os.path.dirname(__file__)
        data_path = project_path + '/test_data/stock_prices.csv'
        self.data = pd.read_csv(data_path, parse_dates=True, index_col="Date")

        # And series of returns
        ret_est = ReturnsEstimators()
        self.returns = ret_est.calculate_returns(self.data)
Example #6
    def test_hrp_with_input_as_covariance_matrix(self):
        """
        Test HRP when passing a covariance matrix as input.
        """

        hrp = HierarchicalRiskParity()
        returns = ReturnsEstimators().calculate_returns(asset_prices=self.data)
        hrp.allocate(asset_names=self.data.columns,
                     covariance_matrix=returns.cov())
        weights = hrp.weights.values[0]
        assert (weights >= 0).all()
        assert len(weights) == self.data.shape[1]
        np.testing.assert_almost_equal(np.sum(weights), 1)
Example #7
    def test_valuerror_with_no_asset_names(self):
        """
        Test ValueError when not supplying a list of asset names and no other input
        """

        with self.assertRaises(ValueError):
            mvo = MeanVarianceOptimisation()
            expected_returns = ReturnsEstimators(
            ).calculate_mean_historical_returns(asset_prices=self.data,
                                                resample_by='W')
            covariance = ReturnsEstimators().calculate_returns(
                asset_prices=self.data, resample_by='W').cov()
            mvo.allocate(expected_asset_returns=expected_returns,
                         covariance_matrix=covariance.values)
Example #8
    def test_hrp_with_nan_inputs(self):
        """
        Test HRP with NaN inputs
        """

        hrp = HierarchicalRiskParity()
        returns = ReturnsEstimators().calculate_returns(asset_prices=self.data)
        covariance = returns.cov()
        covariance *= np.nan
        hrp.allocate(covariance_matrix=covariance)
        weights = hrp.weights.values[0]
        assert (weights >= 0).all()
        assert len(weights) == self.data.shape[1]
        np.testing.assert_almost_equal(np.sum(weights), 1)
Example #9
    def test_hcaa_with_asset_returns_as_none(self):
        """
        Test HCAA when asset returns are not required for calculating the weights.
        """

        hcaa = HierarchicalClusteringAssetAllocation()
        returns = ReturnsEstimators().calculate_returns(asset_prices=self.data)
        hcaa.allocate(asset_names=self.data.columns,
                      covariance_matrix=returns.cov(),
                      optimal_num_clusters=5,
                      risk_measure='equal_weighting')
        weights = hcaa.weights.values[0]
        assert (weights >= 0).all()
        assert len(weights) == self.data.shape[1]
        np.testing.assert_almost_equal(np.sum(weights), 1)
Example #10
    def test_hcaa_with_input_as_covariance_matrix(self):
        """
        Test HCAA when passing a covariance matrix as input.
        """

        hcaa = HierarchicalClusteringAssetAllocation()
        returns = ReturnsEstimators().calculate_returns(asset_prices=self.data)
        hcaa.allocate(asset_names=self.data.columns,
                      covariance_matrix=returns.cov(),
                      optimal_num_clusters=6,
                      asset_returns=returns)
        weights = hcaa.weights.values[0]
        assert (weights >= 0).all()
        assert len(weights) == self.data.shape[1]
        np.testing.assert_almost_equal(np.sum(weights), 1)
Example #11
    def test_value_error_for_custom_obj_optimal_weights(self):
        # pylint: disable=invalid-name
        """
        Test ValueError when no optimal weights are found for custom objective solution.
        """

        with self.assertRaises(ValueError):
            mvo = MeanVarianceOptimisation()
            custom_obj = 'cp.Minimize(risk - kappa)'
            constraints = [
                'cp.sum(weights) == 1', 'weights >= 0', 'weights <= 1'
            ]
            non_cvxpy_variables = {
                'num_assets':
                self.data.shape[1],
                'covariance':
                self.data.cov(),
                'expected_returns':
                ReturnsEstimators().calculate_mean_historical_returns(
                    asset_prices=self.data, resample_by='W')
            }
            cvxpy_variables = [
                'risk = cp.quad_form(weights, covariance)',
                'portfolio_return = cp.matmul(weights, expected_returns)',
                'kappa = cp.Variable(1)'
            ]
            mvo.allocate_custom_objective(
                non_cvxpy_variables=non_cvxpy_variables,
                cvxpy_variables=cvxpy_variables,
                objective_function=custom_obj,
                constraints=constraints)
Example #12
    def test_custom_objective_function(self):
        """
        Test custom portfolio objective and allocation constraints.
        """

        mvo = MeanVarianceOptimisation()
        custom_obj = 'cp.Minimize(risk)'
        constraints = ['cp.sum(weights) == 1', 'weights >= 0', 'weights <= 1']
        non_cvxpy_variables = {
            'num_assets':
            self.data.shape[1],
            'covariance':
            self.data.cov(),
            'expected_returns':
            ReturnsEstimators().calculate_mean_historical_returns(
                asset_prices=self.data, resample_by='W')
        }
        cvxpy_variables = [
            'risk = cp.quad_form(weights, covariance)',
            'portfolio_return = cp.matmul(weights, expected_returns)'
        ]
        mvo.allocate_custom_objective(non_cvxpy_variables=non_cvxpy_variables,
                                      cvxpy_variables=cvxpy_variables,
                                      objective_function=custom_obj,
                                      constraints=constraints)
        weights = mvo.weights.values[0]
        assert (weights >= 0).all()
        assert len(weights) == self.data.shape[1]
        assert mvo.asset_names == list(range(mvo.num_assets))
        assert mvo.portfolio_return == 0.012854555899642236
        assert mvo.portfolio_risk == 3.0340907720046832
        np.testing.assert_almost_equal(np.sum(weights), 1)
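
The string variables passed to allocate_custom_objective above describe a plain minimum-variance problem. Written directly in cvxpy (a standalone sketch, not the library's internal code), the same problem looks like this:

    # Standalone cvxpy sketch of the objective and constraints described by the strings above
    import cvxpy as cp
    import numpy as np

    num_assets = 5
    covariance = np.eye(num_assets)  # placeholder covariance, for illustration only

    weights = cp.Variable(num_assets)
    risk = cp.quad_form(weights, covariance)
    problem = cp.Problem(cp.Minimize(risk),
                         [cp.sum(weights) == 1, weights >= 0, weights <= 1])
    problem.solve()
    print(weights.value)  # optimal weights, summing to 1
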
Example #13
    def test_no_asset_names_by_passing_cov(self):
        """
        Test MVO when not supplying a list of asset names but passing covariance matrix as input
        """

        mvo = MeanVarianceOptimisation()
        expected_returns = ReturnsEstimators(
        ).calculate_exponential_historical_returns(asset_prices=self.data,
                                                   resample_by='W')
        covariance = ReturnsEstimators().calculate_returns(
            asset_prices=self.data, resample_by='W').cov()
        mvo.allocate(expected_asset_returns=expected_returns,
                     covariance_matrix=covariance)
        weights = mvo.weights.values[0]
        assert (weights >= 0).all()
        assert len(weights) == self.data.shape[1]
        np.testing.assert_almost_equal(np.sum(weights), 1)
Example #14
    def test_exception_in_plotting_efficient_frontier(self):
        # pylint: disable=invalid-name, protected-access
        """
        Test raising of exception when plotting the efficient frontier.
        """

        mvo = MeanVarianceOptimisation()
        expected_returns = ReturnsEstimators(
        ).calculate_mean_historical_returns(asset_prices=self.data,
                                            resample_by='W')
        covariance = ReturnsEstimators().calculate_returns(
            asset_prices=self.data, resample_by='W').cov()
        plot = mvo.plot_efficient_frontier(
            covariance=covariance,
            max_return=1.0,
            expected_asset_returns=expected_returns)
        assert len(plot._A) == 41
Example #15
    def minimum_covariance_determinant(returns,
                                       price_data=False,
                                       assume_centered=False,
                                       support_fraction=None,
                                       random_state=None):
        """
        Calculates the Minimum Covariance Determinant for a dataframe of asset prices or returns.

        This function is a wrapper around sklearn's MinCovDet (MCD) class. According to the
        scikit-learn User Guide on Covariance estimation:

        "The idea is to find a given proportion (h) of “good” observations that are not outliers
        and compute their empirical covariance matrix. This empirical covariance matrix is then
        rescaled to compensate for the performed selection of observations".

        Link to the documentation:
        https://scikit-learn.org/stable/modules/generated/sklearn.covariance.MinCovDet.html

        If a dataframe of prices is given, it is transformed into a dataframe of returns using
        the calculate_returns method from the ReturnsEstimators class.

        :param returns: (pd.DataFrame) Dataframe where each column is a series of returns or prices for an asset.
        :param price_data: (bool) Flag if prices of assets are used and not returns.
        :param assume_centered: (bool) Flag for data whose mean is close to, but not exactly, zero
                                       (Read the documentation for MinCovDet class).
        :param support_fraction: (float) Values between 0 and 1. The proportion of points to be included in the support
                                         of the raw MCD estimate (Read the documentation for MinCovDet class).
        :param random_state: (int) Seed used by the random number generator.
        :return: (np.array) Estimated robust covariance matrix.
        """

        # Calculating the series of returns from series of prices
        if price_data:
            # Class with returns calculation function
            ret_est = ReturnsEstimators()

            # Calculating returns
            returns = ret_est.calculate_returns(returns)

        # Calculating the covariance matrix
        cov_matrix = MinCovDet(
            assume_centered=assume_centered,
            support_fraction=support_fraction,
            random_state=random_state).fit(returns).covariance_

        return cov_matrix
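
A minimal direct-use sketch of sklearn's MinCovDet, which the wrapper above delegates to (synthetic data, for illustration only):

    # Sketch: MinCovDet on synthetic return data
    import numpy as np
    from sklearn.covariance import MinCovDet

    rng = np.random.default_rng(0)
    synthetic_returns = rng.normal(scale=0.01, size=(250, 4))  # 250 observations, 4 assets

    robust_cov = MinCovDet(random_state=0).fit(synthetic_returns).covariance_
    print(robust_cov.shape)  # (4, 4)
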
Example #16
    def __init__(self, confidence_level=0.05):
        """
        Initialise.

        :param confidence_level: (float) The confidence level (alpha) used for calculating expected shortfall and conditional
                                         drawdown at risk.
        """

        self.weights = list()
        self.clusters = None
        self.ordered_indices = None
        self.cluster_children = None
        self.optimal_num_clusters = None
        self.returns_estimator = ReturnsEstimators()
        self.risk_estimator = RiskEstimators()
        self.risk_metrics = RiskMetrics()
        self.confidence_level = confidence_level
Example #17
    def exponential_covariance(returns, price_data=False, window_span=60):
        """
        Calculates the Exponentially-weighted Covariance matrix for a dataframe of asset prices or returns.

        For each pair of assets it computes the series of covariance terms, takes the exponentially weighted
        moving average of that series, and uses its last value as the corresponding element of the matrix.

        If a dataframe of prices is given, it is transformed into a dataframe of returns using
        the calculate_returns method from the ReturnsEstimators class.

        :param returns: (pd.DataFrame) Dataframe where each column is a series of returns or prices for an asset.
        :param price_data: (bool) Flag if prices of assets are used and not returns. (False by default)
        :param window_span: (int) Used to specify decay in terms of span for the exponentially-weighted series.
                                  (60 by default)
        :return: (np.array) Exponentially-weighted Covariance matrix.
        """

        # Calculating the series of returns from series of prices
        if price_data:
            # Class with returns calculation function
            ret_est = ReturnsEstimators()

            # Calculating returns
            returns = ret_est.calculate_returns(returns)

        # Simple covariance matrix
        cov_matrix = returns.cov()

        # Iterating to fill elements
        for row_number in range(cov_matrix.shape[0]):
            for column_number in range(cov_matrix.shape[1]):
                # Series of returns for the element from the row and column
                row_asset = returns.iloc[:, row_number]
                column_asset = returns.iloc[:, column_number]

                # Series of covariance
                covariance_series = (row_asset - row_asset.mean()) * (column_asset - column_asset.mean())

                # Exponentially weighted moving average series
                ew_ma = covariance_series.ewm(span=window_span).mean()

                # Using the most current element as the Exponential Covariance value
                cov_matrix.iloc[row_number, column_number] = ew_ma.iloc[-1]

        return cov_matrix
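
pandas can produce a closely related matrix in a single call. This is a sketch only and is not numerically identical to the loop above, because the loop demeans with the full-sample mean while pandas' ewm().cov() uses exponentially weighted means:

    # One-call pandas alternative (results differ slightly from the loop above)
    last_date = returns.index[-1]
    ewm_cov = returns.ewm(span=60).cov().loc[last_date]
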
Example #18
    def test_cla_with_input_as_returns_and_covariance(self):
        # pylint: disable=invalid-name
        """
        Test CLA when we pass expected returns and covariance matrix as input.
        """

        cla = CriticalLineAlgorithm()
        expected_returns = ReturnsEstimators().calculate_mean_historical_returns(asset_prices=self.data)
        covariance = ReturnsEstimators().calculate_returns(asset_prices=self.data).cov()
        cla.allocate(covariance_matrix=covariance,
                     expected_asset_returns=expected_returns,
                     asset_names=self.data.columns)
        weights = cla.weights.values
        weights[weights <= 1e-15] = 0  # Convert very very small numbers to 0
        for turning_point in weights:
            assert (turning_point >= 0).all()
            assert len(turning_point) == self.data.shape[1]
            np.testing.assert_almost_equal(np.sum(turning_point), 1)
Example #19
def test_exception_in_plotting_efficient_frontier(S_value):
    # pylint: disable=invalid-name, protected-access
    """
    Test raising of exception when plotting the efficient frontier.
    """

    mvo = MeanVarianceOptimisation()
    pdPrice = pd.DataFrame(S_value)
    pdPrice.index = pd.RangeIndex(start=0, stop=6, step=1)
    dates = ['2019-01-01','2019-02-01','2019-03-01','2019-04-01','2019-05-01','2019-06-01']
    pdPrice['Datetime'] = pd.to_datetime(dates)
    pdPrice = pdPrice.set_index('Datetime')
    expected_returns = ReturnsEstimators().calculate_mean_historical_returns(asset_prices=pdPrice, resample_by=None)
    covariance = ReturnsEstimators().calculate_returns(asset_prices=pdPrice, resample_by=None).cov()
    plot = mvo.plot_efficient_frontier(covariance=covariance, max_return=1.0, expected_asset_returns=expected_returns)
    assert len(plot._A) == 41
    plot.savefig('books_read.png')
    print("read books")
Example #20
File: hcaa.py Project: ydm/mlfinlab
    def __init__(self, calculate_expected_returns='mean', confidence_level=0.05):
        """
        Initialise.

        :param calculate_expected_returns: (str) The method to use for calculation of expected returns.
                                                 Currently supports: ``mean``, ``exponential``.
        :param confidence_level: (float) The confidence level (alpha) used for calculating expected shortfall and conditional
                                         drawdown at risk.
        """

        self.weights = list()
        self.clusters = None
        self.ordered_indices = None
        self.cluster_children = None
        self.returns_estimator = ReturnsEstimators()
        self.risk_metrics = RiskMetrics()
        self.calculate_expected_returns = calculate_expected_returns
        self.confidence_level = confidence_level
Example #21
    def test_mvo_with_input_as_returns_and_covariance(self):
        # pylint: disable=invalid-name
        """
        Test MVO when we pass expected returns and covariance matrix as input.
        """

        mvo = MeanVarianceOptimisation()
        expected_returns = ReturnsEstimators(
        ).calculate_mean_historical_returns(asset_prices=self.data,
                                            resample_by='W')
        covariance = ReturnsEstimators().calculate_returns(
            asset_prices=self.data, resample_by='W').cov()
        mvo.allocate(covariance_matrix=covariance,
                     expected_asset_returns=expected_returns,
                     asset_names=self.data.columns)
        weights = mvo.weights.values[0]
        assert (weights >= 0).all()
        assert len(weights) == self.data.shape[1]
        np.testing.assert_almost_equal(np.sum(weights), 1)
Example #22
    def test_plotting_efficient_frontier(self):
        # pylint: disable=invalid-name, protected-access
        """
        Test the plotting of the efficient frontier.
        """

        mvo = MeanVarianceOptimisation()
        expected_returns = ReturnsEstimators(
        ).calculate_mean_historical_returns(asset_prices=self.data,
                                            resample_by='W')
        covariance = ReturnsEstimators().calculate_returns(
            asset_prices=self.data, resample_by='W').cov()
        plot = mvo.plot_efficient_frontier(
            covariance=covariance,
            max_return=1.0,
            expected_asset_returns=expected_returns)
        assert plot.axes.xaxis.label._text == 'Volatility'
        assert plot.axes.yaxis.label._text == 'Return'
        assert len(plot._A) == 41
Example #23
    def __init__(self, calculate_expected_returns='mean', risk_free_rate=0.03):
        """
        Constructor.

        :param calculate_expected_returns: (str) The method to use for calculation of expected returns.
                                                 Currently supports: ``mean``, ``exponential``.
        :param risk_free_rate: (float) The rate of return of a risk-free asset. (0.03 by default)
        """

        self.weights = list()
        self.asset_names = None
        self.num_assets = None
        self.portfolio_risk = None
        self.portfolio_return = None
        self.portfolio_sharpe_ratio = None
        self.calculate_expected_returns = calculate_expected_returns
        self.returns_estimator = ReturnsEstimators()
        self.risk_estimators = RiskEstimators()
        self.weight_bounds = (0, 1)
        self.risk_free_rate = risk_free_rate
Example #24
    def test_valu_error_with_no_asset_names(self):
        """
        Test ValueError when not supplying a list of asset names and no other input
        """

        with self.assertRaises(ValueError):
            hcaa = HierarchicalClusteringAssetAllocation()
            returns = ReturnsEstimators().calculate_returns(
                asset_prices=self.data)
            hcaa.allocate(asset_returns=returns.values, optimal_num_clusters=6)
Example #25
    def test_valuerror_with_no_asset_names(self):
        """
        Test ValueError when not supplying a list of asset names and no other input.
        """

        with self.assertRaises(ValueError):
            hrp = HierarchicalRiskParity()
            returns = ReturnsEstimators().calculate_returns(
                asset_prices=self.data)
            hrp.allocate(asset_returns=returns.values)
Example #26
    def setUp(self):
        """
        Initialize and load data
        """

        project_path = os.path.dirname(__file__)

        # Loading the price series of ETFs
        price_data_path = project_path + '/test_data/stock_prices.csv'
        self.price_data = pd.read_csv(price_data_path,
                                      parse_dates=True,
                                      index_col="Date")

        # Transforming series of prices to series of returns
        ret_est = ReturnsEstimators()
        self.returns_data = ret_est.calculate_returns(self.price_data)

        # Loading the classification tree of ETFs
        classification_tree_path = project_path + '/test_data/classification_tree.csv'
        self.classification_tree = pd.read_csv(classification_tree_path)
Example #27
    def test_hcaa_sharpe_ratio_alloc_factor_less_than_one(self):
        # pylint: disable=invalid-name
        """
        Test the condition where the allocation factor calculated for the Sharpe ratio metric is less than 0
        or greater than 1 (in which case variance is used as the metric instead).
        """

        hcaa = HierarchicalClusteringAssetAllocation()
        returns = ReturnsEstimators().calculate_returns(asset_prices=self.data)
        expected_returns = returns.mean()
        expected_returns[0] = -10000
        hcaa.allocate(expected_asset_returns=expected_returns,
                      asset_names=self.data.columns,
                      covariance_matrix=returns.corr(),
                      optimal_num_clusters=5,
                      allocation_metric='sharpe_ratio')
        weights = hcaa.weights.values[0]
        assert (weights >= 0).all()
        assert len(weights) == self.data.shape[1]
        np.testing.assert_almost_equal(np.sum(weights), 1)
Example #28
    def test_hrp_with_input_as_distance_matrix(self):
        """
        Test HRP when passing a distance matrix as input.
        """

        hrp = HierarchicalRiskParity()
        returns = ReturnsEstimators().calculate_returns(asset_prices=self.data)
        covariance = returns.cov()
        corr = RiskEstimators.cov_to_corr(covariance)
        corr = pd.DataFrame(corr,
                            index=covariance.columns,
                            columns=covariance.columns)
        distance_matrix = np.sqrt((1 - corr).round(5) / 2)
        hrp.allocate(asset_names=self.data.columns,
                     covariance_matrix=covariance,
                     distance_matrix=distance_matrix)
        weights = hrp.weights.values[0]
        self.assertTrue((weights >= 0).all())
        self.assertTrue(len(weights) == self.data.shape[1])
        self.assertAlmostEqual(np.sum(weights), 1)
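
For reference, the distance built in this test is the standard HRP correlation-to-distance transform, d_ij = sqrt((1 - rho_ij) / 2), which maps a correlation of +1 to a distance of 0 and a correlation of -1 to a distance of 1. The .round(5) presumably only guards against tiny negative values introduced by floating-point noise before the square root.
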
Example #29
    def test_no_asset_names_with_asset_returns(self):
        """
        Test HRP when not supplying a list of asset names and when the user passes asset_returns.
        """

        hrp = HierarchicalRiskParity()
        returns = ReturnsEstimators().calculate_returns(asset_prices=self.data)
        hrp.allocate(asset_returns=returns)
        weights = hrp.weights.values[0]
        assert (weights >= 0).all()
        assert len(weights) == self.data.shape[1]
        np.testing.assert_almost_equal(np.sum(weights), 1)
Example #30
    def test_no_asset_names_with_asset_returns(self):
        """
        Test HCAA when not supplying a list of asset names and when the user passes asset_returns.
        """

        hcaa = HierarchicalClusteringAssetAllocation()
        returns = ReturnsEstimators().calculate_returns(asset_prices=self.data)
        hcaa.allocate(asset_returns=returns, optimal_num_clusters=6)
        weights = hcaa.weights.values[0]
        assert (weights >= 0).all()
        assert len(weights) == self.data.shape[1]
        np.testing.assert_almost_equal(np.sum(weights), 1)