Ejemplo n.º 1
0
def add_block(entity_scope: str, entity_code: str, bs: InMemoryBlockStore, sd, ed, ad) -> None:
    """
    Builds a performance block from the source data and adds it to a block store.

    :param str entity_scope: The scope of the entity to add
    :param str entity_code: The code of the entity to add
    :param InMemoryBlockStore bs: The block store to add the block to
    :param sd: The effectiveAt start date
    :param ed: The effectiveAt end date
    :param ad: The asAt date

    :return: None
    """
    global src

    # Start with an empty block covering the requested effectiveAt/asAt window
    block = PerformanceDataSet(from_date=sd, to_date=ed, asat=ad)

    # Pull the raw performance rows for the block's window from the source
    perf_rows = src.get_perf_data(
        entity_scope=entity_scope,
        entity_code=entity_code,
        from_date=block.from_date,
        to_date=block.to_date,
        asat=block.asat)

    # Walk the rows day by day (in chronological order) and populate the block,
    # turning each row into a (key, market value, net flow) data point
    for day, day_rows in perf_rows.groupby('date'):
        points = day_rows.apply(lambda row: (row['key'], row['mv'], row['net']), axis=1)
        block.add_values(date=day, data_source=points)

    # Persist the fully populated block
    bs.add_block(entity_scope=entity_scope, entity_code=entity_code, block=block)
Ejemplo n.º 2
0
def test_two_periods(fund, expected_cum_ror, expected_correction):
    """Import two overlapping return periods, then verify the cumulative
    return and the back-dated correction in both locked and 'true' modes."""
    block_store = InMemoryBlockStore()
    source = ReturnSource(
        dataset='Ret1',
        portfolio=fund,
        filename=Path(__file__).parent.parent.joinpath("test-data.xlsx"))

    # Import the first period, then a second period whose start overlaps it
    Returns(block_store).import_data(
        test_scope, fund, source,
        '2020-01-01', '2020-01-11', '2020-01-11').import_data(
        test_scope, fund, source,
        '2020-01-08', '2020-01-15', '2020-01-15')

    def check_returns(locked):
        # Report across the full range, requesting the DAY and WTD fields
        report = Performance(test_scope, fund, None, block_store).report(
            locked, '2020-01-01', '2020-01-15', '2020-01-15',
            fields=[DAY, WTD])
        frame = pd.DataFrame.from_records(report)

        assert len(frame) == 15
        # Cumulative return on the final reported day
        cum_ror = frame['inception'].iloc[-1]
        # The back-dated correction is only visible in locked mode
        correction = frame['correction'].iloc[-4]
        assert cum_ror == pytest.approx(expected_cum_ror, abs=0.00005)
        assert correction == pytest.approx(
            expected_correction if locked else 0.0, abs=0.00005)

    # Check the returns in locked mode
    check_returns(True)
    # And, using the same block-store, run in 'true' mode
    check_returns(False)
Ejemplo n.º 3
0
def test_get_change_parameters_are_as_expected(scenario):
    """Verify the arguments passed to get_changes for unlocked queries."""
    global testcases
    global src

    # Each test case holds (call arguments, expected captured values)
    case = testcases[scenario]
    prf = Performance(entity_scope="Test",
                      entity_code="GetChangesParams",
                      src=src,
                      block_store=InMemoryBlockStore())

    # Seed the store with an initial locked block
    prf.get_performance(locked=True,
                        start_date='2019-12-31',
                        end_date='2020-01-05',
                        asat='2020-01-06',
                        create=True)

    # The unlocked query should trigger get_changes (raises ChangeCalled here)
    with pytest.raises(ChangeCalled) as exc_info:
        prf.get_performance(False, *case[0])

    # Compare each captured argument with its expectation, labelled by field
    field_names = ['last_eff_date', 'last_asat_date', 'required_asat_date']
    for actual, expected, field in zip(exc_info.value.args[2:], case[1],
                                       field_names):
        assert actual == expected, field
Ejemplo n.º 4
0
def test_get_changes_not_called_if_block_exists():
    """
    Queries fully covered by an existing locked block must be served from the
    block store without consulting the source's get_changes (no exception).

    :return: None
    """
    global src
    # Create an instance of the Performance class for generating reports
    prf = Performance(entity_scope="test",
                      entity_code="get_changes",
                      src=src,
                      block_store=InMemoryBlockStore())

    # Create a block - as an initial block, get_changes will not be called. No exception raised.
    p = list(
        prf.get_performance(locked=True,
                            start_date='2019-12-31',
                            end_date='2020-01-05',
                            asat='2020-01-05',
                            create=True))
    assert len(p) == 6

    # Subsequent calls can reuse the original block - no exception should be thrown.
    # Each scenario: (start_date, end_date, expected number of data points)
    scenarios = [
        # The exact same period as the original block
        ('2019-12-31', '2020-01-05', 6),
        # A subset of the original block with the start trimmed
        ('2020-01-03', '2020-01-05', 3),
        # A subset of the original block with the end trimmed
        ('2019-12-31', '2020-01-04', 5),
        # A subset of the original block with both ends trimmed
        ('2020-01-03', '2020-01-04', 2),
    ]
    for start_date, end_date, expected_len in scenarios:
        p = list(
            prf.get_performance(locked=True,
                                start_date=start_date,
                                end_date=end_date,
                                asat='2020-01-05'))
        # Identify the failing scenario in the assertion message
        assert len(p) == expected_len, (start_date, end_date)
Ejemplo n.º 5
0
def test_perf_start_date_is_honoured():
    """The inception return must be measured from perf_start, not from the
    query's start date."""
    performance = Performance(entity_scope="test",
                              entity_code="start_date",
                              src=src1,
                              block_store=InMemoryBlockStore(),
                              perf_start='2019-12-31')

    # Query a window that begins after perf_start
    rows = list(performance.report(False,
                                   '2020-01-03',
                                   '2020-01-05',
                                   '2020-01-10',
                                   fields=[DAY]))

    # Cumulative (inception) return on the last reported day
    cum_ror = rows[-1]['inception']

    assert cum_ror == pytest.approx(0.187343)
Ejemplo n.º 6
0
def test_5yr_vol(recording):
    """Record-or-compare test for the volatility fields over a seeded
    six-year performance series."""
    vol_fields = [
        DAY, VOL_INC, VOL_1YR, VOL_3YR, VOL_5YR, ANN_VOL_3YR, ANN_VOL_5YR,
        ANN_VOL_INC
    ]

    # Deterministic source seeded with a fixed value
    src = SeededSource()
    src.add_seeded_perf_data("test", "test_5yr_vol", "2014-12-31", 24106)

    perf = Performance(entity_scope="test",
                       entity_code="test_5yr_vol",
                       src=src,
                       block_store=InMemoryBlockStore())
    records = perf.report(False,
                          '2013-12-31',
                          '2020-03-05',
                          '2020-03-05',
                          fields=vol_fields)
    df = pd.DataFrame.from_records(records)[['date', 'mv'] + vol_fields]

    # Test the boundaries of each year.
    # This is where the different period volatilities
    # will diverge from the inception volatility.
    boundary_dates = set(
        dates('2014-12-31', '2015-01-01', '2015-12-31', '2016-01-01',
              '2016-12-31', '2017-01-01', '2017-12-31', '2018-01-01',
              '2018-12-31', '2019-01-01', '2019-12-31', '2020-01-01',
              '2020-03-05'))
    subset = df[df['date'].isin(boundary_dates)]

    # To record tests, use pytest --recording=test_5yr_vol
    filename = Path(__file__).parent.joinpath('expected', '5yr_vol.pk')

    if 'test_5yr_vol' in recording:
        # Record the result and save as the expectation
        subset.to_pickle(filename, protocol=0)
        # Also save a temporary csv version for manual review
        nicer(df).to_csv('5yr_vol.csv', index=False)
    else:
        # Compare the generated data with the recorded expectation
        pd.testing.assert_frame_equal(subset, pd.read_pickle(filename))
Ejemplo n.º 7
0
def test_change_parameters_not_called_when_inside_locked_period():
    """A locked query fully inside an existing locked block must be served
    without calling get_changes (which would raise ChangeCalled here)."""
    prf = Performance(entity_scope="Test",
                      entity_code="GetChangesParams",
                      src=src,
                      block_store=InMemoryBlockStore())

    # Seed an initial locked block covering 2019-12-31 .. 2020-01-05
    prf.get_performance(True, '2019-12-31', '2020-01-05', '2020-01-06',
                        create=True)

    # Query locked performance strictly within the block's boundary;
    # reaching the assertion means no exception was thrown
    result = prf.get_performance(True, '2020-01-02', '2020-01-03',
                                 '2020-01-10')
    assert result is not None
Ejemplo n.º 8
0
def test_sharpe_ratios(recording):
    """Record-or-compare test for rolling/annualised returns, volatilities,
    risk-free rates and Sharpe ratios over a seeded performance series.

    :param recording: container of test names being (re-)recorded; membership
        of this test's name switches the test into recording mode.
    """
    # Full set of period/rolling fields requested from the report
    fields = [
        DAY, AGE_DAYS, ROLL_YEAR, ROLL_3YR, ROLL_5YR, ANN_INC, ANN_1YR,
        ANN_3YR, ANN_5YR, VOL_INC, VOL_1YR, VOL_3YR, VOL_5YR, ANN_VOL_INC,
        ANN_VOL_1YR, ANN_VOL_3YR, ANN_VOL_5YR, RISK_FREE_1YR, RISK_FREE_3YR,
        RISK_FREE_5YR, SHARPE_1YR, SHARPE_3YR, SHARPE_5YR
    ]

    # Deterministic seeded source with a risk-free-rate function attached
    src = SeededSource(rfr_func=risk_free_rates)
    src.add_seeded_perf_data("test", "sharpe_ratio", '2013-12-31', 24106)

    # Run an unlocked report over the whole seeded history and keep only the
    # identifying columns plus the requested fields
    df = pd.DataFrame.from_records(
        Performance(entity_scope="test",
                    entity_code="sharpe_ratio",
                    src=src,
                    block_store=InMemoryBlockStore()).report(
                        locked=False,
                        start_date='2013-12-31',
                        end_date='2020-03-05',
                        asat='2020-03-05',
                        fields=fields))[['date', 'mv', 'inception'] + fields]

    # Test cases (the entire frame is compared)
    subset = df

    # To record tests, use pytest --recording=test_sharpe
    filename = Path(__file__).parent.joinpath('expected', 'sharpe_ratios.pk')

    if 'test_sharpe_ratios' in recording:
        # Record the result and save as the expectation
        subset.to_pickle(filename, protocol=0)

        # Also save a temporary csv version for manual review
        nicer(df).to_csv('sharpe_ratios.csv', index=False)

    else:
        # Load expected values and compare with the generated data
        target = pd.read_pickle(filename)

        # Compare with the expectation
        pd.testing.assert_frame_equal(subset, target)
Ejemplo n.º 9
0
def test_cumulative_returns(fund, expected):
    """Import a single return period and verify the cumulative return
    reported on the final day."""
    block_store = InMemoryBlockStore()

    # Unique portfolio code so repeated runs do not collide
    portfolio_code = str(uuid.uuid4())

    source = ReturnSource(
        dataset='Ret1',
        portfolio=fund,
        filename=Path(__file__).parent.parent.joinpath("test-data.xlsx"))
    Returns(block_store).import_data(test_scope, portfolio_code, source,
                                     '2020-01-01', '2020-01-11', '2020-01-11')

    # Report for the single final day only
    report = Performance(test_scope, portfolio_code, None,
                         block_store).report(False, '2020-01-11',
                                             '2020-01-11', '2020-01-11')
    df = pd.DataFrame.from_records(report)

    assert len(df) == 1
    cum_ror = df['inception'].iloc[-1]
    assert cum_ror == pytest.approx(expected, abs=0.00005)
Ejemplo n.º 10
0
def test_change_parameters_called_when_outside_locked_period():
    """A locked query crossing the block boundary must call get_changes with
    the block's last effectiveAt/asAt dates and the requested asAt date."""
    global src

    prf = Performance(entity_scope="Test",
                      entity_code="GetChangesParams",
                      src=src,
                      block_store=InMemoryBlockStore())

    # Seed an initial locked block covering 2019-12-31 .. 2020-01-05
    prf.get_performance(True, '2019-12-31', '2020-01-05', '2020-01-06',
                        create=True)

    # Query locked performance, crossing the boundary; the stubbed
    # get_changes raises ChangeCalled carrying its arguments
    with pytest.raises(ChangeCalled) as exc_info:
        prf.get_performance(True, '2020-01-02', '2020-01-10', '2020-01-10')

    # Captured args: last effectiveAt, last asAt, required asAt
    assert exc_info.value.args[2] == as_date('2020-01-05')
    assert exc_info.value.args[3] == as_date('2020-01-06')
    assert exc_info.value.args[4] == as_date('2020-01-10')
Ejemplo n.º 11
0
def create_demo_sheet():
    """Generate demo.csv containing the full set of performance fields for a
    seeded six-year series (for manual review, not an automated test)."""
    fields = [
        AGE_DAYS, DAY, WTD, MTD, QTD, YTD, ROLL_WEEK, ROLL_MONTH, ROLL_QTR,
        ROLL_YEAR, ROLL_3YR, ROLL_5YR, ANN_INC, ANN_1YR, ANN_3YR, ANN_5YR,
        VOL_INC, VOL_1YR, VOL_3YR, VOL_5YR, ANN_VOL_INC, ANN_VOL_1YR,
        ANN_VOL_3YR, ANN_VOL_5YR, RISK_FREE_1YR, RISK_FREE_3YR, RISK_FREE_5YR,
        SHARPE_1YR, SHARPE_3YR, SHARPE_5YR
    ]

    # Seeded source with a risk-free-rate function, capped daily move and a
    # small upward trend
    source = mock_src.SeededSource('2013-12-31',
                                   24106,
                                   rfr_func=risk_free_rates,
                                   max=0.02,
                                   trend=0.0005)
    records = Performance(source, InMemoryBlockStore()).report(
        False, '2013-12-31', '2020-03-05', '2020-03-05', fields=fields)

    # Identifying columns first, then the remaining requested fields
    columns = ['date', AGE_DAYS, 'mv', 'inception'] + fields[1:]
    df = pd.DataFrame.from_records(records)[columns]

    nicer(df).to_csv('demo.csv', index=False)
def test_scenario(locked, expected):
    """Lock three consecutive monthly periods at successive asAt times, then
    report across all three and check the inception return and the total of
    the back-dated corrections.

    :param locked: whether to run the report in locked mode
    :param expected: expected sum of the 'correction' column
    """
    # Replays a cached API session so the test is deterministic/offline
    with api_cacher.CachingApi("scenario1") as api:
        prf = Performance('JLH', 'FUND1', LusidSource(api, config),
                          InMemoryBlockStore())

        # Lock the May period
        prf.get_performance(True,
                            "2019-05-01",
                            "2019-05-31",
                            "2020-03-25T09:00",
                            create=True)
        # Lock the June period
        prf.get_performance(True,
                            "2019-05-31",
                            "2019-06-30",
                            "2020-03-25T10:00",
                            create=True)
        # Lock the July period
        prf.get_performance(True,
                            "2019-06-30",
                            "2019-07-31",
                            "2020-03-25T11:00",
                            create=True)

        # Report across all three locked periods at the latest asAt
        fields = [DAY, WTD]
        df = pd.DataFrame.from_records(
            prf.report(locked,
                       '2019-05-01',
                       '2019-07-31',
                       '2020-03-25T11:00',
                       fields=fields)
        )[[
            'date', 'mv', 'inception', 'flows', 'correction', 'flow_correction'
        ] + fields]

        # 92 calendar days from 1st May to 31st July inclusive
        assert len(df) == 92
        assert df.tail(1).iloc[0]['inception'] == pytest.approx(0.002099104)
        # Total back-dated correction depends on locked vs true mode
        actual = df['correction'].sum()
        assert actual == pytest.approx(expected, abs=0.000005)
    def create_composite(composite_code, date_range_1, date_range_2,
                         date_range_3, **kwargs):
        """Create a composite with members P1, P2 and P3 over the given
        membership windows and return a Performance object for it.

        :param composite_code: code for the new composite
        :param date_range_1: (from_date, to_date) membership window for P1
        :param date_range_2: (from_date, to_date) membership window for P2
        :param date_range_3: (from_date, to_date) membership window for P3
        :param kwargs: extra keyword arguments forwarded to CompositeSource
        """
        # 'performance_api' (and, presumably, 'composite') are captured from
        # the enclosing test function's scope
        nonlocal performance_api

        composite.create_composite(composite_scope=test_scope,
                                   composite_code=composite_code)

        # Register each portfolio as a member over its own date window
        composite.add_composite_member(composite_scope=test_scope,
                                       composite_code=composite_code,
                                       member_scope=test_scope,
                                       member_code="P1",
                                       from_date=date_range_1[0],
                                       to_date=date_range_1[1])

        composite.add_composite_member(composite_scope=test_scope,
                                       composite_code=composite_code,
                                       member_scope=test_scope,
                                       member_code="P2",
                                       from_date=date_range_2[0],
                                       to_date=date_range_2[1])

        composite.add_composite_member(composite_scope=test_scope,
                                       composite_code=composite_code,
                                       member_scope=test_scope,
                                       member_code="P3",
                                       from_date=date_range_3[0],
                                       to_date=date_range_3[1])

        # Source that derives composite performance from the members
        cs = CompositeSource(composite=composite,
                             performance_api=performance_api,
                             **kwargs)

        return Performance(test_scope,
                           composite_code,
                           cs,
                           InMemoryBlockStore(),
                           perf_start='2018-03-18')
Ejemplo n.º 14
0
def test_volatility_example():
    """
    Test based upon the example here:
    http://invest-made-easy.blogspot.com/2013/03/understanding-volatility-and-sharpe.html
    The calculated standard deviation of the monthly returns should be 2.25%
    """
    vol_fields = [DAY, VOL_INC, ANN_VOL_INC]

    source = mock_src.MockSource(
        'VolEx1',
        filename=Path(__file__).parent.parent.joinpath("test-data.xlsx"))
    records = Performance(entity_scope="test",
                          entity_code="test_5yr_vol",
                          src=source,
                          block_store=InMemoryBlockStore()).report(
                              False,
                              '2019-12-31',
                              '2020-01-11',
                              '2020-01-11',
                              fields=vol_fields)
    df = pd.DataFrame.from_records(records)[['date', 'mv'] + vol_fields]

    # Incremental volatility on the final day matches the worked example
    volatility = df[VOL_INC].values[-1]
    assert volatility == pytest.approx(0.0225, abs=0.00005)
Ejemplo n.º 15
0
# Columns common to every report row
standard_fields = ['date', 'key', 'mv', 'flows', 'inception', 'correction']
# Period/rolling return fields requested by the tests
field_set = [
    DAY, WTD, MTD, QTD, YTD, ROLL_WEEK, ROLL_MONTH, ROLL_YEAR, ROLL_QTR
]

# Source for Set1 - used in env1 and env2
src1 = mock_src.MockSource(
    'Set1', filename=Path(__file__).parent.parent.joinpath("test-data.xlsx"))
# Stub get_changes to always report a back-dated change on 2020-01-08
src1.get_changes = lambda v, w, x, y, z: as_date('2020-01-08'
                                                 )  # only needed once

# Set up environment #1 - no performance posted
env1 = Performance(entity_scope="test",
                   entity_code="env1",
                   src=src1,
                   block_store=InMemoryBlockStore())  # No blocks
# Set up environment #2 - locked period, and back-dated changes
env2 = Performance(entity_scope="test",
                   entity_code="env2",
                   src=src1,
                   block_store=InMemoryBlockStore())
# Set up environment #3 - 2 years data, and 1 b.p return/day (easy to verify)
env3 = Performance(entity_scope="test",
                   entity_code="env3",
                   src=mock_src.SimpleSource('2018-03-05'),
                   block_store=InMemoryBlockStore())
# Set up environment #4 - 2 years data, and 1 b.p return/day, with a weekly flow
env4 = Performance(entity_scope="test",
                   entity_code="env4",
                   src=mock_src.SimpleSource('2018-03-05',
                                             recurring_flow=300.0),
Ejemplo n.º 16
0
def test_combinations(locked):
    """Populate a block store with overlapping and back-dated blocks, then
    check that combine() yields the expected total market value for a range
    of effectiveAt/asAt viewpoints.

    :param locked: whether to combine in locked mode; the expectations differ
        wherever back-dated activity exists
    """
    bs = InMemoryBlockStore()
    # Unique entity code so repeated runs do not collide
    entity_code = str(uuid.uuid4())
    # block 1
    add_block(test_scope, entity_code, bs, '2019-12-31', '2020-01-05',
              '2020-01-05')
    # block 2
    add_block(test_scope, entity_code, bs, '2020-01-06', '2020-01-10',
              '2020-01-10')
    # block 3
    add_block(test_scope, entity_code, bs, '2020-01-11', '2020-01-11',
              '2020-01-11')
    # block 4 - Includes back-dated activity into block 2
    add_block(test_scope, entity_code, bs, '2020-01-08', '2020-01-15',
              '2020-01-15')
    # block 5 Includes back-dated activity to first item of block 2
    add_block(test_scope, entity_code, bs, '2020-01-06', '2020-01-15',
              '2020-02-01')
    # block 6 Includes back-dated activity to last item of block 1
    add_block(test_scope, entity_code, bs, '2020-01-05', '2020-01-15',
              '2020-02-02')
    # block 7 Includes back-dated activity to exclude block 1 entirely
    add_block(test_scope, entity_code, bs, '2012-12-31', '2020-01-15',
              '2020-02-03')

    def run_scenario(entity_scope, entity_code, to_date, asat_date,
                     expected_open_tmv, expected_locked_tmv):
        # Find the blocks visible at this viewpoint, combine them, and
        # compare the summed market value with the open/locked expectation
        blocks = bs.find_blocks(entity_scope, entity_code, '2020-01-03',
                                to_date, asat_date)
        df = pd.DataFrame.from_records([
            (o.date, o.tmv)
            for o in combine(blocks, locked, '2020-01-03', to_date, asat_date)
        ],
                                       columns=['date', 'tmv'])
        total = df['tmv'].sum()
        expected = expected_locked_tmv if locked else expected_open_tmv
        if debug:
            print(nicer(df))
            print(f"Expected: {expected}, Actual: {total}")
        assert (total == pytest.approx(expected, 0.001))

    # View on 01/10 for 01/09
    run_scenario(test_scope, entity_code, '2020-01-09', '2020-01-10', 6061.34,
                 6061.34)
    # View on 01/10 for 01/10
    run_scenario(test_scope, entity_code, '2020-01-10', '2020-01-10', 7082.17,
                 7082.17)
    # View on 01/11
    run_scenario(test_scope, entity_code, '2020-01-11', '2020-01-11', 7963.82,
                 7963.82)
    # View on 01/15 for 01/11
    run_scenario(test_scope, entity_code, '2020-01-11', '2020-01-15', 8295.58,
                 7963.82)
    # View on 01/15 for 01/15
    run_scenario(test_scope, entity_code, '2020-01-15', '2020-01-15', 12158.41,
                 11826.65)
    # View on 02/01
    run_scenario(test_scope, entity_code, '2020-01-15', '2020-02-01', 2425.39,
                 11826.65)
    # View on 02/02
    run_scenario(test_scope, entity_code, '2020-01-15', '2020-02-02', 1606.25,
                 11826.65)
    # View on 02/03
    run_scenario(test_scope, entity_code, '2020-01-15', '2020-02-03', 39.0,
                 11826.65)
Ejemplo n.º 17
0
            entity_code=entity_code,
            from_date=b.from_date,
            to_date=b.to_date,
            asat=b.asat
    ).groupby('date'):

        # Populate the block with each PerformanceDataPoint in chronological order
        b.add_values(date=d, data_source=g.apply(lambda r: (r['key'], r['mv'], r['net']), axis=1))

    # Add the populated block to the block store
    bs.add_block(entity_scope=entity_scope, entity_code=entity_code, block=b)

@pytest.mark.parametrize(
    "test_name, bs",
    [
        ("InMemoryBlockStore", InMemoryBlockStore()),
        ("StructuredResultsBlockStore", BlockStoreStructuredResults(api_factory=api_factory))
    ]
)
def test_single_block(test_name: str, bs: IBlockStore) -> None:
    """
    Test that find_blocks works as expected when there is a single block in the store

    :return: None
    """
    entity_code = str(uuid.uuid4())

    # Add a block to the block store
    add_block(test_scope, entity_code, bs, sd='2020-01-05', ed='2020-01-10', ad='2020-01-10')

    def pb(b: PerformanceDataSet) -> None:
Ejemplo n.º 18
0
def test_cumulative_ror():
    """Post locked performance blocks, then query them back in locked and
    actual modes, verifying data-point counts and cumulative factors."""
    entity_scope = "test"
    entity_code = "cumulative_ror"

    prf = Performance(entity_scope=entity_scope,
                      entity_code=entity_code,
                      src=src,
                      block_store=InMemoryBlockStore())

    def check_performance(expected_len, expected_ror, *args, **kwargs):
        # Run the query, then verify the point count and the final
        # cumulative return factor
        points = list(prf.get_performance(*args, **kwargs))
        frame = pd.DataFrame.from_records(
            [(o.date, o.tmv, o.ror, o.cum_fctr) for o in points],
            columns=['date', 'mv', 'ror', 'fctr'])

        assert len(points) == expected_len
        assert points[-1].cum_fctr == pytest.approx(expected_ror, 0.00001)
        return frame

    # Steps 1-4 post locked blocks (create=True); the rest query them back.
    # Each case: (create, expected_len, expected_ror, locked, start, end, asat)
    cases = [
        # Step 1 - Post performance up to 5th Jan - asat 5th Jan
        (True, 6, 1.187343, True, '2019-12-31', '2020-01-05', '2020-01-05'),
        # Step 2 - Post performance up to 10th Jan - asat 10th Jan
        (True, 11, 1.458329, True, '2019-12-31', '2020-01-10', '2020-01-10'),
        # Step 3 - Post performance up to 12th Jan - asat 15th Jan
        (True, 13, 1.293112, True, '2019-12-31', '2020-01-12', '2020-01-15'),
        # Step 4 - Post performance up to 15th Jan - asat 16th Jan
        (True, 16, 1.646827, True, '2019-12-31', '2020-01-15', '2020-01-16'),
        # Test 5 - Query actual performance up to 15th Jan - asat 16th Jan
        (False, 16, 1.646827, False, '2019-12-31', '2020-01-15', '2020-01-16'),
        # Test 6 - Query actual performance from 3rd up to 9th Jan - asat 10th Jan
        (False, 7, 1.378071, False, '2020-01-03', '2020-01-09', '2020-01-10'),
        # Test 7 - Query actual performance from 3rd up to 9th Jan - asat 15th Jan
        (False, 7, 1.370092, False, '2020-01-03', '2020-01-09', '2020-01-15'),
        # Test 8 - Query locked performance from 3rd up to 9th Jan - asat 10th Jan
        (False, 7, 1.378071, True, '2020-01-03', '2020-01-09', '2020-01-10'),
        # Test 9 - Query locked performance from 3rd up to 9th Jan - asat 15th Jan
        (False, 7, 1.378071, True, '2020-01-03', '2020-01-09', '2020-01-15'),
        # Test 10 - Query locked performance up to 13th Jan - asat 3rd Feb
        (False, 14, 1.39204, True, '2019-12-15', '2020-01-13', '2020-02-03'),
        # Test 11 - Query actual performance up to 13th Jan - asat 3rd Feb
        (False, 14, 1.0, False, '2019-12-15', '2020-01-13', '2020-02-03'),
        # Test 12 - Query actual performance up to 13th Jan - asat 2nd Feb
        (False, 14, 0.00285714, False, '2019-12-15', '2020-01-13', '2020-02-02'),
        # Test 13 - Query locked performance up to 13th Jan - asat 2nd Feb
        (False, 14, 1.39204, True, '2019-12-15', '2020-01-13', '2020-02-02'),
    ]
    for create, exp_len, exp_ror, locked, start, end, asat in cases:
        if create:
            check_performance(exp_len, exp_ror, locked, start, end, asat,
                              create=True)
        else:
            check_performance(exp_len, exp_ror, locked, start, end, asat)

    # The block store should only contain the 4 locked blocks
    assert len(prf.block_store.blocks[f"{entity_scope}_{entity_code}"]) == 4
def test_composite_methods(test_name, composite):
    """Compare the cumulative returns of three seeded portfolios with three
    composites built from them using different composite modes (default,
    'equal' and 'agg').

    :param test_name: name of the parametrised case (not used in the body)
    :param composite: composite implementation under test; must provide
        create_composite and add_composite_member
    """
    # Shared store for the three portfolio Performance objects
    in_memory_block_store = InMemoryBlockStore()

    # Three deterministic portfolios; P3 starts later than P1/P2
    seeded_source = SeededSource()
    seeded_source.add_seeded_perf_data(entity_scope=test_scope,
                                       entity_code="P1",
                                       start_date='2018-03-05',
                                       seed=24106)
    seeded_source.add_seeded_perf_data(entity_scope=test_scope,
                                       entity_code="P2",
                                       start_date='2018-03-05',
                                       seed=12345)
    seeded_source.add_seeded_perf_data(entity_scope=test_scope,
                                       entity_code="P3",
                                       start_date='2018-10-16',
                                       seed=33333)

    performance_api = PortfolioPerformanceApi(
        block_store=InMemoryBlockStore(),
        portfolio_performance_source=seeded_source)

    # utility to create composite from the three portfolios
    def create_composite(composite_code, date='2018-03-19', **kwargs):
        # NOTE(review): the 'date' parameter is accepted but never used below
        # kwargs (e.g. composite_mode) are forwarded to CompositeSource

        nonlocal performance_api

        composite.create_composite(composite_scope=test_scope,
                                   composite_code=composite_code)

        # All three portfolios are open-ended members from 2018-03-05
        composite.add_composite_member(composite_scope=test_scope,
                                       composite_code=composite_code,
                                       member_scope=test_scope,
                                       member_code="P1",
                                       from_date='2018-03-05',
                                       to_date=None)

        composite.add_composite_member(composite_scope=test_scope,
                                       composite_code=composite_code,
                                       member_scope=test_scope,
                                       member_code="P2",
                                       from_date='2018-03-05',
                                       to_date=None)

        composite.add_composite_member(composite_scope=test_scope,
                                       composite_code=composite_code,
                                       member_scope=test_scope,
                                       member_code="P3",
                                       from_date='2018-03-05',
                                       to_date=None)

        cs = CompositeSource(composite=composite,
                             performance_api=performance_api,
                             **kwargs)

        return Performance(test_scope,
                           composite_code,
                           cs,
                           InMemoryBlockStore(),
                           perf_start='2018-03-18')

    # AND Composites, created using the different methods
    c1 = create_composite(str(uuid.uuid4()))
    c2 = create_composite(str(uuid.uuid4()), composite_mode="equal")
    c3 = create_composite(str(uuid.uuid4()), composite_mode="agg")

    # Individual portfolio Performance objects for comparison
    p1 = Performance(test_scope,
                     "P1",
                     src=seeded_source,
                     block_store=in_memory_block_store,
                     perf_start='2018-03-05')
    p2 = Performance(test_scope,
                     "P2",
                     src=seeded_source,
                     block_store=in_memory_block_store,
                     perf_start='2018-03-05')
    p3 = Performance(test_scope,
                     "P3",
                     src=seeded_source,
                     block_store=in_memory_block_store,
                     perf_start='2018-10-16')

    def run_performance(p):
        # Unlocked report over the common window, keeping the key columns
        return pd.DataFrame.from_records(
            p.report(locked=False,
                     start_date='2018-03-05',
                     end_date='2018-12-31',
                     asat='2019-01-05'))[['date', 'inception', 'mv', 'wt']]

    # WHEN we calculate the performance for all the portfolios and the
    #      composites
    # THEN the cumulative return on the final date should be as expected
    for df, exp_cum in zip(
            map(run_performance, [p1, p2, p3, c1, c2, c3]),
        [4.272095, 5.552946, 0.779682, 4.415115, 4.639544, 4.415115]):
        last = df.tail(1).iloc[0]
        assert last['inception'] == pytest.approx(exp_cum)