Example 1
from portopt.backtest import gen_mvportfolio, gen_msvportfolio
from portopt.portfoliostats import rkw_statistics

from numpy.random import seed
from numpy import linspace

from datetime import datetime
from tinydb import TinyDB

# sample_assets and loaddata are project helpers used below; their imports
# are not shown in this listing.

# %%

db = TinyDB('20160910_results_20_lowweight.json')

seed(1835)
# Draw 20 random subsets of 10 of the 100 asset columns.
assets = [sample_assets(range(100), 10) for _ in range(20)]
rkw_daterange = slice(datetime(1990, 1, 1), datetime(2012, 12, 1))

# Restrict the Fama-French 100 Size/Book-to-Market returns to the 1990-2012
# window and build one return set per sampled asset subset.
ff100sbm = loaddata.load_fama_french_100_size_book_to_market()
ff100sbmrkw = ff100sbm.loc[rkw_daterange, :]
ers = [ff100sbmrkw.iloc[:, a].fillna(0.0) for a in assets]


# Grids of candidate MV weights and eta values; msv_parameters holds one
# (initially empty) list per (return set, MV weight) pair.
mv_weights = linspace(0.01, 0.15, 15)
etas = linspace(0.0, 0.03, 11)
msv_parameters = [[list() for _ in range(len(mv_weights))] for _ in range(len(ers))]

# For each MV weight, mean-semivariance portfolios computed over a range of
# eta values should generate a comparable mean return on the first training
# return set.
for er_idx in range(len(ers)):
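
The body of the calibration loop is not reproduced in full here. Under the assumption that a mean-variance and a mean-semivariance solver each return a weight vector, selecting an eta that reproduces the MV portfolio's mean return on the training set could look like the sketch below; pick_eta, solve_mv and solve_msv are hypothetical stand-ins, not part of portopt.

import numpy as np

def pick_eta(train_returns, mv_weight, etas, solve_mv, solve_msv):
    """Pick the eta whose mean-semivariance portfolio best matches the
    mean return of the mean-variance portfolio on the training set.

    train_returns: (periods, assets) array of returns.
    solve_mv, solve_msv: hypothetical solvers returning weight vectors.
    """
    target_mean = (train_returns @ solve_mv(train_returns, mv_weight)).mean()
    gaps = [abs((train_returns @ solve_msv(train_returns, mv_weight, eta)).mean()
                - target_mean)
            for eta in etas]
    return etas[int(np.argmin(gaps))]

In the listing above, msv_parameters provides one list per (return set, MV weight) pair in which such selections could be collected.
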
Example 2
            'returns_name': 'fama_french_10_industry', 
            'asset_columns': [0, 1, 2], 
            'start_date': '1990-01-01', 
            'end_date': '2012-12-01', 
            'granularity': 'monthly'
        }
        out['backtest'] = {
            'update_periods': 12,
            'trailing_periods': 120
        }
        outfile.write(json.dumps(out) + '\n')
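
Each experiment is serialized as one JSON object per line, so the specifications can be read back with a plain json reader. The file name below is a hypothetical stand-in, since outfile is opened outside this fragment.

import json

# Hypothetical path; the actual output file is opened outside this fragment.
with open('experiments.jsonl') as fh:
    specs = [json.loads(line) for line in fh]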

# %%
np.random.seed(958039197)

# Draw 20 random subsets of 10 of the 100 asset columns.
asset_columns = [sample_assets(list(range(100)), 10) for _ in range(20)]

# %%
returns_name = 'fama_french_100_size_book_to_market'
start_date = '1990-01-01'
end_date = '2012-12-01'
granularity = 'monthly'

experiments = list()
for c in asset_columns:
    experiments.append({
            'returns_name': returns_name,
            'asset_columns': c, 
            'start_date': start_date, 
            'end_date': end_date, 
            'granularity': granularity