Example #1
    def get(self, request, *args, **kwargs):

        # Load all objects
        files = PublicFile.objects.all()

        # Filter by request
        pk = request.query_params.get("id", None)
        freq = request.query_params.get("freq", "M")
        window = int(request.query_params.get("window", 36))
        bm = request.query_params.get("bm", None)

        # Without an id there is nothing to look up
        if pk is None:
            raise Http404

        series_files = files.filter(pk=pk)

        # Perform operation on the filter result
        if len(series_files) == 1:

            # Select file to use, load it into Return Series
            file = series_files[0]
            series = ReturnSeries.read_csv(file.file)

            # Set series name. Default behavior without this is to use
            # the column name
            series.name = file.seriesName

            if bm is not None:
                bm = bm.split(",")
                bm_files = files.filter(pk__in=bm)

                for bm_file in bm_files:

                    bm_series = ReturnSeries.read_csv(bm_file.file)
                    bm_series.name = bm_file.seriesName
                    series.add_bm(bm_series, bm_file.seriesName)

            # Create results
            result = series.get_rolling_ann_vol(freq=freq, window=window)

            output = []
            for name, rolled in result.items():

                rolled.index = to_js_time(rolled.index)
                rolled = rolled.reset_index()
                rolled = rolled.to_json(orient="values")
                output.append({"name": name, "data": rolled})

            return JsonResponse(output, safe=False)

        # Anything other than exactly one matching file is treated as not found
        raise Http404
Example #2
    def get(self, request, *args, **kwargs):

        files = PublicFile.objects.all()
        pk = request.query_params.get("id", None)

        # Without an id there is nothing to look up
        if pk is None:
            raise Http404

        series_files = files.filter(pk=pk)

        output = []
        if len(series_files) == 1:

            file = series_files[0]
            series = ReturnSeries.read_csv(file.file)
            series.name = file.seriesName
            result = table_calendar_return(series)
            result = result.to_json(orient="records")
            output.append({"name": series.name, "data": result})

            return JsonResponse(output, safe=False)

        # Anything other than exactly one matching file is treated as not found
        raise Http404
Example #3
def test_index_series():

    returns = ReturnSeries.read_csv("tests/unit/data/twitter_returns.csv")

    # No benchmark
    index_series = returns.get_index_series()
    index_twitter = index_series["TWTR"]
    assert index_twitter.index[-1] == datetime.datetime.strptime(
        "2020-06-30", "%Y-%m-%d")
    assert index_twitter["TWTR"][-1] == -0.35300922502128296

    # Daily
    index_series = returns.get_index_series(freq="D")
    index_twitter = index_series["TWTR"]
    assert index_twitter.index[-1] == datetime.datetime.strptime(
        "2020-06-26", "%Y-%m-%d")
    assert index_twitter["TWTR"][-1] == -0.35300922502128473

    # With benchmark
    returns.add_bm(spy)
    index_series = returns.get_index_series()
    index_twitter = index_series["TWTR"]
    index_spy = index_series["SPY"]
    assert index_twitter.index[-1] == datetime.datetime.strptime(
        "2020-06-30", "%Y-%m-%d")
    assert index_spy.index[-1] == datetime.datetime.strptime(
        "2020-06-30", "%Y-%m-%d")
    assert index_twitter["TWTR"][-1] == -0.35300922502128296
    assert index_spy["SPY"][-1] == 0.6935467657365093
Example #4
def test_rolling_ann_return():

    returns = ReturnSeries.read_csv("tests/unit/data/twitter_returns.csv")

    # No benchmark
    roll_ann_ret = returns.get_rolling_ann_ret()
    roll_twtr = roll_ann_ret["TWTR"]
    assert roll_twtr.index[0] == datetime.datetime.strptime(
        "2016-10-31", "%Y-%m-%d")
    assert roll_twtr["TWTR"][0] == -0.2633310984499173

    # Daily, rolling 252 days
    roll_ann_ret = returns.get_rolling_ann_ret(window=252, freq="D")
    roll_twtr = roll_ann_ret["TWTR"]
    assert roll_twtr.index[0] == datetime.datetime.strptime(
        "2014-11-06", "%Y-%m-%d")
    assert roll_twtr["TWTR"][0] == -0.09048985526180642

    # With benchmark
    returns.add_bm(spy)
    roll_ann_ret = returns.get_rolling_ann_ret()
    roll_twtr = roll_ann_ret["TWTR"]
    roll_spy = roll_ann_ret["SPY"]
    assert roll_twtr.index[0] == datetime.datetime.strptime(
        "2016-10-31", "%Y-%m-%d")
    assert roll_twtr["TWTR"][0] == -0.2633310984499173
    assert roll_spy.index[0] == datetime.datetime.strptime(
        "2016-10-31", "%Y-%m-%d")
    assert roll_spy["SPY"][0] == 0.06256787890936222
Example #5
def test_rolling_tot_ret():

    returns = ReturnSeries.read_csv("tests/unit/data/twitter_returns.csv")

    # No benchmark
    roll_tot_ret = returns.get_rolling_tot_ret()
    roll_twtr = roll_tot_ret["TWTR"]
    assert roll_twtr.index[0] == datetime.datetime.strptime(
        "2016-10-31", "%Y-%m-%d")
    assert roll_twtr["TWTR"][0] == -0.6002237318946346

    # Daily, rolling 252 days
    roll_tot_ret = returns.get_rolling_tot_ret(window=252, freq="D")
    roll_twtr = roll_tot_ret["TWTR"]
    assert roll_twtr.index[0] == datetime.datetime.strptime(
        "2014-11-06", "%Y-%m-%d")
    assert roll_twtr["TWTR"][0] == -0.09043080731969699

    # With benchmark
    returns.add_bm(spy)
    roll_tot_ret = returns.get_rolling_tot_ret()
    roll_twtr = roll_tot_ret["TWTR"]
    roll_spy = roll_tot_ret["SPY"]
    assert roll_twtr.index[0] == datetime.datetime.strptime(
        "2016-10-31", "%Y-%m-%d")
    assert roll_twtr["TWTR"][0] == -0.6002237318946346
    assert roll_spy.index[0] == datetime.datetime.strptime(
        "2016-10-31", "%Y-%m-%d")
    assert roll_spy["SPY"][0] == 0.1996927920869329
Example #6
def test_rolling_ann_vol():

    returns = ReturnSeries.read_csv("tests/unit/data/twitter_returns.csv")

    # No benchmark
    roll_ann_vol = returns.get_rolling_ann_vol()
    roll_twtr = roll_ann_vol["TWTR"]
    assert roll_twtr.index[0] == datetime.datetime.strptime(
        "2016-10-31", "%Y-%m-%d")
    assert roll_twtr["TWTR"][0] == 0.5725024779205684

    # Daily, rolling 252 days
    roll_ann_vol = returns.get_rolling_ann_vol(window=252, freq="D")
    roll_twtr = roll_ann_vol["TWTR"]
    assert roll_twtr.index[0] == datetime.datetime.strptime(
        "2014-11-06", "%Y-%m-%d")
    assert roll_twtr["TWTR"][0] == 0.6376491116934246

    # With benchmark
    returns.add_bm(spy)
    roll_ann_vol = returns.get_rolling_ann_vol()
    roll_twtr = roll_ann_vol["TWTR"]
    roll_spy = roll_ann_vol["SPY"]
    assert roll_twtr.index[0] == datetime.datetime.strptime(
        "2016-10-31", "%Y-%m-%d")
    assert roll_twtr["TWTR"][0] == 0.5725024779205684
    assert roll_spy.index[0] == datetime.datetime.strptime(
        "2016-10-31", "%Y-%m-%d")
    assert roll_spy["SPY"][0] == 0.10810183559733508
Example #7
def test_total_return():

    returns = ReturnSeries.read_csv("tests/unit/data/twitter_returns.csv")

    # No benchmark
    total_return = returns.get_tot_ret()
    expected_output = pd.DataFrame(
        data={
            "name": ["TWTR"],
            "field": "total return",
            "value": [-0.35300922502128473],
        })
    assert total_return.equals(expected_output)

    # with single benchmark
    returns.add_bm(spy)
    total_return = returns.get_tot_ret()
    expected_output = pd.DataFrame(
        data={
            "name": ["TWTR", "SPY"],
            "field": "total return",
            "value": [-0.35300922502128473, 0.6935467657365115],
        })
    assert total_return.equals(expected_output)

    # meta=True
    total_return = returns.get_tot_ret(meta=True)
    expected_output = pd.DataFrame(
        data={
            "name": ["TWTR", "SPY"],
            "field": "total return",
            "value": [-0.35300922502128473, 0.6935467657365115],
            "method": "geometric",
            "start": datetime.datetime.strptime("2013-11-07", "%Y-%m-%d"),
            "end": datetime.datetime.strptime("2020-06-26", "%Y-%m-%d"),
        })
    assert total_return.equals(expected_output)

    # has benchmark, but include_bm=False
    total_return = returns.get_tot_ret(include_bm=False)
    expected_output = pd.DataFrame(
        data={
            "name": ["TWTR"],
            "field": "total return",
            "value": [-0.35300922502128473],
        })
    assert total_return.equals(expected_output)

    # test multiple benchmarks
    returns.add_bm(qqq)
    total_return = returns.get_tot_ret()
    expected_output = pd.DataFrame(
        data={
            "name": ["TWTR", "SPY", "QQQ"],
            "field": "total return",
            "value":
            [-0.35300922502128473, 0.6935467657365115, 1.894217403555647],
        })
    assert total_return.equals(expected_output)
Example #8
def table_calendar_return(return_series: ReturnSeries,
                          use_month_abbr: bool = True) -> pd.DataFrame:
    """Create calendar like monthly return table

    Args:
        return_series: A return series. Should be of minimum monthly frequency
        use_month_abbr: Whether to use 3 letter month abbreviations instead of numerical
          month. Defaults to True.

    Returns:
        pd.DataFrame: DataFrame with columns: Year, Jan, Feb, ..., Dec, Total
    """

    # create monthly and annual returns for output rows
    monthly = return_series.to_month()
    annual = return_series.to_year()

    # append necessary information
    monthly["Year"] = monthly.index.year
    monthly["Month"] = monthly.index.month
    monthly["Month"] = monthly["Month"]
    annual["Year"] = annual.index.year

    # Pivot monthly data
    monthly = monthly.pivot(index="Year",
                            columns="Month",
                            values=monthly.columns[0])

    # Rename annual column, and merge with monthly
    annual = annual.rename(columns={annual.columns[0]: "Total"})

    output = monthly.merge(annual, how="left", on="Year")

    if use_month_abbr:
        months = [*range(1, 13)]
        month_abbr_map = {
            month: calendar.month_abbr[month]
            for month in months
        }
        output = output.rename(columns=month_abbr_map)

    return output
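A minimal usage sketch of table_calendar_return, reusing the twitter_returns.csv test file loaded throughout these examples (the print call is only illustrative):

from pyform import ReturnSeries
from pyform.analysis import table_calendar_return

# Load a return series; table_calendar_return resamples it to monthly
# and annual frequencies internally via to_month() / to_year().
returns = ReturnSeries.read_csv("tests/unit/data/twitter_returns.csv")

# One row per year, with columns Year, Jan, Feb, ..., Dec, Total
calendar_table = table_calendar_return(returns, use_month_abbr=True)
print(calendar_table.head())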
Example #9
def test_corr():

    returns = ReturnSeries.read_csv("tests/unit/data/twitter_returns.csv")

    # no benchmark should raise ValueError
    with pytest.raises(ValueError):
        returns.get_corr()

    # with single benchmark
    returns.add_bm(spy)

    corr = returns.get_corr()
    expected_output = pd.DataFrame(data={
        "name": ["SPY"],
        "field": "correlation",
        "value": [0.21224719919904408]
    })
    assert corr.equals(expected_output)

    corr = returns.get_corr(meta=True)
    expected_output = pd.DataFrame(
        data={
            "name": ["SPY"],
            "field": "correlation",
            "value": [0.21224719919904408],
            "freq": "M",
            "method": "pearson",
            "start": datetime.datetime.strptime("2013-11-07", "%Y-%m-%d"),
            "end": datetime.datetime.strptime("2020-06-26", "%Y-%m-%d"),
            "total": 80,
            "used": 80,
        })
    assert corr.equals(expected_output)

    # test multiple benchmarks
    returns.add_bm(qqq)
    corr = returns.get_corr()
    expected_output = pd.DataFrame(
        data={
            "name": ["SPY", "QQQ"],
            "field": "correlation",
            "value": [0.21224719919904408, 0.27249109347246325],
        })
    assert corr.equals(expected_output)
Example #10
    def get(self, request, *args, **kwargs):

        # Load all objects
        files = PublicFile.objects.all()

        # Filter by request
        pk = request.query_params.get("id", None)

        pk_params = []
        if pk is not None:
            pk_params = [int(i) for i in pk.split(",")]
            files = files.filter(pk__in=pk_params)

        result = []

        libor = CashSeries.read_fred_libor_1m()

        # Perform operation on the filter result
        for file in files:
            series = ReturnSeries.read_csv(file.file)
            series.add_rf(libor, "libor")
            series.name = file.seriesName
            start = series.start
            end = series.end
            ann_ret = series.get_ann_ret()
            ann_vol = series.get_ann_vol()
            sharpe = series.get_sharpe()
            result.append(
                {
                    "name": file.seriesName,
                    "start": to_js_time(start),
                    "end": to_js_time(end),
                    "ann_ret": percent(ann_ret.value[0], 1),
                    "ann_vol": percent(ann_vol.value[0], 1),
                    "sharpe": round(sharpe.value[0], 2),
                }
            )

        if len(result) > 0:
            return JsonResponse(result, safe=False)
        else:
            raise Http404
Example #11
from pyform.analysis import table_calendar_return
from pyform import ReturnSeries

returns = ReturnSeries.read_csv("tests/unit/data/twitter_returns.csv")


def test_calendar_return():

    calendar_return = table_calendar_return(returns)
    assert (
        calendar_return.columns
        == [
            "Year",
            "Jan",
            "Feb",
            "Mar",
            "Apr",
            "May",
            "Jun",
            "Jul",
            "Aug",
            "Sep",
            "Oct",
            "Nov",
            "Dec",
            "Total",
        ]
    ).all()

    assert calendar_return.iloc[0, 0] == 2013
Example #12
def test_sharpe_ratio():

    returns = ReturnSeries.read_csv("tests/unit/data/spy_returns.csv")

    # No benchmark
    sharpe_ratio = returns.get_sharpe()
    expected_output = pd.DataFrame(data={
        "name": ["SPY"],
        "field": "sharpe ratio",
        "value": [0.5319128667616774]
    })
    assert sharpe_ratio.equals(expected_output)

    # daily
    sharpe_ratio = returns.get_sharpe(freq="D", meta=True)
    expected_output = pd.DataFrame(
        data={
            "name": ["SPY"],
            "field": "sharpe ratio",
            "value": [0.39292358311061165],
            "freq": "D",
            "risk_free": "cash_0: 0.0%",
            "start": datetime.datetime.strptime("2003-04-01", "%Y-%m-%d"),
            "end": datetime.datetime.strptime("2020-06-26", "%Y-%m-%d"),
        })
    assert sharpe_ratio.equals(expected_output)

    # use libor for risk free rate
    returns.add_rf(libor1m, "libor")
    sharpe_ratio = returns.get_sharpe(freq="D", risk_free="libor", meta=True)
    expected_output = pd.DataFrame(
        data={
            "name": ["SPY"],
            "field": "sharpe ratio",
            "value": [0.3175248036195898],
            "freq": "D",
            "risk_free": "LIBOR_1M: 1.54%",
            "start": datetime.datetime.strptime("2003-04-01", "%Y-%m-%d"),
            "end": datetime.datetime.strptime("2020-06-19", "%Y-%m-%d"),
        })
    assert sharpe_ratio.equals(expected_output)

    # with benchmark
    returns.add_bm(qqq)
    sharpe_ratio = returns.get_sharpe(meta=True)
    expected_output = pd.DataFrame(
        data={
            "name": ["SPY", "QQQ"],
            "field": "sharpe ratio",
            "value": [0.5319128667616774, 0.8028116328839393],
            "freq": "M",
            "risk_free": "cash_0: 0.0%",
            "start": datetime.datetime.strptime("2003-04-01", "%Y-%m-%d"),
            "end": datetime.datetime.strptime("2020-06-26", "%Y-%m-%d"),
        })
    assert sharpe_ratio.equals(expected_output)

    # wrong key
    with pytest.raises(ValueError):
        returns.get_sharpe(risk_free="not-exist")

    # wrong type
    with pytest.raises(TypeError):
        returns.get_sharpe(risk_free=libor1m)
Example #13
def test_annualized_volatility():

    returns = ReturnSeries.read_csv("tests/unit/data/twitter_returns.csv")

    # No benchmark
    ann_vol = returns.get_ann_vol()
    expected_output = pd.DataFrame(
        data={
            "name": ["TWTR"],
            "field": "annualized volatility",
            "value": [0.5199859200287252],
        })
    assert ann_vol.equals(expected_output)

    # daily volatility
    ann_vol = returns.get_ann_vol(freq="D", meta=True)
    expected_output = pd.DataFrame(
        data={
            "name": ["TWTR"],
            "field": "annualized volatility",
            "value": [0.545190844726424],
            "freq": "D",
            "method": "sample",
            "start": datetime.datetime.strptime("2013-11-07", "%Y-%m-%d"),
            "end": datetime.datetime.strptime("2020-06-26", "%Y-%m-%d"),
        })
    assert ann_vol.equals(expected_output)

    # population standard deviation
    ann_vol = returns.get_ann_vol(method="population", meta=True)
    expected_output = pd.DataFrame(
        data={
            "name": ["TWTR"],
            "field": "annualized volatility",
            "value": [0.5167257880784241],
            "freq": "M",
            "method": "population",
            "start": datetime.datetime.strptime("2013-11-07", "%Y-%m-%d"),
            "end": datetime.datetime.strptime("2020-06-26", "%Y-%m-%d"),
        })
    assert ann_vol.equals(expected_output)

    # with single benchmark
    returns.add_bm(spy)
    ann_vol = returns.get_ann_vol()
    expected_output = pd.DataFrame(
        data={
            "name": ["TWTR", "SPY"],
            "field": "annualized volatility",
            "value": [0.5199859200287252, 0.13606427329407125],
        })
    assert ann_vol.equals(expected_output)

    # daily volatility
    ann_vol = returns.get_ann_vol(freq="D", meta=True)
    expected_output = pd.DataFrame(
        data={
            "name": ["TWTR", "SPY"],
            "field": "annualized volatility",
            "value": [0.545190844726424, 0.17497687303106887],
            "freq": "D",
            "method": "sample",
            "start": datetime.datetime.strptime("2013-11-07", "%Y-%m-%d"),
            "end": datetime.datetime.strptime("2020-06-26", "%Y-%m-%d"),
        })
    assert ann_vol.equals(expected_output)

    # has benchmark, but include_bm=False
    ann_vol = returns.get_ann_vol(include_bm=False)
    expected_output = pd.DataFrame(
        data={
            "name": ["TWTR"],
            "field": "annualized volatility",
            "value": [0.5199859200287252],
        })
    assert ann_vol.equals(expected_output)
Example #14
def test_annualized_return():

    returns = ReturnSeries.read_csv("tests/unit/data/twitter_returns.csv")

    # No benchmark
    ann_return = returns.get_ann_ret()
    expected_output = pd.DataFrame(
        data={
            "name": ["TWTR"],
            "field": "annualized return",
            "value": [-0.06350385733729014],
        })
    assert ann_return.equals(expected_output)

    ann_return = returns.get_ann_ret(method="arithmetic", meta=True)
    expected_output = pd.DataFrame(
        data={
            "name": ["TWTR"],
            "field": "annualized return",
            "value": [0.0855005949876238],
            "method": "arithmetic",
            "start": datetime.datetime.strptime("2013-11-07", "%Y-%m-%d"),
            "end": datetime.datetime.strptime("2020-06-26", "%Y-%m-%d"),
        })
    assert ann_return.equals(expected_output)

    ann_return = returns.get_ann_ret(method="continuous")
    expected_output = pd.DataFrame(
        data={
            "name": ["TWTR"],
            "field": "annualized return",
            "value": [0.0855005949876238],
        })
    assert ann_return.equals(expected_output)

    # with single benchmark
    returns.add_bm(spy)
    ann_return = returns.get_ann_ret()
    expected_output = pd.DataFrame(
        data={
            "name": ["TWTR", "SPY"],
            "field": "annualized return",
            "value": [-0.06350385733729014, 0.08261818990205616],
        })
    assert ann_return.equals(expected_output)

    # meta=True
    ann_return = returns.get_ann_ret(meta=True)
    expected_output = pd.DataFrame(
        data={
            "name": ["TWTR", "SPY"],
            "field": "annualized return",
            "value": [-0.06350385733729014, 0.08261818990205616],
            "method": "geometric",
            "start": datetime.datetime.strptime("2013-11-07", "%Y-%m-%d"),
            "end": datetime.datetime.strptime("2020-06-26", "%Y-%m-%d"),
        })
    assert ann_return.equals(expected_output)

    # has benchmark, but include_bm=False
    ann_return = returns.get_ann_ret(include_bm=False)
    expected_output = pd.DataFrame(
        data={
            "name": ["TWTR"],
            "field": "annualized return",
            "value": [-0.06350385733729014],
        })
    assert ann_return.equals(expected_output)
Example #15
def test_init():

    df = pd.read_csv("tests/unit/data/twitter_returns.csv")
    ReturnSeries(df, "Twitter")
Example #16
import datetime
import pytest
import pandas as pd
from pyform import ReturnSeries, CashSeries

returns = ReturnSeries.read_csv("tests/unit/data/twitter_returns.csv")
spy = ReturnSeries.read_csv("tests/unit/data/spy_returns.csv")
qqq = ReturnSeries.read_csv("tests/unit/data/qqq_returns.csv")
libor1m = ReturnSeries.read_csv("tests/unit/data/libor1m_returns.csv")


def test_init():

    df = pd.read_csv("tests/unit/data/twitter_returns.csv")
    ReturnSeries(df, "Twitter")


def test_to_period():

    assert returns.to_week().iloc[1, 0] == 0.055942142480424284
    assert returns.to_month().iloc[1, 0] == 0.5311520760874386
    assert returns.to_quarter().iloc[1, 0] == -0.2667730077753935
    assert returns.to_year().iloc[1, 0] == -0.4364528678695403

    with pytest.raises(ValueError):
        returns.to_period("H",
                          "geometric")  # converting data to higher frequency


def test_add_bm():