Example no. 1
def validated(self, val):
    "Accepts datetime, string, or list of 6 ints.  Returns a datetime."
    if isinstance(val, _dt):
        return val
    if self._format and isinstance(val, basestring):
        return _dt.strptime(val, self._format)
    try:
        return _dt.utcfromtimestamp(float(val))
    except TypeError:
        pass  # it was not a number
    if len(val) > 2:
        return _dt(*val[:6])
    raise TypeError("value %r isn't a datetime" % val)
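A standalone sketch of the same coercion rules, for reference (coerce_datetime and its fmt default are illustrative; _dt is assumed to be datetime.datetime, and Python 2's basestring is replaced by str here):

from datetime import datetime as _dt

def coerce_datetime(val, fmt='%Y-%m-%d %H:%M:%S'):
    # mirrors validated(): accept a datetime, a formatted string,
    # a numeric POSIX timestamp, or a sequence of at least 3 ints
    if isinstance(val, _dt):
        return val
    if fmt and isinstance(val, str):
        return _dt.strptime(val, fmt)
    try:
        return _dt.utcfromtimestamp(float(val))
    except TypeError:
        pass  # not a number
    if len(val) > 2:
        return _dt(*val[:6])
    raise TypeError("value %r isn't a datetime" % (val,))

coerce_datetime('2017-01-01 00:00:00')    # -> datetime(2017, 1, 1, 0, 0)
coerce_datetime(1483228800.0)             # -> datetime(2017, 1, 1, 0, 0), interpreted as UTC
coerce_datetime([2017, 1, 1, 12, 30, 0])  # -> datetime(2017, 1, 1, 12, 30)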
Example no. 2
def validated(self, val):
    "Accepts datetime, string, or list of 6 ints.  Returns a datetime."
    if isinstance(val,_dt):
        return val
    if self._format and isinstance(val,basestring):
        return _dt.strptime(val,self._format)
    try:
        return _dt.utcfromtimestamp(float(val))
    except TypeError:
        pass # it was not a number
    if len(val) > 2:
        return _dt(*val[:6])
    raise TypeError("value %r isn't a datetime"%val)
Example no. 3
def get_most_recent_docs(db_name):
    try:
        db = _get_authentication("normal")[db_name]
        
        checked_types = _config["checked_types"]
        
        rec_list = [(t, db.design('document_type').view('document_type').get(
                       params=dict(limit=1,reduce=False,endkey=[t], startkey=[t,{}], descending=True)).result().json()['rows'])
                     for t in checked_types
                   ]
        for t, r in rec_list:
            if len(r) == 0: continue
        # Months in JS begin with 0, in Python with 1!
            r[0]["key"][2] += 1
            doc = dict(id=db_name + ":" + str(t), refid=r[0]['id'],
                   timestamp=_dt(*r[0]["key"][1:]).strftime("%a, %d %b %Y %H:%M:%S GMT"))
            _post_doc_to_aggregate(doc)
    except Exception:
        error("Error while posting to aggregate DB ({})".format(traceback.format_exc()))
Example no. 4
    async def _get_newest(self, *, item):
        logger.debug(f'{self._name} type:{type(item)} {item}')

        l_newest_item = None
        l_newest_ts = _dt(1970, 1, 1, tzinfo=timezone.utc)
        if isinstance(item, list):
            for l_item in item:
                l_time = l_item.get('time', None)
                if l_time:
                    l_ts = ciso8601.parse_datetime(l_time).replace(
                        tzinfo=timezone.utc)
                    if l_ts > l_newest_ts:
                        l_newest_item = l_item
                        l_newest_ts = l_ts
        else:
            l_newest_item = item

        logger.debug(f'{self._name} item: {l_newest_item}')
        return l_newest_item
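A small self-contained sketch of the same newest-item selection, using the same ciso8601.parse_datetime call on items shaped like those the coroutine expects (the item dicts are illustrative):

import ciso8601
from datetime import datetime as _dt, timezone

items = [
    {'id': 'a', 'time': '2021-05-01T10:00:00'},
    {'id': 'b', 'time': '2021-05-01T12:30:00'},
    {'id': 'c', 'time': '2021-04-30T23:59:59'},
]

newest, newest_ts = None, _dt(1970, 1, 1, tzinfo=timezone.utc)
for it in items:
    ts = ciso8601.parse_datetime(it['time']).replace(tzinfo=timezone.utc)
    if ts > newest_ts:
        newest, newest_ts = it, ts
# newest is the 'b' item, the latest timestamp in the list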
Example no. 5
def metrics(returns,
            benchmark=None,
            rf=0.,
            display=True,
            mode='basic',
            sep=False,
            compounded=True,
            periods_per_year=252,
            prepare_returns=True,
            match_dates=False,
            **kwargs):

    win_year, _ = _get_trading_periods(periods_per_year)

    if benchmark is not None \
            and isinstance(benchmark, _pd.DataFrame) and len(benchmark.columns) > 1:
        raise ValueError("`benchmark` must be a pandas Series, "
                         "but a multi-column DataFrame was passed")

    blank = ['']

    if isinstance(returns, _pd.DataFrame):
        if len(returns.columns) > 1:
            raise ValueError(
                "`returns` needs to be a Pandas Series or one column DataFrame. multi colums DataFrame was passed"
            )
        returns = returns[returns.columns[0]]

    if prepare_returns:
        returns = _utils._prepare_returns(returns)

    df = _pd.DataFrame({"returns": returns})

    if benchmark is not None:
        blank = ['', '']
        benchmark = _utils._prepare_benchmark(benchmark, returns.index, rf)
        if match_dates is True:
            returns, benchmark = _match_dates(returns, benchmark)
        df["returns"] = returns
        df["benchmark"] = benchmark

    df = df.fillna(0)

    # pct multiplier
    pct = 100 if display or "internal" in kwargs else 1
    if kwargs.get("as_pct", False):
        pct = 100

    # return df
    dd = _calc_dd(df,
                  display=(display or "internal" in kwargs),
                  as_pct=kwargs.get("as_pct", False))

    metrics = _pd.DataFrame()

    s_start = {'returns': df['returns'].index.strftime('%Y-%m-%d')[0]}
    s_end = {'returns': df['returns'].index.strftime('%Y-%m-%d')[-1]}
    s_rf = {'returns': rf}

    if "benchmark" in df:
        s_start['benchmark'] = df['benchmark'].index.strftime('%Y-%m-%d')[0]
        s_end['benchmark'] = df['benchmark'].index.strftime('%Y-%m-%d')[-1]
        s_rf['benchmark'] = rf

    metrics['Start Period'] = _pd.Series(s_start)
    metrics['End Period'] = _pd.Series(s_end)
    metrics['Risk-Free Rate %'] = _pd.Series(s_rf)
    metrics['Time in Market %'] = _stats.exposure(df,
                                                  prepare_returns=False) * pct

    metrics['~'] = blank

    if compounded:
        metrics['Cumulative Return %'] = (_stats.comp(df) * pct).map(
            '{:,.2f}'.format)
    else:
        metrics['Total Return %'] = (df.sum() * pct).map('{:,.2f}'.format)

    metrics['CAGR﹪%'] = _stats.cagr(df, rf, compounded) * pct

    metrics['~~~~~~~~~~~~~~'] = blank

    metrics['Sharpe'] = _stats.sharpe(df, rf, win_year, True)
    if mode.lower() == 'full':
        metrics['Smart Sharpe'] = _stats.smart_sharpe(df, rf, win_year, True)
    metrics['Sortino'] = _stats.sortino(df, rf, win_year, True)
    if mode.lower() == 'full':
        metrics['Smart Sortino'] = _stats.smart_sortino(df, rf, win_year, True)
    metrics['Sortino/√2'] = metrics['Sortino'] / _sqrt(2)
    if mode.lower() == 'full':
        metrics['Smart Sortino/√2'] = metrics['Smart Sortino'] / _sqrt(2)
    metrics['Omega'] = _stats.omega(df, rf, 0., win_year)

    metrics['~~~~~~~~'] = blank
    metrics['Max Drawdown %'] = blank
    metrics['Longest DD Days'] = blank

    if mode.lower() == 'full':
        ret_vol = _stats.volatility(
            df['returns'], win_year, True, prepare_returns=False) * pct
        if "benchmark" in df:
            bench_vol = _stats.volatility(
                df['benchmark'], win_year, True, prepare_returns=False) * pct
            metrics['Volatility (ann.) %'] = [ret_vol, bench_vol]
            metrics['R^2'] = _stats.r_squared(df['returns'],
                                              df['benchmark'],
                                              prepare_returns=False)
        else:
            metrics['Volatility (ann.) %'] = [ret_vol]

        metrics['Calmar'] = _stats.calmar(df, prepare_returns=False)
        metrics['Skew'] = _stats.skew(df, prepare_returns=False)
        metrics['Kurtosis'] = _stats.kurtosis(df, prepare_returns=False)

        metrics['~~~~~~~~~~'] = blank

        metrics['Expected Daily %%'] = _stats.expected_return(
            df, prepare_returns=False) * pct
        metrics['Expected Monthly %%'] = _stats.expected_return(
            df, aggregate='M', prepare_returns=False) * pct
        metrics['Expected Yearly %%'] = _stats.expected_return(
            df, aggregate='A', prepare_returns=False) * pct
        metrics['Kelly Criterion %'] = _stats.kelly_criterion(
            df, prepare_returns=False) * pct
        metrics['Risk of Ruin %'] = _stats.risk_of_ruin(df,
                                                        prepare_returns=False)

        metrics['Daily Value-at-Risk %'] = -abs(
            _stats.var(df, prepare_returns=False) * pct)
        metrics['Expected Shortfall (cVaR) %'] = -abs(
            _stats.cvar(df, prepare_returns=False) * pct)

    metrics['~~~~~~'] = blank

    metrics['Gain/Pain Ratio'] = _stats.gain_to_pain_ratio(df, rf)
    metrics['Gain/Pain (1M)'] = _stats.gain_to_pain_ratio(df, rf, "M")
    # if mode.lower() == 'full':
    #     metrics['GPR (3M)'] = _stats.gain_to_pain_ratio(df, rf, "Q")
    #     metrics['GPR (6M)'] = _stats.gain_to_pain_ratio(df, rf, "2Q")
    #     metrics['GPR (1Y)'] = _stats.gain_to_pain_ratio(df, rf, "A")
    metrics['~~~~~~~'] = blank

    metrics['Payoff Ratio'] = _stats.payoff_ratio(df, prepare_returns=False)
    metrics['Profit Factor'] = _stats.profit_factor(df, prepare_returns=False)
    metrics['Common Sense Ratio'] = _stats.common_sense_ratio(
        df, prepare_returns=False)
    metrics['CPC Index'] = _stats.cpc_index(df, prepare_returns=False)
    metrics['Tail Ratio'] = _stats.tail_ratio(df, prepare_returns=False)
    metrics['Outlier Win Ratio'] = _stats.outlier_win_ratio(
        df, prepare_returns=False)
    metrics['Outlier Loss Ratio'] = _stats.outlier_loss_ratio(
        df, prepare_returns=False)

    # returns
    metrics['~~'] = blank
    comp_func = _stats.comp if compounded else _np.sum

    today = df.index[-1]  # _dt.today()
    metrics['MTD %'] = comp_func(
        df[df.index >= _dt(today.year, today.month, 1)]) * pct

    d = today - _td(3 * 365 / 12)
    metrics['3M %'] = comp_func(
        df[df.index >= _dt(d.year, d.month, d.day)]) * pct

    d = today - _td(6 * 365 / 12)
    metrics['6M %'] = comp_func(
        df[df.index >= _dt(d.year, d.month, d.day)]) * pct

    metrics['YTD %'] = comp_func(df[df.index >= _dt(today.year, 1, 1)]) * pct

    d = today - _td(12 * 365 / 12)
    metrics['1Y %'] = comp_func(
        df[df.index >= _dt(d.year, d.month, d.day)]) * pct
    d = today - _td(3 * 365)
    metrics['3Y (ann.) %'] = _stats.cagr(
        df[df.index >= _dt(d.year, d.month, d.day)], 0., compounded) * pct
    d = today - _td(5 * 365)
    metrics['5Y (ann.) %'] = _stats.cagr(
        df[df.index >= _dt(d.year, d.month, d.day)], 0., compounded) * pct
    d = today - _td(10 * 365)
    metrics['10Y (ann.) %'] = _stats.cagr(
        df[df.index >= _dt(d.year, d.month, d.day)], 0., compounded) * pct
    metrics['All-time (ann.) %'] = _stats.cagr(df, 0., compounded) * pct

    # best/worst
    if mode.lower() == 'full':
        metrics['~~~'] = blank
        metrics['Best Day %'] = _stats.best(df, prepare_returns=False) * pct
        metrics['Worst Day %'] = _stats.worst(df, prepare_returns=False) * pct
        metrics['Best Month %'] = _stats.best(
            df, aggregate='M', prepare_returns=False) * pct
        metrics['Worst Month %'] = _stats.worst(
            df, aggregate='M', prepare_returns=False) * pct
        metrics['Best Year %'] = _stats.best(
            df, aggregate='A', prepare_returns=False) * pct
        metrics['Worst Year %'] = _stats.worst(
            df, aggregate='A', prepare_returns=False) * pct

    # dd
    metrics['~~~~'] = blank
    for ix, row in dd.iterrows():
        metrics[ix] = row
    metrics['Recovery Factor'] = _stats.recovery_factor(df)
    metrics['Ulcer Index'] = _stats.ulcer_index(df)
    metrics['Serenity Index'] = _stats.serenity_index(df, rf)

    # win rate
    if mode.lower() == 'full':
        metrics['~~~~~'] = blank
        metrics['Avg. Up Month %'] = _stats.avg_win(
            df, aggregate='M', prepare_returns=False) * pct
        metrics['Avg. Down Month %'] = _stats.avg_loss(
            df, aggregate='M', prepare_returns=False) * pct
        metrics['Win Days %%'] = _stats.win_rate(df,
                                                 prepare_returns=False) * pct
        metrics['Win Month %%'] = _stats.win_rate(
            df, aggregate='M', prepare_returns=False) * pct
        metrics['Win Quarter %%'] = _stats.win_rate(
            df, aggregate='Q', prepare_returns=False) * pct
        metrics['Win Year %%'] = _stats.win_rate(
            df, aggregate='A', prepare_returns=False) * pct

        if "benchmark" in df:
            metrics['~~~~~~~'] = blank
            greeks = _stats.greeks(df['returns'],
                                   df['benchmark'],
                                   win_year,
                                   prepare_returns=False)
            metrics['Beta'] = [str(round(greeks['beta'], 2)), '-']
            metrics['Alpha'] = [str(round(greeks['alpha'], 2)), '-']

    # prepare for display
    for col in metrics.columns:
        try:
            metrics[col] = metrics[col].astype(float).round(2)
            if display or "internal" in kwargs:
                metrics[col] = metrics[col].astype(str)
        except Exception:
            pass
        if (display or "internal" in kwargs) and "%" in col:
            metrics[col] = metrics[col] + '%'
    try:
        metrics['Longest DD Days'] = _pd.to_numeric(
            metrics['Longest DD Days']).astype('int')
        metrics['Avg. Drawdown Days'] = _pd.to_numeric(
            metrics['Avg. Drawdown Days']).astype('int')

        if display or "internal" in kwargs:
            metrics['Longest DD Days'] = metrics['Longest DD Days'].astype(str)
            metrics['Avg. Drawdown Days'] = metrics[
                'Avg. Drawdown Days'].astype(str)
    except Exception:
        metrics['Longest DD Days'] = '-'
        metrics['Avg. Drawdown Days'] = '-'
        if display or "internal" in kwargs:
            metrics['Longest DD Days'] = '-'
            metrics['Avg. Drawdown Days'] = '-'

    metrics.columns = [
        col if '~' not in col else '' for col in metrics.columns
    ]
    metrics.columns = [
        col[:-1] if '%' in col else col for col in metrics.columns
    ]
    metrics = metrics.T

    if "benchmark" in df:
        metrics.columns = ['Strategy', 'Benchmark']
    else:
        metrics.columns = ['Strategy']

    if display:
        print(_tabulate(metrics, headers="keys", tablefmt='simple'))
        return None

    if not sep:
        metrics = metrics[metrics.index != '']
    return metrics
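The comp_func switch above picks between compounded and simple aggregation of each period slice; a minimal sketch of the difference, assuming _stats.comp compounds a return series as prod(1 + r) - 1:

import numpy as np

returns = np.array([0.01, -0.02, 0.03])
compounded = np.prod(1 + returns) - 1  # what _stats.comp is assumed to compute
simple = returns.sum()                 # what _np.sum gives when compounded=False
# compounded ≈ 0.0195, simple == 0.02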
Example no. 6
    def __init__(self, uvfitsfile, telescope=None, vsys=0, distance=0, endian=None, **kwargs):
        """

        Reads the uvfits and calculates useful things, e.g. u,v,w,
        phase and amplitude

        .byteswap().newbyteorder() is applied in various places to
        convert to little endian

        """
        f = pfopen(uvfitsfile, **kwargs)
        self.loadendian = endian
        if f[0].header['NAXIS1'] != 0:
            print "error: this file may not be a UV FITS."
            raise FileError('File format error.')
        #~ f.info()
        try:
            self.hdu = f[0]
        except:
            print "error: cannot open uv data HDU."
        self.hdr = self.hdu.header
        self.data = self.hdu.data
        if self.hdr['NAXIS4'] > 1:
            self.datatype = ('CUBE', 3)
        else:
            self.datatype = ('IMAGE', 2)
        
        # find spectral axis
        axis_types = self.WCS.get_axis_types()
        ax_types = np.array([i['coordinate_type'] for i in axis_types])
        try:
            spec_axis = ('spectral' == ax_types).nonzero()[0][0]
            freq = self.hdu.header['CRVAL{0}'.format(spec_axis+1)]
            # assumes the frequency given in Hz
            self.freq = freq * un.Hz
        except (IndexError):
            print('No spectral axis in header.')
            spec_axis = -1
            self.freq = None
        
        
        if 'RESTFREQ' in self.hdu.header.keys():
            self.restfreq = self.hdu.header['RESTFREQ']
            self.restfreq_unit = self.hdu.header['RESTFREQ'] * u.Hz
        else:
            raise StandardError('No restfrequency found, NEED it!')
        #TODO : Read in velocity and frequency array if present
        """
        The standard unit is to give UU and VV in seconds (??!?)
        So we have to convert to whatever we want.
        """
        # standard storing unit here is kilo-lambdas
        # save a million lines of code!
        u.add_enabled_equivalencies(lambdas_equivalencies(self.restfreq_unit))
        self.u = (self.data.par('UU') * u.s).to(klambdas)
        self.v = (self.data.par('VV') * u.s).to(klambdas)
        self.w = (self.data.par('WW') * u.s).to(klambdas)
        self.uvdist = sqrt(self.u.value**2 + self.v.value**2) * klambdas


        # BASELINE
        self.baseline = self.hdu.data.par('BASELINE').byteswap().newbyteorder()
        # DATES
        self.jdate = self.hdu.data.par('DATE')
        # clip dates earlier than 1900-01-01 00:00 UTC (JD 2415020.5)
        self.jdate = self.jdate.clip(2415020.5)
        self.date = _sp.array([jd2gd(i) for i in self.jdate])
        self.date0 = self.date.transpose()
        fields = ['year', 'month', 'day', 'hour', 'minute', 'sec']
        self.date1 = {key:value for key,value in zip(fields, self.date0)}
        # convert to datetime objects
        # LOSES the sub-second resolution
        self.date2 = [_dt(int(i[0]), int(i[1]), int(i[2]), int(i[3]), int(i[4]), int(i[5])) for i in self.date]
        # get number of tracks
        # TODO : rough hack, separate track if diff day is >1
        tmp = _sp.where(_sp.diff(_sp.unique(self.jdate.round(0)))>1)[0]
        self.ntracks = len(tmp)+1
        
        ################################################################
        # NB : need to streamline this.
        # only load the complex visibilities, into a complex array
        # AND then work on that
        
        
        # COMPLEX VISIBILITY
        visi_index = len(self.data.parnames)
        if self.hdu.header['NAXIS']  == 7:
            self.visdata = self.data.par(visi_index)[:,0,0,0,0,0,:].byteswap().newbyteorder()
        #~ self.visdata = self.hdu.data.data[:,0,0,0,0,0,:]
        elif self.hdu.header['NAXIS']  == 6:
            self.visdata = self.data.par(visi_index)[:,0,0,0,0,:].byteswap().newbyteorder()
        # load the re, im and weight arrays
        self.re, self.im, self.wt = self.visdata[:,:].T
        #~ self.re = self.visdata[:,0][:]
        #~ self.im = self.visdata[:,1][:]
        #~ self.wt = self.visdata[:,2][:]
        # complex numbers
        #~ self.comp = self.visdata[:,:2].astype(_np.float64).view(_np.complexfloating)
        #~ self.comp = 1j*self.visdata[:,1][:]
        #~ self.comp += self.visdata[:,0][:]
        #~ self.comp = self.visdata[:,:2].astype(_np.float).view(_np.complex)
        
        # below seems a bit dependent...
        self.cvisi = self.visdata[:,:2].astype(_np.float).view(_np.complex).T[0]
        """
        with complex array, you can do
        amp = np.abs(vis)
        np.angle(vis)   
        vis.real
        vis.imag
        
        """
        # the data is not shifted
        self.isshifted = (False, [0,0])
        # AMPLITUDE 
        self.amp = sqrt(self.re**2 + self.im**2)
        # PHASE
        self.pha = arctan2(self.im, self.re)
        self.pha_deg = self.pha / pi * 180.
        # ERROR / SIGMA
        #TODO : check
        # following 1.0e6 is just for GILDAS, change if needed
        #~ print('NB : Error calculated from weights assuming GILDAS '
        #~ 'data (i.e. frequencies in MHz).')
        self.sigma_alt = 1/sqrt(self.wt*1.0e6)
        # Daniels way of calculating sigma
        # test this first
        self.sigma = _sp.sqrt(0.5 / ( self.wt * float(self.amp.shape[0]) ) )
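A small numpy-only check of the amplitude/phase relations used above (the re/im values are illustrative):

import numpy as np

re = np.array([1.0, 0.0, -1.0])
im = np.array([0.0, 2.0, 1.0])

amp = np.sqrt(re**2 + im**2)   # visibility amplitude
pha = np.arctan2(im, re)       # phase in radians
pha_deg = np.degrees(pha)      # same as pha / pi * 180.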
Example no. 7
    def test_timezone_defaults(self, Sample):
        when = _dt(2001, 9, 1)
        instance = Sample(when)

        assert instance['field'].tzinfo == utc
        assert instance.field.tzinfo == utc
Example no. 8
	def test_basic_tzinfo_use(self, Sample):
		now = _dt(1902, 4, 24)
		instance = Sample(now)
		
		assert instance.field.tzinfo.tzname(instance.field) in ('PST', 'Canada/Pacific')
Example no. 9
    def test_basic_tzinfo_use(self, Sample):
        now = _dt(1902, 4, 24)
        instance = Sample(now)

        assert instance.field.tzinfo.tzname(
            instance.field) in ('PST', 'Canada/Pacific')
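A minimal sketch of what this assertion exercises, using pytz directly; the Sample fixture is assumed to attach a Canada/Pacific zone to the stored value, and the historical zone data for 1902 may vary between tz database releases:

import pytz
from datetime import datetime as _dt

tz = pytz.timezone('Canada/Pacific')
aware = tz.localize(_dt(1902, 4, 24))
print(aware.tzinfo.tzname(aware))  # 'PST' with current tz data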
Example no. 10
    def test_date_like_oid(self, Sample):
        oid = ObjectId('586846800000000000000000')

        assert Sample(oid).field == _dt(2017, 1, 1, tzinfo=utc)
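The first four bytes of an ObjectId encode a Unix timestamp, which is why the "date-like" OID above resolves to 2017-01-01; a quick stdlib check (bson's ObjectId.generation_time returns the same instant as an aware UTC datetime):

from datetime import datetime, timezone

seconds = int('586846800000000000000000'[:8], 16)  # 0x58684680 == 1483228800
datetime.fromtimestamp(seconds, tz=timezone.utc)   # 2017-01-01 00:00:00+00:00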
Example no. 11
	def test_naive_explicit(self, Sample):
		tz = pytz.timezone('America/Chicago')
		when = _dt(1992, 1, 12)  # Prod. #3
		instance = Sample(when)
		assert tz.normalize(instance['field'].astimezone(tz)).replace(tzinfo=None) == when
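A minimal sketch of the round trip this test performs, assuming the field localizes naive input to a configured zone and stores it in UTC; the storage step below is an illustrative stand-in for what Sample does:

import pytz
from datetime import datetime as _dt

tz = pytz.timezone('America/Chicago')
when = _dt(1992, 1, 12)                          # naive local wall time
stored = tz.localize(when).astimezone(pytz.utc)  # assumed storage form
roundtrip = tz.normalize(stored.astimezone(tz)).replace(tzinfo=None)
assert roundtrip == when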
Example no. 12
	def test_timezone_defaults(self, Sample):
		when = _dt(2001, 9, 1)
		instance = Sample(when)
		
		assert instance['field'].tzinfo == utc
		assert instance.field.tzinfo == utc
Example no. 13
	def test_date_like_oid(self, Sample):
		oid = ObjectId('586846800000000000000000')
		
		assert Sample(oid).field == _dt(2017, 1, 1, tzinfo=utc)
Example no. 14
def datetime(*args, **kwargs):
    return _dt(*args, **kwargs)
Example no. 15
def test_naive_explicit(self, Sample):
    tz = pytz.timezone('America/Chicago')
    when = _dt(1992, 1, 12)  # Prod. #3
    instance = Sample(when)
    assert tz.normalize(
        instance['field'].astimezone(tz)).replace(tzinfo=None) == when
Example no. 16
def metrics(returns,
            benchmark=None,
            rf=0.,
            display=True,
            mode='basic',
            sep=False,
            **kwargs):

    if isinstance(returns, _pd.DataFrame) and len(returns.columns) > 1:
        raise ValueError("`returns` must be a pandas Series, "
                         "but a multi-column DataFrame was passed")

    if benchmark is not None:
        if isinstance(returns, _pd.DataFrame) and len(returns.columns) > 1:
            raise ValueError("`benchmark` must be a pandas Series, "
                             "but a multi-column DataFrame was passed")

    blank = ['']
    df = _pd.DataFrame({"returns": _utils._prepare_returns(returns, rf)})
    if benchmark is not None:
        blank = ['', '']
        df["benchmark"] = _utils._prepare_benchmark(benchmark, returns.index,
                                                    rf)

    df = df.dropna()

    # pct multiplier
    pct = 100 if display or "internal" in kwargs else 1

    # return df
    dd = _calc_dd(df, display=(display or "internal" in kwargs))

    metrics = _pd.DataFrame()

    s_start = {'returns': df['returns'].index.strftime('%Y-%m-%d')[0]}
    s_end = {'returns': df['returns'].index.strftime('%Y-%m-%d')[-1]}
    s_rf = {'returns': rf}

    if "benchmark" in df:
        s_start['benchmark'] = df['benchmark'].index.strftime('%Y-%m-%d')[0]
        s_end['benchmark'] = df['benchmark'].index.strftime('%Y-%m-%d')[-1]
        s_rf['benchmark'] = rf

    metrics['Start Period'] = _pd.Series(s_start)
    metrics['End Period'] = _pd.Series(s_end)
    metrics['Risk-free rate %'] = _pd.Series(s_rf)
    metrics['Exposure %%'] = _stats.exposure(df) * pct

    metrics['~'] = blank

    metrics['Cumulative Return %'] = _stats.comp(df) * pct
    metrics['CAGR%%'] = _stats.cagr(df, rf) * pct
    metrics['Sharpe'] = _stats.sharpe(df, rf)
    metrics['Sortino'] = _stats.sortino(df, rf)
    metrics['Max Drawdown %'] = blank
    metrics['Longest DD Days'] = blank

    if mode.lower() == 'full':
        ret_vol = _stats.volatility(df['returns']) * pct
        if "benchmark" in df:
            bench_vol = _stats.volatility(df['benchmark']) * pct
            metrics['Volatility (ann.) %'] = [ret_vol, bench_vol]
            metrics['R^2'] = _stats.r_squared(df['returns'], df['benchmark'])
        else:
            metrics['Volatility (ann.) %'] = [ret_vol]

        metrics['Calmar'] = _stats.calmar(df)
        metrics['Skew'] = _stats.skew(df)
        metrics['Kurtosis'] = _stats.kurtosis(df)

    if mode.lower() == 'full':
        metrics['~~~~~~~~~~'] = blank

        metrics['Expected Daily %%'] = _stats.expected_return(df) * pct
        metrics['Expected Monthly %%'] = _stats.expected_return(
            df, aggregate='M') * pct
        metrics['Expected Yearly %%'] = _stats.expected_return(
            df, aggregate='A') * pct
        metrics['Kelly Criterion %'] = _stats.kelly_criterion(df) * pct
        metrics['Risk of Ruin %'] = _stats.risk_of_ruin(df)

        metrics['Daily Value-at-Risk %'] = -abs(_stats.var(df) * pct)
        metrics['Expected Shortfall (cVaR) %'] = -abs(_stats.cvar(df) * pct)

    metrics['~~~~~~'] = blank

    metrics['Payoff Ratio'] = _stats.payoff_ratio(df)
    metrics['Profit Factor'] = _stats.profit_factor(df)
    metrics['Common Sense Ratio'] = _stats.common_sense_ratio(df)
    metrics['CPC Index'] = _stats.cpc_index(df)
    metrics['Tail Ratio'] = _stats.tail_ratio(df)
    metrics['Outlier Win Ratio'] = _stats.outlier_win_ratio(df)
    metrics['Outlier Loss Ratio'] = _stats.outlier_loss_ratio(df)

    # returns
    metrics['~~'] = blank

    today = _dt.today()
    metrics['MTD %'] = _stats.comp(
        df[df.index >= _dt(today.year, today.month, 1)]) * pct

    d = today - _td(3 * 365 / 12)
    metrics['3M %'] = _stats.comp(
        df[df.index >= _dt(d.year, d.month, d.day)]) * pct

    d = today - _td(6 * 365 / 12)
    metrics['6M %'] = _stats.comp(
        df[df.index >= _dt(d.year, d.month, d.day)]) * pct

    metrics['YTD %'] = _stats.comp(df[df.index >= _dt(today.year, 1, 1)]) * pct

    d = today - _td(12 * 365 / 12)
    metrics['1Y %'] = _stats.comp(
        df[df.index >= _dt(d.year, d.month, d.day)]) * pct
    metrics['3Y (ann.) %'] = _stats.cagr(
        df[df.index >= _dt(today.year - 3, today.month, today.day)]) * pct
    metrics['5Y (ann.) %'] = _stats.cagr(
        df[df.index >= _dt(today.year - 5, today.month, today.day)]) * pct
    metrics['10Y (ann.) %'] = _stats.cagr(
        df[df.index >= _dt(today.year - 10, today.month, today.day)]) * pct
    metrics['All-time (ann.) %'] = _stats.cagr(df) * pct

    # best/worst
    if mode.lower() == 'full':
        metrics['~~~'] = blank
        metrics['Best Day %'] = _stats.best(df) * pct
        metrics['Worst Day %'] = _stats.worst(df) * pct
        metrics['Best Month %'] = _stats.best(df, aggregate='M') * pct
        metrics['Worst Month %'] = _stats.worst(df, aggregate='M') * pct
        metrics['Best Year %'] = _stats.best(df, aggregate='A') * pct
        metrics['Worst Year %'] = _stats.worst(df, aggregate='A') * pct

    # dd
    metrics['~~~~'] = blank
    for ix, row in dd.iterrows():
        metrics[ix] = row
    metrics['Recovery Factor'] = _stats.recovery_factor(df)
    metrics['Ulcer Index'] = _stats.ulcer_index(df, rf)

    # win rate
    if mode.lower() == 'full':
        metrics['~~~~~'] = blank
        metrics['Avg. Up Month %'] = _stats.avg_win(df, aggregate='M') * pct
        metrics['Avg. Down Month %'] = _stats.avg_loss(df, aggregate='M') * pct
        metrics['Win Days %%'] = _stats.win_rate(df) * pct
        metrics['Win Month %%'] = _stats.win_rate(df, aggregate='M') * pct
        metrics['Win Quarter %%'] = _stats.win_rate(df, aggregate='Q') * pct
        metrics['Win Year %%'] = _stats.win_rate(df, aggregate='A') * pct

    if mode.lower() == "full" and "benchmark" in df:
        metrics['~~~~~~~'] = blank
        greeks = _stats.greeks(df['returns'], df['benchmark'])
        metrics['Beta'] = [str(round(greeks['beta'], 2)), '-']
        metrics['Alpha'] = [str(round(greeks['alpha'], 2)), '-']

    # prepare for display
    for col in metrics.columns:
        try:
            metrics[col] = metrics[col].astype(float).round(2)
            if display or "internal" in kwargs:
                metrics[col] = metrics[col].astype(str)
        except Exception:
            pass
        if (display or "internal" in kwargs) and "%" in col:
            metrics[col] = metrics[col] + '%'

        metrics['Longest DD Days'] = _pd.to_numeric(
            metrics['Longest DD Days']).astype('int')
        metrics['Avg. Drawdown Days'] = _pd.to_numeric(
            metrics['Avg. Drawdown Days']).astype('int')

        if display or "internal" in kwargs:
            metrics['Longest DD Days'] = metrics['Longest DD Days'].astype(str)
            metrics['Avg. Drawdown Days'] = metrics[
                'Avg. Drawdown Days'].astype(str)

    metrics.columns = [
        col if '~' not in col else '' for col in metrics.columns
    ]
    metrics.columns = [
        col[:-1] if '%' in col else col for col in metrics.columns
    ]
    metrics = metrics.T

    if "benchmark" in df:
        metrics.columns = ['Strategy', 'Benchmark']
    else:
        metrics.columns = ['Strategy']

    if display:
        print(_tabulate(metrics, headers="keys", tablefmt='simple'))
        return

    if not sep:
        metrics = metrics[metrics.index != '']
    return metrics
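The MTD/3M/6M rows above slice the frame with approximate calendar windows built from timedeltas; a small sketch of that slicing with the same _dt/_td aliases and synthetic data:

import pandas as pd
from datetime import datetime as _dt, timedelta as _td

returns = pd.Series(0.001, index=pd.date_range('2020-01-01', '2020-12-31', freq='D'))

today = returns.index[-1]
d = today - _td(3 * 365 / 12)  # ~91.25 days back
last_3m = returns[returns.index >= _dt(d.year, d.month, d.day)]
mtd = returns[returns.index >= _dt(today.year, today.month, 1)]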
Example no. 17
    def __init__(self,
                 uvfitsfile,
                 telescope=None,
                 vsys=0,
                 distance=0,
                 endian=None,
                 **kwargs):
        """

        Reads the uvfits and calculates useful things, e.g. u,v,w,
        phase and amplitude

        .byteswap().newbyteorder() is applied in various places to
        convert to little endian

        """
        f = pfopen(uvfitsfile, **kwargs)
        self.loadendian = endian
        if f[0].header['NAXIS1'] != 0:
            print "error: this file may not be a UV FITS."
            raise FileError('File format error.')
        #~ f.info()
        try:
            self.hdu = f[0]
        except:
            print "error: cannot open uv data HDU."
        self.hdr = self.hdu.header
        self.data = self.hdu.data
        if self.hdr['NAXIS4'] > 1:
            self.datatype = ('CUBE', 3)
        else:
            self.datatype = ('IMAGE', 2)

        # find spectral axis
        axis_types = self.WCS.get_axis_types()
        ax_types = np.array([i['coordinate_type'] for i in axis_types])
        try:
            spec_axis = ('spectral' == ax_types).nonzero()[0][0]
            freq = self.hdu.header['CRVAL{0}'.format(spec_axis + 1)]
            # assumes the frequency given in Hz
            self.freq = freq * un.Hz
        except (IndexError):
            print('No spectral axis in header.')
            spec_axis = -1
            self.freq = None

        if 'RESTFREQ' in self.hdu.header.keys():
            self.restfreq = self.hdu.header['RESTFREQ']
            self.restfreq_unit = self.hdu.header['RESTFREQ'] * u.Hz
        else:
            raise StandardError('No restfrequency found, NEED it!')
        #TODO : Read in velocity and frequency array if present
        """
        The standard unit is to give UU and VV in seconds (??!?)
        So we have to convert to whatever we want.
        """
        # standard storing unit here is kilo-lambdas
        # save a million lines of code!
        u.add_enabled_equivalencies(lambdas_equivalencies(self.restfreq_unit))
        self.u = (self.data.par('UU') * u.s).to(klambdas)
        self.v = (self.data.par('VV') * u.s).to(klambdas)
        self.w = (self.data.par('WW') * u.s).to(klambdas)
        self.uvdist = sqrt(self.u.value**2 + self.v.value**2) * klambdas

        # BASELINE
        self.baseline = self.hdu.data.par('BASELINE').byteswap().newbyteorder()
        # DATES
        self.jdate = self.hdu.data.par('DATE')
        # clip dates earlier than 1900-01-01 00:00 UTC (JD 2415020.5)
        self.jdate = self.jdate.clip(2415020.5)
        self.date = _sp.array([jd2gd(i) for i in self.jdate])
        self.date0 = self.date.transpose()
        fields = ['year', 'month', 'day', 'hour', 'minute', 'sec']
        self.date1 = {key: value for key, value in zip(fields, self.date0)}
        # convert to datetime objects
        # LOSES the sub-second resolution
        self.date2 = [
            _dt(int(i[0]), int(i[1]), int(i[2]), int(i[3]), int(i[4]),
                int(i[5])) for i in self.date
        ]
        # get number of tracks
        # TODO : rough hack, separate track if diff day is >1
        tmp = _sp.where(_sp.diff(_sp.unique(self.jdate.round(0))) > 1)[0]
        self.ntracks = len(tmp) + 1

        ################################################################
        # NB : need to streamline this.
        # only load the complex visibilities, into a complex array
        # AND then work on that

        # COMPLEX VISIBILITY
        visi_index = len(self.data.parnames)
        if self.hdu.header['NAXIS'] == 7:
            self.visdata = self.data.par(
                visi_index)[:, 0, 0, 0, 0, 0, :].byteswap().newbyteorder()
        #~ self.visdata = self.hdu.data.data[:,0,0,0,0,0,:]
        elif self.hdu.header['NAXIS'] == 6:
            self.visdata = self.data.par(
                visi_index)[:, 0, 0, 0, 0, :].byteswap().newbyteorder()
        # load the re, im and weight arrays
        self.re, self.im, self.wt = self.visdata[:, :].T
        #~ self.re = self.visdata[:,0][:]
        #~ self.im = self.visdata[:,1][:]
        #~ self.wt = self.visdata[:,2][:]
        # complex numbers
        #~ self.comp = self.visdata[:,:2].astype(_np.float64).view(_np.complexfloating)
        #~ self.comp = 1j*self.visdata[:,1][:]
        #~ self.comp += self.visdata[:,0][:]
        #~ self.comp = self.visdata[:,:2].astype(_np.float).view(_np.complex)

        # below seems a bit dependent...
        self.cvisi = self.visdata[:, :2].astype(_np.float).view(
            _np.complex).T[0]
        """
        with complex array, you can do
        amp = np.abs(vis)
        np.angle(vis)   
        vis.real
        vis.imag
        
        """
        # the data is not shifted
        self.isshifted = (False, [0, 0])
        # AMPLITUDE
        self.amp = sqrt(self.re**2 + self.im**2)
        # PHASE
        self.pha = arctan2(self.im, self.re)
        self.pha_deg = self.pha / pi * 180.
        # ERROR / SIGMA
        #TODO : check
        # following 1.0e6 is just for GILDAS, change if needed
        #~ print('NB : Error calculated from weights assuming GILDAS '
        #~ 'data (i.e. frequencies in MHz).')
        self.sigma_alt = 1 / sqrt(self.wt * 1.0e6)
        # Daniels way of calculating sigma
        # test this first
        self.sigma = _sp.sqrt(0.5 / (self.wt * float(self.amp.shape[0])))
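One way to verify the clip value used for the dates above: 2415020.5 is the Julian Date of 1900-01-01 00:00 UTC. This check uses astropy.time rather than the module's jd2gd helper:

from astropy.time import Time

print(Time(2415020.5, format='jd', scale='utc').iso)  # 1900-01-01 00:00:00.000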