def upside_potential_ratio(rets, mar=0, full=0, expanding=0):
    """Upside potential ratio: expected excess return above ``mar`` divided by
    the downside deviation below ``mar``.

    :param rets: periodic return Series, or a DataFrame (computed per column)
    :param mar: minimum acceptable return threshold
    :param full: if truthy, average the upside excess over the full series
                 length; otherwise only over observations above ``mar``
    :param expanding: if truthy, return an expanding (anchored) series of the
                      ratio instead of a single scalar per column
    """
    if isinstance(rets, pd.Series):
        above = rets[rets > mar]
        excess = above - mar
        if expanding:
            # pd.expanding_count was removed from pandas; use the
            # Series.expanding() accessor instead.
            n = rets.expanding().count() if full else above.expanding().count()
            upside = excess.cumsum() / n
            downside = downside_deviation(rets, mar=mar, full=full, expanding=1)
            # reindex back onto the full return index, carrying the last
            # known ratio forward (.ffill() replaces fillna(method="ffill"))
            return (upside / downside).reindex(rets.index).ffill()
        n = rets.count() if full else above.count()
        upside = excess.sum() / n
        downside = downside_deviation(rets, mar=mar, full=full)
        return upside / downside
    # DataFrame input: recurse per column
    vals = {
        c: upside_potential_ratio(rets[c], mar=mar, full=full, expanding=expanding)
        for c in rets.columns
    }
    if expanding:
        return pd.DataFrame(vals, columns=rets.columns)
    return pd.Series(vals)
def downside_deviation(returns, mar=0, full=1, expanding=0):
    """Compute the downside risk: the deviation of all returns below ``mar``.

    returns: periodic return Series, or a DataFrame (computed per column)
    mar: minimum acceptable return
    full: if truthy, normalize by the count of the entire series; otherwise
          by the count of observations below ``mar``
    expanding: if truthy, return an expanding (anchored) series, forward
               filled onto the original index

    http://en.wikipedia.org/wiki/Downside_risk
    """
    if isinstance(returns, pd.Series):
        below = returns[returns < mar]
        if expanding:
            # pd.expanding_count was removed from pandas; use the
            # Series.expanding() accessor instead.
            n = returns.expanding().count() if full else below.expanding().count()
            ssum = ((below - mar) ** 2).cumsum()
            # .ffill() replaces the removed fillna(method='ffill') form
            return ((ssum / n) ** 0.5).reindex(returns.index).ffill()
        n = returns.count() if full else below.count()
        ssum = ((below - mar) ** 2).sum()
        return np.sqrt(ssum / n)
    # DataFrame input: recurse per column
    vals = {
        c: downside_deviation(returns[c], mar=mar, full=full, expanding=expanding)
        for c in returns.columns
    }
    if expanding:
        return pd.DataFrame(vals, columns=returns.columns)
    return pd.Series(vals)
def returns_annualized(returns, geometric=True, scale=None, expanding=False):
    """Return the annualized cumulative returns.

    Parameters
    ----------
    returns : DataFrame or Series
    geometric : link the returns geometrically
    scale : None or scalar or string (ie 12 for months in year),
        If None, attempt to resolve from returns
        If scalar, then use this as the annualization factor
        If string, then pass this to periodicity function to resolve
        annualization factor
    expanding : bool, default is False
        If True, return expanding series/frames.
        If False, return final result.
    """
    scale = _resolve_periods_in_year(scale, returns)
    if expanding:
        if geometric:
            # pd.expanding_count was removed from pandas; use the
            # .expanding() accessor instead.
            n = returns.expanding().count()
            return ((1.0 + returns).cumprod() ** (scale / n)) - 1.0
        # pd.expanding_mean was removed; same accessor-based replacement
        return returns.expanding().mean() * scale
    if geometric:
        n = returns.count()
        return ((1.0 + returns).prod() ** (scale / n)) - 1.0
    return returns.mean() * scale
def clicks_chart(request, pk):
    """Render a PNG line chart of cumulative clicks for the bookmark ``pk``
    over the last 30 days.

    :param request: the incoming HTTP request (unused beyond Django routing)
    :param pk: primary key of the bookmark whose clicks are charted
    :returns: HttpResponse with content_type 'image/png'
    """
    # TODO: Add a stats page for each link where you can see the traffic for
    # that link for the last 30 days in a line chart.
    # FIXME: if there are zero clicks in the window, df has no 'timestamp'
    # column and the code below raises — add error checking.
    thirty_days_ago = timezone.now() - timedelta(days=30)
    clicks = Click.objects.filter(bookmark_id=pk).filter(
        timestamp__gte=thirty_days_ago)
    df = pd.DataFrame(model_to_dict(click) for click in clicks)
    df['count'] = 1
    df.index = df['timestamp']
    counts = df['count'].sort_index()
    # Running total of clicks, downsampled to one (max) value per day and
    # forward-filled across empty days. This replaces the removed
    # pd.expanding_count / resample(how=..., fill_method=...) API.
    series = counts.expanding().count().resample('D').max().ffill()
    response = HttpResponse(content_type='image/png')
    fig = plt.figure()
    series.plot()
    plt.title("Total clicks over past 30 days")
    plt.xlabel("")
    plt.xlim(thirty_days_ago, timezone.now())
    canvas = FigureCanvas(fig)
    canvas.print_png(response)
    return response
def returns_annualized(returns, geometric=True, scale=None, expanding=False):
    """Return the annualized cumulative returns.

    Parameters
    ----------
    returns : DataFrame or Series
    geometric : link the returns geometrically
    scale : None or scalar or string (ie 12 for months in year),
        If None, attempt to resolve from returns
        If scalar, then use this as the annualization factor
        If string, then pass this to periodicity function to resolve
        annualization factor
    expanding : bool, default is False
        If True, return expanding series/frames.
        If False, return final result.
    """
    scale = _resolve_periods_in_year(scale, returns)
    if expanding:
        if geometric:
            # pd.expanding_count was removed from pandas; use the
            # .expanding() accessor instead.
            n = returns.expanding().count()
            return ((1. + returns).cumprod() ** (scale / n)) - 1.
        # pd.expanding_mean was removed; same accessor-based replacement
        return returns.expanding().mean() * scale
    if geometric:
        n = returns.count()
        return ((1. + returns).prod() ** (scale / n)) - 1.
    return returns.mean() * scale
def upside_potential_ratio(rets, mar=0, full=0, expanding=0):
    """Upside potential ratio: expected excess return above ``mar`` divided by
    the downside deviation below ``mar``.

    :param rets: periodic return Series, or a DataFrame (computed per column)
    :param mar: minimum acceptable return threshold
    :param full: if truthy, average the upside excess over the full series
                 length; otherwise only over observations above ``mar``
    :param expanding: if truthy, return an expanding (anchored) series of the
                      ratio instead of a single scalar per column
    """
    if isinstance(rets, pd.Series):
        above = rets[rets > mar]
        excess = above - mar
        if expanding:
            # pd.expanding_count was removed from pandas; use the
            # Series.expanding() accessor instead.
            n = rets.expanding().count() if full else above.expanding().count()
            upside = excess.cumsum() / n
            downside = downside_deviation(rets, mar=mar, full=full, expanding=1)
            # reindex back onto the full return index, carrying the last
            # known ratio forward (.ffill() replaces fillna(method='ffill'))
            return (upside / downside).reindex(rets.index).ffill()
        n = rets.count() if full else above.count()
        upside = excess.sum() / n
        downside = downside_deviation(rets, mar=mar, full=full)
        return upside / downside
    # DataFrame input: recurse per column
    vals = {
        c: upside_potential_ratio(rets[c], mar=mar, full=full, expanding=expanding)
        for c in rets.columns
    }
    if expanding:
        return pd.DataFrame(vals, columns=rets.columns)
    return pd.Series(vals)
def downside_deviation(rets, mar=0, expanding=0, full=0, ann=0):
    """Compute the downside deviation for the specified return series.

    :param rets: periodic return series
    :param mar: minimum acceptable rate of return (MAR)
    :param full: if True, normalize by the length of the full series;
                 if False, use only the count of values below MAR
    :param expanding: if truthy, return an expanding (anchored) series,
                      forward filled onto the original index
    :param ann: True if result should be annualized via periods_in_year
    """
    below = rets[rets < mar]
    if expanding:
        # pd.expanding_count was removed from pandas; use the
        # Series.expanding() accessor instead. When full, the running count
        # of the whole series is sampled at the below-MAR observations.
        n = rets.expanding().count()[below.index] if full else below.expanding().count()
        dd = np.sqrt(((below - mar) ** 2).cumsum() / n)
        if ann:
            dd *= np.sqrt(periods_in_year(rets))
        return dd.reindex(rets.index).ffill()
    n = rets.count() if full else below.count()
    dd = np.sqrt(((below - mar) ** 2).sum() / n)
    if ann:
        dd *= np.sqrt(periods_in_year(rets))
    return dd
def downside_deviation(rets, mar=0, expanding=0, full=0, ann=0):
    """Compute the downside deviation for the specified return series.

    :param rets: periodic return series
    :param mar: minimum acceptable rate of return (MAR)
    :param full: if True, normalize by the length of the full series;
                 if False, use only the count of values below MAR
    :param expanding: if truthy, return an expanding (anchored) series,
                      forward filled onto the original index
    :param ann: True if result should be annualized via periods_in_year
    """
    below = rets[rets < mar]
    if expanding:
        # pd.expanding_count was removed from pandas; use the
        # Series.expanding() accessor instead. When full, the running count
        # of the whole series is sampled at the below-MAR observations.
        n = rets.expanding().count()[below.index] if full else below.expanding().count()
        dd = np.sqrt(((below - mar) ** 2).cumsum() / n)
        if ann:
            dd *= np.sqrt(periods_in_year(rets))
        return dd.reindex(rets.index).ffill()
    n = rets.count() if full else below.count()
    dd = np.sqrt(((below - mar) ** 2).sum() / n)
    if ann:
        dd *= np.sqrt(periods_in_year(rets))
    return dd
def expanding_smoother(self, data, stype='rolling_mean', min_periods=None, freq=None):
    """Perform an expanding smoothing of ``data``.

    For complete help refer to
    http://pandas.pydata.org/pandas-docs/dev/computation.html

    :param data: pandas DataFrame/Series input data
    :param stype: smoothing type — one of 'count', 'sum', 'mean', 'median',
        'min', 'max', 'std', 'var', 'skew', 'kurt'
    :param min_periods: minimum number of observations required for a value
    :param freq: optional frequency to conform the data to before smoothing
    :raises ValueError: for an unknown ``stype`` (the old implementation fell
        through and raised UnboundLocalError — including for the documented
        default 'rolling_mean', which was never a valid choice)

    smoothing types map onto the pandas expanding-window aggregations:
    count / sum / mean / median / min / max / std (unbiased) / var (unbiased)
    / skew (3rd moment) / kurt (4th moment)
    """
    # The pd.expanding_* module functions were removed from pandas; dispatch
    # through the .expanding() accessor instead of a chain of ifs.
    valid = ('count', 'sum', 'mean', 'median', 'min', 'max',
             'std', 'var', 'skew', 'kurt')
    if stype not in valid:
        raise ValueError("unknown smoothing type: %r (expected one of %s)"
                         % (stype, ", ".join(valid)))
    if freq is not None:
        # NOTE(review): the removed expanding_* functions accepted a `freq`
        # that conformed the data to that frequency first; .resample().last()
        # approximates this — confirm against the original pandas behavior.
        data = data.resample(freq).last()
    window = data.expanding(min_periods=1 if min_periods is None else min_periods)
    return getattr(window, stype)()
def updates_chart(request):
    """Render a PNG line chart of the cumulative number of updates over time.

    :param request: the incoming HTTP request (unused beyond Django routing)
    :returns: HttpResponse with content_type 'image/png'
    """
    updates = Update.objects.all()
    df = pd.DataFrame(model_to_dict(update) for update in updates)
    df['count'] = 1
    df.index = df['posted_at']
    counts = df['count'].sort_index()
    # Running total of updates, downsampled to one (max) value per week and
    # forward-filled across empty weeks. This replaces the removed
    # pd.expanding_count / resample(how=..., fill_method=...) API.
    series = counts.expanding().count().resample('W').max().ffill()
    response = HttpResponse(content_type='image/png')
    fig = plt.figure()
    series.plot()
    plt.title("Total updates over time")
    plt.xlabel("")
    canvas = FigureCanvas(fig)
    canvas.print_png(response)
    return response
def get(self, request, pk):
    """Render the bookmark detail page, including a daily cumulative-click
    series for the chart.

    :param request: the incoming HTTP request
    :param pk: primary key of the Bookmark to display (404 if missing)
    :returns: rendered 'urlyapp/bookmark.html' response
    """
    context = self.get_context_data()
    bm = get_object_or_404(Bookmark, pk=pk)
    pf = bm.profile
    context['bookmark'] = bm
    context['profile'] = pf
    clicks = bm.click_set.all()
    clicks = pd.DataFrame(model_to_dict(click) for click in clicks)
    clicks['count'] = 1
    clicks = clicks.set_index('timestamp')
    counts = clicks['count'].sort_index()
    # Running total of clicks, downsampled to one (max) value per day and
    # forward-filled across empty days. This replaces the removed
    # pd.expanding_count / resample(how=..., fill_method=...) API.
    series = counts.expanding().count().resample('D').max().ffill()
    context['data'] = list(series)
    context['data_labels'] = list(range(len(context['data'])))
    return render(request, 'urlyapp/bookmark.html', context)
def ltd_rets_ann(self):
    """Annualize the life-to-date cumulative returns.

    Raises each (1 + ltd return) to pds_per_year over the running count of
    observed periods, i.e. the geometric annualization of the LTD series.
    Reads ``self.ltd_rets``, ``self.pds_per_year`` and ``self.rets``.
    """
    # pd.expanding_count was removed from pandas; use the .expanding()
    # accessor for the running observation count.
    n = self.rets.expanding().count()
    return (1. + self.ltd_rets) ** (self.pds_per_year / n) - 1.