def test_user_purchases(
    uw_fixture, purchase_factory, quotation_factory, monkeypatch
):  # pylint: disable=unused-argument,redefined-outer-name
    """
    user_purchases() must report a VirtualPurchase only for stocks that are both
    purchased AND on the user's watchlist — no "ghost" purchases.

    Flow: stub out mdl.latest_quote so ASX1 always quotes at $2.00, then check
    that a $1.00 buy of 5000 shares is reported as doubling in value (100%).
    """

    def mock_latest_quote(stock):
        # presumably only ASX1 is ever quoted in this test — fail fast otherwise
        assert stock == "ASX1"
        return quotation_factory.create(asx_code="ASX1", last_price=2.0), "2021-01-01"

    u = find_user("u2")
    assert u is not None
    monkeypatch.setattr(mdl, "latest_quote", mock_latest_quote)
    purchases = user_purchases(u)
    Watchlist.objects.create(
        user=u, asx_code="ASX1"
    )  # ASX1 must be in the watchlist for it to be reported as a purchase: no "ghost" purchases. And no we are not cascading deletes ;-)
    # no purchase records exist yet, so the report must be empty
    assert dict(purchases) == {}
    purchase_factory.create(
        asx_code="ASX1",
        user=u,
        buy_date="2021-01-01",
        price_at_buy_date=1.0,
        amount=5000,
        n=5000,
    )
    purchases = user_purchases(u)
    assert "ASX1" in purchases
    result = purchases["ASX1"][0]
    assert isinstance(result, VirtualPurchase)
    # last_price is mocked at 2x the buy price, hence $10000 / 100% gain
    assert (
        str(result)
        == "Purchase on 2021-01-01: $5000.0 (5000 shares@$1.00) is now $10000.00 (100.00%)"
    )
def test_user_purchases(uw_fixture, purchase_factory, quotation_factory, monkeypatch):
    """
    Check that user_purchases() reports a purchase only when the stock is on the
    user's watchlist, and that the reported VirtualPurchase reflects the mocked
    latest quote (2x buy price => 100% gain).
    """

    def fake_latest_quote(stock):
        assert stock == "ASX1"
        quote = quotation_factory.create(asx_code='ASX1', last_price=2.0)
        return quote, '2021-01-01'

    user = find_user('u2')
    assert user is not None
    monkeypatch.setattr(mdl, 'latest_quote', fake_latest_quote)

    before = user_purchases(user)
    # ASX1 must be watchlisted for a purchase to be reported: no "ghost"
    # purchases. And no we are not cascading deletes ;-)
    Watchlist.objects.create(user=user, asx_code='ASX1')
    assert dict(before) == {}

    purchase_factory.create(
        asx_code='ASX1',
        user=user,
        buy_date='2021-01-01',
        price_at_buy_date=1.0,
        amount=5000,
        n=5000,
    )
    after = user_purchases(user)
    assert 'ASX1' in after
    vp = after['ASX1'][0]
    assert isinstance(vp, VirtualPurchase)
    expected = "Purchase on 2021-01-01: $5000.0 (5000 shares@$1.00) is now $10000.00 (100.00%)"
    assert str(vp) == expected
def data_factory(
    ld: LazyDictionary,
):
    """
    Build the portfolio performance dataframe on demand — the underlying data
    fetch is expensive, so we defer it until actually needed.

    NOTE(review): reads `user` from the enclosing scope; assumes the user has
    at least one purchase (otherwise the earliest-date lookup fails) — confirm
    callers guarantee this.
    """
    holdings = user_purchases(user)
    stocks = list(holdings.keys())
    purchases = [vp for stock_purchases in holdings.values() for vp in stock_purchases]
    buy_dates = sorted(vp.buy_date for vp in purchases)
    # timeframe spans the earliest buy date through the latest available date
    timeframe = Timeframe(
        from_date=str(buy_dates[0]), to_date=all_available_dates()[-1]
    )
    return make_portfolio_performance_dataframe(stocks, timeframe, purchases)
def show_companies(
    matching_companies,  # may be QuerySet or iterable of stock codes (str)
    request,
    sentiment_timeframe: Timeframe,
    extra_context=None,
    template_name="all_stocks.html",
):
    """
    Support function to public-facing views to eliminate code redundancy.

    Renders the given companies (or all, when matching_companies is None) with
    sentiment heatmap, sector breakdown and pagination. Caller supplies title
    and heatmap title via extra_context.
    """
    virtual_purchases_by_user = user_purchases(request.user)
    if isinstance(matching_companies, QuerySet):
        stocks_queryset = matching_companies  # we assume QuerySet is already sorted by desired criteria
    elif matching_companies is None or len(matching_companies) > 0:
        stocks_queryset, _ = latest_quote(matching_companies)
        # FALLTHRU
    else:
        # BUG FIX: an empty (non-None) iterable previously left stocks_queryset
        # unbound, crashing below with NameError. Report and bail out instead.
        warning(request, "No matching companies.")
        return render(request, template_name, context={"timeframe": sentiment_timeframe})

    # sort queryset as this will often be requested by the USER
    sort_by = tuple(request.GET.get("sort_by", "asx_code").split(","))
    info(request, "Sorting by {}".format(sort_by))
    stocks_queryset = stocks_queryset.order_by(*sort_by)

    # keep track of stock codes for template convenience
    asx_codes = [quote.asx_code for quote in stocks_queryset.all()]
    # BUG FIX: extra_context defaults to None and `'n_top_bottom' in None`
    # raises TypeError — guard before the lookup.
    n_top_bottom = extra_context.get("n_top_bottom", 20) if extra_context else 20
    print("show_companies: found {} stocks".format(len(asx_codes)))

    # setup context dict for the render
    context = {
        # NB: title and heatmap_title are expected to be supplied by caller via extra_context
        "timeframe": sentiment_timeframe,
        "title": "Caller must override",
        "watched": user_watchlist(request.user),
        "n_stocks": len(asx_codes),
        "n_top_bottom": n_top_bottom,
        "virtual_purchases": virtual_purchases_by_user,
    }

    # since we sort above, we must setup the pagination also...
    assert isinstance(stocks_queryset, QuerySet)
    paginator = Paginator(stocks_queryset, 50)
    page_number = request.GET.get("page", 1)
    page_obj = paginator.page(page_number)
    context["page_obj"] = page_obj
    context["object_list"] = paginator

    if len(asx_codes) <= 0:
        warning(request, "No matching companies found.")
    else:
        df = selected_cached_stocks_cip(asx_codes, sentiment_timeframe)
        sentiment_heatmap_data, top10, bottom10 = plot_heatmap(
            df, sentiment_timeframe, n_top_bottom=n_top_bottom
        )
        sector_breakdown_plot = plot_breakdown(df)
        context.update(
            {
                "best_ten": top10,
                "worst_ten": bottom10,
                "sentiment_heatmap": sentiment_heatmap_data,
                "sentiment_heatmap_title": "{}: {}".format(
                    context["title"], sentiment_timeframe.description
                ),
                "sector_breakdown_plot": sector_breakdown_plot,
            }
        )

    if extra_context:
        context.update(extra_context)
    add_messages(request, context)
    return render(request, template_name, context=context)
def show_companies(
    matching_companies,  # may be QuerySet or iterable of stock codes (str)
    request,
    sentiment_timeframe: Timeframe,
    extra_context=None,
    template_name="all_stocks.html",
):
    """
    Support function to public-facing views to eliminate code redundancy.

    Renders the given companies (or all, when matching_companies is None) with
    sentiment heatmap, sector breakdown, top/bottom cumulative-return plots and
    pagination. Caller supplies title and heatmap title via extra_context.
    """
    if isinstance(matching_companies, QuerySet):
        stocks_queryset = matching_companies  # we assume QuerySet is already sorted by desired criteria
    elif matching_companies is None or len(matching_companies) > 0:
        stocks_queryset, _ = latest_quote(matching_companies)
        # FALLTHRU
    else:  # no companies to report?
        warning(request, "No matching companies.")
        return render(request, template_name, context={"timeframe": sentiment_timeframe})

    # prune companies without a latest price, makes no sense to report them
    stocks_queryset = stocks_queryset.exclude(last_price__isnull=True)

    # sort queryset as this will often be requested by the USER
    arg = request.GET.get("sort_by", "asx_code")
    if arg == "sector" or arg == "sector,-eps":
        # sector lives in a separate dataset, so we must sort in Python
        ss = {
            s["asx_code"]: s["sector_name"]
            for s in stocks_by_sector().to_dict("records")
        }
        if arg == "sector":
            stocks_queryset = sorted(
                stocks_queryset, key=lambda s: ss.get(s.asx_code, "Z")
            )  # companies without sector sort last
        else:
            eps_dict = {
                s.asx_code: s.eps if s.eps is not None else 0.0
                for s in stocks_queryset
            }
            stocks_queryset = sorted(
                stocks_queryset,
                key=lambda s: (ss.get(s.asx_code, "Z"), -eps_dict.get(s.asx_code, 0.0)),
            )
    else:
        sort_by = tuple(arg.split(","))
        stocks_queryset = stocks_queryset.order_by(*sort_by)

    # keep track of stock codes for template convenience
    asx_codes = [quote.asx_code for quote in stocks_queryset]
    # BUG FIX: extra_context defaults to None and `"n_top_bottom" in None`
    # raises TypeError — guard before the lookup.
    n_top_bottom = extra_context.get("n_top_bottom", 20) if extra_context else 20
    print("show_companies: found {} stocks".format(len(asx_codes)))

    # setup context dict for the render
    context = {
        # NB: title and heatmap_title are expected to be supplied by caller via extra_context
        "timeframe": sentiment_timeframe,
        "title": "Caller must override",
        "watched": user_watchlist(request.user),
        "n_stocks": len(asx_codes),
        "n_top_bottom": n_top_bottom,
        "virtual_purchases": user_purchases(request.user),
    }

    # since we sort above, we must setup the pagination also...
    paginator = Paginator(stocks_queryset, 50)
    page_number = request.GET.get("page", 1)
    page_obj = paginator.page(page_number)
    context["page_obj"] = page_obj
    context["object_list"] = paginator

    # compute totals across all dates for the specified companies to look at top10/bottom10 in the timeframe
    ld = LazyDictionary()
    ld["cip_df"] = lambda ld: selected_cached_stocks_cip(asx_codes, sentiment_timeframe)
    ld["sum_by_company"] = lambda ld: ld["cip_df"].sum(axis=1, numeric_only=True)
    ld["top10"] = lambda ld: ld["sum_by_company"].nlargest(n_top_bottom)
    ld["bottom10"] = lambda ld: ld["sum_by_company"].nsmallest(n_top_bottom)
    ld["stocks_by_sector"] = lambda ld: stocks_by_sector()

    # NB: short-circuit keeps the expensive ld["top10"] from being computed when no codes
    if len(asx_codes) <= 0 or len(ld["top10"]) <= 0:
        warning(request, "No matching companies found.")
    else:
        sorted_codes = "-".join(sorted(asx_codes))
        sentiment_heatmap_uri = cache_plot(
            f"{sorted_codes}-{sentiment_timeframe.description}-stocks-sentiment-plot",
            lambda ld: plot_heatmap(sentiment_timeframe, ld),
            datasets=ld,
        )
        key = f"{sorted_codes}-{sentiment_timeframe.description}-breakdown-plot"
        sector_breakdown_uri = cache_plot(key, plot_breakdown, datasets=ld)
        top10_plot_uri = cache_plot(
            f"top10-plot-{'-'.join(ld['top10'].index)}",
            lambda ld: plot_cumulative_returns(ld["top10"].index, ld),
            datasets=ld,
        )
        bottom10_plot_uri = cache_plot(
            f"bottom10-plot-{'-'.join(ld['bottom10'].index)}",
            lambda ld: plot_cumulative_returns(ld["bottom10"].index, ld),
            datasets=ld,
        )
        context.update(
            {
                "best_ten": ld["top10"],
                "worst_ten": ld["bottom10"],
                "sentiment_heatmap_uri": sentiment_heatmap_uri,
                "sentiment_heatmap_title": "{}: {}".format(
                    context["title"], sentiment_timeframe.description
                ),
                "sector_breakdown_uri": sector_breakdown_uri,
                "top10_plot_uri": top10_plot_uri,
                "bottom10_plot_uri": bottom10_plot_uri,
                "timeframe_end_performance": timeframe_end_performance(ld),
            }
        )

    if extra_context:
        context.update(extra_context)
    add_messages(request, context)
    return render(request, template_name, context=context)
def show_purchase_performance(request):
    """
    Render a portfolio-performance view for the requesting user: per-day cost,
    worth and profit for the whole portfolio and each held stock, from the
    earliest purchase date to the latest available trading date.

    NOTE(review): assumes the user has at least one virtual purchase —
    purchase_buy_dates[0] raises IndexError otherwise; confirm callers/URLs
    guarantee this.
    """
    purchase_buy_dates = []
    purchases = []
    stocks = []
    # flatten the user's purchases into parallel lists of stocks/purchases/dates
    for stock, purchases_for_stock in user_purchases(request.user).items():
        stocks.append(stock)
        for purchase in purchases_for_stock:
            purchase_buy_dates.append(purchase.buy_date)
            purchases.append(purchase)
    purchase_buy_dates = sorted(purchase_buy_dates)
    timeframe = Timeframe(from_date=str(purchase_buy_dates[0]), to_date=all_available_dates()[-1])
    # prices per stock (rows) per date (columns) over the timeframe
    df = company_prices(stocks, timeframe, transpose=True)
    rows = []
    stock_count = defaultdict(int)    # cumulative shares held per stock
    stock_cost = defaultdict(float)   # cumulative dollars spent per stock
    portfolio_cost = 0.0
    for d in [
        datetime.strptime(x, "%Y-%m-%d").date() for x in timeframe.all_dates()
    ]:
        d_str = str(d)
        if d_str not in df.columns:  # not a trading day?
            continue
        # d is bound as a default arg so each lambda captures the current date
        purchases_to_date = filter(lambda vp, d=d: vp.buy_date <= d, purchases)
        for purchase in purchases_to_date:
            if purchase.buy_date == d:
                # purchase settles today: fold it into the running totals once
                portfolio_cost += purchase.amount
                stock_count[purchase.asx_code] += purchase.n
                stock_cost[purchase.asx_code] += purchase.amount
        portfolio_worth = sum_portfolio(df, d_str, stock_count.items())
        # emit rows for each stock and aggregate portfolio
        for asx_code in stocks:
            cur_price = df.at[asx_code, d_str]
            if np.isnan(cur_price):  # price missing? ok, skip record
                continue
            assert cur_price is not None and cur_price >= 0.0
            stock_worth = cur_price * stock_count[asx_code]
            rows.append({
                "portfolio_cost": portfolio_cost,
                "portfolio_worth": portfolio_worth,
                "portfolio_profit": portfolio_worth - portfolio_cost,
                "stock_cost": stock_cost[asx_code],
                "stock_worth": stock_worth,
                "stock_profit": stock_worth - stock_cost[asx_code],
                "date": d_str,
                "stock": asx_code,
            })
    # plot_portfolio returns a 3-tuple of figures: overall, per-stock, contributors
    t = plot_portfolio(pd.DataFrame.from_records(rows))
    portfolio_performance_figure, stock_performance_figure, profit_contributors_figure = t
    context = {
        "title": "Portfolio performance",
        "portfolio_title": "Overall",
        "portfolio_figure": portfolio_performance_figure,
        "stock_title": "Stock",
        "stock_figure": stock_performance_figure,
        "profit_contributors": profit_contributors_figure,
    }
    return render(request, "portfolio_trends.html", context=context)