def test_decimal(self):
    DecimalModel.objects.create(n1=Decimal('-0.8'), n2=Decimal('1.2'))
    obj = DecimalModel.objects.annotate(n1_abs=Abs('n1'), n2_abs=Abs('n2')).first()
    self.assertIsInstance(obj.n1_abs, Decimal)
    self.assertIsInstance(obj.n2_abs, Decimal)
    self.assertEqual(obj.n1, -obj.n1_abs)
    self.assertEqual(obj.n2, obj.n2_abs)
def test_float(self):
    obj = FloatModel.objects.create(f1=-0.5, f2=12)
    obj = FloatModel.objects.annotate(f1_abs=Abs('f1'), f2_abs=Abs('f2')).first()
    self.assertIsInstance(obj.f1_abs, float)
    self.assertIsInstance(obj.f2_abs, float)
    self.assertEqual(obj.f1, -obj.f1_abs)
    self.assertEqual(obj.f2, obj.f2_abs)
def test_integer(self):
    IntegerModel.objects.create(small=12, normal=0, big=-45)
    obj = IntegerModel.objects.annotate(
        small_abs=Abs('small'),
        normal_abs=Abs('normal'),
        big_abs=Abs('big'),
    ).first()
    self.assertIsInstance(obj.small_abs, int)
    self.assertIsInstance(obj.normal_abs, int)
    self.assertIsInstance(obj.big_abs, int)
    self.assertEqual(obj.small, obj.small_abs)
    self.assertEqual(obj.normal, obj.normal_abs)
    self.assertEqual(obj.big, -obj.big_abs)
def test_update_ordered_by_m2m_annotation(self):
    foo = Foo.objects.create(target="test")
    Bar.objects.create(foo=foo)
    Bar.objects.annotate(abs_id=Abs("m2m_foo")).order_by("abs_id").update(x=3)
    self.assertEqual(Bar.objects.get().x, 3)
def _get_forecast_mad_single(self):
    """Mean absolute deviation (MAD) of a single item: abs(Forecast - Order)."""
    qs = self.annotate(
        forecast_mad_single=Abs(
            F('total_forecasted_value') - F('total_ordered_value'),
            output_field=FloatField(),
        )
    )
    return qs
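# A minimal plain-Python sketch of the arithmetic the annotation above pushes
# into SQL; the concrete values are hypothetical, chosen only to illustrate
# abs(Forecast - Order).
total_forecasted_value = 120.0  # assumed figure for illustration
total_ordered_value = 100.0     # assumed figure for illustration
forecast_mad_single = abs(total_forecasted_value - total_ordered_value)
assert forecast_mad_single == 20.0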
def post(self, request):
    serializer = MatchPostRequestSerializer(
        data=request.data, context={'user': request.user.cardsuser})
    if not serializer.is_valid():
        return Response(serializer.errors, status=400)
    max_delta = serializer.data['max_delta']
    b_request = BattleRequest.objects.filter(
        card__owner=request.user.cardsuser).first()
    if b_request is None:
        # No open battle request for this user; nothing to match.
        return Response({'battle': None})
    if b_request.battle is not None:
        battle = b_request.battle
    else:
        pair_request = BattleRequest.objects.exclude(
            card=b_request.card,
        ).annotate(
            delta=Abs(F('card__power') - b_request.card.power),
        ).filter(delta__lte=max_delta).order_by('delta').first()
        if pair_request is None:
            return Response({'battle': None})
        battle = Battle.objects.create(
            first_card=b_request.card,
            second_card=pair_request.card,
        )
        pair_request.battle = battle
        pair_request.save()
    b_request.delete()
    result_ser = BattleSerializer(battle)
    return Response(result_ser.data)
def count_votes(self):
    """Sum votes for review."""
    return self.vote_set.aggregate(
        upvotes=Coalesce(models.Sum('value', filter=models.Q(value=1)), 0),
        downvotes=Coalesce(
            Abs(models.Sum('value', filter=models.Q(value=-1))), 0),
    )
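# Hedged illustration of the aggregate above: with stored vote values
# [1, 1, -1], the filtered Sum over value=1 yields 2 and the filtered Sum
# over value=-1 yields -1, which Abs flips into a positive downvote count
# (and Coalesce maps an empty Sum to 0). Plain-Python equivalent:
values = [1, 1, -1]  # hypothetical vote rows
upvotes = sum(v for v in values if v == 1) or 0          # Coalesce(Sum(...), 0)
downvotes = abs(sum(v for v in values if v == -1) or 0)  # Coalesce(Abs(Sum(...)), 0)
assert (upvotes, downvotes) == (2, 1)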
def test_order_expression_customdefault(self):
    with CustomOrdering(Costume, Abs('animal_id').desc()):
        response = self.client.get('/costume/?order_by=-description')
        self.assertEqual(response.status_code, 200)
        returned_data = jsonloads(response.content)
        data = [x['id'] for x in returned_data['data']]
        self.assertEqual(data, [self.a4.pk, self.a1.pk, self.a3.pk, self.a2.pk])
def closest_users(self, target_user, n):
    # TODO: Handle PAL too?
    target_pb = target_user.ntsc_pb
    usernames = twitch.API.usernames_in_channel(self.context.channel.name)
    users = (
        User.objects.select_related("twitch_user")
        .filter(twitch_user__username__in=usernames, ntsc_pb__isnull=False)
        .exclude(ntsc_pb=0)
        .exclude(id=target_user.id)
        .order_by(Abs(F("ntsc_pb") - target_pb).asc())
    )[:n]
    return sorted(users, key=lambda user: user.ntsc_pb)
def get_significance_annotation(self, session_token):
    # Calculate the absolute difference in correlation of the two polarities
    # for a given target. Exclude any correlation that this session has voted
    # for, either as predicate or target. (Exclude already voted predicates as
    # they cannot be suggestions, and already voted targets as we want to
    # maximize effect on future suggestions.)
    # e.g., if A has a 0.3 correlation with B, and ~A has a 0.8 correlation
    # with B, then A has a significance of 0.5 with B.
    #
    # We want to weigh the raw significance score by the likelihood of the
    # option being chosen. This centers around 0.5, so that a likelihood of
    # 0.5 is a 1x multiplier, and drops off to 0x for 100% or 0% likelihood.
    # This way, options that may have high impact but are unlikely to provide
    # information about the user will be ranked below options that have a more
    # moderate impact, but are more likely to reveal something about the
    # user's preferences.
    likelihood_score = (
        1 - 2 * Abs(0.5 - self.get_likelihood_annotation(session_token)))
    # Build a subquery of all correlations for relevant options.
    # Since we want the difference, start by getting all with polarity=True.
    correlation_change = Subquery(
        OptionCorrelation.objects.filter(
            predicate=OuterRef('pk'),
            predicate_polarity=True,
        ).exclude(
            # Exclude options that the session has voted on, either as the
            # predicate or target, since we want options that are likely to
            # affect future votes.
            target__uservote__session=session_token,
        ).annotate(
            # Next, annotate a new column called correlation_false for the
            # same options, but with polarity=False.
            correlation_false=Subquery(
                OptionCorrelation.objects.filter(
                    predicate=OuterRef('predicate'),
                    predicate_polarity=False,
                    target=OuterRef('target'),
                ).values('correlation')[:1]),
        ).values(
            # Then collapse the rows to just the absolute difference up or down.
            correlation_change=Abs(F('correlation') - F('correlation_false')),
        )[:1],
        output_field=FloatField(),
    )
    # Finally, average the absolute differences for all targets of a given
    # predicate.
    return likelihood_score * Avg(correlation_change)
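# A quick sanity check (plain Python, hypothetical likelihoods) of the
# weighting formula above: 1 - 2 * abs(0.5 - likelihood) peaks at 1.0 for a
# 50/50 option and falls linearly to 0.0 at either certainty.
for likelihood, expected in [(0.5, 1.0), (0.25, 0.5), (0.0, 0.0), (1.0, 0.0)]:
    assert 1 - 2 * abs(0.5 - likelihood) == expected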
def total_expenses(self):
    expression = Sum(Coalesce(Abs('transaction__amount'), 'budgeted_amount'))
    total_expenses = Expense.objects.filter(income=self).aggregate(
        total_expenses=expression)
    if total_expenses['total_expenses']:
        return total_expenses['total_expenses']
    return 0
def __init__(self, week=None, league_id=None):
    matchups = Matchup.objects.exclude(league_id=LISTENER_LEAGUE_ID)
    if week:
        matchups = matchups.filter(week=week)
    if league_id:
        matchups = matchups.filter(league_id=league_id)
    self.matchups = matchups
    self.narrow_matchups = matchups.annotate(
        point_difference=Abs(F('points_one') - F('points_two')))
    self.rosters = Roster.objects.exclude(
        league__sleeper_id=LISTENER_LEAGUE_ID)
def annotate_rank(self):
    ranking = self
    if "p" in ranking.query.annotations:
        ranking = ranking.alias(
            d=F("occurrence__position") - F("p"),
            dsum=Abs(F("d") - 1.0, output_field=FloatField()) + F("dsum"),
        )
    else:
        ranking = ranking.alias(dsum=Value(1, output_field=FloatField()))
    ranking = ranking.annotate(rank=ExpressionWrapper(
        Min("dsum") * F("length") / Count("*"),
        output_field=FloatField(),
    ))
    ranking = ranking.alias(p=F("occurrence__position"))
    return ranking
def closest_users_with_pbs(self, target_user, target_pb, n):
    # TODO: Handle PAL too?
    usernames = twitch.API.usernames_in_channel(self.context.channel.name)
    user_ids_with_pbs = (
        User.objects
        .filter(
            twitch_user__username__in=usernames,
            score_pb__current=True,
            score_pb__console_type="ntsc",
        )
        .values("id")
        .annotate(pb=Max("score_pb__score"))
        .exclude(pb=0)
        .exclude(id=target_user.id)
        .order_by(Abs(F("pb") - target_pb).asc())
    )[:n]
    users = {
        user.id: user
        for user in User.objects.filter(
            id__in=[row["id"] for row in user_ids_with_pbs]
        ).select_related("twitch_user")
    }
    return sorted(
        [(users[row["id"]], row["pb"]) for row in user_ids_with_pbs],
        key=lambda row: row[1],
    )
def get_min_max_score_annotation(
    field_name,
    min_value,
    max_value,
    score_contribution,
    relaxation_type=None,
    relaxation_value=None,
):
    spread = max_value - min_value
    if relaxation_type == RelaxationType.PERCENTAGE:
        spread = spread * (1 + relaxation_value / 100 * 2)
    if relaxation_type == RelaxationType.VALUE:
        spread = spread + relaxation_value * 2
    offset = Cast(
        Abs(Least(
            F(field_name) - min_value,
            max_value - F(field_name),
            0,
        )),
        output_field=FloatField(),
    )
    score_annotation = score_contribution * (1 - offset / spread)
    return score_annotation
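# Worked example (hypothetical numbers) of the spread/offset arithmetic above:
# with min_value=10, max_value=20 and a PERCENTAGE relaxation of 10, the
# spread widens from 10 to 10 * (1 + 10 / 100 * 2) = 12. For a row value of
# 8, Least(8 - 10, 20 - 8, 0) = -2, so Abs gives an offset of 2 and the score
# factor becomes 1 - 2 / 12; values inside the range get offset 0 and keep
# the full score_contribution.
min_value, max_value, relaxation_value = 10, 20, 10
spread = (max_value - min_value) * (1 + relaxation_value / 100 * 2)
value = 8  # assumed field value
offset = abs(min(value - min_value, max_value - value, 0))
assert spread == 12 and offset == 2 and 1 - offset / spread == 1 - 2 / 12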
def scoreboard(request):
    correct_overtime_answer = (
        GamePreferences.objects.first().overtime_question_value)
    # Annotate before ordering so the ordering can reference the annotations.
    sorted_teams = Team.objects.annotate(
        total_point_sum=Sum("point_changes__scoring"),
        first_round_sum=Sum("point_changes__scoring",
                            filter=Q(point_changes__round_number=1)),
        second_round_sum=Sum("point_changes__scoring",
                             filter=Q(point_changes__round_number=2)),
        third_round_sum=Sum("point_changes__scoring",
                            filter=Q(point_changes__round_number=3)),
        difference=Abs(F('overtime_answer') - correct_overtime_answer),
    ).order_by('-total_point_sum', 'difference')
    overtime_winning_team = sorted_teams.order_by('difference').first()
    if not overtime_winning_team.overtime_answer:
        overtime_winning_team.id = 0
    total_people = sorted_teams.aggregate(Sum('people'))['people__sum']
    context = {
        'sorted_teams': sorted_teams,
        'amount_of_people': total_people,
        'overtime_winning_team_id': overtime_winning_team.id,
    }
    return render(request, 'scoreboard.html', context)
def plot_lightcurve(
    source: Source,
    vs_abs_min: float = 4.3,
    m_abs_min: float = 0.26,
    use_peak_flux: bool = True,
) -> Row:
    """Create the lightcurve and 2-epoch metric graph for a source with Bokeh.

    Args:
        source (Source): Source object.
        vs_abs_min (float, optional): MeasurementPair objects with an absolute
            vs metric greater than `vs_abs_min` and m metric greater than
            `m_abs_min` will be connected in the metric graph. Defaults to 4.3.
        m_abs_min (float, optional): See `vs_abs_min`. Defaults to 0.26.
        use_peak_flux (bool, optional): If True, use peak fluxes, otherwise
            use integrated fluxes. Defaults to True.

    Returns:
        Row: Bokeh Row layout object containing the lightcurve and graph plots.
    """
    PLOT_WIDTH = 800
    PLOT_HEIGHT = 300
    flux_column = "flux_peak" if use_peak_flux else "flux_int"
    metric_suffix = "peak" if use_peak_flux else "int"
    measurements_qs = (
        Measurement.objects.filter(source__id=source.id)
        .annotate(
            taustart_ts=F("image__datetime"),
            flux=F(flux_column),
            flux_err_lower=F(flux_column) - F(f"{flux_column}_err"),
            flux_err_upper=F(flux_column) + F(f"{flux_column}_err"),
        )
        .values(
            "id",
            "pk",
            "taustart_ts",
            "flux",
            "flux_err_upper",
            "flux_err_lower",
            "forced",
        )
        .order_by("taustart_ts")
    )

    candidate_measurement_pairs_qs = (
        source.measurementpair_set.annotate(
            m_abs=Abs(f"m_{metric_suffix}"),
            vs_abs=Abs(f"vs_{metric_suffix}"),
        )
        .filter(vs_abs__gte=vs_abs_min, m_abs__gte=m_abs_min)
        .values("measurement_a_id", "measurement_b_id", "vs_abs", "m_abs")
    )
    candidate_measurement_pairs_df = pd.DataFrame(candidate_measurement_pairs_qs)

    # lightcurve required cols: taustart_ts, flux, flux_err_upper,
    # flux_err_lower, forced
    lightcurve = pd.DataFrame(measurements_qs)
    # remap method values to labels to make a better legend
    lightcurve["method"] = lightcurve.forced.map({
        True: "Forced",
        False: "Selavy",
    })
    # note: this rebinds the `source` parameter to the plot data source
    source = ColumnDataSource(lightcurve)
    method_mapper = factor_cmap(
        "method", palette="Colorblind3", factors=["Selavy", "Forced"])

    min_y = min(0, lightcurve.flux_err_lower.min())
    max_y = lightcurve.flux_err_upper.max()
    y_padding = (max_y - min_y) * 0.1
    fig_lc = figure(
        plot_width=PLOT_WIDTH,
        plot_height=PLOT_HEIGHT,
        sizing_mode="stretch_width",
        x_axis_type="datetime",
        x_range=DataRange1d(default_span=timedelta(days=1)),
        y_range=DataRange1d(start=min_y, end=max_y + y_padding),
    )
    # line source must be a COPY of the data for the scatter source for the
    # hover and selection to work properly; using the same ColumnDataSource
    # will break it
    fig_lc.line("taustart_ts", "flux", source=lightcurve)
    lc_scatter = fig_lc.scatter(
        "taustart_ts",
        "flux",
        marker="circle",
        size=6,
        color=method_mapper,
        nonselection_color=method_mapper,
        selection_color="red",
        nonselection_alpha=1.0,
        hover_color="red",
        alpha=1.0,
        source=source,
        legend_group="method",
    )
    fig_lc.add_layout(
        Whisker(
            base="taustart_ts",
            upper="flux_err_upper",
            lower="flux_err_lower",
            source=source,
        ))
    fig_lc.xaxis.axis_label = "Datetime"
    fig_lc.xaxis[0].formatter = DatetimeTickFormatter(days="%F", hours="%H:%M")
    fig_lc.yaxis.axis_label = (
        "Peak flux (mJy/beam)" if use_peak_flux else "Integrated flux (mJy)")

    # determine legend location: either bottom_left or top_left
    legend_location = (
        "top_left"
        if lightcurve.sort_values("taustart_ts").iloc[0].flux < (max_y - min_y) / 2
        else "bottom_left")
    fig_lc.legend.location = legend_location

    # TODO: add vs and m metrics to graph edges
    # create plot
    fig_graph = figure(
        plot_width=PLOT_HEIGHT,
        plot_height=PLOT_HEIGHT,
        x_range=Range1d(-1.1, 1.1),
        y_range=Range1d(-1.1, 1.1),
        x_axis_type=None,
        y_axis_type=None,
        sizing_mode="fixed",
    )
    hover_tool_lc_callback = None
    if len(candidate_measurement_pairs_df) > 0:
        g = nx.Graph()
        for _row in candidate_measurement_pairs_df.itertuples(index=False):
            g.add_edge(_row.measurement_a_id, _row.measurement_b_id)
        node_layout = nx.circular_layout(g, scale=1, center=(0, 0))

        # add node positions to dataframe
        for suffix in ["a", "b"]:
            pos_df = pd.DataFrame(
                candidate_measurement_pairs_df[f"measurement_{suffix}_id"]
                .map(node_layout)
                .to_list(),
                columns=[f"measurement_{suffix}_x", f"measurement_{suffix}_y"],
            )
            candidate_measurement_pairs_df = (
                candidate_measurement_pairs_df.join(pos_df))
        candidate_measurement_pairs_df["measurement_x"] = list(
            zip(
                candidate_measurement_pairs_df.measurement_a_x.values,
                candidate_measurement_pairs_df.measurement_b_x.values,
            ))
        candidate_measurement_pairs_df["measurement_y"] = list(
            zip(
                candidate_measurement_pairs_df.measurement_a_y.values,
                candidate_measurement_pairs_df.measurement_b_y.values,
            ))
        node_positions_df = pd.DataFrame.from_dict(
            node_layout, orient="index", columns=["x", "y"])
        node_positions_df["lc_index"] = node_positions_df.index.map(
            {v: k for k, v in lightcurve.id.to_dict().items()}).values
        node_source = ColumnDataSource(node_positions_df)
        edge_source = ColumnDataSource(candidate_measurement_pairs_df)

        # add edges to plot
        edge_renderer = fig_graph.multi_line(
            "measurement_x",
            "measurement_y",
            line_width=5,
            hover_color="red",
            source=edge_source,
            name="edges",
        )
        # add nodes to plot
        node_renderer = fig_graph.circle(
            "x",
            "y",
            size=20,
            hover_color="red",
            selection_color="red",
            nonselection_alpha=1.0,
            source=node_source,
            name="nodes",
        )

        # create hover tool for node edges
        edge_callback_code = """
        // get edge index
        let indices_a = cb_data.index.indices.map(i => edge_data.data.measurement_a_id[i]);
        let indices_b = cb_data.index.indices.map(i => edge_data.data.measurement_b_id[i]);
        let indices = indices_a.concat(indices_b);
        let lightcurve_indices = indices.map(i => lightcurve_data.data.id.indexOf(i));
        lightcurve_data.selected.indices = lightcurve_indices;
        """
        hover_tool_edges = HoverTool(
            tooltips=None,
            renderers=[edge_renderer],
            callback=CustomJS(
                args={
                    "lightcurve_data": lc_scatter.data_source,
                    "edge_data": edge_renderer.data_source,
                },
                code=edge_callback_code,
            ),
        )
        fig_graph.add_tools(hover_tool_edges)

        # create labels for nodes
        graph_source = ColumnDataSource(node_positions_df)
        labels = LabelSet(
            x="x",
            y="y",
            text="lc_index",
            source=graph_source,
            text_align="center",
            text_baseline="middle",
            text_font_size="1em",
            text_color="white",
        )
        fig_graph.renderers.append(labels)

        # prepare a JS callback for the lightcurve hover tool to mark the
        # associated nodes
        hover_tool_lc_callback = CustomJS(
            args={
                "node_data": node_renderer.data_source,
                "lightcurve_data": lc_scatter.data_source,
            },
            code="""
            let ids = cb_data.index.indices.map(i => lightcurve_data.data.id[i]);
            let node_indices = ids.map(i => node_data.data.index.indexOf(i));
            node_data.selected.indices = node_indices;
            """,
        )

    # create hover tool for lightcurve
    hover_tool_lc = HoverTool(
        tooltips=[
            ("Index", "@index"),
            ("Date", "@taustart_ts{%F}"),
            (f"Flux {metric_suffix}", "@flux mJy"),
        ],
        formatters={
            "@taustart_ts": "datetime",
        },
        mode="vline",
        callback=hover_tool_lc_callback,
    )
    fig_lc.add_tools(hover_tool_lc)

    plot_row = row(fig_lc, fig_graph, sizing_mode="stretch_width")
    plot_row.css_classes.append("mx-auto")
    return plot_row
def set_group_data(self, group_type):
    m = group_type.objects.filter(megasession=self)
    # We get and assign sender decisions to group objects here.
    subquery_head = group_type.objects.filter(
        id=OuterRef('id')
    ).annotate(
        sender_city=F('sender__city'),
        receiver_city=F('receiver__city'),
    )
    sender_decision = Subquery(
        subquery_head.annotate(
            sender_decision=Sum(
                'sender__owner__trust_player__decisions__answer',
                filter=(
                    Q(sender__owner__trust_player__decisions__decision_type='sender_decision')
                    & Q(sender__owner__trust_player__decisions__city=F('receiver_city'))
                ),
            )
        ).values('sender_decision')[:1]
    )
    receiver_decision = Subquery(
        subquery_head.annotate(
            receiver_decision=Sum(
                'receiver__owner__trust_player__decisions__answer',
                filter=(
                    Q(receiver__owner__trust_player__decisions__decision_type='return_decision')
                    & Q(receiver__owner__trust_player__decisions__city=F('sender__city'))
                ),
            )
        ).values('receiver_decision')[:1]
    )
    sender_belief_re_receiver = Subquery(
        subquery_head.annotate(
            sender_belief=Sum(
                'sender__owner__trust_player__decisions__answer',
                filter=(
                    Q(sender__owner__trust_player__decisions__decision_type='sender_belief')
                    & Q(sender__owner__trust_player__decisions__city=F('receiver__city'))
                ),
            )
        ).values('sender_belief')[:1]
    )
    receiver_belief_re_sender = Subquery(
        subquery_head.annotate(
            receiver_belief=Sum(
                'receiver__owner__trust_player__decisions__answer',
                filter=(
                    Q(receiver__owner__trust_player__decisions__decision_type='receiver_belief')
                    & Q(receiver__owner__trust_player__decisions__city=F('sender__city'))
                ),
            )
        ).values('receiver_belief')[:1]
    )
    m.update(
        sender_decision_re_receiver=sender_decision,
        receiver_decision_re_sender=receiver_decision,
        sender_belief_re_receiver=sender_belief_re_receiver,
        receiver_belief_re_sender=receiver_belief_re_sender,
    )
    m = group_type.objects.filter(megasession=self)
    receiver_correct_guess = Case(
        When(sender_decision_re_receiver=receiver_belief_re_sender,
             then=Value(True)),
        default=Value(False),
        output_field=BooleanField(),
    )
    m.update(
        sender_belief_diff=Abs(
            F('receiver_decision_re_sender') - F('sender_belief_re_receiver')),
    )
    m = group_type.objects.filter(megasession=self)
    m.update(
        has_sender_sent=Case(
            When(~Q(sender_decision_re_receiver=0), then=Value(True)),
            default=Value(False),
            output_field=BooleanField(),
        ),
        receiver_correct_guess=receiver_correct_guess,
        sender_guess_payoff=Case(
            When(sender_belief_diff=0, then=Value(20)),
            When(sender_belief_diff=3, then=Value(10)),
            default=Value(0),
            output_field=IntegerField(),
        ),
    )
    # (snippet truncated: the subqueries above this point are missing; the
    # tail of the receiver_belief_re_sender subquery is kept verbatim)
                    'sender__city'))))).values('receiver_belief')[:1])
    m = m.update(
        sender_decision_re_receiver=sender_decision,
        receiver_decision_re_sender=receiver_decision,
        sender_belief_re_receiver=sender_belief_re_receiver,
        receiver_belief_re_sender=receiver_belief_re_sender,
    )
    m = MegaGroup.objects.filter(megasession__id=session_id)
    receiver_correct_guess = Case(
        When(sender_decision_re_receiver=receiver_belief_re_sender,
             then=Value(True)),
        default=Value(False),
        output_field=BooleanField(),
    )
    m.update(
        sender_belief_diff=Abs(
            F('receiver_decision_re_sender') - F('sender_belief_re_receiver')),
    )
    m = MegaGroup.objects.filter(megasession__id=session_id)
    m = m.update(
        has_sender_sent=Case(
            When(~Q(sender_decision_re_receiver=0), then=Value(True)),
            default=Value(False),
            output_field=BooleanField(),
        ),
        receiver_correct_guess=receiver_correct_guess,
        sender_guess_payoff=Case(
            When(sender_belief_diff=0, then=Value(20)),
            When(sender_belief_diff=3, then=Value(10)),
            default=Value(0),
            output_field=IntegerField(),
        ),
    )
    # TODO: don't forget to get a real obj when
def test_null(self):
    IntegerModel.objects.create()
    obj = IntegerModel.objects.annotate(null_abs=Abs('normal')).first()
    self.assertIsNone(obj.null_abs)
def test_update_ordered_by_inline_m2m_annotation(self):
    foo = Foo.objects.create(target="test")
    Bar.objects.create(foo=foo)
    Bar.objects.order_by(Abs("m2m_foo")).update(x=2)
    self.assertEqual(Bar.objects.get().x, 2)