# Note: `standings` (the preferences section), `TeamStandingsGenerator` and
# `MultiValueChoicePreference` are assumed to be defined or imported elsewhere
# in this module; only the standard Django imports are shown here.
from django.core.exceptions import ValidationError
from django.utils.encoding import force_text
from django.utils.translation import gettext_lazy as _


class TeamStandingsPrecedence(MultiValueChoicePreference):
    help_text = _("Metrics to use to rank teams (see documentation for further details)")
    verbose_name = _("Team standings precedence")
    section = standings
    name = 'team_standings_precedence'
    choices = TeamStandingsGenerator.get_metric_choices()
    nfields = 8
    allow_empty = True
    default = ['wins', 'speaks_avg']

    def validate(self, value):
        super().validate(value)

        # Check that non-repeatable metrics aren't listed twice
        classes = [TeamStandingsGenerator.metric_annotator_classes[metric] for metric in value]
        duplicates = [c for c in classes if c.repeatable is False and classes.count(c) > 1]
        if duplicates:
            # sorted() makes the error message deterministic
            duplicates_str = ", ".join(sorted(set(force_text(c.name) for c in duplicates)))
            raise ValidationError(_("The following metrics can't be listed twice: "
                    "%(duplicates)s") % {'duplicates': duplicates_str})

        # Check that who-beat-whom isn't listed first; guard against an empty
        # list, which allow_empty = True permits
        if value and value[0] in ["wbw", "wbwd"]:
            raise ValidationError(_("Who-beat-whom can't be listed as the first metric"))
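# A minimal usage sketch (not part of the original module) of how the
# validator above behaves. The 'wbw' key and the default metrics are taken
# from the class itself; whether validate() can be called on a bare instance
# like this depends on the preference framework's base class:
#
#     pref = TeamStandingsPrecedence()
#     pref.validate(['wins', 'speaks_avg'])  # passes: valid, unrepeated metrics
#     pref.validate(['wbw', 'wins'])         # raises ValidationError:
#                                            # who-beat-whom can't come first
#     pref.validate([])                      # passes: allow_empty = True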
class TeamStandingsExtraMetrics(MultiValueChoicePreference):
    help_text = _("Metrics to calculate, but not used to rank teams")
    verbose_name = _("Team standings extra metrics")
    section = standings
    name = 'team_standings_extra_metrics'
    choices = TeamStandingsGenerator.get_metric_choices(ranked_only=False)
    nfields = 5
    allow_empty = True
    default = []