def get_context_data(self, **kwargs):
    """Assemble dungeon/level context plus a contribution-amount histogram.

    The histogram is omitted (None) when there are no logs to chart.
    """
    contribution_histogram = None
    if self.get_log_count():
        contribution_histogram = {
            'type': 'histogram',
            'width': 1,
            # Contribution amounts are charted in unit-wide bins from 0-100.
            'data': histogram(self.get_queryset(), 'contribution_amount', range(0, 100)),
        }

    context = {
        'dungeon': self.get_dungeon(),
        'level': self.get_level(),
        'report': drop_report(
            self.get_queryset(),
            min_count=0,
            include_currency=True,
            exclude_social_points=True,
            owner_only=True,
        ),
        'contribution_histogram': contribution_histogram,
    }
    # Caller-supplied kwargs win over our defaults.
    context.update(kwargs)
    return super().get_context_data(**context)
def get_context_data(self, **kwargs):
    """Build dashboard context: recent drop summaries, a drop report, and a
    damage histogram (present only when any logs exist).
    """
    all_drops = get_drop_querysets(self.get_queryset())

    # Each category short-circuits to an empty list when that drop type is
    # absent, so the template can iterate each entry unconditionally.
    recent_drops = {
        'items': all_drops['items'].values(
            'item',
            name=F('item__name'),
            icon=F('item__icon'),
        ).annotate(
            count=Sum('quantity')
        ).order_by('-count') if 'items' in all_drops else [],
        'monsters': replace_value_with_choice(
            list(all_drops['monsters'].values(
                name=F('monster__name'),
                icon=F('monster__image_filename'),
                element=F('monster__element'),
                stars=F('grade'),
                is_awakened=F('monster__is_awakened'),
                can_awaken=F('monster__can_awaken'),
            ).annotate(
                count=Count('pk')
            ).order_by('-count')),
            {'element': Monster.ELEMENT_CHOICES},
        ) if 'monsters' in all_drops else [],
        # Consistent with 'items'/'monsters' above: the guard now wraps the
        # whole expression instead of sitting inside list(), so the choice
        # translation is skipped entirely when no rune drops are present.
        'runes': replace_value_with_choice(
            list(all_drops['runes'].values(
                'type',
                'quality',
                'stars',
            ).annotate(
                count=Count('pk')
            ).order_by('-count')),
            {
                'type': RuneInstance.TYPE_CHOICES,
                'quality': RuneInstance.QUALITY_CHOICES,
            },
        ) if 'runes' in all_drops else [],
    }

    if self.get_log_count():
        # Damage bins are 50k wide, rounded outward to whole bins so the
        # extremes are always covered by the chart.
        bin_width = 50000
        damage_stats = self.get_queryset().aggregate(min=Min('damage'), max=Max('damage'))
        bin_start = floor_to_nearest(damage_stats['min'], bin_width)
        bin_end = ceil_to_nearest(damage_stats['max'], bin_width)
        damage_histogram = {
            'type': 'histogram',
            'width': bin_width,
            'data': histogram(self.get_queryset(), 'damage', range(bin_start, bin_end, bin_width)),
        }
    else:
        damage_histogram = None

    context = {
        'dashboard': {
            'recent_drops': recent_drops,
        },
        'report': drop_report(self.get_queryset(), min_count=0),
        'damage_histogram': damage_histogram,
    }
    # Caller-supplied kwargs win over our defaults.
    context.update(kwargs)
    return super().get_context_data(**context)
def get_context_data(self, **kwargs):
    """Dungeon/level context including a total-damage histogram of the logs.

    The histogram is omitted (None) when there are no logs to chart.
    """
    total_damage_histogram = None
    if self.get_log_count():
        # 50k-wide bins, rounded outward so min/max always land in a bin.
        bin_width = 50000
        damage_stats = self.get_queryset().aggregate(
            min=Min('total_damage'), max=Max('total_damage'))
        bin_start = floor_to_nearest(damage_stats['min'], bin_width)
        bin_end = ceil_to_nearest(damage_stats['max'], bin_width)
        total_damage_histogram = {
            'type': 'histogram',
            'width': bin_width,
            'data': histogram(
                self.get_queryset(),
                'total_damage',
                range(bin_start, bin_end, bin_width),
            ),
        }

    context = {
        'dungeon': self.get_dungeon(),
        'level': self.get_level(),
        'report': drop_report(self.get_queryset(), min_count=0),
        'damage_histogram': total_damage_histogram,
    }
    # Caller-supplied kwargs win over our defaults.
    context.update(kwargs)
    return super().get_context_data(**context)
def test_multi_histogram(self):
    """histogram() sliced on gender matches a hand-computed tally per bin."""
    hist = histogram(ShirtSales, 'units', bins=[0, 10, 15], slice_on='gender')

    slice_labels = {'B': 'Boy', 'G': 'Girl'}
    expected = [{'bin': str(edge), 'Boy': 0, 'Girl': 0} for edge in (0, 10, 15)]

    for sale in ShirtSales.objects.all():
        # Pick the bucket by the same [0,10), [10,15), [15,inf) edges.
        if sale.units < 10:
            bucket = expected[0]
        elif sale.units < 15:
            bucket = expected[1]
        else:
            bucket = expected[2]
        label = slice_labels.get(sale.gender)
        if label is not None:
            bucket[label] += 1

    self.assertEqual(list(hist), expected)
def test_histograms_with_zeros(self):
    """Bins with no observations still appear, with zero for every slice."""
    hist = histogram(ShirtSales, 'units',
                     bins=[0, 1, 2, 3, 10, 14, 15, 100, 150],
                     slice_on='gender')

    # Leading (first 3), middle (index 5) and trailing (last 2) empty
    # buckets must all be present as all-zero rows.
    for index, bin_label in [(0, '0'), (1, '1'), (2, '2'),
                             (5, '14'), (7, '100'), (8, '150')]:
        self.assertEqual(hist[index], {'bin': bin_label, 'Boy': 0, 'Girl': 0})
def get(self, request, format=None):
    """Return rating histograms computed from each player's latest record."""
    # One id per player: the most recent (max id) Ratings row.
    last_record_per_user = Ratings.objects.values('player').annotate(
        id=Max("id")).values_list('id', flat=True)
    subrecord = Ratings.objects.filter(id__in=last_record_per_user)

    # 'total' uses its own 0-10000 scale; every component score shares
    # identical 0-100 bins in steps of 5.
    ans = {
        "total": histogram(subrecord, "total", bins=list(range(0, 10000, 250))),
    }
    for component in ("TR", "CN", "CC", "PC", "SA", "RC", "CP"):
        ans[component] = histogram(subrecord, component, bins=list(range(0, 100, 5)))

    return Response(ans)
def test_histogram(self):
    """Un-sliced histogram bin counts match a manual tally of the data."""
    hist = histogram(ShirtSales, 'units', bins=[0, 10, 15])
    expected = [{'bin': '0', 'units': 0},
                {'bin': '10', 'units': 0},
                {'bin': '15', 'units': 0}]

    for sale in ShirtSales.objects.all():
        # Same bucket edges as the histogram: [0,10), [10,15), [15,inf).
        if sale.units < 10:
            bucket = expected[0]
        elif sale.units < 15:
            bucket = expected[1]
        else:
            bucket = expected[2]
        bucket['units'] += 1

    self.assertEqual(hist, expected)
def drop_report(qs, **kwargs):
    """Build a full drop report for a queryset of run logs.

    Returns a dict containing a 'summary' section, an optional 'clear_time'
    histogram (when the log model tracks clear times), and one section per
    drop type found in the logs. Extra kwargs (e.g. min_count) are forwarded
    to the summary and per-drop report builders.
    """
    report_data = {}

    # Get querysets for each possible drop type
    drops = get_drop_querysets(qs)
    report_data['summary'] = get_report_summary(drops, qs.count(), **kwargs)

    # Clear time statistics, if supported by the qs model
    if hasattr(qs.model, 'clear_time'):
        # Beast rift runs count as successful regardless of the success flag.
        successful_runs = qs.filter(
            Q(success=True) | Q(level__dungeon__category=Dungeon.CATEGORY_RIFT_OF_WORLDS_BEASTS))

        if successful_runs.count():
            clear_time_aggs = successful_runs.aggregate(
                std_dev=StdDev(Extract(F('clear_time'), lookup_name='epoch')),
                avg=Avg('clear_time'),
                min=Min('clear_time'),
                max=Max('clear_time'),
            )

            # Use +/- 3 std deviations of clear time avg as bounds for time
            # range in case of extreme outliers skewing chart scale
            min_time = round_timedelta(
                max(
                    clear_time_aggs['min'],
                    clear_time_aggs['avg'] - timedelta(seconds=clear_time_aggs['std_dev'] * 3)),
                CLEAR_TIME_BIN_WIDTH,
                direction='down',
            )
            max_time = round_timedelta(
                min(
                    clear_time_aggs['max'],
                    clear_time_aggs['avg'] + timedelta(seconds=clear_time_aggs['std_dev'] * 3)),
                CLEAR_TIME_BIN_WIDTH,
                direction='up',
            )
            bins = [
                min_time + CLEAR_TIME_BIN_WIDTH * x
                for x in range(0, int((max_time - min_time) / CLEAR_TIME_BIN_WIDTH))
            ]

            # Histogram generates on entire qs, not just successful runs.
            report_data['clear_time'] = {
                'min': str(clear_time_aggs['min']),
                'max': str(clear_time_aggs['max']),
                'avg': str(clear_time_aggs['avg']),
                'chart': {
                    'type': 'histogram',
                    'width': 5,
                    'data': histogram(qs, 'clear_time', bins, slice_on='success'),
                }
            }

    # Individual drop details. Use a distinct loop variable so the run
    # queryset parameter `qs` is not shadowed by the per-drop querysets
    # (the original rebinding was harmless only because `qs` was never
    # used again afterwards).
    for key, drop_qs in drops.items():
        if DROP_TYPES[key]:
            report_data[key] = DROP_TYPES[key](drop_qs, drop_qs.count(), **kwargs)

    return report_data
def get_artifact_report(qs, total_log_count, **kwargs):
    """Summarize artifact drops: element/archetype/quality/main-stat
    occurrence tables, secondary-effect counts, and a max-efficiency
    histogram. Returns None when the queryset is empty.

    kwargs may provide 'min_count' to override the default reporting
    threshold derived from MINIMUM_THRESHOLD * total_log_count.
    """
    if qs.count() == 0:
        return None

    min_count = kwargs.get('min_count', max(1, int(MINIMUM_THRESHOLD * total_log_count)))

    def occurrence_data(queryset, field, choices=None, count_field='pk', order='-count'):
        # Shared pipeline for the 'occurrences' sections below: count rows
        # per distinct `field` value, keep counts above the threshold, and
        # optionally translate choice codes to display values.
        rows = list(
            queryset.values(field).annotate(count=Count(count_field)).filter(
                count__gt=min_count).order_by(order))
        if choices is not None:
            rows = replace_value_with_choice(rows, {field: choices})
        return transform_to_dict(rows)

    # Secondary effect distribution
    # Unable to use database aggregation on an ArrayField without ORM
    # gymnastics, so post-process data in python
    all_effects = qs.annotate(
        flat_effects=Func(F('effects'), function='unnest')).values_list(
        'flat_effects', flat=True)
    effect_counts = Counter(all_effects)

    elemental = qs.filter(slot=Artifact.SLOT_ELEMENTAL)
    archetype = qs.filter(slot=Artifact.SLOT_ARCHETYPE)

    return {
        'element': {
            'type': 'occurrences',
            'total': elemental.count(),
            'data': occurrence_data(elemental, 'element', qs.model.ELEMENT_CHOICES),
        },
        'archetype': {
            'type': 'occurrences',
            'total': archetype.count(),
            'data': occurrence_data(archetype, 'archetype', qs.model.ARCHETYPE_CHOICES),
        },
        'quality': {
            'type': 'occurrences',
            'total': qs.count(),
            'data': occurrence_data(qs, 'quality', qs.model.QUALITY_CHOICES),
        },
        'main_stat': {
            'type': 'occurrences',
            'total': qs.count(),
            # Ordered by stat id rather than count, matching the rune report.
            'data': occurrence_data(qs, 'main_stat', qs.model.STAT_CHOICES,
                                    count_field='main_stat', order='main_stat'),
        },
        'effects': {
            'type': 'occurrences',
            'total': len(all_effects),
            'data': transform_to_dict(
                replace_value_with_choice(
                    sorted(
                        [{'effect': k, 'count': v} for k, v in effect_counts.items()],
                        key=lambda entry: entry['effect']),
                    {'effect': qs.model.EFFECT_CHOICES}),
            ),
        },
        'max_efficiency': {
            'type': 'histogram',
            'width': 5,
            'data': histogram(qs, 'max_efficiency', range(0, 100, 5), slice_on='quality'),
        },
    }
def get_rune_report(qs, total_log_count, **kwargs):
    """Summarize rune drops: stars/type/quality/slot/main-stat/innate-stat
    occurrence tables, substat counts, and max-efficiency/value histograms.
    Returns None when the queryset is empty.

    kwargs may provide 'min_count' to override the default reporting
    threshold derived from MINIMUM_THRESHOLD * total_log_count.
    """
    if qs.count() == 0:
        return None

    min_count = kwargs.get('min_count', max(1, int(MINIMUM_THRESHOLD * total_log_count)))

    def occurrence_data(queryset, field, choices=None, count_field='pk', order='-count'):
        # Shared pipeline for the 'occurrences' sections below: count rows
        # per distinct `field` value, keep counts above the threshold, and
        # optionally translate choice codes to display values.
        rows = list(
            queryset.values(field).annotate(count=Count(count_field)).filter(
                count__gt=min_count).order_by(order))
        if choices is not None:
            rows = replace_value_with_choice(rows, {field: choices})
        return transform_to_dict(rows)

    # Substat distribution
    # Unable to use database aggregation on an ArrayField without ORM
    # gymnastics, so post-process data in python
    all_substats = qs.annotate(
        flat_substats=Func(F('substats'), function='unnest')).values_list(
        'flat_substats', flat=True)
    substat_counts = Counter(all_substats)

    # Sell value ranges, rounded outward to the nearest 1000
    min_value, max_value = qs.aggregate(Min('value'), Max('value')).values()
    min_value = int(floor_to_nearest(min_value, 1000))
    max_value = int(ceil_to_nearest(max_value, 1000))

    return {
        'stars': {
            'type': 'occurrences',
            'total': qs.count(),
            # Grouped on a synthesized label like "6⭐" rather than a model
            # field, so this one stays outside the shared helper.
            'data': transform_to_dict(
                list(
                    qs.values(grade=Concat(Cast('stars', CharField()), Value('⭐'))).annotate(
                        count=Count('pk')).filter(
                        count__gt=min_count).order_by('-count'))),
        },
        'type': {
            'type': 'occurrences',
            'total': qs.count(),
            'data': occurrence_data(qs, 'type', qs.model.TYPE_CHOICES),
        },
        'quality': {
            'type': 'occurrences',
            'total': qs.count(),
            'data': occurrence_data(qs, 'quality', qs.model.QUALITY_CHOICES),
        },
        'slot': {
            'type': 'occurrences',
            'total': qs.count(),
            'data': occurrence_data(qs, 'slot'),
        },
        'main_stat': {
            'type': 'occurrences',
            'total': qs.count(),
            'data': occurrence_data(qs, 'main_stat', qs.model.STAT_CHOICES,
                                    count_field='main_stat', order='main_stat'),
        },
        'slot_2_main_stat': {
            'type': 'occurrences',
            'total': qs.filter(slot=2).count(),
            'data': occurrence_data(qs.filter(slot=2), 'main_stat', qs.model.STAT_CHOICES,
                                    count_field='main_stat', order='main_stat'),
        },
        'slot_4_main_stat': {
            'type': 'occurrences',
            'total': qs.filter(slot=4).count(),
            'data': occurrence_data(qs.filter(slot=4), 'main_stat', qs.model.STAT_CHOICES,
                                    count_field='main_stat', order='main_stat'),
        },
        'slot_6_main_stat': {
            'type': 'occurrences',
            'total': qs.filter(slot=6).count(),
            'data': occurrence_data(qs.filter(slot=6), 'main_stat', qs.model.STAT_CHOICES,
                                    count_field='main_stat', order='main_stat'),
        },
        'innate_stat': {
            'type': 'occurrences',
            'total': qs.count(),
            'data': occurrence_data(qs, 'innate_stat', qs.model.STAT_CHOICES,
                                    order='innate_stat'),
        },
        'substats': {
            'type': 'occurrences',
            'total': len(all_substats),
            'data': transform_to_dict(
                replace_value_with_choice(
                    sorted(
                        [{'substat': k, 'count': v} for k, v in substat_counts.items()],
                        key=lambda entry: entry['substat']),
                    {'substat': qs.model.STAT_CHOICES}),
            ),
        },
        'max_efficiency': {
            'type': 'histogram',
            'width': 5,
            'data': histogram(qs, 'max_efficiency', range(0, 100, 5), slice_on='quality'),
        },
        'value': {
            'type': 'histogram',
            'width': 500,
            'data': histogram(qs, 'value', range(min_value, max_value, 500), slice_on='quality'),
        },
    }
def make_chart(qs, field, groupby=None, logger=None, n_bins=50, cast=None):
    """Build histogram chart context for `field` over queryset `qs`.

    `field` may be a plain model field or a double-underscore JSON path;
    `cast` ('int'/'float') forces a value cast; `groupby` ('resource' or
    'country') slices the histogram. Returns a context dict, or None when
    the field is invalid or there are no non-null values.
    """
    context = {
        'title': title_field(field) + (f' (slice by {groupby})' if groupby else '')
    }

    if cast == 'int':
        cast = IntegerField()
    elif cast == 'float':
        cast = FloatField()
    else:
        cast = None

    if '__' in field:
        # Double-underscore fields are treated as JSON paths; reject any
        # path whose first segment would traverse a model relation instead.
        related_fields = set()
        for f in qs.model._meta.related_objects:
            related_fields.add(f.name)
        for f in qs.model._meta.many_to_many:
            related_fields.add(f.name)
        for f in qs.model._meta.fields:
            if isinstance(f, RelatedField):
                related_fields.add(f.name)
        related_field = field.split('__')[0]
        if related_field in related_fields or '___' in field:
            logger and logger.error(f'use of an invalid field = {field}')
            return
        cast = cast or IntegerField()
        qs = qs.annotate(value=Cast(JSONF(field), cast))
    else:
        if cast:
            qs = qs.annotate(value=Cast(F(field), cast))
        else:
            qs = qs.annotate(value=F(field))

    context['queryset'] = qs
    context['field'] = field
    qs = qs.filter(value__isnull=False)

    # Fix: bail out on an empty queryset BEFORE computing the slice fields.
    # Previously this check ran after get_column_values(), so an empty
    # queryset with a groupby produced an empty `fields` list and a
    # ZeroDivisionError in the bin-count computation below.
    if not qs.exists():
        logger and logger.warning(f'Empty histogram, field = {field}')
        return

    slice_on = None
    if groupby == 'resource':
        slice_on = 'resource__host'
    elif groupby == 'country':
        slice_on = 'country'
    if slice_on:
        values = get_column_values(qs, slice_on, choices='minimum')
        fields = [f for f, v in values]
        # Fewer bins per slice so a stacked chart stays readable.
        n_bins = max(2 * n_bins // len(fields) + 1, 4)
        context['fields'] = fields
        context['slice'] = slice_on

    src = qs.earliest('value').value
    dst = qs.latest('value').value
    bins = make_bins(src=src, dst=dst, n_bins=n_bins, logger=logger, field=field)
    if isinstance(src, datetime):
        context['x_type'] = 'time'
    context['data'] = histogram(qs, 'value', bins=bins, slice_on=slice_on, choices='minimum')
    return context