def aggregate_stats(user, channel, from_, to_, level, stats=('volume', 'latency')):
    data = {}
    for a in stats:
        data[a] = []
    by_ts = {}
    for stat in ServiceChannelStats.objects.by_time_span(user,
                                                         channel,
                                                         start_time=from_,
                                                         end_time=to_,
                                                         level=level):
        by_ts[stat.time_slot] = stat
    counts = defaultdict(int)
    for slot in gen_timeslots(from_, to_, level):
        for stat in stats:
            stat_obj = by_ts.get(slot, None)
            if stat_obj:
                value = getattr(
                    stat_obj, 'average_latency' if stat == 'latency' else stat)
            else:
                value = 0
            data[stat].append([timeslot_to_timestamp_ms(slot), value])
            counts[stat] += value
    return data, counts
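
# Hypothetical, self-contained sketch (not part of the module above): it mimics the
# shape aggregate_stats() produces -- one FLOT series per stat name, each series a
# list of [timestamp_ms, value] pairs, plus a per-stat running total. The helper name
# _sketch_aggregate and the literal slots/values below are invented for illustration.
from collections import defaultdict

def _sketch_aggregate(slots_ms, values_by_slot, stats=('volume', 'latency')):
    data = {stat: [] for stat in stats}
    counts = defaultdict(int)
    for ts in slots_ms:
        for stat in stats:
            value = values_by_slot.get(ts, {}).get(stat, 0)   # 0 when a slot has no stats doc
            data[stat].append([ts, value])
            counts[stat] += value
    return data, counts

# Example: two hourly slots, the second one empty.
_sketch_aggregate(
    [1356998400000, 1357002000000],
    {1356998400000: {'volume': 12, 'latency': 34.5}})
# -> ({'volume': [[1356998400000, 12], [1357002000000, 0]],
#      'latency': [[1356998400000, 34.5], [1357002000000, 0]]},
#     {'volume': 12, 'latency': 34.5})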

def get_time_data(groups, y_axis):
    total_counts = defaultdict(int)
    total_items = defaultdict(int)
    data = defaultdict(list)
    for slot in gen_timeslots(from_ts, to_ts):
        timestamp = timeslot_to_timestamp_ms(slot)
        features_data = groups.get(slot, {})
        for feature in y_axis:
            feature_key = get_feature_key(feature)
            if features_data.get(feature_key):
                count = _get_count(features_data[feature_key])
                total_counts[feature_key] += count
                total_items[feature_key] += 1
                data[feature_key].append([timestamp, count])
            else:
                data[feature_key].append([timestamp, 0])

    if plot_type == 'response-time':
        # return average as result
        result_counts = defaultdict(float)
        for key, value in total_counts.iteritems():
            if total_items.get(key):
                result_counts[key] = round(value / total_items[key], 2)
            else:
                result_counts[key] = 0
    else:
        result_counts = total_counts
    return data, result_counts
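
# Hypothetical sketch of the 'response-time' branch above: totals are divided by the
# number of time slots that actually carried data (total_items), not by every slot in
# the requested range, so empty slots do not drag the average down. The key name and
# numbers are invented for illustration.
from collections import defaultdict

total_counts = {'twitter': 90.0}   # summed response times across non-empty slots
total_items = {'twitter': 3}       # number of non-empty slots
result_counts = defaultdict(float)
for key, value in total_counts.items():
    result_counts[key] = round(value / total_items[key], 2) if total_items.get(key) else 0
# result_counts == {'twitter': 30.0}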

def _get_data(int_id):
    data = []
    for slot in gen_timeslots(from_dt, to_dt, level):
        timestamp = timeslot_to_timestamp_ms(slot)
        count = ts_counts.get(slot, 0)
        data.append((timestamp, count))
    return data

def _get_performance_stats(user, channel, from_, to_, level, stats_type):
    """ Return list of items for Performance stats graph """
    if not isinstance(stats_type, list):
        raise RuntimeError('stats_type should be an array')
    result = []
    for stype in stats_type:
        if stype not in ['number_of_posts',
                         'number_of_actionable_posts',
                         'number_of_impressions',
                         'number_of_clicks',
                         'number_of_rejected_posts']:
            raise RuntimeError("unsupported stats_type %s" % stype)
        values = _get_channel_stats_values(user, channel, from_, to_, level, stype)
        data = []
        count = 0
        for slot in gen_timeslots(from_, to_, level):
            value = values.get(slot, 0)
            data.append([timeslot_to_timestamp_ms(slot), value])
            count += value
        result.append(dict(data=data,
                           label=stype.split("_")[2],
                           count=count))
    return jsonify(ok=True, list=result, level=level)
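
# Small illustration of the label derivation above: the third underscore-separated
# token of the stats_type name becomes the series label. The literal value is just an
# example.
stype = 'number_of_rejected_posts'
label = stype.split("_")[2]   # 'rejected'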

def _get_data(from_dt, to_dt, level, pairs, stat_type):
    count = len(pairs)
    date_counts = defaultdict(int)
    total = 0
    for p in pairs:
        # p[0] - time slot
        # p[1] - increment
        date_counts[p[0]] += p[1]
        total += p[1]
    data = []
    for slot in gen_timeslots(from_dt, to_dt, level):
        js_time_stamp = timeslot_to_timestamp_ms(slot)
        data.append((js_time_stamp, date_counts[slot]))
    if stat_type == 'clicks':
        count = total
    return count, data
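
# Hypothetical illustration of the (time_slot, increment) aggregation in _get_data():
# repeated slots are collapsed into per-slot sums, and for 'clicks' the count becomes
# the grand total of increments rather than the number of raw pairs. Values are made up.
from collections import defaultdict

pairs = [(101, 2), (101, 3), (102, 1)]   # (time_slot, increment)
date_counts = defaultdict(int)
total = 0
for slot, inc in pairs:
    date_counts[slot] += inc
    total += inc
# date_counts == {101: 5, 102: 1}; len(pairs) == 3; total == 6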

def get_time_data(self, groups, y_axis):
    """ Return data formatted in a FLOT specific format;
        e.g. [[time, count], [time, count]]
        so that we can use it for time plots
    """
    real_counts = defaultdict(int)
    # We need to actually count the response volume across this data, not timeslots,
    # for an accurate average over response time
    for feature in y_axis:
        feature_key = self.get_feature_key(feature)
        for _, value in groups.iteritems():
            if feature_key in value:
                real_counts[feature_key] += value[feature_key].get('rv', 0)

    total_counts = defaultdict(int)
    total_items = defaultdict(int)
    data = defaultdict(list)
    for slot in gen_timeslots(self.from_ts, self.to_ts):
        timestamp = timeslot_to_timestamp_ms(slot)
        features_data = groups.get(slot, {})
        for feature in y_axis:
            feature_key = self.get_feature_key(feature)
            if features_data.get(feature_key):
                count = features_data[feature_key].get('count', 0)
                total_counts[feature_key] += count * features_data[feature_key].get('rv', 1)
                total_items[feature_key] += 1
                data[feature_key].append([timestamp, count])
            else:
                data[feature_key].append([timestamp, 0])

    result_counts = defaultdict(float)
    for key, value in total_counts.iteritems():
        if total_items.get(key):
            if real_counts[key]:
                result_counts[key] = round(value / real_counts[key], 2)
            else:
                result_counts[key] = 0
        else:
            result_counts[key] = 0
    return data, result_counts, total_items
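
# Hypothetical sketch of the weighting used above: each slot's count is weighted by its
# response volume ('rv'), and the average divides by the total response volume
# (real_counts), not by the number of slots. The numbers are invented for illustration.
slots = [{'count': 10, 'rv': 1}, {'count': 30, 'rv': 3}]
weighted_sum = sum(s['count'] * s.get('rv', 1) for s in slots)   # 10*1 + 30*3 = 100
total_rv = sum(s.get('rv', 0) for s in slots)                    # 4
average = round(float(weighted_sum) / total_rv, 2) if total_rv else 0
# average == 25.0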

def get_time_data(self, groups, y_axis):
    """ Return data formatted in a FLOT specific format;
        e.g. [[time, count], [time, count]]
        so that we can use it for time plots
    """
    total_counts = defaultdict(int)
    total_items = defaultdict(int)
    data = defaultdict(list)
    for slot in gen_timeslots(self.from_ts, self.to_ts):
        timestamp = timeslot_to_timestamp_ms(slot)
        features_data = groups.get(slot, {})
        for feature in y_axis:
            feature_key = self.get_feature_key(feature)
            if features_data.get(feature_key):
                count = features_data[feature_key].get('count', 0)
                total_counts[feature_key] += count
                total_items[feature_key] += 1
                data[feature_key].append([timestamp, count])
            else:
                data[feature_key].append([timestamp, 0])
    return data, total_counts, total_items

    for resp in Response.objects.find_by_user(user,
                                              channel=channel,
                                              post_date__gte=from_dt,
                                              post_date__lt=to_dt,
                                              punks__in=data['terms'],
                                              intention_name__in=data['intentions']):
        for punk in resp.punks:
            if punk in data['terms']:
                slot = datetime_to_timeslot(resp.post_date, level=level)
                cache[(punk, resp.intention_name)]['count'] += 1
                cache[(punk, resp.intention_name)]['slots'][slot] += 1

    l = []
    for ((punk, intention_name), stat) in cache.items():
        data = [(timeslot_to_timestamp_ms(ts), count)
                for (ts, count) in sorted(stat['slots'].items())]
        l.append({'count': stat['count'],
                  'label': '%s||%s||%s' % (punk, stat['count'], intention_name),
                  'data': data})
    return jsonify(ok=True, list=l)


@app.route('/performance/trends2/json', methods=['POST'])
@login_required()
def performance_trends_by_responses(user):
    data = request.json
    if data is None:
        raise abort(415)