def _insert_statistics(self, exception=False):
    """
    Collect statistic information about the current request and store it
    in the ProcessInfo / SiteStatistics models.

    exception -- if True, count this request as one that raised an
                 unhandled exception (increments exception_count).
    """
    if settings.DEBUG:
        # connection.queries is only collected with DEBUG=True; count just
        # the queries issued since this request started.
        self.query_count = len(connection.queries) - self.old_queries
    else:
        self.query_count = 0

    # Snapshot of process information (Pid/Threads/VmPeak/VmRSS keys).
    p = dict(process_information())
    self.pid = p["Pid"]
    self.threads = p["Threads"]
    self.vmpeak = p["VmPeak"]
    self.memory = p["VmRSS"]

    # Calculate user/system processor times only for this request:
    user_time, system_time = get_processor_times()
    self.user_time = user_time - self.start_user_time
    self.system_time = system_time - self.start_system_time

    self.response_time = self.own_start_time - self.start_time
    self.overall_time = self.own_start_time - overall_start_time

    process_info, process_created = ProcessInfo.objects.get_or_create(
        pid=self.pid,
        defaults={
            # First sighting of this PID: every aggregate starts at the
            # current request's values.
            "db_query_count_min": self.query_count,
            "db_query_count_max": self.query_count,
            "db_query_count_avg": self.query_count,
            "response_time_min": self.response_time,
            "response_time_max": self.response_time,
            "response_time_avg": self.response_time,
            "response_time_sum": self.response_time,
            "threads_avg": self.threads,
            "threads_min": self.threads,
            "threads_max": self.threads,
            "user_time_min": self.user_time,
            "user_time_max": self.user_time,
            "user_time_total": self.user_time,
            "system_time_total": self.system_time,
            "system_time_min": self.system_time,
            "system_time_max": self.system_time,
            "vm_peak_min": self.vmpeak,
            "vm_peak_max": self.vmpeak,
            "vm_peak_avg": self.vmpeak,
            "memory_min": self.memory,
            "memory_max": self.memory,
            "memory_avg": self.memory,
        }
    )
    if exception:
        process_info.exception_count += 1

    if not process_created:
        # Existing process entry: fold this request into the aggregates.
        process_info.request_count += 1
        request_count = process_info.request_count
        if settings.DEBUG:
            process_info.db_query_count_min = min(process_info.db_query_count_min, self.query_count)
            process_info.db_query_count_max = max(process_info.db_query_count_max, self.query_count)
            process_info.db_query_count_avg = average(
                process_info.db_query_count_avg, self.query_count, request_count
            )
        process_info.response_time_min = min(process_info.response_time_min, self.response_time)
        process_info.response_time_max = max(process_info.response_time_max, self.response_time)
        process_info.response_time_avg = average(
            process_info.response_time_avg, self.response_time, request_count
        )
        process_info.response_time_sum += self.response_time

        process_info.threads_min = min(process_info.threads_min, self.threads)
        process_info.threads_max = max(process_info.threads_max, self.threads)
        process_info.threads_avg = average(
            process_info.threads_avg, self.threads, request_count
        )

        process_info.user_time_min = min(process_info.user_time_min, self.user_time)
        # BUGFIX: original compared against user_time_MIN here, which could
        # silently lower the stored maximum.
        process_info.user_time_max = max(process_info.user_time_max, self.user_time)
        process_info.user_time_total += self.user_time

        process_info.system_time_min = min(process_info.system_time_min, self.system_time)
        # BUGFIX: original compared against system_time_MIN here, same defect.
        process_info.system_time_max = max(process_info.system_time_max, self.system_time)
        process_info.system_time_total += self.system_time

        process_info.vm_peak_min = min(process_info.vm_peak_min, self.vmpeak)
        process_info.vm_peak_max = max(process_info.vm_peak_max, self.vmpeak)
        process_info.vm_peak_avg = average(
            process_info.vm_peak_avg, self.vmpeak, request_count
        )
        process_info.memory_min = min(process_info.memory_min, self.memory)
        process_info.memory_max = max(process_info.memory_max, self.memory)
        process_info.memory_avg = average(
            process_info.memory_avg, self.memory, request_count
        )
    process_info.save()

    current_site = Site.objects.get_current()
    site_stats, created = SiteStatistics.objects.get_or_create(site=current_site)
    if created or process_created:
        if not created and process_created:
            # Known site, but a new worker process was spawned.
            site_stats.process_spawn += 1
        site_stats.update_informations()
        site_stats.save()
def changelist_view(self, request, extra_context=None):
    """
    Build aggregated per-site process statistics and pass them to the
    admin changelist template via extra_context.
    """
    self.request = request  # work-a-round for https://code.djangoproject.com/ticket/13659

    # Accumulators across all SiteStatistics rows:
    site_count = 0
    first_start_time = None
    process_count_current = 0
    process_count_max = 0
    process_spawn = 0
    process_count_avg = 0.0
    life_time_values = []
    request_count = 0
    exception_count = 0
    memory_min_avg = 0.0
    memory_avg = 0.0
    memory_max_avg = 0.0
    vm_peak_min_avg = 0.0
    vm_peak_max_avg = 0.0
    vm_peak_avg = 0.0
    threads_current = 0
    threads_min = 99999
    threads_max = 0
    threads_avg = None
    response_time_min_avg = None
    response_time_max_avg = None
    response_time_avg = None
    response_time_sum = 0.0
    user_time_total = 0.0  # total user mode time
    system_time_total = 0.0  # total system mode time

    # Per-site aggregate dicts, keyed by Site instance.
    self.aggregate_data = {}

    queryset = SiteStatistics.objects.all()
    for site_stats in queryset:
        site_count += 1
        # Track the earliest start time over all sites.
        if first_start_time is None or site_stats.start_time < first_start_time:
            first_start_time = site_stats.start_time

        living_pids = site_stats.update_informations()
        site_stats.save()

        for pid in living_pids:
            try:
                p = dict(process_information(pid))
            except IOError:
                # Process dead -> mark as dead
                process = ProcessInfo.objects.get(pid=pid)
                process.alive = False
                process.save()
                continue
            threads_current += p["Threads"]

        living_process_count = len(living_pids)
        process_count_current += living_process_count
        process_count_max += site_stats.process_count_max
        process_spawn += site_stats.process_spawn
        process_count_avg += site_stats.process_count_avg

        site = site_stats.site

        # Collect process life times (lastupdate - start) for this site.
        processes = ProcessInfo.objects.filter(site=site).only("start_time", "lastupdate_time")
        for process in processes:
            life_time = process.lastupdate_time - process.start_time
            life_time_values.append(life_time)

        # DB-side aggregation; result keys follow Django's
        # "<field>__<aggregate>" naming (e.g. "memory_min__avg").
        data = ProcessInfo.objects.filter(site=site).aggregate(
            # VmRSS
            Avg("memory_min"), Avg("memory_avg"), Avg("memory_max"),
            # VmPeak
            Avg("vm_peak_min"), Avg("vm_peak_avg"), Avg("vm_peak_max"),
            Sum("request_count"), Sum("exception_count"),
            Min("threads_min"), Avg("threads_avg"), Max("threads_max"),
            Avg("response_time_min"), Avg("response_time_avg"), Avg("response_time_max"),
            Sum("response_time_sum"),
            Sum("user_time_total"),  # total user mode time
            Sum("system_time_total"),  # total system mode time
        )
        data["living_process_count"] = living_process_count
        self.aggregate_data[site] = data

        # NOTE(review): "or 1" turns a None/0 sum into 1 — presumably to
        # avoid later division by zero; confirm intent.
        request_count += data["request_count__sum"] or 1
        exception_count += data["exception_count__sum"] or 0

        # NOTE(review): these multiply by the RUNNING TOTAL of
        # process_count_avg accumulated over all sites so far — looks like
        # it should be site_stats.process_count_avg; verify.
        # VmRSS
        memory_min_avg += (data["memory_min__avg"] or 0) * process_count_avg
        memory_avg += (data["memory_avg__avg"] or 0) * process_count_avg
        memory_max_avg += (data["memory_max__avg"] or 0) * process_count_avg
        # VmPeak
        vm_peak_min_avg += (data["vm_peak_min__avg"] or 0) * process_count_avg
        vm_peak_avg += (data["vm_peak_avg__avg"] or 0) * process_count_avg
        vm_peak_max_avg += (data["vm_peak_max__avg"] or 0) * process_count_avg

        threads_min = min([threads_min, data["threads_min__min"] or 9999])
        threads_max = max([threads_max, data["threads_max__max"] or 1])
        threads_avg = average(
            threads_avg, data["threads_avg__avg"] or 1, site_count
        )

        response_time_min_avg = average(
            response_time_min_avg, data["response_time_min__avg"] or 0, site_count
        )
        response_time_avg = average(
            response_time_avg, data["response_time_avg__avg"] or 0, site_count
        )
        response_time_max_avg = average(
            response_time_max_avg, data["response_time_max__avg"] or 0, site_count
        )
        response_time_sum += data["response_time_sum__sum"] or 0
        user_time_total += data["user_time_total__sum"] or 0  # total user mode time
        system_time_total += data["system_time_total__sum"] or 0  # total system mode time

    # Calculate the process life times
    # timedelta.total_seconds() is new in Python 2.7
    if not life_time_values:
        # First request with empty data
        life_time_min = 0
        life_time_max = 0
        life_time_avg = 0
    else:
        life_time_values = [datetime2float(td) for td in life_time_values]
        life_time_min = min(life_time_values)
        life_time_max = max(life_time_values)
        life_time_avg = sum(life_time_values) / len(life_time_values)

    # get information from /proc/meminfo
    meminfo_dict = dict(meminfo())
    swap_used = meminfo_dict["SwapTotal"] - meminfo_dict["SwapFree"]
    mem_free = meminfo_dict["MemFree"] + meminfo_dict["Buffers"] + meminfo_dict["Cached"]
    mem_used = meminfo_dict["MemTotal"] - mem_free

    # information from /proc/uptime
    updatetime = uptime_infomation()

    swap_total = meminfo_dict["SwapTotal"]
    if swap_total > 0:
        swap_perc = float(swap_used) / swap_total * 100
    else:
        # e.g. no SWAP used
        swap_perc = 0

    extra_context = {
        "site_count": site_count,
        "first_start_time": first_start_time,
        "process_count_current": process_count_current,
        "process_spawn": process_spawn,
        "process_count_max": process_count_max,
        "process_count_avg": process_count_avg,
        "request_count": request_count,
        "exception_count": exception_count,
        "memory_min_avg": memory_min_avg,
        "memory_max_avg": memory_max_avg,
        "memory_avg": memory_avg,
        "vm_peak_min_avg": vm_peak_min_avg,
        "vm_peak_max_avg": vm_peak_max_avg,
        "vm_peak_avg": vm_peak_avg,
        "threads_current": threads_current,
        "threads_min": threads_min,
        "threads_avg": threads_avg,
        "threads_max": threads_max,
        "response_time_min_avg": human_duration(response_time_min_avg),
        "response_time_max_avg": human_duration(response_time_max_avg),
        "response_time_avg": human_duration(response_time_avg),
        "response_time_sum": human_duration(response_time_sum),
        "user_time_total": human_duration(user_time_total),  # total user mode time
        "system_time_total": human_duration(system_time_total),  # total system mode time
        "processor_time": human_duration(user_time_total + system_time_total),
        # NOTE(review): raises ZeroDivisionError when response_time_sum is
        # 0.0 (e.g. no requests recorded yet) — confirm this can't happen.
        "loads": (user_time_total + system_time_total) / response_time_sum * 100,
        "version_string": VERSION_STRING,
        "life_time_min": human_duration(life_time_min),
        "life_time_max": human_duration(life_time_max),
        "life_time_avg": human_duration(life_time_avg),
        "mem_used": mem_used,
        "mem_perc": float(mem_used) / meminfo_dict["MemTotal"] * 100,
        "mem_total": meminfo_dict["MemTotal"],
        "swap_used": swap_used,
        "swap_perc": swap_perc,
        "swap_total": swap_total,
        "updatetime": timesince2(updatetime),
        "script_filename": self.request.META.get("SCRIPT_FILENAME", "???"),
    }
    extra_context.update(STATIC_INFORMATIONS)
    try:
        extra_context["loadavg"] = os.getloadavg()
    except OSError, err:  # Python 2 except syntax; file appears to be py2
        extra_context["loadavg_err"] = "[Error: %s]" % err
    # NOTE(review): no return statement visible in this chunk — the
    # super().changelist_view(...) call is presumably outside this view.
def _insert_statistics(self, exception=False):
    """
    Collect statistic information about the current request, store it in
    the ProcessInfo / SiteStatistics models and prune old ProcessInfo rows.

    exception -- if True, count this request as one that raised an
                 unhandled exception (increments exception_count).
    """
    if settings.DEBUG:
        # connection.queries is only collected with DEBUG=True; count just
        # the queries issued since this request started.
        self.query_count = len(connection.queries) - self.old_queries
    else:
        self.query_count = 0

    # Snapshot of process information (Pid/Threads/VmPeak/VmRSS keys).
    p = dict(process_information())
    self.pid = p["Pid"]
    self.threads = p["Threads"]
    self.vmpeak = p["VmPeak"]
    self.memory = p["VmRSS"]

    # Calculate user/system processor times only for this request:
    user_time, system_time = get_processor_times()
    self.user_time = user_time - self.start_user_time
    self.system_time = system_time - self.start_system_time

    self.response_time = self.own_start_time - self.start_time
    self.overall_time = self.own_start_time - overall_start_time

    process_info, process_created = ProcessInfo.objects.get_or_create(
        pid=self.pid,
        defaults={
            # First sighting of this PID: every aggregate starts at the
            # current request's values.
            "db_query_count_min": self.query_count,
            "db_query_count_max": self.query_count,
            "db_query_count_avg": self.query_count,
            "response_time_min": self.response_time,
            "response_time_max": self.response_time,
            "response_time_avg": self.response_time,
            "response_time_sum": self.response_time,
            "threads_avg": self.threads,
            "threads_min": self.threads,
            "threads_max": self.threads,
            "user_time_min": self.user_time,
            "user_time_max": self.user_time,
            "user_time_total": self.user_time,
            "system_time_total": self.system_time,
            "system_time_min": self.system_time,
            "system_time_max": self.system_time,
            "vm_peak_min": self.vmpeak,
            "vm_peak_max": self.vmpeak,
            "vm_peak_avg": self.vmpeak,
            "memory_min": self.memory,
            "memory_max": self.memory,
            "memory_avg": self.memory,
        }
    )
    if exception:
        process_info.exception_count += 1

    if not process_created:
        # Existing process entry: fold this request into the aggregates.
        process_info.request_count += 1
        request_count = process_info.request_count
        if settings.DEBUG:
            process_info.db_query_count_min = min(process_info.db_query_count_min, self.query_count)
            process_info.db_query_count_max = max(process_info.db_query_count_max, self.query_count)
            process_info.db_query_count_avg = average(
                process_info.db_query_count_avg, self.query_count, request_count
            )
        process_info.response_time_min = min(process_info.response_time_min, self.response_time)
        process_info.response_time_max = max(process_info.response_time_max, self.response_time)
        process_info.response_time_avg = average(
            process_info.response_time_avg, self.response_time, request_count
        )
        process_info.response_time_sum += self.response_time

        process_info.threads_min = min(process_info.threads_min, self.threads)
        process_info.threads_max = max(process_info.threads_max, self.threads)
        process_info.threads_avg = average(
            process_info.threads_avg, self.threads, request_count
        )

        process_info.user_time_min = min(process_info.user_time_min, self.user_time)
        # BUGFIX: original compared against user_time_MIN here, which could
        # silently lower the stored maximum.
        process_info.user_time_max = max(process_info.user_time_max, self.user_time)
        process_info.user_time_total += self.user_time

        process_info.system_time_min = min(process_info.system_time_min, self.system_time)
        # BUGFIX: original compared against system_time_MIN here, same defect.
        process_info.system_time_max = max(process_info.system_time_max, self.system_time)
        process_info.system_time_total += self.system_time

        process_info.vm_peak_min = min(process_info.vm_peak_min, self.vmpeak)
        process_info.vm_peak_max = max(process_info.vm_peak_max, self.vmpeak)
        process_info.vm_peak_avg = average(
            process_info.vm_peak_avg, self.vmpeak, request_count
        )
        process_info.memory_min = min(process_info.memory_min, self.memory)
        process_info.memory_max = max(process_info.memory_max, self.memory)
        process_info.memory_avg = average(
            process_info.memory_avg, self.memory, request_count
        )
    process_info.save()

    current_site = Site.objects.get_current()
    site_stats, created = SiteStatistics.objects.get_or_create(site=current_site)
    if created or process_created:
        if not created and process_created:
            # Known site, but a new worker process was spawned.
            site_stats.process_spawn += 1
        site_stats.update_informations()
        site_stats.save()

    # Auto cleanup ProcessInfo table to protect against overloading:
    # keep only the newest MAX_PROCESSINFO_COUNT entries.
    queryset = ProcessInfo.objects.order_by('-lastupdate_time')
    max_count = settings.PROCESSINFO.MAX_PROCESSINFO_COUNT
    ids = tuple(queryset[max_count:].values_list('pk', flat=True))
    if ids:
        queryset.filter(pk__in=ids).delete()