def _get_memory_usage(self, pid):
    """Return the unshared ("private") memory of process *pid*, in bytes.

    Reads /proc/<pid>/statm and computes (resident - shared) pages times the
    system page size.  statm is used instead of /proc/<pid>/smaps for
    performance reasons: it is much faster to read, at the cost of slightly
    outdated results.

    Background reading:
      http://www.depesz.com/2012/06/09/how-much-ram-is-postgresql-using/
      http://unix.stackexchange.com/questions/33381/getting-information-about-a-process-memory-usage-from-proc-pid-smaps
      http://rhaas.blogspot.de/2012/01/linux-memory-reporting.html

    Returns 0 when the statm file cannot be read or parsed.
    """
    uss = 0
    statm = None
    statm_file = self.STATM_FILENAME.format(pid)
    try:
        # 'with' guarantees the descriptor is closed even if read() raises,
        # replacing the fragile 'fp and fp.close()' in a finally clause.
        with open(statm_file, 'r') as fp:
            statm = fp.read().strip().split()
        logger.info("calculating memory for process {0}".format(pid))
    except IOError as e:
        # BUG FIX: the original logged self.format(pid), which raises
        # AttributeError inside the handler; log the actual file name.
        logger.warning(
            'Unable to read {0}: {1}, process memory information will be unavailable'.format(statm_file, e))
    if statm and len(statm) >= 3:
        # statm fields (in pages): size resident shared ...
        # unshared = (resident - shared) * page size.
        # int() replaces the Python-2-only long() builtin; arithmetic is
        # identical (Python 2 ints auto-promote).
        uss = (int(statm[1]) - int(statm[2])) * MEM_PAGE_SIZE
    return uss
def _get_memory_usage(self, pid):
    """Return the unshared ("private") memory of process *pid*, in bytes.

    Reads /proc/<pid>/statm and computes (resident - shared) pages times the
    system page size.  statm is used instead of /proc/<pid>/smaps for
    performance reasons: it is much faster, while providing slightly
    outdated results.

    Background reading:
      http://www.depesz.com/2012/06/09/how-much-ram-is-postgresql-using/
      http://unix.stackexchange.com/questions/33381/getting-information-about-a-process-memory-usage-from-proc-pid-smaps
      http://rhaas.blogspot.de/2012/01/linux-memory-reporting.html

    Returns 0 when the statm file cannot be read or parsed.
    """
    uss = 0
    statm = None
    statm_file = self.STATM_FILENAME.format(pid)
    try:
        # context manager replaces the fragile 'fp and fp.close()' in a
        # finally clause and closes the file even if read() raises.
        with open(statm_file, 'r') as fp:
            statm = fp.read().strip().split()
        logger.info("calculating memory for process {0}".format(pid))
    except IOError as e:
        logger.warning(
            'Unable to read {0}: {1}, process memory information will be unavailable'.format(statm_file, e))
    if statm and len(statm) >= 3:
        # statm fields (in pages): size resident shared ...
        # unshared = (resident - shared) * page size.
        # int() replaces the Python-2-only long() builtin.
        uss = (int(statm[1]) - int(statm[2])) * MEM_PAGE_SIZE
    return uss
def get_subprocesses_pid(self):
    """Refresh self.pids with the PIDs of the postmaster's direct children.

    Runs `ps -o pid --ppid <postmaster_pid>` and parses the output.
    On command failure self.pids is left as an empty list.
    """
    ppid = self.postmaster_pid
    result = self.exec_command_with_output('ps -o pid --ppid {0} --noheaders'.format(ppid))
    if result[0] != 0:
        logger.info("Couldn't determine the pid of subprocesses for {0}".format(ppid))
        self.pids = []
        # BUG FIX: without this return, the empty list was immediately
        # overwritten below by parsing the failed command's output.
        return
    self.pids = [int(x) for x in result[1].split()]
def exec_command_with_output(cmdline):
    """ Execute comand (including shell ones), return a tuple with error code (1 element) and output (rest) """
    # NOTE(security): cmdline is passed to the shell verbatim; callers must
    # never interpolate untrusted input into it.
    proc = subprocess.Popen(cmdline, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
    # BUG FIX: proc.wait() with both stdout and stderr piped can deadlock
    # when the child fills a pipe buffer before exiting; communicate()
    # drains both pipes while waiting for the process.
    stdout, _ = proc.communicate()
    ret = proc.returncode
    if ret != 0:
        logger.info('The command {cmd} returned a non-zero exit code'.format(cmd=cmdline))
    return ret, stdout.strip()
def exec_command_with_output(cmdline):
    """ Execute comand (including shell ones), return a tuple with error code (1 element) and output (rest) """
    # NOTE(security): cmdline goes to the shell unescaped; do not build it
    # from untrusted input.
    proc = subprocess.Popen(cmdline, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
    # BUG FIX: replace proc.wait() + proc.stdout.read() with communicate():
    # wait() with piped stdout/stderr can deadlock once a pipe buffer fills.
    out, _ = proc.communicate()
    ret = proc.returncode
    if ret != 0:
        logger.info(
            'The command {cmd} returned a non-zero exit code'.format(
                cmd=cmdline))
    return ret, out.strip()
def refresh(self):
    """ Reads data from /proc and PostgreSQL stats """
    # Collects one monitoring sample: per-process /proc data for every
    # postmaster child, merged with the matching pg_stat_activity row,
    # then hands the combined rows to _do_refresh.
    result = []
    # fetch up-to-date list of subprocess PIDs
    self.get_subprocesses_pid()
    try:
        if not self.pgcon:
            # if we've lost the connection, try to reconnect and
            # re-initialize all connection invariants
            self.pgcon, self.postmaster_pid = self.reconnect()
            self.connection_pid = self.pgcon.get_backend_pid()
            self.max_connections = self._get_max_connections()
            self.dbver = dbversion_as_float(self.pgcon)
            self.server_version = self.pgcon.get_parameter_status(
                'server_version')
        stat_data = self._read_pg_stat_activity()
    except psycopg2.OperationalError as e:
        logger.info("failed to query the server: {}".format(e))
        # drop the broken connection so the next refresh attempts a
        # reconnect, and publish an empty sample in the meantime
        if self.pgcon and not self.pgcon.closed:
            self.pgcon.close()
        self.pgcon = None
        self._do_refresh([])
        return
    logger.info("new refresh round")
    for pid in self.pids:
        # skip our own monitoring backend
        if pid == self.connection_pid:
            continue
        is_backend = pid in stat_data
        # a backend counts as active when its query is not 'idle', or when
        # it is explicitly pinned via always_track_pids
        is_active = is_backend and (stat_data[pid]['query'] != 'idle' or pid in self.always_track_pids)
        result_row = {}
        # for each pid, get hash row from /proc/
        proc_data = self._read_proc(pid, is_backend, is_active)
        if proc_data:
            result_row.update(proc_data)
        if stat_data and pid in stat_data:
            # ditto for the pg_stat_activity
            result_row.update(stat_data[pid])
        # result is not empty - add it to the list of current rows
        if result_row:
            result.append(result_row)
    # and refresh the rows with this data
    self._do_refresh(result)
def refresh(self):
    """Collect one sample from /proc and PostgreSQL stats and display it."""
    rows = []
    # bring the cached list of postmaster child PIDs up to date first
    self.get_subprocesses_pid()
    try:
        if not self.pgcon:
            # the connection was lost: reconnect, then re-establish every
            # per-connection invariant before querying the server
            self.pgcon, self.postmaster_pid = self.reconnect()
            self.connection_pid = self.pgcon.get_backend_pid()
            self.max_connections = self._get_max_connections()
            self.dbver = dbversion_as_float(self.pgcon)
            self.server_version = self.pgcon.get_parameter_status('server_version')
        stat_data = self._read_pg_stat_activity()
    except psycopg2.OperationalError as e:
        logger.info("failed to query the server: {}".format(e))
        # discard the dead connection (a later refresh will reconnect)
        # and publish an empty sample so the display stays responsive
        if self.pgcon and not self.pgcon.closed:
            self.pgcon.close()
        self.pgcon = None
        self._do_refresh([])
        return
    logger.info("new refresh round")
    for pid in self.pids:
        # never report the monitoring connection itself
        if pid == self.connection_pid:
            continue
        is_backend = pid in stat_data
        is_active = is_backend and (stat_data[pid]['query'] != 'idle' or pid in self.always_track_pids)
        # merge per-process /proc data with the pg_stat_activity entry
        row = {}
        proc_info = self._read_proc(pid, is_backend, is_active)
        if proc_info:
            row.update(proc_info)
        if stat_data and pid in stat_data:
            row.update(stat_data[pid])
        # only keep rows for which at least one source produced data
        if row:
            rows.append(row)
    self._do_refresh(rows)