def list(app):
    """Return the list of pgBackRest backups for the configured stanza.

    Runs ``pgbackrest --output json info`` and normalizes each backup into a
    dict with size, type, stop time, label and tablespace details. Returns an
    empty list when the info command fails (best effort).
    """
    # NOTE: shadows the builtin ``list`` — name kept for plugin API
    # compatibility.
    cmd = base_cmd(app.config) + ['--output', 'json', 'info']
    logger.debug(' '.join(cmd))
    (rc, out, err) = exec_command(cmd)
    if rc != 0:
        # Best effort: if pgBackRest cannot report, act as "no backups".
        return []
    # The output is a list of stanzas, we have specified the stanza so there
    # shall be only one element.
    info = json.loads(out)[0]
    normalized = []
    # Tablespace lines look like: "name (oid) => /path/to/location"
    tbspat = re.compile(r'(\S+) \((\d+)\) => (.+)$')
    for b in info['backup']:
        backup = {
            'db_size': b['info']['size'],
            'backup_size': b['info']['delta'],
            'stored_db_size': b['info']['repository']['size'],
            'stored_backup_size': b['info']['repository']['delta'],
            'type': b['type'],
            'stop_time': b['timestamp']['stop'],
            'set': b['label'],
            'reference': b['reference']
        }
        # To get the tablespaces details, run a per-set (non-JSON) info
        # command and scrape its text output.
        cmd = base_cmd(app.config) + ['--set', b['label'], 'info']
        (rc, out, err) = exec_command(cmd)
        if rc != 0:
            logger.error("Could not get set info for {}".format(b['label']))
            logger.error('\n'.join(err))
            # None (not []) signals "unknown", distinct from "no tablespaces".
            backup['tablespaces'] = None
        else:
            backup['tablespaces'] = []
            # ``start`` flags that the "tablespaces:" section header has been
            # seen; only lines after it are matched against ``tbspat``.
            start = 0
            for l in out.splitlines():
                if start:
                    m = tbspat.match(l.strip())
                    if m is not None:
                        backup['tablespaces'].append({
                            'name': m.group(1),
                            'oid': m.group(2),
                            'location': m.group(3)
                        })
                if l.strip() == "tablespaces:":
                    start = 1
        normalized.append(backup)
    return normalized
def _file_systems_linux(self):
    """Collect mounted file systems (device, mount point, sizes in bytes)
    by parsing the output of ``df -k``."""
    filesystems = []
    (rc, out, err) = exec_command([which('df'), '-k'])
    lines = out.splitlines()
    # Drop the header row printed by df.
    del lines[0]
    pending_device = None
    for row in lines:
        fields = row.split()
        # rootfs duplicates the real root device on Debian; skip it.
        if fields[0] == 'rootfs':
            continue
        if len(fields) == 1:
            # df wraps long device names onto their own line; the numeric
            # columns follow on the next line.
            pending_device = fields[0]
            continue
        if pending_device is None:
            # Regular single-line record.
            filesystems.append({
                'mount_point': fields[5].decode('utf-8'),
                'device': fields[0].decode('utf-8'),
                'total': int(fields[1]) * 1024,
                'used': int(fields[2]) * 1024
            })
        else:
            # Continuation of a wrapped record: columns are shifted left
            # because the device name was on the previous line.
            filesystems.append({
                'mount_point': fields[4].decode('utf-8'),
                'device': pending_device.decode('utf-8'),
                'total': int(fields[0]) * 1024,
                'used': int(fields[1]) * 1024
            })
            pending_device = None
    return filesystems
def create_report(config):
    """Run pgBadger on the PostgreSQL log file and store a JSON report.

    Returns a metadata dict with ``created_at`` (datetime) and ``timestamp``
    (int, seconds since Epoch) identifying the generated report file.
    Raises UserError when the log directory cannot be determined or when
    pgBadger fails.
    """
    now = datetime.now()
    metadata = {}
    metadata['created_at'] = now
    metadata['timestamp'] = int(time.mktime(now.timetuple()))
    # output file
    output_args = [
        '--outfile', json_report_filepath(config, metadata['timestamp'])
    ]
    # input file
    try:
        input_dir = config['log_directory'] or POSTGRESQL_LOG_DIRECTORY
    except (KeyError, TypeError):
        # was a bare ``except:`` — it hid every error (even KeyboardInterrupt)
        # behind "Internal Error."; only the dict-access failures belong here.
        msg = 'Internal Error.'
        raise UserError(msg)
    input_filename = 'postgresql.log'
    input_args = [os.path.join(input_dir, input_filename)]
    # Launch
    command = ['perl', pgbadger_bin(config)] + output_args + input_args
    (return_code, stdout, stderr) = exec_command(command)
    if return_code != 0:
        msg = "pgBadger failed"
        logger.error("%s during command %s with error %s"
                     % (msg, command, stderr))
        raise UserError(msg)
    return metadata
def fetch_report_html(config, timestamp):
    """Return the HTML pgBadger report for *timestamp*.

    If the HTML file does not exist yet it is generated from the stored
    JSON report. Raises UserError when pgBadger fails or the HTML file
    cannot be read.
    """
    html_file = html_report_filepath(config, timestamp)
    # if the report does not exist let's build it from the JSON data
    if not os.path.isfile(html_file):
        # Generate the HTML report from the JSON version.
        # was: the variable names were swapped (the JSON file is the input,
        # the HTML file is the output); the command itself is unchanged.
        input_args = ['-f', 'json', json_report_filepath(config, timestamp)]
        output_args = ['-o', html_file]
        command = ['perl', pgbadger_bin(config)] + input_args + output_args
        # This operation should be very quick
        (return_code, stdout, stderr) = exec_command(command)
        if return_code != 0:
            msg = "pgBadger failed."
            logger.error("%s during command %s with error %s"
                         % (msg, command, stderr))
            raise UserError(msg)
    try:
        # was: ``open`` without close leaked the file handle, and a bare
        # ``except:`` swallowed every error type.
        with open(html_file, 'r') as f:
            html_content = f.read()
    except (IOError, OSError):
        msg = "Internal Error."
        logger.error("Can't open file : %s" % html_file)
        raise UserError(msg)
    return html_content
def info(app):
    """Report pgBackRest availability and version as a dict."""
    cmd = base_cmd(app.config) + ['version']
    logger.debug(' '.join(cmd))
    (rc, out, err) = exec_command(cmd)
    if rc != 0:
        raise UserError("Could not get version of pgBackRest")
    # "pgBackRest 2.x" -> take the second word as the version.
    words = out.strip().split()
    return {
        "tool": "pgBackRest",
        "version": words[1],
        "supported": True,
    }
def get_version(app):
    """Return the pitrery version string parsed from ``pitrery -V``.

    Raises UserError when pitrery cannot be executed or its version
    banner cannot be parsed.
    """
    (rc, out, err) = exec_command([app.config.backup.path, "-V"])
    if rc != 0:
        # was: ``raise UserError`` with no message — the user got an
        # empty, unexplained error. Same exception type, so callers
        # catching UserError are unaffected.
        raise UserError("Could not run pitrery to get its version")
    m = re.match(r'pitrery ([\d.]+)', out.strip())
    if m is None:
        raise UserError("Could not parse pitrery version from %r" % out)
    return m.group(1)
def purge(config):
    """Run ``pitrery purge`` and return the outcome as a JSON string.

    Raises UserError carrying the same JSON payload when purge fails.
    """
    cmd = base_cmd(config) + ['purge']
    logger.debug(' '.join(cmd))
    (rc, out, err) = exec_command(cmd)
    logger.debug(out)
    logger.debug(err)
    result = {
        'rc': rc,
        'stdout': out.split('\n'),
        'stderr': err.split('\n'),
    }
    payload = json.dumps(result)
    if rc != 0:
        raise UserError(payload)
    return payload
def _ip_addresses_linux(self):
    """Find the host's IP addresses.

    Tries ``ip addr show`` first, falling back to ``ifconfig -a`` when
    the ``ip`` binary is unavailable. Returns a list of address strings
    (possibly empty).
    """
    addrs = []
    try:
        ip = which('ip')
        (rc, out, err) = exec_command([ip, "addr", "show"])
        if rc == 0:
            for line in out.decode('utf8').splitlines():
                # e.g. "    inet 192.168.0.10/24 brd ..."
                m = re.match(r'^\s+inet ([\d\.]+)/\d+\s', line)
                if m:
                    addrs.append(m.group(1))
                # was r'^\sinet6 ...': a single \s could not match the
                # multi-space indentation of ``ip addr`` output, so
                # global IPv6 addresses were silently never collected.
                m = re.match(r'^\s+inet6 ([\dabcdef\:]+)/\d+ scope global',
                             line)
                if m:
                    addrs.append(m.group(1))
        return addrs
    except OSError:
        pass
    try:
        ifconfig = which('ifconfig', ['/sbin'])
        (rc, out, err) = exec_command([ifconfig, "-a"])
        if rc == 0:
            for line in out.splitlines():
                m = re.match(r'^\s+inet (addr:)?([\d\.]+)\s', line)
                if m:
                    addrs.append(m.group(2))
                # Same single-\s fix as above for the ifconfig format.
                m = re.match(
                    r'^\s+inet6 (addr: )?([\dabcdef\:]+)(/\d+)? '
                    '.+[Gg]lobal$', line)
                if m:
                    addrs.append(m.group(2))
        return addrs
    except OSError:
        pass
    return addrs
def say_hello_something4(config, http_context):
    """
    "Hello <something>" using slug & exec_command.

    The slug (first URL variable) is interpolated into an ``echo``
    command line, which is tokenized and executed; the command's stdout
    minus its trailing newline becomes the response content, e.g.
    ``GET /hello4/toto`` -> ``{"content": "Hello toto"}``.
    """
    from temboardagent.command import exec_command, oneline_cmd_to_array
    something = http_context['urlvars'][0]
    cmd = oneline_cmd_to_array("echo 'Hello %s'" % (something))
    (return_code, stdout, stderr) = exec_command(cmd)
    # echo appends a newline; drop it from the response body.
    return {"content": stdout[:-1]}
def get_hello4_exec(config, http_context):
    """
    "Hello <something>" using slug & exec_command.

    Executes ``echo Hello <slug>`` and returns its output as the API
    response, e.g. ``GET /hello4/toto`` -> ``{"content": "Hello toto"}``.
    """  # noqa
    (return_code, stdout, stderr) = exec_command([
        'echo', 'Hello', http_context['urlvars'][0],
    ])
    # was: the command output was computed and then discarded — the
    # handler returned None instead of the documented response body.
    # Mirrors say_hello_something4: strip echo's trailing newline.
    return {"content": stdout[:-1]}
def _hostname_linux(self): """ Returns system hostname. """ # Default value found using platform hostname = platform.node() try: # Try to get hostname (FQDN) using 'hostname -f' (rc, out, err) = exec_command([which('hostname'), '-f']) if rc == 0: hostname = out.encode('utf-8').strip() except Exception: try: # Try to get hostname (FQDN) using socket module (hostname, _, _) = socket.gethostbyaddr(socket.gethostname()) hostname = hostname.strip() except Exception: pass return hostname
def check_version(config):
    """
    Check that pgBadger is installed and recent enough.

    Runs ``pgbadger --version`` and compares the parsed version against
    PGBADGER_MIN_VERSION. Raises UserError when pgBadger is missing or
    too old; otherwise returns the parsed version dict.
    """
    command = ['perl', pgbadger_bin(config), '--version']
    (return_code, stdout, stderr) = exec_command(command)
    if return_code != 0:
        raise UserError("Seems like pgBadger is not installed : %s" % stderr)
    version = parse_version(stdout)
    if version['int_version'] < PGBADGER_MIN_VERSION:
        raise UserError("This version of pgBadger is too old : %s"
                        % version['full_version'])
    return version
def list(app):
    """Return the list of pitrery backups, normalized for the API.

    Runs ``pitrery list -j`` and converts each backup to a dict with
    sizes in bytes, a Unix timestamp stop time and tablespace details.
    Returns [] when the list command fails.
    """
    # NOTE: shadows the builtin ``list`` — name kept for plugin API
    # compatibility.
    if not check_version(app):
        raise UserError
    cmd = base_cmd(app.config) + ['list', '-j']
    logger.debug(' '.join(cmd))
    (rc, out, err) = exec_command(cmd)
    if rc != 0:
        return []
    info = json.loads(out)
    normalized = []
    for b in info['backups']:
        # Ensure the reported space used for the backup is in bytes. The
        # value comes from du -sh, so there may be a size suffix to process.
        # was r'\d$': it matched only single-digit values, so any
        # multi-digit plain-byte count fell through to the suffix branch
        # and was rejected as invalid.
        if re.match(r'\d+$', b['space_used']) is not None:
            stored_size = int(b['space_used'])
        else:
            stored_size = int(b['space_used'][0:-1])
            suffix = b['space_used'][-1]
            candidates = ['K', 'M', 'G', 'T', 'P', 'E', 'Z', 'Y']
            if suffix in candidates:
                # Multiply by 1024 once per step up to the suffix.
                for s in candidates:
                    stored_size = stored_size * 1024
                    if suffix == s:
                        break
            else:
                logger.error("Could not compute size of the stored backup: "
                             "invalid value {}".format(b['space_used']))
                stored_size = None
        tbs = []
        db_size = 0
        for t in b['tablespaces']:
            # pg_size_unpretty the size of the tablespace from bytes, kB,
            # MB, GB or TB
            (tbs_size, unit) = t['size'].split()
            # was: the int() conversion happened inside the unit loop, so
            # a 'bytes' unit broke out immediately and left tbs_size as a
            # str, raising TypeError on the addition below.
            tbs_size = int(tbs_size)
            for u in ['bytes', 'kB', 'MB', 'GB', 'TB']:
                if unit == u:
                    break
                tbs_size = tbs_size * 1024
            db_size = db_size + tbs_size
            del t['size']
            # Exclude pg_global and pg_default from the output
            if t['location'] is not None:
                tbs.append(t)
        # Convert the stop time of the backup to a timestamp (seconds from
        # Epoch)
        stop_time = datetime.strptime(b['stop_time'],
                                      "%Y-%m-%d %H:%M:%S %Z")
        timestamp = int((stop_time - datetime(1970, 1, 1)).total_seconds())
        # pitrery only creates full backups so the sizes between db and
        # backup are the same
        normalized.append({
            'db_size': db_size,
            'backup_size': db_size,
            'stored_db_size': stored_size,
            'stored_backup_size': stored_size,
            'type': 'full',
            'stop_time': timestamp,
            'set': b['directory'],
            'reference': None,
            'tablespaces': tbs
        })
    return normalized
def get_unix_os_version():
    """Return the OS description from ``uname -sri``, or None on failure."""
    (rc, out, err) = exec_command(['/bin/uname', '-sri'])
    if rc != 0:
        # Match the original implicit-None behaviour on failure.
        return None
    return out.strip()
def _get_hostname_linux(self):
    """Return the hostname reported by /bin/hostname, or None on failure."""
    (rc, out, err) = exec_command(['/bin/hostname'])
    if rc != 0:
        # Original returned None implicitly when the command failed.
        return None
    # Strip the trailing newline emitted by the command.
    return out.replace('\n', '')
def _get_os_version_linux(self):
    """Return the OS description from ``uname -sri``, or None on failure."""
    (rc, out, err) = exec_command(['/bin/uname', '-sri'])
    if rc != 0:
        # Original returned None implicitly when the command failed.
        return None
    # Strip the trailing newline emitted by the command.
    return out.replace('\n', '')