def startup(self): """ Hooks before the Oracle gate operations starts. """ # Do we have sudo permission? self.check_sudo('oracle') # Always set FRA to the current size of the media. curr_fds = self.get_current_rfds() target_fds = self.size_pretty(self.media_usage( self.get_current_fra_dir())['free'], int_only=True, no_whitespace=True).replace("B", "") if curr_fds != target_fds: eprint( "WARNING: Reserved space for the backup is smaller than available disk space. Adjusting." ) if not self.autoresize_available_archive(target_fds): eprint( "WARNING: Could not adjust system for backup reserved space!" ) else: print( "INFO: System settings for the backup recovery area has been altered successfully." )
def do_listener_start(self, *args, **params):  # pylint: disable=W0613
    """
    Start the SUSE Manager database listener.

    @help
    quiet\tSuppress any output.
    """
    # Docstring now carries the "@help quiet" section, consistent with
    # do_listener_stop, so the CLI help advertises the directive.
    if 'quiet' not in args:
        print("Starting database listener...\t", end="")
        sys.stdout.flush()

    dbstatus = self.get_status()
    if dbstatus.ready:
        # Nothing to do: a listener is already up.
        if 'quiet' not in args:
            print("Failed")
            eprint("Error: listener already running.")
        return

    ready = False
    stdout, stderr = self.syscall("sudo", "-u", "oracle",
                                  "ORACLE_HOME=" + self.ora_home,
                                  self.lsnrctl, "start")
    if stdout:
        # lsnrctl reports an "Uptime" line once the listener answers.
        for line in stdout.split("\n"):
            if line.lower().startswith("uptime"):
                ready = True
                break

    if 'quiet' not in args:
        print((ready and "done" or "failed"))

    if stderr and 'quiet' not in args:
        self.to_stderr(stderr)
def do_listener_stop(self, *args, **params):  # pylint: disable=W0613
    """
    Stop the SUSE Manager database listener.

    @help
    quiet\tSuppress any output.
    """
    verbose = 'quiet' not in args
    if verbose:
        print("Stopping database listener...\t", end="")
        sys.stdout.flush()

    if not self.get_status().ready:
        # Listener is not up; nothing to stop.
        if verbose:
            print("Failed")
            eprint("Error: listener is not running.")
        return

    stdout, stderr = self.syscall("sudo", "-u", "oracle",
                                  "ORACLE_HOME=" + self.ora_home,
                                  self.lsnrctl, "stop")
    # lsnrctl prints "completed successfully" on a clean shutdown.
    stopped = bool(stdout) and any(
        "completed successfully" in line.lower()
        for line in stdout.split("\n"))

    if verbose:
        print("done" if stopped else "failed")

    if stderr and verbose:
        self.to_stderr(stderr)
def get_backup_info(self):
    """
    Return list of BackupInfo objects, representing backups,
    sorted with the most recent (highest key) first.

    :raises GateException: when RMAN reports an error.
    """
    stdout, stderr = self.call_scenario('rman-backup-info', target='rman')
    if stderr:
        eprint("Backup information listing failure:")
        eprint(stderr)
        raise GateException("Unable to get information about backups.")

    capture = False
    idx = []
    info = {}
    for line in stdout.split("\n"):
        line = line.strip()
        if line.startswith("---"):
            # Table delimiter: the data rows follow this line.
            capture = True
            continue
        if capture:
            if not line:
                # Blank line ends the table body.
                capture = False
                continue
            tkn = list(filter(None, line.replace("\t", " ").split(" ")))
            # Column 5 is the backup key; last column is the completion time.
            info[tkn[5]] = BackupInfo(tkn[0], tkn[5], tkn[-1])
            idx.append(tkn[5])

    # Single reverse sort instead of reversed(sorted(...)).
    return [info[bid] for bid in sorted(idx, reverse=True)]
def do_space_tables(self, **args: str) -> None:  # pylint: disable=W0613
    """
    Show space report for each table

    :raises GateException: on any underlying psql error.
    """
    stdout, stderr = self.call_scenario('pg-tablesizes', target='psql')

    if stderr:
        eprint(stderr)
        raise GateException("Unhandled underlying error occurred, see above.")

    if stdout:
        t_index = []
        t_ref = {}
        t_total = 0
        # Skip the two psql header lines; rows are "name | pretty | bytes".
        for line in stdout.strip().split("\n")[2:]:
            line = list(filter(None, map(lambda el: el.strip(), line.split('|'))))  # type: ignore
            if len(line) == 3:
                t_name, t_size_pretty, t_size = line[0], line[1], int(line[2])
                t_ref[t_name] = t_size_pretty
                t_total += t_size
                t_index.append(t_name)

        t_index.sort()

        table = [('Table', 'Size',)]
        for name in t_index:
            table.append((name, t_ref[name],))
        table.append(('', '',))
        # Format with two real decimals: the previous round()-then-"%.2f"
        # always printed ".00". (Removed the unused 'longest' accumulator.)
        table.append(('Total', ('%.2f' % (t_total / 1024. / 1024.)) + 'M',))
        print("\n", TablePrint(table), "\n")
def _rst_shutdown_db(self) -> None:
    """
    Gracefully shutdown the database.
    """
    if not self._get_db_status():
        # Already down -- nothing to shut down.
        return
    self.do_db_stop()
    self.do_db_status()
    if self._get_db_status():
        # Still alive after a stop attempt: give up.
        eprint("Error: Unable to stop database.")
        sys.exit(1)
def _apply_db_conf(self) -> None:
    """
    Reload the configuration.
    """
    stdout, stderr = self.call_scenario('pg-reload-conf', target='psql')
    if stderr:
        eprint(stderr)
        raise GateException("Unhandled underlying error occurred, see above.")
    # pg_reload_conf() returns 't' on success.
    answer = (stdout or "").strip()
    if answer == 't':
        print("INFO: New configuration has been applied.")
def autoresize_available_archive(self, target_fds):
    """
    Set Oracle environment always up to the current media size.

    :param target_fds: desired recovery area size (unit-less string, e.g. "10G").
    :returns: True when the database accepted the new size, False otherwise.
    """
    stdout, stderr = self.call_scenario('ora-archive-setup', destsize=target_fds)
    # Guard against empty/None stdout -- the previous unconditional
    # stdout.find(...) raised AttributeError on failure paths.
    if stdout and "System altered" in stdout:
        return True

    eprint("ERROR:", stderr)
    return False
def _cleanup_pids(self) -> None:
    """
    Cleanup PostgreSQL garbage in /tmp
    """
    # Stale UNIX-socket files left behind by a dead postmaster.
    for entry in os.listdir('/tmp'):
        if entry.startswith('.s.PGSQL.'):
            os.unlink('/tmp/' + entry)

    # Remove postgresql.pid (versions 9.x) if postmaster was just killed
    if os.path.exists(self._pid_file):
        eprint('Info: Found stale PID file, removing')
        os.unlink(self._pid_file)
def _get_sysconfig(self) -> None:
    """
    Read the system config for the postgresql.

    Populates self.config with 'sysconfig_<key>' entries; lines that
    cannot be parsed are reported and skipped.
    """
    # Context manager closes the handle deterministically; the previous
    # bare open() leaked the file descriptor until garbage collection.
    with open('/etc/sysconfig/postgresql') as cfg:
        lines = [ln.strip() for ln in cfg.readlines()]

    for line in filter(None, lines):
        if line.startswith('#'):
            # Comment line.
            continue
        try:
            key, val = line.split("=", 1)
            self.config['sysconfig_' + key] = val
        except Exception as ex:
            eprint("Cannot parse line", line, "from sysconfig.")
            eprint(ex)
def do_backup_restore(self, *opts: str, **args: str) -> None:  # pylint: disable=W0613
    """
    Restore the SUSE Manager Database from backup

    Verifies there is enough disk space, shuts the database down,
    saves the current cluster, swaps in the backup and restarts.
    """
    # Go out from the current position, in case user is calling SMDBA inside the "data" directory
    location_begin = os.getcwd()
    os.chdir('/')

    # This is the ratio of compressing typical PostgreSQL cluster tablespace
    ratio = 0.134

    backup_dst, backup_on = self.do_backup_status('--silent')
    if not backup_on:
        eprint("No backup snapshots are available.")
        sys.exit(1)

    # Check if we have enough space to fit enough copy of the tablespace
    curr_ts_size = self._get_tablespace_size(self.config['pcnf_pg_data'])
    bckp_ts_size = self._get_tablespace_size(backup_dst)
    disk_size = self._get_partition_size(self.config['pcnf_pg_data'])

    print("Current cluster size:\t", self.size_pretty(curr_ts_size))
    print("Backup size:\t\t", self.size_pretty(bckp_ts_size))
    print("Current disk space:\t", self.size_pretty(disk_size))
    print("Predicted space:\t",
          self.size_pretty(disk_size - (curr_ts_size * ratio) - bckp_ts_size))

    # At least 1GB free disk space required *after* restore from the backup
    # (0x40000000 bytes == 1 GiB).
    if disk_size - curr_ts_size - bckp_ts_size < 0x40000000:
        eprint("At least 1GB free disk space required after backup restoration.")
        sys.exit(1)

    # Requirements were met at this point.
    #
    # Shutdown the db
    self._rst_shutdown_db()

    # Save current tablespace
    self._rst_save_current_cluster()

    # Replace with new backup
    self._rst_replace_new_backup(backup_dst)
    self.do_db_start()

    # Move back where backup has been invoked
    os.chdir(location_begin)
def _get_pg_config(self) -> None:
    """
    Get entire PostgreSQL configuration.
    """
    scenario = self.get_scenario_template(target='psql').replace('@scenario', 'show all')
    stdout, stderr = self.syscall("sudo", "-u", "postgres", "/bin/bash",
                                  input=scenario)
    if not stdout:
        eprint(stderr)
        raise Exception("Underlying error: unable get backend configuration.")

    # Skip the two psql header lines; each row is "name | setting | ...".
    for row in stdout.strip().split("\n")[2:]:
        try:
            key, val = (cell.strip() for cell in row.split('|')[:2])
            self.config['pcnf_' + key] = val
        except Exception:
            print("Cannot parse line:", row)
def do_space_tables(self, *args, **params):  # pylint: disable=W0613
    """
    Show space report for each table.

    :raises GateException: when the DB is down or an ORA error is found.
    """
    dbstatus = self.get_db_status()
    if not dbstatus.ready:
        raise GateException("Database is not running!")

    table = [('Table', 'Size',)]
    total = 0
    stdout, stderr = self.call_scenario(
        'tablesizes', user=self.config.get('db_user', '').upper())
    # to_stderr() terminates the process when stderr is non-empty,
    # so everything below only runs on a clean scenario call.
    self.to_stderr(stderr)
    ora_error = self.has_ora_error(stdout)
    if ora_error:
        raise GateException(
            "Please visit http://%s.ora-code.com/ page to know more details."
            % ora_error.lower())

    # Materialize each row into a list. The previous nested lazy
    # filter() objects are always truthy on Python 3, so blank lines
    # survived the outer filter and crashed the tuple unpacking.
    rows = [list(filter(None, line.replace("\t", " ").split(" ")))
            for line in stdout.split("\n")]
    for tname, tsize in (row for row in rows if len(row) == 2):
        # Keep the fraction: round()-then-"%.2f" always printed ".00".
        table.append((tname, ('%.2fK' % (float(tsize) / 1024.)),))
        total += float(tsize)

    table.append(('', '',))
    table.append(('Total', ('%.2fM' % (total / 1024. / 1024.))))
    if table:
        print("\n", TablePrint(table), "\n")
def do_space_reclaim(self, **args: str) -> None:  # pylint: disable=W0613
    """
    Free disk space from unused objects in tables and/or indexes

    Runs "vacuum analyze" and "cluster" against the running database.
    :raises GateException: if the DB is offline or an operation fails.
    """
    print("Examining database...\t", end="")
    sys.stdout.flush()

    if not self._get_db_status():
        time.sleep(1)
        raise GateException("Database must be online.")

    # NOTE(review): this status goes to stderr while the prompt above went
    # to stdout -- possibly intentional, but worth confirming.
    eprint("finished")
    time.sleep(1)

    operations = [
        ('Analyzing database', 'vacuum analyze;'),
        ('Reclaiming space', 'cluster;'),
    ]

    for msg, operation in operations:
        print("%s...\t" % msg, end="")
        sys.stdout.flush()
        _, stderr = self.syscall(
            "sudo", "-u", "postgres", "/bin/bash",
            input=self.get_scenario_template(target='psql').replace('@scenario', operation))
        if stderr:
            eprint("failed")
            sys.stdout.flush()
            eprint(stderr)
            raise GateException("Unhandled underlying error occurred, see above.")
        print("done")
        sys.stdout.flush()
def do_backup_purge(self, *args, **params):  # pylint: disable=W0613
    """
    Purge all backups. Useful after successful reliable recover from the disaster.
    """
    self.vw_check_database_ready(
        "Database must be healthy and running in order to purge assigned backups of it!")

    print("Checking backups:\t", end="")
    roller = Roller()
    roller.start()

    snapshots = self.get_backup_info()
    if not snapshots:
        roller.stop("failed")
        time.sleep(1)
        eprint("No backup snapshots available.")
        sys.exit(1)
    roller.stop("finished")
    time.sleep(1)

    print("Removing %s backup%s:\t"
          % (len(snapshots), "s" if len(snapshots) > 1 else ""), end="")
    roller = Roller()
    roller.start()

    _, stderr = self.call_scenario('rman-backup-purge', target='rman')
    if stderr:
        roller.stop("failed")
        time.sleep(1)
        # to_stderr() terminates the process here.
        self.to_stderr(stderr)

    roller.stop("finished")
    time.sleep(1)
def do_system_check(self, *args, **params):  # pylint: disable=W0613
    """
    Common backend healthcheck.

    @help
    force-archivelog-off\tForce archivelog mode to off.
    """
    print("Checking SUSE Manager database backend\n")

    # Set data table autoextensible.
    stdout, stderr = self.call_scenario('cnf-get-noautoext')
    if stderr:
        eprint("Autoextend check error:")
        eprint(stderr)
        raise GateException("Unable continue system check")

    if stdout:
        # Scenario returned datafiles that are NOT autoextensible:
        # turn autoextend on for each of them.
        print("Autoextensible:\tOff")
        scenario = []
        for fname in stdout.strip().split("\n"):
            scenario.append("alter database datafile '{}' autoextend on;".format(fname))
        self.syscall("sudo", "-u", "oracle", "/bin/bash",
                     input=self.get_scenario_template().replace('@scenario', '\n'.join(scenario)))
        print("%s table%s has been autoextended"
              % (len(scenario), len(scenario) > 1 and 's' or ''))
    else:
        print("Autoextensible:\tYes")

    # Turn on archivelog.
    #
    if 'force-archivelog-off' in args:
        if self.get_archivelog_mode():
            self.set_archivelog_mode(status=False)
        else:
            print("Archivelog mode is not used.")
    else:
        if not self.get_archivelog_mode():
            # Try enabling it, then re-check that it actually took effect.
            self.set_archivelog_mode(True)
            if not self.get_archivelog_mode():
                eprint("No archive log")
            else:
                print("Database is now running in archivelog mode.")
        else:
            print("Archivelog:\tYes")

    # Free space on the storage.
    #
    # TBD

    print("\nFinished\n")
def do_db_start(self, *args, **params):  # pylint: disable=W0613
    """
    Start SUSE Manager database.

    Starts the listener first, then the database core via dbstart.
    :raises GateException: when the listener is already running.
    """
    print("Starting listener:\t", end="")
    sys.stdout.flush()
    roller = Roller()
    roller.start()
    dbstatus = self.get_status()
    if dbstatus.ready:
        roller.stop('failed')
        time.sleep(1)
        raise GateException("Error: listener is already running")

    self.do_listener_start('quiet')
    roller.stop('done')
    time.sleep(1)

    print("Starting core...\t", end="")
    sys.stdout.flush()
    roller = Roller()
    roller.start()
    stdout, stderr = self.syscall("sudo", "-u", "oracle",
                                  self.ora_home + "/bin/dbstart")
    # NOTE(review): the roller is stopped with 'done' unconditionally here
    # and then stopped again in both branches below, so a failure can never
    # be shown first. Looks like a defect -- confirm Roller semantics.
    roller.stop('done')
    time.sleep(1)
    # to_stderr() terminates the whole process when stderr is non-empty.
    self.to_stderr(stderr)
    if stdout and stdout.find("Database opened") > -1 and stdout.find("Database mounted") > -1:
        roller.stop('done')
        time.sleep(1)
    else:
        roller.stop('failed')
        time.sleep(1)
        eprint("Output dump:")
        eprint(stdout)
        if stderr:
            eprint("Error dump:")
            eprint(stderr)
def do_listener_status(self, *args, **params):  # pylint: disable=W0613
    """
    Show database status.
    """
    print("Listener:\t", end="")
    sys.stdout.flush()

    status = self.get_status()
    print("running" if status.ready else "down")
    print("Uptime:\t\t", status.uptime if status.uptime else "")
    print("Instances:\t", status.available)

    if status.stderr:
        eprint("Error dump:")
        eprint(status.stderr)
    if status.unknown:
        eprint("Warning: %s unknown instance%s."
               % (status.unknown, "s" if status.unknown > 1 else ""))
    if not status.available:
        eprint("Critical: No available instances found!")
def to_stderr(stderr: str):
    """
    Format an error output to STDERR and terminate everything at once.

    :param stderr: raw error text; may be None or empty.
    :returns: False when there is nothing to report; otherwise the
              process exits with status 1.
    """
    # (stderr or "") instead of (stderr + ""): the concatenation raised
    # TypeError whenever stderr was None.
    if not (stderr or "").strip():
        return False

    out = []
    for line in filter(None, str(stderr).replace("\\n", "\n").split("\n")):
        out.append(" " + line.strip())

    eprint("\nError:\n" + ("-" * 80))
    eprint("\n".join(out))
    eprint("-" * 80)
    sys.exit(1)
def to_stderr(stderr):
    """
    Format an error output to STDERR and terminate everything at once.

    :param stderr: raw error text; may be None or empty.
    :returns: False when there is nothing to report; otherwise the
              process exits with status 1.
    """
    # (stderr or "") instead of (stderr + ""): the concatenation raised
    # TypeError whenever stderr was None.
    if not (stderr or "").strip():
        return False

    out = []
    for line in filter(None, str(stderr).replace("\\n", "\n").split("\n")):
        out.append(" " + line.strip())

    eprint("\nError:\n" + ("-" * 80))
    eprint("\n".join(out))
    eprint("-" * 80)
    sys.exit(1)
def check_backup_info(self):
    """
    Check if backup is consistent.

    Crosschecks RMAN backup pieces and archive logs, returning
    (healthy_backups, failed_backups, healthy_archivelogs,
    failed_archivelogs) as lists of HandleInfo objects.
    :raises GateException: when either RMAN crosscheck call fails.
    """
    failed_backups = []
    healthy_backups = []
    failed_archivelogs = []
    healthy_archivelogs = []
    bkpsout = None
    arlgout = None

    # Get database backups
    stdout, stderr = self.call_scenario('rman-backup-check-db', target='rman')
    if stderr:
        eprint("Backup information check failure:")
        eprint(stderr)
        raise GateException("Unable to check the backups.")

    # Find the RMAN output chunk that carries the crosscheck result.
    for chunk in stdout.split("RMAN>"):
        chunk = chunk.strip()
        if not chunk:
            continue
        if chunk.find("crosschecked backup piece") > -1:
            bkpsout = chunk
            break

    # Get database archive logs check
    stdout, stderr = self.call_scenario('rman-backup-check-al', target='rman')
    if stderr:
        eprint("Archive log information check failure:")
        eprint(stderr)
        raise GateException("Unable to check the archive logs backup.")

    for chunk in stdout.split("RMAN>"):
        chunk = chunk.strip()
        if not chunk:
            continue
        if chunk.find("archived log file name") > -1:
            arlgout = chunk
            break

    # Check failed backups
    if bkpsout:
        for line in map(lambda elm: elm.strip(), bkpsout.split("crosschecked")):
            if not line.startswith("backup piece"):
                continue
            # First line: "backup piece ... 'STATUS'"; second line: the
            # key=value attributes of the piece.
            obj_raw = line.split("\n")[:2]
            if len(obj_raw) == 2:
                status = obj_raw[0].strip().split(" ")[-1].replace("'", '').lower()
                data = dict(
                    filter(None,
                           map(lambda elm: "=" in elm and tuple(elm.split("=", 1)) or None,
                               filter(None, obj_raw[-1].split(" ")))))
                hinfo = HandleInfo(status, handle=data['handle'],
                                   recid=data['RECID'], stamp=data['STAMP'])
                if hinfo.availability == 'available':
                    healthy_backups.append(hinfo)
                else:
                    failed_backups.append(hinfo)

    # Check failed archive logs
    if arlgout:
        for archline in map(
                lambda elm: elm.strip(),
                arlgout.split("validation", 1)[-1].split("Crosschecked")[0].split("validation")):
            obj_raw = archline.split("\n")
            if len(obj_raw) == 2:
                status = obj_raw[0].split(" ")[0]
                data = dict(
                    filter(None,
                           map(lambda elm: '=' in elm and tuple(elm.split('=', 1)) or None,
                               obj_raw[1].split(" "))))
                # Ask RMAN devs why this time it is called "name"
                hinfo = HandleInfo(status == 'succeeded' and 'available' or 'unavailable',
                                   recid=data['RECID'], stamp=data['STAMP'],
                                   handle=data['name'])
                if hinfo.availability == 'available':
                    healthy_archivelogs.append(hinfo)
                else:
                    failed_archivelogs.append(hinfo)

    return healthy_backups, failed_backups, healthy_archivelogs, failed_archivelogs
def do_backup_hot(self, *args, **params):  # pylint: disable=W0613
    """
    Perform hot backup on running database.

    Requires a healthy database with archivelog enabled; runs the RMAN
    hot-backup scenario, rotates old backups and prints a summary.
    :raises GateException: when archivelog mode is off.
    """
    self.vw_check_database_ready(
        "Database must be healthy and running in order to take a backup of it!")

    # Check DBID is around all the time (when DB is healthy!)
    self.get_dbid(known_db_status=True)

    if not self.get_archivelog_mode():
        raise GateException(
            "Archivelog is not turned on.\n\tPlease shutdown SUSE Manager and run system-check first!")

    print("Backing up the database:\t", end="")
    roller = Roller()
    roller.start()

    stdout, stderr = self.call_scenario('rman-hot-backup', target='rman')
    if stderr:
        roller.stop("failed")
        time.sleep(1)
        # to_stderr() terminates the process here.
        self.to_stderr(stderr)

    if stdout:
        roller.stop("finished")
        time.sleep(1)

        files = []
        arclogs = []
        # RMAN log lines: "input datafile ... name=<path>" and
        # "archived log ... name=<path> ...".
        for line in stdout.split("\n"):
            line = line.strip()
            if line.startswith("input") and line.find('datafile') > -1:
                files.append(line.split("name=")[-1])
            elif line.startswith("archived"):
                arclogs.append(line.split("name=")[-1].split(" ")[0])

        print("Data files archived:")
        for fname in files:
            print("\t" + fname)
        print()

        print("Archive logs:")
        for arc in arclogs:
            print("\t" + arc)
        print()

    # Rotate and check
    self.autoresolve_backup()
    self._backup_rotate()

    # Finalize
    hbk, fbk, harch, farch = self.check_backup_info()
    print("Backup summary as follows:")
    if hbk:
        print("\tBackups:")
        for bkp in hbk:
            print("\t\t", bkp.handle)
        print()
    if harch:
        print("\tArchive logs:")
        for bkp in harch:
            print("\t\t", bkp.handle)
        print()
    if fbk:
        eprint("WARNING! Broken backups has been detected:")
        for bkp in fbk:
            eprint("\t\t", bkp.handle)
        eprint()
    if farch:
        eprint("WARNING! Broken archive logs has been detected:")
        for bkp in farch:
            eprint("\t\t", bkp.handle)
        eprint()

    print("\nFinished.")
def do_backup_check(self, *args, **params):  # pylint: disable=W0613
    """
    Check the consistency of the backup.

    @help
    autoresolve\t\tTry to automatically resolve errors and inconsistencies.\n
    """
    self.vw_check_database_ready(
        "Database must be healthy and running in order to check assigned backups of it!")

    info = self.get_backup_info()
    if info:
        print("Last known backup:", info[0].completion)
    else:
        raise GateException("No backups has been found!")

    hbk, fbk, harch, farch = self.check_backup_info()

    # Display backups info
    if fbk:
        eprint("WARNING! Failed backups has been found as follows:")
        for bkp in fbk:
            eprint("\tName:", bkp.handle)
        eprint()
    else:
        print(("%s available backup%s seems healthy."
               % (len(hbk), len(hbk) > 1 and 's are' or '')))

    # Display ARCHIVELOG info
    if farch:
        eprint("WARNING! Failed archive logs has been found as follows:")
        for arc in farch:
            eprint("\tName:", arc.handle)
        eprint()

        if 'autoresolve' not in args:
            eprint("Try using \"autoresolve\" directive.")
            sys.exit(1)
        else:
            # Re-check after the automatic resolution attempt.
            self.autoresolve_backup()
            hbk, fbk, harch, farch = self.check_backup_info()
            if farch:
                eprint("WARNING! Still are failed archive logs:")
                for arc in farch:
                    eprint("\tName:", arc.handle)
                eprint()
                if 'ignore-errors' not in args:
                    eprint("Maybe you want to try \"ignore-errors\" directive and... cross the fingers.")
                    sys.exit(1)
            else:
                print("Hooray! No failures in backups found!")
    else:
        print(("%s available archive log%s seems healthy."
               % (len(harch), len(harch) > 1 and 's are' or '')))
def do_stats_overview(self, *args, **params):  # pylint: disable=W0613
    """
    Show tables with stale or empty statistics.
    """
    self.vw_check_database_ready(
        "Database must be healthy and running in order to get stats overview!")

    print("Preparing data:\t\t", end="")
    roller = Roller()
    roller.start()

    stdout, stderr = self.call_scenario(
        'stats', owner=self.config.get('db_user', '').upper())

    roller.stop('finished')
    time.sleep(1)
    # to_stderr() terminates the process when stderr is non-empty.
    self.to_stderr(stderr)

    stale = []
    empty = []
    if stdout:
        # The scenario output has two sections introduced by the
        # "stale objects" / "empty objects" marker lines.
        segment = None
        for line in stdout.strip().split("\n"):
            if line.find('stale objects') > -1:
                segment = 'stale'
                continue
            elif line.find('empty objects') > -1:
                segment = 'empty'
                continue

            line = line.split(" ")[-1].strip()
            if segment and segment == 'stale':
                stale.append(line)
            elif segment and segment == 'empty':
                empty.append(line)
            else:
                print("Ignoring", repr(line))

    if stale:
        print("\nList of stale objects:")
        for obj in stale:
            print("\t", obj)
        print("\nFound %s stale objects\n" % len(stale))
    else:
        print("No stale objects found")

    if empty:
        print("\nList of empty objects:")
        for obj in empty:
            print("\t", obj)
        # Bug fix: count the *empty* objects (previously used len(stale)).
        print("\nFound %s objects that currently have no statistics.\n" % len(empty))
    else:
        print("No empty objects found.")

    if stderr:
        eprint("Error dump:")
        eprint(stderr)
def do_space_reclaim(self, *args, **params):  # pylint: disable=W0613
    """
    Free disk space from unused object in tables and indexes.

    Runs the shrink advisor, gathers its recommendations and shrinks
    the segments marked as auto-shrinkable.
    """
    self.vw_check_database_ready(
        "Database must be healthy and running in order to reclaim the used space!")

    print("Examining the database...\t", end="")
    roller = Roller()
    roller.start()

    # Bug fix: capture the scenario result. Previously the return value
    # was discarded and a hard-coded "stderr = None" made the failure
    # branch below unreachable.
    _, stderr = self.call_scenario('shrink-segments-advisor')

    if stderr:
        roller.stop('failed')
        time.sleep(1)
        self.to_stderr(stderr)
    else:
        roller.stop('done')
        time.sleep(1)

    print("Gathering recommendations...\t", end="")
    roller = Roller()
    roller.start()

    # get the recomendations
    stdout, stderr = self.call_scenario('recomendations')
    if stdout:
        roller.stop("done")
        time.sleep(1)
    elif stderr:
        roller.stop("failed")
        time.sleep(1)
        eprint("Error dump:")
        eprint(stderr)
    else:
        roller.stop("finished")
        time.sleep(1)
        print("\nNo space reclamation possible at this time.\n")
        return

    messages = {
        'TABLE': 'Tables',
        'INDEX': 'Indexes',
        'AUTO': 'Recommended segments',
        'MANUAL': 'Non-shrinkable tablespace',
    }

    # tree: tablespace -> segment type -> shrink mode -> [(name, bytes)].
    tree = {}
    wseg = 0  # widest segment name, for column alignment

    if stdout:
        lines = [
            tuple(filter(None, line.strip().replace("\t", " ").split(" ")))
            for line in stdout.strip().split("\n")
        ]
        for ssm, sname, rspace, tsn, stype in lines:
            tsns = tree.get(tsn, {})
            stypes = tsns.get(stype, {})
            ssms = stypes.get(ssm, [])
            ssms.append((sname, int(rspace),))
            wseg = len(sname) if len(sname) > wseg else wseg
            stypes[ssm] = ssms
            tsns[stype] = stypes
            tree[tsn] = tsns

    total = 0
    for tsn in tree:
        print("\nTablespace:", tsn)
        for obj in tree[tsn].keys():
            print("\n\t" + messages.get(obj, "Object: " + obj))
            for stype in tree[tsn][obj].keys():
                typetotal = 0
                print("\t" + messages.get(stype, "Type: " + stype))
                for segment, size in tree[tsn][obj][stype]:
                    print("\t\t", (segment + ((wseg - len(segment)) * " ")),
                          "\t", '%.2fM' % (size / 1024. / 1024.))
                    total += size
                    typetotal += size
                total_message = "Total " + messages.get(obj, '').lower()
                print("\n\t\t", (total_message + ((wseg - len(total_message)) * " ")),
                      "\t", '%.2fM' % (typetotal / 1024. / 1024.))

    print("\nTotal reclaimed space: %.2fGB" % (total / 1024. / 1024. / 1024.))

    # Reclaim space
    if tree:
        for tsn in tree:
            for obj in tree[tsn]:
                if tree[tsn][obj].get('AUTO', None):
                    print("\nReclaiming space on %s:" % messages[obj].lower())
                    for segment, size in tree[tsn][obj]['AUTO']:
                        print("\t", segment + "...\t", end="")
                        sys.stdout.flush()
                        stdout, stderr = self.syscall(
                            "sudo", "-u", "oracle", "/bin/bash",
                            input=self.get_scenario_template().replace(
                                '@scenario',
                                self.__get_reclaim_space_statement(segment, obj)))
                        if stderr:
                            print("failed")
                            eprint(stderr)
                        else:
                            print("done")
        print("Reclaiming space finished")
def do_backup_list(self, *args, **params):  # pylint: disable=W0613
    """
    List of available backups.
    """
    self.vw_check_database_ready("Database must be running and ready!",
                                 output_shift=2)

    roller = Roller()
    roller.start()
    print("Getting available backups:\t", end="")

    infoset = []
    stdout, stderr = self.call_scenario('rman-list-backups', target='rman')
    # to_stderr() terminates the process when stderr is non-empty.
    self.to_stderr(stderr)
    roller.stop("finished")
    time.sleep(1)

    if stdout:
        # Each backup set starts with a "BS Key" header in RMAN output.
        for chunk in filter(None, [
                re.sub('=+', '', c).strip()
                for c in stdout.split("\n=")[-1].split('BS Key')]):
            try:
                info = InfoNode()
                info.files = []
                piece_chnk, files_chnk = chunk.split('List of Datafiles')

                # Get backup place
                for line in [l.strip() for l in piece_chnk.split("\n")]:
                    if line.lower().startswith('piece name'):
                        info.backup = line.split(" ")[-1]
                    if line.lower().find('status') > -1:
                        status_line = list(
                            filter(None,
                                   line.replace(':', '').split("Status")[-1].split(" ")))
                        if len(list(status_line)) == 5:
                            info.status = status_line[0]
                            info.compression = status_line[2]
                            info.tag = status_line[4]

                # Get the list of files
                for line in [l.strip() for l in files_chnk.split("\n")]:
                    if line.startswith('-'):
                        continue
                    line = list(filter(None, line.split(" ")))
                    if len(list(line)) > 4:
                        if line[0] == 'File':
                            # Column header row.
                            continue
                        dbf = InfoNode()
                        dbf.type = line[1]
                        dbf.file = line[-1]
                        dbf.date = line[-2]
                        info.files.append(dbf)
                infoset.append(info)
            except Exception:
                eprint("No backup snapshots available.")
                sys.exit(1)

    # Display backup data
    if infoset:
        print("Backups available:\n")
        for info in infoset:
            print("Name:\t", info.backup)
            print("Files:")
            for dbf in info.files:
                print("\tType:", dbf.type, end="")
                # Bug fix: the Python-2 "print >> sys.stdout" leftover
                # printed the file-object repr before "Date:".
                print("\tDate:", dbf.date, end="")
                print("\tFile:", dbf.file)
            print()