def progress_bar(items, errors, repairs, fix_value, cycle):
    """Render a one-line, in-place status update for the current scan cycle.

    When fix_value is truthy the repair count is appended to the line;
    otherwise only scanned-item and error counts are shown.
    """
    timestamp = time.strftime("%Y-%m-%d %H:%M:%S")
    if fix_value:
        message = "[%s] Cycle %s | Items Scanned: %12d | Errors: %6d | Repairs: %6d | " % \
            (timestamp, cycle, items, errors, repairs)
    else:
        message = "[%s] Cycle %s | Items Scanned: %12d | Errors: %6d | " % \
            (timestamp, cycle, items, errors)
    inline_print(message)
def progress_bar(items, errors, repairs, fix_value, cycle):
    # Emit a single in-place progress line for the current cycle.
    # `fix_value` toggles whether the repair count is included in the output.
    if fix_value:
        inline_print(
            "[%s] Cycle %s | Items Scanned: %12d | Errors: %6d | Repairs: %6d | " %
            (time.strftime("%Y-%m-%d %H:%M:%S"), cycle, items, errors, repairs))
    else:
        inline_print(
            "[%s] Cycle %s | Items Scanned: %12d | Errors: %6d | " %
            (time.strftime("%Y-%m-%d %H:%M:%S"), cycle, items, errors))
def reindex_dmd_objects(name, type, dmd, log):
    """Performs the reindex. Returns False if no issues encountered, otherwise True"""
    # NOTE(review): `type` is a dotted-path string resolved via eval(); only
    # trusted, hard-coded catalog paths should ever reach this function.
    try:
        inline_print("[%s] Reindexing/rebuilding %s ... " % (time.strftime("%Y-%m-%d %H:%M:%S"), name))
        if (name == "DeviceSearch"):
            # DeviceSearch is gone when the model catalog is in use.
            if USE_MODEL_CATALOG:
                raise Exception("DeviceSearch catalog is deprecated")
            print("\n")
            catalogReference = eval(type)
            catalogReference.refreshCatalog(clear=1, pghandler=StdoutHandler())
            print("finished")
            log.info("%s refreshCatalog() completed successfully", name)
        elif (name == 'Devices'):
            # Special case for Devices, using method from altReindex ZEN-10793
            log.info("Reindexing Devices")
            output_count = 0
            for dev in dmd.Devices.getSubDevicesGen_recursive():
                index_device(dev, dmd, log)
                output_count += 1
                # Drop the device from the ZODB cache to bound memory growth.
                dev._p_deactivate()
                transaction.commit()
                if (output_count % 10) == 0:
                    # sync after 10 devices
                    dmd._p_jar.sync()
                    if (output_count % 100) == 0:
                        log.debug("Device Reindex has passed %d devices" % (output_count))
                    inline_print("[%s] Reindexing %s ... %8d devices processed" %
                                 (time.strftime("%Y-%m-%d %H:%M:%S"), "Devices", output_count))
            inline_print("[%s] Reindexing %s ... finished " %
                         (time.strftime("%Y-%m-%d %H:%M:%S"), "Devices"))
            print ""
            log.info("%d Devices reindexed successfully" % (output_count))
        else:
            # Generic path: resolve the catalog object and rebuild its index.
            object_reference = eval(type)
            object_reference.reIndex()
            print("finished")
            log.info("%s reIndex() completed successfully", name)
        dmd._p_jar.sync()
        transaction.commit()
        return False
    except Exception as e:
        # Any failure is reported to the console briefly; details go to the log.
        print " FAILED (check log file for details)"
        log.error("%s.reIndex() failed" % (name))
        log.exception(e)
        return True
def reindex_dmd_objects(name, type, dmd, log):
    """Performs the reindex. Returns False if no issues encountered, otherwise True"""
    # NOTE(review): `type` is a dotted-path string resolved via eval(); only
    # trusted, hard-coded catalog paths should ever reach this function.
    try:
        inline_print("[%s] Reindexing/rebuilding %s ... " % (time.strftime("%Y-%m-%d %H:%M:%S"), name))
        if (name == "DeviceSearch"):
            print("\n")
            catalogReference = eval(type)
            catalogReference.refreshCatalog(clear=1, pghandler=StdoutHandler())
            print("finished")
            log.info("%s refreshCatalog() completed successfully", name)
        elif (name == 'Devices'):
            # Special case for Devices, using method from altReindex ZEN-10793
            log.info("Reindexing Devices")
            output_count = 0
            for dev in dmd.Devices.getSubDevicesGen_recursive():
                index_device(dev, dmd, log)
                output_count += 1
                # Drop the device from the ZODB cache to bound memory growth.
                dev._p_deactivate()
                transaction.commit()
                if (output_count % 10) == 0:
                    # sync after 10 devices
                    dmd._p_jar.sync()
                    if (output_count % 100) == 0:
                        log.debug("Device Reindex has passed %d devices" % (output_count))
                    inline_print("[%s] Reindexing %s ... %8d devices processed" %
                                 (time.strftime("%Y-%m-%d %H:%M:%S"), "Devices", output_count))
            inline_print("[%s] Reindexing %s ... finished " %
                         (time.strftime("%Y-%m-%d %H:%M:%S"), "Devices"))
            print ""
            log.info("%d Devices reindexed successfully" % (output_count))
        else:
            # Generic path: resolve the catalog object and rebuild its index.
            object_reference = eval(type)
            object_reference.reIndex()
            print("finished")
            log.info("%s reIndex() completed successfully", name)
        dmd._p_jar.sync()
        transaction.commit()
        return False
    except Exception as e:
        # Any failure is reported to the console briefly; details go to the log.
        print " FAILED (check log file for details)"
        log.error("%s.reIndex() failed" % (name))
        log.exception(e)
        return True
def scan_progress_message(done, fix, cycle, catalog, issues, chunk, log):
    '''Handle output to screen and logfile, remove output from scan_catalog logic'''
    now = time.strftime("%Y-%m-%d %H:%M:%S")
    # Log-file side: progress while running, a summary line once finished.
    if done:
        if issues > 0:
            log.warning("Scanned %s - found %d issue(s)" % (catalog, issues))
        else:
            log.info("No issues found scanning: %s" % (catalog))
        log.debug("Scan of %s catalog is complete" % (catalog))
    else:
        log.debug("Scan of %s catalog is %2d%% complete" % (catalog, 2 * chunk))
    # Screen side: exactly one inline message per call, chosen by state.
    if issues > 0 and fix and not done:
        inline_print("[%s] Cleaning [%-50s] %3d%% [%d Issues Detected]" %
                     (now, '=' * chunk, 2 * chunk, issues))
    elif issues > 0 and fix:
        inline_print("[%s] Clean #%2.0d [%-50s] %3.0d%% [%d Issues Detected]\n" %
                     (now, cycle, '=' * 50, 100, issues))
    elif issues > 0 and not done:
        inline_print("[%s] Scanning [%-50s] %3d%% [%d Issues Detected]" %
                     (now, '=' * chunk, 2 * chunk, issues))
    elif issues > 0:
        inline_print("[%s] WARNING [%-50s] %3.0d%% [%d Issues Detected]\n" %
                     (now, '=' * 50, 100, issues))
    elif not done:
        inline_print("[%s] Scanning [%-50s] %3d%% " %
                     (now, '=' * chunk, 2 * chunk))
    else:
        inline_print("[%s] Verified [%-50s] %3.0d%%\n" %
                     (now, '=' * 50, 100))
def scan_progress_message(done, fix, cycle, catalog, issues, total_number_of_issues, percentage, chunk, log): '''Handle output to screen and logfile, remove output from scan_catalog logic''' # Logic for log file output messages based on done, issues if not done: log.debug("Scan of %s catalog is %2d%% complete" % (catalog, 2 * chunk)) else: if issues > 0: log.warning("Scanned %s - found %d stale reference(s)" % (catalog, issues)) else: log.info("No stale references found scanning: %s" % (catalog)) log.debug("Scan of %s catalog is complete" % (catalog)) # Logic for screen output messages based on done, issues, and fix if issues > 0: if fix: if not done: inline_print( "[%s] Cleaning [%-50s] %3d%% [%d orphaned IPs are deleted]" % (time.strftime("%Y-%m-%d %H:%M:%S"), '=' * chunk, 2 * chunk, issues)) else: inline_print( "[%s] Clean #%2.0d [%-50s] %3.0d%% [%d orphaned IPs are deleted]\n" % (time.strftime("%Y-%m-%d %H:%M:%S"), cycle, '=' * 50, 100, issues)) else: if not done: inline_print( "[%s] Scanning [%-50s] %3d%% [%d orphaned IPs are detected]" % (time.strftime("%Y-%m-%d %H:%M:%S"), '=' * chunk, 2 * chunk, issues)) else: inline_print( "[%s] WARNING [%-50s] %3.0d%% [There are %d orphaned IPs (%.1f%%)]\n" % (time.strftime("%Y-%m-%d %H:%M:%S"), '=' * 50, 100, issues, percentage)) else: if not done: inline_print( "[%s] Scanning [%-50s] %3d%% " % (time.strftime("%Y-%m-%d %H:%M:%S"), '=' * chunk, 2 * chunk)) else: if (total_number_of_issues == 0): inline_print( "[%s] Verified [%-50s] %3.0d%% [No issues] \n" % (time.strftime("%Y-%m-%d %H:%M:%S"), '=' * 50, 100)) else: inline_print( "[%s] Verified [%-50s] %3.0d%% [%d orphaned IPs are deleted (%.1f%%)] \n" % (time.strftime("%Y-%m-%d %H:%M:%S"), '=' * 50, 100, total_number_of_issues, percentage))
def verify(self, root, log, number_of_issues):
    """Traverse the ZODB reference graph from `root`, loading every reachable
    oid once and reporting each POSKeyError (a dangling reference).

    Args:
        root: oid of the object to start the traversal from.
        log: logger handed to self.report() for detailed output.
        number_of_issues: counter object exposing .value() / .increment().

    Returns:
        Tuple of (number_of_issues, objects_scanned, total_database_size).
    """
    database_size = self._size
    scanned_count = 0
    # Progress bar is 50 chars wide; each chunk covers ~2% of the database.
    progress_bar_chunk_size = 1
    if (database_size > 50):
        progress_bar_chunk_size = (database_size // 50) + 1
    inline_print("[%s] Scanning [%-50s] %3d%% " %
                 (time.strftime("%Y-%m-%d %H:%M:%S"), '=' * 0, 0))
    seen = set()
    path = ()
    stack = deque([(root, path)])
    # Two-deque scheme: drain curstack while accumulating newly found refs in
    # stack, then swap when curstack empties.
    curstack, stack = stack, deque([])
    while curstack or stack:
        oid, path = curstack.pop()
        scanned_count = len(seen)
        if (scanned_count % progress_bar_chunk_size) == 0:
            chunk_number = scanned_count // progress_bar_chunk_size
            # BUGFIX: the condition was `> 2`, so a count of exactly 2 issues
            # fell through to the plain "Scanning" line and went unreported
            # until the end; `> 1` covers every plural count.
            if number_of_issues.value() > 1:
                inline_print("[%s] CRITICAL [%-50s] %3d%% [%d Dangling References]" %
                             (time.strftime("%Y-%m-%d %H:%M:%S"), '=' * chunk_number,
                              2 * chunk_number, number_of_issues.value()))
            elif number_of_issues.value() == 1:
                inline_print("[%s] CRITICAL [%-50s] %3d%% [%d Dangling Reference]" %
                             (time.strftime("%Y-%m-%d %H:%M:%S"), '=' * chunk_number,
                              2 * chunk_number, number_of_issues.value()))
            else:
                inline_print("[%s] Scanning [%-50s] %3d%% " %
                             (time.strftime("%Y-%m-%d %H:%M:%S"), '=' * chunk_number,
                              2 * chunk_number))
        if (oid not in seen):
            try:
                state = self._storage.load(oid)[0]
                seen.add(oid)
            except POSKeyError:
                # The oid is referenced but missing from storage: dangling ref.
                self.report(oid, path, log)
                number_of_issues.increment()
            else:
                # Queue every not-yet-seen outgoing reference for traversal.
                refs = get_refs(state)
                stack.extend((o, path + (o,)) for o in set(refs) - seen)
        if not curstack:
            curstack = stack
            stack = deque([])
    if number_of_issues.value() > 0:
        inline_print("[%s] CRITICAL [%-50s] %3.0d%% [%d Dangling References]\n" %
                     (time.strftime("%Y-%m-%d %H:%M:%S"), '=' * 50, 100,
                      number_of_issues.value()))
    else:
        inline_print("[%s] Verified [%-50s] %3.0d%%\n" %
                     (time.strftime("%Y-%m-%d %H:%M:%S"), '=' * 50, 100))
    return number_of_issues, len(seen), self._size
def main():
    '''Gathers metrics and statistics about the database that Zenoss uses for Zope/ZEP.'''
    execution_start = time.time()
    # Script name (without extension) keys the log file and the toolbox lock.
    scriptName = os.path.basename(__file__).split('.')[0]
    parser = ZenToolboxUtils.parse_options(
        scriptVersion, scriptName + scriptSummary + documentationURL)
    # Add in any specific parser arguments for %scriptName
    parser.add_argument("-n", "-t", "--times", action="store", default=1, type=int,
                        help="number of times to gather data")
    parser.add_argument("-g", "--gap", action="store", default=60, type=int,
                        help="gap between gathering subsequent datapoints")
    parser.add_argument("-l3", "--level3", action="store_true", default=False,
                        help="Data gathering for L3 (standardized parameters)")
    cli_options = vars(parser.parse_args())
    log, logFileName = ZenToolboxUtils.configure_logging(
        scriptName, scriptVersion, cli_options['tmpdir'])
    log.info("Command line options: %s" % (cli_options))
    if cli_options['debug']:
        log.setLevel(logging.DEBUG)
    print "\n[%s] Initializing %s v%s (detailed log at %s)" % \
        (time.strftime("%Y-%m-%d %H:%M:%S"), scriptName, scriptVersion, logFileName)
    # Attempt to get the zenoss.toolbox lock before any actions performed
    if not ZenToolboxUtils.get_lock("zenoss.toolbox.checkdbstats", log):
        sys.exit(1)
    # --level3 overrides the sampling parameters with the standard L3 profile
    # (120 samples, 60s apart, debug logging on).
    if cli_options['level3']:
        cli_options['times'] = 120
        cli_options['gap'] = 60
        cli_options['debug'] = True
    if cli_options['debug']:
        log.setLevel(logging.DEBUG)
    # Load up the contents of global.conf for using with MySQL
    global_conf_dict = parse_global_conf(
        os.environ['ZENHOME'] + '/etc/global.conf', log)
    # ZEN-19373: zencheckdbstats needs to take into account split databases
    databases_to_examine = []
    intermediate_dict = {
        'prettyName': "'zodb' Database",
        'host': global_conf_dict['zodb-host'],
        'port': global_conf_dict['zodb-port'],
        'admin-user': global_conf_dict['zodb-admin-user'],
        'admin-password': global_conf_dict['zodb-admin-password'],
        'database': global_conf_dict['zodb-db'],
        'mysql_results_list': []
    }
    # Only pass a unix socket when connecting locally and one is configured.
    if global_conf_dict['zodb-host'] == 'localhost':
        if 'zodb-socket' in global_conf_dict:
            intermediate_dict['socket'] = global_conf_dict['zodb-socket']
    databases_to_examine.append(intermediate_dict)
    # A separate ZEP host means a second (split) database to examine.
    if global_conf_dict['zodb-host'] != global_conf_dict['zep-host']:
        intermediate_dict = {
            'prettyName': "'zenoss_zep' Database",
            'host': global_conf_dict['zep-host'],
            'port': global_conf_dict['zep-port'],
            'admin-user': global_conf_dict['zep-admin-user'],
            'admin-password': global_conf_dict['zep-admin-password'],
            'database': global_conf_dict['zep-db'],
            'mysql_results_list': []
        }
        if global_conf_dict['zep-host'] == 'localhost':
            # No zep-socket param, use zodb-socket
            if 'zodb-socket' in global_conf_dict:
                intermediate_dict['socket'] = global_conf_dict['zodb-socket']
        databases_to_examine.append(intermediate_dict)
    # If running in debug, log global.conf, grab 'SHOW VARIABLES' and zends.cnf, if straightforward (localhost)
    if cli_options['debug']:
        if global_conf_dict['zodb-host'] == 'localhost':
            log_zends_conf(os.environ['ZENDSHOME'] + '/etc/zends.cnf', log)
        try:
            for item in databases_to_examine:
                mysql_connection = connect_to_mysql(item, log)
                log_MySQL_variables(mysql_connection, log)
                if mysql_connection:
                    mysql_connection.close()
                    log.info(
                        "Closed connection to MySQL/ZenDS for database %s at %s",
                        item['prettyName'], item['host'])
        except Exception as e:
            print "Exception encountered: ", e
            log.error(e)
            exit(1)
    # Sampling loop: gather statistics `times` times, `gap` seconds apart.
    sample_count = 0
    mysql_results_list = []
    while sample_count < cli_options['times']:
        sample_count += 1
        current_time = time.time()
        inline_print("[%s] Gathering MySQL/ZenDS metrics... (%d/%d)" %
                     (time.strftime(TIME_FORMAT), sample_count, cli_options['times']))
        try:
            for item in databases_to_examine:
                mysql_connection = connect_to_mysql(item, log)
                mysql_results = gather_MySQL_statistics(mysql_connection, log)
                item['mysql_results_list'].append((current_time, mysql_results))
                if mysql_connection:
                    mysql_connection.close()
                    log.info(
                        "Closed connection to MySQL/ZenDS for database %s at %s",
                        item['prettyName'], item['host'])
        except Exception as e:
            print "Exception encountered: ", e
            log.error(e)
            exit(1)
        # Sleep between samples, but not after the final one.
        if sample_count < cli_options['times']:
            time.sleep(cli_options['gap'])
    # Process and display results (calculate statistics)
    print("")
    for database in databases_to_examine:
        print("\n[%s] Results for %s:" %
              (time.strftime(TIME_FORMAT), database['prettyName']))
        log.info("[%s] Final Results for %s:",
                 time.strftime(TIME_FORMAT), database['prettyName'])
        # One time series per tracked metric, in display order.
        observed_results_dict = OrderedDict([])
        observed_results_dict['History List Length'] = [
            item[1]['history_list_length']
            for item in database['mysql_results_list']
        ]
        observed_results_dict['Bufferpool Used (%)'] = [
            item[1]['buffer_pool_used_percentage']
            for item in database['mysql_results_list']
        ]
        observed_results_dict['ACTIVE TRANSACTIONS'] = [
            item[1]['number_active_transactions']
            for item in database['mysql_results_list']
        ]
        observed_results_dict['ACTIVE TRANS > 100s'] = [
            item[1]['number_active_transactions_over']
            for item in database['mysql_results_list']
        ]
        for key in observed_results_dict:
            values = observed_results_dict[key]
            # Only show avg/min/max when the metric actually varied.
            if min(values) != max(values):
                output_message = "[{}] {}: {:<10} (Average {:.2f}, Minimum {}, Maximum {})".format(
                    time.strftime(TIME_FORMAT), key, values[-1],
                    float(sum(values) / len(values)), min(values), max(values))
            else:
                output_message = "[{}] {}: {}".format(
                    time.strftime(TIME_FORMAT), key, values[-1])
            print output_message
            log.info(output_message)
    # Print final status summary, update log file with termination block
    print("\n[%s] Execution finished in %s\n" %
          (time.strftime(TIME_FORMAT),
           datetime.timedelta(seconds=int(math.ceil(time.time() - execution_start)))))
    print("** Additional information and next steps at %s **\n" % documentationURL)
    log.info("zencheckdbstats completed in %1.2f seconds", time.time() - execution_start)
    log.info("############################################################")
    sys.exit(0)
def scan_progress_message(done, fix, cycle, catalog, issues, chunk, log): '''Handle output to screen and logfile, remove output from scan_catalog logic''' # Logic for log file output messages based on done, issues if not done: log.debug("Scan of %s catalog is %2d%% complete" % (catalog, 2*chunk)) else: if issues > 0: log.warning("Scanned %s - found %d issue(s)" % (catalog, issues)) else: log.info("No issues found scanning: %s" % (catalog)) log.debug("Scan of %s catalog is complete" % (catalog)) # Logic for screen output messages based on done, issues, and fix if issues > 0: if fix: if not done: inline_print("[%s] Cleaning [%-50s] %3d%% [%d Issues Detected]" % (time.strftime("%Y-%m-%d %H:%M:%S"), '='*chunk, 2*chunk, issues)) else: inline_print("[%s] Clean #%2.0d [%-50s] %3.0d%% [%d Issues Detected]\n" % (time.strftime("%Y-%m-%d %H:%M:%S"), cycle, '='*50, 100, issues)) else: if not done: inline_print("[%s] Scanning [%-50s] %3d%% [%d Issues Detected]" % (time.strftime("%Y-%m-%d %H:%M:%S"), '='*chunk, 2*chunk, issues)) else: inline_print("[%s] WARNING [%-50s] %3.0d%% [%d Issues Detected]\n" % (time.strftime("%Y-%m-%d %H:%M:%S"), '='*50, 100, issues)) else: if not done: inline_print("[%s] Scanning [%-50s] %3d%% " % (time.strftime("%Y-%m-%d %H:%M:%S"), '='*chunk, 2*chunk)) else: inline_print("[%s] Verified [%-50s] %3.0d%%\n" % (time.strftime("%Y-%m-%d %H:%M:%S"), '='*50, 100))
def main():
    '''Gathers metrics and statistics about the database that Zenoss uses for Zope/ZEP.'''
    execution_start = time.time()
    # Script name (without extension) keys the log file and the toolbox lock.
    scriptName = os.path.basename(__file__).split('.')[0]
    parser = ZenToolboxUtils.parse_options(scriptVersion,
                                           scriptName + scriptSummary + documentationURL)
    # Add in any specific parser arguments for %scriptName
    parser.add_argument("-n", "-t", "--times", action="store", default=1, type=int,
                        help="number of times to gather data")
    parser.add_argument("-g", "--gap", action="store", default=60, type=int,
                        help="gap between gathering subsequent datapoints")
    parser.add_argument("-l3", "--level3", action="store_true", default=False,
                        help="Data gathering for L3 (standardized parameters)")
    cli_options = vars(parser.parse_args())
    log, logFileName = ZenToolboxUtils.configure_logging(scriptName, scriptVersion,
                                                         cli_options['tmpdir'])
    log.info("Command line options: %s" % (cli_options))
    if cli_options['debug']:
        log.setLevel(logging.DEBUG)
    print "\n[%s] Initializing %s v%s (detailed log at %s)" % \
        (time.strftime("%Y-%m-%d %H:%M:%S"), scriptName, scriptVersion, logFileName)
    # Attempt to get the zenoss.toolbox lock before any actions performed
    if not ZenToolboxUtils.get_lock("zenoss.toolbox.checkdbstats", log):
        sys.exit(1)
    # --level3 overrides the sampling parameters with the standard L3 profile
    # (120 samples, 60s apart, debug logging on).
    if cli_options['level3']:
        cli_options['times'] = 120
        cli_options['gap'] = 60
        cli_options['debug'] = True
    if cli_options['debug']:
        log.setLevel(logging.DEBUG)
    # Load up the contents of global.conf for using with MySQL
    global_conf_dict = parse_global_conf(os.environ['ZENHOME'] + '/etc/global.conf', log)
    # ZEN-19373: zencheckdbstats needs to take into account split databases
    # NOTE(review): unlike the conditional-socket variant of this script, the
    # socket entry here is set unconditionally from zodb-socket.
    databases_to_examine = []
    intermediate_dict = {
        'prettyName': "'zodb' Database",
        'socket': global_conf_dict['zodb-socket'],
        'host': global_conf_dict['zodb-host'],
        'port': global_conf_dict['zodb-port'],
        'admin-user': global_conf_dict['zodb-admin-user'],
        'admin-password': global_conf_dict['zodb-admin-password'],
        'database': global_conf_dict['zodb-db'],
        'mysql_results_list': []
    }
    databases_to_examine.append(intermediate_dict)
    # A separate ZEP host means a second (split) database to examine.
    if global_conf_dict['zodb-host'] != global_conf_dict['zep-host']:
        intermediate_dict = {
            'prettyName': "'zenoss_zep' Database",
            'socket': global_conf_dict['zodb-socket'],
            'host': global_conf_dict['zep-host'],
            'port': global_conf_dict['zep-port'],
            'admin-user': global_conf_dict['zep-admin-user'],
            'admin-password': global_conf_dict['zep-admin-password'],
            'database': global_conf_dict['zep-db'],
            'mysql_results_list': []
        }
        databases_to_examine.append(intermediate_dict)
    # If running in debug, log global.conf, grab 'SHOW VARIABLES' and zends.cnf, if straightforward (localhost)
    if cli_options['debug']:
        if global_conf_dict['zodb-host'] == 'localhost':
            log_zends_conf(os.environ['ZENDSHOME'] + '/etc/zends.cnf', log)
        try:
            for item in databases_to_examine:
                mysql_connection = connect_to_mysql(item, log)
                log_MySQL_variables(mysql_connection, log)
                if mysql_connection:
                    mysql_connection.close()
                    log.info("Closed connection to MySQL/ZenDS for database %s at %s",
                             item['prettyName'], item['host'])
        except Exception as e:
            print "Exception encountered: ", e
            log.error(e)
            exit(1)
    # Sampling loop: gather statistics `times` times, `gap` seconds apart.
    sample_count = 0
    mysql_results_list = []
    while sample_count < cli_options['times']:
        sample_count += 1
        current_time = time.time()
        inline_print("[%s] Gathering MySQL/ZenDS metrics... (%d/%d)" %
                     (time.strftime(TIME_FORMAT), sample_count, cli_options['times']))
        try:
            for item in databases_to_examine:
                mysql_connection = connect_to_mysql(item, log)
                mysql_results = gather_MySQL_statistics(mysql_connection, log)
                item['mysql_results_list'].append((current_time, mysql_results))
                if mysql_connection:
                    mysql_connection.close()
                    log.info("Closed connection to MySQL/ZenDS for database %s at %s",
                             item['prettyName'], item['host'])
        except Exception as e:
            print "Exception encountered: ", e
            log.error(e)
            exit(1)
        # Sleep between samples, but not after the final one.
        if sample_count < cli_options['times']:
            time.sleep(cli_options['gap'])
    # Process and display results (calculate statistics)
    print("")
    for database in databases_to_examine:
        print("\n[%s] Results for %s:" % (time.strftime(TIME_FORMAT), database['prettyName']))
        log.info("[%s] Final Results for %s:", time.strftime(TIME_FORMAT), database['prettyName'])
        # One time series per tracked metric, in display order.
        observed_results_dict = OrderedDict([])
        observed_results_dict['History List Length'] = [item[1]['history_list_length']
                                                        for item in database['mysql_results_list']]
        observed_results_dict['Bufferpool Used (%)'] = [item[1]['buffer_pool_used_percentage']
                                                        for item in database['mysql_results_list']]
        observed_results_dict['ACTIVE TRANSACTIONS'] = [item[1]['number_active_transactions']
                                                        for item in database['mysql_results_list']]
        observed_results_dict['ACTIVE TRANS > 100s'] = [item[1]['number_active_transactions_over']
                                                        for item in database['mysql_results_list']]
        for key in observed_results_dict:
            values = observed_results_dict[key]
            # Only show avg/min/max when the metric actually varied.
            if min(values) != max(values):
                output_message = "[{}] {}: {:<10} (Average {:.2f}, Minimum {}, Maximum {})".format(
                    time.strftime(TIME_FORMAT), key, values[-1],
                    float(sum(values) / len(values)), min(values), max(values))
            else:
                output_message = "[{}] {}: {}".format(time.strftime(TIME_FORMAT), key, values[-1])
            print output_message
            log.info(output_message)
    # Print final status summary, update log file with termination block
    print("\n[%s] Execution finished in %s\n" %
          (time.strftime(TIME_FORMAT),
           datetime.timedelta(seconds=int(math.ceil(time.time() - execution_start)))))
    print("** Additional information and next steps at %s **\n" % documentationURL)
    log.info("zencheckdbstats completed in %1.2f seconds", time.time() - execution_start)
    log.info("############################################################")
    sys.exit(0)
def scan_progress_message(done, fix, cycle, catalog, issues,
                          total_number_of_issues, percentage, chunk, log):
    '''Handle output to screen and logfile, remove output from scan_catalog logic'''
    stamp = time.strftime("%Y-%m-%d %H:%M:%S")
    # Log-file side: progress while running, a summary line once finished.
    if done:
        if issues > 0:
            log.warning("Scanned %s - found %d stale reference(s)" % (catalog, issues))
        else:
            log.info("No stale references found scanning: %s" % (catalog))
        log.debug("Scan of %s catalog is complete" % (catalog))
    else:
        log.debug("Scan of %s catalog is %2d%% complete" % (catalog, 2 * chunk))
    # Screen side: exactly one inline message per call, chosen by state.
    if issues > 0:
        if fix:
            if done:
                inline_print("[%s] Clean #%2.0d [%-50s] %3.0d%% [%d orphaned IPs are deleted]\n" %
                             (stamp, cycle, '=' * 50, 100, issues))
            else:
                inline_print("[%s] Cleaning [%-50s] %3d%% [%d orphaned IPs are deleted]" %
                             (stamp, '=' * chunk, 2 * chunk, issues))
        elif done:
            inline_print("[%s] WARNING [%-50s] %3.0d%% [There are %d orphaned IPs (%.1f%%)]\n" %
                         (stamp, '=' * 50, 100, issues, percentage))
        else:
            inline_print("[%s] Scanning [%-50s] %3d%% [%d orphaned IPs are detected]" %
                         (stamp, '=' * chunk, 2 * chunk, issues))
    elif not done:
        inline_print("[%s] Scanning [%-50s] %3d%% " %
                     (stamp, '=' * chunk, 2 * chunk))
    elif total_number_of_issues == 0:
        inline_print("[%s] Verified [%-50s] %3.0d%% [No issues] \n" %
                     (stamp, '=' * 50, 100))
    else:
        inline_print("[%s] Verified [%-50s] %3.0d%% [%d orphaned IPs are deleted (%.1f%%)] \n" %
                     (stamp, '=' * 50, 100, total_number_of_issues, percentage))