def test_pbench_logger():
    """Verify that a message logged through the pbench logger lands in the
    expected per-logger-type log file with the expected contents.

    Relies on module-level test fixtures: ``cfg_name``, ``logdir``,
    ``log_files`` / ``log_msgs`` (maps keyed by logger type), and
    ``mock_the_handler`` which redirects the logger's handler to a file.
    """
    config = PbenchConfig(cfg_name)
    logger = get_pbench_logger(_NAME_, config)
    logger_type = config.get("logging", "logger_type")
    logger = mock_the_handler(logger, logger_type, log_files[logger_type])
    logger.debug(log_msgs[logger_type])
    log_file = os.path.join(logdir, log_files[logger_type])
    # BUGFIX: previously the assertion was wrapped in
    # `if os.path.isfile(...)`, so a missing log file made the test
    # silently pass.  Assert existence so that failure mode is caught.
    assert os.path.isfile(log_file), \
        "Expected log file does not exist: {}".format(log_file)
    with open(log_file, 'r') as f:
        # Drop the trailing newline the logging handler appends before
        # comparing against the raw message.
        assert f.read()[:-1] == log_msgs[logger_type], \
            "Mismatch: the file did not contain the expected message."
help="The type of report document to index, one of status|error") parser.add_argument( "file_to_index", nargs=1, help="The file containing the report to index") parsed = parser.parse_args() from pbench import report_status, PbenchConfig, BadConfig, get_es, \ get_pbench_logger, PbenchTemplates try: config = PbenchConfig(parsed.cfg_name) except BadConfig as e: print("{}: {}".format(_prog, e), file=sys.stderr) sys.exit(1) logger = get_pbench_logger(_prog, config) try: es = get_es(config, logger) idx_prefix = config.get('Indexing', 'index_prefix') except Exception: # If we don't have an Elasticsearch configuration just pass None es = None idx_prefix = None else: _dir = os.path.dirname(os.path.abspath(sys.argv[0])) templates = PbenchTemplates(_dir, idx_prefix, logger) templates.update_templates(es, 'server-reports') status = report_status(es, logger, config.LOGSDIR, idx_prefix, parsed.name, parsed.timestamp, parsed.doctype, parsed.file_to_index[0]) sys.exit(status)
def main():
    """Audit the consistency of the pbench ARCHIVE, local BACKUP, and S3
    backup trees.

    Builds tarball entry lists for each tree (S3 only when reachable),
    verifies MD5 integrity of the two local trees, cross-compares the
    entry lists, and posts a report (with per-phase start timestamps)
    via the Report mechanism.

    Returns 2 when no config file is specified, 1 on configuration
    errors, otherwise the accumulated count of audit failures (0 means
    everything checked out).
    """
    cfg_name = os.environ.get("_PBENCH_SERVER_CONFIG")
    if not cfg_name:
        print(
            "{}: ERROR: No config file specified; set _PBENCH_SERVER_CONFIG env variable or"
            " use --config <file> on the command line".format(_NAME_),
            file=sys.stderr)
        return 2
    try:
        config = PbenchConfig(cfg_name)
    except BadConfig as e:
        print("{}: {}".format(_NAME_, e), file=sys.stderr)
        return 1
    logger = get_pbench_logger(_NAME_, config)
    archive = config.ARCHIVE
    if not os.path.isdir(archive):
        logger.error(
            "The setting for ARCHIVE in the config file is {}, but that is"
            " not a directory", archive)
        return 1

    # add a BACKUP field to the config object
    config.BACKUP = backup = config.conf.get("pbench-server",
                                             "pbench-backup-dir")
    if len(backup) == 0:
        logger.error(
            "Unspecified backup directory, no pbench-backup-dir config in"
            " pbench-server section")
        return 1
    if not os.path.isdir(backup):
        logger.error(
            "The setting for BACKUP in the config file is {}, but that is"
            " not a directory", backup)
        return 1

    # Instantiate the S3 configuration; sanity_check() returns None when
    # the S3 backup service is unusable, which the code below treats as
    # "skip all S3 work".
    s3_config_obj = S3Config(config, logger)
    s3_config_obj = sanity_check(s3_config_obj, logger)

    logger.info('start-{}', config.TS)
    start = config.timestamp()
    prog = os.path.basename(sys.argv[0])

    sts = 0
    # N.B. tmpdir is the pathname of the temp directory.
    with tempfile.TemporaryDirectory() as tmpdir:
        archive_obj = BackupObject("ARCHIVE", config.ARCHIVE, tmpdir, logger)
        local_backup_obj = BackupObject("BACKUP", config.BACKUP, tmpdir, logger)
        s3_backup_obj = BackupObject("S3", s3_config_obj, tmpdir, logger)

        with tempfile.NamedTemporaryFile(mode='w+t', dir=tmpdir) as reportfp:
            reportfp.write("{}.{} ({}) started at {}\n".format(
                prog, config.TS, config.PBENCH_ENV, start))
            if s3_config_obj is None:
                reportfp.write(
                    "\nNOTICE: S3 backup service is inaccessible; skipping"
                    " ARCHIVE to S3 comparison\n\n")

            # FIXME: Parallelize these three ...

            # Create entry list for archive
            logger.debug('Starting archive list creation')
            ar_start = config.timestamp()
            ret_sts = archive_obj.entry_list_creation()
            if ret_sts == Status.FAIL:
                sts += 1
            logger.debug('Finished archive list ({!r})', ret_sts)

            # Create entry list for backup
            logger.debug('Starting local backup list creation')
            lb_start = config.timestamp()
            ret_sts = local_backup_obj.entry_list_creation()
            if ret_sts == Status.FAIL:
                sts += 1
            logger.debug('Finished local backup list ({!r})', ret_sts)

            # Create entry list for S3
            if s3_config_obj is not None:
                logger.debug('Starting S3 list creation')
                s3_start = config.timestamp()
                ret_sts = s3_backup_obj.entry_list_creation()
                if ret_sts == Status.FAIL:
                    sts += 1
                logger.debug('Finished S3 list ({!r})', ret_sts)

            logger.debug('Checking MD5 signatures of archive')
            ar_md5_start = config.timestamp()
            try:
                # Check the data integrity in ARCHIVE (Question 1).
                md5_result_archive = archive_obj.checkmd5()
            except Exception as ex:
                msg = "Failed to check data integrity of ARCHIVE ({})".format(
                    config.ARCHIVE)
                logger.exception(msg)
                reportfp.write("\n{} - '{}'\n".format(msg, ex))
                sts += 1
            else:
                if md5_result_archive > 0:
                    # Create a report for failed MD5 results from ARCHIVE (Question 1)
                    archive_obj.report_failed_md5(reportfp)
                    sts += 1
            logger.debug('Finished checking MD5 signatures of archive')

            logger.debug('Checking MD5 signatures of local backup')
            lb_md5_start = config.timestamp()
            try:
                # Check the data integrity in BACKUP (Question 2).
                md5_result_backup = local_backup_obj.checkmd5()
            except Exception as ex:
                msg = "Failed to check data integrity of BACKUP ({})".format(
                    config.BACKUP)
                logger.exception(msg)
                reportfp.write("\n{} - '{}'\n".format(msg, ex))
                # BUGFIX: count this failure in the exit status like the
                # structurally identical ARCHIVE path above does;
                # previously the failure was reported but not counted.
                sts += 1
            else:
                if md5_result_backup > 0:
                    # Create a report for failed MD5 results from BACKUP (Question 2)
                    local_backup_obj.report_failed_md5(reportfp)
                    sts += 1
            logger.debug('Finished checking MD5 signatures of local backup')

            # Compare ARCHIVE with BACKUP (Questions 3 and 3a).
            msg = "Comparing ARCHIVE with BACKUP"
            reportfp.write("\n{}\n{}\n".format(msg, "-" * len(msg)))
            compare_entry_lists(archive_obj, local_backup_obj, reportfp)

            if s3_config_obj is not None:
                # Compare ARCHIVE with S3 (Questions 4, 4a, and 4b).
                msg = "Comparing ARCHIVE with S3"
                reportfp.write("\n{}\n{}\n".format(msg, "-" * len(msg)))
                compare_entry_lists(archive_obj, s3_backup_obj, reportfp)

            if s3_config_obj is None:
                s3_start = "<skipped>"
            reportfp.write("\n\nPhases (started):\n"
                           "Archive List Creation: {}\n"
                           "Local Backup List Creation: {}\n"
                           "S3 List Creation: {}\n"
                           "Archive MD5 Checks: {}\n"
                           "Local Backup MD5 Checks: {}\n".format(
                               ar_start, lb_start, s3_start, ar_md5_start,
                               lb_md5_start))

            end = config.timestamp()
            reportfp.write("\n{}.{} ({}) finished at {}\n".format(
                prog, config.TS, config.PBENCH_ENV, end))

            # Rewind to the beginning.
            reportfp.seek(0)

            report = Report(config, _NAME_)
            report.init_report_template()
            try:
                report.post_status(config.timestamp(), "status", reportfp.name)
            except Exception:
                # Posting the report is best-effort; the audit result is
                # still reflected in the return status.
                pass

    logger.info('end-{}', config.TS)
    return sts
def main():
    """Audit the pbench ARCHIVE tree against the local BACKUP tree and
    the S3 backup.

    Builds tarball entry lists for all three locations, verifies MD5
    integrity of ARCHIVE and BACKUP, cross-compares the entry lists,
    and posts the resulting report.

    Returns 2 when no config file is specified, 1 on configuration
    errors, otherwise the accumulated count of audit failures.
    """
    cfg_name = os.environ.get("CONFIG")
    if not cfg_name:
        print("{}: ERROR: No config file specified; set CONFIG env variable or"
              " use --config <file> on the command line".format(_NAME_),
              file=sys.stderr)
        return 2
    try:
        config = PbenchConfig(cfg_name)
    except BadConfig as e:
        print("{}: {}".format(_NAME_, e), file=sys.stderr)
        return 1
    logger = get_pbench_logger(_NAME_, config)
    archive = config.ARCHIVE
    if not os.path.isdir(archive):
        logger.error(
            "The setting for ARCHIVE in the config file is {}, but that is not a directory",
            archive)
        return 1
    # add a BACKUP field to the config object
    config.BACKUP = backup = config.conf.get("pbench-server",
                                             "pbench-backup-dir")
    if len(backup) == 0:
        logger.error(
            "Unspecified backup directory, no pbench-backup-dir config in pbench-server section"
        )
        return 1
    if not os.path.isdir(backup):
        logger.error(
            "The setting for BACKUP in the config file is {}, but that is not a directory",
            backup)
        return 1
    # instantiate the s3config class
    # NOTE(review): sanity_check() appears to return None when the S3
    # service is unusable (see the `is None` checks below) — confirm.
    s3_config_obj = S3Config(config, logger)
    s3_config_obj = sanity_check(s3_config_obj, logger)
    logger.info('start-{}', config.TS)
    prog = os.path.basename(sys.argv[0])
    sts = 0
    # N.B. tmpdir is the pathname of the temp directory.
    with tempfile.TemporaryDirectory() as tmpdir:
        archive_obj = BackupObject("ARCHIVE", config.ARCHIVE)
        local_backup_obj = BackupObject("BACKUP", config.BACKUP)
        s3_backup_obj = BackupObject("S3", s3_config_obj)
        # Create entry list for archive
        archive_entry_list = entry_list_creation(archive_obj, config.ARCHIVE,
                                                 logger)
        if archive_entry_list == Status.FAIL:
            sts += 1
        # Create entry list for backup
        backup_entry_list = entry_list_creation(local_backup_obj,
                                                config.BACKUP, logger)
        if backup_entry_list == Status.FAIL:
            sts += 1
        # Create entry list for S3
        s3_entry_list = entry_list_creation_s3(s3_config_obj, logger)
        if s3_entry_list == Status.FAIL:
            sts += 1
        with tempfile.NamedTemporaryFile(mode='w+t', dir=tmpdir) as reportfp:
            # Report header: "<prog>.<timestamp>(<environment>)".
            reportfp.write("{}.{}({})\n".format(prog, config.TS,
                                                config.PBENCH_ENV))
            try:
                # Check the data integrity in ARCHIVE (Question 1).
                md5_result_archive = checkmd5(config.ARCHIVE, tmpdir,
                                              archive_obj, logger)
            except Exception:
                msg = "Failed to check data integrity of ARCHIVE ({})".format(
                    config.ARCHIVE)
                logger.exception(msg)
                reportfp.write("{}\n".format(msg))
                sts += 1
            else:
                if md5_result_archive > 0:
                    # Create a report for failed MD5 results from ARCHIVE (Question 1)
                    report_failed_md5(archive_obj, tmpdir, reportfp, logger)
                    sts += 1
            try:
                # Check the data integrity in BACKUP (Question 2).
                md5_result_backup = checkmd5(config.BACKUP, tmpdir,
                                             local_backup_obj, logger)
            except Exception:
                msg = "Failed to check data integrity of BACKUP ({})".format(
                    config.BACKUP)
                logger.exception(msg)
                reportfp.write("{}\n".format(msg))
                # NOTE(review): unlike the ARCHIVE path above, this
                # exception branch does not increment sts — confirm
                # whether that is intentional.
            else:
                if md5_result_backup > 0:
                    # Create a report for failed MD5 results from BACKUP (Question 2)
                    report_failed_md5(local_backup_obj, tmpdir, reportfp,
                                      logger)
                    sts += 1
            # Compare ARCHIVE with BACKUP (Questions 3 and 3a).
            compare_entry_lists(archive_obj, local_backup_obj,
                                archive_entry_list, backup_entry_list,
                                reportfp)
            if s3_config_obj is None:
                reportfp.write('S3 backup service is inaccessible.\n')
            else:
                # Compare ARCHIVE with S3 (Questions 4, 4a, and 4b).
                compare_entry_lists(archive_obj, s3_backup_obj,
                                    archive_entry_list, s3_entry_list,
                                    reportfp)
            # Rewind to the beginning.
            reportfp.seek(0)
            report = Report(config, _NAME_)
            report.init_report_template()
            try:
                report.post_status(config.timestamp(), "status", reportfp.name)
            except Exception:
                # Best-effort: a failure posting the report is swallowed.
                pass
    logger.info('end-{}', config.TS)
    return sts
def main():
    """Drive the pbench tarball backup to the local backup directory and
    to S3, then post a summary report of the per-category counts.

    Returns 2 when no config file is specified, 1 on configuration
    errors, 3 when neither backup target is usable, and 0 otherwise.
    """
    cfg_name = os.environ.get("_PBENCH_SERVER_CONFIG")
    if not cfg_name:
        print(
            "{}: ERROR: No config file specified; set _PBENCH_SERVER_CONFIG env variable or"
            " use --config <file> on the command line".format(_NAME_),
            file=sys.stderr)
        return 2
    try:
        config = PbenchConfig(cfg_name)
    except BadConfig as e:
        print("{}: {}".format(_NAME_, e), file=sys.stderr)
        return 1
    logger = get_pbench_logger(_NAME_, config)

    # Add a BACKUP and QDIR field to the config object
    config.BACKUP = config.conf.get("pbench-server", "pbench-backup-dir")
    config.QDIR = config.get('pbench-server', 'pbench-quarantine-dir')

    # call the LocalBackupObject class
    lb_obj = LocalBackupObject(config)
    # call the S3Config class
    s3_obj = S3Config(config, logger)

    lb_obj, s3_obj = sanity_check(lb_obj, s3_obj, config, logger)
    if lb_obj is None and s3_obj is None:
        # Neither the local backup target nor S3 is usable.
        return 3

    # CONSISTENCY FIX: use the logger's deferred brace-style formatting
    # (as the other commands in this file do) rather than eagerly
    # calling str.format().
    logger.info('start-{}', config.TS)

    # Initiate the backup
    counts = backup_data(lb_obj, s3_obj, config, logger)

    result_string = ("Total processed: {},"
                     " Local backup successes: {},"
                     " Local backup failures: {},"
                     " S3 upload successes: {},"
                     " S3 upload failures: {},"
                     " Quarantined: {}".format(counts.ntotal,
                                               counts.nbackup_success,
                                               counts.nbackup_fail,
                                               counts.ns3_success,
                                               counts.ns3_fail,
                                               counts.nquaran))
    logger.info(result_string)

    prog = os.path.basename(sys.argv[0])

    # prepare and send report
    with tempfile.NamedTemporaryFile(mode='w+t', dir=config.TMP) as reportfp:
        reportfp.write("{}.{}({})\n{}\n".format(prog, config.timestamp(),
                                                config.PBENCH_ENV,
                                                result_string))
        # Rewind so post_status() can read the report from the start.
        reportfp.seek(0)
        report = Report(config, _NAME_)
        report.init_report_template()
        try:
            report.post_status(config.timestamp(), "status", reportfp.name)
        except Exception:
            # Best-effort: failing to post the report must not fail the
            # backup itself.
            pass

    logger.info('end-{}', config.TS)
    return 0
# Command-line definitions for the report-status helper: what program is
# reporting, when, what kind of document, and which file to index.
parser.add_argument(
    "-n", "--name", dest="name", required=True,
    help="Specify name of program reporting its status")
parser.add_argument(
    "-t", "--timestamp", dest="timestamp", required=True,
    help="The timestamp that should be associated with the file to index,"
    " takes the form run-<yyyy>-<mm>-<dd>T<HH>:<MM>:<SS>-<TZ>")
parser.add_argument(
    "-T", "--type", dest="doctype", required=True,
    help="The type of report document to index, one of status|error")
parser.add_argument(
    "file_to_index", nargs=1,
    help="The file containing the report to index")
parsed = parser.parse_args()

# Import pbench only after argument parsing has succeeded.
# NOTE(review): presumably so `--help`/argument errors work without a
# fully configured pbench environment — confirm against script header.
from pbench import init_report_template, report_status, PbenchConfig, \
    BadConfig, get_es, get_pbench_logger, PbenchTemplates

try:
    config = PbenchConfig(parsed.cfg_name)
except BadConfig as e:
    print("{}: {}".format(_prog, e), file=sys.stderr)
    sys.exit(1)
logger = get_pbench_logger(_prog, config)

# Resolve the Elasticsearch connection and index prefix (es may be None,
# in which case report_status falls back to logging-only behavior).
es, idx_prefix = init_report_template(config, logger)
status = report_status(es, logger, config.LOGSDIR, idx_prefix, parsed.name,
                       parsed.timestamp, parsed.doctype,
                       parsed.file_to_index[0])
sys.exit(status)
def __init__(
        self,
        config,
        name,
        es=None,
        pid=None,
        group_id=None,
        user_id=None,
        hostname=None,
        version=None,
        templates=None,
):
    """Construct a Report object for posting status documents.

    :param config: PbenchConfig-like object (provides COMMIT_ID, BINDIR,
        `_unittests` flag, and `get()` for the Indexing section)
    :param name: name of the reporting program (used for the logger)
    :param es: optional pre-built Elasticsearch client; when None it is
        fetched from the configuration (falling back to logging-only)
    :param pid/group_id/user_id/hostname: optional overrides for the
        environmental data recorded in the tracking document
    :param version: optional version string recorded in the document
    :param templates: optional pre-built PbenchTemplates object
    """
    self.config = config
    self.name = name
    self.logger = pbench.get_pbench_logger(name, config)

    # We always create a base "tracking" document composed of parameters
    # from the caller, and other environmental data. This document is used
    # as the foundation for the first document posted to the target
    # Elasticsearch instance with the `post_status()` method. All
    # subsequent calls to the `post_status()` method will use that first
    # document ID as their parent document ID. This allows us to have
    # multiple status updates associated with the initial Report() caller.
    if config._unittests:
        # Fixed values keep unit-test documents deterministic.
        _hostname = "example.com"
        _pid = 42
        _group_id = 43
        _user_id = 44
    else:
        _hostname = hostname if hostname else socket.gethostname()
        _pid = pid if pid else os.getpid()
        _group_id = group_id if group_id else os.getgid()
        _user_id = user_id if user_id else os.getuid()
    # Use a dict literal instead of dict() over a list of 2-tuples: same
    # mapping and insertion order, without the intermediate list (C406).
    self.generated_by = {
        "commit_id": self.config.COMMIT_ID,
        "group_id": _group_id,
        "hostname": _hostname,
        "pid": _pid,
        "user_id": _user_id,
        "version": version if version else "",
    }
    # The "tracking_id" is the final MD5 hash of the first document
    # indexed via the `post_status()` method.
    self.tracking_id = None
    try:
        self.idx_prefix = config.get("Indexing", "index_prefix")
    except (NoOptionError, NoSectionError):
        # No index prefix so reporting will be performed via logging.
        self.idx_prefix = None
        self.es = None
    else:
        if es is None:
            try:
                self.es = get_es(config, self.logger)
            except Exception:
                self.logger.exception("Unexpected failure fetching"
                                      " Elasticsearch configuration")
                # If we don't have an Elasticsearch configuration just use
                # None to indicate logging should be used instead.
                self.es = None
        else:
            self.es = es
    if templates is not None:
        self.templates = templates
    else:
        self.templates = PbenchTemplates(self.config.BINDIR,
                                         self.idx_prefix, self.logger)