def dumpDatabase(self, db, data=True):
    """Dumps the contents of a MySQL database using mysqldump, or only the
    database container (just the CREATE DATABASE statement) if data is False.
    """
    if data:
        dump_type = "data"
        dump_desc = "MySQL Database Contents"
    else:
        dump_type = "db"
        dump_desc = "MySQL Database Container"
    dump_filename = "%s_%s_%s.dump.%s" % (self._conf["filename_dump_db"],
                                          db, dump_type,
                                          self._conf["suffix_compress"])
    dump_path = os.path.join(self._conf["job_path"], dump_filename)
    args = [self._conf["cmd_mysqldump"]]
    args.extend(self._connArgs)
    if db in ("information_schema", "mysql"):
        # Avoid table locking on the system schemas.
        args.append("--skip-lock-tables")
    if not data:
        args.extend(["--no-create-info", "--no-data", "--databases"])
    args.append(db)
    logger.info("Starting dump of %s: %s"
                " Backup: %s", dump_desc, db, dump_path)
    returncode, out, err = self._execBackupCmd(args, self._env, #@UnusedVariable
                                               out_path=dump_path,
                                               out_compress=True)
    if returncode == 0:
        logger.info("Finished dump of %s: %s"
                    " Backup: %s", dump_desc, db, dump_path)
    else:
        raise errors.BackupError("Dump of %s for %s failed "
                                 "with error code: %s"
                                 % (dump_desc, db, returncode),
                                 *utils.splitMsg(err))
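# Rough shape of the command assembled above, with hypothetical connection
# arguments and configuration values (db="sales", filename_dump_db="mysql",
# suffix_compress="gz"); _execBackupCmd compresses stdout into the dump file:
#
#   mysqldump --host=dbhost --user=backup sales
#       -> <job_path>/mysql_sales_data.dump.gz
#
# With data=False, "--no-create-info --no-data --databases sales" is added
# instead, so the dump contains only the CREATE DATABASE statement.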
def postExec(self):
    """Executes post_exec script if defined in general options section
    of the configuration file.
    """
    dry_run = self._globalConf.get('dry_run', False)
    post_exec = self._globalConf.get('post_exec')
    if post_exec is not None:
        logmgr.setContext('POST-EXEC')
        logger.info("Executing general post-execution script.")
        execExternalCmd(post_exec.split(), None, dry_run)
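# Illustrative configuration fragment for the option read above; the section
# name and script path are assumptions, only the post_exec option name comes
# from the code:
#
#   [general]
#   post_exec = /usr/local/bin/backup-notify.sh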
def loggingInit(self):
    """Initializes logging at the beginning of the execution of the
    backup process.
    """
    if self._help is None:
        logmgr.setContext('STARTUP', self._globalConf.get('dry_run', False))
    else:
        logmgr.setContext('HELP')
    level = logmgr.getLogLevel(self._globalConf['console_loglevel'])
    logmgr.configConsole(level)
    if self._help is None:
        logger.info("Start Execution of Backup Jobs.")
def backupDirs(self):
    """Archives the configured list of paths into a compressed tar file,
    optionally generating an index of the archived files.
    """
    archive_filename = "%s.%s" % (self._conf['filename_archive'],
                                  self._conf['suffix_tgz'])
    index_filename = "%s.%s" % (self._conf['filename_archive'],
                                self._conf['suffix_index'])
    archive_path = os.path.join(self._conf['job_path'], archive_filename)
    backup_index = parse_value(self._conf.get('backup_index'), True)
    index_path = os.path.join(self._conf['job_path'], index_filename)
    base_dir = self._conf.get('base_dir')
    path_list = [os.path.normpath(path)
                 for path in re.split(r'\s*,\s*|\s+', self._conf['path_list'])]
    if 'exclude_patterns' in self._conf:
        exclude_patterns = re.split(r'\s*,\s*|\s+',
                                    self._conf['exclude_patterns'])
    else:
        exclude_patterns = None
    exclude_patterns_file = self._conf.get('exclude_patterns_file')
    logger.info("Starting backup of paths: %s", ', '.join(path_list))
    args = [self._conf['cmd_tar']]
    if base_dir is not None:
        if os.path.isdir(base_dir):
            args.extend(['-C', base_dir])
        else:
            raise errors.BackupConfigError("Invalid base directory "
                                           "(base_dir): %s" % base_dir)
    if backup_index:
        args.append('-v')
    if exclude_patterns is not None:
        for pattern in exclude_patterns:
            args.append("--exclude=%s" % pattern)
    if exclude_patterns_file is not None:
        if os.path.isfile(exclude_patterns_file):
            args.append("--exclude-from=%s" % exclude_patterns_file)
        else:
            raise errors.BackupConfigError("Invalid exclude patterns file: %s"
                                           % exclude_patterns_file)
    args.extend(['-zcf', archive_path])
    self._checkSrcPaths(path_list)
    args.extend(path_list)
    if backup_index:
        returncode, out, err = self._execBackupCmd(args, #@UnusedVariable
                                                   out_path=index_path)
    else:
        returncode, out, err = self._execBackupCmd(args) #@UnusedVariable
    if returncode == 0:
        logger.info("Finished backup of paths: %s", ', '.join(path_list))
    else:
        raise errors.BackupError("Backup of paths failed with error code: %s"
                                 % returncode,
                                 *utils.splitMsg(err))
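# Sketch of the resulting tar command line, using hypothetical configuration
# values (cmd_tar="tar", base_dir="/", backup_index enabled,
# path_list="etc, home"); the verbose file listing is captured into the index
# file via out_path:
#
#   tar -C / -v -zcf <job_path>/<filename_archive>.<suffix_tgz> etc home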
def loggingEnd(self):
    """Writes out the final log message before finalizing the execution
    of the backup process.
    """
    if self._help is None:
        logmgr.setContext('FINAL')
        logger.info("Finished Execution of %s Backup Jobs."
                    " Enabled/Disabled: %s / %s"
                    " Successful/Failed: %s / %s",
                    self._numJobs,
                    self._numJobs - self._numJobsDisabled,
                    self._numJobsDisabled,
                    self._numJobsSuccess,
                    self._numJobsError)
def dumpDatabase(self, db):
    """Dumps a single PostgreSQL database with pg_dump in custom archive
    format (-Fc), suitable for restoring with pg_restore.
    """
    dump_filename = "%s_%s.dump" % (self._conf['filename_dump_db'], db)
    dump_path = os.path.join(self._conf['job_path'], dump_filename)
    args = [self._conf['cmd_pg_dump'], '-w', '-Fc']
    args.extend(self._connArgs)
    args.extend(['-f', dump_path, db])
    logger.info("Starting dump of PostgreSQL Database: %s"
                " Backup: %s", db, dump_path)
    returncode, out, err = self._execBackupCmd(args, self._env) #@UnusedVariable
    if returncode == 0:
        logger.info("Finished dump of PostgreSQL Database: %s"
                    " Backup: %s", db, dump_path)
    else:
        raise errors.BackupError("Dump of PostgreSQL database %s failed "
                                 "with error code %s." % (db, returncode),
                                 *utils.splitMsg(err))
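# Approximate command produced above for a hypothetical database "sales" and
# hypothetical connection arguments; -w never prompts for a password and -Fc
# selects the custom archive format:
#
#   pg_dump -w -Fc --host=dbhost --username=backup \
#       -f <job_path>/<filename_dump_db>_sales.dump sales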
def syncDirs(self):
    """Synchronizes the configured source paths to the backup destination
    using rsync, optionally generating an index of the transferred files.
    """
    self._initSrc()
    self._initDest()
    compress = parse_value(self._conf.get("compress"), True)
    delete = parse_value(self._conf.get("delete"), False)
    backup_index = parse_value(self._conf.get("backup_index"), True)
    if "exclude_patterns" in self._conf:
        exclude_patterns = re.split(r"\s*,\s*|\s+",
                                    self._conf["exclude_patterns"])
    else:
        exclude_patterns = None
    exclude_patterns_file = self._conf.get("exclude_patterns_file")
    logger.info("Starting backup of paths: %s", ", ".join(self._path_list))
    args = [self._conf["cmd_rsync"]]
    if self._dryRun:
        args.append("-n")
    args.append("-aR")
    if compress:
        args.append("-z")
    if backup_index:
        args.append("-v")
        args.append("--stats")
    if delete:
        args.append("--delete")
    if exclude_patterns is not None:
        for pattern in exclude_patterns:
            args.append("--exclude=%s" % pattern)
    if exclude_patterns_file is not None:
        if os.path.isfile(exclude_patterns_file):
            args.append("--exclude-from=%s" % exclude_patterns_file)
        else:
            raise errors.BackupConfigError("Invalid exclude patterns file: %s"
                                           % exclude_patterns_file)
    if len(self._src_list) > 0:
        args.extend(self._src_list)
    else:
        raise errors.BackupConfigError("No valid source paths defined "
                                       "for backup.")
    args.append(self._archive_path)
    if backup_index:
        returncode, out, err = self._execBackupCmd(args, #@UnusedVariable
                                                   out_path=self._index_path,
                                                   force_exec=True)
    else:
        returncode, out, err = self._execBackupCmd(args, #@UnusedVariable
                                                   force_exec=True)
    if returncode == 0:
        logger.info("Finished backup of paths: %s", ", ".join(self._path_list))
    else:
        raise errors.BackupError("Backup of paths failed with error code: %s"
                                 % returncode,
                                 *utils.splitMsg(err))
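# Sketch of the rsync command assembled above, assuming hypothetical source
# paths and defaults (compress and backup_index enabled, delete disabled, no
# exclude patterns); -n is prepended only on dry runs:
#
#   rsync -aR -z -v --stats /etc /home/user <archive_path>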
def dumpGlobals(self):
    """Dumps the PostgreSQL global objects (roles and tablespaces) using
    pg_dumpall -g and compresses the output.
    """
    dump_path = os.path.join(self._conf['job_path'],
                             "%s.%s" % (self._conf['filename_dump_globals'],
                                        self._conf['suffix_compress']))
    args = [self._conf['cmd_pg_dumpall'], '-w', '-g']
    args.extend(self._connArgs)
    logger.info("Starting PostgreSQL Global Objects dump."
                " Backup: %s", dump_path)
    returncode, out, err = self._execBackupCmd(args, #@UnusedVariable
                                               self._env,
                                               out_path=dump_path,
                                               out_compress=True)
    if returncode == 0:
        logger.info("Finished PostgreSQL Global Objects dump."
                    " Backup: %s", dump_path)
    else:
        raise errors.BackupError("Dump failed with error code: %s" % returncode,
                                 *utils.splitMsg(err))
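# Approximate command for the globals dump, with hypothetical connection
# arguments; stdout is compressed into the dump file by _execBackupCmd:
#
#   pg_dumpall -w -g --host=dbhost --username=backup
#       -> <job_path>/<filename_dump_globals>.<suffix_compress>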
def runJobs(self):
    """Runs the requested backup jobs.

    Backup jobs are either explicitly listed on the command line or all
    active backup jobs in the configuration file are run.
    """
    dry_run = self._globalConf.get('dry_run', False)
    for job_name in self._jobs:
        self._numJobs += 1
        logmgr.setContext(job_name)
        job_conf = self._jobsConf.get(job_name)
        if job_conf is not None:
            active = parse_value(job_conf.get('active', 'yes'), True)
            if active:
                job_pre_exec = job_conf.get('job_pre_exec')
                job_post_exec = job_conf.get('job_post_exec')
                if job_pre_exec is not None:
                    logger.info("Executing job pre-execution script.")
                    try:
                        execExternalCmd(job_pre_exec.split(), None, dry_run)
                        job_pre_exec_ok = True
                    except errors.ExternalCmdError as e:
                        job_pre_exec_ok = False
                        job_ok = False
                        logger.error("Job pre-execution script failed.")
                        logger.error(e.desc)
                        for line in e:
                            logger.error(" %s", line)
                else:
                    job_pre_exec_ok = True
                if job_pre_exec_ok:
                    try:
                        logger.info("Starting execution of backup job.")
                        job = BackupJob(job_name, self._globalConf, job_conf)
                        job.run()
                        logger.info("Finished execution of backup job.")
                        job_ok = True
                    except errors.BackupError as e:
                        logger.error("Execution of backup job failed.")
                        job_ok = False
                        if e.trace:
                            raise
                        else:
                            if e.fatal:
                                level = logging.CRITICAL
                            else:
                                level = logging.ERROR
                            logger.log(level, e.desc)
                            for line in e:
                                logger.log(level, " %s", line)
                if job_post_exec is not None and job_pre_exec_ok:
                    logger.info("Executing job post-execution script.")
                    try:
                        execExternalCmd(job_post_exec.split(), None, dry_run)
                    except errors.ExternalCmdError as e:
                        job_ok = False
                        logger.error("Job post-execution script failed.")
                        logger.error(e.desc)
                        for line in e:
                            logger.error(" %s", line)
                if job_ok:
                    self._numJobsSuccess += 1
                else:
                    self._numJobsError += 1