def do_dumpattr(self, arg, opts=None):
    """Write extended attributes to every first slice in a job store.

    Looks up the job named in ``opts.job``; if it is not a configured
    section, prints an error to the command's stdout. Otherwise walks
    every catalog entry reported for the job and re-applies xattrs via
    ``save_xattr`` using that job's config section.

    :param arg: raw argument string from the command loop (unused here).
    :param opts: parsed options object; must provide ``opts.job``.
        NOTE(review): the ``opts=None`` default would crash on
        ``opts.job`` — presumably the cmd/cmd2 options decorator always
        supplies it; confirm against the command framework in use.
    """
    if opts.job not in self.cf.sections():
        # Fixed wording: "Unexistent" is not an English word.
        self.stdout.write("Nonexistent job.\n")
    else:
        report = Report(opts.job)
        catalogs = report.get_catalogs()
        # Per-job config section object used by save_xattr.
        sect = getattr(self.cf, opts.job)
        for entry in catalogs:
            self.logger.debug("Trying to xattr on %s" % entry)
            save_xattr(entry, sect)
def db_backup(self):
    """Dump all MySQL databases for this job into the archive store.

    Creates a Catalog row (optionally gzip-compressed and/or
    openssl-encrypted, per the job config), streams ``mysqldump
    --all-databases`` through a shell pipeline into a single ``.1``
    slice file, records timing/log/status on the catalog, optionally
    generates par2 redundancy data, and cleans up the temporary
    MySQL/SSL credential files and the job lock.

    :returns: the committed ``Catalog`` instance for this dump.
    :raises ConfigException: if MySQL dumps are not enabled for the job.
    :raises BackupDBException: if the MySQL server cannot be contacted.

    Fixes vs. previous revision:
      * removed a duplicated ``self.logger.debug(cmdline)`` call;
      * dropped the unused ``t =`` binding from ``check_call``;
      * octal literals modernised (``0722`` -> ``0o722``, same values,
        valid on Python 2.6+ and 3.x).
    """
    if not self.cf.mysql:
        raise ConfigException('MySQL dumps are not enabled '
                              'for this job.\n')
    lock = self.lock("db_backup")
    mpath = os.path.join(self.cf.archive_store, self.section)
    mkdir(mpath)

    # Pick backup type / slice extension according to compression.
    if self.cf.mysql_compr:
        btype_name = "gzMysqlDump"
        ext = ".dmp.gz"
    else:
        btype_name = "MysqlDump"
        ext = ".dmp"
    btype = self.get_or_create(BackupType, name=btype_name)
    cat = Catalog(type=btype, job=self.Job)
    self.sess.add(cat)
    lock.cat = cat
    self.sess.add(lock)
    # Commit now so cat.id is assigned and usable in file names.
    self.sess.commit()

    # We use a slice naming convention as in dar; in the future it would
    # be nice to be able to split dump files.
    file_name = os.path.join(mpath, cat.id + ".1" + ext)
    if self.cf.redundancy and self.cf.par_local:
        # NOTE(review): dest_file_name is assigned but never read below
        # (dead store in the original); kept for fidelity — confirm it
        # is not needed and remove.
        dest_file_name = file_name
        file_name = os.path.join(self.cf.local_store, self.section,
                                 cat.id + ".1" + ext)

    args = "mysqldump --defaults-extra-file=%s --single-transaction "
    args += "--all-databases -e --opt "
    # mk_mysql_auth_file will create the credentials config for the job.
    auth_file = mk_mysql_auth_file(
        id=cat.id.encode(), mysql_host=self.cf.mysql_host,
        mysql_user=self.cf.mysql_user, mysql_pass=self.cf.mysql_pass)
    self.logger.debug("Mysql authfile created: %s" % auth_file)
    args = shlex.split(args % auth_file)

    # Cheap connectivity probe before starting the real dump.
    testcmd = 'echo status | mysql --defaults-extra-file=%s' % auth_file
    try:
        subprocess.check_call(testcmd, shell=True,
                              **commands['popen_defaults'])
    except subprocess.CalledProcessError:
        self.logger.debug('Deleting auth_file %s' % auth_file)
        os.unlink(auth_file)
        # NOTE(review): the "db_backup" lock row is left behind on this
        # path — confirm whether it should be released here too.
        raise BackupDBException('Could not contact the mysql server, check'
                                ' your configuration/connectivity.\n')

    _dir_name = os.path.dirname(file_name)
    if not os.path.exists(_dir_name):
        # NOTE(review): 0o722 grants group/other write-only access, which
        # looks unusual for a backup dir — confirm the intended mode.
        os.makedirs(_dir_name, 0o722)
    # Owner-only file, created via os.open so the mode applies atomically.
    fd = os.open(file_name, os.O_WRONLY | os.O_CREAT, 0o600)
    with os.fdopen(fd, 'w') as dmpfile:
        # Build the shell pipeline: dump | [gzip] | [openssl].
        if self.cf.mysql_compr:
            cmdline = " ".join(args) + " | gzip - "
            if self.cf.encryption:
                pfile = mk_ssl_auth_file(self.cf.encryption.split(":")[1])
                cmdline += '| openssl aes-256-cbc -salt -pass file:%s' \
                    % pfile
                cat.enc = True
        elif self.cf.encryption:
            pfile = mk_ssl_auth_file(self.cf.encryption.split(":")[1])
            cmdline = " ".join(args) + \
                ' | openssl aes-256-cbc -salt -pass file:%s ' % pfile
            cat.enc = True
        else:
            cmdline = " ".join(args)
        # (Previously this line was duplicated; logged once now.)
        self.logger.debug(cmdline)
        p = subprocess.Popen(
            cmdline, stdin=None, stderr=subprocess.PIPE,
            stdout=dmpfile, shell=True)
        start_time = time.time()
        comm = p.communicate()
        end_time = time.time()
        cat.ttook = int(end_time - start_time)
        cat.log = "stdout:\n%s\nstderr:\n%s\n" % comm
        self.sess.commit()
        if p.returncode == 0:
            if self.cf.redundancy:
                if self.cf.par_local:
                    store = self.cf.local_store
                else:
                    store = self.cf.archive_store
                fpath = str("%s/%s" % (store, cat.job.name))
                dar_par(mode="Creating",
                        cmd=[fpath, cat.id.encode(), '1', ext[1:], '',
                             str(self.cf.redundancy)])
                if self.cf.par_local:
                    dar_move(cmd=["-j", self.section, "-i",
                                  cat.id.encode(), "-s", "1"],
                             sect=self.cf)
            self.save_stats(cat)
            cat.clean = True
        cat.status = p.returncode
        dmpfile.flush()

    save_xattr(cat, self.cf)
    self.sess.commit()
    self.logger.debug("Deleting authfile %s.." % auth_file)
    os.unlink(auth_file)
    # pfile only exists when encryption was configured above.
    if self.cf.encryption:
        if os.path.exists(pfile):
            os.unlink(pfile)
    self.sess.delete(lock)
    self.sess.commit()
    return cat