def do_recover(self, arg, opts=None):
    '''Recover files through dar_manager'''
    cf = getattr(self.cf, opts.job)
    rpath = opts.rpath if opts.rpath else cf.recover_path
    s = Scheme(self.cf, opts.job)
    r = Report(opts.job, session=s.sess)
    # Make sure the dmd knows about every Full/Incremental catalog recorded
    # in the database before handing recovery over to dar_manager.
    self.logger.debug("Checking dmd sync..")
    bkp_count = r.get_catalogs(
        after=cf.catalog_begin,
        types=("Full", "Incremental")).count()
    self.logger.debug('Checking backup count on db.. %s' % bkp_count)
    dmd_count = len(s.load_dmd())
    self.logger.debug('Checking backup count on dmd.. %s' % dmd_count)
    if bkp_count != dmd_count:
        self.stdout.write("Outdated DMD, please rebuild it or recover "
                          "manually.\n")
        sys.exit(2)
    if opts.extract:
        cat = opts.jobid if opts.jobid else None
        try:
            s.recover_all(
                rpath, stdout=self.stdout, stderr=sys.stderr, catalog=cat)
        except RecoverError, e:
            sys.stderr.write("%s\n" % e.message)
            sys.exit(2)
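# A hypothetical shell invocation (flag spellings are inferred from the opts
# attributes above and may not match the actual option parser):
#
#   (dardrive) recover -j mysql -x -r /tmp/restore
#
# Without the extract flag the command only verifies that the dmd and the
# catalog database agree, which makes it a cheap consistency check.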
def do_dbrecover(self, arg, opts=None):
    '''Load a db backup to file or stdout.'''
    if not opts.id and not opts.job:
        sys.stderr.write("Please specify either -j or -i options.\n")
    else:
        if opts.id:
            r = Report(None)
            dmp = r.get_catalogs(catalog=opts.id)[0]
        else:
            r = Report(opts.job)
            dmp = r.get_catalogs(entries=1,
                                 types=('MysqlDump', 'gzMysqlDump'))[0]
        cs = getattr(self.cf, dmp.job.name)
        if dmp.enc and not cs.encryption:
            raise RecoverException(
                'Archive %s is encrypted, you must '
                'provide proper credentials in order to recover, or '
                'recover manually.' % dmp.id)
        dmp_file = os.path.join(
            cs.archive_store, dmp.job.name.encode(),
            "%s.1.%s" % (dmp.id.encode(), find_ext(dmp.type.name)))
        # Build the shell command; only the MysqlDump and gzMysqlDump
        # catalog types are handled here.
        if not cs.encryption:
            # unencrypted types
            if dmp.type.name == "gzMysqlDump":
                comm = 'zcat %s' % dmp_file
            elif dmp.type.name == "MysqlDump":
                comm = 'cat %s' % dmp_file
        else:
            # encrypted types
            pfile = mk_ssl_auth_file(cs.encryption.split(":")[1])
            if dmp.type.name == "gzMysqlDump":
                comm = 'openssl enc -in %s -d -aes-256-cbc -pass '\
                       'file:%s | gunzip' % (dmp_file, pfile)
            elif dmp.type.name == "MysqlDump":
                comm = 'openssl enc -in %s -d -aes-256-cbc -pass '\
                       'file:%s' % (dmp_file, pfile)
        if opts.filename == "-":
            retcode = subprocess.call(comm, shell=True)
        else:
            if os.path.exists(opts.filename):
                raise BackupDBException('Cowardly refusing to overwrite'
                                        ' %s.' % opts.filename)
            fd = os.open(opts.filename, os.O_WRONLY | os.O_CREAT, 0600)
            with os.fdopen(fd, 'w') as out:
                retcode = subprocess.call(comm, stdout=out, shell=True)
        if cs.encryption:
            os.unlink(pfile)
        if retcode != 0:
            sys.stderr.write("Recovery failed with status %s.\n" % retcode)
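# For reference, the encrypted gzMysqlDump branch above amounts to the
# following manual pipeline (paths illustrative; the pass file is the
# temporary one produced by mk_ssl_auth_file):
#
#   openssl enc -in <archive_store>/<job>/<id>.1.<ext> -d -aes-256-cbc \
#       -pass file:<pfile> | gunzip > dump.sql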
def do_dumpattr(self, arg, opts=None):
    '''Write extended attributes to every first slice in a job store.'''
    if opts.job not in self.cf.sections():
        self.stdout.write("Nonexistent job.\n")
    else:
        r = Report(opts.job)
        ct = r.get_catalogs()
        sect = getattr(self.cf, opts.job)
        for each in ct:
            self.logger.debug("Trying to xattr on %s" % each)
            save_xattr(each, sect)
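# Hypothetical usage (flag spelling assumed):
#
#   (dardrive) dumpattr -j mysql
#
# save_xattr() tags each first slice with catalog metadata, presumably so an
# untracked store can later be re-imported (see do_import below) if the
# database is lost.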
def do_rebuild_dmd(self, arg, opts=None):
    '''Re-creates the dmd for a given job.'''
    cf = getattr(self.cf, opts.job)
    sl = Scheme(self.cf, opts.job)
    lock = sl.lock("rebuild_dmd")
    self.logger.debug("Removing dmd for %s" % opts.job)
    dmdfile = os.path.expanduser("~/.dardrive/dmd/%s.dmd" % opts.job)
    if os.path.exists(dmdfile):
        os.unlink(dmdfile)
    # Start over with a fresh Scheme once the old dmd file is gone, then
    # replay every catalog recorded after catalog_begin.
    s = Scheme(self.cf, opts.job)
    r = Report(opts.job, session=s.sess)
    for cat in r.get_catalogs(after=cf.catalog_begin, order="asc"):
        s.add_to_dmd(cat.id)
    sl.sess.delete(lock)
    sl.sess.commit()
    s.sess.commit()
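# Hypothetical usage (flag spelling assumed):
#
#   (dardrive) rebuild_dmd -j mysql
#
# This is the remedy do_recover suggests when it finds the dmd out of sync
# with the catalog database.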
def do_parity(self, arg, opts):
    '''Generate "par2" error correction files.'''
    r = Report(None)
    ct = r.get_catalogs(catalog=opts.id).one()
    cf = getattr(self.cf, ct.job.name)
    if not cf.redundancy and opts.action == "create":
        raise ConfigSectionException(
            'Please enable redundancy for this job '
            'in order to build recovery information.')
    base = "%s/%s/%s.*" % (cf.archive_store, ct.job.name, opts.id)
    self.logger.debug(base)
    reobj = re.compile(r'''
        ^.*\.            # anything, ending in a dot
        (?P<slice>\d*)   # the slice number
        \.dar$           # ends in .dar
        ''', re.VERBOSE)
    for fname in glob.glob(base):
        self.logger.debug(fname)
        m = reobj.match(fname)
        if m:
            self.logger.debug("Slice matched for %s" % m.group('slice'))
            args = [str("%s/%s" % (cf.archive_store, ct.job.name)),
                    opts.id, str(m.group('slice')), 'dar', '']
            self.logger.debug(args)
            if opts.action == "create":
                mode = "Creating"
                args.append(str(cf.redundancy))
            else:
                mode = "Testing"
            try:
                dar_par(mode=mode, cmd=args)
            except SystemExit, e:
                status = "Ok" if e.code == 0 else "Archive needs repair!"
                self.stdout.write("Par exited with code %s, %s\n" %
                                  (e.code, status))
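# dar_par presumably drives the par2 toolchain under the hood; the two
# actions above roughly correspond to these manual commands (file names
# illustrative, 5 standing in for the job's "redundancy" percentage):
#
#   par2 create -r5 <archive_store>/<job>/<id>.1.dar
#   par2 verify <archive_store>/<job>/<id>.1.dar.par2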
def do_import(self, arg, opts=None):
    '''Import an untracked job store to db.'''
    if opts.job not in self.cf.sections():
        self.stdout.write("Nonexistent job.\n")
    else:
        cs = getattr(self.cf, opts.job)
        s = Scheme(self.cf, opts.job)
        lock = s.lock('import')
        i = Importer(self.cf, opts.job, session=s.sess)
        i.load()
        self.stdout.write("Job store imported.\n")
        self.stdout.write("Rebuilding the dmd database...\n")
        dmdfile = os.path.expanduser("~/.dardrive/dmd/%s.dmd" % opts.job)
        if os.path.exists(dmdfile):
            os.unlink(dmdfile)
        # Persist the import-lock release before replacing the session, as
        # do_rebuild_dmd does.
        s.sess.delete(lock)
        s.sess.commit()
        # A fresh Scheme re-creates the dmd, which is then repopulated with
        # every imported catalog dated on or after catalog_begin.
        s = Scheme(self.cf, opts.job)
        lock2 = s.lock('rebuild')
        r = Report(opts.job, session=s.sess)
        for cat in r.get_catalogs(order="asc"):
            if cat.date >= dt.strptime(cs.catalog_begin, "%Y-%m-%d"):
                s.add_to_dmd(cat.id)
        s.sess.delete(lock2)
        s.sess.commit()
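# Hypothetical usage against a pre-existing, untracked store (flag spelling
# assumed):
#
#   (dardrive) import -j mysql
#
# Importing ends with the same dmd rebuild that do_rebuild_dmd performs, so
# the imported catalogs are immediately usable by dar_manager recoveries.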
def do_show(self, arg, opts=None):
    '''Shows various listings'''
    # show jobs
    if opts.action == "jobs" and opts.long:
        if opts.job and opts.job not in self.cf.sections():
            raise ConfigSectionException(
                "Nonexistent job: %s" % opts.job)
        for sect in self.cf.sections():
            if opts.job and sect != opts.job:
                continue
            else:
                self.stdout.write('%s:\n' % sect)
                each_sect = getattr(self.cf, sect)
                for eopt in each_sect.options():
                    self.stdout.write('\t%s: %s\n' % (
                        eopt, getattr(each_sect, eopt)))
    elif opts.action == "jobs":
        self.stdout.write('\n'.join(self.cf.sections()) + "\n")
    elif opts.action == "ver":
        ver = pkg_resources.get_distribution("dardrive").version
        self.stdout.write('\ndardrive %s\n\n' % ver)
    # show stats
    elif opts.action == "stats":
        def stats(sect):
            if sect in self.cf.sections():
                r = Report(sect)
                self.stdout.write('%s: \n' % sect)
                for each in r.types():
                    last = r.last_run(backup_type=each)
                    avg = r.avg(backup_type=each)
                    self.stdout.write('\t %s backup: \n' % each)
                    self.stdout.write('\t\t last run time:\t%s\n' % last)
                    self.stdout.write('\t\t average time:\t%s\n' % avg)
            else:
                self.stdout.write("Can't find section %s!\n" % sect)

        if opts.job:
            stats(opts.job)
        else:
            for sect in self.cf.sections():
                try:
                    stats(sect)
                except NoResultFound:
                    self.stdout.write('There\'s no info on %s in the '
                                      'database.\n' % sect)
    # show logs
    elif opts.action == "logs":
        if opts.job and opts.id:
            self.stdout.write(
                " -c and -j are mutually exclusive options.\n")
        else:
            r = Report(opts.job)
            for cat in r.get_catalogs(catalog=opts.id, types=opts.type,
                                      entries=opts.num):
                log_str = "%s\n%s\n%s\n\n%s\n" % (
                    "=" * 32, cat.id, "=" * 32, cat.log)
                self.stdout.write(log_str)
    # show files
    elif opts.action == "files":
        if opts.id is None and opts.job is None:
            self.stdout.write("show files requires either "
                              "-j or -i options.\n")
        else:
            try:
                r = Report(opts.job)
            except NoResultFound:
                self.stdout.write("Nonexistent job: %s\n" % opts.job)
                return
            ct = r.get_catalogs(catalog=opts.id, types=opts.type)
            for each in ct:
                sect = getattr(self.cf, each.job.name)
                if not opts.base:
                    arc = glob.glob("%s/%s/%s.*" % (sect.archive_store,
                                                    each.job.name, each.id))
                    cat = glob.glob("%s/%s/%s.*" % (sect.catalog_store,
                                                    each.job.name, each.id))
                    for fl in arc + cat:
                        self.stdout.write("%s\n" % fl)
                else:
                    self.stdout.write("%s/%s/%s\n" % (sect.archive_store,
                                                      each.job.name,
                                                      each.id))
    # show archives
    elif opts.action == "archives":
        if opts.id:
            r = Report(opts.job)
            ar = r.s.query(Catalog).get(opts.id)
            if ar is not None:
                self.stdout.write("%s\n" % self._archive_info(ar))
            else:
                self.stdout.write("Nonexistent archive id.\n")
        else:
            r = Report(opts.job)
            ids = []
            names = []
            types = []
            dates = []
            statuses = []
            for cat in r.get_catalogs(types=opts.type, entries=opts.num):
                ids.append(cat.id)
                names.append(cat.job.name)
                types.append(cat.type.name)
                dates.append(cat.date.strftime("%d/%m/%y %H:%M:%S"))
                st = "%s" % cat.status
                if cat.status is None:
                    # A null status can mean the job is still running; check
                    # for a live lock holding this catalog id.
                    locks = r.s.query(
                        Lock).filter(Lock.cat_id == cat.id).all()
                    for l in locks:
                        if l.check_pid():
                            st = "Running"
                            break
                statuses.append(st)
            self.stdout.write("%s\n" % Table(
                Col("Archive Id", ids, "-"),
                Col("Job name", names),
                Col("Job type", types),
                Col("Created", dates),
                Col("Dar Status", statuses)))
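# Typical invocations; the "action" spellings come from the branches above,
# while the flag names are assumed:
#
#   (dardrive) show jobs -l
#   (dardrive) show stats -j mysql
#   (dardrive) show archives -n 10
#   (dardrive) show files -i <archive-id>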
        '-s', '--slice', required=True, type=int, help="Slice number")
    # Check whether we've been called as an entry-point console script.
    if cmd is not None:
        opts = parser.parse_args(cmd)
    else:
        opts = parser.parse_args()
    if sect is None:
        cf = Config("~/.dardrive/jobs.cfg", DARDRIVE_DEFAULTS)
        try:
            sect = getattr(cf, opts.job)
        except ConfigSectionException, e:
            sys.stderr.write('Invalid job name "%s"\n' % opts.job)
            sys.exit(1)
    r = Report(opts.job)
    try:
        cat = r.get_catalogs(catalog=opts.id).one()
    except NoResultFound, e:
        sys.stderr.write('Invalid backup id "%s"\n' % opts.id)
        sys.exit(1)
    # Move every file belonging to this slice (the .dar slice itself plus
    # any companion files sharing the same prefix) into the archive store.
    fname = "%s.%s.%s" % (cat.id, opts.slice, find_ext(cat.type.name))
    origin = os.path.join(sect.local_store, opts.job, fname)
    dest = os.path.join(sect.archive_store, opts.job)
    start_time = time.time()
    for each in glob.glob("%s*" % origin):
        if os.path.isdir(sect.archive_store):
            shutil.move(each, dest)
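# A hypothetical console-script invocation (entry-point name and flags
# assumed):
#
#   dardrive-move -j mysql -i <archive-id> -s 3
#
# The trailing glob catches companion files that share the slice's prefix
# (such as par2 recovery volumes) so they travel with the slice.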