def new_saveset(self, host, volume):
    """Set up new saveset entry in database

    Args:
        host (str): host from which to copy files
        volume (str): volume path of destination
    Returns:
        dict: id and name of new record in saveset table
    """
    # BUGFIX: validate inputs BEFORE querying; previously the guard sat
    # after the queries, where .one() would already have raised
    # NoResultFound for a bad host/volume and the guard was unreachable.
    if not host or not volume:
        sys.exit('Invalid host or volume')
    # Query result is unused beyond confirming the volume exists
    # (.one() raises if it does not).
    vol = self.session.query(Volume).filter_by(volume=volume).one()  # noqa: F841
    host_record = self.session.query(Host).filter_by(hostname=host).one()
    backup_host_record = self.session.query(Host).filter_by(
        hostname=self.backup_host).one()
    saveset = Saveset(
        saveset='%(host)s-%(volume)s-%(date)s' % dict(
            host=host, volume=volume,
            date=Syslog._now().strftime('%Y%m%d-%H')),
        location=Constants.SYNC_PATH,
        host=host_record,
        backup_host=backup_host_record)
    try:
        self.session.add(saveset)
        self.session.commit()
    except sqlalchemy.exc.IntegrityError as ex:
        # Duplicate saveset name means this interval already ran
        if 'Duplicate entry' in str(ex):
            sys.exit('ERROR: duplicate saveset=%s' % saveset.saveset)
    Syslog.logger.info('START saveset=%s' % saveset.saveset)
    return dict(id=saveset.id, saveset=saveset.saveset)
def test_syslog_error(self, mock_logger, mock_stderr):
    """Logging an error should echo it to stderr with an 'ERROR:' prefix."""
    # BUGFIX: mkstemp returns (fd, name); taking only [1] leaked the
    # open descriptor. Close it before handing the path to Syslog.
    fd, logfile_name = tempfile.mkstemp(prefix='_test')
    os.close(fd)
    Syslog({'log-level': 'info', 'logfile': logfile_name,
            'verbose': None})
    Syslog.logger.error('test')
    mock_stderr.assert_called_once_with('ERROR: test\n')
    # TODO: not yet working in pipeline
    # mock_logger.assert_called_once_with('secondshot test')
    os.remove(logfile_name)
def test_data_prefix(self, mock_now):
    """_date_prefix should format level and message with a bracketed
    timestamp derived from Syslog._now()."""
    mock_now.return_value = datetime.strptime(
        'Aug 1 2018 1:47PM', '%b %d %Y %I:%M%p')
    syslog = Syslog({'verbose': False, 'logfile': None,
                     'log-level': 'none'})
    result = syslog._date_prefix('W', 'test log')
    self.assertEqual(result, '[01/Aug/2018-13:47:00] W test log\n')
def setUp(self, mock_log):
    """Create scratch directories, extract test data, write an rsnapshot
    config, and seed saveset/volume fixture rows in the database."""
    super(TestActions, self).setUp()
    self.snapshot_root = tempfile.mkdtemp(prefix='_testdir')
    self.saveset = 'saveset1'
    self.volume = 'test1'
    self.volume_path = os.path.join(self.snapshot_root,
                                    Constants.SYNC_PATH)
    self.testdata_path = tempfile.mkdtemp(prefix='_backup')
    # BUGFIX: check_call raises if extraction fails, instead of letting
    # later tests fail obscurely on missing files (call ignored status)
    subprocess.check_call([
        'tar', 'xf',
        os.path.join(os.path.abspath(os.path.dirname(__file__)),
                     'testdata', 'testfiles.tar.bz'),
        '-C', self.testdata_path, '.'
    ])
    # BUGFIX: mkstemp returns (fd, name); taking only [1] leaked the
    # open descriptor — close it, then reopen by name below.
    fd, self.rsnapshot_conf = tempfile.mkstemp(prefix='_test')
    os.close(fd)
    with open(self.rsnapshot_conf, 'w') as f:
        f.write("include_conf /etc/rsnapshot.conf\n"
                "snapshot_root %(snapshot_root)s\n"
                "backup %(testdata)s %(hostname)s\n"
                "retain short 2\n"
                "retain long 3\n"
                "retain longer 99\n" % dict(
                    snapshot_root=self.snapshot_root,
                    testdata=self.testdata_path,
                    hostname=self.testhost))
    self.cli.update({
        'action': 'start',
        'sequence': 'short,long,longer',
        'rsnapshot-conf': self.rsnapshot_conf,
        'verbose': True,
        'volume': self.volume})
    saveset = Saveset(location=Constants.SYNC_PATH,
                      saveset=self.saveset,
                      host_id=self.testhost_id,
                      backup_host_id=self.testhost_id)
    volume = Volume(volume=self.volume, path=self.snapshot_root,
                    host_id=self.testhost_id)
    self.session.add(saveset)
    self.session.add(volume)
    self.session.flush()  # assign saveset.id before commit
    self.saveset_id = saveset.id
    self.session.commit()
    Syslog.logger = Syslog(self.cli)
def setUp(self, mock_log):
    """Point the CLI at an in-memory DB and record the alembic head
    revision for the schema-update tests."""
    super(TestSchemaUpdate, self).setUp()
    here = os.path.abspath(os.path.dirname(__file__))
    test_config = os.path.join(here, '..', '..', 'etc',
                               'backup-daily.conf')
    self.cli.update({'db-url': 'sqlite:///:memory:',
                     'rsnapshot-conf': test_config})
    Syslog.logger = Syslog(self.cli)
    cfg = alembic.config.Config()
    cfg.set_main_option(
        'script_location',
        os.path.join(here, '..', 'secondshot', 'alembic'))
    cfg.set_main_option('url', str(self.engine.url))
    script = alembic.script.ScriptDirectory.from_config(cfg)
    self.alembic_ver = script.get_heads()[0]
def main():
    """CLI entry point: parse docopt options, dispatch the requested
    action, and render the result as json or text."""
    opts = Config().docopt_convert(docopt.docopt(__doc__))
    Syslog.logger = Syslog(opts)
    obj = Actions(opts)
    result = {}
    status = 'ok'
    if opts['list-hosts']:
        result = obj.list_hosts()
    elif opts['list-savesets']:
        result = obj.list_savesets()
    elif opts['list-volumes']:
        result = obj.list_volumes()
    elif opts['verify']:
        result = obj.verify(opts['verify'])
    elif opts['version']:
        result = dict(version=[dict(name='secondshot %s' % __version__)])
    elif opts['action'] == 'start':
        result = obj.start(obj.hosts, obj.volume)
        status = result['start']['status']
    elif opts['action'] == 'rotate':
        result = obj.rotate(opts['interval'])
    elif opts['action'] == 'schema-update':
        result = obj.schema_update()
        status = result['status']
    else:
        sys.exit('Unknown action: %s' % opts['action'])
    if opts['format'] == 'json':
        sys.stdout.write(json.dumps(result) + '\n')
    elif (opts['format'] == 'text' and result and
            next(iter(result.keys())) in
            ['hosts', 'savesets', 'schema-update', 'version', 'volumes']):
        # text format: one name per line from the single top-level key
        for item in result[next(iter(result.keys()))]:
            sys.stdout.write(item['name'] + '\n')
    if status != 'ok':
        # BUGFIX: use sys.exit (consistent with the rest of this file)
        # rather than the site-module builtin exit()
        sys.exit(1)
def test_syslog_init_badopt(self, mock_stderr):
    """An unrecognized --log-level value should be reported on stderr."""
    options = {'log-level': 'invalid', 'logfile': None, 'verbose': None}
    Syslog(options)
    mock_stderr.assert_called_once_with(
        'Unrecognized --log-level=invalid\n')
def inject(self, host, volume, pathname, saveset_id):
    """Inject filesystem metadata for each file in a saveset into manifest

    Args:
        host (str): host from which to copy files
        volume (str): saveset's volume name
        pathname (str): path where current backup is stored
        saveset_id (int): record ID of new saveset
    Returns:
        result (dict): results summary
    """
    try:
        host_record = self.session.query(Host).filter_by(
            hostname=host).one()
        saveset = self.session.query(Saveset).filter_by(
            id=saveset_id).one()
    except Exception as ex:
        sys.exit('action=inject Invalid host or volume: %s' % str(ex))
    mfile = open(os.path.join(pathname, host, Config.manifest), 'w')
    mfile.write('file_id,type,file_size,has_checksum\n')
    (count, numbytes, skipped) = (0, 0, 0)
    for dirpath, _, filenames in os.walk(os.path.join(pathname, host)):
        for filename in filenames:
            if filename == Config.manifest:
                continue
            try:
                stat = os.lstat(os.path.join(dirpath, filename))
                # surrogateescape round-trip tolerates non-UTF8 names
                # coming off the filesystem; escape for raw SQL below
                _path = pymysql.escape_string(
                    os.path.relpath(
                        dirpath,
                        os.path.join(Config.snapshot_root,
                                     Constants.SYNC_PATH)).encode(
                        'utf8', 'surrogateescape').decode('utf8'))
                _filename = pymysql.escape_string(
                    filename.encode('utf8',
                                    'surrogateescape').decode('utf8'))
            except OSError as ex:
                # errno 2 (ENOENT): file vanished between walk and lstat
                if ex.errno != 2:
                    Syslog.logger.error(
                        'action=inject filename=%s message=%s' % (
                            filename, str(ex)))
                    raise
                skipped += 1
                Syslog.logger.debug('action=inject path=%s filename=%s '
                                    'msg=%s' % (dirpath, filename,
                                                str(ex)))
                continue
            except UnicodeDecodeError as ex:
                # BUGFIX: message previously read 'inode=inode=%d'
                msg = 'action=inject inode=%d dev=%s' % (stat.st_ino,
                                                         stat.st_dev)
                try:
                    msg += ' path=%s filename=%s msg=%s' % (
                        dirpath, filename, str(ex))
                except Exception:
                    pass
                skipped += 1
                Syslog.logger.debug(msg)
                continue
            record = dict(
                path=_path,
                filename=_filename,
                ctime=datetime.datetime.fromtimestamp(
                    stat.st_ctime).strftime(self.time_fmt),
                gid=stat.st_gid,
                last_backup=Syslog._now().strftime('%Y-%m-%d %H:%M:%S'),
                links=stat.st_nlink,
                mode=stat.st_mode,
                mtime=datetime.datetime.fromtimestamp(
                    stat.st_mtime).strftime(self.time_fmt),
                size=stat.st_size,
                sparseness=1,
                type=self._filetype(stat.st_mode),
                uid=stat.st_uid,
                host_id=host_record.id)
            try:
                owner = pwd.getpwuid(stat.st_uid).pw_name
                group = grp.getgrgid(stat.st_gid).gr_name
            except KeyError:
                # uid/gid without a passwd/group entry
                owner = None
                group = None
            for retry in range(4):
                try:
                    # Bypass sqlalchemy for ON DUPLICATE KEY UPDATE and
                    # LAST_INSERT_ID functionality
                    if self.engine.name == 'sqlite':
                        # sqlite lacks UPSERT capability, but this is
                        # good enough for existing unit-test. TODO: fix
                        # this so we don't require a 'real' SQL.
                        sql_insert = (
                            u"INSERT OR IGNORE INTO files (%(columns)s)"
                            u" VALUES('%(values)s');" % dict(
                                columns=','.join(record.keys()),
                                values="','".join(
                                    str(item)
                                    for item in record.values()),
                                owner=owner, group=group))
                        sql_id = 'LAST_INSERT_ROWID()'
                    else:
                        sql_insert = (
                            u"INSERT INTO files (%(columns)s)"
                            u" VALUES('%(values)s')"
                            u" ON DUPLICATE KEY UPDATE owner='%(owner)s',"
                            u"grp='%(group)s',id=LAST_INSERT_ID(id),"
                            u"last_backup=NOW();" % dict(
                                columns=','.join(record.keys()),
                                values="','".join(
                                    str(item)
                                    for item in record.values()),
                                owner=owner, group=group))
                        sql_id = 'LAST_INSERT_ID()'
                    self.session.execute(sql_insert)
                    break
                except sqlalchemy.exc.OperationalError as ex:
                    Syslog.logger.warn(
                        'action=inject path=%s filename=%s '
                        'msg=%s' % (_path, _filename, str(ex)))
                    if 'Deadlock found' in str(ex):
                        # back off progressively on deadlocks
                        time.sleep((retry + 1) * 10)
                    else:
                        time.sleep(1)
                except Exception as ex:
                    Syslog.logger.warn(
                        'action=inject path=%s filename=%s '
                        'msg=%s' % (_path, _filename, str(ex)))
                    time.sleep(1)
                    raise
            else:
                # BUGFIX: the old check `if retry == 4` could never be
                # true (range(4) ends at 3), and without a `continue`
                # the code fell through to read a stale LAST_INSERT_ID.
                # for/else runs only when no break occurred, i.e. all
                # retries failed — skip this file.
                skipped += 1
                continue
            file_id = self.session.execute(
                'SELECT %s' % sql_id).fetchone()[0]
            file = self.session.query(File).filter_by(id=file_id).one()
            has_sha = 'Y' if file.shasum else 'N'
            mfile.write('%d,%s,%d,%s\n' % (
                file_id, self._filetype(stat.st_mode), stat.st_size,
                has_sha))
            count += 1
            numbytes += stat.st_size
            # periodic commit to bound transaction size
            if count % Constants.MAX_INSERT == 0:
                Syslog.logger.debug('action=inject count=%d' % count)
                self.session.commit()
    mfile.close()
    self.session.commit()
    saveset.finished = sqlalchemy.func.now()
    saveset.files = count
    saveset.size = numbytes
    self.session.add(saveset)
    self.session.commit()
    Syslog.logger.info('FINISHED action=inject saveset=%s, file_count=%d, '
                       'skipped=%d' % (saveset.saveset, count, skipped))
    return {
        'inject': dict(
            status='ok',
            saveset=saveset.saveset,
            file_count=count,
            skipped=skipped)
    }