def show_glacier_inventory(**kwargs):
    """Show the Glacier inventory stored on S3.

    :type conf: dict
    :keyword conf: Override/set AWS configuration.
    """
    if config.get("aws", "s3_bucket"):
        conf = kwargs.get("conf", None)
        glacier_backend = GlacierBackend(conf)
        loaded_archives = glacier_backend.load_archives_from_s3()
        log.info(json.dumps(loaded_archives, sort_keys=True, indent=4, separators=(",", ": ")))
        # Return inside the branch: loaded_archives is unbound when no bucket is defined.
        return loaded_archives
    else:
        log.error("No S3 bucket defined.")

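# Usage sketch: the inventory helpers in this module accept an optional ``conf``
# dict to override the AWS configuration. The keys shown here are illustrative
# assumptions, not the full bakthat configuration schema:
#
#     archives = show_glacier_inventory(conf={"access_key": "...",
#                                             "secret_key": "...",
#                                             "s3_bucket": "my-backups"})
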
def restore_glacier_inventory(**kwargs):
    """Restore custom Glacier inventory from S3.

    :type conf: dict
    :keyword conf: Override/set AWS configuration.
    """
    conf = kwargs.get("conf", None)
    glacier_backend = GlacierBackend(conf)
    glacier_backend.restore_inventory()

def backup_glacier_inventory(**kwargs):
    """Backup Glacier inventory to S3.

    :type conf: dict
    :keyword conf: Override/set AWS configuration.
    """
    conf = kwargs.get("conf", None)
    glacier_backend = GlacierBackend(conf)
    glacier_backend.backup_inventory()

def upgrade_to_dump_truck():
    """Migrate existing S3 and Glacier backups into the DumpTruck store."""
    glacier_backend = GlacierBackend()
    glacier_backend.upgrade_to_dump_truck()

    s3_backend = S3Backend()

    regex_key = re.compile(r"(?P<backup_name>.+)\.(?P<date_component>\d{14})\.tgz(?P<is_enc>\.enc)?")

    # Old regex kept for backward compatibility (files without a dot before the date component).
    old_regex_key = re.compile(r"(?P<backup_name>.+)(?P<date_component>\d{14})\.tgz(?P<is_enc>\.enc)?")

    for backend_obj, backend in [(s3_backend, "s3"), (glacier_backend, "glacier")]:
        for key in backend_obj.ls():
            match = regex_key.match(key)

            # Fall back to the old key format
            if not match:
                match = old_regex_key.match(key)

            if match:
                filename = match.group("backup_name")
                is_enc = bool(match.group("is_enc"))
                # NOTE: strftime("%s") is a glibc extension; it works on Linux and OS X.
                backup_date = int(datetime.strptime(match.group("date_component"), "%Y%m%d%H%M%S").strftime("%s"))
            else:
                filename = key
                is_enc = False
                backup_date = 0

            # The hash ties a backup entry to a given set of credentials and container.
            backend_hash = hashlib.sha512(backend_obj.conf.get("access_key") +
                                          backend_obj.conf.get(backend_obj.container_key)).hexdigest()

            new_backup = dict(backend=backend,
                              is_deleted=0,
                              backup_date=backup_date,
                              tags=[],
                              stored_filename=key,
                              filename=filename,
                              last_updated=int(datetime.utcnow().strftime("%s")),
                              metadata=dict(is_enc=is_enc),
                              size=0,
                              backend_hash=backend_hash)
            try:
                dump_truck_insert_backup(new_backup)
            except Exception:
                # The migration is best effort; skip entries that fail to insert.
                pass

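# A minimal sketch (hypothetical helper, not part of bakthat) showing how the
# two key regexes above split a stored filename; the sample keys are made up:
def _demo_key_parsing():
    regex_key = re.compile(r"(?P<backup_name>.+)\.(?P<date_component>\d{14})\.tgz(?P<is_enc>\.enc)?")
    old_regex_key = re.compile(r"(?P<backup_name>.+)(?P<date_component>\d{14})\.tgz(?P<is_enc>\.enc)?")

    # Current key format: name, dot, 14-digit timestamp, .tgz, optional .enc suffix.
    match = regex_key.match("mybackup.20130712120000.tgz.enc")
    assert match.group("backup_name") == "mybackup"
    assert match.group("date_component") == "20130712120000"
    assert bool(match.group("is_enc"))

    # Keys produced by older bakthat versions lack the dot before the timestamp.
    match = old_regex_key.match("mybackup20130712120000.tgz")
    assert match.group("backup_name") == "mybackup"
    assert not bool(match.group("is_enc"))
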
def upgrade_from_shelve():
    """Migrate backups from the legacy shelve database (~/.bakthat.db)."""
    if os.path.isfile(os.path.expanduser("~/.bakthat.db")):
        glacier_backend = GlacierBackend()
        glacier_backend.upgrade_from_shelve()

        s3_backend = S3Backend()

        regex_key = re.compile(r"(?P<backup_name>.+)\.(?P<date_component>\d{14})\.tgz(?P<is_enc>\.enc)?")

        # Old regex kept for backward compatibility (files without a dot before the date component).
        old_regex_key = re.compile(r"(?P<backup_name>.+)(?P<date_component>\d{14})\.tgz(?P<is_enc>\.enc)?")

        for keys, backend in [(s3_backend.ls(), "s3"),
                              ([ivt.filename for ivt in Inventory.select()], "glacier")]:
            for key in keys:
                match = regex_key.match(key)

                # Fall back to the old key format
                if not match:
                    match = old_regex_key.match(key)

                if match:
                    filename = match.group("backup_name")
                    is_enc = bool(match.group("is_enc"))
                    backup_date = int(datetime.strptime(match.group("date_component"), "%Y%m%d%H%M%S").strftime("%s"))
                else:
                    filename = key
                    is_enc = False
                    backup_date = 0

                if backend == "s3":
                    backend_hash = hashlib.sha512(s3_backend.conf.get("access_key") +
                                                  s3_backend.conf.get(s3_backend.container_key)).hexdigest()
                elif backend == "glacier":
                    backend_hash = hashlib.sha512(glacier_backend.conf.get("access_key") +
                                                  glacier_backend.conf.get(glacier_backend.container_key)).hexdigest()

                new_backup = dict(backend=backend,
                                  is_deleted=0,
                                  backup_date=backup_date,
                                  tags="",
                                  stored_filename=key,
                                  filename=filename,
                                  last_updated=int(datetime.utcnow().strftime("%s")),
                                  metadata=dict(is_enc=is_enc),
                                  size=0,
                                  backend_hash=backend_hash)
                try:
                    Backups.upsert(**new_backup)
                except Exception as exc:
                    log.error(exc)

        os.remove(os.path.expanduser("~/.bakthat.db"))

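# Portability note: strftime("%s"), used in both upgrade helpers above, is a
# glibc extension that converts via the local timezone. A sketch of a portable,
# UTC-based alternative (hypothetical helper, not part of bakthat; semantics
# differ slightly if the date components were written in local time):
import calendar

def _date_component_to_epoch(date_component):
    """Convert a YYYYMMDDHHMMSS date component to a Unix timestamp, treating it as UTC."""
    return calendar.timegm(datetime.strptime(date_component, "%Y%m%d%H%M%S").timetuple())
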
def show_local_glacier_inventory(**kwargs):
    """Show the local Glacier inventory.

    :type conf: dict
    :keyword conf: Override/set AWS configuration.
    """
    conf = kwargs.get("conf", None)
    glacier_backend = GlacierBackend(conf)
    archives = glacier_backend.load_archives()
    log.info(json.dumps(archives, sort_keys=True, indent=4, separators=(",", ": ")))
    return archives

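# Usage sketch: comparing the local inventory with the copy stored on S3 gives
# a quick consistency check (hypothetical snippet, not part of bakthat's API):
#
#     local_archives = show_local_glacier_inventory()
#     remote_archives = show_glacier_inventory()
#     if local_archives != remote_archives:
#         log.warning("Local and S3 Glacier inventories have diverged.")
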
def test_glacier_backup_restore(self):
    if raw_input("Test glacier upload/download? It can take up to 4 hours! (y/N): ").lower() == "y":
        # Backup dummy file
        bakthat.backup(self.test_file.name, "glacier", password="")

        # Check that the file shows up in bakthat ls
        self.assertEqual(bakthat.match_filename(self.test_filename, "glacier")[0]["filename"],
                         self.test_filename)

        # Initialize the Glacier backend to check that the file appears
        # in both the local and remote (S3) inventories
        glacier_backend = GlacierBackend(None)
        archives = glacier_backend.load_archives()
        archives_s3 = glacier_backend.load_archives_from_s3()

        # Check that the local and remote custom inventories are equal
        self.assertEqual(archives, archives_s3)

        # Next, check that the file is stored in both inventories
        inventory_key_name = bakthat.match_filename(self.test_filename, "glacier")[0]["key"]
        self.assertTrue(inventory_key_name in archives)
        self.assertTrue(inventory_key_name in archives_s3)

        # Restore backup
        job = bakthat.restore(self.test_filename, "glacier", job_check=True)

        # Check that a retrieval job has been initiated
        self.assertEqual(job.__dict__["action"], "ArchiveRetrieval")
        self.assertEqual(job.__dict__["status_code"], "InProgress")

        while True:
            # Poll every ten minutes until the retrieval job is done
            result = bakthat.restore(self.test_filename, "glacier")

            # Once the job is done, the file can be downloaded
            if result:
                with open(self.test_filename) as restored_file:
                    restored_hash = hashlib.sha1(restored_file.read()).hexdigest()

                # Check that the hash of the restored file matches the initial file hash
                self.assertEqual(self.test_hash, restored_hash)

                os.remove(self.test_filename)

                # Now the remote backup can be deleted
                bakthat.delete(self.test_filename, "glacier")

                # Check that the file is deleted
                self.assertEqual(bakthat.match_filename(self.test_filename, "glacier"), [])

                archives = glacier_backend.load_archives()
                archives_s3 = glacier_backend.load_archives_from_s3()

                # Check that the file has been removed from both inventories
                self.assertEqual(archives, archives_s3)
                self.assertTrue(inventory_key_name not in archives)
                self.assertTrue(inventory_key_name not in archives_s3)

                break
            else:
                time.sleep(600)