def _get_vault_from_arn(arn, settings):
    g = Glacier(aws_access_key_id=settings.AWS_ACCESS_KEY_ID,
                aws_secret_access_key=settings.AWS_SECRET_ACCESS_KEY)
    for i in g.list_vaults():
        if arn == i.arn:
            return i
    else:
        # for/else: reached only when no vault matched the given ARN
        raise CommandError("The specified vault could not be accessed.")
def _get_vault_from_arn(arn, settings):
    logger.info('getting vault from arn')
    if hasattr(settings, 'USING_IAM_ROLE') and settings.USING_IAM_ROLE:
        # GLACIER_REGION_NAME is required when USING_IAM_ROLE is True;
        # with no explicit keys, boto falls back to the instance's role credentials
        g = Glacier(region_name=settings.GLACIER_REGION_NAME)
    else:
        g = Glacier(aws_access_key_id=settings.AWS_ACCESS_KEY_ID,
                    aws_secret_access_key=settings.AWS_SECRET_ACCESS_KEY)
    logger.info('Glacier: {}'.format(g.__dict__))
    for i in g.list_vaults():
        if arn == i.arn:
            print(i.arn)
            return i
    else:
        # for/else: reached only when no vault matched the given ARN
        raise CommandError('The specified vault could not be accessed.')
def main(): """Main routine.""" parser = ArgumentParser(description='Amazon Glacier console backup tool') group = parser.add_mutually_exclusive_group() group.add_argument('--upload', type=str, metavar='FILENAME') group.add_argument('--notify', type=str, metavar='ARCHIVE_ID', help='notify Glacier about backup retrieval') group.add_argument('--download', type=str, metavar='JOB_ID', help='actually download archive') group.add_argument('--remove', type=str, metavar='ARCHIVE_ID') parser.add_argument('--download-to', type=str) args = parser.parse_args() vault = Layer2(aws_access_key_id=AMAZON_LOGIN, aws_secret_access_key=AMAZON_PASSWORD, region_name='eu-west-1').get_vault(VAULT_NAME) if args.notify: print('Job {}'.format(notify_download(vault, args.notify))) elif args.download: print('Download checksum: {}'.format( download(vault, args.download, args.download_to))) elif args.remove: remove(vault, args.remove) print('Removed archive {}'.format(args.remove)) elif args.upload: print('{}: {}'.format(args.upload, upload(vault, args.upload))) else: print(list_jobs(vault))
def main(num_of_workers=0, worker_id=0, glacier=True, parity=True, dry_run=True):
    global container_primary
    global container_parity
    global vault
    global audit_temp_path

    # Set up storage backends
    init_app(set_backends=True, routes=False)

    try:
        # Authenticate to Rackspace
        pyrax.settings.set('identity_type', 'rackspace')
        pyrax.set_credentials(storage_settings.USERNAME,
                              storage_settings.API_KEY,
                              region=storage_settings.REGION)
        container_primary = pyrax.cloudfiles.get_container(
            storage_settings.PRIMARY_CONTAINER_NAME)
        container_parity = pyrax.cloudfiles.get_container(
            storage_settings.PARITY_CONTAINER_NAME)

        # Connect to AWS
        layer2 = Layer2(
            aws_access_key_id=storage_settings.AWS_ACCESS_KEY,
            aws_secret_access_key=storage_settings.AWS_SECRET_KEY,
        )
        vault = layer2.get_vault(storage_settings.GLACIER_VAULT)

        # Log to file
        if not dry_run:
            scripts_utils.add_file_logger(logger, __file__, suffix=worker_id)

        audit_temp_path = os.path.join(storage_settings.AUDIT_TEMP_PATH,
                                       str(worker_id))
        if not dry_run:
            try:
                os.makedirs(audit_temp_path)
            except OSError:
                pass

        if glacier:
            logger.info('glacier audit start')
            audit(glacier_targets(), num_of_workers, worker_id, dry_run)
            logger.info('glacier audit complete')

        if parity:
            logger.info('parity audit start')
            audit(parity_targets(), num_of_workers, worker_id, dry_run)
            logger.info('parity audit complete')
    except Exception as err:
        logger.error('=== Unexpected Error ===')
        logger.exception(err)
        raise err
def main(id):
    layer2 = Layer2(
        aws_access_key_id=config.get("glacier", "aws_access_key_id"),
        aws_secret_access_key=config.get("glacier", "aws_secret_access_key"),
        region_name=config.get("glacier", "region"),
    )
    vault = layer2.get_vault(config.get("glacier", "vault"))
    # retrieve_archive starts an archive-retrieval job and returns a Job object
    job = vault.retrieve_archive(id)
    print("Archive retrieval job id: {0}".format(job.id))
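# A minimal sketch of how the retrieval job started above could be polled and
# downloaded later. wait_and_download and its arguments are hypothetical;
# Vault.get_job, Job.completed, and Job.download_to_file are real boto Layer2
# calls, but the sleep interval is illustrative only.
import time

def wait_and_download(vault, job_id, path):
    job = vault.get_job(job_id)
    while not job.completed:
        time.sleep(15 * 60)          # Glacier retrievals typically take hours
        job = vault.get_job(job_id)  # re-fetch to refresh completion status
    job.download_to_file(path)       # verifies tree hashes as it downloads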
def connect_dynamodb(aws_access_key_id=None, aws_secret_access_key=None,
                     **kwargs):
    """
    :type aws_access_key_id: string
    :param aws_access_key_id: Your AWS Access Key ID

    :type aws_secret_access_key: string
    :param aws_secret_access_key: Your AWS Secret Access Key

    :rtype: :class:`boto.dynamodb.layer2.Layer2`
    :return: A connection to the Layer2 interface for DynamoDB.
    """
    from boto.dynamodb.layer2 import Layer2
    return Layer2(aws_access_key_id, aws_secret_access_key, **kwargs)
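# Hypothetical usage of connect_dynamodb; the table name 'my-table' is an
# assumption, not part of the helper above.
conn = connect_dynamodb('<access key>', '<secret key>')
print(conn.list_tables())           # Layer2 wraps ListTables
table = conn.get_table('my-table')  # looks up and wraps an existing table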
def connect_glacier(aws_access_key_id=None, aws_secret_access_key=None,
                    **kwargs):
    """
    :type aws_access_key_id: string
    :param aws_access_key_id: Your AWS Access Key ID

    :type aws_secret_access_key: string
    :param aws_secret_access_key: Your AWS Secret Access Key

    :rtype: :class:`boto.glacier.layer2.Layer2`
    :return: A connection to Amazon's Glacier Service
    """
    from boto.glacier.layer2 import Layer2
    return Layer2(aws_access_key_id, aws_secret_access_key, **kwargs)
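# A minimal usage sketch for connect_glacier; the vault name and file path are
# assumptions. Glacier's CreateVault is idempotent, so create_vault doubles as
# a "get or create".
glacier = connect_glacier('<access key>', '<secret key>')
vault = glacier.create_vault('my-backups')
archive_id = vault.upload_archive('backup.tar.gz')  # single-request upload
print(archive_id)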
def connect_cloudsearch(aws_access_key_id=None, aws_secret_access_key=None,
                        **kwargs):
    """
    :type aws_access_key_id: string
    :param aws_access_key_id: Your AWS Access Key ID

    :type aws_secret_access_key: string
    :param aws_secret_access_key: Your AWS Secret Access Key

    :rtype: :class:`boto.cloudsearch.layer2.Layer2`
    :return: A connection to Amazon's CloudSearch service
    """
    from boto.cloudsearch.layer2 import Layer2
    return Layer2(aws_access_key_id, aws_secret_access_key, **kwargs)
def connect_cloudsearch2(aws_access_key_id=None, aws_secret_access_key=None,
                         sign_request=False, **kwargs):
    """
    :type aws_access_key_id: string
    :param aws_access_key_id: Your AWS Access Key ID

    :type aws_secret_access_key: string
    :param aws_secret_access_key: Your AWS Secret Access Key

    :type sign_request: bool
    :param sign_request: whether or not to sign search and upload requests

    :rtype: :class:`boto.cloudsearch2.layer2.Layer2`
    :return: A connection to Amazon's CloudSearch2 service
    """
    from boto.cloudsearch2.layer2 import Layer2
    return Layer2(aws_access_key_id, aws_secret_access_key,
                  sign_request=sign_request, **kwargs)
def get_vault(credentials, settings):
    layer2 = Layer2(
        aws_access_key_id=credentials['access_key'],
        aws_secret_access_key=credentials['secret_key'],
    )
    return layer2.get_vault(settings['vault'])
def get_vault():
    layer2 = Layer2(
        aws_access_key_id=storage_settings.AWS_ACCESS_KEY,
        aws_secret_access_key=storage_settings.AWS_SECRET_KEY,
    )
    return layer2.get_vault(storage_settings.GLACIER_VAULT)
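# Hypothetical caller of get_vault(); the archive path and description are
# illustrative only.
vault = get_vault()
archive_id = vault.upload_archive('/backups/nightly.tar.gz',
                                  description='nightly backup')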
def main():
    config = read_config()

    # Cool! Let's set up everything.
    connect_to_region(config.region,
                      aws_access_key_id=access_key_id,
                      aws_secret_access_key=secret_key)
    glacier = Layer2(aws_access_key_id=access_key_id,
                     aws_secret_access_key=secret_key,
                     region_name=config.region)
    vault = glacier.get_vault(config.vault_name)
    # workaround for UnicodeDecodeError
    # https://github.com/boto/boto/issues/3318
    vault.name = str(vault.name)
    print "Beginning job on " + vault.arn

    # Ah, we don't have a vault listing yet.
    if not config.ls_present:
        # No job yet? Initiate a job.
        if not config.inventory_job:
            config.inventory_job = vault.retrieve_inventory()
            config.write()
            print "Requested an inventory. This usually takes about four hours."
            terminate(0)

        # We have a job, but it's not finished.
        job = vault.get_job(config.inventory_job)
        if not job.completed:
            print "Waiting for an inventory. This usually takes about four hours."
            terminate(0)

        # Finished!
        try:
            data = json.loads(job.get_output().read())
        except ValueError:
            print "Something went wrong interpreting the data Amazon sent!"
            terminate(1)

        config.ls = {}
        for archive in data['ArchiveList']:
            config.ls[archive['ArchiveDescription']] = {
                'id': archive['ArchiveId'],
                'last_modified': int(float(time.mktime(parse_ts(archive['CreationDate']).timetuple()))),
                'size': int(archive['Size']),
                'hash': archive['SHA256TreeHash'],
            }

        config.ls_present = '-'
        config.inventory_job = ''
        config.write()
        print "Imported a new inventory from Amazon."

    database = Database(
        host=db_host,
        port=db_port,
        username=db_username,
        password=db_password,
        name=db_name,
    )
    print "Connected to database."

    # Let's upload!
    os.stat_float_times(False)
    try:
        i = 0
        transferred = 0
        time_begin = time.time()
        for dir in config.dirs:
            print "Syncing " + dir
            for file in database.files():
                path = dir + os.sep + file

                if not os.path.exists(path):
                    # print >> sys.stderr, "'%s' does not exist" % path
                    print "\n" + "'%s' does not exist" % path
                    continue

                # If it's a directory, then ignore it
                if not os.path.isfile(path):
                    continue

                last_modified = int(os.path.getmtime(path))
                size = os.path.getsize(path)
                updating = False

                if file in config.ls:
                    # Has it not been modified since?
                    if config.ls[file]['last_modified'] >= last_modified and config.ls[file]['size'] == size:
                        continue
                    # It's been changed... we should delete the old one
                    else:
                        vault.delete_archive(config.ls[file]['id'])
                        del config.ls[file]
                        updating = True
                        config.write()

                try:
                    print file + ": uploading... ",
                    id = vault.concurrent_create_archive_from_file(path, file)
                    config.ls[file] = {
                        'id': id,
                        'size': size,
                        'last_modified': last_modified,
                    }
                    config.write()
                    i += 1
                    transferred += size
                    if updating:
                        print "updated."
                    else:
                        print "done."
                    database.update(file, id, vault)
                except UploadArchiveError:
                    print "FAILED TO UPLOAD."
    finally:
        database.close()

    elapsed = time.time() - time_begin
    print "\n" + str(i) + " files successfully uploaded."
    print "Transferred " + format_bytes(transferred) + " in " + format_time(elapsed) + " at rate of " + format_bytes(transferred / elapsed) + "/s."
    terminate(0)
init_app(set_backends=True, routes=False)
try:
    # Authenticate to Rackspace
    pyrax.settings.set('identity_type', 'rackspace')
    pyrax.set_credentials(storage_settings.USERNAME,
                          storage_settings.API_KEY,
                          region=storage_settings.REGION)
    container_primary = pyrax.cloudfiles.get_container(
        storage_settings.PRIMARY_CONTAINER_NAME)
    container_parity = pyrax.cloudfiles.get_container(
        storage_settings.PARITY_CONTAINER_NAME)

    # Connect to AWS
    layer2 = Layer2(
        aws_access_key_id=storage_settings.AWS_ACCESS_KEY,
        aws_secret_access_key=storage_settings.AWS_SECRET_KEY,
    )
    vault = layer2.get_vault(storage_settings.GLACIER_VAULT)

    # Log to file
    if not dry_run:
        scripts_utils.add_file_logger(logger, __file__, suffix=worker_id)

    audit_temp_path = os.path.join(storage_settings.AUDIT_TEMP_PATH,
                                   str(worker_id))
    try:
        os.makedirs(audit_temp_path)
    except OSError:
        pass

    main(nworkers, worker_id, dry_run=dry_run)
except Exception as err:
    logger.error('=== Unexpected Error ===')
def setUp(self):
    self.layer2 = Layer2()
    self.vault_name = 'testvault%s' % int(time.time())
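# A plausible companion tearDown for the integration-test setUp above, assuming
# the test created the vault during its run; delete_vault is a real Layer2
# method, but this pairing is an assumption.
def tearDown(self):
    self.layer2.delete_vault(self.vault_name)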
import sys
import datetime
import os

from boto.glacier.layer2 import Layer2

from . import app
from .models import db, Archive

LAYER2 = Layer2(
    aws_access_key_id=app.config['AWS_ACCESS_KEY_ID'],
    aws_secret_access_key=app.config['AWS_SECRET_ACCESS_KEY'],
    region_name=app.config['AWS_REGION_NAME'])


def size(path):
    if os.path.isdir(path):
        total_size = 0
        for dirpath, dirnames, filenames in os.walk(path):
            for f in filenames:
                fp = os.path.join(dirpath, f)
                total_size += os.path.getsize(fp)
        return total_size
    return os.path.getsize(path)


def upload(path):
    with app.app_context():
        name = os.path.basename(path)
        a = Archive()
def setUp(self):
    GlacierLayer2Base.setUp(self)
    self.layer2 = Layer2(layer1=self.mock_layer1)
if not len(dirs):
    print r"You need to give the full path to a folder to sync in the second line of the config file, e.g. `C:\backups`. You can list multiple folders, e.g. `C:\backups|D:\backups`"
    terminate(1)

for dir in dirs:
    if not os.path.exists(dir):
        print "Sync directory not found: " + dir
        terminate(1)

# Cool! Let's set up everything.
connect_to_region(vault_info[1],
                  aws_access_key_id=access_key_id,
                  aws_secret_access_key=secret_key)
glacier = Layer2(aws_access_key_id=access_key_id,
                 aws_secret_access_key=secret_key,
                 region_name=region)
vault = glacier.get_vault(vault_name)
print "Beginning job on " + vault.arn

# Ah, we don't have a vault listing yet.
if not ls_present:
    # No job yet? Initiate a job.
    if not inventory_job:
        inventory_job = vault.retrieve_inventory()
        write()
        print "Requested an inventory. This usually takes about four hours."
        terminate(0)

    # We have a job, but it's not finished.