def connect_dynamodb(aws_access_key_id=None, aws_secret_access_key=None, **kwargs):
    """
    :type aws_access_key_id: string
    :param aws_access_key_id: Your AWS Access Key ID

    :type aws_secret_access_key: string
    :param aws_secret_access_key: Your AWS Secret Access Key

    :rtype: :class:`boto.dynamodb.layer2.Layer2`
    :return: A connection to the Layer2 interface for DynamoDB.
    """
    # Imported lazily so the DynamoDB machinery is only loaded on demand.
    from boto.dynamodb.layer2 import Layer2
    connection = Layer2(aws_access_key_id, aws_secret_access_key, **kwargs)
    return connection
def connect_glacier(aws_access_key_id=None, aws_secret_access_key=None, **kwargs):
    """
    :type aws_access_key_id: string
    :param aws_access_key_id: Your AWS Access Key ID

    :type aws_secret_access_key: string
    :param aws_secret_access_key: Your AWS Secret Access Key

    :rtype: :class:`boto.glacier.layer2.Layer2`
    :return: A connection to Amazon's Glacier Service
    """
    # Imported lazily so the Glacier machinery is only loaded on demand.
    from boto.glacier.layer2 import Layer2
    connection = Layer2(aws_access_key_id, aws_secret_access_key, **kwargs)
    return connection
def connect_cloudsearch(aws_access_key_id=None, aws_secret_access_key=None, **kwargs):
    """
    :type aws_access_key_id: string
    :param aws_access_key_id: Your AWS Access Key ID

    :type aws_secret_access_key: string
    :param aws_secret_access_key: Your AWS Secret Access Key

    :rtype: :class:`boto.cloudsearch.layer2.Layer2`
    :return: A connection to Amazon's CloudSearch service
    """
    # Bug fix: the docstring previously claimed the return type was
    # boto.ec2.autoscale.CloudSearchConnection (a copy-paste error);
    # the function actually returns boto.cloudsearch.layer2.Layer2.
    from boto.cloudsearch.layer2 import Layer2
    return Layer2(aws_access_key_id, aws_secret_access_key, **kwargs)
def connect_cloudsearch2(aws_access_key_id=None, aws_secret_access_key=None,
                         sign_request=False, **kwargs):
    """
    :type aws_access_key_id: string
    :param aws_access_key_id: Your AWS Access Key ID

    :type aws_secret_access_key: string
    :param aws_secret_access_key: Your AWS Secret Access Key

    :type sign_request: bool
    :param sign_request: whether or not to sign search and upload requests

    :rtype: :class:`boto.cloudsearch2.layer2.Layer2`
    :return: A connection to Amazon's CloudSearch2 service
    """
    # Imported lazily so the CloudSearch2 machinery is only loaded on demand.
    from boto.cloudsearch2.layer2 import Layer2
    connection = Layer2(aws_access_key_id, aws_secret_access_key,
                        sign_request=sign_request, **kwargs)
    return connection
def run_main(num_of_workers, worker_id, dry_run):
    """Run one audit worker: authenticate to Rackspace and Glacier, set up
    file logging and a per-worker temp directory, then hand off to ``main``.

    :param num_of_workers: total number of workers the audit is split across
    :param worker_id: this worker's index; used as the log suffix and the
        temp-directory name
    :param dry_run: when True, skip adding the file logger
    :raises Exception: anything raised during setup or ``main`` is logged
        and then re-raised
    """
    # Set up storage backends
    init_app(set_backends=True, routes=False)
    try:
        # Authenticate to Rackspace
        pyrax.settings.set('identity_type', 'rackspace')
        pyrax.set_credentials(storage_settings.USERNAME,
                              storage_settings.API_KEY,
                              region=storage_settings.REGION)
        # Fetching the containers validates credentials/config up front,
        # even though the handles are not used directly in this function.
        container_primary = pyrax.cloudfiles.get_container(
            storage_settings.PRIMARY_CONTAINER_NAME)
        container_parity = pyrax.cloudfiles.get_container(
            storage_settings.PARITY_CONTAINER_NAME)
        # Connect to AWS Glacier; likewise a fail-fast connectivity check.
        layer2 = Layer2(
            aws_access_key_id=storage_settings.AWS_ACCESS_KEY,
            aws_secret_access_key=storage_settings.AWS_SECRET_KEY,
        )
        vault = layer2.get_vault(storage_settings.GLACIER_VAULT)
        # Log to file
        if not dry_run:
            scripts_utils.add_file_logger(logger, __file__, suffix=worker_id)
        audit_temp_path = os.path.join(storage_settings.AUDIT_TEMP_PATH,
                                       str(worker_id))
        try:
            os.makedirs(audit_temp_path)
        except OSError:
            # Temp directory already exists — that's fine.
            pass
        main(num_of_workers, worker_id, dry_run)
    except Exception as err:
        logger.error('=== Unexpected Error ===')
        logger.exception(err)
        # Bug fix: bare `raise` preserves the original traceback, whereas
        # `raise err` re-raised with a fresh traceback pointing here.
        raise
def get_vault(credentials, settings):
    """Return the Glacier vault named by ``settings['vault']``.

    :param credentials: mapping providing ``access_key`` and ``secret_key``
    :param settings: mapping providing the target ``vault`` name
    :return: the vault object from ``Layer2.get_vault``
    """
    glacier = Layer2(
        aws_access_key_id=credentials['access_key'],
        aws_secret_access_key=credentials['secret_key'],
    )
    return glacier.get_vault(settings['vault'])
def get_vault():
    """Return the configured Glacier vault, authenticating with the AWS
    keys from ``storage_settings``.
    """
    connection = Layer2(
        aws_access_key_id=storage_settings.AWS_ACCESS_KEY,
        aws_secret_access_key=storage_settings.AWS_SECRET_KEY,
    )
    return connection.get_vault(storage_settings.GLACIER_VAULT)
# Flat script body (audit worker). Runs at module level and reads
# `nworkers`, `worker_id` and `dry_run` from enclosing scope not visible
# in this chunk — confirm they are defined before this point.
init_app(set_backends=True, routes=False)
try:
    # Authenticate to Rackspace
    pyrax.settings.set('identity_type', 'rackspace')
    pyrax.set_credentials(storage_settings.USERNAME,
                          storage_settings.API_KEY,
                          region=storage_settings.REGION)
    # Fetching the containers up front also validates that they exist.
    container_primary = pyrax.cloudfiles.get_container(
        storage_settings.PRIMARY_CONTAINER_NAME)
    container_parity = pyrax.cloudfiles.get_container(
        storage_settings.PARITY_CONTAINER_NAME)
    # Connect to AWS
    layer2 = Layer2(
        aws_access_key_id=storage_settings.AWS_ACCESS_KEY,
        aws_secret_access_key=storage_settings.AWS_SECRET_KEY,
    )
    vault = layer2.get_vault(storage_settings.GLACIER_VAULT)
    # Log to file
    if not dry_run:
        scripts_utils.add_file_logger(logger, __file__, suffix=worker_id)
    audit_temp_path = os.path.join(storage_settings.AUDIT_TEMP_PATH,
                                   str(worker_id))
    try:
        os.makedirs(audit_temp_path)
    except OSError:
        # Directory already exists — proceed either way.
        pass
    main(nworkers, worker_id, dry_run=dry_run)
except Exception as err:
    logger.error('=== Unexpected Error ===')
    # NOTE(review): unlike the run_main() variant of this code, the handler
    # here only logs a banner — the exception detail / re-raise may be cut
    # off at this chunk boundary; confirm against the full file.
def setUp(self):
    """Prepare a fresh Layer2 connection and a unique per-run vault name."""
    # A timestamp suffix keeps concurrent/repeated test runs from colliding.
    stamp = int(time.time())
    self.vault_name = 'testvault%s' % stamp
    self.layer2 = Layer2()
# Flat Python 2 script fragment: validate configured sync directories, then
# start (or resume) an inventory job against the Glacier vault.
# NOTE(review): reads `dirs`, `vault_info`, `access_key_id`, `secret_key`,
# `region`, `vault_name`, `ls_present`, `inventory_job`, `write` and
# `terminate` from enclosing scope not visible in this chunk.
if not len(dirs):
    print r"You need to give the full path to a folder to sync in the second line of the config file, e.g. `C:\backups`. You can list multiple folders, e.g. `C:\backups|D:\backups`"
    terminate(1)
# NOTE(review): `dir` shadows the builtin of the same name.
for dir in dirs:
    if not os.path.exists(dir):
        print "Sync directory not found: " + dir
        terminate(1)

# Cool! Let's set up everything.
connect_to_region(vault_info[1], aws_access_key_id=access_key_id, aws_secret_access_key=secret_key)
glacier = Layer2(aws_access_key_id=access_key_id, aws_secret_access_key=secret_key, region_name=region)
vault = glacier.get_vault(vault_name)

print "Beginning job on " + vault.arn

# Ah, we don't have a vault listing yet.
if not ls_present:
    # No job yet? Initiate a job.
    if not inventory_job:
        inventory_job = vault.retrieve_inventory()
        write()
        print "Requested an inventory. This usually takes about four hours."
        terminate(0)
    # We have a job, but it's not finished.
def setUp(self):
    """Extend the shared base fixture with a Layer2 wired to the mock."""
    GlacierLayer2Base.setUp(self)
    # The Layer2 under test delegates every call to the mocked Layer1
    # installed by the base class, so no network access occurs.
    self.layer2 = Layer2(layer1=self.mock_layer1)
def main(): config = read_config() # Cool! Let's set up everything. connect_to_region(config.region, aws_access_key_id=access_key_id, aws_secret_access_key=secret_key) glacier = Layer2(aws_access_key_id=access_key_id, aws_secret_access_key=secret_key, region_name=config.region) vault = glacier.get_vault(config.vault_name) # workaround for UnicodeDecodeError # https://github.com/boto/boto/issues/3318 vault.name = str(vault.name) print "Beginning job on " + vault.arn # Ah, we don't have a vault listing yet. if not config.ls_present: # No job yet? Initiate a job. if not config.inventory_job: config.inventory_job = vault.retrieve_inventory() config.write() print "Requested an inventory. This usually takes about four hours." terminate(0) # We have a job, but it's not finished. job = vault.get_job(config.inventory_job) if not job.completed: print "Waiting for an inventory. This usually takes about four hours." terminate(0) # Finished! try: data = json.loads(job.get_output().read()) except ValueError: print "Something went wrong interpreting the data Amazon sent!" terminate(1) config.ls = {} for archive in data['ArchiveList']: config.ls[archive['ArchiveDescription']] = { 'id': archive['ArchiveId'], 'last_modified': int(float(time.mktime(parse_ts(archive['CreationDate']).timetuple()))), 'size': int(archive['Size']), 'hash': archive['SHA256TreeHash'] } config.ls_present = '-' config.inventory_job = '' config.write() print "Imported a new inventory from Amazon." database = Database( host=db_host, port=db_port, username=db_username, password=db_password, name=db_name ) print "Connected to database." # Let's upload! 
# Flat Python 2 script fragment: the upload loop. Walks every configured
# directory, skips unchanged files, replaces modified archives, and uploads
# new/changed files to the Glacier vault. Reads `config`, `database`,
# `vault`, `terminate`, `format_bytes`, `format_time` and
# `UploadArchiveError` from enclosing scope not visible in this chunk.
# NOTE(review): os.stat_float_times was deprecated in Python 3.3 and
# removed in 3.12 — this is Python 2-era code.
os.stat_float_times(False)
try:
    i = 0            # count of files uploaded this run
    transferred = 0  # total bytes uploaded this run
    time_begin = time.time()
    # NOTE(review): `dir`, `file` and (below) `id` shadow builtins.
    for dir in config.dirs:
        print "Syncing " + dir
        for file in database.files():
            path = dir + os.sep + file
            if not os.path.exists(path):
                #print >> sys.stderr, "'%s' does not exist" % path
                print "\n" + "'%s' does not exist" % path
                continue
            # If it's a directory, then ignore it
            if not os.path.isfile(path):
                continue
            last_modified = int(os.path.getmtime(path))
            size = os.path.getsize(path)
            updating = False
            if file in config.ls:
                # Has it not been modified since?
                if config.ls[file]['last_modified'] >= last_modified and config.ls[file]['size'] == size:
                    continue
                # It's been changed... we should delete the old one
                else:
                    vault.delete_archive(config.ls[file]['id'])
                    del config.ls[file]
                    updating = True
                    # Persist the deletion before attempting re-upload.
                    config.write()
            try:
                print file + ": uploading... ",
                id = vault.concurrent_create_archive_from_file(path, file)
                config.ls[file] = {
                    'id': id,
                    'size': size,
                    'last_modified': last_modified
                }
                # Persist after each successful upload so progress survives
                # an interruption.
                config.write()
                i += 1
                transferred += size
                if updating:
                    print "updated."
                else:
                    print "done."
                database.update(file, id, vault)
            except UploadArchiveError:
                print "FAILED TO UPLOAD."
finally:
    database.close()
elapsed = time.time() - time_begin
print "\n" + str(i) + " files successfully uploaded."
print "Transferred " + format_bytes(transferred) + " in " + format_time(elapsed) + " at rate of " + format_bytes(transferred / elapsed) + "/s."
terminate(0)
import sys
import datetime
import os

from boto.glacier.layer2 import Layer2

from . import app
from .models import db, Archive

# Module-level Glacier client configured from the Flask app config.
# NOTE(review): constructed at import time, so importing this module
# requires valid AWS credentials/region in app.config.
LAYER2 = Layer2(
    aws_access_key_id=app.config['AWS_ACCESS_KEY_ID'],
    aws_secret_access_key=app.config['AWS_SECRET_ACCESS_KEY'],
    region_name=app.config['AWS_REGION_NAME'])


def size(path):
    """Return the size of *path* in bytes.

    For a directory, sums ``os.path.getsize`` over every file found by
    walking the tree; for anything else, returns ``os.path.getsize(path)``.
    """
    if os.path.isdir(path):
        total_size = 0
        for dirpath, dirnames, filenames in os.walk(path):
            for f in filenames:
                fp = os.path.join(dirpath, f)
                total_size += os.path.getsize(fp)
        return total_size
    return os.path.getsize(path)


def upload(path):
    """Upload *path* to Glacier and record it as an Archive row."""
    # NOTE(review): this definition continues beyond the visible chunk —
    # only the start of the function (creating the Archive record inside
    # an app context) is shown here.
    with app.app_context():
        name = os.path.basename(path)
        a = Archive()