Example #1
    def concurrent_create_archive_from_file(self, filename, description,
                                            **kwargs):
        """
        Create a new archive from a file and upload the given
        file.

        This is a convenience method around the
        :class:`boto.glacier.concurrent.ConcurrentUploader`
        class.  This method will perform a multipart upload
        and upload the parts of the file concurrently.

        :type filename: str
        :param filename: A filename to upload

        :param kwargs: Additional kwargs to pass through to
            :py:class:`boto.glacier.concurrent.ConcurrentUploader`.
            You can pass any argument besides the ``api`` and
            ``vault_name`` param (these arguments are already
            passed to the ``ConcurrentUploader`` for you).

        :raises: `boto.glacier.exceptions.UploadArchiveError` if an error
            occurs during the upload process.

        :rtype: str
        :return: The archive id of the newly created archive

        """
        uploader = ConcurrentUploader(self.layer1, self.name, **kwargs)
        archive_id = uploader.upload(filename, description)
        return archive_id
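Example #1 is the convenience wrapper on boto's Vault class; the kwargs it forwards are ConcurrentUploader's own options such as part_size and num_threads. A minimal sketch of calling it through Layer2 (the region, vault name, file path, and description below are placeholders, not taken from the examples):

import boto.glacier

# Credentials are assumed to come from the usual boto config/environment.
layer2 = boto.glacier.connect_to_region('us-west-2')
vault = layer2.get_vault('my-backups')
archive_id = vault.concurrent_create_archive_from_file(
    '/path/to/backup.tar.gz', 'nightly backup',
    part_size=8 * 1024 * 1024,   # forwarded to ConcurrentUploader
    num_threads=4)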
Example #2
    def test_calculate_required_part_size(self):
        self.stat_mock.return_value.st_size = 1024 * 1024 * 8
        uploader = ConcurrentUploader(mock.Mock(), 'vault_name')
        total_parts, part_size = uploader._calculate_required_part_size(
            1024 * 1024 * 8)
        self.assertEqual(total_parts, 2)
        self.assertEqual(part_size, 4 * 1024 * 1024)
Example #3
def upload_archive(fname,vault,real_name,chunk=None,description=''):
  """
  Upload a file to glacier via the web-server.
  """
  if not os.path.isfile(fname):
    print("%s is not a valid file!  Upload failed!" % fname)
    return None
  if chunk is None:
    chunk=WG.app.config.get("CHUNK_SIZE",1048576)
  handler = get_handler()
  uploader = ConcurrentUploader(handler,str(vault.name),part_size=chunk)
  if WG.app.config.get("VERBOSE",False):
    print("Beginning upload of file %s to Glacier.  Please by patient, there is no progress bar..." % fname)
  file_size = os.path.getsize(fname)
  if file_size==0:
    if WG.app.config.get("VERBOSE",False):
      print("File size is 0.  Cannot upload empty file.")
    return None
  csum = chunkedmd5(fname)
  itime=time.time()
  file_name = os.path.basename(real_name)
  machine_id = str(request.remote_addr)
  #Construct a meaningful description object for the file
  #The limits are that the description can be no more than 1024 characters in length and must use only ascii characters between 32 and 126 (i.e., 32<=ord(char)<=126)
  dscrip = description+'\\n'
  dscrip = dscrip + "Uploaded at "+str(itime)+'\\n'+ "Full path "+str(real_name)+'\\n'+ "File size "+str(file_size)+'\\n' + "MD5 "+str(csum)+'\\n' + "Source machine id "+machine_id+'\\n'
  archive_id = uploader.upload(fname, dscrip)
  if WG.app.config.get("VERBOSE",False):
    print("Successfully uploaded %s" % fname)
  archive = Archive(archive_id,description,vault,filename=file_name,fullpath=real_name,filesize=file_size,md5sum=csum)
  archive.insertion_date = datetime.fromtimestamp(int(itime))
  WG.db.session.add(archive)
  WG.db.session.commit()
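Both WebGlacier examples note in their comments that a Glacier archive description is limited to 1,024 characters drawn from ASCII 32-126, but neither snippet enforces it before calling uploader.upload(). A small helper along these lines could be applied to dscrip first (the function is illustrative, not part of the project):

def sanitize_description(text, max_len=1024):
  """Drop characters outside ASCII 32-126 and truncate to Glacier's limit."""
  cleaned = ''.join(c for c in text if 32 <= ord(c) <= 126)
  return cleaned[:max_len]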
Example #4
def uploadToGlacier(tempTarFile=None,
                    DEBUG_MODE=False,
                    GLACIER_VAULT=None,
                    SECRET_ACCESS_KEY=None,
                    ACCESS_KEY=None,
                    GLACIER_REALM=None):
    global logger

    if not tempTarFile:
        return 0
    # Establish a connection to the Glacier
    glacier_vault_in = None
    my_archive = None
    archive_id = None
    try:
        #my_glacier = GlacierConnection(ACCESS_KEY,SECRET_ACCESS_KEY,region=GLACIER_REALM)
        my_glacier = Layer1(aws_access_key_id=ACCESS_KEY,
                            aws_secret_access_key=SECRET_ACCESS_KEY,
                            region_name=GLACIER_REALM)
        if DEBUG_MODE:
            logger.debug("Glacier Connection: %s" % my_glacier)
        # Create a new vault (not necessary if you already have one!)
        if GLACIER_VAULT:
            #glacier_vault_in = my_glacier.get_vault(GLACIER_VAULT)
            #vaults = my_glacier.get_all_vaults()
            # Layer1.list_vaults() returns a dict whose 'VaultList' entries
            # carry the vault names.
            vaults = my_glacier.list_vaults()
            vault_names = [v['VaultName'] for v in vaults.get('VaultList', [])]
            glacier_vault_in = None
            if GLACIER_VAULT not in vault_names:
                glacier_vault_in = my_glacier.create_vault(GLACIER_VAULT)
        else:
            GLACIER_VAULT = id_generator(size=16)
            glacier_vault_in = my_glacier.create_vault(GLACIER_VAULT)

        if DEBUG_MODE:
            logger.debug("Glacier Vault: %s" % glacier_vault_in)

        #my_archive = GlacierArchive(tempTarFile)
        uploader = ConcurrentUploader(my_glacier, GLACIER_VAULT,
                                      64 * 1024 * 1024)

        if DEBUG_MODE:
            #logger.debug("Archive created in mem: %s " % my_archive)
            logger.debug("Archive created in mem:%s" % uploader)

        #glacier_vault_in.upload(my_archive)
        archive_id = uploader.upload(tempTarFile, tempTarFile)
        if DEBUG_MODE:
            logger.info("upload created: %s" % glacier_vault_in)
    except Exception as exc:
        if len(exc.args) > 1:
            x, y = exc.args
            errstr = None
            try:
                errstr = json.loads(y.read())
            except Exception:
                errstr = y
            logger.error("Error in glacier upload %s" % errstr)
        else:
            logger.error("Error in glacier upload %s" % (exc))
Example #5
    def test_calculate_required_part_size_too_small(self):
        too_small = 1 * 1024 * 1024
        self.stat_mock.return_value.st_size = 1024 * 1024 * 1024
        uploader = ConcurrentUploader(mock.Mock(), 'vault_name',
                                      part_size=too_small)
        total_parts, part_size = uploader._calculate_required_part_size(
            1024 * 1024 * 1024)
        self.assertEqual(total_parts, 256)
        # Part size is 4MB, not the passed-in 1MB.
        self.assertEqual(part_size, 4 * 1024 * 1024)
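The two tests above pin down how ConcurrentUploader sizes parts: an 8 MB file with the defaults becomes two 4 MB parts, and a 1 GB file with a requested 1 MB part size is bumped up to 4 MB parts (256 of them). Glacier multipart uploads are capped at 10,000 parts, so the expected numbers can be reproduced with a rough sketch like the one below (an illustration only, not boto's actual implementation; pick_part_size is a made-up name):

import math

MB = 1024 * 1024
MAX_PARTS = 10000  # Glacier's per-upload limit on the number of parts

def pick_part_size(total_size, requested_part_size=4 * MB):
    """Return (total_parts, part_size), enforcing a 4 MB floor and doubling
    the part size until the archive fits into 10,000 parts."""
    part_size = max(requested_part_size, 4 * MB)  # 4 MB floor, per the tests
    while math.ceil(total_size / float(part_size)) > MAX_PARTS:
        part_size *= 2
    total_parts = int(math.ceil(total_size / float(part_size)))
    return total_parts, part_size

# pick_part_size(8 * MB)             -> (2, 4 * MB)
# pick_part_size(1024 * MB, 1 * MB)  -> (256, 4 * MB)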
Example #6
    def upload_archive(self, file_name):
        glacier_layer1 = Layer1(region_name=self.region_name)

        uploader = ConcurrentUploader(glacier_layer1, self.target_vault_name,
                                      32 * 1024 * 1024)

        print("operation starting...")

        archive_id = uploader.upload(file_name, file_name)

        print("Success! archive id: '%s'" % archive_id)
Example #7
File: client.py  Project: kant/WebGlacier
def upload_file(command):
  """
  Uploads a file from the local machine that is specified in the given command.
  """
  if 'action' not in command or command['action']!="UPLOAD":
    raise ValueError("Command not of type UPLOAD")
  if 'file_pattern' not in command: 
    raise ValueError("Missing file pattern")
  path = command['file_pattern'] 
  if not os.path.exists(path):
    raise ValueError("No valid file for upload found")
  returner={}
  handler = Layer1(aws_access_key_id=command['access_key'],
                   aws_secret_access_key=command['secret_access_key'],
                   region_name=command['region_name'])
  uploader = ConcurrentUploader(handler, command['vault_name'], part_size=uchunk)
  file_size = os.path.getsize(path)
  if file_size==0:
    raise ValueError("File is empty.  Nothing to upload.")
  csum = chunkedmd5(path)
  itime=time.time()
  file_name = os.path.basename(path)
  machine_id = str(command['target']) if client_name == '' else client_name+' ('+str(command['target']) + ')'
  #Construct a meaningful description object for the file
  #The limits are that the description can be no more than 1024 characters in length and must use only ascii characters between 32 and 126 (i.e., 32<=ord(char)<=126)
  dscrip = command['description']+'\\n'
  dscrip = dscrip + "Uploaded at "+str(itime)+'\\n'+ "Full path "+str(path)+'\\n'+ "File size "+str(file_size)+'\\n' + "MD5 "+str(csum)+'\\n' + "Source machine id "+machine_id+'\\n'
  print "Uploading file %s"%file_name
  #Put some validation stuff here...
  #Do the upload
  archive_id = uploader.upload(path,dscrip)
  print "Completed successfully.  Archive ID: %s"%archive_id
  #Upload finished, send the result back to the server
  returner['archive_id'] = archive_id
  returner['description'] = dscrip
  returner['file_name'] = file_name
  returner['true_path'] = path
  returner['file_size'] = file_size
  returner['md5sum'] = csum
  returner['insert_time']=int(itime)
  returner['region_name']=command['region_name']
  returner['vault_name'] = command['vault_name']
  return returner
Example #8
                        # Upload archive file to AWS Glacier
                        try:
                            vault_name = str(data['vault_name'])
                            part_size = int(data['part_size'])
                            num_threads = int(data['num_threads'])
                            print "Backing archive of " + volid + " to Glacier"
                            glacierconn = Layer1(
                                aws_access_key_id=data['aws_access_key_id'],
                                aws_secret_access_key=data[
                                    'aws_secret_access_key'],
                                region_name=data['region'])
                            vault = glacierconn.describe_vault(vault_name)
                            uploader = ConcurrentUploader(
                                glacierconn,
                                vault_name,
                                part_size=part_size,
                                num_threads=num_threads)
                            archive_id = uploader.upload(
                                (archive_file + ".tar.gz"), "first upload")
                            print "Download the archive using: " + archive_id
                        except Exception as e:
                            logger.error("Glacier error: " + str(e))

                        # Detach the volume from the temporary instance and
                        # associate it back to the respective instance
                        try:
                            conn.detach_volume(volid)
                            time.sleep(15)
                            vol = conn.get_all_volumes(volid)
                            volstatus = vol[0].attachment_state()
Example #9
import sys
import os.path

from boto.glacier.layer1 import Layer1
from boto.glacier.vault import Vault
from boto.glacier.concurrent import ConcurrentUploader

access_key_id = None
secret_key = None
target_vault_name = 'backups'
region_name = 'us-west-2'
fname = sys.argv[1]

if not os.path.isfile(fname):
    print("Can't find the file to upload!")
    sys.exit(-1)

glacier_layer1 = Layer1(aws_access_key_id=access_key_id,
                        aws_secret_access_key=secret_key,
                        region_name=region_name)

uploader = ConcurrentUploader(glacier_layer1, target_vault_name,
                              32 * 1024 * 1024)

print("operation starting...")

archive_id = uploader.upload(fname, fname)

print("Success! archive id: '%s'" % (archive_id))
Example #10
from boto.glacier.layer1 import Layer1
from boto.glacier.concurrent import ConcurrentUploader
import sys
import os.path

# XXX: replace these with your credentials
ACCESS_KEY = "AWS_ACCESS_KEY"
SECRET_KEY = "AWS_SECRET_KEY"
VAULT_NAME = "VAULT_NAME"
REGION_NAME = 'us-west-2'

try:
    backup_file = sys.argv[1]
except IndexError:
    sys.stderr.write("Usage: {} <file to backup>\n".format(sys.argv[0]))
    sys.exit(1)
if not os.path.isfile(backup_file):
    sys.stderr.write("Bad upload file {}\n".format(backup_file))
    sys.exit(2)

glacier_layer1 = Layer1(aws_access_key_id=ACCESS_KEY,
                        aws_secret_access_key=SECRET_KEY,
                        region_name=REGION_NAME)

uploader = ConcurrentUploader(glacier_layer1, VAULT_NAME, 32*1024*1024)

sys.stdout.write("Uploading {} as {}...".format(
    backup_file, os.path.basename(backup_file)))
archive_id = uploader.upload(backup_file, os.path.basename(backup_file))
sys.stdout.write("done\n")