Exemplo n.º 1
0
def upload_key(email, name, key):
    """Store the given key material in S3 under the bucket key for (email, name).

    The key text is stripped of surrounding whitespace before upload.
    """
    destination = s3key(email, name)
    trimmed_key = key.strip()
    destination.set_contents_from_string(trimmed_key)
Exemplo n.º 2
0
  def upload(self, mpi, source, target, pos = 0, chunk = 0, part = 0):
    '''Thread worker for upload operation.

    Runs in two modes:
      - mpi is None (initialization): performs pre-flight checks, uploads
        small files in a single PUT, or initiates a multipart upload and
        queues one pool task per part.
      - mpi is set (part upload): uploads bytes [pos, pos+chunk) of source
        as part number `part` of the multipart upload identified by mpi.id,
        and finalizes the upload when all parts are done.

    Raises:
      Failure: on pre-flight errors, a missing multipart upload, or an
        unreadable source chunk.
      ParameterError: on an invalid extra-header name.
      RetryFailure: when completing the multipart upload fails.
    '''
    s3url = S3URL(target)
    bucket = self.s3.lookup(s3url.bucket, validate=self.opt.validate)

    # Initialization: Set up multithreaded uploads.
    if not mpi:
      fsize = os.path.getsize(source)
      key = bucket.get_key(s3url.path)

      # optional checks
      if self.opt.dry_run:
        message('%s => %s', source, target)
        return
      elif self.opt.sync_check and self.sync_check(source, key):
        message('%s => %s (synced)', source, target)
        return
      elif not self.opt.force and key:
        raise Failure('File already exists: %s' % target)

      # extra headers
      extra_headers = {}
      if self.opt.add_header:
        for hdr in self.opt.add_header:
          try:
            # BUGFIX: use distinct names here — the original reused 'key',
            # clobbering the S3 Key object fetched above.
            hdr_name, hdr_val = hdr.split(":", 1)
          except ValueError:
            raise Failure("Invalid header format: %s" % hdr)
          # Header names may only contain alphanumerics, '-' and '.'.
          hdr_inval = re.sub("[a-zA-Z0-9-.]", "", hdr_name)
          if hdr_inval:
            hdr_inval = hdr_inval.replace(" ", "<space>")
            hdr_inval = hdr_inval.replace("\t", "<tab>")
            raise ParameterError("Invalid character(s) in header name '%s': \"%s\"" % (hdr_name, hdr_inval))
          extra_headers[hdr_name.strip().lower()] = hdr_val.strip()

      # Small file optimization: a single PUT avoids multipart overhead.
      if fsize < self.opt.max_singlepart_upload_size:
        key = boto.s3.key.Key(bucket)
        key.key = s3url.path
        key.set_metadata('privilege', self.get_file_privilege(source))
        key.set_contents_from_filename(source, reduced_redundancy=self.opt.reduced_redundancy, headers=extra_headers)
        if self.opt.acl_public:
          key.set_acl('public-read')
        message('%s => %s', source, target)
        return

      # Here we need to have our own md5 value because multipart upload calculates
      # different md5 values.
      mpu = bucket.initiate_multipart_upload(s3url.path, metadata = {'md5': self.file_hash(source), 'privilege': self.get_file_privilege(source)})

      for args in self.get_file_splits(mpu.id, source, target, fsize, self.opt.multipart_split_size):
        self.pool.upload(*args)
      return

    # Handle each part in parallel, post initialization.
    # BUGFIX: initialize mpu so the not-found check below raises Failure
    # instead of NameError when no matching upload exists.
    mpu = None
    for mp in bucket.list_multipart_uploads():
      if mp.id == mpi.id:
        mpu = mp
        break
    if mpu is None:
      # BUGFIX: the original referenced the undefined name 'mpu_id' here.
      raise Failure('Could not find MultiPartUpload %s' % mpi.id)

    data = None
    with open(source, 'rb') as f:
      f.seek(pos)
      data = f.read(chunk)
    if not data:
      raise Failure('Unable to read data from source: %s' % source)

    mpu.upload_part_from_file(StringIO(data), part)

    # Finalize: only the thread that completes the last part runs this.
    if mpi.complete():
      try:
        mpu.complete_upload()
        message('%s => %s', source, target)
      except Exception:
        # Best effort cleanup; the multipart upload would otherwise linger
        # (and accrue storage charges) on S3.
        mpu.cancel_upload()
        raise RetryFailure('Upload failed: Unable to complete upload %s.' % source)
Exemplo n.º 3
0
def upload_key(email, name, key):
    """Write the whitespace-stripped key contents to the S3 key addressed by (email, name)."""
    s3key(email, name).set_contents_from_string(key.strip())