Example #1
    def upload_part_from_file(self, fp, part_num, headers=None, replace=True,
                              cb=None, num_cb=10, md5=None, size=None):
        """
        Upload another part of this MultiPart Upload.

        .. note::

            After you initiate multipart upload and upload one or more parts,
            you must either complete or abort multipart upload in order to stop
            getting charged for storage of the uploaded parts. Only after you
            either complete or abort multipart upload, Amazon S3 frees up the
            parts storage and stops charging you for the parts storage.

        :type fp: file
        :param fp: The file object you want to upload.

        :type part_num: int
        :param part_num: The number of this part.

        The other parameters are exactly as defined for the
        :class:`boto.s3.key.Key` set_contents_from_file method.

        :rtype: :class:`boto.s3.key.Key` or subclass
        :returns: The uploaded part containing the etag.
        """
        if part_num < 1:
            raise ValueError('Part numbers must be greater than zero')
        query_args = 'uploadId=%s&partNumber=%d' % (self.id, part_num)
        key = self.bucket.new_key(self.key_name)
        key.set_contents_from_file(fp, headers=headers, replace=replace,
                                   cb=cb, num_cb=num_cb, md5=md5,
                                   reduced_redundancy=False,
                                   query_args=query_args, size=size)
        return key
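The note in the docstring above means a part upload only makes sense inside the full multipart lifecycle: initiate the upload, send the parts, then either complete or cancel it. A minimal sketch of that flow with boto (the bucket name, key name and local part file are hypothetical):

import boto

conn = boto.connect_s3()
bucket = conn.get_bucket('example-bucket')               # hypothetical bucket
mp = bucket.initiate_multipart_upload('big-file.bin')    # hypothetical key name
try:
    with open('/tmp/part1.bin', 'rb') as part_fp:        # hypothetical local part
        mp.upload_part_from_file(part_fp, part_num=1)
    mp.complete_upload()      # stitch the uploaded parts into one object
except Exception:
    mp.cancel_upload()        # abort so the stored parts stop accruing charges
    raise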
Example #3
 def basic_upload(self, uploaded, filename):
     """
     Uploads the uploaded file to S3 using 
     """
     key = boto.s3.key.Key(self.bucket)
     key.key = filename
     key.set_contents_from_file(uploaded)
     return True
Example #4
    def put_file(self):
        if self.key:
            key = self.key
        else:
            key = boto.s3.key.Key(bucket=self.bucket)
            key.key = self.key_name

        with self.open('u') as f:
            key.set_contents_from_file(f)
Example #5
 def stream_write(self, path, fp):
     # Buffer the incoming stream in memory, then upload it as a single S3 object.
     buf = fp.read()
     io = compat.StringIO(buf)
     try:
         path = self._init_path(path)
         key = self.makeKey(path)
         key.set_contents_from_file(io)
     finally:
         io.close()
Example #7
def cdn_upload(local_path, cdn_path, content_type, bucket, binary=False):
    print(":uploading to CDN: %s" % cdn_path)
    if CONFIG.dry_run:
        return
    key = boto.s3.key.Key(bucket, cdn_path)
    key.metadata = {'Cache-Control': 'max-age=31536000',
                    'Content-Type': content_type}
    if binary:
        with open(local_path, "rb") as f:
            data = f.read()
    else:
        with open(local_path) as f:
            data = f.read().encode('utf-8')
    fp = BytesIO(data)
    key.set_contents_from_file(fp)
Example #8
def _upload(key, callback, local_path, replace=False, rrs=False):
    local_file_path = utils.file_path(local_path)

    with open(local_file_path, 'rb') as local_file:
        key.set_contents_from_file(
            local_file,
            replace=replace,
            cb=callback,
            num_cb=settings.UPLOAD_CB_NUM,
            reduced_redundancy=rrs,
            rewind=True,
        )
Example #9
 def uploadBundleBackend(cls, bundleItemGen, fileCount, totalSize,
         bucket, permittedUsers=None, callback=None, policy="private"):
     current = 0
     for i, (biName, biSize, biFileObj) in enumerate(bundleItemGen):
         keyName = os.path.basename(biName)
         key = bucket.new_key(keyName)
         if callback:
             cb = cls.UploadCallback(callback, biName,
                 i + 1, fileCount, current, totalSize).callback
         else:
             cb = None
         current += biSize
         key.set_contents_from_file(biFileObj, cb=cb, policy=policy)
         if permittedUsers:
             # Grant additional permissions
             key = bucket.get_key(keyName)
             acl = key.get_acl()
             for user in permittedUsers:
                 acl.acl.add_user_grant('READ', user)
             key.set_acl(acl)
Example #10
    def upload_part_from_file(self, fp, part_num, headers=None, replace=True,
                              cb=None, num_cb=10, policy=None, md5=None):
        """
        Upload another part of this MultiPart Upload.
        
        :type fp: file
        :param fp: The file object you want to upload.
        
        :type part_num: int
        :param part_num: The number of this part.

        The other parameters are exactly as defined for the
        :class:`boto.s3.key.Key` set_contents_from_file method.
        """
        if part_num < 1:
            raise ValueError('Part numbers must be greater than zero')
        query_args = 'uploadId=%s&partNumber=%d' % (self.id, part_num)
        key = self.bucket.new_key(self.key_name)
        key.set_contents_from_file(fp, headers, replace, cb, num_cb, policy,
                                   md5, reduced_redundancy=False, query_args=query_args)
Example #11
def singlepart_upload_vsfile_to_s3(file_ref, filename, mime_type):
    """
    Attempts to upload the given file to S3 in a single request
    :param file_ref: VSFile reference
    :param filename: file name to upload as
    :param mime_type: MIME type to set as the object's Content-Type
    :return: boto.s3.Key for the uploaded file
    """
    import boto.s3.key
    from gnmvidispine.vs_storage import VSFile
    from chunked_downloader import ChunkedDownloader, ChunkDoesNotExist

    if not isinstance(file_ref, VSFile):
        raise TypeError

    download_url = "{u}:{p}/API/storage/file/{id}/data".format(
        u=settings.VIDISPINE_URL, p=settings.VIDISPINE_PORT, id=file_ref.name)

    d = ChunkedDownloader(download_url,
                          auth=(settings.VIDISPINE_USERNAME,
                                settings.VIDISPINE_PASSWORD),
                          chunksize=CHUNK_SIZE)

    s3conn = s3_connect()
    bucket = s3conn.get_bucket(settings.DOWNLOADABLE_LINK_BUCKET)
    key = boto.s3.key.Key(bucket)
    key.key = filename

    datastream = d.stream_chunk(0)
    total_size = key.set_contents_from_file(
        datastream,
        headers={'Content-Type': mime_type},
        reduced_redundancy=True)

    if int(total_size) != int(file_ref.size):
        logger.error(
            "Expected to upload {0} bytes but only uploaded {1}".format(
                file_ref.size, total_size))
        raise NeedsRetry

    return key
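A hypothetical call site for the function above, assuming file_ref is a gnmvidispine VSFile looked up elsewhere; generate_url() on the returned key then gives a time-limited download link:

# hypothetical usage: file_ref is a VSFile resolved from Vidispine elsewhere
key = singlepart_upload_vsfile_to_s3(file_ref, "render.mp4", "video/mp4")
download_link = key.generate_url(3600)   # presigned URL, valid for one hour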
Example #12
def test():
    print '--- running AWS s3 examples ---'
    c = boto.s3.connection.S3Connection(conf.AWS_ACCESS_KEY, conf.AWS_SECRET_ACCESS_KEY)

    print 'original bucket number:', len(c.get_all_buckets())
    
    bucket_name = 'yet.another.s3.example.code'
    print 'creating a bucket:', bucket_name
    try:
        bucket = c.create_bucket(bucket_name)
    except boto.exception.S3CreateError as e:
        print ' ' * 4, 'error occurred:'
        print ' ' * 8, 'http status code:', e.status
        print ' ' * 8, 'reason:', e.reason
        print ' ' * 8, 'body:', e.body
        return

    test_bucket_name = 'no.existence.yet.another.s3.example.code'
    print 'if you just want to know whether the bucket(\'%s\') exists or not' % (test_bucket_name,), \
        'and don\'t want to get this bucket'
    try:
        test_bucket = c.head_bucket(test_bucket_name)
    except boto.exception.S3ResponseError as e:
        if e.status == 403 and e.reason == 'Forbidden':
            print ' ' * 4, 'the bucket(\'%s\') exists but you don\'t have the permission.' % (test_bucket_name,)
        elif e.status == 404 and e.reason == 'Not Found':
            print ' ' * 4, 'the bucket(\'%s\') doesn\'t exist.' % (test_bucket_name,)

    print 'or use lookup() instead of head_bucket() to do the same thing.', \
        'it will return None if the bucket does not exist instead of throwing an exception.'
    test_bucket = c.lookup(test_bucket_name)
    if test_bucket is None:
        print ' ' * 4, 'the bucket(\'%s\') doesn\'t exist.' % (test_bucket_name,)

    print 'now you can get the bucket(\'%s\')' % (bucket_name,)
    bucket = c.get_bucket(bucket_name)

    print 'add some objects to bucket ', bucket_name
    keys = ['sample.txt', 'notes/2006/January/sample.txt', 'notes/2006/February/sample2.txt',\
           'notes/2006/February/sample3.txt', 'notes/2006/February/sample4.txt', 'notes/2006/sample5.txt']
    print ' ' * 4, 'these key names are:'
    for name in keys:
        print ' ' * 8, name
    
    filename = './_test_dir/sample.txt'
    print ' ' * 4, 'you can set contents of object(\'%s\') from filename(\'%s\')' % (keys[0], filename,)
    key = boto.s3.key.Key(bucket, keys[0])
    bytes_written = key.set_contents_from_filename(filename)
    assert bytes_written == os.path.getsize(filename), '    error occurred: broken file'
        
    print ' ' * 4, 'or set contents of object(\'%s\') by opened file object' % (keys[1],)
    fp = open(filename, 'rb')
    key = boto.s3.key.Key(bucket, keys[1])
    bytes_written = key.set_contents_from_file(fp)
    fp.close()
    assert bytes_written == os.path.getsize(filename), '    error occurred: broken file'

    print ' ' * 4, 'you can also set contents of the remaining key objects from strings'
    for name in keys[2:]:
        print ' ' * 8, 'key:', name
        key = boto.s3.key.Key(bucket, name)
        s = 'This is the content of %s ' % (name,)
        key.set_contents_from_string(s)
        print ' ' * 8, '..contents:', key.get_contents_as_string()
        # use get_contents_to_filename() to save contents to a specific file in the filesystem.

    #print 'You have %d objects in bucket %s' % ()    
    
    print 'list all objects added into \'%s\' bucket' % (bucket_name,)
    print ' ' * 4, 'list() automatically handles all of the result paging from S3.'
    print ' ' * 4, 'You just need to keep iterating until there are no more results.'
    print ' ' * 4, '---------------------------------------------------------------'
    bucket_size = 0
    for key in bucket.list():
        print ' ' * 4, key.name
        bucket_size += key.size
    print ' ' * 4, 'bucket size:', bucket_size, 'bytes.'
    # do not calculate bucket size or number of objects this way when you have millions of objects in a bucket.

    p = 'notes/2006/'
    print 'list objects start with \'%s\'' % (p,)
    objs = bucket.list(prefix = p)
    for key in objs:
        print ' ' * 4, key.name

    print 'list objects or key prefixes like \'%s/*\', something like what\'s at the top of the \'%s\' folder?' % (p, p,)
    objs = bucket.list(prefix = p, delimiter = '/')
    for key in objs:
        print ' ' * 4, key.name

    keys_per_page = 4
    print 'manually handle the results paging from s3,', ' number of keys per page:', keys_per_page
    print ' ' * 4, 'get page 1'
    objs = bucket.get_all_keys(max_keys = keys_per_page)
    for key in objs:
        print ' ' * 8, key.name

    print ' ' * 4, 'get page 2'
    last_key_name = objs[-1].name   # the last key of the previous page is the marker for retrieving the next page.
    objs = bucket.get_all_keys(max_keys = keys_per_page, marker = last_key_name)
    for key in objs:
        print ' ' * 8, key.name
    """
    get_all_keys() is a lower-level method for listing the contents of a bucket.
    This closely models the actual S3 API and requires you to manually handle the paging of results. 
    For a higher-level method that handles the details of paging for you, you can use the list() method.
    """

    print 'you must delete all objects in the bucket \'%s\' before deleting this bucket' % (bucket_name, )
    print ' ' * 4, 'you can delete objects one by one'
    bucket.delete_key(keys[0])
    print ' ' * 4, 'or you can delete multiple objects using a single HTTP request with delete_keys().'
    bucket.delete_keys(keys[1:])

    #TODO print 'after previous deletion, we now have %d objects in bucket(\'%s\')' % (len(bucket.list()), bucket_name,)
    print 'now you can delete the bucket \'%s\'' % (bucket_name,)
    c.delete_bucket(bucket)
Example #13
import sys, os, time
sys.path.extend(['/home/ubuntu/boto'])

import boto
import boto.s3.key
import boto.s3.bucket

program_name = sys.argv[0]
filename_to_upload = sys.argv[1]
destination_name = sys.argv[2]

file_handle = open(filename_to_upload, 'rb')  # binary mode so the bytes are uploaded unmodified

x_aws_access_key_id = os.environ.get("x_aws_access_key_id")
x_aws_secret_access_key = os.environ.get("x_aws_secret_access_key")
bucket = os.environ.get("bucket")

print program_name,"started"
t0 = time.time()

conn = boto.connect_s3(
    aws_access_key_id=x_aws_access_key_id,
    aws_secret_access_key=x_aws_secret_access_key,
    )

print time.time()-t0, "connected"
bucket = boto.s3.bucket.Bucket(conn, name=bucket)
print time.time()-t0, "bucket found"
key = boto.s3.key.Key(bucket, name=destination_name)
key.set_contents_from_file(file_handle)
print time.time()-t0, "upload complete"
Example #14
  def process_item(self, item):
    filepath = item['file']
    filename = item['filename']
    room_id = item['room_id']
    user_id = item['user_id']
    username = item['username']
    room_token = item['room_token']

    print "got this job: %s" % item

    im = thumbnail = None
    try:
      im = Image.open(filepath)
    except IOError:
      # not an image PIL can open; fall back to treating it as a generic file
      pass

    message_type = 'image' if im else 'file'

    # Generate thumbnail
    if im:
      thumbnail = Image.open(filepath)
      thumbnail.thumbnail((300, 300), Image.ANTIALIAS)

    print im
    print thumbnail

    # Upload thumbnail if necessary
    if thumbnail:
      name, ext = os.path.splitext(filename)
      thumbname = '/uploads/%s/%s_thumb%s' % (room_id, name, ext)
      thumbfile = tempfile.NamedTemporaryFile()
      thumbnail.save(thumbfile, im.format)

    # Determine file mimetype
    if im:
      mime_type = 'image/%s' % im.format.lower()
    else:
      mime_type, _ = mimetypes.guess_type(filename)

    # Create keys for file
    key = boto.s3.key.Key(self.bucket)
    key.key = '/uploads/%s/%s' % (room_id, filename)

    if mime_type:
      key.set_metadata('Content-Type', mime_type)

    f = open(filepath, 'rb')
    filesize = os.path.getsize(filepath)
    key.set_contents_from_file(f)
    f.close()
    os.remove(filepath)

    print "Uploaded file"

    # Upload thumbnail
    if thumbnail:
      thumb_key = boto.s3.key.Key(self.bucket)
      thumb_key.key = thumbname
      if mime_type:
        thumb_key.set_metadata('Content-Type', mime_type)
      thumbfile.seek(0)  # rewind: save() left the position at EOF, which would upload an empty body
      thumb_key.set_contents_from_file(thumbfile.file)

    print "Uploaded thumbnail"

    # Create a message
    content = '%s posted a file' % username
    message = {
      'room': room_id,
      'user_id': user_id,
      'user_name': username,
      'type': message_type,
      'filename': filename,
      's3_key': key.key,
      'content': content,
      'created_at': datetime.datetime.utcnow(),
    }
    if message_type == 'image':
      message['size'] = im.size
      message['s3_thumbnail_key'] = thumb_key.key
      message['thumb_size'] = thumbnail.size

    if mime_type:
      message['mime_type'] = mime_type

    message['filesize'] = filesize

    message_id = self.db.messages.insert(message)

    m = {
      'channel': room_token,
      'message': {
        'id': str(message_id),
        'content': message['content'],
        'user_id': str(message['user_id']),
        'user_name': message['user_name'],
        'type': message_type,
        'url': key.generate_url(3600),
      }
    }

    if message_type == 'image':
      m['message']['size'] = message['size']
      m['message']['thumb_url'] = thumb_key.generate_url(3600)

    self.pubnub.publish(m)