Example #1
    def start_upload(self, bucketname, keyname, stream, policy, threads=4):
        self.bucketname = bucketname
        self.stream = stream
        headers = {}
        obj = self.connect()
        bucket = obj.get_bucket(self.bucketname)
        mtype = mimetypes.guess_type(keyname)[0] or 'application/octet-stream'
        headers.update({'Content-Type': mtype})
        mp = bucket.initiate_multipart_upload(keyname, headers=headers)

        # cancel the in-progress multipart upload on Ctrl-C
        def cancel_upload_handler(signum, frame):
            self.cancel_upload(mp)
        umobj_add_handler(signal.SIGINT, cancel_upload_handler)
        self.mp_id = mp.id
        bytes_per_chunk = 1024 * 1024 * 10  # 10 MiB per part
        logging.info("Chunk size: %d bytes" % bytes_per_chunk)

        # read initial bytes from data stream and upload in parts
        bytes_in = stream.read(bytes_per_chunk)
        part_num = 1
        logging.info("Starting multipart uploads from stream")
        try:
            while bytes_in:
                # bytes come off the stream as a string; wrap them in a
                # StringIO object so upload_part_from_file can consume them
                mp.upload_part_from_file(StringIO(bytes_in), part_num=part_num)
                part_num += 1
                bytes_in = stream.read(bytes_per_chunk)
        except Exception as e:
            logging.error(e)
            self.cancel_upload(mp)
            sys.exit(1)

        # the loop exits once the stream returns no data; confirm it is
        # exhausted before completing the upload
        if not stream.read(1):
            mp.complete_upload()
            key = bucket.get_key(keyname)
            logging.debug("%s : Applying bucket policy %s" % (mp.id, policy))
            policy.owner = key.get_acl().owner
            key.set_acl(policy)
        else:
            self.cancel_upload(mp)
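
A minimal usage sketch for this method, assuming the enclosing class is the
MultiPart class that Example #2 instantiates and that boto credentials are
available in the environment or ~/.boto; 'my-bucket' and 'backup.tar' are
placeholder names:

    import sys
    import boto

    conn = boto.connect_s3()  # assumes credentials are configured
    policy = conn.get_bucket('my-bucket').get_acl()

    # stream stdin to the key 'backup.tar' in 10 MiB parts
    uploader = MultiPart()
    uploader.start_upload('my-bucket', 'backup.tar', sys.stdin, policy)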
Example #2
def obj_upload(bucket_name, src, dest_name, recursive=False, multi=False,
               checksum=False):
    # resolve src to a full absolute path
    src = os.path.abspath(src)
    # retrieve the bucket
    bucket = Obj.conn.get_bucket(bucket_name)
    try:
        size = os.stat(src).st_size
    except OSError as e:
        logging.error(e)
        return
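    # reuse the bucket's ACL as the policy applied to each uploaded key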
    policy = bucket.get_acl()

    # use a closure to capture the current bucket
    def cancel_multipart_handler(signum, frame):
        for upload in bucket.get_all_multipart_uploads():
            upload.cancel_upload()
            logging.info("Removed incomplete multipart upload: %s" %
                         str(upload))

    umobj_add_handler(signal.SIGINT, cancel_multipart_handler)

    if recursive and os.path.isdir(src):
        prefix = src.split(os.sep)[-1]
        if dest_name:
            prefix = dest_name.rstrip(os.sep) + '/' + prefix
        for directory in walk_path(prefix):
            directory = directory + '/'
            if not check_directory(bucket, directory):
                create_directory(bucket, directory)
        operations = sum([len(files) for r, d, files in
                          os.walk(src.rstrip(os.sep))])
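        # operations counts files only, while count below also advances on
        # directory creation, hence the count < operations guard on updates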
        pbar = progressbar.ProgressBar(maxval=operations)
        pbar.start()
        count = 0
        for root, dirs, files in os.walk(src.rstrip(os.sep)):
            # do not create a key for the base directory itself
            if root != src:
                directory = '%s/%s/' % (prefix,
                                        lremove(src, root).lstrip(os.sep))
                if not check_directory(bucket, directory):
                    create_directory(bucket, directory)
                    count += 1
                    if count < operations:
                        pbar.update(count)
            for f in files:
                filename = root + os.sep + f
                try:
                    file_st = os.stat(filename)
                    size = file_st.st_size
                except OSError as e:
                    logging.error(e)
                    continue
                if root != src:
                    keyname = '%s/%s/%s' % (prefix,
                                            lremove(src, root).lstrip(os.sep),
                                            f)
                else:
                    keyname = '%s/%s' % (prefix, f)
                if checksum and not check_key_upload(
                        bucket, keyname, filename):
                    continue
                logging.info("Upload key %s from file %s" %
                             (keyname, filename))
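                # use multipart when requested (and the file is non-empty),
                # or automatically for files over 1 GiB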
                if (multi and size > 0) or (size > (1024 * 1024 * 1024)):
                    m = MultiPart()
                    m.start_upload(bucket_name, keyname, filename, policy)
                else:
                    key = bucket.new_key(keyname)
                    res = upload_file(key, filename, progress=False)
                    if res >= 0:
                        logging.debug("Applying bucket policy %s" % policy)
                        # set the owner of the policy to the upload user
                        policy.owner = key.get_acl().owner
                        key.set_acl(policy)
                count += 1
                if count < operations:
                    pbar.update(count)
        pbar.finish()
    else:
        if os.path.isdir(src):
            logging.warning("Skipping directory %s, " % src +
                            "use the recursive option.")
            return
        if not os.path.isfile(src):
            logging.error("File %s does not exist." % src)
            return
        if dest_name:
            current_path = ''
            for dir_part in dest_name.lstrip(os.sep).split(os.sep)[:-1]:
                current_path = current_path + dir_part + '/'
                create_directory(bucket, current_path)
            if dest_name.endswith('/'):
                key_name = current_path + os.path.basename(src)
            else:
                key_name = dest_name
        else:
            key_name = os.path.basename(src)
        if checksum and not check_key_upload(bucket, key_name, src):
            return
        if multi or (size > (1024 * 1024 * 1024)):
            logging.info("Starting a multipart upload.")
            m = MultiPart()
            m.start_upload(bucket_name, key_name, src, policy)
        else:
            key = bucket.new_key(key_name)
            res = upload_file(key, src)
            if res >= 0:
                logging.debug("Applying bucket policy %s" % policy)
                # set the owner of the policy to the upload user
                policy.owner = key.get_acl().owner
                key.set_acl(policy)
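
A short usage sketch, assuming Obj.conn has already been initialised with a
boto S3 connection elsewhere in the package; the bucket and path names are
placeholders:

    # upload one file under an explicit destination key
    obj_upload('my-bucket', '/tmp/report.pdf', 'reports/report.pdf')

    # mirror a directory tree, forcing multipart and checksum verification
    obj_upload('my-bucket', '/data/photos', 'archive',
               recursive=True, multi=True, checksum=True)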