def test_multipart_upload(self):
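        """Upload three 5 MiB parts, complete the multipart upload, then read
        the object back and verify its content and total length."""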
        object_name = "test_multipart_upload"
        part_num = 3
        part_content = "1" * 5242880  # 5 MiB of data per part
        upload_list = []
        upload_token = self.client.init_multipart_upload(
            self.bucket_name, object_name)

        for i in range(part_num):
            upload_list.append(
                self.client.upload_part(self.bucket_name, object_name,
                                        upload_token.upload_id, i + 1,
                                        part_content))

        upload_part_result = UploadPartResultList(
            {"uploadPartResultList": upload_list})
        print(json.dumps(upload_part_result))
        self.client.complete_multipart_upload(
            bucket_name=self.bucket_name,
            object_name=object_name,
            upload_id=upload_token.upload_id,
            metadata=None,
            upload_part_result_list=json.dumps(upload_part_result))

        obj = self.client.get_object(self.bucket_name, object_name)
        length = 0
        for chunk in obj.stream:
            length += len(chunk)
            for t in chunk:
                self.assertEqual(t, "1")

        obj.stream.close()
        print(length)
        self.assertEqual(length, part_num * 5242880)
def multipart_upload(bucket_name, object_name, metadata, stream):
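    """Upload stream to bucket_name/object_name in parts of
    multipart_upload_buffer_size bytes, retrying each failed part up to
    max_upload_retry_time times and aborting the whole upload on error."""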
    upload_token = None
    try:
        logger.debug('Put object in multipart upload mode')
        upload_token = fds_client.init_multipart_upload(
            bucket_name=bucket_name, object_name=object_name)
        logger.debug('Upload id [' + upload_token.upload_id + ']')
        part_number = 1
        upload_list = []
        while True:
            data = stream.read(multipart_upload_buffer_size)
            if len(data) <= 0:
                break
            logger.info("Part %d read %d bytes" % (part_number, len(data)))

            rtn = None
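            # Retry the part upload, backing off (i + 1) * 5 seconds after
            # each failed attempt.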
            for i in range(max_upload_retry_time):
                try:
                    rtn = fds_client.upload_part(
                        bucket_name=upload_token.bucket_name,
                        object_name=upload_token.object_name,
                        upload_id=upload_token.upload_id,
                        part_number=part_number,
                        data=data)
                    upload_list.append(rtn)
                    break
                except Exception:
                    sleep_seconds = (i + 1) * 5
                    logger.warning(
                        "upload part %d failed, retry after %d seconds" %
                        (part_number, sleep_seconds))
                    sleep(sleep_seconds)

            if not rtn:
                raise GalaxyFDSClientException("Upload part %d failed" %
                                               part_number)
            part_number += 1

        upload_part_result = UploadPartResultList(
            {"uploadPartResultList": upload_list})
        logger.info("Upload data end, result : %s" %
                    json.dumps(upload_part_result))
        result = fds_client.complete_multipart_upload(
            bucket_name=upload_token.bucket_name,
            object_name=upload_token.object_name,
            upload_id=upload_token.upload_id,
            metadata=metadata,
            upload_part_result_list=json.dumps(upload_part_result))
        logger.info("Upload complete")
        return result
    except Exception as e:
        try:
            logger.error("Upload id %s will be aborted" % upload_token.upload_id)
            fds_client.abort_multipart_upload(bucket_name, object_name,
                                              upload_token.upload_id)
        except Exception:
            pass
        raise e
def put_object(data_file, bucket_name, object_name, metadata):
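    """Upload data_file to bucket_name/object_name with the given metadata;
    when data_file is empty, read stdin and upload it in 10 MiB parts."""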
    check_bucket_name(bucket_name)
    check_object_name(object_name)
    check_metadata(metadata)
    fds_metadata = parse_metadata_from_str(metadata)
    if data_file:
        # open in binary mode so arbitrary file contents round-trip unchanged
        with open(data_file, 'rb') as fd:
            fds_client.put_object(bucket_name=bucket_name,
                                  object_name=object_name,
                                  data=fd,
                                  metadata=fds_metadata)
    else:
        logger.debug('Put object with multipart upload')
        upload_token = fds_client.init_multipart_upload(
            bucket_name=bucket_name, object_name=object_name)
        logger.debug('Upload id [' + upload_token.upload_id + ']')
        byte_buffer = bytearray(10 * 1024 * 1024)  # 10 MiB read buffer for stdin
        part_number = 1  # part numbers start at 1
        upload_list = []
        while True:
            length = sys.stdin.readinto(byte_buffer)
            if length <= 0:
                break
            print(length)

            rtn = fds_client.upload_part(bucket_name=upload_token.bucket_name,
                                         object_name=upload_token.object_name,
                                         upload_id=upload_token.upload_id,
                                         part_number=part_number,
                                         data=byte_buffer[0:length])
            upload_list.append(rtn)
            part_number += 1

        upload_part_result = UploadPartResultList(
            {"uploadPartResultList": upload_list})
        print(json.dumps(upload_part_result))
        fds_client.complete_multipart_upload(
            bucket_name=upload_token.bucket_name,
            object_name=upload_token.object_name,
            upload_id=upload_token.upload_id,
            metadata=fds_metadata,
            upload_part_result_list=json.dumps(upload_part_result))
def multipart_upload(bucket_name, object_name, metadata, stream):
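    """Upload stream to bucket_name/object_name in parts of
    multipart_upload_buffer_size bytes (no per-part retries), aborting the
    whole upload on error."""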
    upload_token = None
    try:
        logger.debug('Put object in multipart upload mode')
        upload_token = fds_client.init_multipart_upload(
            bucket_name=bucket_name, object_name=object_name)
        logger.debug('Upload id [' + upload_token.upload_id + ']')
        part_number = 1
        upload_list = []
        while True:
            data = stream.read(multipart_upload_buffer_size)
            if len(data) <= 0:
                break
            logger.info("Part %d read %d bytes" % (part_number, len(data)))

            rtn = fds_client.upload_part(bucket_name=upload_token.bucket_name,
                                         object_name=upload_token.object_name,
                                         upload_id=upload_token.upload_id,
                                         part_number=part_number,
                                         data=data)
            upload_list.append(rtn)
            part_number += 1

        upload_part_result = UploadPartResultList(
            {"uploadPartResultList": upload_list})
        logger.info("Upload data end, result : %s" %
                    json.dumps(upload_part_result))
        fds_client.complete_multipart_upload(
            bucket_name=upload_token.bucket_name,
            object_name=upload_token.object_name,
            upload_id=upload_token.upload_id,
            metadata=metadata,
            upload_part_result_list=json.dumps(upload_part_result))
        logger.info("Upload complete")
    except Exception as e:
        try:
            logger.error("Upload id %s will be aborted" % upload_token.upload_id)
            fds_client.abort_multipart_upload(bucket_name, object_name,
                                              upload_token.upload_id)
        except Exception:
            pass
        raise e
    def _upload(self, filename, dst_url, autodetect_mimetype, sync=False):
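        """Upload a local file to dst_url, skipping it when the remote object
        already has the same MD5 or a last-modified time that is not older,
        and using multipart upload once the file reaches
        multipart_upload_buffer_size bytes."""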
        if not os.path.exists(filename):
            CLIPrinter.warn("{} does not exist".format(filename))
            return
        dst_bucket_name = dst_url.bucket_name()
        if dst_url.is_object_url():
            dst_object_name = dst_url.object_name()
        elif sync:
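            # presumably drops a leading "./" from paths produced by sync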
            dst_object_name = filename[2:]
        elif dst_url.is_object_dir():
            dst_object_name = dst_url.object_dir() + os.path.basename(filename)
        else:
            dst_object_name = os.path.basename(filename)
        try:
            if self._fds.does_object_exists(dst_bucket_name, dst_object_name):
                # check md5 firstly
                metadata = self._fds.get_object_metadata(
                    dst_bucket_name, dst_object_name)
                if metadata.metadata.get(Common.CONTENT_MD5) is not None:
                    local_md5 = file_md5(filename)
                    if local_md5 == metadata.metadata.get(Common.CONTENT_MD5):
                        CLIPrinter.done(
                            'upload object %s/%s(skip because of same md5)' %
                            (dst_bucket_name, dst_object_name))
                        return

                # check last-modified
                mtime = None
                if os.path.isfile(filename):
                    mtime = os.path.getmtime(filename)

                lm = metadata.metadata[Common.LAST_MODIFIED]
                remote_modified = rfc822_timestamp(lm)

                # skip if the local file is not newer than the remote object
                if mtime is not None and datetime.fromtimestamp(
                        mtime) <= remote_modified:
                    CLIPrinter.done(
                        'upload object %s/%s(skip because remote is up to date)' %
                        (dst_bucket_name, dst_object_name))
                    return
        except Exception as e:
            CLIPrinter.fail(e.message)
            return
        mimetype = None
        if autodetect_mimetype:
            mimetype = mimetypes.guess_type(filename)[0]
        metadata = FDSObjectMetadata()
        if mimetype is not None:
            metadata.add_header(Common.CONTENT_TYPE, mimetype)
        result = None
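        # Files smaller than multipart_upload_buffer_size are sent with a single
        # put_object call; larger files go through multipart upload.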

        with open(filename, "rb") as f:
            file_length = os.path.getsize(filename)
            if file_length < multipart_upload_buffer_size:
                try:
                    result = self._fds.put_object(dst_bucket_name,
                                                  dst_object_name,
                                                  f,
                                                  metadata=metadata)
                except GalaxyFDSClientException as e:
                    CLIPrinter.fail(e.message)
            else:
                try:
                    upload_token = self._fds.init_multipart_upload(
                        dst_bucket_name, dst_object_name)
                    part_number = 1
                    result_list = []
                    while True:
                        data = f.read(multipart_upload_buffer_size)
                        if len(data) <= 0:
                            break
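                        # Retry the part upload, backing off (i + 1) * 10
                        # seconds after each failed attempt; give up after
                        # max_upload_retry_time attempts.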
                        upload_result = None
                        for i in range(max_upload_retry_time):
                            try:
                                upload_result = self._fds.upload_part(
                                    dst_bucket_name, dst_object_name,
                                    upload_token.upload_id, part_number, data)
                                result_list.append(upload_result)
                                break
                            except GalaxyFDSClientException as e:
                                sleep_seconds = (i + 1) * 10
                                CLIPrinter.warn(
                                    "upload part %d failed, retry after %d seconds"
                                    % (part_number, sleep_seconds))
                                time.sleep(sleep_seconds)
                        if upload_result is None:
                            raise GalaxyFDSClientException(
                                "upload part %d failed" % part_number)
                        part_number += 1
                    upload_part_result = UploadPartResultList(
                        {"uploadPartResultList": result_list})
                    result = self._fds.complete_multipart_upload(
                        upload_token.bucket_name, upload_token.object_name,
                        upload_token.upload_id, metadata,
                        json.dumps(upload_part_result))
                except Exception as e:
                    self._fds.abort_multipart_upload(dst_bucket_name,
                                                     dst_object_name,
                                                     upload_token.upload_id)
                    CLIPrinter.fail(e.message)
        if result is not None:
            CLIPrinter.done('upload object %s/%s' %
                            (dst_bucket_name, dst_object_name))
        else:
            CLIPrinter.fail('upload object %s/%s' %
                            (dst_bucket_name, dst_object_name))