def _upload(retries_left=amount_of_retries):
    try:
        if debug == 1:
            six.print_('Start uploading part #%d ...' % part.part_num)
        bucket = SCSBucket(bucket_name)
        with FileChunkWithCallback(source_path, 'rb', offset=offset,
                                   bytes=chunk_bytes, cb=cb,
                                   upload_id=upload_id,
                                   part_num=part.part_num) as fp:
            headers = {"Content-Length": str(chunk_bytes)}
            # Checksum header computed from a second view of the same byte range.
            with FileChunkIO(source_path, 'rb', offset=offset,
                             bytes=chunk_bytes) as fpForMd5:
                headers["s-sina-sha1"] = aws_md5(fpForMd5)
            scsResponse = bucket.put(key_name, fp, headers=headers,
                                     args={'partNumber': '%i' % part.part_num,
                                           'uploadId': upload_id})
            part.etag = scsResponse.urllib2Response.info()['ETag']
            if num_cb:
                num_cb(upload_id, parts_amount, part)
            return part
    except Exception as exc:
        # Retry until the retry budget is exhausted, then report and re-raise.
        if retries_left:
            return _upload(retries_left=retries_left - 1)
        else:
            six.print_('Failed uploading part #%d' % part.part_num)
            six.print_(exc)
            raise exc
    else:
        if debug == 1:
            six.print_('... Uploaded part #%d' % part.part_num)
def _upload_part(bucket_name, key_name, upload_id, parts_amount, part,
                 source_path, offset, chunk_bytes, cb, num_cb,
                 part_failed_cb, amount_of_retries=3, debug=1):
    """
    Uploads a part with retries.
    """
    from sinastorage.vendored.filechunkio import FileChunkIO
    from sinastorage.multipart import FileChunkWithCallback

    if debug == 1:
        six.print_("_upload_part(%s, %s, %s, %s, %s)"
                   % (source_path, offset, chunk_bytes, upload_id, part.part_num))

    def _upload(retries_left=amount_of_retries):
        try:
            if debug == 1:
                six.print_('Start uploading part #%d ...' % part.part_num)
            bucket = SCSBucket(bucket_name)
            with FileChunkWithCallback(source_path, 'rb', offset=offset,
                                       bytes=chunk_bytes, cb=cb,
                                       upload_id=upload_id,
                                       part_num=part.part_num) as fp:
                headers = {"Content-Length": str(chunk_bytes)}
                # The per-part checksum header is left disabled here:
                # with FileChunkIO(source_path, 'rb', offset=offset,
                #                  bytes=chunk_bytes) as fpForMd5:
                #     headers["s-sina-sha1"] = aws_md5(fpForMd5)
                scsResponse = bucket.put(key_name, fp, headers=headers,
                                         args={'partNumber': '%i' % part.part_num,
                                               'uploadId': upload_id})
                part.etag = scsResponse.urllib2Response.info()['ETag']
                if num_cb:
                    num_cb(upload_id, parts_amount, part)
                return part
        except Exception as exc:
            if retries_left:
                return _upload(retries_left=retries_left - 1)
            else:
                six.print_('Failed uploading part #%d' % part.part_num)
                six.print_(exc)
                if part_failed_cb:
                    part_failed_cb(upload_id, part)
                return None
        else:
            if debug == 1:
                six.print_('... Uploaded part #%d' % part.part_num)

    return _upload()
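# Minimal sketch of the FileChunkIO slicing used by _upload_part: expose one
# byte range of a file as a file-like object without reading the whole file
# into memory.  Only the offset/bytes keywords already used above are assumed;
# read_chunk() is a hypothetical helper, not part of the library.
from sinastorage.vendored.filechunkio import FileChunkIO

def read_chunk(path, offset, length):
    # FileChunkIO presents the [offset, offset + length) slice of 'path'
    # as an ordinary readable file object.
    with FileChunkIO(path, 'rb', offset=offset, bytes=length) as chunk:
        return chunk.read()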
def _upload(retries_left=amount_of_retries):
    try:
        bucket = SCSBucket(bucket_name)
        headers = {"Content-Length": str(fileChunkWithCallback.bytes)}
        with FileChunkIO(fileChunkWithCallback.name, 'rb',
                         offset=fileChunkWithCallback.offset,
                         bytes=fileChunkWithCallback.bytes) as fpForMd5:
            headers["s-sina-sha1"] = aws_md5(fpForMd5)
        scsResponse = bucket.put(key_name, fileChunkWithCallback,
                                 headers=headers,
                                 args={'partNumber': '%i' % part.part_num,
                                       'uploadId': upload_id})
        part.etag = scsResponse.urllib2Response.info()['ETag']
        part.response = scsResponse
        if num_cb:
            num_cb(upload_id, parts_amount, part)
        return part
    except Exception as exc:
        if retries_left:
            return _upload(retries_left=retries_left - 1)
        else:
            six.print_('Failed uploading part #%d' % part.part_num)
            six.print_(exc)
            raise exc
    else:
        six.print_('... Uploaded part #%d' % part.part_num)
def from_urllib(cls, e, **extra):
    """Builds the library's error object from a urllib HTTPError/URLError."""
    self = cls("HTTP error", **extra)
    self.urllib2Response = e
    if hasattr(e, 'hdrs'):
        self.hdrs = e.hdrs
    else:
        self.hdrs = []
    if hasattr(e, 'url'):
        self.url = e.url
    else:
        self.url = ''
    self.urllib2Request = self.extra['req']
    for attr in ("reason", "code", "filename"):
        if attr not in extra and hasattr(e, attr):
            self.extra[attr] = getattr(e, attr)
    self.fp = getattr(e, "fp", None)
    if self.fp:
        # The except clause is to avoid a bug in urllib2 which has it read
        # as if in chunked mode, but SCS gives an empty reply.
        try:
            self.data = data = self.fp.read()
        except (http_client.HTTPException, urllib.error.URLError) as e:
            self.extra["read_error"] = e
            self.data = u'%s' % self.extra['reason']
        else:
            data = data.decode("utf-8")
            try:
                msgJsonDict = json.loads(data)
                self.msg = msgJsonDict['Message']
            except Exception as e:
                self.data = u'%s' % self.extra['reason']
                six.print_(e)
    else:
        self.data = u'%s' % self.extra['reason']
    return self
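# Illustrative sketch of how a classmethod like from_urllib() is typically
# wired up: wrap a urllib HTTPError into the richer error object and re-raise
# it.  'error_cls' and 'open_or_wrap' stand in for names not shown in this
# excerpt; passing req=... matches the self.extra['req'] lookup above.
from six.moves import urllib

def open_or_wrap(request, error_cls):
    try:
        return urllib.request.urlopen(request)
    except urllib.error.HTTPError as e:
        raise error_cls.from_urllib(e, req=request)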
def sign(self, cred):
    '''
    Signs stringToSign (see
    http://sinastorage.sinaapp.com/developer/interface/aws/auth.html).
    '''
    stringToSign = self.descriptor()
    six.print_("stringToSign------", stringToSign)
    key = cred.secret_key.encode("utf-8")
    hasher = hmac.new(key, stringToSign.encode("utf-8"), hashlib.sha1)
    six.print_("b64encode(hasher.digest())-----", b64encode(hasher.digest()))
    sign = (b64encode(hasher.digest())[5:15]).decode("utf-8")  # ssig
    six.print_("sign------", sign)
    # Authorization header format:
    #   Authorization: "SINA" + " " + accessKey + ":" + ssig
    # e.g. Authorization=SINA product:/PL3776XmM
    self.headers["Authorization"] = "SINA %s:%s" % (cred.access_key, sign)
    return sign
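# Standalone sketch of the signature scheme implemented by sign() above:
# base64-encode the HMAC-SHA1 of the string-to-sign, keep characters 5..15
# as the "ssig", and build the "SINA <accessKey>:<ssig>" header value.
# The function name and arguments below are illustrative, not library API.
import hmac
import hashlib
from base64 import b64encode

def make_authorization(access_key, secret_key, string_to_sign):
    digest = hmac.new(secret_key.encode("utf-8"),
                      string_to_sign.encode("utf-8"),
                      hashlib.sha1).digest()
    ssig = b64encode(digest)[5:15].decode("utf-8")
    return "SINA %s:%s" % (access_key, ssig)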
def multipart_upload(self, key_name, source_path, acl=None, metadata={},
                     mimetype=None, headers={}, cb=None, num_cb=None,
                     part_failed_cb=None):
    """
    Parallel multipart upload.
    """
    try:
        # multipart portions copyright Fabian Topfstedt
        # https://pypi.python.org/pypi/filechunkio/1.5
        import math
        import mimetypes
        from multiprocessing import Pool
        from sinastorage.vendored.filechunkio import FileChunkIO
        multipart_capable = True
        parallel_processes = 4
        min_bytes_per_chunk = 5 * 1024 * 1024  # minimum bytes per part (5 MB)
        usage_flag_multipart_capable = """ [--multipart]"""
        usage_string_multipart_capable = """
        multipart - Upload files as multiple parts. This needs filechunkio.
                    Requires ListBucket, ListMultipartUploadParts,
                    ListBucketMultipartUploads and PutObject permissions."""
    except ImportError as err:
        multipart_capable = False
        usage_flag_multipart_capable = ""
        # str(err) keeps this working on Python 3, which has no err.message.
        usage_string_multipart_capable = '\n\n "' + \
            str(err)[len('No module named '):] + \
            '" is missing for multipart support '
        raise err

    multipart = self.initiate_multipart_upload(key_name, acl, metadata,
                                               mimetype, headers)

    source_size = getSize(source_path)
    bytes_per_chunk = max(int(math.sqrt(min_bytes_per_chunk) * math.sqrt(source_size)),
                          min_bytes_per_chunk)
    chunk_amount = int(math.ceil(source_size / float(bytes_per_chunk)))
    multipart.bytes_per_part = bytes_per_chunk
    multipart.parts_amount = chunk_amount

    # Upload the parts in parallel; each finished part is collected via the
    # apply_async callback.
    pool = Pool(processes=parallel_processes)
    i = 0
    for part in multipart.get_next_part():
        offset = i * bytes_per_chunk
        remaining_bytes = source_size - offset
        chunk_bytes = min([bytes_per_chunk, remaining_bytes])
        pool.apply_async(func=_upload_part,
                         args=(self.name, key_name, multipart.upload_id,
                               multipart.parts_amount, part, source_path,
                               offset, chunk_bytes, cb, num_cb,
                               part_failed_cb,),
                         callback=lambda part: multipart.parts.append(part)
                                               if part is not None else None)
        i = i + 1
    pool.close()
    pool.join()

    if len(multipart.parts) == chunk_amount:
        self.complete_multipart_upload(multipart)
    else:
        # mp.cancel_upload()
        six.print_(len(multipart.parts), chunk_amount)
        raise RuntimeError("multipart upload failed!!")
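# Self-contained illustration of the part-size rule used by multipart_upload:
# the part size grows with the square root of the file size but never drops
# below 5 MB, so the number of parts is roughly sqrt(source_size / 5 MB).
# plan_chunks() is a hypothetical helper, not part of the library.
import math

def plan_chunks(source_size, min_bytes_per_chunk=5 * 1024 * 1024):
    bytes_per_chunk = max(int(math.sqrt(min_bytes_per_chunk) * math.sqrt(source_size)),
                          min_bytes_per_chunk)
    chunk_amount = int(math.ceil(source_size / float(bytes_per_chunk)))
    return bytes_per_chunk, chunk_amount

# For example, a 1 GiB file: plan_chunks(1024 ** 3) gives parts of roughly
# 71.5 MiB each, 15 parts in total.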
def multipart_upload(self, key_name, source_path, acl=None, metadata={},
                     mimetype=None, headers={}, cb=None, num_cb=None):
    """
    Parallel multipart upload.
    """
    try:
        # multipart portions copyright Fabian Topfstedt
        # https://pypi.python.org/pypi/filechunkio/1.5
        import math
        import mimetypes
        from multiprocessing import Pool
        from sinastorage.vendored.filechunkio import FileChunkIO
        multipart_capable = True
        parallel_processes = 4
        min_bytes_per_chunk = 5 * 1024 * 1024  # minimum bytes per part (5 MB)
        usage_flag_multipart_capable = """ [--multipart]"""
        usage_string_multipart_capable = """
        multipart - Upload files as multiple parts. This needs filechunkio.
                    Requires ListBucket, ListMultipartUploadParts,
                    ListBucketMultipartUploads and PutObject permissions."""
    except ImportError as err:
        multipart_capable = False
        usage_flag_multipart_capable = ""
        # str(err) keeps this working on Python 3, which has no err.message.
        usage_string_multipart_capable = '\n\n "' + \
            str(err)[len('No module named '):] + \
            '" is missing for multipart support '
        raise err

    multipart = self.initiate_multipart_upload(key_name, acl, metadata,
                                               mimetype, headers)

    source_size = getSize(source_path)
    bytes_per_chunk = max(int(math.sqrt(min_bytes_per_chunk) * math.sqrt(source_size)),
                          min_bytes_per_chunk)
    chunk_amount = int(math.ceil(source_size / float(bytes_per_chunk)))
    multipart.bytes_per_part = bytes_per_chunk
    multipart.parts_amount = chunk_amount

    # Upload the parts in parallel; each finished part is collected via the
    # apply_async callback.
    pool = Pool(processes=parallel_processes)
    i = 0
    for part in multipart.get_next_part():
        offset = i * bytes_per_chunk
        remaining_bytes = source_size - offset
        chunk_bytes = min([bytes_per_chunk, remaining_bytes])
        pool.apply_async(func=_upload_part,
                         args=(self.name, key_name, multipart.upload_id,
                               multipart.parts_amount, part, source_path,
                               offset, chunk_bytes, cb, num_cb,),
                         callback=lambda part: multipart.parts.append(part))
        i = i + 1
    pool.close()
    pool.join()

    if len(multipart.parts) == chunk_amount:
        self.complete_multipart_upload(multipart)
    else:
        # mp.cancel_upload()
        six.print_(len(multipart.parts), chunk_amount)
        raise RuntimeError("multipart upload failed!!")
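# Usage sketch (illustrative only): driving the parallel multipart upload with
# progress callbacks.  The bucket name, key and source path are placeholders,
# the import path for SCSBucket is assumed, the num_cb / part_failed_cb
# signatures follow the calls made from _upload_part above, and part_failed_cb
# only applies to the multipart_upload variant that accepts it.
import six
from sinastorage.bucket import SCSBucket  # assumed import path

def on_part_done(upload_id, parts_amount, part):
    # Invoked once per successfully uploaded part.
    six.print_('part #%d of %d done (upload %s)'
               % (part.part_num, parts_amount, upload_id))

def on_part_failed(upload_id, part):
    # Invoked when a part has exhausted its retries.
    six.print_('part #%d failed (upload %s)' % (part.part_num, upload_id))

def upload_movie():
    bucket = SCSBucket('mybucket')
    bucket.multipart_upload('movies/demo.mp4', '/tmp/demo.mp4',
                            num_cb=on_part_done,
                            part_failed_cb=on_part_failed)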