def _make_bput(self, f, ctx, offset):
    """Upload the remaining chunks of one block via successive bput calls.

    Args:
        f: open file object; reads use explicit offsets via readfile().
        ctx: context string returned by the preceding mkblk/bput call.
        offset: byte offset of this block within the file.

    Returns:
        (offset, code, ctx_or_message): the last chunk's ctx on success,
        or the server's error message when a chunk fails for good.
    """
    bputnum = 1
    offset_next = offset + self.bput_size
    bput_next = readfile(f, offset_next, self.bput_size)
    bputcode = 200
    bputtext = {'ctx': ctx}
    try:
        configured_retries = int(self.cfg.bput_retries)
    except ValueError:
        warning(
            'parameter bput_retries is invalid, so use default value 3')
        configured_retries = 3
    while bput_next and bputnum < self.block_size / self.bput_size:
        bputcode, bputtext, _ = self._make_bput_post(
            ctx, bputnum, bput_next)
        # Fix: give each chunk its own retry budget. Previously the counter
        # was decremented across the whole block, so once the first failing
        # chunk used it up, later chunks were never retried.
        bput_retries = configured_retries
        while bput_retries and self.__need_retry(bputcode):
            debug('bput fail.retry upload')
            bputcode, bputtext, _ = self._make_bput_post(
                ctx, bputnum, bput_next)
            bput_retries -= 1
        if bputcode != 200:
            return offset, bputcode, bputtext['message']
        ctx = bputtext['ctx']
        offset_next = offset + bputtext['offset']
        bput_next = readfile(f, offset_next, self.bput_size)
        bputnum += 1
    return offset, bputcode, bputtext['ctx']
def move(self, srcbucket, srckey, dstbucket, dstkey):
    """Move an object from srcbucket/srckey to dstbucket/dstkey."""
    target = self._make_move_url(srcbucket, srckey, dstbucket, dstkey)
    debug('Move object %s from %s to %s:%s' %
          (srckey, srcbucket, dstbucket, dstkey))
    req_headers = super(BucketManager, self)._gernerate_headers(target)
    return _post(url=target, headers=req_headers)
def _make_block(self, offset):
    """Create one block: mkblk the first chunk, then bput the rest.

    Records the block result via self._record_upload_progress and
    returns the mkblk HTTP status code.
    """
    url, size = self._mlk_url(offset)
    url = https_check(url)
    headers = self.__generate_headers()
    try:
        mkblk_retries = int(self.cfg.mkblk_retries)
    except ValueError:
        # Fix: dropped the unused 'as e' binding.
        warning(
            'parameter mkblk_retries is invalid, so use default value 3')
        mkblk_retries = 3
    with open(self.path, 'rb') as f:
        bput = readfile(f, offset, self.bput_size)
        blkcode, blktext, _ = _post(url=url, headers=headers, data=bput)
        while mkblk_retries and self.__need_retry(blkcode):
            # Fix: log retries like the bput/mkfile retry loops do.
            debug('mkblk fail.retry upload')
            blkcode, blktext, _ = _post(url=url, headers=headers, data=bput)
            mkblk_retries -= 1
        if blkcode != 200:
            result = [offset, blkcode, blktext['message']]
            debug('make block fail,code :{0},message :{1}'.format(
                blkcode, blktext))
        else:
            result = self._make_bput(f, blktext['ctx'], offset)
        self._record_upload_progress(result, size)
    return blkcode
def test_multipart_upload(self):
    """Multipart-upload a local file and expect HTTP 200."""
    local_path = 'F:\\5_.zip'
    object_key = '5_.zip'
    self.cfg.overwrite = 1
    response = self.cli.multipart_upload(local_path, self.bucket, object_key)
    debug(response)
    self.assertEqual(response[0], 200)
def bucketlist(self, bucket, prefix=None, marker=None, limit=None, mode=None, starttime=None, endtime=None):
    """List objects in a bucket with optional paging/filter parameters.

    Raises:
        ValueError: if limit is given but outside the allowed range.
    """
    options = {'bucket': bucket}
    if marker:
        options['marker'] = marker
    if limit:
        if limit in self._limit_check():
            options['limit'] = limit
        else:
            error('Invalid limit ! Please redefine limit')
            raise ValueError("Invalid limit")
    if prefix:
        options['prefix'] = urlsafe_base64_encode(prefix)
    if mode in (0, 1):
        options['mode'] = mode
    if starttime:
        options['startTime'] = starttime
    if endtime:
        options['endTime'] = endtime
    url = https_check(self._make_url('list', options))
    if options:
        debug('List options is %s' % options)
    debug('List bucket %s' % bucket)
    return _get(url=url,
                headers=super(BucketManager, self)._gernerate_headers(url))
def test_fmgr_fetch(self):
    """Fetch a remote document into the bucket via fmgr."""
    src_url = 'http://a20170704-weihb.w.wcsapi.biz.matocloud.com/1.doc'
    target_key = '1.doc'
    fops = 'fetchURL/%s/bucket/%s/key/%s' % (
        urlsafe_base64_encode(src_url),
        urlsafe_base64_encode(self.bucket),
        urlsafe_base64_encode(target_key))
    debug(self.cli.fmgr_fetch(fops))
def test_fmgr_fetch(self):
    """Fetch a remote URL via fmgr with an empty target key."""
    src_url = 'http://www.example.com/1.doc'
    target_key = ''  # deliberately empty key
    fops = 'fetchURL/%s/bucket/%s/key/%s' % (
        urlsafe_base64_encode(src_url),
        urlsafe_base64_encode(self.bucket),
        urlsafe_base64_encode(target_key))
    debug(self.cli.fmgr_fetch(fops))
def test_fmgr_copy(self):
    """Copy 2.doc to 1.doc within the same bucket via fmgr."""
    src_key = '2.doc'
    dst_key = '1.doc'
    encoded_resource = urlsafe_base64_encode('%s:%s' % (self.bucket, src_key))
    fops = 'resource/%s/bucket/%s/key/%s' % (
        encoded_resource,
        urlsafe_base64_encode(self.bucket),
        urlsafe_base64_encode(dst_key))
    debug(self.cli.fmgr_copy(fops))
def _fmgr_commons(self, reqdata, method):
    """POST a generic fmgr request to '<mgr_host>/fmgr/<method>'.

    Args:
        reqdata: request body to post.
        method: fmgr operation name appended to the URL path.
    """
    url = https_check('{0}/fmgr/{1}'.format(self.mgr_host, method))
    debug('Request body is: %s' % (reqdata))
    # Fix: typo in log message ('opration' -> 'operation').
    debug('Start to execute operation: %s' % method)
    return _post(url=url, data=reqdata,
                 headers=super(Fmgr, self)._gernerate_headers(url, body=reqdata))
def _record_upload_progress(self, result, size):
    """Persist one block's upload result and bump the overall progress.

    Args:
        result: (offset, code, ctx) sequence from a block upload.
        size: number of bytes covered by this block.
    """
    record = dict(zip(['offset', 'code', 'ctx'], result))
    record['size'] = size
    if record['code'] == 200:
        # NOTE(review): self.progress is updated without the (commented-out)
        # lock the original hinted at — confirm single-threaded use.
        self.progress += size
        debug('Current block size: %d, total upload size: %d' %
              (int(size), self.progress))
    self.recorder.set_upload_record(record['offset'], record)
def test_fmgr_fetch(self):
    """Fetch a 1 MB image via fmgr and expect HTTP 200."""
    src_url = 'http://big-caiyz-fmgr-cache.com/1m.jpg'
    target_key = 'fetch_1m.jpg'
    fops = 'fetchURL/%s/bucket/%s/key/%s' % (
        urlsafe_base64_encode(src_url),
        urlsafe_base64_encode(self.bucket),
        urlsafe_base64_encode(target_key))
    result = self.cli.fmgr_fetch(fops)
    debug(result)
    self.assertEqual(result[0], 200)
def test_copy(self):
    """Upload a file, then copy it to a new key and expect HTTP 200."""
    local_path = 'F:\\5_.zip'
    src_key = '5_.zip'
    self.cfg.overwrite = 1
    self.cli.multipart_upload(local_path, self.bucket, src_key)
    dst_key = '5_2.zip'
    result = self.cli.copy(self.bucket, src_key, self.bucket, dst_key)
    debug(result)
    self.assertEqual(result[0], 200)
def _is_complete(self):
    """Return 1 when every recorded block finished with code 200, else 0."""
    self.results = self.recorder.get_upload_record()
    debug(self.results)
    if len(self.results['upload_record']) < self.blocknum:
        return 0
    for entry in self.results['upload_record']:
        # NOTE(review): eval() on locally recorded strings — if these are
        # plain literals, ast.literal_eval would be the safer choice.
        entry = eval(entry)
        if entry['code'] != 200:
            return 0
    return 1
def test_ops(self):
    """Upload a video, then run a vframe fops on it; expect HTTP 200."""
    self.cfg.overwrite = 1
    video_key = 'huhu.mp4'
    local_path = 'E:\\huhu.mp4'
    debug('start to upload huhu.mp4')
    self.cli.simple_upload(local_path, self.bucket, video_key)
    fops = 'vframe/jpg/offset/10|saveas/cXotbXVsaXR1cGxvYWQtY2FpeXotdGVzdDrop4bpopHmiKrlm74uanBn'
    result = self.cli.ops_execute(fops, self.bucket, video_key)
    debug(result)
    self.assertEqual(result[0], 200)
def image_detect(self, image, dtype, bucket):
    """Run image detection of type *dtype* on an image in *bucket*."""
    url = https_check('{0}/imageDetect'.format(self.mgr_host))
    params = {
        'image': urlsafe_base64_encode(image),
        'type': dtype,
        'bucket': bucket,
    }
    body = super(BucketManager, self)._params_parse(params)
    debug('image detect for %s to %s' % (image, dtype))
    return _post(url=url, data=body,
                 headers=super(BucketManager, self)._gernerate_headers(url, body))
def setdeadline(self, bucket, key, deadline):
    """Set the lifecycle deadline of an object."""
    url = https_check('{0}/setdeadline'.format(self.mgr_host))
    params = {
        'bucket': urlsafe_base64_encode(bucket),
        'key': urlsafe_base64_encode(key),
        'deadline': deadline,
    }
    body = super(BucketManager, self)._params_parse(params)
    debug('Set deadline of %s to %s' % (key, deadline))
    return _post(url=url, data=body,
                 headers=super(BucketManager, self)._gernerate_headers(url, body))
def test_fmgr_delete(self):
    """Upload a file, then delete it via fmgr; expect HTTP 200."""
    local_path = 'F:\\5_.zip'
    object_key = '5_.zip'
    self.cfg.overwrite = 1
    self.cli.multipart_upload(local_path, self.bucket, object_key)
    fops = 'bucket/%s/key/%s' % (
        urlsafe_base64_encode(self.bucket),
        urlsafe_base64_encode(object_key))
    result = self.cli.fmgr_delete(fops)
    debug(result)
    self.assertEqual(result[0], 200)
def test_fmgr_prefix_del(self):
    """Upload a file under a prefix, then prefix-delete it via fmgr."""
    local_path = 'F:\\5_.zip'
    object_key = 'aa/5_.zip'
    self.cfg.overwrite = 1
    self.cli.multipart_upload(local_path, self.bucket, object_key)
    fops = 'bucket/%s/prefix/%s' % (
        urlsafe_base64_encode(self.bucket),
        urlsafe_base64_encode('aa'))
    result = self.cli.prefix_delete(fops)
    debug(result)
    self.assertEqual(result[0], 200)
def test_fmgr_copy(self):
    """Upload a file, then fmgr-copy it to a new key; expect HTTP 200."""
    local_path = 'F:\\5_.zip'
    src_key = '5_.zip'
    self.cfg.overwrite = 1
    self.cli.multipart_upload(local_path, self.bucket, src_key)
    dst_key = '5_4.zip'
    fops = 'resource/%s/bucket/%s/key/%s' % (
        urlsafe_base64_encode('%s:%s' % (self.bucket, src_key)),
        urlsafe_base64_encode(self.bucket),
        urlsafe_base64_encode(dst_key))
    result = self.cli.fmgr_copy(fops)
    debug(result)
    self.assertEqual(result[0], 200)
def _make_file(self, ctx_string):
    """Issue the final mkfile request, retrying on retryable failures.

    Args:
        ctx_string: comma-joined block contexts collected from mkblk/bput.

    Returns:
        (code, text, logid) from the last mkfile POST.
    """
    try:
        mkfile_retries = int(self.cfg.mkfile_retries)
    except ValueError:
        # Fix: dropped the unused 'as e' binding.
        warning(
            u"parameter mkfile_retries is invalid, so use default value 3")
        mkfile_retries = 3
    url = https_check(self.__file_url())
    body = ctx_string
    headers = self.__generate_headers()
    code, text, logid = _post(url=url, headers=headers, data=body)
    while mkfile_retries and self.__need_retry(code):
        debug('make file fail.retry upload')
        code, text, logid = _post(url=url, headers=headers, data=body)
        mkfile_retries -= 1
    return code, text, logid
def execute(self, fops, bucket, key, force=0, separate=0, notifyurl=None):
    """Submit a persistent fops task for the given bucket/key."""
    payload = {
        'bucket': urlsafe_base64_encode(bucket),
        'key': urlsafe_base64_encode(key),
        'fops': urlsafe_base64_encode(fops),
    }
    if notifyurl is not None:
        payload['notifyURL'] = urlsafe_base64_encode(notifyurl)
    if force == 1:
        payload['force'] = 1
    if separate == 1:
        payload['separate'] = 1
    url = https_check('{0}/fops'.format(self.mgr_host))
    headers, reqdata = self._gernerate_headers(url, payload)
    debug('PersistentFops is %s' % fops)
    debug('Start to post persistentFops')
    return _post(url=url, data=reqdata, headers=headers)
def bucket_statistics(self, name, stype, startdate, enddate, isListDetails='false'):
    """Query bucket statistics of type *stype* for the given date range.

    NOTE(review): ``isListDetails`` is accepted but never added to
    ``options`` (unlike ``bucket_stat``) — confirm whether the server
    expects it for this endpoint.
    """
    encode_name = urlsafe_base64_encode(name)
    options = {
        'name': encode_name,
        'type': stype,
        'startdate': startdate,
        'enddate': enddate
    }
    url = https_check(self._make_url('bucket/statistics', options))
    debug('Now get bucket %s of %s from %s to %s' %
          (stype, name, startdate, enddate))
    return _get(url=url, headers=super(BucketManager, self)._gernerate_headers(url))
def bucket_stat(self, name, startdate, enddate, isListDetails='false', storageType=None):
    """Query storage statistics of a bucket between two dates."""
    query = {
        'name': urlsafe_base64_encode(name),
        'startdate': startdate,
        'enddate': enddate,
        'isListDetails': isListDetails,
    }
    if storageType:
        query['storageType'] = str(storageType)
    url = https_check(self._make_url('bucket/stat', query))
    debug('Now check storage of %s from %s to %s' %
          (name, startdate, enddate))
    return _get(url=url,
                headers=super(BucketManager, self)._gernerate_headers(url))
def _records_parse(self, upload_id):
    """Return file offsets of blocks that still need uploading.

    Reads the local upload record, drops offsets already uploaded
    successfully (code == 200) from the candidate list, and adds the
    size of each completed block to self.progress.
    """
    records = self.recorder.get_upload_record()
    offsetlist = [i * self.block_size for i in range(0, self.blocknum)]
    debug(records)
    if records:
        self.uploadBatch = records['uploadBatch']
        self.results = records['upload_record']
        for record in self.results:
            try:
                # NOTE(review): eval() on recorder data; ast.literal_eval
                # would be safer if these are plain literals.
                record = eval(record)
            except SyntaxError:
                debug('Get ctx/offset fail,error ctx/offset:{0}'.format(
                    record))
                # Fix: skip unparseable entries; previously execution fell
                # through and indexed the raw string with record['code'].
                continue
            except Exception as exc_e:
                debug('Get ctx/offset fail,errorinfo:{0}'.format(exc_e))
                continue
            if record['code'] == 200:
                offsetlist.remove(record['offset'])
                # Fix: floor division keeps blockid an int on Python 3
                # (identical result on Python 2 for int operands).
                blockid = record['offset'] // self.block_size
                if blockid < self.blocknum - 1:
                    self.progress += self.block_size
                else:
                    self.progress += self.size - (blockid * self.block_size)
    return offsetlist
def parse_file(self, file, sections=None):
    """Parse an INI-style config file into this mapping.

    Args:
        file: path of the config file to read.
        sections: section name or list of names to load; None/empty
            loads every section.
    """
    # Fix: avoid the shared mutable default argument (was sections=[]).
    if sections is None:
        sections = []
    if not isinstance(sections, list):
        sections = [sections]
    in_our_section = True
    r_section = re.compile(r"^\[([^\]]+)\]")
    r_comment = re.compile(r"^\s*#.*")
    r_empty = re.compile(r"\s*$")
    r_data = re.compile(r"^\s*(?P<key>\w+)\s*=\s*(?P<value>.*)")
    r_quotes = re.compile(r"^\"(.*)\"\s*$")  # value wrapped in double quotes
    with io.open(file, "r", encoding=self.get('encoding', 'UTF-8')) as fp:
        for line in fp:
            if r_comment.match(line) or r_empty.match(line):
                continue
            is_section = r_section.match(line)
            if is_section:
                # Fix: group(1) is the captured section name; the old
                # group()[0] was always '[' so section filtering never
                # matched a requested section.
                section = is_section.group(1)
                in_our_section = (section in sections) or (len(sections) == 0)
                continue
            is_data = r_data.match(line)
            if is_data and in_our_section:
                data = is_data.groupdict()
                if r_quotes.match(data["value"]):
                    data["value"] = data["value"][1:-1]
                self.__setitem__(data["key"], data["value"])
                if data["key"] in ("access_key", "secret_key"):
                    try:
                        # Mask secrets for logging; fix: [-1:] instead of
                        # the always-empty [-1:1] slice. (Py3-compatible
                        # except clause; the binding was unused.)
                        print_value = ("%s...%d_chars...%s") % (
                            data["value"][-2],
                            len(data["value"]) - 3,
                            data["value"][-1:])
                    except IndexError:
                        debug(u"{0} is empty".format(data["key"]))
                else:
                    print_value = data["value"]
                #debug("ConfigParser: %s->%s" % (data["key"], print_value))
                continue
            warning("Ingnoring invalid line in '%s': %s" % (file, line))
def wslive_list(self, channelname, startTime, endTime, bucket, start=None, limit=None):
    """List recorded live files of a channel within a time range."""
    params = {
        'channelname': channelname,
        'startTime': startTime,
        'endTime': endTime,
        'bucket': bucket,
    }
    if start is not None:
        params['start'] = start
    if limit is not None:
        params['limit'] = limit
    url = https_check(self._make_list_url(params))
    if params is not None:
        debug('List params is %s' % params)
    debug('List bucket %s' % bucket)
    return _get(url=url,
                headers=super(WsLive, self)._gernerate_headers(url))
def test_fmgr_m3u8_del(self):
    """Upload an m3u8 playlist and its ts segment, then m3u8-delete."""
    self.cfg.overwrite = 1
    playlist_key = 'M3U8_FILE.m3u8'
    segment_key = '000001.ts'
    playlist_path = 'E:\\m3u8\\M3U8_FILE.m3u8'
    segment_path = 'E:\\m3u8\\000001.ts'
    debug('start to upload m3u8')
    self.cli.simple_upload(playlist_path, self.bucket, playlist_key)
    debug('start to upload ts file')
    self.cli.simple_upload(segment_path, self.bucket, segment_key)
    fops = 'bucket/%s/key/%s' % (
        urlsafe_base64_encode(self.bucket),
        urlsafe_base64_encode(playlist_key))
    result = self.cli.m3u8_delete(fops)
    debug(result)
    self.assertEqual(result[0], 200)
def etag(filePath, block_size=1024 * 1024 * 4):
    """Compute the etag of a file.

    Args:
        filePath: path of the file to hash.
        block_size: chunk size passed to etag_stream (default 4 MiB).

    Returns:
        The etag value of the file.
    """
    with open(filePath, 'rb') as f:
        # Log file metadata for troubleshooting (log text is in Chinese
        # and is runtime output, so it is kept as-is).
        file_stat = os.stat(filePath)
        # absolute path
        debug('文件绝对路径:{0}'.format(os.path.abspath(filePath)))
        # st_ctime: creation time on Windows (metadata-change time on Unix)
        debug('文件创建时间:{0}'.format(
            time.strftime('%Y-%m-%d %H:%M:%S',
                          time.localtime(file_stat.st_ctime))))
        # st_mtime: last modification time
        debug('最后一次修改的时间:{0}'.format(
            time.strftime('%Y-%m-%d %H:%M:%S',
                          time.localtime(file_stat.st_mtime))))
        # st_atime: last access time
        debug('上次访问的时间:{0}'.format(
            time.strftime('%Y-%m-%d %H:%M:%S',
                          time.localtime(file_stat.st_atime))))
        return etag_stream(f, block_size)
def test_fmgr_prefix_del(self):
    """Delete all objects under the 'test' prefix via fmgr."""
    fops = 'bucket/%s/prefix/%s' % (
        urlsafe_base64_encode(self.bucket),
        urlsafe_base64_encode('test'))
    debug(self.cli.prefix_delete(fops))
def test_fmgr_delete(self):
    """Delete '1.doc' from the bucket via fmgr."""
    fops = 'bucket/%s/key/%s' % (
        urlsafe_base64_encode(self.bucket),
        urlsafe_base64_encode('1.doc'))
    debug(self.cli.fmgr_delete(fops))