def make_data(self, e, call_back, label, progress=True):
    """Wrap encoder *e* in a MultipartEncoderMonitor with the right callback.

    :param e: MultipartEncoder to monitor
    :param call_back: explicit progress callback, or None to build one from *label*
    :param label: label handed to self.create_callback when no callback is given
    :param progress: set False to disable progress reporting for this call
    :return: MultipartEncoderMonitor wrapping *e*
    """
    # Progress disabled: still return a monitor (callers use it as the request
    # body) but with no callback attached.
    if self.disable_progress or not progress:
        return encoder.MultipartEncoderMonitor(e, None)
    # Prefer the caller-supplied callback; otherwise build one from the label.
    # (The original had a dead `data = None` and duplicated the construction
    # across two branches.)
    callback = call_back if call_back is not None else self.create_callback(label)
    return encoder.MultipartEncoderMonitor(e, callback)
def _build_dynamic_upload_data(self, fields, callback=None):
    """Build the upload request body — the monitor *is* the data.

    CLI + verbose: drive a terminal progress Bar.  Non-CLI with a callback:
    report percent via EVENT_METHOD_PROGRESS_PERCENT.  Otherwise: plain
    monitor with no callback.
    """
    encoded_data = encoder.MultipartEncoder(fields=fields)

    if self.state.is_using_cli is True and self.state.verbose:
        bar = Bar('Uploading ' + fields['file'][0], suffix='%(percent)d%%')

        def advance(m):
            bar.goto(m.bytes_read / m.len * 100)

        return encoder.MultipartEncoderMonitor(encoded_data, advance)

    if self.state.is_using_cli is False and callback:
        def report(m):
            callback(EVENT_METHOD_PROGRESS_PERCENT, m.bytes_read / m.len * 100)

        return encoder.MultipartEncoderMonitor(encoded_data, report)

    return encoder.MultipartEncoderMonitor(encoded_data, None)
def create_task_fallback(self, files, options=None, name=None, progress_callback=None):
    """Pre-chunked-API create-task implementation, used as a fallback.

    :param files: list of image paths (must be non-empty)
    :param options: processing options dict (defaults to empty)
    :param name: optional task name
    :param progress_callback: called with percent (0-100) during upload
    :return: result of handle_task_new_response
    :raises NodeResponseError: when *files* is empty
    """
    # Fixed the shared-mutable-default pitfall (`options={}`): normalize here.
    if options is None:
        options = {}
    if len(files) == 0:
        raise NodeResponseError("Not enough images")

    # Equivalent to passing the open file descriptor, since requests
    # eventually calls read(), but this way we make sure to close
    # the file prior to reading the next, so we don't run into open file OS limits
    def read_file(file_path):
        with open(file_path, 'rb') as f:
            return f.read()

    fields = {
        'name': name,
        'options': options_to_json(options),
        'images': [(os.path.basename(f), read_file(f),
                    (mimetypes.guess_type(f)[0] or "image/jpg")) for f in files]
    }

    def create_callback(mpe):
        total_bytes = mpe.len

        def callback(monitor):
            if progress_callback is not None and total_bytes > 0:
                progress_callback(100.0 * monitor.bytes_read / total_bytes)

        return callback

    e = MultipartEncoder(fields=fields)
    m = encoder.MultipartEncoderMonitor(e, create_callback(e))
    result = self.post('/task/new', data=m,
                       headers={'Content-Type': m.content_type})
    return self.handle_task_new_response(result)
def uploadFile(self, filePath, name='runner.ipa'):
    """Upload *filePath* to pgyer with a progress-monitored multipart POST.

    :param filePath: local path of the package to upload
    :param name: filename reported to the server
    :raises Exception: when the API responds with a non-zero code
    """
    self.p(' 开始上传' + filePath)
    url = 'https://www.pgyer.com/apiv2/app/upload'
    # Recorded before the POST so the progress callback can use it.
    self.fileLength = os.path.getsize(filePath)
    # Context manager closes the file even if the request fails
    # (the original leaked the descriptor).
    with open(filePath, 'rb') as file:
        e = encoder.MultipartEncoder(
            fields={
                '_api_key': '2e8571d626b9a8c8b752e59624481847',
                'file': (name, file, 'application/x-www-form-urlencoded'),
            },
        )
        m = encoder.MultipartEncoderMonitor(e, self.my_callback)
        h = {'Content-Type': m.content_type, "enctype": "multipart/form-data"}
        r = requests.post(url, data=m, headers=h).json()
    if int(dict(r).get('code')) != 0:
        raise Exception('上传失败:{0}'.format(dict(r).get('message')), )
    else:
        self.p(str(r).replace('u\'', '\''))  # strip the leading u'' repr prefix
        self.p(filePath + '上传成功')
def upload(cls, file_name, task, progressCallback=None):
    """Upload a resource described by *task* to the form URL it carries.

    :param file_name: local path of the file to send
    :param task: dict carrying data.result.form with 'url' and 'parameters'
    :param progressCallback: optional MultipartEncoderMonitor callback
    :return: parsed JSON result on success
    :raises Exception: on any transport or server-reported failure
    """
    form = task['data']['result']['form']
    port_url = form['url']
    params = form['parameters']
    params['x:timestamp'] = int(time.time())
    fields = params if params else {}
    try:
        _, shortName, ext = util.get_fileNameExt(file_name)
        # Server-side form expects string values only.
        for key in fields.keys():
            if isinstance(fields[key], int):
                fields[key] = str(fields[key])
        # Context manager closes the file on every path (the original only
        # closed it on the success path, leaking it whenever the POST raised).
        with open(file_name, 'rb') as file:
            fields['file'] = (shortName + ext, file, 'video/mp4')
            e = encoder.MultipartEncoder(fields=fields)
            m = encoder.MultipartEncoderMonitor(e, progressCallback)
            res = requests.request(method='POST', url=port_url, data=m,
                                   headers={'Content-Type': m.content_type})
        if res.status_code != 200:
            raise Exception("got exception while uploading file")
        result = json.loads(res.text)
        if result['code'] > 0:
            raise Exception(result['msg'])
        if result['data']['status'] == 'failed':
            raise Exception(result['data']['message'])
        return result
    except Exception as err:
        # Chain the cause so the original failure is not silently swallowed.
        raise Exception("got exception while uploading file") from err
def upload_datafile_with_post(datafile_path, datafile_dict, upload):
    """
    Upload a file to the MyTardis API via POST, creating a new DataFile record.

    :param datafile_path: local path of the file to attach
    :param datafile_dict: metadata serialized into the 'json_data' field
    :param upload: upload state object; its buffered_reader is (re)assigned here
        and intentionally left open for the caller to manage
    :return: the requests Response
    """
    url = "%s/api/v1/mydata_dataset_file/" % settings.general.mytardis_url
    upload.buffered_reader = io.open(datafile_path, "rb")
    encoded = encoder.MultipartEncoder(
        fields={
            "json_data": json.dumps(datafile_dict),
            "attached_file": (
                upload.filename,
                upload.buffered_reader,
                "application/octet-stream",
            ),
        })
    # Workaround for issue with httplib's hard-coded read size
    # of 8192 bytes which can lead to slow uploads, see:
    # http://toolbelt.readthedocs.io/en/latest/uploading-data.html
    # https://github.com/requests/toolbelt/issues/75
    multipart_encoder_read_method = encoded.read
    encoded.read = lambda size: multipart_encoder_read_method(1024 * 1024)
    multipart = encoder.MultipartEncoderMonitor(encoded)
    # Copy the shared defaults: the original mutated settings.default_headers
    # in place, leaking a multipart Content-Type into every later request.
    headers = dict(settings.default_headers)
    headers["Content-Type"] = multipart.content_type
    response = requests.post(url, data=multipart, headers=headers)
    return response
def storeImg(filename): #store the image on a web server. global deviceId, imgUploadServer, app_key e = encoder.MultipartEncoder( fields={'file': (filename, open(filename, 'rb'), 'image/jpeg')}) m = encoder.MultipartEncoderMonitor(e, imgStoreMonitor_callback) reqParams = { 'value': filename, 'name': filename, 'filename': filename, 'st_filename': filename, 'st_deviceid': deviceId, 'app_key': app_key } contentType = 'image/jpeg' contentDisposition = 'form-data' #send image as a post. r = requests.post(imgUploadServer, data=m, params=reqParams, headers={'Content-Type': m.content_type}) if r.status_code != requests.codes.ok: print 'there was an error' + str(r.status_code) return None else: jsonData = json.loads(r.text) newURL = jsonData['attachment']['url'] print 'Image sent. returned URL=' + newURL return newURL
def uploadFile(filename):
    """Upload the APK, store the returned URL in apk_info['downUrl'], return it."""
    # Context manager closes the APK even if the POST fails
    # (the original leaked the descriptor).
    with open(filename, 'rb') as apk:
        e = encoder.MultipartEncoder(
            fields={
                'file': ('xianzixun.apk', apk,
                         'application/vnd.android.package-archive')
            })
        m = encoder.MultipartEncoderMonitor(e, my_callback)
        headers = {
            # NOTE(review): no space between "Bearer" and the token — most
            # servers expect "Bearer <token>". Left unchanged; confirm with API.
            'Authorization': "Bearer" + token,
            'Referer': "http://xianzixun.net:8000/swagger-ui.html",
            'Origin': 'http://xianzixun.net:8000',
            'Content-Type': m.content_type
        }
        print("上传中...")
        pbar.maxval = m.len
        pbar.term_width = 60
        print("total len:" + str(m.len))
        pbar.start()
        resp = requests.post(server_url, data=m, headers=headers)
    print(resp.status_code, resp.text)
    # Parse once (the original decoded the body twice).
    down_url = json.loads(resp.text)["data"]
    print("更新apk_info中的下载链接...")
    apk_info["downUrl"] = down_url
    print("更新后apk_info:" + json.dumps(apk_info))
    print("------------------------------------------")
    return down_url
def create_version(image_name, version, workspace_path, monitor_callback, token):
    """Create a new version of *image_name* by uploading the workspace file.

    Raises ResourceNotFoundError on HTTP 404 and the mapped exception on any
    other non-200 status; returns the raw response text on success.
    """
    with open(workspace_path, 'rb') as workspace:
        body = encoder.MultipartEncoder(fields={
            'version': version,
            'source': ('workspace', workspace, 'text/plain')
        })
        monitor = encoder.MultipartEncoderMonitor(body, monitor_callback)
        response = requests.post(
            base_images_url + '/' + image_name + '/versions',
            data=monitor,
            headers={
                'Content-Type': monitor.content_type,
                'Authorization': 'JWT ' + token
            })
    if response.status_code == 404:
        raise ResourceNotFoundError(
            response=response,
            message="There is no image named {}, please check for typo".
            format(image_name))
    if response.status_code != 200:
        raise get_exception(response)
    return response.text
def blob_upload(self, blob_id: str, fd, total_length: int):
    """
    Upload the BLOB and use a callback function for progress indicator.
    """
    self.__total_length = total_length
    # Form fields: the blob id plus the file payload itself.
    form_fields: dict = {
        "blob_id": blob_id,
        "file": ("filename", fd, "application/octet-stream"),
    }
    monitored = encoder.MultipartEncoderMonitor(
        encoder.MultipartEncoder(fields=form_fields), self.my_callback)
    # Copy the base headers, then set the multipart content type on the copy.
    request_headers = dict(self.__headers)
    request_headers["Content-Type"] = monitored.content_type
    requests.post(self._url + "blob_upload", data=monitored,
                  headers=request_headers, proxies=self.__proxies,
                  auth=self.__auth)
def upload_source(workspace_path, manifest, monitor_callback):
    """POST the workspace file and manifest as multipart form data.

    Raises the mapped exception on a non-200 response; otherwise returns
    the parsed JSON body.
    """
    with open(workspace_path, 'rb') as workspace:
        form = encoder.MultipartEncoder(fields={
            'source': ('workspace', workspace, 'text/plain'),
            'manifest': manifest
        })
        monitor = encoder.MultipartEncoderMonitor(form, monitor_callback)
        response = get_session().post(
            base_sources_url, data=monitor,
            headers={'Content-Type': monitor.content_type})
    if response.status_code != 200:
        raise get_exception(response)
    return response.json()
def handle_uploaded_file_monitor(file):
    """
    PUT the uploaded file's contents to localhost:4444 with a progress bar.

    :param file: Of type UploadedFile
    """
    body = encoder.MultipartEncoder(fields={file.name: file.read()})
    bar = ProgressBar(expected_size=body.len, filled_char='=')

    def on_progress(monitor):
        bar.show(monitor.bytes_read)

    monitor = encoder.MultipartEncoderMonitor(body, on_progress)
    response = requests.put('http://localhost:4444/', data=monitor,
                            headers={'Content-Type': monitor.content_type})
    print('\nUpload finished! (Returned status {} {})'.format(
        response.status_code, response.reason))
def new_task(self, images, name=None, options=None, progress_callback=None):
    """
    Starts processing of a new task
    :param images: list of path images
    :param name: name of the task
    :param options: options to be used for processing ([{'name': optionName, 'value': optionValue}, ...])
    :param progress_callback: optional callback invoked during the upload images process to be used to report status.
    :return: UUID or error
    """
    # Fixed the shared-mutable-default pitfall (`options=[]`): normalize here.
    if options is None:
        options = []

    # Equivalent as passing the open file descriptor, since requests
    # eventually calls read(), but this way we make sure to close
    # the file prior to reading the next, so we don't run into open file OS limits
    def read_file(path):
        with open(path, 'rb') as f:
            return f.read()

    fields = {
        'name': name,
        'options': json.dumps(options),
        'images': [(os.path.basename(image), read_file(image),
                    (mimetypes.guess_type(image)[0] or "image/jpg"))
                   for image in images]
    }

    def create_callback(mpe):
        total_bytes = mpe.len

        def callback(monitor):
            if progress_callback is not None and total_bytes > 0:
                progress_callback(monitor.bytes_read / total_bytes)

        return callback

    e = MultipartEncoder(fields=fields)
    m = encoder.MultipartEncoderMonitor(e, create_callback(e))
    return requests.post(self.url("/task/new"), data=m, headers={
        'Content-Type': m.content_type
    }).json()
def upload_model_expfs(self, model_id, expfs, **kwargs):
    """Upload a model's extension file set.

    Args:
        model_id (str): model identifier.
        expfs (path): path of the extension-file archive. (NOTE(review): the
            else-branch reads ``expfs.name`` as if a file object were allowed,
            but ``open(expfs, 'rb')`` below requires a path — as in the
            original, a file object would fail; confirm the intended contract.)

    Returns:
        Model: wraps task id, time, status and return value.
    """
    if isinstance(expfs, text_type):
        filename = split(expfs)[1]
    else:
        filename = expfs.name

    def upload_callback(monitor):
        # Progress line rewritten in place via '\r'.
        total_bytes = monitor.len
        bytes_consumed = monitor.bytes_read
        sys.stdout.write('\r文件大小:{}, 上传进度: {}%,已上传 {}'.format(
            human_byte(total_bytes, 2),
            '%.2f' % ((bytes_consumed / total_bytes) * 100),
            human_byte(bytes_consumed, 2)))
        sys.stdout.flush()

    # Context manager closes the archive even when the request raises
    # (the original leaked the file handle).
    with open(expfs, 'rb') as archive:
        e = encoder.MultipartEncoder(
            fields={
                'model_id': model_id,
                'expfs': (filename, archive, 'application/zip')
            })
        m = encoder.MultipartEncoderMonitor(e, upload_callback)
        request = HTTPRequest(self.endpoint, max_retries=self.max_retries,
                              verbose=self.verbose)
        request.set_authorization(self.token_type, self.access_token)
        model_url = '/model/expfs/upload'
        result = request.post(model_url, data=m, timeout=self.timeout,
                              headers={'Content-Type': m.content_type})
    # NOTE(review): writes an empty string — was '\n' intended to end the
    # progress line? Preserved as-is.
    sys.stdout.write('')
    return models.Model(result)
def upload_image_to_imageset(self, imageset_id: int, filename: str) -> bool:
    """Upload one image file into the given imageset.

    Returns the parsed response object on HTTP 200 (note: not actually a
    bool, despite the annotation, which is kept for interface stability);
    raises ExactProcessError on any other status.
    """
    # Context manager closes the image even when the POST fails
    # (the original leaked the descriptor).
    with open(filename, 'rb') as image:
        e = encoder.MultipartEncoder(
            fields={
                'files[]': (os.path.basename(filename), image,
                            'application/octet-stream')
            })
        m = encoder.MultipartEncoderMonitor(e, self.upload_monitor)
        headers = {'Content-Type': m.content_type, 'referer': self.serverurl}
        self.log(1, 'Uploading image', filename, 'to', imageset_id)
        status, obj = self.post('images/image/upload/%d/' % imageset_id,
                                data=m, headers=headers, timeout=120)
    if status == 200:
        return obj
    raise ExactProcessError('Unable to upload, response is: ' + str(obj))
def upload_source(workspace_path, manifest, monitor_callback):
    """POST the workspace and manifest; returns the parsed JSON body on 200.

    On a 400, echoes the manifest-field documentation for the first offending
    key before raising; any other non-200 raises the mapped exception.
    """
    with open(workspace_path, 'rb') as workspace:
        form = encoder.MultipartEncoder(fields={
            'source': ('workspace', workspace, 'text/plain'),
            'manifest': manifest
        })
        monitor = encoder.MultipartEncoderMonitor(form, monitor_callback)
        response = get_session().post(
            base_sources_url, data=monitor,
            headers={'Content-Type': monitor.content_type})
    if response.status_code == 200:
        return response.json()
    if response.status_code == 400:
        # Surface help text for the first invalid manifest field.
        document = get_manifest_document(list(response.json().keys())[0])
        click.echo(format_text(document, TextStyle.WARNING))
    raise get_exception(response)
def get_upload_request_data(self, fields, callback, force_update=False):
    """
    Add auth data to upload request fields given and return multipart/form data object
    @param fields Request data 'fields' [dict] — NOTE: mutated in place (auth keys added)
    @param callback Callback to be called on file chunk read [Function]
    @param force_update Instructs to update node sign [bool]
    @return multipart/form data object [MultipartEncoderMonitor]
    """
    # Refresh the node signature when missing or explicitly requested.
    if not self.node_sign or force_update:
        self.update_node_sign()
    # Auth data is written straight into the caller's dict (existing contract).
    fields.update({
        'user_hash': self.cfg.user_hash,
        'node_hash': self.cfg.node_hash,
        'node_sign': self.node_sign,
    })
    return encoder.MultipartEncoderMonitor(
        encoder.MultipartEncoder(fields), callback)
def multipart_data(self, fields, create_callback):
    """Creates a multipart/form-data body.

    Args:
        fields(dict,list): form data values.
        create_callback(function): factory that, given the encoder, returns a
            function monitoring upload progress; may be None.

    Returns:
        MultipartEncoderMonitor when a callback factory is given, a bare
        MultipartEncoder when the factory is None, or None when fields is None.
    """
    if fields is None:
        return None
    body = encoder.MultipartEncoder(fields=fields)
    if create_callback is None:
        return body
    return encoder.MultipartEncoderMonitor(body, create_callback(body))
def send_file(file):
    """Send an already-open *file* to the iPad's VLC upload page with progress."""
    # creates the multipart encoder
    e = encoder.MultipartEncoder(
        fields={'files[]': (os.path.basename(file.name), file, 'text/plain')}
    )
    # creates callback function
    print_progress = create_callback(e)
    # creates the multipart encoder monitor
    m = encoder.MultipartEncoderMonitor(e, print_progress)
    headers = {'Content-Type': m.content_type}
    # send the file to ipad's vlc
    r = requests.post(PAGE_URL, data=m, headers=headers)
    # if status_code is 200, show that the upload was a success
    if r.status_code == 200:
        print("Upload done. SUCCESS!")
    else:
        print("Upload ERROR!")
        # requests.Response has no .error attribute — the original raised
        # AttributeError on this branch; report the body text instead.
        print(str(r.status_code) + " " + r.text)
def upload(self, local_src_path, remote_dest_path, options):
    """
    Upload a file to the NAS via QNAP's chunked-upload API.

    :param local_src_path: local file to send
    :param remote_dest_path: absolute destination path (must start with '/')
    :param options: dict with at least 'overwrite', 'stats', 'verbose'
    :return: decoded response dict on success, None on setup failure
    """
    if remote_dest_path[0] != '/':
        logger.error("remote_dest_path must be absolute and begin with /")
        return None
    remote_dirname = os.path.dirname(remote_dest_path)
    remote_basename = os.path.basename(remote_dest_path)
    # From docs:
    # Where to put tmp (If you used share root path(/Download, /Public, ...)
    # as the value of parameter "upload_root_dir", it will be auto cleaned in period of 7 days later.
    if remote_dirname.count("/") <= 1:
        upload_root_dir = remote_dirname
    else:
        # str.split replaces the original re.split("\/", ...) — same result,
        # no invalid escape sequence.
        upload_root_dir = "/" + remote_dirname.split("/")[1]
    if len(upload_root_dir) < 2:
        # Fixed NameError: the original referenced undefined `remote_dir_name`.
        logger.error(
            'Unable to determine valid qnap tmp dir from %s, got %s' %
            (remote_dirname, upload_root_dir))
        return None
    start_res = self.post_form('start_chunked_upload',
                               {'upload_root_dir': upload_root_dir})
    start_res = FileStation.decode_response(
        start_res, 'Failed to start chunked upload')
    if start_res['status'] != 0:
        logger.error('Failed to start chunked upload with error code %d'
                     % start_res['status'])
        return None
    upload_id = start_res['upload_id']
    params = {
        'func': 'chunked_upload',
        'upload_id': upload_id,
        'dest_path': remote_dirname,
        'upload_root_dir': upload_root_dir,
        'upload_name': remote_basename,
        'overwrite': 1 if options["overwrite"] else 0,
        'offset': 0,
        'filesize': os.stat(local_src_path).st_size
    }
    # Fixed: the query string was never concatenated in the original — the
    # `+ urlencode(params)` sat on a broken continuation line, leaving
    # request_path without its parameters.
    request_path = '/cgi-bin/filemanager/utilRequest.cgi?' + urlencode(params)
    progress_reporter = FileStation.ProgressReporter(params['filesize'], options)
    with open(local_src_path, 'rb') as src_file:
        e = encoder.MultipartEncoder(
            fields=([('fileName', remote_basename),
                     ('file', ('blob', src_file, 'application/octet-stream'))]))
        # Monkey patch MultipartEncoder to send in 64KB chunks (vs 8KB by
        # default)
        # See https://github.com/requests/toolbelt/issues/75#issuecomment-237189952
        e._read = e.read
        e.read = lambda size: e._read(64 * 1024)
        m = encoder.MultipartEncoderMonitor(e, progress_reporter.report)
        res = self.qnap_client.post_multipart(request_path, m)
    if options["stats"] or options["verbose"]:
        progress_reporter.finish()
    return FileStation.decode_response(res, 'Failed to upload file')
'options': options_to_json(options), 'images': [(os.path.basename(f), read_file(f), (mimetypes.guess_type(f)[0] or "image/jpg")) for f in files] } def create_callback(mpe): total_bytes = mpe.len def callback(monitor): if progress_callback is not None and total_bytes > 0: progress_callback(100.0 * monitor.bytes_read / total_bytes) return callback e = MultipartEncoder(fields=fields) m = encoder.MultipartEncoderMonitor(e, create_callback(e)) result = self.post('/task/new', data=m, headers={'Content-Type': m.content_type}) return self.handle_task_new_response(result) def handle_task_new_response(self, result): if isinstance(result, dict) and 'uuid' in result: return Task(self, result['uuid']) elif isinstance(result, dict) and 'error' in result: raise NodeResponseError(result['error']) else: raise NodeServerError('Invalid response: ' + str(result)) def get_task(self, uuid): """Helper method to initialize a task from an existing UUID
from requests_toolbelt.multipart import encoder
from requests_toolbelt.utils import dump, formdata
import json
import requests  # was missing: requests.post is called below

# Target endpoint and the person/facepose payloads sent alongside the image.
url = "http://192.168.0.201/person/add"
person = {'faceId': '8888', 'valid_time_type': 0, 'valid_start': 0,
          'valid': 60, 'username': '******',
          'idcard': '420124197802025936', 'sex': 1, 'nation': 'han',
          'state': 1, 'wgId': 1}
facepose = {'x': 74, 'y': 97, 'w': 304, 'h': 402}
person1 = json.dumps(person)
print(person1)
print(type(person1))
print(json.dumps(person1))
facepose1 = json.dumps(facepose)
print(facepose1)


def my_callback(monitor):
    # Your callback function
    pass


# Close the image file deterministically (the original leaked the handle).
with open('out.jpg', 'rb') as image:
    e = encoder.MultipartEncoder(
        fields={'person': person1,
                'image': ('out.jpg', image, 'image/jpeg'),
                'facepose': facepose1}
    )
    m = encoder.MultipartEncoderMonitor(e, my_callback)
    r = requests.post(url, data=m,
                      headers={'Content-Type': m.content_type, 'Referer': url})
print(r.json())
print(dump.dump_all(r).decode('utf-8'))
def event(audio_data):
    # Send a DCS "ListenStarted" voice_input event (JSON metadata + raw audio)
    # to the DuerOS endpoint over HTTP/2, then stream the response into play().
    # NOTE(review): the bearer token and device id below are hard-coded
    # credentials — they should come from configuration.
    metadata = {
        'clientContext': [],
        'event': {},
    }
    # Event header/payload identifying this as the start of a voice capture.
    metadata['event']['header'] = {
        "namespace": "ai.dueros.device_interface.voice_input",
        "name": "ListenStarted",
        "messageId": "71c0cf96-6243-4fff-853d-7d63ef4123dd",
        "dialogRequestId": "e5c713d0-f5ec-48c6-89bf-a023c38512d7"
    }
    metadata['event']['payload'] = {
        "format": "AUDIO_L16_RATE_16000_CHANNELS_1"
    }
    # Device-state context: audio player, speech output, alerts, and volume.
    metadata['clientContext'] = [{
        "header": {
            "namespace": "ai.dueros.device_interface.audio_player",
            "name": "PlaybackState"
        },
        "payload": {
            "token": "",
            "offsetInMilliseconds": 0,
            "playerActivity": "IDLE"
        }
    }, {
        "header": {
            "namespace": "ai.dueros.device_interface.voice_output",
            "name": "SpeechState"
        },
        "payload": {
            "token": "",
            "offsetInMilliseconds": 0,
            "playerActivity": "FINISHED"
        }
    }, {
        "header": {
            "namespace": "ai.dueros.device_interface.alerts",
            "name": "AlertsState"
        },
        "payload": {
            "allAlerts": [],
            "activeAlerts": []
        }
    }, {
        "header": {
            "namespace": "ai.dueros.device_interface.speaker_controller",
            "name": "VolumeState"
        },
        "payload": {
            "volume": 50,
            "muted": False
        }
    }]
    meta = json.dumps(metadata)
    # `files` is only referenced by the commented-out requests-native variant
    # kept in the triple-quoted string below; the live request uses the
    # encoder-based body instead.
    files = {
        'metadata': (None, meta, 'application/json; charset=UTF-8', None),
        'audio': (None, audio_data),
    }
    multiple_files = [
        ('metadata', ('metadata', meta, 'application/json; charset=UTF-8')),
        ('audio', ('audio', audio_data, 'application/octet-stream')),
    ]
    multipart_encoder = encoder.MultipartEncoder(fields=multiple_files)
    # Monitor wraps the encoder so post_callback can track upload progress.
    monitor_encoder = encoder.MultipartEncoderMonitor(multipart_encoder,
                                                      post_callback)
    s = requests.Session()
    #s.mount('https://dueros-h2.baidu.com', TimeOutHTTP20Adapter(30))
    # hyper's HTTP20Adapter gives the session HTTP/2 support for this host.
    s.mount('https://dueros-h2.baidu.com', HTTP20Adapter())
    headers = {
        'authorization': 'Bearer 23.0e617b06679ee45113810a4252139a71.2592000.1502932213.151016915-9892205',
        'dueros-device-id': 'hyxbiao-smart',
    }
    #headers['Content-Type'] = multipart_encoder.content_type
    headers['Content-Type'] = monitor_encoder.content_type
    ''' r = s.post('https://dueros-h2.baidu.com/dcs/v1/events', headers=headers, files=files, 
    stream=True) '''
    # stream=True keeps the response open so play() can consume it incrementally.
    r = s.post(
        'https://dueros-h2.baidu.com/dcs/v1/events',
        headers=headers,
        #data=multipart_encoder,
        data=monitor_encoder,
        #data=gen(monitor_encoder),
        timeout=60,
        stream=True)
    print(r.status_code)
    play(r)
    r.close()
def __init__(self, fields):
    """Build the multipart body and wrap it in a progress monitor."""
    body = encoder.MultipartEncoder(fields=fields)
    # The monitor's callback is produced by the instance's factory so it can
    # observe the encoder it wraps.
    self.monitor = encoder.MultipartEncoderMonitor(
        body, callback=self._create_callback(body))