def upload_document(self, browser, raw_token, payload, document, new_file):
    """Replace *document*'s file through the OfficeConnector upload form.

    Builds a multipart/form-data body containing the new file plus the
    plone.protect CSRF token, POSTs it to the view named by
    ``payload['upload-form']`` with a Bearer token, and asserts the
    server answers 204 No Content.

    :param browser: test browser fixture.
    :param raw_token: raw JWT used for the Authorization header.
    :param payload: officeconnector action payload (dict-like) providing
        'content-type', 'csrf-token' and 'upload-form'.
    :param document: the document object/URL to open.
    :param new_file: an open file object whose content replaces the
        document's file.
    """
    with self.as_officeconnector(browser):
        encoder = MultipartEncoder({
            'form.widgets.file.action': 'replace',
            'form.buttons.upload': 'oc-file-upload',
            'form.widgets.file': (
                basename(new_file.name),
                new_file,
                payload.get('content-type'),
            ),
            '_authenticator': payload.get('csrf-token'),
        })
        headers = {
            'Authorization': ' '.join(('Bearer', raw_token, )),
            'Content-Type': encoder.content_type,
        }
        browser.open(
            document,
            view=payload.get('upload-form'),
            method='POST',
            headers=headers,
            data=encoder.to_string(),
        )
        # assertEquals is a deprecated alias that was removed in
        # Python 3.12; assertEqual is available on all versions.
        self.assertEqual(204, browser.status_code)
def check_read_file_with_chunks(self, file_size, read_size):
    """Stream a MultipartEncoder in *read_size* chunks and verify the
    output is byte-identical to a one-shot urllib3 encoding of the
    same parts, and that the total byte count matches."""
    boundary = "deterministic-test-boundary"

    # Reference encoding: slurp the whole mock file up front.
    reference_file = LargeFileMock(file_size)
    reference_parts = {
        'some_field': 'this is the value...',
        'some_file': reference_file.read(),
    }
    expected_bytes = encode_multipart_formdata(reference_parts, boundary)[0]
    expected_length = len(expected_bytes)

    # Streaming encoding: hand the encoder the file object itself.
    streaming_file = LargeFileMock(file_size)
    encoder = MultipartEncoder(
        {'some_field': 'this is the value...', 'some_file': streaming_file},
        boundary=boundary,
    )

    consumed = 0
    while True:
        chunk = encoder.read(read_size)
        if not chunk:
            break
        assert chunk == expected_bytes[consumed:consumed + len(chunk)]
        consumed += len(chunk)

    assert consumed == expected_length
def login(username, password):
    """Sign in to Zhihu via its OAuth endpoint using a multipart body
    with a fixed WebKit-style boundary."""
    url = 'https://www.zhihu.com/api/v3/oauth/sign_in'
    headers = getheaders()
    checkcapthca(s, headers)
    form_fields = getdata(username, password)
    encoder = MultipartEncoder(
        form_fields,
        boundary='----WebKitFormBoundarycGPN1xiTi2hCSKKZ')
    headers['Content-Type'] = encoder.content_type
    response = s.post(url, headers=headers, data=encoder.to_string())
    print(response.json())
def test_reads_file_from_url_wrapper(self):
    """A part backed by FileFromURLWrapper can be read by the encoder."""
    session = requests.Session()
    recorder = get_betamax(session)
    url = 'https://stxnext.com/static/img/logo.830ebe551641.svg'
    with recorder.use_cassette('file_for_download'):
        encoder = MultipartEncoder(
            [('field', 'foo'), ('file', FileFromURLWrapper(url))])
        assert encoder.read() is not None
def test_encodes_with_readable_data(self):
    """A bare readable (BytesIO) is encoded like any other field value."""
    stream = io.BytesIO(b'value')
    encoder = MultipartEncoder([('field', stream)], boundary=self.boundary)
    expected = (
        '--this-is-a-boundary\r\n'
        'Content-Disposition: form-data; name="field"\r\n\r\n'
        'value\r\n'
        '--this-is-a-boundary--\r\n'
    )
    assert encoder.read() == expected.encode()
def test_handles_empty_unicode_values(self):
    """Verify that the Encoder can handle empty unicode strings.

    See https://github.com/requests/toolbelt/issues/46 for more context.
    """
    field_name = b'test'.decode('utf-8')
    field_value = b''.decode('utf-8')
    encoder = MultipartEncoder(fields=[(field_name, field_value)])
    assert len(encoder.read()) > 0
def do_callback_request(self, browser, fields):
    """POST *fields* (multipart-encoded) to the meeting's
    receive_meeting_zip_pdf view while logged out."""
    meeting = self.meeting
    encoder = MultipartEncoder(fields=fields)
    body = encoder.to_string()
    request_headers = {'Content-Type': encoder.content_type}
    with self.logout():
        browser.open(
            meeting,
            view='receive_meeting_zip_pdf',
            method='POST',
            data=body,
            headers=request_headers,
        )
def login(username, password):
    """Sign in to Zhihu via its OAuth endpoint.

    The multipart boundary is currently fixed; the trailing digits could
    be randomised per request.
    """
    url = 'https://www.zhihu.com/api/v3/oauth/sign_in'
    headers = getheaders()
    form_fields = getdata(username, password)
    checkcapthca(headers)
    encoder = MultipartEncoder(
        form_fields,
        boundary='----WebKitFormBoundarycGPN1xiTi2hCSKKZ')
    headers['Content-Type'] = encoder.content_type
    response = s.post(url, headers=headers, data=encoder.to_string(), )
    print(response.json())
    print('123')
def test_upload(self):
    """Exercise the upload endpoint's access-control and success paths.

    Registers upload paths with varying owner / session / type and checks
    the HTTP status codes: 404 (unknown path), 403 (wrong user or wrong
    session), 501 (unhandled type), 200 (valid upload).
    """
    self.login()
    manager = PluginRegistry.getInstance("UploadManager")
    fpath = os.path.join(os.path.dirname(__file__), 'create_user.zip')
    with open(fpath, "rb") as f:
        m = MultipartEncoder(
            fields={'file': ('create_user.zip', f, 'text/plain')}
        )
        # Serialise once while the file is still open; the bytes are
        # reused for every request below.
        data = m.to_string()

    # try to use unregistered path
    uuid, path = manager.registerUploadPath("admin", self.session_id, "workflow")
    response = self.fetch("/uploads/unknown_path",
                          method="POST",
                          body=data,
                          headers={
                              'Content-Type': m.content_type
                          })
    assert response.code == 404
    assert manager.unregisterUploadPath(uuid) is True

    # try to use path from another user
    uuid, path = manager.registerUploadPath("other_user", self.session_id, "workflow")
    response = self.fetch(path,
                          method="POST",
                          body=data,
                          headers={
                              'Content-Type': m.content_type
                          })
    assert response.code == 403
    assert manager.unregisterUploadPath(uuid) is True

    # try to use path from another session
    uuid, path = manager.registerUploadPath("admin", "other session id", "workflow")
    response = self.fetch(path,
                          method="POST",
                          body=data,
                          headers={
                              'Content-Type': m.content_type
                          })
    assert response.code == 403
    assert manager.unregisterUploadPath(uuid) is True

    # try to use path for unhandled type
    uuid, path = manager.registerUploadPath("admin", self.session_id, "unknown-type")
    response = self.fetch(path,
                          method="POST",
                          body=data,
                          headers={
                              'Content-Type': m.content_type
                          })
    assert response.code == 501
    assert manager.unregisterUploadPath(uuid) is True

    # finally a working example
    uuid, path = manager.registerUploadPath("admin", self.session_id, "workflow")
    response = self.fetch(path,
                          method="POST",
                          body=data,
                          headers={
                              'Content-Type': m.content_type,
                              'X-File-Name': 'create_user.zip'
                          })
    assert response.code == 200
    # path should have been removed by successfully unsigning it
    assert manager.unregisterUploadPath(uuid) is False
def test_streams_its_data(self):
    """Chunked reads never buffer more than the requested chunk size."""
    large_file = LargeFileMock()
    encoder = MultipartEncoder({'some field': 'value',
                                'some file': large_file})
    chunk_size = 1024 * 1024 * 128
    while True:
        chunk = encoder.read(chunk_size)
        if not chunk:
            break
        assert encoder._buffer.tell() <= chunk_size
def test_accepts_custom_content_type(self):
    """Verify that the Encoder handles custom content-types.

    See https://github.com/requests/toolbelt/issues/52
    """
    field_name = b'test'.decode('utf-8')
    file_tuple = (
        b'filename'.decode('utf-8'),
        b'filecontent',
        b'application/json'.decode('utf-8'),
    )
    encoder = MultipartEncoder(fields=[(field_name, file_tuple)])
    body = encoder.read().decode('utf-8')
    assert body.index('Content-Type: application/json\r\n') > 0
def test_accepts_custom_headers(self):
    """Verify that the Encoder handles custom headers.

    See https://github.com/requests/toolbelt/issues/52
    """
    field_name = b'test'.decode('utf-8')
    file_tuple = (
        b'filename'.decode('utf-8'),
        b'filecontent',
        b'application/json'.decode('utf-8'),
        {'X-My-Header': 'my-value'},
    )
    encoder = MultipartEncoder(fields=[(field_name, file_tuple)])
    body = encoder.read().decode('utf-8')
    assert body.index('X-My-Header: my-value\r\n') > 0
def prepare_request(self, userid='', destination='inbox', org_unit=''):
    """Build a multipart upload request.

    Returns a (body, headers) tuple; *userid* defaults to the regular
    test user, and *org_unit* is only included when non-empty.
    """
    fields = {
        'userid': userid or self.regular_user.getId(),
        'destination': destination,
        'file': ('mydocument.txt', 'my text', 'text/plain'),
    }
    if org_unit:
        fields['org_unit'] = org_unit
    encoder = MultipartEncoder(fields=fields)
    request_headers = {
        'Content-Type': encoder.content_type,
        'Accept': 'application/json',
    }
    return encoder.to_string(), request_headers
def test_streams_its_data(self):
    """Chunked reads stay within the buffer limit and, in total,
    consume exactly the encoder's advertised length."""
    large_file = LargeFileMock()
    encoder = MultipartEncoder({'some field': 'value',
                                'some file': large_file})
    expected_total = encoder.len
    chunk_size = 1024 * 1024 * 128
    consumed = 0
    while True:
        chunk = encoder.read(chunk_size)
        consumed += len(chunk)
        if not chunk:
            break
        assert encoder._buffer.tell() <= chunk_size
    assert consumed == expected_total
def checkin_document(self, browser, tokens, payload, document, comment=None):  # noqa
    """Check *document* back in through the OfficeConnector views.

    With *comment*, POSTs the checkin form (including the CSRF token) to
    the view named by ``payload['checkin-with-comment']``; otherwise opens
    ``payload['checkin-without-comment']`` with an authenticator.  Then
    verifies the DOCUMENT_CHECKED_IN journal entry, that the journal
    comment is present exactly when a comment was supplied, and that the
    response status is 200.
    """
    with self.as_officeconnector(browser):
        headers = {
            'Authorization': ' '.join((
                'Bearer',
                tokens.get('raw_token'),
            )),
        }
        if comment:
            encoder = MultipartEncoder({
                'form.widgets.comment': comment,
                'form.buttons.button_checkin': 'Checkin',
                '_authenticator': payload.get('csrf-token'),
            })
            headers['Content-Type'] = encoder.content_type
            browser.open(
                document,
                view=payload.get('checkin-with-comment'),
                headers=headers,
                method='POST',
                data=encoder.to_string(),
            )
        else:
            browser.open(
                document,
                headers=headers,
                view=payload.get('checkin-without-comment'),
                send_authenticator=True,
            )
        self.assert_journal_entry(
            document,
            DOCUMENT_CHECKED_IN,
            u'Document checked in',
        )
        journal_comments = get_journal_entry(document).get('comments')
        if comment:
            self.assertTrue(journal_comments)
        else:
            self.assertFalse(journal_comments)
        # assertEquals is a deprecated alias removed in Python 3.12;
        # assertEqual behaves identically on all supported versions.
        self.assertEqual(200, browser.status_code)
def test_regression_2(self):
    """Ensure issue #31 doesn't ever happen again."""
    encoder = MultipartEncoder(fields={"test": "t" * 8100})
    expected_total = encoder.len
    consumed = 0
    while True:
        chunk = encoder.read(8192)
        if not chunk:
            break
        consumed += len(chunk)
    assert consumed == expected_total
def setUp(self):
    """Create one encoder/decoder pair over four simple form fields;
    the tests compare self.decoded_1 against self.sample_1."""
    self.sample_1 = tuple(
        ('field %d' % i, 'value %d' % i) for i in range(1, 5)
    )
    self.boundary = 'test boundary'
    self.encoded_1 = MultipartEncoder(self.sample_1, self.boundary)
    self.decoded_1 = MultipartDecoder(
        self.encoded_1.to_string(), self.encoded_1.content_type
    )
def test_regresion_1(self):
    """Ensure issue #31 doesn't ever happen again.

    (The method name keeps its historical typo so test discovery and
    references stay stable.)
    """
    fields = {
        "test": "t" * 100
    }
    opened_files = []
    try:
        for x in range(30):
            fd = open('tests/test_multipart_encoder.py', 'rb')
            opened_files.append(fd)
            fields['f%d' % x] = ('test', fd)

        m = MultipartEncoder(fields=fields)
        total_size = m.len
        blocksize = 8192
        read_so_far = 0

        while True:
            data = m.read(blocksize)
            if not data:
                break
            read_so_far += len(data)

        assert read_so_far == total_size
    finally:
        # The original version leaked all 30 file handles; close them
        # explicitly once the encoder has been fully drained.
        for fd in opened_files:
            fd.close()
def __register_one(self, Session):
    """Register one mixcloud.com account.

    Args:
        Session: requests Session object used for all HTTP calls.

    Returns:
        On success, a dict with the new user's data (id, username,
        password, csrftoken, c cookie), where "id" is the row id from the
        database insert.
        Otherwise a status code:
            0:  registration succeeded but activation / DB insert failed
            -1: a page request failed; the proxy should be rotated
            -2: registration failed (weak credentials, banned IP, etc.);
                needs investigation
    """
    g_var.logger.info("register...")
    # Fetch the CSRF token first; its error codes propagate unchanged.
    csrftoken = get_csrf(Session)
    if csrftoken == -1:
        return -1
    elif csrftoken == -2:
        return -2
    headers = generate_headers(0, csrftoken)
    if headers == -1:
        g_var.logger.info("获取注册headers失败...")
        return -1
    try:
        # Random credentials; the email is a synthetic hotmail address.
        username = generate_random_string(8, 12)
        password = generate_random_string(10, 14)
        email = username + '@hotmail.com'
        multipart_encoder = MultipartEncoder(
            fields={
                'email': email,
                'password': password,
                'username': username,
                'ch': 'y',
            },
            # Mimic a browser-generated WebKit boundary with a random tail.
            boundary='----WebKitFormBoundary' + generate_random_string(
                16, 16, 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ'),
        )
        headers['Content-Type'] = multipart_encoder.content_type
    except Exception as e:
        g_var.ERR_CODE = 5000
        g_var.ERR_MSG = "注册数据生成中出现异常..."
        g_var.logger.info("注册数据生成中出现异常...")
        g_var.logger.info(e)
        return -2
    url_register = 'https://www.mixcloud.com/authentication/email-register/'
    g_var.logger.info("提交注册中...")
    html = Session.post(url_register,
                        data=multipart_encoder,
                        headers=headers,
                        timeout=g_var.TIMEOUT)
    # NOTE(review): comparing a Response to -1 assumes a wrapped client
    # that returns -1 on failure -- confirm against Session's definition.
    if html == -1:
        return html
    try:
        text = json.loads(html.text)
        if not text['success']:
            g_var.logger.info(text)
            return -2
    except Exception as e:
        g_var.logger.info(e)
        g_var.ERR_CODE = 5000
        g_var.ERR_MSG = "此次注册未完成安全检查(IP问题)..."
        g_var.logger.error("此次注册未完成安全检查(IP问题)")
        return -2
    try:
        # Extract session tokens from the Set-Cookie header.
        cookie = html.headers['Set-Cookie']
        csrftoken = re.findall('csrftoken=(.*?);', cookie)
        c = re.findall('secure, c=(.*?);', cookie)
        # NOTE(review): SQL built by string concatenation -- values are
        # generated locally here, but parameterised queries would be safer.
        sql = "insert into mixcloud_com (id, username, password, mail) values('{0}', '{1}', '{2}', '{3}');" \
            .format(str(text['authentication']['currentUser']['id']),
                    username, password, email)
        last_row_id = MysqlHandler().insert(sql)
        userData = {}
        if last_row_id != -1:
            userData["id"] = last_row_id
            userData["username"] = username
            userData["password"] = password
            userData["csrftoken"] = csrftoken[0]
            userData["c"] = c[0]
            return userData
        else:
            g_var.ERR_CODE = 2004
            g_var.ERR_MSG = "数据库插入失败..."
            g_var.logger.error("数据库插入失败")
            return 0
    except Exception as e:
        g_var.logger.info(e)
        g_var.ERR_CODE = 2004
        g_var.ERR_MSG = "数据库插入失败..."
        g_var.logger.error("数据库插入失败")
        return 0
def __postMessage(self, userData, present_website):
    """Publish an article on liveinternet.ru for the given user.

    Args:
        userData: user record incl. id, username, password, cookie
            (cookie is "uid|_|password" joined by '|_|').
        present_website: current site name, used for the DB table name.

    Returns:
        'ok' on success, otherwise a status value:
            1:  break the loop and pick a new account
            0:  cookie expired (clear it) or post-verification failed
            -1: repeated proxy errors / page layout changed; stop program
            -2: this attempt failed; continue the loop
    """
    g_var.logger.info("post article ...")
    headers = generate_headers(0)
    if headers == -1:
        g_var.logger.info("获取headers失败...")
        return -1
    g_var.logger.info("article ...")
    article = get_new_article()
    if article == -1:
        return -2
    content = get_code_content(article[1])
    if content == -2:
        return -2
    g_var.logger.info("postarticle_tok ...")
    # cookie layout: "<uid>|_|<password>"
    uid_upwd = userData['cookie'].split('|_|')
    postarticle_tok = get_postarticle_tok(uid_upwd[0])
    if postarticle_tok == -1:
        return -1
    elif postarticle_tok == -2:
        return -2
    g_var.logger.info("new_article_Id ...")
    new_article_Id = get_newarticle_Id(uid_upwd, article[0], headers)
    if new_article_Id == -1:
        return -1
    elif new_article_Id == -2:
        return -2
    elif new_article_Id == 1:
        return 1
    headers['Origin'] = 'https://www.liveinternet.ru'
    headers[
        'Referer'] = 'https://www.liveinternet.ru/journal_post.php?journalid=' + uid_upwd[
            0]
    # NOTE(review): the next span was redacted in the source ("******");
    # it originally completed the Cookie header with the password and
    # opened `multipart_encoder = MultipartEncoder(fields={`.
    # Reconstruct before use -- as written it does not parse.
    headers['Cookie'] = 'bbuserid=' + uid_upwd[
        0] + '; bbpassword='******'action': 'newpost',
        'parsing': '',
        'journalid': uid_upwd[0],
        'backurl': '',
        'selectforum': '/journal_post.php?journalid=' + uid_upwd[0],
        'headerofpost': article[0],
        'mode': str(0),
        'status': 'Use these controls to insert vBcode',
        'LiNewPostForm': content,  # article body
        'tags': article[-1],  # tags
        'uploader_count': str(0),
        'music': '',
        'mood': '',
        'attachfile1': ("", '', 'application/octet-stream'),
        'MAX_FILE_SIZE': '',
        'nocomment': str(0),
        'commentsubscribe': 'yes',
        'parseurl': 'yes',
        'autosave_postid': new_article_Id,  # blog ID
        'close_level': str(0),
        'tok': postarticle_tok,
    },
        boundary='------WebKitFormBoundary' + generate_random_string(
            16, 16, 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ'),
    )
    headers['Content-Type'] = multipart_encoder.content_type
    g_var.logger.info("正在发布文章 ...")
    url_article = 'https://www.liveinternet.ru/journal_addpost.php'
    html = requestsW.post(url_article,
                          proxies=ip_proxy("en"),
                          data=multipart_encoder,
                          headers=headers)
    if html == -1:
        return -1
    # Verify whether publishing succeeded (Russian success banner).
    prove = 'Вы добавили сообщение в Ваш дневник'
    if prove not in html.text:
        g_var.ERR_CODE = 5000
        g_var.ERR_MSG = "文章发送失败,IP异常等原因..."
        g_var.logger.info('文章发送失败,IP异常等原因...')
        return 0
    del headers['Origin']
    headers[
        'Accept'] = 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9'
    g_var.logger.info("正在获取新文章id ...")
    url_new_article = 'https://www.liveinternet.ru/users/' + userData[
        'username'] + '/blog/'
    res = requestsW.get(url_new_article,
                        proxies=ip_proxy("en"),
                        headers=headers)
    if res == -1:
        return -1
    article_url = re.search(
        'https://www.liveinternet.ru/users/' + userData['username'].lower() +
        '/post(.*?)/', res.text)
    if not article_url:
        # NOTE(review): bare string expression -- the logging call around
        # this message was apparently lost; it has no effect as written.
        ('获取新发布文章url失败。。。')
        return 0
    try:
        new_article_url = article_url.group()
        # NOTE(review): SQL built via concatenation -- injection-prone if
        # any value can contain quotes.
        sql = "INSERT INTO liveinternet_ru_article(url, keyword, user_id) VALUES('" + new_article_url + "', '" + article[
            0] + "', '" + str(userData["id"]) + "');"
        last_row_id = MysqlHandler().insert(sql)
        g_var.logger.info(last_row_id)
        if last_row_id != -1:
            g_var.logger.info('文章成功!' + userData['username'])
            return 'ok'
        else:
            g_var.ERR_CODE = 2004
            g_var.ERR_MSG = "数据库插入用户注册数据失败..."
            g_var.logger.error("数据库插入用户注册数据失败...")
            return 0
    except Exception as e:
        g_var.logger.info(e)
        g_var.ERR_CODE = 2004
        g_var.ERR_MSG = "数据库插入用户注册数据异常..."
        g_var.logger.error("数据库插入用户注册数据异常...")
        return 0
def test_reads_open_file_objects_using_to_string(self):
    """to_string() succeeds when one part is an already-open file."""
    with open('setup.py', 'rb') as source:
        encoder = MultipartEncoder([('field', 'foo'), ('file', source)])
        assert encoder.to_string() is not None
class TestMultipartDecoder(unittest.TestCase):
    """Tests for MultipartDecoder round-tripping MultipartEncoder output."""

    def setUp(self):
        # Four simple form fields encoded once and decoded again; the
        # individual tests compare self.decoded_1 against self.sample_1.
        self.sample_1 = (
            ('field 1', 'value 1'),
            ('field 2', 'value 2'),
            ('field 3', 'value 3'),
            ('field 4', 'value 4'),
        )
        self.boundary = 'test boundary'
        self.encoded_1 = MultipartEncoder(self.sample_1, self.boundary)
        self.decoded_1 = MultipartDecoder(
            self.encoded_1.to_string(),
            self.encoded_1.content_type
        )

    def test_non_multipart_response_fails(self):
        """A non-multipart content-type raises NonMultipartContentTypeException."""
        jpeg_response = mock.NonCallableMagicMock(spec=requests.Response)
        jpeg_response.headers = {'content-type': 'image/jpeg'}
        with pytest.raises(NonMultipartContentTypeException):
            MultipartDecoder.from_response(jpeg_response)

    def test_length_of_parts(self):
        """Decoding yields exactly one part per encoded field."""
        assert len(self.sample_1) == len(self.decoded_1.parts)

    def test_content_of_parts(self):
        """Each decoded part's body matches the original field value."""
        def parts_equal(part, sample):
            return part.content == encode_with(sample[1], 'utf-8')

        parts_iter = zip(self.decoded_1.parts, self.sample_1)
        assert all(parts_equal(part, sample) for part, sample in parts_iter)

    def test_header_of_parts(self):
        """Each decoded part carries the expected Content-Disposition."""
        def parts_header_equal(part, sample):
            return part.headers[b'Content-Disposition'] == encode_with(
                'form-data; name="{0}"'.format(sample[0]), 'utf-8'
            )

        parts_iter = zip(self.decoded_1.parts, self.sample_1)
        assert all(
            parts_header_equal(part, sample)
            for part, sample in parts_iter
        )

    def test_from_response(self):
        """from_response() splits a hand-built multipart/related payload."""
        response = mock.NonCallableMagicMock(spec=requests.Response)
        response.headers = {
            'content-type': 'multipart/related; boundary="samp1"'
        }
        cnt = io.BytesIO()
        cnt.write(b'\r\n--samp1\r\n')
        cnt.write(b'Header-1: Header-Value-1\r\n')
        cnt.write(b'Header-2: Header-Value-2\r\n')
        cnt.write(b'\r\n')
        cnt.write(b'Body 1, Line 1\r\n')
        cnt.write(b'Body 1, Line 2\r\n')
        cnt.write(b'--samp1\r\n')
        cnt.write(b'\r\n')
        cnt.write(b'Body 2, Line 1\r\n')
        cnt.write(b'--samp1--\r\n')
        response.content = cnt.getvalue()
        decoder_2 = MultipartDecoder.from_response(response)
        assert decoder_2.content_type == response.headers['content-type']
        # The CRLF before the closing boundary is not part of the body.
        assert (
            decoder_2.parts[0].content == b'Body 1, Line 1\r\nBody 1, Line 2'
        )
        assert decoder_2.parts[0].headers[b'Header-1'] == b'Header-Value-1'
        assert len(decoder_2.parts[1].headers) == 0
        assert decoder_2.parts[1].content == b'Body 2, Line 1'
def test_read(self):
    """A fresh encoder and the monitor produce identical read() output."""
    fresh_encoder = MultipartEncoder(self.fields, self.boundary)
    assert fresh_encoder.read() == self.monitor.read()
# Continuation of an if-block whose condition starts above this chunk:
# skip notice printed when the classification model path is not configured.
else:
    print(
        '!!!! Test not executed, add CLASSIFICATION_MODEL_PRIO path !!!!!')


def test_preflight():
    """An OPTIONS (CORS preflight) request must be answered with 200."""
    event = dict(
        httpMethod='OPTIONS',
        path='/object_detection',
    )
    resp = lambda_handler_classification(event, None)
    print(resp)
    assert resp['statusCode'] == 200


if __name__ == '__main__':
    # NOTE(review): this first encoder is immediately overwritten below
    # (and its two files are never closed) -- looks like leftover code.
    mp_encoder = MultipartEncoder(
        fields={
            'field0': open("tests/binary1.dat", "rb"),
            'field1': open("tests/binary2.dat", "rb")
        })
    mp_encoder = MultipartEncoder(
        fields={'field0': open("tests/clio4.jpg", "rb")})
    body = mp_encoder.to_string()
    # Simulate an API-Gateway POST event carrying the multipart body.
    event = dict(httpMethod='POST',
                 path='/predict',
                 headers={'Content-Type': mp_encoder.content_type},
                 body=body)
    test_handler(event)
def request_upload(self, path, fields=None):
    '''Generic HTTP MultiPart POST method for MSO uploads.

    Multipart-encodes *fields*, POSTs them to *path* on the MSO host via
    Ansible's fetch_url, records response/status on self, mirrors the
    'modified' response header into self.result['changed'], and returns
    the decoded JSON body (or {} when there is none).  Calls
    self.fail_json on errors.
    '''
    self.path = path
    self.url = urljoin(self.baseuri, path)

    # requests-toolbelt is an optional dependency; bail out clearly.
    if not HAS_MULTIPART_ENCODER:
        self.fail_json(
            msg=
            'requests-toolbelt is required for the upload state of this module'
        )

    mp_encoder = MultipartEncoder(fields=fields)
    self.headers['Content-Type'] = mp_encoder.content_type
    self.headers['Accept-Encoding'] = "gzip, deflate, br"

    resp, info = fetch_url(self.module,
                           self.url,
                           headers=self.headers,
                           data=mp_encoder,
                           method='POST',
                           timeout=self.params.get('timeout'),
                           use_proxy=self.params.get('use_proxy'))

    self.response = info.get('msg')
    self.status = info.get('status')

    # Get change status from HTTP headers
    if 'modified' in info:
        self.has_modified = True
        if info.get('modified') == 'false':
            self.result['changed'] = False
        elif info.get('modified') == 'true':
            self.result['changed'] = True

    # 200: OK, 201: Created, 202: Accepted, 204: No Content
    if self.status in (200, 201, 202, 204):
        output = resp.read()
        if output:
            return json.loads(output)

    # 400: Bad Request, 401: Unauthorized, 403: Forbidden,
    # 405: Method Not Allowed, 406: Not Acceptable
    # 500: Internal Server Error, 501: Not Implemented
    elif self.status >= 400:
        # Prefer the response body; fall back to the info body.
        try:
            payload = json.loads(resp.read())
        except (ValueError, AttributeError):
            try:
                payload = json.loads(info.get('body'))
            except Exception:
                self.fail_json(msg='MSO Error:', info=info)
        if 'code' in payload:
            self.fail_json(
                msg='MSO Error {code}: {message}'.format(**payload),
                info=info,
                payload=payload)
        else:
            # NOTE(review): .format(**payload) on a placeholder-free
            # string is a no-op -- probably meant to include payload data.
            self.fail_json(msg='MSO Error:'.format(**payload),
                           info=info,
                           payload=payload)

    return {}
def webwxuploadmedia(self, image_name):
    """Upload a media file to the WeChat web file server.

    Builds the webwxuploadmedia multipart request from the local file at
    *image_name* and the session's tickets/cookies.  Returns the parsed
    JSON response on success (BaseResponse.Ret == 0), None on a non-zero
    return code, or an error string when the data-ticket cookie is
    missing.
    """
    url = 'https://file2.wx.qq.com/cgi-bin/mmwebwx-bin/webwxuploadmedia?f=json'
    # Upload counter (used for the WU_FILE_<n> field id).
    self.media_count = self.media_count + 1
    # File name
    file_name = image_name
    # MIME type, e.g. application/pdf, image/jpeg, image/png, etc.
    # NOTE(review): guess_type may return None for unknown extensions,
    # which would crash the split below -- confirm inputs.
    mime_type = mimetypes.guess_type(image_name, strict=False)[0]
    # Media category WeChat recognises; the server appears to support
    # only two: 'pic' (rendered inline) and 'doc' (shown as a file).
    media_type = 'pic' if mime_type.split('/')[0] == 'image' else 'doc'
    # Last-modified date (fixed placeholder value).
    lastModifieDate = 'Thu Mar 17 2016 00:55:10 GMT+0800 (CST)'
    # File size in bytes.
    file_size = os.path.getsize(file_name)
    # PassTicket from the login handshake.
    pass_ticket = self.pass_ticket
    # clientMediaId: millisecond timestamp + 4 random digits.
    client_media_id = str(int(time.time() * 1000)) + \
        str(random.random())[:5].replace('.', '')
    # webwx_data_ticket is required; pull it from the cookie jar.
    webwx_data_ticket = ''
    for item in self.cookie:
        if item.name == 'webwx_data_ticket':
            webwx_data_ticket = item.value
            break
    if (webwx_data_ticket == ''):
        return "None F**k Cookie"
    uploadmediarequest = json.dumps(
        {
            "BaseRequest": self.BaseRequest,
            "ClientMediaId": client_media_id,
            "TotalLen": file_size,
            "StartPos": 0,
            "DataLen": file_size,
            "MediaType": 4
        },
        ensure_ascii=False).encode('utf8')
    multipart_encoder = MultipartEncoder(
        fields={
            'id': 'WU_FILE_' + str(self.media_count),
            'name': file_name,
            'type': mime_type,
            'lastModifieDate': lastModifieDate,
            'size': str(file_size),
            'mediatype': media_type,
            'uploadmediarequest': uploadmediarequest,
            'webwx_data_ticket': webwx_data_ticket,
            'pass_ticket': pass_ticket,
            'filename': (file_name, open(file_name, 'rb'),
                         mime_type.split('/')[1])
        },
        boundary='-----------------------------1575017231431605357584454111'
    )
    headers = {
        'Host': 'file2.wx.qq.com',
        'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.10; rv:42.0) Gecko/20100101 Firefox/42.0',
        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
        'Accept-Language': 'en-US,en;q=0.5',
        'Accept-Encoding': 'gzip, deflate',
        'Referer': 'https://wx2.qq.com/',
        'Content-Type': multipart_encoder.content_type,
        'Origin': 'https://wx2.qq.com',
        'Connection': 'keep-alive',
        'Pragma': 'no-cache',
        'Cache-Control': 'no-cache'
    }
    r = requests.post(url, data=multipart_encoder, headers=headers)
    response_json = r.json()
    if response_json['BaseResponse']['Ret'] == 0:
        return response_json
    return None
def import_fhir_resources(hc_api, hc_fhirstore, fhir_refs, fw_api, fw_project):
    """Import FHIR resources from a Healthcare API FHIR store into Flywheel.

    For each "Type/id" reference in *fhir_refs*, reads the resource,
    derives a master subject code, builds upload metadata and attaches the
    resource JSON as a file: on the subject (Patient), session (Encounter)
    or acquisition (anything else).

    :param hc_api: Healthcare API client with fhirStores.fhir.read().
    :param hc_fhirstore: full FHIR store resource name.
    :param fhir_refs: iterable of 'ResourceType/resource-id' strings.
    :param fw_api: Flywheel API client (post, plus helpers used below).
    :param fw_project: Flywheel project dict with 'group' and 'label'.
    """
    log.info('Importing FHIR resources...')
    for resource_ref in fhir_refs:
        resource_type, resource_id = resource_ref.split('/')
        resource = hc_api.fhirStores.fhir.read(name='{}/fhir/{}/{}'.format(
            hc_fhirstore, resource_type, resource_id))

        log.debug(' Creating metadata...')
        resource_obj = FHIRResource(resource, hc_api, hc_fhirstore)
        subj_code_payload = {
            'patient_id': resource_obj.patient_id,
            'first_name': resource_obj.subject_firstname,
            'last_name': resource_obj.subject_lastname,
            'date_of_birth': resource_obj.dob.strftime('%Y-%m-%d'),
            'use_patient_id': bool(resource_obj.patient_id)
        }
        master_subject_code = get_master_subject_code(subj_code_payload,
                                                      fw_api)
        log.debug(master_subject_code)
        subject = get_subject_by_master_code(master_subject_code, fw_project,
                                             fw_api)
        metadata = get_metadata(resource_obj)
        metadata.setdefault('group', {})['_id'] = fw_project['group']
        metadata.setdefault('project', {})['label'] = fw_project['label']
        # Keep a copy of the subject details before replacing them with
        # just the master code.
        subject_info = copy.deepcopy(metadata['session']['subject'])
        metadata['session']['subject'] = {'master_code': master_subject_code}
        # Pick where the file attaches based on the resource type.
        collection = metadata['session'][
            'subject'] if resource_type == 'Patient' else metadata[
                'session'] if resource_type == 'Encounter' else metadata[
                    'acquisition']
        filename = resource_type.lower() if resource_type in [
            'Patient', 'Encounter'
        ] else resource['id']
        collection['files'] = [{
            'name': filename + '.fhir.json',
            'type': 'fhir',
            'info': {
                'fhir': resource,
                **resource_obj.extra_info
            }
        }]
        if resource_type in ['Patient', 'Encounter']:
            # Patient/Encounter uploads carry no acquisition.
            del metadata['acquisition']
        # Backfill subject fields not already present on the subject.
        # NOTE(review): in the flattened source the nesting of this loop
        # (inside vs. after the if above) is ambiguous -- confirm intent.
        for key in ('code', 'firstname', 'lastname', 'sex', 'type'):
            if not (subject and subject.get(key)) and subject_info.get(key):
                metadata['session']['subject'][key] = subject_info[key]
        log.debug(' Upload metadata:\n%s', pprint.pformat(metadata))
        log.debug(' Uploading...')
        metadata_json = json.dumps(metadata, default=metadata_encoder)
        msg_json = json.dumps(resource, sort_keys=True, indent=4,
                              default=metadata_encoder)
        mpe = MultipartEncoder(fields={
            'metadata': metadata_json,
            'file': (filename + '.fhir.json', msg_json)
        })
        resp = fw_api.post('upload/label',
                           data=mpe,
                           headers={'Content-Type': mpe.content_type})
        log.debug(' Upload response:\n%s', pprint.pformat(resp.json()))
        resp.raise_for_status()
def make_request(self, path, data=None, ajax=False, debug=True, force_login=False):
    """Issue a GET (no *data*) or multipart POST (*data*) via the test session.

    Propagates CSRF token and collected cookies into the session headers,
    retries up to 3 times on transient connection errors, and re-raises
    HTTP errors as urllib HTTPError with the response content appended.

    :param path: absolute URL or path appended to self.url.
    :param data: optional dict of fields; file objects are converted to
        (basename, fileobj) tuples for multipart encoding.
    :param ajax: add force_ajax query arg and XMLHttpRequest header.
    :param debug: log error details on HTTP failures.
    :param force_login: re-send the session cookie explicitly.
    """
    url = path if path.startswith("http") else self.url + path
    logger.error(f" make_request ----------> url: {url}")
    if ajax:
        url += f"{('&' if '?' in url else '?')}force_ajax=true"
        self._session.headers['X_REQUESTED_WITH'] = "XMLHttpRequest"
    cookie_value = self._session.cookies.get(settings.SESSION_COOKIE_NAME)
    if force_login and cookie_value:
        self.response_cookies += f"; {settings.SESSION_COOKIE_NAME}={cookie_value}"
    if self.csrf_token:
        self._session.headers['X-CSRFToken'] = self.csrf_token
    if self.response_cookies:
        self._session.headers['cookie'] = self.response_cookies
    if data:
        # Wrap open files as (filename, fileobj) so the encoder treats
        # them as file parts.
        for name, value in data.items():
            if isinstance(value, IOBase):
                data[name] = (os.path.basename(value.name), value)
        encoder = MultipartEncoder(fields=data)
        self._session.headers['Content-Type'] = encoder.content_type
        self._session.mount(f"{urlsplit(url).scheme}://", self._adapter)
        self._session.verify = False
        self._action = getattr(self._session, 'post', None)
        # Retry transient connection failures up to 3 times.
        _retry = 0
        _not_done = True
        while _not_done and _retry < 3:
            try:
                response = self._action(url=url,
                                        data=encoder,
                                        headers=self._session.headers,
                                        timeout=10,
                                        stream=False)
                _not_done = False
            except (ProtocolError, ConnectionError, ConnectionResetError):
                time.sleep(1.0)
                _not_done = True
            finally:
                _retry += 1
    else:
        self._session.mount(f"{urlsplit(url).scheme}://", self._adapter)
        self._session.verify = False
        self._action = getattr(self._session, 'get', None)
        _retry = 0
        _not_done = True
        while _not_done and _retry < 3:
            try:
                response = self._action(url=url,
                                        data=None,
                                        headers=self._session.headers,
                                        timeout=10,
                                        stream=False)
                _not_done = False
            except (ProtocolError, ConnectionError, ConnectionResetError):
                time.sleep(1.0)
                _not_done = True
            finally:
                _retry += 1
    try:
        response.raise_for_status()
    except requests.exceptions.HTTPError as ex:
        message = ''
        if hasattr(ex, 'message'):
            if debug:
                logger.error(f'error in request to {path}')
                logger.error(ex.message)
            # Strip the "<code>: " prefix from the message text.
            message = ex.message[ex.message.index(':') + 2:]
        else:
            message = str(ex)
        # NOTE(review): missing closing ')' inside this runtime string --
        # kept as-is since changing it would alter output.
        message = f"{message} (Content: {response.content.decode()}"
        raise HTTPError(url, response.status_code, message,
                        response.headers, None)
    logger.error(f" make_request ----------> response: {response}")
    return response
# Recipient address (redacted in this copy of the source).
person = '*****@*****.**'

# Short name -> image path, taken from the INI-style config's IMAGES section.
images = {
    'ayb': config['IMAGES']['AYB'],
    'developer': config['IMAGES']['Developer'],
    'afx': config['IMAGES']['Afx'],
    'automation': config['IMAGES']['Automation'],
    'hephaestus': config['IMAGES']['Hephaestus'],
    'turk': config['IMAGES']['Turk'],
    'matters': config['IMAGES']['Matters'],
    'lunch': config['IMAGES']['Lunch'],
    'garfield': config['IMAGES']['Garfield'],
    'towel': config['IMAGES']['Towel']
}

# Multipart message: room id, text, and one file attachment (empty
# content-type lets the server sniff it).  `room` and `aAuth` are
# defined elsewhere in the full script.
m = MultipartEncoder({
    'roomId': room,
    'text': 'test',
    'files': (images['hephaestus'], open(images['hephaestus'], 'rb'), '')
})

# Post the message to the Cisco Spark (Webex) messages API.
r = requests.post('https://api.ciscospark.com/v1/messages',
                  data=m,
                  headers={
                      'Authorization': 'Bearer {auth}'.format(auth=aAuth),
                      'Content-Type': m.content_type
                  })
print(r.text)
def upload_video(oauth, secret, options):
    """Upload a local video file to a PeerTube instance.

    :param oauth: an authenticated OAuth session (get/post).
    :param secret: mapping with 'peertube_url'.
    :param options: mapping with 'file', optional 'name' and 'language'.
    :returns: the public watch URL on success; exits the process on an
        unexpected upload response.
    """

    def get_userinfo():
        # Fetch the authenticated user's profile; `url` is bound later in
        # the enclosing scope, before this helper is called.
        str_response = oauth.get(url + "/api/v1/users/me").content.decode('utf-8')
        return json.loads(str_response)
        #return json.loads(oauth.get(url+"/api/v1/users/me").content)

    def get_file(path):
        # Return (filename, fileobj, mime type) as expected for a
        # multipart file field.
        mimetypes.init()
        return (basename(path), open(abspath(path), 'rb'),
                mimetypes.types_map[splitext(path)[1]])

    path = options['file']
    url = str(secret['peertube_url']).rstrip('/')
    user_info = get_userinfo()

    # We need to transform fields into tuple to deal with tags as
    # MultipartEncoder does not support list refer
    # https://github.com/requests/toolbelt/issues/190 and
    # https://github.com/requests/toolbelt/issues/205
    fields = [
        ("name", options['name'] or splitext(basename(options['file']))[0]),
        ("licence", "1"),
        ("description", "KircheNeuenburg.de"),
        ("nsfw", "0"),
        ("videofile", get_file(path))
    ]

    # if no category, set default to 2 (Films)
    fields.append(("category", "2"))
    if options['language']:
        fields.append(("language",
                       str(utils.getLanguage(options['language'],
                                             "peertube"))))
    else:
        # if no language, set default to 1 (English)
        fields.append(("language", "en"))
    fields.append(("commentsEnabled", "1"))
    # NOTE(review): `privacy` is assigned but never used below.
    privacy = None
    fields.append(("privacy", str(PEERTUBE_PRIVACY["public"])))
    playlist_id = get_default_playlist(user_info)
    fields.append(("channelId", str(playlist_id)))

    multipart_data = MultipartEncoder(fields)
    headers = {
        'Content-Type': multipart_data.content_type
    }
    response = oauth.post(url + "/api/v1/videos/upload",
                          data=multipart_data,
                          headers=headers)
    if response is not None:
        if response.status_code == 200:
            jresponse = response.json()
            jresponse = jresponse['video']
            uuid = jresponse['uuid']
            # NOTE(review): `idvideo` is computed but never used.
            idvideo = str(jresponse['id'])
            logging.info('Peertube : Video was successfully uploaded.')
            template = '%s/videos/watch/%s'
            logging.info(template % (url, uuid))
            print(template % (url, uuid))
            return template % (url, uuid)
        else:
            logging.error(('Peertube: The upload failed with an unexpected response: '
                           '%s') % response)
            exit(1)
def detect_sync(*args, **kwargs):
    """Synchronous detection endpoint body.

    Runs the object detector over a batch of images, filters detections by
    the requested confidence threshold and returns a multipart response
    containing the JSON result and (optionally) the annotated images.

    Expected kwargs: ``render_boxes`` (bool), ``detection_confidence``
    (float), ``images`` and ``image_names`` (parallel lists).  All failures
    are reported via ``abort(500, ...)``.
    """
    render_boxes = kwargs.get('render_boxes')
    detection_confidence = kwargs.get('detection_confidence')
    images = kwargs.get('images')
    image_names = kwargs.get('image_names')

    # consolidate the images into batches and perform detection on them
    try:
        print('runserver, post_detect_sync, batching and inferencing...')
        # detections is an array of dicts
        tic = datetime.now()
        detections = detector.generate_detections_batch(images)
        toc = datetime.now()
        inference_duration = toc - tic
        print('runserver, post_detect_sync, inference duration: {} seconds.'.format(inference_duration))
    except Exception as e:
        print('Error performing detection on the images: ' + str(e))
        log.log_exception('Error performing detection on the images: ' + str(e))
        abort(500, 'Error performing detection on the images: ' + str(e))

    # filter the detections by the confidence threshold
    try:
        # json to return to the user along with the rendered images if they
        # opted for it
        result = {}
        for image_name, d in zip(image_names, detections):
            result[image_name] = []
            for box, score, category in zip(d['box'], d['score'], d['category']):
                if score > detection_confidence:
                    # each result is [ymin, xmin, ymax, xmax, confidence, category]
                    res = convert_numpy_floats(box)  # numpy float doesn't jsonify
                    res.append(float(score))
                    # category is an int here, not string as in the async API
                    res.append(int(category))
                    result[image_name].append(res)
    except Exception as e:
        print('Error consolidating the detection boxes: ' + str(e))
        log.log_exception('Error consolidating the detection boxes: ' + str(e))
        abort(500, 'Error consolidating the detection boxes: ' + str(e))

    # return results; optionally render the detections on the images and send
    # the annotated images back
    try:
        print('runserver, post_detect_sync, rendering and sending images back...')
        files = {
            'result': ('result', json.dumps(result), 'application/json')
        }
        if render_boxes:
            # Annotate each image in place, then attach it as one more
            # multipart part keyed by its name.
            for image_name, d in zip(image_names, detections):
                image = d['image']
                TFDetector.render_bounding_boxes(d['box'], d['score'], d['category'], image,
                                                 confidence_threshold=detection_confidence)
                output_img_stream = BytesIO()
                image.save(output_img_stream, format='jpeg')
                output_img_stream.seek(0)
                files[image_name] = (image_name, output_img_stream, 'image/jpeg')
        m = MultipartEncoder(fields=files)
        log.log_info('runserver, post_detect_sync, inference duration: {} seconds.'.format(inference_duration),
                     additionalProperties={
                         'inference_duration': str(inference_duration),
                         'num_images': len(image_names),
                         'render_boxes': render_boxes,
                         'detection_confidence': detection_confidence
                     })
        return Response(m.to_string(), mimetype=m.content_type)
    except Exception as e:
        print('Error returning result or rendering the detection boxes: ' + str(e))
        log.log_exception('Error returning result or rendering the detection boxes: ' + str(e))
        abort(500, 'Error returning result or rendering the detection boxes: ' + str(e))
def sync(self):  # noqa: CCR001
    """Synchronise the 'Media' worksheet rows with the product's media.

    Each row describes one media item and an action ('-' skip, 'delete',
    'update', anything else creates).  Per-row outcomes accumulate in
    ``self._mstats``; a row error never aborts the loop.
    """
    ws = self._wb['Media']
    row_indexes = trange(
        2,
        ws.max_row + 1,
        disable=self._silent,
        leave=True,
        bar_format=DEFAULT_BAR_FORMAT,
    )
    for row_idx in row_indexes:
        # Columns 1..6 map positionally onto the _RowData fields.
        data = _RowData(*[ws.cell(row_idx, col_idx).value for col_idx in range(1, 7)])
        row_indexes.set_description(f'Processing Media {data.id or data.position or "New"}')
        if data.action == '-':
            self._mstats.skipped()
            continue
        row_errors = self._validate_row(data)
        if row_errors:
            self._mstats.error(row_errors, row_idx)
            continue
        if data.action == 'delete':
            try:
                self._client.products[self._product_id].media[data.id].delete()
                self._mstats.deleted()
                continue
            except ClientError as e:
                # 404 means the media is already gone: still count as deleted.
                if e.status_code == 404:
                    self._mstats.deleted()
                else:
                    self._mstats.error(str(e), row_idx)
                continue

        # Build the multipart payload once (the image/video branches only
        # differed by the extra 'url' field) and make sure the thumbnail
        # file handle is closed after the request instead of leaking.
        thumbnail = open(
            os.path.join(self._media_path, 'media', data.image_file),
            'rb',
        )
        try:
            fields = {
                'type': (data.type, data.type),
                'position': (str(data.position), str(data.position)),
                'thumbnail': (data.image_file, thumbnail),
            }
            if data.type != 'image':
                fields['url'] = data.video_url_location
            payload = MultipartEncoder(fields=fields)
            try:
                if data.action == 'update':
                    media = self._client.products[self._product_id].media[data.id].update(
                        data=payload,
                        headers={'Content-Type': payload.content_type},
                    )
                    self._update_sheet_row(ws, row_idx, media)
                    self._mstats.updated()
                else:
                    media = self._client.products[self._product_id].media.create(
                        data=payload,
                        headers={'Content-Type': payload.content_type},
                    )
                    self._update_sheet_row(ws, row_idx, media)
                    self._mstats.created()
            except Exception as e:
                self._mstats.error(str(e), row_idx)
        finally:
            thumbnail.close()
'Referer': url } #路径 file = 'D:\\test\上传测试.doc' multipart_encoder = MultipartEncoder( fields = { #这里根据服务器需要的参数格式进行修改 'params': json.dumps({ 'folderId':-100, 'type':'onlinedisk', 'name':'5106024f8a22422172bd88d455be48a0.gif', 'size':16043, 'md5':'57c1a6348e35d4f86ed4d520da8e1dc2', 'ignoreSame':false, 'autoRename':false, 'startPosition':0, 'blockMd5':'57c1a6348e35d4f86ed4d520da8e1dc2', 'blockSize':16043, 'quickVerifyCode':'3c91184c5c91e13a60ebbf144f13783c', 'repaire':false }), 'file': ('file', open(file, 'rb'), 'application/octet-stream') }, boundary=boundary ) headers['Content-Type'] = multipart_encoder.content_type #请求头必须包含一个特殊的头信息,类似于Content-Type: multipart/form-data; boundary=${bound} #注意:这里请求头也可以自己设置Content-Type信息,用于自定义boundary r = requests.post(url, data=multipart_encoder, headers=headers) print(r.text)
class TestMultipartEncoder(unittest.TestCase):
    """Tests for MultipartEncoder, using the ``filepost`` helper
    (``encode_multipart_formdata``) as the reference encoder and the
    encoder's ``.len`` attribute for size checks."""

    def setUp(self):
        self.parts = [('field', 'value'), ('other_field', 'other_value')]
        self.boundary = 'this-is-a-boundary'
        self.instance = MultipartEncoder(self.parts, boundary=self.boundary)

    def test_to_string(self):
        assert self.instance.to_string() == (
            '--this-is-a-boundary\r\n'
            'Content-Disposition: form-data; name="field"\r\n\r\n'
            'value\r\n'
            '--this-is-a-boundary\r\n'
            'Content-Disposition: form-data; name="other_field"\r\n\r\n'
            'other_value\r\n'
            '--this-is-a-boundary--\r\n'
        ).encode()

    def test_content_type(self):
        expected = 'multipart/form-data; boundary=this-is-a-boundary'
        assert self.instance.content_type == expected

    def test_encodes_data_the_same(self):
        # The reference encoding and the streaming encoder must agree.
        encoded = filepost.encode_multipart_formdata(self.parts, self.boundary)[0]
        assert encoded == self.instance.read()

    def test_streams_its_data(self):
        large_file = LargeFileMock()
        parts = {'some field': 'value', 'some file': large_file, }
        encoder = MultipartEncoder(parts)
        total_size = encoder.len
        read_size = 1024 * 1024 * 128
        already_read = 0
        while True:
            read = encoder.read(read_size)
            already_read += len(read)
            if not read:
                break
            # The internal buffer must never grow past one read chunk.
            assert encoder._buffer.tell() <= read_size
        assert already_read == total_size

    def test_length_is_correct(self):
        encoded = filepost.encode_multipart_formdata(self.parts, self.boundary)[0]
        assert len(encoded) == self.instance.len

    def test_encodes_with_readable_data(self):
        s = io.BytesIO(b'value')
        m = MultipartEncoder([('field', s)], boundary=self.boundary)
        assert m.read() == (
            '--this-is-a-boundary\r\n'
            'Content-Disposition: form-data; name="field"\r\n\r\n'
            'value\r\n'
            '--this-is-a-boundary--\r\n'
        ).encode()

    def test_reads_open_file_objects(self):
        with open('setup.py', 'rb') as fd:
            m = MultipartEncoder([('field', 'foo'), ('file', fd)])
            assert m.read() is not None

    def test_reads_open_file_objects_with_a_specified_filename(self):
        with open('setup.py', 'rb') as fd:
            m = MultipartEncoder(
                [('field', 'foo'), ('file', ('filename', fd, 'text/plain'))]
            )
            assert m.read() is not None

    def test_reads_open_file_objects_using_to_string(self):
        with open('setup.py', 'rb') as fd:
            m = MultipartEncoder([('field', 'foo'), ('file', fd)])
            assert m.to_string() is not None

    def test_handles_encoded_unicode_strings(self):
        m = MultipartEncoder([
            ('field',
             b'this is a unicode string: \xc3\xa9 \xc3\xa1 \xc7\xab \xc3\xb3')
        ])
        assert m.read() is not None

    def test_handles_uncode_strings(self):
        s = b'this is a unicode string: \xc3\xa9 \xc3\xa1 \xc7\xab \xc3\xb3'
        m = MultipartEncoder([
            ('field', s.decode('utf-8'))
        ])
        assert m.read() is not None

    def test_regresion_1(self):
        """Ensure issue #31 doesn't ever happen again."""
        fields = {
            "test": "t" * 100
        }

        for x in range(30):
            fields['f%d' % x] = (
                'test', open('tests/test_multipart_encoder.py', 'rb')
            )

        m = MultipartEncoder(fields=fields)
        total_size = m.len

        blocksize = 8192
        read_so_far = 0

        while True:
            data = m.read(blocksize)
            if not data:
                break
            read_so_far += len(data)

        assert read_so_far == total_size

    def test_regression_2(self):
        """Ensure issue #31 doesn't ever happen again."""
        fields = {
            "test": "t" * 8100
        }

        m = MultipartEncoder(fields=fields)
        total_size = m.len

        blocksize = 8192
        read_so_far = 0

        while True:
            data = m.read(blocksize)
            if not data:
                break
            read_so_far += len(data)

        assert read_so_far == total_size

    def test_handles_empty_unicode_values(self):
        """Verify that the Encoder can handle empty unicode strings.

        See https://github.com/requests/toolbelt/issues/46 for
        more context.
        """
        fields = [(b'test'.decode('utf-8'), b''.decode('utf-8'))]
        m = MultipartEncoder(fields=fields)
        assert len(m.read()) > 0

    def test_accepts_custom_content_type(self):
        """Verify that the Encoder handles custom content-types.

        See https://github.com/requests/toolbelt/issues/52
        """
        fields = [
            (b'test'.decode('utf-8'),
             (b'filename'.decode('utf-8'),
              b'filecontent',
              b'application/json'.decode('utf-8')))
        ]
        m = MultipartEncoder(fields=fields)
        output = m.read().decode('utf-8')
        assert output.index('Content-Type: application/json\r\n') > 0

    def test_accepts_custom_headers(self):
        """Verify that the Encoder handles custom headers.

        See https://github.com/requests/toolbelt/issues/52
        """
        fields = [
            (b'test'.decode('utf-8'),
             (b'filename'.decode('utf-8'),
              b'filecontent',
              b'application/json'.decode('utf-8'),
              {'X-My-Header': 'my-value'}))
        ]
        m = MultipartEncoder(fields=fields)
        output = m.read().decode('utf-8')
        assert output.index('X-My-Header: my-value\r\n') > 0
class TestMultipartEncoder(unittest.TestCase):
    """Variant of the MultipartEncoder test suite that compares against a
    bare ``encode_multipart_formdata`` and uses ``len(...)`` for size
    checks."""

    def setUp(self):
        self.parts = [('field', 'value'), ('other_field', 'other_value')]
        self.boundary = 'this-is-a-boundary'
        self.instance = MultipartEncoder(self.parts, boundary=self.boundary)

    def test_to_string(self):
        assert self.instance.to_string() == (
            '--this-is-a-boundary\r\n'
            'Content-Disposition: form-data; name="field"\r\n\r\n'
            'value\r\n'
            '--this-is-a-boundary\r\n'
            'Content-Disposition: form-data; name="other_field"\r\n\r\n'
            'other_value\r\n'
            '--this-is-a-boundary--\r\n'
        ).encode()

    def test_content_type(self):
        expected = 'multipart/form-data; boundary=this-is-a-boundary'
        assert self.instance.content_type == expected

    def test_encodes_data_the_same(self):
        # The reference encoding and the streaming encoder must agree.
        encoded = encode_multipart_formdata(self.parts, self.boundary)[0]
        assert encoded == self.instance.read()

    def test_streams_its_data(self):
        large_file = LargeFileMock()
        parts = {'some field': 'value', 'some file': large_file, }
        encoder = MultipartEncoder(parts)
        read_size = 1024 * 1024 * 128
        while True:
            read = encoder.read(read_size)
            if not read:
                break
            # The internal buffer must never grow past one read chunk.
            assert encoder._buffer.tell() <= read_size

    def test_length_is_correct(self):
        encoded = encode_multipart_formdata(self.parts, self.boundary)[0]
        assert len(encoded) == len(self.instance)

    def test_encodes_with_readable_data(self):
        s = io.BytesIO(b'value')
        m = MultipartEncoder([('field', s)], boundary=self.boundary)
        assert m.read() == (
            '--this-is-a-boundary\r\n'
            'Content-Disposition: form-data; name="field"\r\n\r\n'
            'value\r\n'
            '--this-is-a-boundary--\r\n'
        ).encode()

    def test_reads_open_file_objects(self):
        with open('setup.py', 'rb') as fd:
            m = MultipartEncoder([('field', 'foo'), ('file', fd)])
            assert m.read() is not None

    def test_reads_open_file_objects_with_a_specified_filename(self):
        with open('setup.py', 'rb') as fd:
            m = MultipartEncoder(
                [('field', 'foo'), ('file', ('filename', fd, 'text/plain'))]
            )
            assert m.read() is not None

    def test_reads_open_file_objects_using_to_string(self):
        with open('setup.py', 'rb') as fd:
            m = MultipartEncoder([('field', 'foo'), ('file', fd)])
            assert m.to_string() is not None

    def test_handles_encoded_unicode_strings(self):
        m = MultipartEncoder([
            ('field',
             b'this is a unicode string: \xc3\xa9 \xc3\xa1 \xc7\xab \xc3\xb3')
        ])
        assert m.read() is not None

    def test_handles_uncode_strings(self):
        s = b'this is a unicode string: \xc3\xa9 \xc3\xa1 \xc7\xab \xc3\xb3'
        m = MultipartEncoder([
            ('field', s.decode('utf-8'))
        ])
        assert m.read() is not None
def __register_one(self, present_website, email_and_passwd):
    """Register a single account on liveinternet.ru: submit the registration
    form, complete the e-mail activation step, and store the resulting
    account in the database.

    Args:
        present_website: name of the current site; used as the database
            table name.
        email_and_passwd: mailbox credentials; ``[0]`` is the address,
            ``[1]`` the password.

    Returns:
        On success, a dict with ``id``, ``username``, ``password`` and
        ``cookie`` (the cookie is captured from the activation response,
        empty if unavailable).  ``id`` comes from the DB insert.

        On failure, a status code:
             0: errors that require leaving the retry loop and switching
                to a new mailbox
            -1: repeated proxy errors or page-layout changes (cannot fetch
                key data); the program should stop
            -2: registration failed (e.g. captcha solving error); the
                mailbox can be reused, so do not leave the loop
    """
    g_var.logger.info('register......')
    url = 'http://www.liveinternet.ru/journal_register.php'
    headers = generate_headers(0)
    if headers == -1:
        g_var.logger.info("获取headers失败...")
        return -1
    tok = get_tok(url, headers)
    if tok == -1:
        return -1
    elif tok == -2:
        return -2
    # Site-wide reCAPTCHA key for the registration page.
    googlekey = '6Lcl3BYUAAAAAG1gTAOhNtJIeTrPn68melrC1gbV'
    captcha_value = google_captcha("", googlekey, url)
    if captcha_value == -1:
        return -2
    registerData = generate_register_data(email_and_passwd, captcha_value, tok)
    headers['Origin'] = 'http://www.liveinternet.ru'
    headers['Referer'] = 'http://www.liveinternet.ru/journal_register.php'
    headers['Content-Type'] = 'application/x-www-form-urlencoded'
    g_var.logger.info("提交注册中...")
    html = requestsW.post(url, proxies=ip_proxy("en"), data=registerData,
                          headers=headers, timeout=g_var.TIMEOUT)
    if html == -1:
        return html
    # Verify whether step one of the registration succeeded: the response
    # page is expected to echo the e-mail address exactly twice.
    result = re.findall(email_and_passwd[0], html.text)
    if len(result) != 2:
        g_var.logger.info("第一步注册失败...")
        g_var.logger.info(html.status_code)
        return -2
    # E-mail verification step.
    time.sleep(2)
    verify_url = get_verify_url(email_and_passwd)
    if verify_url == 0:
        g_var.logger.info("未读取到邮箱验证的url...")
        return 0
    # Fetch the token required by the e-mail verification form.
    email_tok = get_tok_email(verify_url)
    if email_tok == -1:
        return 0
    elif email_tok == -2:
        return 0
    # `id` and `h` are opaque parameters carried over from the activation
    # link.  (Note: `id` shadows the builtin of the same name.)
    id = re.findall('id=(.*?)&', verify_url)[0]
    h = re.findall('h=(.*)', verify_url)[0]
    headers['Referer'] = verify_url
    captcha_value = google_captcha("", googlekey, verify_url)
    if captcha_value == -1:
        return 0
    # Randomised profile data for the second registration step.
    username = generate_random_string(
        10, 12, "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ")
    day = str(random.randint(1, 28))
    month = str(random.randint(1, 12))
    year = str(random.randint(1980, 2010))
    sex = ['M', 'W']
    multipart_encoder = MultipartEncoder(
        fields={
            'username': username,
            'comm': '0',
            'sexchar': random.choice(sex),
            'day': day,
            'month': month,
            'year': year,
            'city': '1870',
            'icq': '',
            'emails': '',
            'addinfo': username,
            'avatarfile': ('', '', 'application/octet-stream'),
            'g-recaptcha-response': captcha_value,
            'dailynews': '1',
            'Submit.x': '80',
            'Submit.y': '20',
            'familyname': '',
            'firstname': '',
            'password': registerData['password1'],
            'email': email_and_passwd[0],
            'passwordconfirm': registerData['password1'],
            'imagehash': '',
            'regkey': '',
            'invite_id': '0',
            'regkeynb': '',
            'url_redirect': '',
            'url2': '',
            'action': 'add_step1',
            'h': h,
            'id': id,
            'tok': email_tok,
        },
        boundary='----WebKitFormBoundary' + generate_random_string(
            16, 16, 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ'),
    )
    headers['Content-Type'] = multipart_encoder.content_type
    g_var.logger.info("注册第二步,邮箱验证提交信息中...")
    url_email_prove = 'http://www.liveinternet.ru/journal_register.php'
    html = requestsW.post(url_email_prove, proxies=ip_proxy("en"),
                          data=multipart_encoder, headers=headers,
                          allow_redirects=False, timeout=g_var.TIMEOUT)
    if html == -1:
        return html
    if not html.headers.get('Set-Cookie', None):
        g_var.logger.info('第二步邮箱验证信息提交失败...')
        return 0
    # Persist the registered account to the database.
    try:
        set_cookie = html.headers['Set-Cookie']
        user_Id = re.findall('bbuserid=(.*?);', set_cookie)
        user_password = re.findall('bbpassword=(.*?);', set_cookie)
        cookie = user_Id[0] + '|_|' + user_password[0]
        # NOTE(review): SQL built by string concatenation -- injection-prone;
        # use parameterized queries if MysqlHandler supports them.
        sql = "INSERT INTO " + present_website + "(username, password, mail, cookie) VALUES('" + \
            username + "', '" + registerData['password1'] + "', '" + email_and_passwd[0] + "', '" + cookie + "');"
        last_row_id = MysqlHandler().insert(sql)
        g_var.logger.info(last_row_id)
        if last_row_id != -1:
            g_var.logger.info('注册成功!' + username)
            userData = {
                'id': last_row_id,
                'username': username,
                'password': registerData['password1'],
                'cookie': cookie,
            }
            return userData
        else:
            g_var.ERR_CODE = 2004
            g_var.ERR_MSG = "数据库插入用户注册数据失败..."
            g_var.logger.error("数据库插入用户注册数据失败...")
            return 0
    except Exception as e:
        g_var.logger.info(e)
        g_var.ERR_CODE = 2004
        g_var.ERR_MSG = "数据库插入用户注册数据异常..."
        g_var.logger.error("数据库插入用户注册数据异常...")
        return 0
class TestMultipartEncoder(unittest.TestCase):
    """Variant of the MultipartEncoder test suite that uses
    ``encode_multipart_formdata`` as the reference and ``len(...)`` for size
    checks, including the issue-#31 streaming regression tests."""

    def setUp(self):
        self.parts = [('field', 'value'), ('other_field', 'other_value')]
        self.boundary = 'this-is-a-boundary'
        self.instance = MultipartEncoder(self.parts, boundary=self.boundary)

    def test_to_string(self):
        assert self.instance.to_string() == (
            '--this-is-a-boundary\r\n'
            'Content-Disposition: form-data; name="field"\r\n\r\n'
            'value\r\n'
            '--this-is-a-boundary\r\n'
            'Content-Disposition: form-data; name="other_field"\r\n\r\n'
            'other_value\r\n'
            '--this-is-a-boundary--\r\n'
        ).encode()

    def test_content_type(self):
        expected = 'multipart/form-data; boundary=this-is-a-boundary'
        assert self.instance.content_type == expected

    def test_encodes_data_the_same(self):
        # The reference encoding and the streaming encoder must agree.
        encoded = encode_multipart_formdata(self.parts, self.boundary)[0]
        assert encoded == self.instance.read()

    def test_streams_its_data(self):
        large_file = LargeFileMock()
        parts = {'some field': 'value', 'some file': large_file, }
        encoder = MultipartEncoder(parts)
        total_size = len(encoder)
        read_size = 1024 * 1024 * 128
        already_read = 0
        while True:
            read = encoder.read(read_size)
            already_read += len(read)
            if not read:
                break
            # The internal buffer must never grow past one read chunk.
            assert encoder._buffer.tell() <= read_size
        assert already_read == total_size

    def test_length_is_correct(self):
        encoded = encode_multipart_formdata(self.parts, self.boundary)[0]
        assert len(encoded) == len(self.instance)

    def test_encodes_with_readable_data(self):
        s = io.BytesIO(b'value')
        m = MultipartEncoder([('field', s)], boundary=self.boundary)
        assert m.read() == (
            '--this-is-a-boundary\r\n'
            'Content-Disposition: form-data; name="field"\r\n\r\n'
            'value\r\n'
            '--this-is-a-boundary--\r\n'
        ).encode()

    def test_reads_open_file_objects(self):
        with open('setup.py', 'rb') as fd:
            m = MultipartEncoder([('field', 'foo'), ('file', fd)])
            assert m.read() is not None

    def test_reads_open_file_objects_with_a_specified_filename(self):
        with open('setup.py', 'rb') as fd:
            m = MultipartEncoder(
                [('field', 'foo'), ('file', ('filename', fd, 'text/plain'))]
            )
            assert m.read() is not None

    def test_reads_open_file_objects_using_to_string(self):
        with open('setup.py', 'rb') as fd:
            m = MultipartEncoder([('field', 'foo'), ('file', fd)])
            assert m.to_string() is not None

    def test_handles_encoded_unicode_strings(self):
        m = MultipartEncoder([
            ('field',
             b'this is a unicode string: \xc3\xa9 \xc3\xa1 \xc7\xab \xc3\xb3')
        ])
        assert m.read() is not None

    def test_handles_uncode_strings(self):
        s = b'this is a unicode string: \xc3\xa9 \xc3\xa1 \xc7\xab \xc3\xb3'
        m = MultipartEncoder([
            ('field', s.decode('utf-8'))
        ])
        assert m.read() is not None

    def test_regresion_1(self):
        """Ensure issue #31 doesn't ever happen again."""
        fields = {
            "test": "t" * 100
        }

        for x in range(30):
            fields['f%d' % x] = (
                'test', open('tests/test_multipart_encoder.py', 'rb')
            )

        m = MultipartEncoder(fields=fields)
        total_size = len(m)

        blocksize = 8192
        read_so_far = 0

        while True:
            data = m.read(blocksize)
            if not data:
                break
            read_so_far += len(data)

        assert read_so_far == total_size

    def test_regression_2(self):
        """Ensure issue #31 doesn't ever happen again."""
        fields = {
            "test": "t" * 8100
        }

        m = MultipartEncoder(fields=fields)
        total_size = len(m)

        blocksize = 8192
        read_so_far = 0

        while True:
            data = m.read(blocksize)
            if not data:
                break
            read_so_far += len(data)

        assert read_so_far == total_size
def tmp_func():
    """Experimental scratch function: publishes a photo to a Facebook group.

    Only the Graph API ``/photos`` POST near the bottom is live; earlier
    experiments (wall posts, chunked video upload) existed only as
    commented-out code and unused locals and were removed.  Relies on the
    module-level names ``facebook``, ``requests``, ``fb_access_token`` and
    ``photo_url2``.
    """
    graph = facebook.GraphAPI(access_token=fb_access_token)
    print(graph)

    access_token = fb_access_token
    photo = photo_url2
    print(access_token)

    # Parameters for the Graph API /photos edge.
    data = [
        ('url', photo),                  # URL of the photo to publish
        ('caption', 'Test text'),        # caption text
        ('access_token', access_token),
    ]

    video_url = 'https://vk.com/video-57876954_456253566'

    # Chunked-upload encoder kept from the video experiments; it is only
    # printed below, never posted.
    m = MultipartEncoder(fields=dict(
        access_token=access_token,
        upload_phase='transfer',
        start_offset='0',
        upload_session_id='1363041',
        video_file_chunk=(video_url, 'multipart-form/data'),
    ))

    print(data)
    print(m)

    # Publish the photo to the group's /photos edge.
    fb = requests.post('https://graph.facebook.com/1431304253689661/photos', data=data)
    print(fb.text)

    # Read back the user's feed.
    feed = graph.get_connections("me", "feed")
    post = feed["data"]
    print(post)
def test_no_parts(self):
    """An encoder with no fields emits only the closing boundary marker."""
    marker = '--90967316f8404798963cce746a4f4ef9'
    encoder = MultipartEncoder(fields=[], boundary=marker)
    body = encoder.read().decode('utf-8')
    assert body == '----90967316f8404798963cce746a4f4ef9--\r\n'
def test_handles_uncode_strings(self):
    """Native (decoded) unicode field values are accepted and encodable."""
    text = b'this is a unicode string: \xc3\xa9 \xc3\xa1 \xc7\xab \xc3\xb3'.decode('utf-8')
    encoder = MultipartEncoder([('field', text)])
    assert encoder.read() is not None
if __name__ == '__main__':
    # Collect the sample result data.
    data = get_result_data()

    # Write sample data to a file to upload.
    # BUG FIX: json.dumps() returns str, so the file must be opened in text
    # mode ('w'); the original 'wb' raised TypeError on Python 3.
    filename = 'sample_result.json'
    with open(filename, 'w') as f:
        f.write(json.dumps(data))

    # Prepare the multipart request for upload; keep a handle on the file so
    # it can be closed after the POST instead of leaking.
    data_type = 'application/json'
    upload_fh = open(filename, 'rb')
    try:
        multipartblob = MultipartEncoder(
            fields={
                'file': (filename, upload_fh, data_type),
                'category': 'Result',
            }
        )
        headers = {
            'Content-Type': multipartblob.content_type,
            'Auth-Token': AUTH_TOKEN,
        }
        url = BASE_URL + '/uploads'
        # Make the post to upload data.
        response = requests.post(url, data=multipartblob, headers=headers)
    finally:
        upload_fh.close()

    # Process the response.
    data = json.loads(response.text)
    command_id = data['result']['command_id']
    results_link = data['result']['info']['results_link']
def webwxuploadmedia(self, file_path):
    """
    @brief upload a media file (image/document) to the WeChat web server
    @param file_path String path of the file to upload
    @return Dict: parsed JSON response when the server reports success
            (BaseResponse.Ret == 0), otherwise None
    """
    url = self.wx_conf['API_webwxuploadmedia'] + '?f=json'
    # Upload counter; used for the WU_FILE_<n> part id below.
    self.media_count = self.media_count + 1
    fn = file_path
    # mime_type examples:
    #   'application/pdf'
    #   'image/jpeg'
    #   'image/png'
    #   ...
    mime_type = mimetypes.guess_type(fn, strict=False)[0]
    if not mime_type:
        # Fall back to a generic document type when the extension is unknown.
        mime_type = 'text/plain'
    # The WeChat server currently appears to support three media types:
    #   pic   shown inline (pictures, stickers)
    #   video (purpose unclear)
    #   doc   shown as a file attachment (PDF etc.)
    media_type = 'pic' if mime_type.split('/')[0] == 'image' else 'doc'
    time_format = "%a %b %d %Y %T GMT%z (%Z)"
    last_modifie_date = time.strftime(time_format, time.localtime())
    file_size = os.path.getsize(fn)
    pass_ticket = self.pass_ticket
    # Millisecond timestamp + 4 random digits, mimicking the web client.
    client_media_id = str(int(time.time() * 1000)) + \
        str(random.random())[:5].replace('.', '')
    webwx_data_ticket = ''
    for item in self.cookie:
        if item.name == 'webwx_data_ticket':
            webwx_data_ticket = item.value
            break
    if (webwx_data_ticket == ''):
        Log.error("No Cookie\n")
        return None
    uploadmediarequest = json.dumps(
        {
            "BaseRequest": self.base_request,
            "ClientMediaId": client_media_id,
            "TotalLen": file_size,
            "StartPos": 0,
            "DataLen": file_size,
            "MediaType": 4
        },
        ensure_ascii=False).encode('utf8')
    multipart_encoder = MultipartEncoder(
        fields={
            'id': 'WU_FILE_' + str(self.media_count),
            'name': fn,
            'type': mime_type,
            'lastModifieDate': last_modifie_date,
            'size': str(file_size),
            'mediatype': media_type,
            'uploadmediarequest': uploadmediarequest,
            'webwx_data_ticket': webwx_data_ticket,
            'pass_ticket': pass_ticket,
            # NOTE(review): the part content-type is only the mime subtype
            # (e.g. 'jpeg'), not the full type -- presumably what the
            # endpoint expects; confirm before changing.
            'filename': (fn, open(fn, 'rb'), mime_type.split('/')[1])
        },
        boundary=('-----------------------------'
                  '1575017231431605357584454111'))
    headers = {
        'Host': self.wx_filehost,
        'User-Agent': self.user_agent,
        'Accept': ('text/html,application/xhtml+xml,'
                   'application/xml;q=0.9,*/*;q=0.8'),
        'Accept-Language': 'en-US,en;q=0.5',
        'Accept-Encoding': 'gzip, deflate',
        'Referer': 'https://' + self.wx_host,
        'Content-Type': multipart_encoder.content_type,
        'Origin': 'https://' + self.wx_host,
        'Connection': 'keep-alive',
        'Pragma': 'no-cache',
        'Cache-Control': 'no-cache',
    }
    try:
        r = self.session.post(url, data=multipart_encoder,
                              headers=headers, timeout=5)
        dic = json.loads(r.text)
        if dic['BaseResponse']['Ret'] == 0:
            return dic
    except ReadTimeout:
        echo('Timeout\n')
    return None
# Log in to the music service; `session` keeps the auth cookie.
url = 'http://10.68.170.184:8080/music/api/login'
result = session.post(url, 'username=admin&password=123456')
res = result.text
print(res)
# Fetch user info.
url = 'http://10.68.170.184:8080/music/api/user/1'
result = session.get(url)
res = result.text
print(res)
url = 'http://10.68.170.184:8080/music/api/song/upload'
# # Upload a file.
# Path of the local file to upload.
path = 'G:\\music_data\\aa.mp3'
mm = MultipartEncoder(fields={
    'speed': '1',
    'styleId': 'c0a4bd86-a09b-43ac-8169-14bb69630ac0',
    'file': ('file', open(path, 'rb'))
}, )
ss = mm.boundary
# print(ss)
# print('生成Content-Type' + str(mm.content_type))
# session.headers['Content-Type'] = mm.content_type
# result = session.post(url=url, data=mm)
# res = result.text
# print(result.status_code)
# print(res)
# Debug data: build the multipart Content-Type header by hand from the
# encoder's boundary.
session.headers['Content-Type'] = 'multipart/form-data; boundary=' + ss + ''
print(session.headers['Content-Type'])
# Script fragment: posts a plain-field multipart form to an Odoo test
# endpoint.  The two original `headers = ...` assignments were dead code
# (never used -- the POST below passes its headers inline) and were removed.
url = "http://192.168.181.135:8069/api/test/"
data = {
    'email': '*****@*****.**',
    'name': '杨向晴',
    'name2': 'aa',
    'order_no': 'aa',
    'province': 'aa'
}
from requests_toolbelt.multipart.encoder import MultipartEncoder
mp_encoder = MultipartEncoder(
    fields=data
    # fields={
    #     'foo': 'bar',
    #     # plain file object, no filename or mime type produces a
    #     # Content-Disposition header with just the part name
    #     'spam': ('spam.txt', open('spam.txt', 'rb'), 'text/plain'),
    # }
)
r = requests.post(
    url,
    # The MultipartEncoder is posted as data, don't use files=...!
    data=mp_encoder,
    # The MultipartEncoder provides the content-type header with the boundary:
    headers={
        'Accept': 'application/json',
        'Content-Type': mp_encoder.content_type
    })
def test_reads_open_file_objects_with_a_specified_filename(self):
    """A (filename, fileobj, mimetype) tuple is accepted as a file part."""
    with open('setup.py', 'rb') as source:
        file_part = ('filename', source, 'text/plain')
        encoder = MultipartEncoder([('field', 'foo'), ('file', file_part)])
        assert encoder.read() is not None
class TestMultipartEncoder(unittest.TestCase):
    """Tests for requests_toolbelt's MultipartEncoder.

    Covers exact wire-format output, Content-Type generation, streamed
    (chunked) reads against large mock files, and regression cases.
    """

    def setUp(self):
        # Shared fixture: two simple text parts and a deterministic boundary.
        self.parts = [('field', 'value'), ('other_field', 'other_value')]
        self.boundary = 'this-is-a-boundary'
        self.instance = MultipartEncoder(self.parts, boundary=self.boundary)

    def test_to_string(self):
        # Pin the exact RFC 2388 wire format, including CRLF separators
        # and the trailing '--' on the closing boundary.
        assert self.instance.to_string() == (
            '--this-is-a-boundary\r\n'
            'Content-Disposition: form-data; name="field"\r\n\r\n'
            'value\r\n'
            '--this-is-a-boundary\r\n'
            'Content-Disposition: form-data; name="other_field"\r\n\r\n'
            'other_value\r\n'
            '--this-is-a-boundary--\r\n'
        ).encode()

    def test_content_type(self):
        # The header value embeds the chosen boundary verbatim.
        expected = 'multipart/form-data; boundary=this-is-a-boundary'
        assert self.instance.content_type == expected

    def test_encodes_data_the_same(self):
        # The streaming encoder must match urllib3's one-shot encoder.
        encoded = encode_multipart_formdata(self.parts, self.boundary)[0]
        assert encoded == self.instance.read()

    def test_streams_its_data(self):
        # A ~123 MB mock file must be streamed: the internal buffer may
        # never hold more than one read_size worth of data at a time.
        large_file = LargeFileMock(123456789)
        parts = {'some field': 'value', 'some file': large_file, }
        encoder = MultipartEncoder(parts)
        total_size = len(encoder)
        read_size = 1024 * 1024 * 128
        already_read = 0
        while True:
            read = encoder.read(read_size)
            already_read += len(read)
            if not read:
                break
            assert encoder._buffer.tell() <= read_size
        assert already_read == total_size

    def test_streams_its_data_with_correct_length(self):
        # Fuzz: random file sizes read with many different chunk sizes,
        # including sizes straddling the file size itself.
        for i in range(0, 100):  # or more than 100 to increase fuzzing strength
            file_size = random.randint(0, 12345)
            if random.random() < 0.1:
                file_size = 0  # sometimes we check with an empty file
            self.check_read_file_with_chunks(file_size, read_size=1)
            self.check_read_file_with_chunks(file_size, read_size=2)
            self.check_read_file_with_chunks(file_size, read_size=3)
            # NOTE(review): this random read_size is computed but never
            # used -- the next call hard-codes read_size=1. Looks like the
            # intent was check_read_file_with_chunks(file_size, read_size);
            # confirm before changing, since read_size could be 0 here.
            read_size = random.randint(0, 2*file_size)
            self.check_read_file_with_chunks(file_size, read_size=1)
            for read_size in range(file_size - 10, file_size + 200):
                if read_size < -1 or read_size == 0:
                    continue
                self.check_read_file_with_chunks(file_size, read_size)

    def check_read_file_with_chunks(self, file_size, read_size):
        """Read a file_size-byte payload in read_size chunks and verify every
        chunk matches the reference encoding byte-for-byte."""
        boundary = "deterministic-test-boundary"
        # Reference: encode the whole payload in one shot with urllib3.
        a_file = LargeFileMock(file_size)
        parts = {'some_field': 'this is the value...', 'some_file': a_file.read(), }
        expected_bytes = encode_multipart_formdata(parts, boundary)[0]
        content_length = len(expected_bytes)
        # Now read from our encoder :
        a_file = LargeFileMock(file_size)
        parts = {'some_field': 'this is the value...', 'some_file': a_file, }
        encoder = MultipartEncoder(parts, boundary=boundary)
        raw_bytes_count = 0
        while True:
            data = encoder.read(read_size)
            if not data:
                break
            # Each chunk must match the corresponding slice of the reference.
            assert data == expected_bytes[raw_bytes_count:raw_bytes_count+len(data)]
            raw_bytes_count += len(data)
        assert raw_bytes_count == content_length

    def test_length_is_correct(self):
        # len(encoder) must equal the length of the fully-encoded body.
        encoded = encode_multipart_formdata(self.parts, self.boundary)[0]
        assert len(encoded) == len(self.instance)

    def test_encodes_with_readable_data(self):
        # A bare file-like object (BytesIO) is accepted as a part value.
        s = io.BytesIO(b'value')
        m = MultipartEncoder([('field', s)], boundary=self.boundary)
        assert m.read() == (
            '--this-is-a-boundary\r\n'
            'Content-Disposition: form-data; name="field"\r\n\r\n'
            'value\r\n'
            '--this-is-a-boundary--\r\n'
        ).encode()

    def test_reads_open_file_objects(self):
        with open('setup.py', 'rb') as fd:
            m = MultipartEncoder([('field', 'foo'), ('file', fd)])
            assert m.read() is not None

    def test_reads_open_file_objects_with_a_specified_filename(self):
        # (filename, fileobj, mimetype) tuple form of a file part.
        with open('setup.py', 'rb') as fd:
            m = MultipartEncoder(
                [('field', 'foo'), ('file', ('filename', fd, 'text/plain'))]
            )
            assert m.read() is not None

    def test_reads_open_file_objects_using_to_string(self):
        with open('setup.py', 'rb') as fd:
            m = MultipartEncoder([('field', 'foo'), ('file', fd)])
            assert m.to_string() is not None

    def test_handles_encoded_unicode_strings(self):
        # Byte strings containing UTF-8 encoded text are accepted as-is.
        m = MultipartEncoder([
            ('field', b'this is a unicode string: \xc3\xa9 \xc3\xa1 \xc7\xab \xc3\xb3')
        ])
        assert m.read() is not None

    def test_handles_uncode_strings(self):
        # Decoded (str) unicode values are accepted as well.
        s = b'this is a unicode string: \xc3\xa9 \xc3\xa1 \xc7\xab \xc3\xb3'
        m = MultipartEncoder([
            ('field', s.decode('utf-8'))
        ])
        assert m.read() is not None

    def test_regresion_1(self):
        """Ensure issue #31 doesn't ever happen again."""
        # NOTE(review): the 30 file handles opened in this loop are never
        # closed (they must stay open while the encoder reads them); they
        # leak until garbage collection. Also note the 'regresion' typo in
        # the method name -- renaming would be safe but is left as-is here.
        fields = {
            "test": "t" * 100
        }
        for x in range(30):
            fields['f%d' % x] = (
                'test', open('tests/test_multipart_encoder.py', 'rb')
            )
        m = MultipartEncoder(fields=fields)
        total_size = len(m)
        blocksize = 8192
        read_so_far = 0
        while True:
            data = m.read(blocksize)
            if not data:
                break
            read_so_far += len(data)
        assert read_so_far == total_size

    def test_regression_2(self):
        """Ensure issue #31 doesn't ever happen again."""
        # A single field slightly smaller than the read block size.
        fields = {
            "test": "t" * 8100
        }
        m = MultipartEncoder(fields=fields)
        total_size = len(m)
        blocksize = 8192
        read_so_far = 0
        while True:
            data = m.read(blocksize)
            if not data:
                break
            read_so_far += len(data)
        assert read_so_far == total_size
def test_handles_encoded_unicode_strings(self):
    """Byte strings containing UTF-8 encoded text are accepted as part values."""
    encoded_value = b'this is a unicode string: \xc3\xa9 \xc3\xa1 \xc7\xab \xc3\xb3'
    encoder = MultipartEncoder([('field', encoded_value)])
    assert encoder.read() is not None
def __postMessage(self, Session, loginData, present_website):
    """Publish a profile link ("article") on mixcloud.com via its GraphQL API.

    Args:
        Session: requests Session object carrying the logged-in state.
        loginData: user info dict -- user_id, username, password, cookie,
            plus the csrftoken / c cookie values read below.
        present_website: current site name, used for the database table name.

    Returns:
        loginData on success.
        -1: the requests call failed; the proxy should be rotated.
        -2: the page changed and the request payload could not be built.
        0:  database insert/update error, or the post was rejected.
    """
    g_var.logger.info("post link...")
    headers = generate_headers(1, loginData['csrftoken'])
    if headers == -1:
        # Could not build the registration headers.
        g_var.logger.info("获取注册headers失败...")
        return -1
    # Rebuild the cookie header from the stored csrftoken / c values.
    cookie = 'csrftoken=' + loginData['csrftoken'] + '; c=' + loginData['c']
    headers['Cookie'] = cookie
    biog = get_new_link()
    if biog == -1:
        return -1
    try:
        # One random alphanumeric char doubles as the GraphQL request id
        # and the clientMutationId embedded in the variables payload.
        num = string.ascii_letters + string.digits
        i = random.choice(num)
        # Hand-built JSON for the ChangeProfile mutation: the link (biog)
        # is written into the profile's "biog" field.
        variables = '{"input_0":{"country":null,"birthyear":null,"menuItems":[{"itemType":"STREAM","hidden":false,"inDropdown":false},{"itemType":"UPLOADS","hidden":false,"inDropdown":false},{"itemType":"FAVORITES","hidden":false,"inDropdown":false},{"itemType":"LISTENS","hidden":false,"inDropdown":false}],"displayName":"' + loginData[
            'username'] + '","biog":"' + biog + '","city":null,"gender":null,"brandedProfile":{"backgroundTiled":null,"backgroundColor":null},"clientMutationId":"' + i + '"}}'
        multipart_encoder = MultipartEncoder(
            fields={
                'id': i,
                'query': 'mutation ChangeProfileMutation($input_0:ChangeProfileMutationInput!) {changeProfile(input:$input_0) {viewer {me {percentageComplete,displayName,biog,city,countryCode,country,gender,birthYear,brandedProfile {backgroundTiled,backgroundPicture {urlRoot},backgroundColor},picture {urlRoot,primaryColor},coverPicture {urlRoot,primaryColor},_profileNavigation3Jqt8o:profileNavigation(showHidden:true) {menuItems {__typename,...F0,...F1,...F2}},profileNavigation {menuItems {__typename,...F0,...F1,...F2}},id},id},clientMutationId}} fragment F0 on NavigationItemInterface {inDropdown,__typename} fragment F1 on HideableNavigationItemInterface {hidden,__typename} fragment F2 on PlaylistNavigationItem {count,playlist {id,name,slug}}',
                'variables': variables,
                'picture': 'undefined',
                'coverPicture': 'undefined',
                'backgroundPicture': 'undefined',
                # These mimic the browser's JS upload machinery verbatim.
                '_onProgress': 'function(e,n){t.forEach(function(t){t(e,n)})}',
                '_aborter': '[object Object]',
                '_useUploadServers': 'undefined',
            },
            # Mimic a WebKit-generated multipart boundary.
            boundary='------WebKitFormBoundary' + generate_random_string(
                16, 16, 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ'),
        )
        headers['Content-Type'] = multipart_encoder.content_type
    except Exception as e:
        # Payload construction failed (page layout / tokens changed).
        g_var.ERR_CODE = 5000
        g_var.ERR_MSG = "推送个人链接数据生成中出现异常..."
        g_var.logger.info("推送个人链接数据生成中出现异常...")
        g_var.logger.info(e)
        return -2
    url_link = 'https://www.mixcloud.com/graphql'
    g_var.logger.info("推送链接中...")
    html = Session.post(url_link, data=multipart_encoder, headers=headers,
                        timeout=g_var.TIMEOUT)
    if html == -1:
        return html
    # Success criterion: the posted link appears in the response body.
    if biog in html.text:
        g_var.logger.info("链接发送成功!" + loginData["username"])
        # Store the link and user into the mixcloud_com_article table.
        url = 'https://www.mixcloud.com/' + loginData['username'] + '/'
        # SECURITY NOTE(review): SQL is built by string concatenation --
        # vulnerable to SQL injection if username/id are attacker-controlled.
        # Should use parameterized queries via MysqlHandler if supported.
        sql = "INSERT INTO mixcloud_com_article(url, user_id) VALUES('" + url + "', '" + str(
            loginData['id']) + "');"
        # Serialize inserts across worker threads.
        if g_var.insert_article_lock.acquire():
            last_row_id = MysqlHandler().insert(sql)
            g_var.insert_article_lock.release()
        if last_row_id != -1:
            g_var.logger.info("insert article OK")
        else:
            g_var.logger.error("数据库插入链接错误!")
            return 0
        return loginData
    else:
        g_var.logger.error("链接发送失败!..." + str(html.status_code))
        g_var.ERR_CODE = 5000
        g_var.ERR_MSG = g_var.ERR_MSG + "|_|" + "链接发送失败,未知错误!"
        return 0
def upload(self, local_file_path, ipuuid=None, chunk_size=1048576 * 10):
    """Upload a local file to the REST endpoint in resumable chunks.

    The file is split into chunk_size pieces; the first POST creates the
    upload and returns an upload_id, each subsequent POST appends a chunk
    (with a Content-Range header), and a final POST to '<endpoint>_complete'
    seals the upload with the file's MD5 checksum.

    @type local_file_path: string
    @param local_file_path: Local file path
    @type ipuuid: string
    @param ipuuid: optional identifier forwarded to the completion request
    @type chunk_size: int
    @param chunk_size: chunk size in bytes (default 10 MiB); clamped to the
        file size for small files
    @rtype: string
    @return: Success message
    @raise UploadError: when any chunk or the completion POST fails
    """
    if local_file_path is not None:
        # MD5 over the whole file, accumulated chunk by chunk.
        hash = hashlib.md5()
        # Strip the directory part to get the bare filename.
        filename = local_file_path.rpartition('/')[2]
        file_size = os.path.getsize(local_file_path)
        if chunk_size > file_size:
            chunk_size = file_size
        chunks = FileBinaryDataChunks(local_file_path, chunk_size,
                                      self.progress_reporter).chunks()
        num = 0
        offset_start = 0
        offset_stop = chunk_size - 1
        upload_id = ''
        for chunk in chunks:
            hash.update(chunk)
            if num == 0:
                # First chunk: no upload_id yet -- the server assigns one.
                HTTP_CONTENT_RANGE = 'bytes %s-%s/%s' % (
                    offset_start, offset_stop, file_size)
                m = MultipartEncoder(
                    fields={
                        'the_file': (filename, chunk,
                                     'application/octet-stream'),
                    })
            else:
                # Advance the byte window; clamp the stop offset for the
                # (possibly shorter) final chunk.
                offset_start = offset_stop + 1
                offset_stop = offset_stop + chunk_size
                if offset_stop > file_size:
                    offset_stop = file_size - 1
                HTTP_CONTENT_RANGE = 'bytes %s-%s/%s' % (
                    offset_start, offset_stop, file_size)
                m = MultipartEncoder(
                    fields={
                        'upload_id': upload_id,
                        'the_file': (filename, chunk,
                                     'application/octet-stream'),
                    })
            headers = {
                'Content-Type': m.content_type,
                'Content-Range': HTTP_CONTENT_RANGE
            }
            try:
                r = self.requests_post(self.rest_endpoint, data=m,
                                       headers=headers)
            except UploadPostWarning as e:
                raise UploadError(e)
            r_json = r.json()
            # The server echoes back the upload session state.
            upload_id = r_json['upload_id']
            offset = r_json['offset']
            expires = r_json['expires']
            num += 1
        # Completion request: seal the upload with its MD5 checksum.
        m = MultipartEncoder(fields={
            'upload_id': upload_id,
            'md5': hash.hexdigest(),
            'ipuuid': ipuuid
        })
        headers = {'Content-Type': m.content_type}
        try:
            r = self.requests_post(self.rest_endpoint + '_complete', data=m,
                                   headers=headers)
        except UploadPostWarning as e:
            raise UploadError(e)
        return 'Success to upload %s' % local_file_path
def _upload_file(self, path, api_endpoint="upload", **extra_params):
    """Upload the file found at ``path`` to talus, returning an id

    :path: The (local) path to the file
    :api_endpoint: API route name to post to (default "upload")
    :extra_params: additional multipart fields sent alongside the file
    :returns: An id for the remote file
    :raises errors.TalusApiError: when the path does not exist, the server
        cannot be reached, or the upload is rejected
    """
    if not os.path.exists(path):
        raise errors.TalusApiError(
            "Cannot upload image, path {!r} does not exist".format(path))

    total_size = os.path.getsize(path)
    self.last_update = ""

    def print_progress(monitor):
        # Redraw an in-place percentage: backspace over the previous text,
        # then print the new, padded percentage.
        sys.stdout.write("\b" * len(self.last_update))
        percent = float(monitor.bytes_read) / monitor.len
        update = "{:0.2f}%".format(percent * 100)
        if len(update) < 7:
            # BUG FIX: the padded string used to be assigned to a throwaway
            # variable ``u``, so the right-alignment to 7 chars never took
            # effect. Assign back to ``update``.
            update = " " * (7 - len(update)) + update
        if len(update) < len(self.last_update):
            # Pad with spaces so remnants of a longer previous value are
            # fully overwritten.
            update += " " * (len(self.last_update) - len(update))
        sys.stdout.write(update)
        sys.stdout.flush()
        self.last_update = update

    data = {
        "file": (os.path.basename(path), open(path, "rb"),
                 "application/octet-stream")
    }
    data.update(extra_params)
    encoder = MultipartEncoder(fields=data)
    monitor = MultipartEncoderMonitor(encoder, print_progress)
    try:
        res = requests.post(
            self._api_base + "/api/{}/".format(api_endpoint),
            data=monitor,
            headers={"Content-Type": encoder.content_type},
            timeout=(60 * 60)  # super long timeout for uploading massive files!
        )
    except requests.ConnectionError:
        # (renamed from ``as e`` -- it shadowed the encoder local and the
        # exception value was never used)
        raise errors.TalusApiError("Could not connect to {}".format(
            self._api_base + "/api/{}/".format(api_endpoint)))

    # clear out the last of the progress percent that was printed
    print("\b" * len(self.last_update))

    if res.status_code // 100 != 2:
        raise errors.TalusApiError("Could not upload file!", error=res.text)

    # The server may wrap the id in quotes; strip them before returning.
    if res.text[0] in ["'", '"']:
        return res.text[1:-1]
    return res.text
def _upload(filename, file, password=None):
    """Encrypt and upload a file to Firefox Send (send.firefox.com).

    The file is AES-GCM encrypted client-side while streaming; metadata
    (iv, name, mimetype) is encrypted separately and sent in a header.
    Returns (url, ownerToken) on success, (None, None) on failure. The
    returned url embeds the secret after '#' so the server never sees it.
    """
    filename = os.path.basename(filename)
    # Per-upload secret from which all keys are derived.
    secret = os.urandom(16)
    iv = os.urandom(12)
    encryptKey = deriveFileKey(secret)
    authKey = deriveAuthKey(secret)
    metaKey = deriveMetaKey(secret)
    # File cipher uses a random IV; the metadata cipher uses an all-zero
    # nonce (safe only because metaKey is unique per upload).
    fileCipher = AES.new(encryptKey, AES.MODE_GCM, iv, mac_len=16)
    metaCipher = AES.new(metaKey, AES.MODE_GCM, b'\x00' * 12, mac_len=16)
    mimetype = mimetypes.guess_type(
        filename, strict=False)[0] or 'application/octet-stream'
    print("Uploading as mimetype", mimetype)
    # Encrypt the JSON metadata and append the GCM auth tag.
    metadata = {"iv": b64encode(iv), "name": filename, "type": mimetype}
    metadata = metaCipher.encrypt(json.dumps(metadata).encode('utf8'))
    metadata += metaCipher.digest()
    # LazyEncryptedFileWithTag encrypts on the fly as the encoder reads,
    # so the whole file never sits in memory.
    mpenc = MultipartEncoder(
        fields={
            'data': (filename,
                     LazyEncryptedFileWithTag(file, fileCipher, taglen=16),
                     'application/octet-stream')
        })
    mpmon = MultipartEncoderMonitor(mpenc,
                                    callback=upload_progress_callback(mpenc))
    try:
        resp = requests.post('https://send.firefox.com/api/upload',
                             data=mpmon,
                             headers={
                                 'X-File-Metadata': b64encode(metadata),
                                 'Authorization': 'send-v1 ' + b64encode(authKey),
                                 'Content-Type': mpmon.content_type
                             })
        print()
        resp.raise_for_status()
        res = resp.json()
        # The secret travels in the URL fragment -- never sent to the server.
        url = res['url'] + '#' + b64encode(secret)
        ownerToken = res['owner']
        if password is not None:
            # Re-derive the auth key with the password and register it.
            fid, secret = parse_url(url)
            newAuthKey = deriveAuthKey(secret, password, url)
            resp = requests.post('https://send.firefox.com/api/password/' + fid,
                                 headers={'Content-Type': 'application/json'},
                                 json={
                                     'auth': b64encode(newAuthKey),
                                     'owner_token': ownerToken
                                 })
            resp.raise_for_status()
        print("Your download link is", url)
        print("Owner token is", ownerToken)
        return url, ownerToken
    except Exception as ex:
        # Best-effort CLI tool: report and signal failure via (None, None).
        print("\nAn exception occured while uploading file:", ex)
        return None, None
def import_dicom_files(hc_api, hc_dicomstore, dcm_ids, fw_api, fw_project,
                       de_identify=False):
    """Copy DICOM series from a Google Healthcare dicomstore into Flywheel.

    For each matching series: download its instances to a temp dir, pack
    them (optionally de-identifying), map the patient to a master subject
    code, and upload each packed file with its metadata via 'upload/uid'.

    hc_api / hc_dicomstore: Healthcare API client and dicomstore name.
    dcm_ids: identifiers passed to search_uids to select studies/series.
    fw_api / fw_project: Flywheel API client and destination project dict
        (must provide 'group' and 'label').
    de_identify: strip PHI during packing when True.
    """
    log.info('Importing DICOM files...')
    dicomweb = hc_api.dicomStores.dicomWeb(name=hc_dicomstore)
    for study_uid, series_uid in search_uids(dicomweb, dcm_ids):
        log.info(' Processing series %s', series_uid)
        # Temp dir is removed automatically after packing/uploading.
        with tempfile.TemporaryDirectory() as tempdir:
            series_dir = os.path.join(tempdir, series_uid)
            os.mkdir(series_dir)
            log.debug(' Downloading...')
            for dicom in dicomweb.retrieve_series(study_uid, series_uid):
                dicom.save_as(os.path.join(series_dir, dicom.SOPInstanceUID))
            log.debug(' Packing...')
            metadata_map = pkg_series(series_dir,
                                      de_identify=de_identify,
                                      timezone=DEFAULT_TZ,
                                      map_key='PatientID')
            log.debug(' Uploading...')
            for filepath, metadata in sorted(metadata_map.items()):
                # Exchange the raw patient_id for a master subject code,
                # then drop it from the metadata that gets uploaded.
                subj_code_payload = {
                    'patient_id': metadata['patient_id'],
                    'use_patient_id': True
                }
                del metadata['patient_id']
                master_subject_code = get_master_subject_code(
                    subj_code_payload, fw_api)
                subject = get_subject_by_master_code(master_subject_code,
                                                     fw_project, fw_api)
                # Route the upload into the destination group/project.
                metadata.setdefault('group', {})['_id'] = fw_project['group']
                metadata.setdefault('project', {})['label'] = fw_project['label']
                # Replace the subject block with the master code, then copy
                # back name fields only where the existing Flywheel subject
                # does not already have them.
                subject_info = copy.deepcopy(metadata['session']['subject'])
                metadata['session']['subject'] = {
                    'master_code': master_subject_code
                }
                for key in ('code', 'firstname', 'lastname'):
                    if not (subject and subject.get(key)) and subject_info.get(key):
                        metadata['session']['subject'][key] = subject_info[key]
                metadata_json = json.dumps(metadata, default=metadata_encoder)
                filename = os.path.basename(filepath)
                # Stream file + metadata as multipart; the file handle must
                # stay open while the encoder is being read by the POST.
                with open(filepath, 'rb') as f:
                    mpe = MultipartEncoder(fields={
                        'metadata': metadata_json,
                        'file': (filename, f)
                    })
                    resp = fw_api.post(
                        'upload/uid',
                        data=mpe,
                        headers={'Content-Type': mpe.content_type})
                    resp.raise_for_status()
# Post a message with an attached image to a Cisco Spark (Webex) room.
kAuth = config['KHALKEUS']['Token']
room = config['MNE']['ID']
images = {
    'ayb': config['IMAGES']['AYB'],
    'developer': config['IMAGES']['Developer'],
    'afx': config['IMAGES']['Afx'],
    'automation': config['IMAGES']['Automation'],
    'hephaestus': config['IMAGES']['Hephaestus'],
    'turk': config['IMAGES']['Turk'],
    'matters': config['IMAGES']['Matters']
}

# FIX: the image file handle used to be opened inline and never closed;
# a `with` block keeps it open for the duration of the streamed POST and
# guarantees it is released afterwards.
with open(images['hephaestus'], 'rb') as image_fh:
    m = MultipartEncoder({
        'roomId': room,
        'text': 'hey boss',
        'files': (images['hephaestus'], image_fh, 'image/png')
    })
    r = requests.post('https://api.ciscospark.com/v1/messages',
                      data=m,
                      headers={
                          'Authorization': 'Bearer {auth}'.format(auth=kAuth),
                          'Content-Type': m.content_type
                      })
print(r.text)
import requests
from requests_toolbelt.multipart.encoder import MultipartEncoder

# BASE_URL = "http://localhost:8000/"
BASE_URL = "http://whiteboard.house:8000/"

# Log in and grab the access token for the upload request.
response = requests.post(
    BASE_URL + "user/login/",
    json={
        "username": "******",
        "password": "******"
    }
)
access = response.json()['access']

# FIX: the upload file handle used to be opened inline and never closed;
# the `with` block releases it once the streamed POST finishes.
# NOTE(review): the declared filename is 'upload.jpeg' while the local file
# is a .png -- confirm whether the server cares about the extension.
with open('/home/tecty/Pictures/upload.png', 'rb') as upload_fh:
    encoder = MultipartEncoder(fields={
        'upload': ('upload.jpeg', upload_fh)
    })
    response = requests.post(
        BASE_URL + "upload/",
        data=encoder,
        headers={
            "Authorization": access,
            'Content-Type': encoder.content_type
        },
    )
print(response.json())
def setUp(self):
    """Create the shared fixture: two text parts and a fixed boundary."""
    self.boundary = 'this-is-a-boundary'
    self.parts = [('field', 'value'), ('other_field', 'other_value')]
    self.instance = MultipartEncoder(self.parts, boundary=self.boundary)
def file_to_civis(buf, name, api_key=None, **kwargs):
    """Upload a file to Civis.

    Parameters
    ----------
    buf : file-like object
        The file or other buffer that you wish to upload.
    name : str
        The name you wish to give the file.
    api_key : str, optional
        Your Civis API key. If not given, the :envvar:`CIVIS_API_KEY`
        environment variable will be used.
    **kwargs : kwargs
        Extra keyword arguments will be passed to the file creation
        endpoint. See :func:`~civis.resources._resources.Files.post`.

    Returns
    -------
    file_id : int
        The new Civis file ID.

    Examples
    --------
    >>> # Upload file which expires in 30 days
    >>> with open("my_data.csv", "r") as f:
    ...     file_id = file_to_civis(f, 'my_data')
    >>> # Upload file which never expires
    >>> with open("my_data.csv", "r") as f:
    ...     file_id = file_to_civis(f, 'my_data', expires_at=None)

    Notes
    -----
    If you are opening a binary file (e.g., a compressed archive) to
    pass to this function, do so using the ``'rb'`` (read binary)
    mode (e.g., ``open('myfile.zip', 'rb')``).

    If you have the `requests-toolbelt` package installed
    (`pip install requests-toolbelt`), then this function will stream
    from the open file pointer into Platform. If `requests-toolbelt`
    is not installed, then it will need to read the entire buffer
    into memory before writing.
    """
    client = APIClient(api_key=api_key)
    # Register the file with Civis; the response carries a pre-signed
    # upload URL plus the form fields S3 requires.
    file_response = client.files.post(name, **kwargs)

    # Platform has given us a URL to which we can upload a file.
    # The file must be uploaded with a POST formatted as per
    # http://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-post-example.html
    # Note that the payload must have "key" first and "file" last --
    # the OrderedDict construction below enforces exactly that ordering.
    form = file_response.upload_fields
    form_key = OrderedDict(key=form.pop('key'))
    form_key.update(form)
    form_key['file'] = buf

    url = file_response.upload_url
    if HAS_TOOLBELT:
        # This streams from the open file buffer without holding the
        # contents in memory.
        en = MultipartEncoder(fields=form_key)
        # len is in bytes; compare against a 100 MiB threshold.
        if en.len / 2**20 < 100:
            # Semi-arbitrary cutoff for "small" files.
            # Send these with requests directly because that uses less CPU
            response = requests.post(url, files=form_key)
        else:
            response = requests.post(url, data=en,
                                     headers={'Content-Type':
                                              en.content_type})
    else:
        response = requests.post(url, files=form_key)

    response.raise_for_status()

    return file_response.id