コード例 #1
0
ファイル: TSDownloader.py プロジェクト: Belfagor2005/WorldCam
 def init(self, out_stream, url, proxy=None, g_stopEvent=None, maxbitRate=0):
     """Initialise the downloader state from *url*.

     An optional '|'-separated suffix on the URL carries extra request
     headers encoded as a query string; it is parsed into a list of
     (name, value) pairs on self.clientHeader.  Returns True on success,
     False (with status 'finished') on any error.
     """
     try:
         self.init_done = False
         self.init_url = url
         self.clientHeader = None
         self.status = 'init'
         self.proxy = proxy
         self.maxbitRate = maxbitRate
         # Normalise an empty proxy string to None.  The original test
         # (`self.proxy and len(self.proxy) == 0`) could never fire,
         # because an empty string is already falsy.
         if self.proxy is not None and len(self.proxy) == 0:
             self.proxy = None
         self.out_stream = out_stream
         self.g_stopEvent = g_stopEvent
         if '|' in url:
             # Split "<url>|<header query string>" and parse the headers.
             sp = url.split('|')
             url = sp[0]
             self.clientHeader = sp[1]
             if PY3:
                 self.clientHeader = urllib.parse.parse_qsl(self.clientHeader)
             else:
                 self.clientHeader = urlparse.parse_qsl(self.clientHeader)
         self.status = 'init done'
         self.url = url
         return True
     except Exception:
         # Narrowed from a bare `except:` so SystemExit/KeyboardInterrupt
         # still propagate.
         traceback.print_exc()
         self.status = 'finished'
     return False
コード例 #2
0
ファイル: fetcher.py プロジェクト: clickear/qiandao_clickear
        def build_request(request):
            """Convert an HTTP request object into a HAR-style request dict."""
            url = urlparse(request.url)
            ret = dict(
                    method = request.method,
                    url = request.url,
                    httpVersion = 'HTTP/1.1',
                    headers = build_headers(request.headers),
                    queryString = [
                        {'name': n, 'value': v} for n, v in
                                parse_qsl(url.query)],
                    cookies = [
                        {'name': n, 'value': v} for n, v in
                                parse_qsl(request.headers.get('cookie', ''))],
                    headersSize = -1,
                    bodySize = len(request.body) if request.body else 0,
                    )
            if request.body:
                ret['postData'] = dict(
                        mimeType = request.headers.get('content-type'),
                        text = request.body,
                        )
                if ret['postData']['mimeType'] == 'application/x-www-form-urlencoded':
                    # BUG FIX: `urlparse` is the *function* used above, so
                    # `urlparse.parse_qsl` raised AttributeError; call the
                    # imported parse_qsl directly, as the other sites do.
                    ret['postData']['params'] = [
                            {'name': n, 'value': v} for n, v in
                                parse_qsl(request.body)]
                    try:
                        # Drop params that cannot be serialised to JSON.
                        _ = json.dumps(ret['postData']['params'])
                    except UnicodeDecodeError:
                        logger.error('params encoding error')
                        del ret['postData']['params']

            return ret
コード例 #3
0
ファイル: fetcher.py プロジェクト: clickear/qiandao_clickear
        def build_request(en):
            """Convert a HAR entry's request section into our request dict."""
            url = urlparse(en['request']['url'])
            request = dict(
                    method = en['request']['method'],
                    url = en['request']['url'],
                    httpVersion = 'HTTP/1.1',
                    headers = [
                        {'name': x['name'], 'value': x['value'], 'checked': True} for x in
                                en['request'].get('headers', [])],
                    queryString = [
                        {'name': n, 'value': v} for n, v in
                                parse_qsl(url.query)],
                    cookies = [
                        {'name': x['name'], 'value': x['value'], 'checked': True} for x in
                                en['request'].get('cookies', [])],
                    headersSize = -1,
                    bodySize = len(en['request'].get('data')) if en['request'].get('data') else 0,
                    )
            if en['request'].get('data'):
                request['postData'] = dict(
                        mimeType = en['request'].get('mimeType'),
                        text = en['request'].get('data'),
                        )
                if en['request'].get('mimeType') == 'application/x-www-form-urlencoded':
                    # BUG FIX: `urlparse` here is the function, not the module,
                    # so `urlparse.parse_qsl` raised AttributeError; call the
                    # imported parse_qsl directly (keep_blank_values=True).
                    params = [{'name': x[0], 'value': x[1]}
                        for x in parse_qsl(en['request']['data'], True)]
                    request['postData']['params'] = params
            return request
コード例 #4
0
ファイル: __init__.py プロジェクト: thayton/osja
    def make_job_desc_url(self, job_post_id):
        """Return self.url with its query string's 'postid' set to *job_post_id*."""
        scheme, netloc, path, params, query, fragment = urlparse.urlparse(self.url)
        qs = dict(urlparse.parse_qsl(query))
        qs['postid'] = job_post_id
        return urlparse.urlunparse(
            (scheme, netloc, path, params, urlencode(qs), fragment))
コード例 #5
0
def http_post(url, cookie_file='', postData=None, data=''):
    """POST *postData* (or the pairs parsed from query string *data*) to *url*.

    Returns the response body, or '' on URLError.
    """
    # BUG FIX: the default was a shared mutable dict ({}); use the None
    # sentinel so each call gets a fresh dict.
    if postData is None:
        postData = {}
    try:
        if data != '':
            postData = dict(urlparse.parse_qsl(data))
        net = Net(cookie_file=cookie_file)
        return net.http_POST(url, postData).content
    except URLError:
        return ''
コード例 #6
0
ファイル: bucket.py プロジェクト: davidbau/net-intent
 def do_POST(self):
     """Parse query-string and body form fields, then dispatch the request.

     NOTE(review): `self.headers.getheader` is the Python 2 mimetools API;
     on Python 3 handlers this would be `self.headers.get` -- confirm the
     server class this method is mixed into.
     """
     from urllib.parse import urlparse, parse_qsl
     url = urlparse(self.path)
     fields = OrderedDict(parse_qsl(url.query))
     length = int(self.headers.getheader('content-length'))
     field_data = self.rfile.read(length)
     # BUG FIX: `urlparse` is the function imported above, so
     # `urlparse.parse_qsl` raised AttributeError; use parse_qsl directly.
     fields.update(parse_qsl(field_data))
     self.dispatch(url, fields)
コード例 #7
0
 def do_POST(self):
     """Parse query-string and body form fields, then dispatch the request.

     NOTE(review): `self.headers.getheader` is the Python 2 mimetools API;
     on Python 3 handlers this would be `self.headers.get` -- confirm the
     server class this method is mixed into.
     """
     from urllib.parse import urlparse, parse_qsl
     url = urlparse(self.path)
     fields = OrderedDict(parse_qsl(url.query))
     length = int(self.headers.getheader('content-length'))
     field_data = self.rfile.read(length)
     # BUG FIX: `urlparse` is the function imported above, so
     # `urlparse.parse_qsl` raised AttributeError; use parse_qsl directly.
     fields.update(parse_qsl(field_data))
     self.dispatch(url, fields)
コード例 #8
0
ファイル: helpers.py プロジェクト: SquirrelMajik/GRec
def add_url_params(url, new_params, concat=True, unique=True):
    """Merge *new_params* into *url*'s query string.

    With concat=True the new pairs are appended after the existing ones;
    with unique=True duplicate names collapse (later values win).
    Python 2 only: uses dict.iteritems and urllib.urlencode.
    """
    if isinstance(new_params, dict):
        new_params = [(key, value) for key, value in new_params.iteritems()]

    parts = list(urlparse.urlparse(url))
    pairs = urlparse.parse_qsl(parts[4])
    if concat:
        pairs = pairs + new_params
    else:
        pairs = new_params
    if unique:
        pairs = dict(pairs)

    parts[4] = urllib.urlencode(pairs)
    return urlparse.urlunparse(parts)
コード例 #9
0
ファイル: orderedcollections.py プロジェクト: orionzhou/robin
def parse_qs(qs, keep_blank_values=0, strict_parsing=0, keep_attr_order=True):
    """
    Ordered variant of urlparse.parse_qs: multi-valued keys accumulate
    into lists, optionally preserving the order of first appearance
    (and without shadowing the built-in dict type).

    Adapted from:
    <https://bitbucket.org/btubbs/thumpy/raw/8cdece404f15/thumpy.py>
    """
    result = DefaultOrderedDict(list) if keep_attr_order else defaultdict(list)
    pairs = urlparse.parse_qsl(qs, keep_blank_values, strict_parsing)
    for key, val in pairs:
        result[key].append(val)
    return result
コード例 #10
0
def parse_qs(qs, keep_blank_values=0, strict_parsing=0, keep_attr_order=True):
    """
    Ordered variant of urlparse.parse_qs: multi-valued keys accumulate
    into lists, optionally preserving the order of first appearance
    (and without shadowing the built-in dict type).

    Adapted from:
    <https://bitbucket.org/btubbs/thumpy/raw/8cdece404f15/thumpy.py>
    """
    result = DefaultOrderedDict(list) if keep_attr_order else defaultdict(list)
    for key, val in urlparse.parse_qsl(qs, keep_blank_values, strict_parsing):
        result[key].append(val)
    return result
コード例 #11
0
    def init(self,
             out_stream,
             url,
             proxy=None,
             use_proxy_for_chunks=True,
             g_stopEvent=None,
             maxbitrate=0,
             auth=''):
        """Initialise downloader state and the module-level proxy/auth globals.

        An optional '|'-separated suffix on *url* carries extra request
        headers encoded as a query string; it is parsed into the module
        global clientHeader.  Returns preDownoload()'s result, or False
        on any error (status set to 'finished').
        """
        global clientHeader, gproxy, gauth
        try:
            self.init_done = False
            self.init_url = url
            clientHeader = None
            self.status = 'init'
            self.proxy = proxy
            self.auth = auth
            # Treat the literal strings 'None' and '' the same as None.
            if self.auth in (None, 'None', ''):
                self.auth = None
            if self.auth:
                gauth = self.auth

            # Normalise an empty proxy string to None.  The original test
            # (`self.proxy and len(self.proxy) == 0`) could never fire,
            # because an empty string is already falsy.
            if self.proxy is not None and len(self.proxy) == 0:
                self.proxy = None
            gproxy = self.proxy
            self.use_proxy_for_chunks = use_proxy_for_chunks
            self.out_stream = out_stream
            self.g_stopEvent = g_stopEvent
            self.maxbitrate = maxbitrate
            if '|' in url:
                # Split "<url>|<header query string>" and parse the headers.
                sp = url.split('|')
                url = sp[0]
                clientHeader = sp[1]
                print(clientHeader)
                if PY3:
                    clientHeader = urllib.parse.parse_qsl(clientHeader)
                else:
                    clientHeader = urlparse.parse_qsl(clientHeader)
                print('header recieved now url and headers are', url,
                      clientHeader)
            self.status = 'init done'
            self.url = url
            return self.preDownoload()
        except Exception:
            # Narrowed from a bare `except:` so SystemExit/KeyboardInterrupt
            # still propagate.
            traceback.print_exc()
            self.status = 'finished'
        return False
コード例 #12
0
 def check_request(self,
                   request,
                   method,
                   params=None,
                   data=None,
                   headers=None):
     """Assert that *request* matches the expected method, query params,
     headers and (JSON) body."""
     self.assertEqual(request.method, method)
     if params is not None:
         parsed = urlparse.urlparse(request.url)
         query_pairs = urlparse.parse_qsl(parsed.query)
         self.assertEqual(dict(query_pairs), params)
     if headers is not None:
         for name, expected in headers.items():
             self.assertEqual(request.headers[name], expected)
     if data is None:
         self.assertEqual(request.body, None)
     else:
         self.assertEqual(json.loads(request.body), data)
コード例 #13
0
def build_ics_urls(ics_url):
    """Return (http, webcal, google-calendar) variants of an ICS feed URL.

    Python 2 only: uses urllib.urlencode.
    """
    google_calendar_url_base = 'http://www.google.com/calendar/render?cid='

    # [scheme, netloc, path, params, query, fragment]
    parts = list(urlparse.urlparse(ics_url))

    # Force plain http unless the feed is served over https.
    if parts[0] != 'https':
        parts[0] = 'http'
    ics_url_http = urlparse.urlunparse(parts)

    # webcal:// variant of the same location.
    parts[0] = 'webcal'
    ics_url_webcal = urlparse.urlunparse(parts)

    # Google Calendar "add by URL" link pointing at the webcal variant.
    google_parts = list(urlparse.urlparse(google_calendar_url_base))
    query = dict(urlparse.parse_qsl(google_parts[4]))
    query['cid'] = ics_url_webcal
    google_parts[4] = urllib.urlencode(query)
    ics_url_google = urlparse.urlunparse(google_parts)

    return ics_url_http, ics_url_webcal, ics_url_google
コード例 #14
0
ファイル: oauth.py プロジェクト: gedex/wc-api-python
    def get_oauth_url(self):
        """Return self.url augmented with the OAuth 1.0a query parameters."""
        params = {}

        # Strip an existing query string off the URL, keeping its pairs.
        if "?" in self.url:
            url = self.url[: self.url.find("?")]
            parsed_query = urlparse.urlparse(self.url).query
            for key, value in urlparse.parse_qsl(parsed_query):
                params[key] = value
        else:
            url = self.url

        params["oauth_consumer_key"] = self.consumer_key
        params["oauth_timestamp"] = int(time())
        params["oauth_nonce"] = HMAC(str(time() + randint(0, 99999)).encode(), "secret".encode(), sha1).hexdigest()
        params["oauth_signature_method"] = "HMAC-SHA256"
        params["oauth_signature"] = self.generate_oauth_signature(params, url)

        return "%s?%s" % (url, urlencode(params))
コード例 #15
0
def read_url_post(url):
    '''POST the URL's query string (re-encoded as JSON) back to the URL
    and decode the JSON reply into the equivalent Python object.

    Parameters
    ----------
    url : string
        where to get the json.

    Returns
    -------
    dict
        Python version of the input

    Note: if the input is a bare array or literal, for example,
    the output will be of the corresponding type.
    '''
    urlp = urlparse.urlparse(url)
    # Rebuild the URL without query or fragment; the query parameters
    # become the JSON request body instead.
    main_url = urlparse.urlunsplit(
        (urlp.scheme, urlp.netloc, urlp.path, '', ''))
    data = json.dumps(dict(urlparse.parse_qsl(urlp.query)))

    handler = urllib_request.HTTPHandler()
    opener = urllib_request.build_opener(handler)

    request = urllib_request.Request(main_url, data)
    request.add_header("Content-Type", 'application/json')
    request.get_method = lambda: 'POST'

    try:
        response = opener.open(request)
    except Exception as e:
        # HTTPError responses are file-like too; read them the same way.
        response = e

    # BUG FIX: the original `if response.code == 200: ... else: ...` had
    # two identical branches; read unconditionally.
    json_string = response.read()

    return json.loads(json_string)
コード例 #16
0
def read_url_post(url):
    '''POST the URL's query string (re-encoded as JSON) back to the URL
    and decode the JSON reply into the equivalent Python object.

    Parameters
    ----------
    url : string
        where to get the json.

    Returns
    -------
    dict
        Python version of the input

    Note: if the input is a bare array or literal, for example,
    the output will be of the corresponding type.
    '''
    urlp = urlparse.urlparse(url)
    # Rebuild the URL without query or fragment; the query parameters
    # become the JSON request body instead.
    main_url = urlparse.urlunsplit(
        (urlp.scheme, urlp.netloc, urlp.path, '', ''))
    data = json.dumps(dict(urlparse.parse_qsl(urlp.query)))

    handler = urllib_request.HTTPHandler()
    opener = urllib_request.build_opener(handler)

    request = urllib_request.Request(main_url, data)
    request.add_header("Content-Type", 'application/json')
    request.get_method = lambda: 'POST'

    try:
        response = opener.open(request)
    except Exception as e:
        # HTTPError responses are file-like too; read them the same way.
        response = e

    # BUG FIX: the original `if response.code == 200: ... else: ...` had
    # two identical branches; read unconditionally.
    json_string = response.read()

    return json.loads(json_string)
コード例 #17
0
    def parameterarrange(payload):
        """Return *host*'s query string re-encoded with the first
        parameter's value replaced by *payload*.

        NOTE(review): the function returns inside the loop, so only the
        first parameter is ever substituted; `host` is read from the
        enclosing scope.  Python 2 only: uses urllib.urlencode.
        """
        parsedurl = urlparse.urlparse(host)
        parameters = urlparse.parse_qsl(parsedurl.query,
                                        keep_blank_values=True)
        parameternames = []
        parametervalues = []

        for m in parameters:
            parameternames.append(m[0])
            parametervalues.append(m[1])

        for n in parameters:
            try:
                # BUG FIX: was `print("...") % n[0]`, which applies % to
                # print's return value (None) and raises TypeError on
                # Python 3.  Also dropped the unreachable restore that
                # followed the `return` statement.
                print("Checking '%s' parameter" % n[0])
                index = parameternames.index(n[0])
                parametervalues[index] = payload
                return urllib.urlencode(
                    dict(zip(parameternames, parametervalues)))
            except (KeyError):
                pass
コード例 #18
0
ファイル: oauth.py プロジェクト: gedex/wc-api-python
    def get_oauth_url(self):
        """Return self.url with OAuth 1.0a parameters appended."""
        oauth_params = {}

        base_url = self.url
        if "?" in base_url:
            # Keep the pre-'?' part as the base and absorb existing pairs.
            base_url = base_url[: base_url.find("?")]
            query = urlparse.urlparse(self.url).query
            oauth_params.update(urlparse.parse_qsl(query))

        oauth_params["oauth_consumer_key"] = self.consumer_key
        oauth_params["oauth_timestamp"] = int(time())
        nonce_seed = str(time() + randint(0, 99999)).encode()
        oauth_params["oauth_nonce"] = HMAC(
            nonce_seed, "secret".encode(), sha1).hexdigest()
        oauth_params["oauth_signature_method"] = "HMAC-SHA256"
        oauth_params["oauth_signature"] = self.generate_oauth_signature(
            oauth_params, base_url)

        return "%s?%s" % (base_url, urlencode(oauth_params))
コード例 #19
0
ファイル: views.py プロジェクト: NaxoOo/fbsentiments
def connect(request):
	"""Begin the Facebook OAuth dance and return a placeholder response.

	NOTE(review): the app credentials are hard-coded here; move them to
	settings or environment variables before shipping.
	"""
	consumer_key = '279983135533947'
	consumer_secret = '9ff237e023f95b524274adef3af1ea21'

	redirect_uri = 'https://fbsentiments.azurewebsites.net/status/authorize'

	# BUG FIX: the continuation lines below used to be separate bare
	# `+ '...'` expression statements (no-ops), so redirect_uri, scope and
	# client_secret never made it into the URLs.  Parenthesise so the
	# concatenation is a single expression.
	request_token_url = ('https://www.facebook.com/dialog/oauth?client_id=' + consumer_key
		+ '&redirect_uri=' + redirect_uri
		+ '&scope=user_posts')
	access_token_url = ('https://graph.facebook.com/v2.3/oauth/access_token?client_id=' + consumer_key
		+ '&redirect_uri=' + redirect_uri
		+ '&client_secret=' + consumer_secret
		+ '&code=')  # append the {code-parameter} value at call time
	#Authorization URL: https://www.facebook.com/dialog/oauth (the scopes that can be specified with this URL, can be found here)

	consumer = oauth.Consumer(consumer_key, consumer_secret)
	client = oauth.Client(consumer)

	# Step 1: Get a request token. This is a temporary token that is used for
	# having the user authorize an access token and to sign the request to obtain
	# said access token.

	resp, content = client.request(request_token_url, "GET")
	# TODO: check the error code once its format is known:
	# https://developers.facebook.com/docs/graph-api/using-graph-api/#errors

	request_token = dict(urlparse.parse_qsl(content))

	return HttpResponse("Hello, world. You're at the polls index.")
コード例 #20
0
# NOTE(review): Python 2 script fragment (print statements, `urlparse`
# module).  `resp`, `consumer_key`, `consumer_secret`, `request_token_url`
# and `authorize_url` are defined earlier in the file, outside this excerpt.
if resp['status'] != '401':
    raise Exception("Should have no access!")

consumer = oauth.Consumer(consumer_key, consumer_secret)
client = oauth.Client(consumer)
# Sign requests with RSA-SHA1 rather than the library default.
client.set_signature_method(SignatureMethod_RSA_SHA1())

# Step 1: Get a request token. This is a temporary token that is used for
# having the user authorize an access token and to sign the request to obtain
# said access token.

resp, content = client.request(request_token_url, "POST")
if resp['status'] != '200':
    raise Exception("Invalid response %s: %s" % (resp['status'], content))

# The provider replies with a form-encoded body
# (oauth_token=...&oauth_token_secret=...); parse it into a dict.
request_token = dict(urlparse.parse_qsl(content))

print "Request Token:"
print "    - oauth_token        = %s" % request_token['oauth_token']
print "    - oauth_token_secret = %s" % request_token['oauth_token_secret']
print

# Step 2: Redirect to the provider. Since this is a CLI script we do not
# redirect. In a web application you would redirect the user to the URL
# below.

print "Go to the following link in your browser:"
print "%s?oauth_token=%s" % (authorize_url, request_token['oauth_token'])
print

# After the user has granted access to you, the consumer, the provider will
コード例 #21
0
ファイル: helpers.py プロジェクト: SquirrelMajik/GRec
def get_url_params(url, unique=True):
    """Return *url*'s query parameters: a dict when unique=True (later
    duplicates win), otherwise the raw (name, value) pair list."""
    query = list(urlparse.urlparse(url))[4]
    pairs = urlparse.parse_qsl(query)
    return dict(pairs) if unique else pairs
コード例 #22
0
ファイル: ami.py プロジェクト: Kirembu/octopasty
    def application(self, environ, start_response):
        """WSGI entry point bridging HTTP requests to AMI actions.

        Routes /json, /manager, /mxml and /rawman to the matching writer
        configuration, ties the caller to a per-session Client via the
        mansession_id cookie, forwards the query string as an AMI message,
        and yields the collected response fragments.
        """

        # Require a query string and a recognized content type.
        writer_conf = None
        path_info = environ.get('PATH_INFO')
        if path_info == '/json':
            writer_conf = 'json'
            content_type = 'application/json'
        elif path_info == '/manager':
            writer_conf = 'manager'
            content_type = 'text/html'
        elif path_info == '/mxml':
            writer_conf = 'mxml'
            content_type = 'text/html'
        elif path_info == '/rawman':
            writer_conf = 'rawman'
            content_type = 'text/plain'
        query_string = environ.get('QUERY_STRING')
        if writer_conf is None or query_string is None:
            # Bare `return` inside a generator ends the response body.
            start_response("404 Not found", [])
            return

        # Recover the session client or create a new one.
        # NOTE(review): `Cookie` is the Python 2 module name (http.cookies
        # on Python 3) -- confirm against the file's imports.
        cookie = Cookie.SimpleCookie(environ.get('HTTP_COOKIE'))
        try:
            session = cookie['mansession_id'].value
        except KeyError:
            client = None
        else:
            client = self.active_sessions.get(session)
        if client is None:
            # No (valid) session cookie: mint a random 32-bit session id
            # and a fresh Client/Writer pair for it.
            session = '%.8x' % random.randint(0, (1 << 32) - 1)
            if 'REMOTE_ADDR' in environ and 'REMOTE_PORT' in environ:
                suffix = ('-web-%s:%s' %
                          (environ['REMOTE_ADDR'], environ['REMOTE_PORT']))
            else:
                suffix = '-web-' + session
            client = Client(suffix=suffix)
            self.active_sessions[session] = client
            writer = Writer(suffix=suffix, timeout=idle_client_timeout)
            client.configure(self.config, writer)

        # Plumber so the results return to the original requester.
        fragments = []
        client.writer.set_write(fragments.append)
        client.writer.configure(writer_conf)

        # Trigger the action by sending the request to the client.
        start_response(
            "200 OK", [('Content-type', content_type),
                       ('Cache-Control', 'no-cache, no-store'),
                       ('Set-Cookie',
                        'mansession_id="%s"; Version=1; Max_Age=60' % session),
                       ('Pragma', 'SuppressEvents')])
        ami_message = AMI_Message()
        # Each query-string pair becomes one AMI message header.
        for key, value in urlparse.parse_qsl(query_string):
            ami_message[key] = value
        # Login actions from the Web should have "Events: off" forced!
        if ami_message.get_lower('Action') == 'login':
            ami_message['Events'] = 'off'
        client.process_ami_message(ami_message)

        # Collect and return the results.
        yield ''.join(fragments)
コード例 #23
0
ファイル: app.py プロジェクト: o0khoiclub0o/thimbot
# NOTE(review): Python 2 script fragment (print statements, `urlparse`
# module).  `resp`, `consumer_key`, `consumer_secret`, `request_token_url`
# and `authorize_url` are defined earlier in the file, outside this excerpt.
if resp['status'] != '401':
    raise Exception("Should have no access!")

consumer = oauth.Consumer(consumer_key, consumer_secret)
client = oauth.Client(consumer)
# Sign requests with RSA-SHA1 rather than the library default.
client.set_signature_method(SignatureMethod_RSA_SHA1())

# Step 1: Get a request token. This is a temporary token that is used for
# having the user authorize an access token and to sign the request to obtain
# said access token.

resp, content = client.request(request_token_url, "POST")
if resp['status'] != '200':
    raise Exception("Invalid response %s: %s" % (resp['status'],  content))

# The provider replies with a form-encoded body
# (oauth_token=...&oauth_token_secret=...); parse it into a dict.
request_token = dict(urlparse.parse_qsl(content))

print "Request Token:"
print "    - oauth_token        = %s" % request_token['oauth_token']
print "    - oauth_token_secret = %s" % request_token['oauth_token_secret']
print

# Step 2: Redirect to the provider. Since this is a CLI script we do not
# redirect. In a web application you would redirect the user to the URL
# below.

print "Go to the following link in your browser:"
print "%s?oauth_token=%s" % (authorize_url, request_token['oauth_token'])
print

# After the user has granted access to you, the consumer, the provider will
コード例 #24
0
    def SQLIscan(site):
        """Probe each query-string parameter of the global *host* URL with
        quote payloads and report parameters whose responses contain a
        known SQL error signature.

        NOTE(review): relies on Python 2 urllib APIs (urlencode, urlopen,
        quote_plus) and on the module globals `host`, `green`, `red`,
        `reset`.
        """
        print("[+] [ SQLI ] Scanner Started...\n")
        vuln = []
        payloads = {'2': '"', '1': '\''}
        errors = {
            'MySQL': 'You have an error in your SQL syntax;',
            'Oracle': 'SQL command not properly ended',
            'MSSQL': 'Unclosed quotation mark after the character string',
            'PostgreSQL': 'syntax error at or near'
        }
        path = urlparse.urlparse(site).scheme + "://" + urlparse.urlparse(
            site).netloc + urlparse.urlparse(site).path
        parsedurl = urlparse.urlparse(host)
        parameters = urlparse.parse_qsl(parsedurl.query,
                                        keep_blank_values=True)
        parameternames = []
        parametervalues = []

        for m in parameters:
            parameternames.append(m[0])
            parametervalues.append(m[1])

        for n in parameters:
            found = 0
            # BUG FIX: was `print("...") % n[0]`, which applies % to
            # print's return value (None) and raises TypeError on Python 3.
            print("[+] Checking '%s' parameter" % n[0])
            try:
                for i in payloads:
                    # Substitute this parameter's value with the payload,
                    # re-encode the query, then restore the original value.
                    pay = payloads[i]
                    index = parameternames.index(n[0])
                    original = parametervalues[index]
                    parametervalues[index] = pay
                    modifiedurl = urllib.urlencode(
                        dict(zip(parameternames, parametervalues)))
                    parametervalues[index] = original
                    modifiedparams = modifiedurl
                    payload = urllib.quote_plus(payloads[i])
                    u = urllib.urlopen(path + "?" + modifiedparams)
                    source = u.read()

                    # Look for a known database error signature in the body.
                    for i in errors:
                        if errors[i] in source:
                            dbfound = " | Back-End Database: " + green + str(
                                i) + reset
                            found = 1
                            break
                    if found != 1:
                        break
            except (KeyError):
                pass

            if found == 1:
                print("[!] " + red + "SQL Injection Vulnerability Found!" +
                      reset)
                print(dbfound)
                vuln.append("'" + n[0] + "'")
                found = 0
        if len(vuln) != 0:
            print(" | Vulnerable Parameter/s:"),
            for i in vuln:
                print(i),

        else:
            print("[!] Not Vulnerable")
コード例 #25
0
 def _manage_images_on_page(self, page_node, data_node, exported_resources):
     """Export the images referenced by a help page.

         - Extract images from page and generate an xml node
         - Replace db id in url with xml id

     ``page_node`` is the parsed page HTML, ``data_node`` the target XML
     <data> element, and ``exported_resources`` the set of xml_ids already
     emitted (mutated in place to avoid duplicate records).
     """
     img_model = 'ir.attachment'
     urls = self.img_url_map.bind("dummy.org", "/")
     for img_elem in page_node.iter('img'):
         img_src = img_elem.get('src')
         parse_result = urlparse.urlparse(img_src)
         path = parse_result.path
         query_args = parse_result.query
         # Only rewrite <img> tags whose src matches one of our image routes.
         if urls.test(parse_result.path, "GET"):
             endpoint, kwargs = urls.match(path,
                                           "GET",
                                           query_args=query_args)
             # Merge query-string arguments into the route's keyword args.
             kwargs.update(dict(urlparse.parse_qsl(query_args)))
             image = None
             # get the binary object
             xml_id = kwargs.get('xmlid')
             if xml_id:
                 image = self.env.ref(xml_id, False)
             else:
                 _id = kwargs.get('id')
                 model = kwargs.get('model', 'ir.attachment')
                 if _id and model:
                     # Drop a possible "_<unique>" cache-buster suffix.
                     _id, _, unique = str(_id).partition('_')
                     image = self.env[model].browse(int(_id))
             if (not image or not image.exists()
                     or image._name != img_model):
                 raise exceptions.UserError(
                     _('Only images from ir.attachment are supported when '
                       'exporting help pages'))
             exported_data = image.export_data([
                 'id', 'datas', 'datas_fname', 'name', 'res_model',
                 'mimetype'
             ],
                                               raw_data=False)['datas'][0]
             xml_id = exported_data[0]
             # Re-point the page at the xml_id-based URL so the export is
             # portable across databases.
             new_src = '/web/image/%s' % xml_id
             img_elem.attrib['src'] = new_src
             if xml_id in exported_resources:
                 continue
             # Emit one <record> with one <field> per exported column.
             img_node = ET.SubElement(data_node,
                                      'record',
                                      attrib={
                                          'id': xml_id,
                                          'model': image._name
                                      })
             field_node = ET.SubElement(img_node,
                                        'field',
                                        attrib={'name': 'datas'})
             field_node.text = str(exported_data[1])
             field_node = ET.SubElement(img_node,
                                        'field',
                                        attrib={'name': 'datas_fname'})
             field_node.text = exported_data[2]
             field_node = ET.SubElement(img_node,
                                        'field',
                                        attrib={'name': 'name'})
             field_node.text = exported_data[3]
             field_node = ET.SubElement(img_node,
                                        'field',
                                        attrib={'name': 'res_model'})
             field_node.text = exported_data[4]
             field_node = ET.SubElement(img_node,
                                        'field',
                                        attrib={'name': 'mimetype'})
             field_node.text = exported_data[5]
             data_node.append(img_node)
             exported_resources.add(xml_id)
コード例 #26
0
    def XSSscan(site):
        """Probe each query-string parameter of the global *host* URL with
        reflected-XSS payloads and report parameters that echo them back.

        NOTE(review): relies on Python 2 urllib APIs and on the module
        globals `host`, `red`, `boldred`, `reset`, plus BeautifulSoup.
        """
        print("[+] [ XSS ] Scanner Started...")
        vuln = []
        payloads = {
            '3': 'd4rk();"\'\\/}{d4rk',
            '2': 'd4rk</script><script>alert(1)</script>d4rk',
            '1': '<d4rk>'
        }
        path = urlparse.urlparse(site).scheme + "://" + urlparse.urlparse(
            site).netloc + urlparse.urlparse(site).path
        parsedurl = urlparse.urlparse(host)
        parameters = urlparse.parse_qsl(parsedurl.query,
                                        keep_blank_values=True)
        parameternames = []
        parametervalues = []

        for m in parameters:
            parameternames.append(m[0])
            parametervalues.append(m[1])

        for n in parameters:
            found = 0
            # BUG FIX: was `print(" | ...") % n[0]`, which applies % to
            # print's return value (None) and raises TypeError on Python 3.
            print(" | Checking '%s' parameter" % n[0])
            try:
                for i in payloads:
                    # Substitute this parameter's value with the payload,
                    # re-encode the query, then restore the original value.
                    pay = payloads[i]
                    index = parameternames.index(n[0])
                    original = parametervalues[index]
                    parametervalues[index] = pay
                    modifiedurl = urllib.urlencode(
                        dict(zip(parameternames, parametervalues)))
                    parametervalues[index] = original
                    modifiedparams = modifiedurl
                    payload = urllib.quote_plus(payloads[i])
                    u = urllib.urlopen(path + "?" + modifiedparams)
                    source = u.read()
                    code = BeautifulSoup(source)
                    if str(i) == str(1):
                        # HTML context: raw payload reflected verbatim.
                        if payloads[i] in source:
                            found = 1
                    script = code.findAll('script')
                    if str(i) == str(3) or str(i) == str(2):
                        # JS context: payload reflected inside a <script>.
                        if str(i) == str(3):
                            for p in range(len(script)):
                                try:
                                    if pay in script[p].contents[0]:
                                        found = 1
                                except (IndexError):
                                    pass
                        if str(i) == str(2):
                            if payloads['2'] in source:
                                found = 1
            except (KeyError):
                pass

            if found == 1:
                vuln.append("'" + n[0] + "'")
                found = 0
        if len(vuln) != 0:
            print("[!] " + red + "Vulnerable Parameter/s:" + reset),
            for i in vuln:
                print(boldred + i + reset),
        else:
            print("[!] Not Vulnerable")