Example #1
def go(x):
    start_time = time.time()
    try:
        if x in tmp_site:
            print '>> ' + x + ' {Duplicate URL}'
        elif x:
            tmp_site.append(x)
            # prepend a scheme if the URL does not already have one
            if not re.search('http(s)?://', x):
                x = 'http://' + x
            # normalize to a bare scheme://netloc/ origin
            x = parse(x).scheme + '://' + parse(x).netloc + '/'
            xs = x
            # append the .env target path if it is not already present
            if not re.search(r'\.env', x):
                x = x + '/.env'
            d = requests.get(x, verify=True)
            st = d.status_code
            try:
                if st == 200:
                    s = d.text
                    pausi(x, s)
                else:
                    print c.end + x + ' Error! ' + str(st)
                exploit(xs)  # try to upload a shell
            except requests.exceptions.ConnectionError:
                print c.red + "Error Connection %s" % (x)
            except:
                except_error(x)
    except:
        print x + ' error#'
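The interesting step here is the normalization: any input is coerced to a bare scheme://netloc/ origin before the probe. A minimal Python 3 sketch of that step alone, assuming parse is the standard urlparse:

# Minimal Python 3 sketch of the URL normalization above,
# assuming parse() behaves like urllib.parse.urlparse.
from urllib.parse import urlparse

def normalize(x):
    if not x.startswith(('http://', 'https://')):
        x = 'http://' + x
    parts = urlparse(x)
    return parts.scheme + '://' + parts.netloc + '/'

print(normalize('example.com/some/page'))  # -> http://example.com/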
Example #2
def get(url):
    '''
    Requests a URL and returns its contents.
    '''
    # First parse the URL.
    parts = urlparse.parse(url)
    host = parts.authority   # Assume the authority is the server hostname.
    port = DEFAULT_PORT
    path = parts.full_path   # This includes the path, query, and fragment.
    # If no path was specified, assume the default path, /.
    if path == '':
        path = '/'

    # Build a GET request with only the 'host' header, and an empty body.
    header = {'Host': host}
    request = httpmsg.Request('GET', path, header, b'')
    req_msg = httpmsg.format_request(request)
    # Send the request and get the response.
    resp_msg = send_request(host, port, req_msg)
    # Parse the response.
    response = httpmsg.parse_response(resp_msg)
    
    if response.status == 200:
        # Success.
        return response.body
    else:
        # Not successful; don't return data.
        raise RuntimeError(
            "GET of " + url + " returned " + str(response.status)
            + ": " + response.reason)
Example #3
def get(url):
    '''
    Requests a URL and returns its contents.
    '''
    # First parse the URL.
    parts = urlparse.parse(url)
    host = parts.authority   # Assume the authority is the server hostname.
    port = DEFAULT_PORT
    path = parts.full_path   # This includes the path, query, and fragment.
    # If no path was specified, assume the default path, /.
    if path == '':
        path = '/'

    # Build the header, which should contain 'Host'.
    header = {'Host': host}
    # Build a GET request with the path, headers, and an empty body.
    request = httpmsg.Request('GET', path, header, b'')
    # Format the request into a message.
    req_msg = httpmsg.format_request(request)
    # Now send the message and get a response from the server.
    resp_msg = send_request(host, port, req_msg)
    # Parse the response.
    response = httpmsg.parse_response(resp_msg)

    # Check the status of the response.  If it's 200, return the body.
    if response.status == 200:
        return response.body
    raise RuntimeError(
        "GET of " + url + " returned " + str(response.status)
        + ": " + response.reason)
Example #4
    def add_premium_logo_to_image_url(self, default_image=True):
        # Pick the 16:9 image by default, otherwise the 1:1 variant.
        url = self.image_url_16_9 if default_image else self.image_url_1_1
        # Split the URL path into (directory, filename) and insert the
        # premium-logo transformation segment between them.
        paths = path.split(parse(url).path)
        path_to_replace = "%s%s/%s" % (paths[0], "-fnov-fpotl-fpi157750653",
                                       paths[1])

        return urljoin(url, path_to_replace)
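A quick sketch of what that rewrite does to a URL, assuming path is os.path and parse/urljoin come from the standard URL utilities (the input URL is hypothetical):

# Hypothetical input; path/parse/urljoin assumed from os.path and urllib.parse.
from os import path
from urllib.parse import urlparse as parse, urljoin

url = "https://img.example.com/shows/123/image.jpg"
paths = path.split(parse(url).path)        # ('/shows/123', 'image.jpg')
path_to_replace = "%s%s/%s" % (paths[0], "-fnov-fpotl-fpi157750653", paths[1])
print(urljoin(url, path_to_replace))
# -> https://img.example.com/shows/123-fnov-fpotl-fpi157750653/image.jpg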
Example #5
def get_db(dburl="mysql://*****:*****@localhost:3306/dbname", pooled=False, unix_socket=None, charset='utf8', mincached=50, maxcached=100, maxshared=0, maxconnections=500, blocking=False):
	'''
	db generator, return a db instance from the db url
	
	db url examples:
		mysql://root:gbsoft@localhost:3306/dbname
		sqlserver://admin:gbsoft@localhost/dbname
		sqlite:///home/jadesoul/a.db
		access://E:/mydb/c.mdb
	
	the pooling parameters (mincached through blocking) are only considered when pooled is True
	'''
	info=parse(dburl)
	type=info.scheme
	
	if type=='access' or type=='sqlite':
		# file-based databases: the file path is everything after the scheme
		file=info.netloc+info.path
		if type=='sqlite': return sqlite(file)
	elif type=='mysql' or type=='sqlserver':
		host=info.hostname
		user=info.username
		passwd=info.password
		dbname=info.path[1:]  # strip the leading '/'
		port=info.port
		
		if type=='mysql': 
			if pooled:
				return pooled_mysql(mincached=mincached, maxcached=maxcached, maxshared=maxshared, maxconnections=maxconnections, blocking=blocking, host=host, user=user, passwd=passwd, dbname=dbname, port=port, charset=charset, unix_socket=unix_socket)
			else:
				return mysql(host=host, user=user, passwd=passwd, dbname=dbname, port=port, charset=charset, unix_socket=unix_socket)
		# note: 'access' and 'sqlserver' urls are parsed above but not handled here
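The decomposition get_db relies on, sketched with the Python 3 standard parser (the attribute behavior matches what the function expects of parse):

# How a db url decomposes, assuming parse() behaves like urlparse.
from urllib.parse import urlparse

info = urlparse('mysql://root:gbsoft@localhost:3306/dbname')
print(info.scheme)    # 'mysql'
print(info.hostname)  # 'localhost'
print(info.username)  # 'root'
print(info.password)  # 'gbsoft'
print(info.port)      # 3306
print(info.path[1:])  # 'dbname'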
Example #6
    def register_node(self, address):
        """
        Add a new node to the list of nodes

        :param address: Address of node. Eg. 'http://192.168.0.5:5000'
        """

        parsed_url = parse(address)
        self.nodes.add(parsed_url.netloc)
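One caveat: urlparse-style parsing only fills netloc when the address carries a scheme, so a bare '192.168.0.5:5000' would register an empty string. A defensive sketch (register_node_safe is a hypothetical variant, not part of the class above):

from urllib.parse import urlparse

def register_node_safe(nodes, address):
    # 'host:port' lands in .netloc only when a scheme is present;
    # scheme-less addresses like '192.168.0.5:5000' end up in .path.
    parsed = urlparse(address)
    if parsed.netloc:
        nodes.add(parsed.netloc)
    elif parsed.path:
        nodes.add(parsed.path)
    else:
        raise ValueError('Invalid node address: %r' % address)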
Example #7
    def _get_service_checks_tags(self, host, port, database_url):
        if database_url:
            parsed_url = urlparse.parse(database_url)
            host = parsed_url.hostname
            port = parsed_url.port

        service_checks_tags = [
            "host:%s" % host,
            "port:%s" % port,
            "db:%s" % self.DB_NAME
        ]
        return service_checks_tags
Example #8
def check(url, getLessSimilar=False):

    # Treat the URL as a reddit link when its second-level domain is "reddit".
    is_reddit_link = parse(url if url.startswith("http://") else "http://" +
                           url).hostname.split('.')[-2:][0] == "reddit"

    kd_url = "http://karmadecay.com/search?kdtoolver=b1&q=" + quote(url)

    connect_timeout = None
    read_timeout = 5.0
    session = requests.Session()
    session.mount("http://", requests.adapters.HTTPAdapter(max_retries=8))
    # user agent string for Chrome 41; karmadecay.com returns 401 for the default "python-requests..." user agent
    user_agent = {
        'User-agent':
        "Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2228.0 Safari/537.36"
    }
    request = session.get(kd_url,
                          headers=user_agent,
                          proxies=proxies,
                          timeout=(connect_timeout, read_timeout))
    ResponseTime = request.elapsed.total_seconds()
    html = request.content
    request.close()

    tree = etree.HTML(html)

    output = []

    if not is_reddit_link:  # only add the top item if the link was not a reddit link (to avoid duplicates)
        headResult = tree.xpath("//tr[@class='result']")
        if len(headResult) > 0:  # make sure there's actually a head item
            headItem = _extractItem(headResult[0])
            if headItem.link is not None:  # make sure there's a link present, sometimes there's not one
                output.append(headItem)

    results = tree.xpath("//tr[@class='result']")

    output.extend([_extractItem(result) for result in results])

    if not getLessSimilar:
        return {'output': output, 'time': ResponseTime}

    lessSimilar = tree.xpath(
        "//tr[@class='ls' or @class='lsi']/following::tr[@class='result']")
    lsOutput = [_extractItem(result) for result in lessSimilar]
    return (output, lsOutput)
Example #9
    def __init__(self,
                 base_uri=None,
                 resource='trackbacks',
                 format='json',
                 type='api.topsy',
                 **kwargs):
        """resource = otter REST resource name (ie. search),
        format = (only json is supported right now),
        kw = keyword args to pass to the api"""
        if base_uri:
            self.scheme, netloc, path, _, _, _ = urlparse.parse(base_uri)
            self.base_uri = netloc if netloc else path
            if not self.scheme:
                self.scheme = 'http'
            if 'otter' in base_uri:
                self.api_type = 'otter'
            else:
                self.api_type = 'api.topsy'
        else:
            self.scheme = 'http'
            if type == 'api.topsy':
                self.base_uri = 'api.topsy.com/v2'
                self.type = type
            elif type == 'otter':
                self.base_uri = 'otter.topsy.com'
                self.type = type
            else:
                raise NotImplementedError

        if 'perpage' not in kwargs:
            kwargs['perpage'] = 20000
        # record the page size whether or not the caller supplied perpage
        self.pagesize = kwargs['perpage']

        kwargs['limit'] = 20000
        self.resource = resource
        self.format = format
        self.content = None  # store decoded json
        default_args = {'include_enrichment_all': 1, 'sort_by': 'date'}
        self.kwargs = default_args
        self.kwargs.update(kwargs)
        self.pagenum = 1
        self.num_null_windows = 1
        self.total_retrieved = 0
        self.total_available = 0
Example #10
	def __init__(self, user, password):

		self.mqttc = paho.Client()
		self.user = user
		self.password = password
		# Assign event callbacks
		self.mqttc.on_message = on_message
		self.mqttc.on_connect = on_connect
		self.mqttc.on_publish = on_publish
		self.mqttc.on_subscribe = on_subscribe

		# Parse CLOUDMQTT_URL (or fall back to the default CloudMQTT broker)
		url_str = os.environ.get('CLOUDMQTT_URL', 'mqtt://m21.cloudmqtt.com')
		url = parse(url_str)
		# Uncomment to enable debug messages
		self.mqttc.on_log = on_log

		# Connect, falling back to built-in credentials when none are given.
		# Note: the parsed url is not used below; host and port are hardcoded.
		if user is None or password is None:
			self.mqttc.username_pw_set("lgvohswk", "VcimBX_iUvkb")
		else:
			self.mqttc.username_pw_set(self.user, self.password)
		self.mqttc.connect("m21.cloudmqtt.com", 11172)
Example #11
	def sendAnalytics(self, url, cached):
		
		if ( config.plugins.seriesplugin.ganalytics.value ):
			
			urlparts = parse(url)
			
			from plugin import VERSION,DEVICE
			parameter = urlencode(
				{
					'version' : VERSION,
					'cached'  : str(cached),
					'device'  : DEVICE
				}
			)
			
			if urlparts.query:
				url = urlparts.path + '?' + urlparts.query + '&' + parameter
			else:
				url = urlparts.path + '?' + parameter
			
			# https://developers.google.com/analytics/devguides/collection/protocol/v1/parameters
			# v   = version 1
			# tid = Tracking ID / Web Property ID
			# aip = anonymize ip
			# sc  = session start / end
			# ua  = User Agent
			# dr  = Document Referrer
			# t   = Hit type = 'pageview', 'screenview', 'event', 'transaction', 'item', 'social', 'exception', 'timing'
			# dl  = Document location URL
			# an  = Application Name
			# aid = Application Id
			# av  = Application Version
			# ec  = Event Category
			# ea  = Event Action
			# el  = Event Label
			# ev  = Event Value
			# cm[1-9][0-9]* = Custom Metric
			# z   = cache buster - random number
			#GET
			#http://www.google-analytics.com/collect?v=1&tid=UA-XXXX-Y&aip=1& TBD
			
			global my_uuid
			ua_parameter = urlencode(
				{
					'v'   : '1',
					'tid' : 'UA-31168065-1',
					'cid' : my_uuid,
					'aip' : '1',
					'sc'  : 'start',
					'ua'  : DEVICE + '_' + VERSION,
					't'   : 'pageview',
					'dp'  : url,
					'z'   : randint(1, 99999)
				}
			)
			
			try:
				req = Request( "http://www.google-analytics.com/collect" + '?' + ua_parameter )
				#splog("SP Analytics url: ", req.get_full_url())
				
				response = urlopen(req, timeout=5).read()
				#splog("SP Analytics respond: ", response) 
			
			except URLError as e:
				splog("SP Analytics error code: ", e.getcode())
				splog("SP Analytics error info: ", e.info())
			
			except socket.timeout as e:
				splog("SP Analytics socket timeout")
Example #12
    def sources(self, url, hostDict, hostprDict):
        sources = []
        try:
            data = urlparse.parse(url)
            data = dict((i, data[i][0]) if data[i] else (i, '') for i in data)
            data['sources'] = re.findall("[^', u\]\[]+", data['sources'])
            for i, s in data['sources']:
                token = str(self.__token(
                    {'id': i, 'server': 28, 'update': 0, 'ts': data['ts']}, 'iQDWcsGqN'))
                query = (self.info_path % (data['ts'], token, i, s))
                url = urlparse.urljoin(self.base_link, query)
                for r in range(1, 3):
                    info_response = client.request(url, XHR=True, timeout=10)
                    if info_response is not None: break
                grabber_dict = json.loads(info_response)
                try:
                    if grabber_dict['type'] == 'direct':
                        token64 = grabber_dict['params']['token']
                        randint = random.randint(1000000, 2000000)
                        query = (self.grabber_path % (data['ts'], i, token64))
                        url = urlparse.urljoin(self.base_link, query)
                        for r in range(1, 3):
                            response = client.request(url, XHR=True, timeout=10)
                            if response is not None: break
                        sources_list = json.loads(response)['data']
                        for j in sources_list:
                            quality = j['label'] if not j['label'] == '' else 'SD'
                            quality = source_utils.label_to_quality(quality)
                            urls = None
                            if 'googleapis' in j['file']:
                                sources.append({'source': 'GVIDEO', 'quality': quality, 'language': 'en', 'url': j['file'], 'direct': True, 'debridonly': False})
                                continue
                            if 'lh3.googleusercontent' in j['file'] or 'bp.blogspot' in j['file']:
                                try:
                                    newheaders = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/61.0.3163.100 Safari/537.36',
                                                  'Accept': '*/*',
                                                  'Host': 'lh3.googleusercontent.com',
                                                  'Accept-Language': 'en-US,en;q=0.8,de;q=0.6,es;q=0.4',
                                                  'Accept-Encoding': 'identity;q=1, *;q=0',
                                                  'Referer': self.film_url,
                                                  'Connection': 'Keep-Alive',
                                                  'X-Client-Data': 'CJK2yQEIo7bJAQjEtskBCPqcygEIqZ3KAQjSncoBCKijygE=',
                                                  'Range': 'bytes=0-'}
                                    resp = client.request(j['file'], headers=newheaders, redirect=False, output='extended', timeout='10')
                                    loc = resp[2]['Location']
                                    c = resp[2]['Set-Cookie'].split(';')[0]
                                    j['file'] = '%s|Cookie=%s' % (loc, c)
                                    urls, host, direct = [{'quality': quality, 'url': j['file']}], 'gvideo', True
                                except:
                                    pass
                            valid, hoster = source_utils.is_host_valid(j['file'], hostDict)
                            if not urls or urls == []:
                                urls, host, direct = source_utils.check_directstreams(j['file'], hoster)
                            for x in urls:
                                sources.append({'source': 'gvideo', 'quality': x['quality'], 'language': 'en', 'url': x['url'],
                                                'direct': True, 'debridonly': False})
                    elif not grabber_dict['target'] == '':
                        url = 'https:' + grabber_dict['target'] if not grabber_dict['target'].startswith('http') else grabber_dict['target']
                        valid, hoster = source_utils.is_host_valid(url, hostDict)
                        if not valid: continue
                        urls, host, direct = source_utils.check_directstreams(url, hoster)
                        sources.append({'source': hoster, 'quality': urls[0]['quality'], 'language': 'en', 'url': urls[0]['url'],
                                        'direct': False, 'debridonly': False})
                except: pass
            return sources
        except:
            failure = traceback.format_exc()
            log_utils.log('PLocker - Exception: \n' + str(failure))
            return sources
Example #13
    def do_GET(self):  # pylint: disable=invalid-name
        """
        handle a HTTP GET request
        """
        getLogger(__name__).debug("do_GET()")
        content_length = self.headers.getheader('Content-Length', 0)
        body = self.rfile.read(content_length)

        parsed_path = parse(self.path).path
        # if parsed_path == "/register":
        #    print "registring"
        #    client_id = generate_client_id()
        #    client_secret = generate_client_id()
        #    thingie = {'client_id': client_id, 'client_secret': client_secret}
        #    json = dumps(thingie)
        #    sql = 'insert into clients (id, secret) values (?, ?);'
        #    database_execute(sql, (client_id, client_secret))
        #    self.wfile.write(json)
        #    return
        if parsed_path == "/verify":
            getLogger(__name__).debug("Verify path")
            auth_header_contents = self.headers.getheader('Authorization', '')
            if auth_header_contents != '':
                try:
                    ttype, token = auth_header_contents.split(' ')
                except ValueError:
                    getLogger(__name__).critical(
                        "Problem parsing authorization header: %s",
                        auth_header_contents)
                    self.send_response(403)
                    return
                if ttype != 'Bearer':
                    result = "No Bearer Authorization Token found."
                    self.send_response(403)
                else:
                    sql = "select clients.user from bearer_tokens, clients where bearer_tokens.access_token = ? and bearer_tokens.expires > datetime('now') and bearer_tokens.client_id = clients.id;"
                    result = database_execute(sql, (token, ))
                    if not result:
                        result = "No authenticated bearer authorization token found"
                        self.send_response(403)
                    else:
                        result = result[0][0]
                        self.send_response(200)
            else:
                result = None
                self.send_response(403)
            self.end_headers()
            getLogger(__name__).debug('end')
            self.wfile.write(str(result))
            return
        else:
            try:
                scopes, credentials = self.authserver.validate_authorization_request(
                    self.path, self.command, body, self.headers)
                # store credentials somewhere
                headers, body, status = self.authserver.create_authorization_response(
                    self.path, self.command, body, self.headers, scopes,
                    credentials)
                self.send_response(status)
                for key, value in headers.iteritems():
                    self.send_header(key, value)
                self.end_headers()
            except OAuth2Error as error:
                getLogger(__name__).critical("OAuth2 Error: %s: %s",
                                             error.__class__.__name__,
                                             error.error)
                if error.message:
                    self.wfile.write(error.message)
                    getLogger(__name__).debug("Message: %s", error.message)
                if error.description:
                    self.wfile.write(error.description)
                    getLogger(__name__).debug("Description: %s",
                                              error.description)
        return
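A sketch of exercising the /verify branch above from a client; the host, port, and token value are placeholders, not values from the source:

# Placeholder host/port/token; the handler answers 200 with the username
# in the body for a valid bearer token, 403 otherwise.
import requests

resp = requests.get("http://localhost:8080/verify",
                    headers={"Authorization": "Bearer <access-token>"})
print(resp.status_code, resp.text)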
Example #14
def _verb(url,
          data=None,
          params=None,
          headers=None,
          connection=None,
          verb=None,
          ctx=None,
          stream_callback=None,
          stream_chunk=512):
    urlp = urlparse.parse(url)
    netl = urlparse.parse_netloc(urlp[1])
    host = netl[2]
    # print(verb,urlp,netl)
    if connection:
        sock = connection
    else:
        sock = _connect(host, netl[3], urlp[0], ctx)

    # print("CREATED SOCKET",sock.channel)
    #Generate and send Request Line
    msg = bytearray(BUFFER_LEN)
    p = 0
    endline = "\r\n"
    p = add_to_buffer(msg, verb, p)
    p = add_to_buffer(msg, " ", p)

    if not urlp[2]:
        p = add_to_buffer(msg, "/", p)
    else:
        p = add_to_buffer(msg, urlp[2], p)
    if (verb in zverbs) and (urlp[-2] or params):
        p = add_to_buffer(msg, "?", p)
        if urlp[-2]:
            p = add_to_buffer(msg, urlp[-2], p)
            if params:
                p = add_to_buffer(msg, "&", p)
        if params:
            p = add_to_buffer(msg, urlparse.urlencode(params), p)

    if msg[p - 1] != __ORD(" "):
        p = add_to_buffer(msg, " ", p)
    p = add_to_buffer(msg, "HTTP/1.1", p)
    p = add_to_buffer(msg, endline, p)
    __elements_set(msg, p)  #clamp buffer
    #send cmd
    try:
        # print(">>",msg)
        sock.sendall(msg)
    except Exception as e:
        sock.close()
        raise HTTPConnectionError

    __elements_set(msg, BUFFER_LEN)
    p = 0
    #Generate and send Request headers
    p = add_to_buffer(msg, "Host: ", p)
    p = add_to_buffer(msg, host, p)
    if netl[3]:
        p = add_to_buffer(msg, ":", p)
        p = add_to_buffer(msg, netl[3], p)

    p = add_to_buffer(msg, endline, p)
    __elements_set(msg, p)
    # print(">>",msg)
    sock.sendall(msg)
    __elements_set(msg, BUFFER_LEN)
    p = 0

    rh = {}
    if headers:
        for k in headers:
            rh[k.lower()] = headers[k]

    if "connection" not in rh:
        rh["connection"] = "close"

    if data is not None:
        rh["content-length"] = str(len(data[0]))  #data[0] is actual data
        rh["content-type"] = data[1]  #data[1] is data type header

    for k, v in rh.items():
        p = add_to_buffer(msg, k, p)
        p = add_to_buffer(msg, ": ", p)
        p = add_to_buffer(msg, v, p)
        p = add_to_buffer(msg, endline, p)
        __elements_set(msg, p)
        # print(">>",msg)
        sock.sendall(msg)
        __elements_set(msg, BUFFER_LEN)
        p = 0

    # Generate and send Body
    p = add_to_buffer(msg, endline, p)

    if data is not None:
        p = add_to_buffer(msg, data[0], p)

    __elements_set(msg, p)
    # print(">>",msg)
    sock.sendall(msg)
    __elements_set(msg, BUFFER_LEN)
    p = 0

    #Parse Response
    rr = Response()

    ssock = streams.SocketStream(sock)
    buffer = msg
    msg = _readline(ssock, buffer, 0, BUFFER_LEN)
    # print("<<",msg)

    if msg.startswith("HTTP/1.1"):
        rr.status = int(msg[9:12])

    __elements_set(msg, BUFFER_LEN)
    msg = _readline(ssock, buffer, 0, BUFFER_LEN)
    # print("<<",msg)
    #print(msg)

    #print(">[",msg,"]",msg=="\n",msg==endline)
    while not (msg == endline or msg == "\n"):
        idx_cl = msg.find(__ORD(":"))
        if idx_cl < 0:
            sock.close()
            raise HTTPResponseError
        rr.headers[str(msg[0:idx_cl].lower())] = str(msg[idx_cl +
                                                         1:-2].strip(endline))
        __elements_set(msg, BUFFER_LEN)
        msg = _readline(ssock, buffer, 0, BUFFER_LEN)
        # print("<<",msg)
        # print(msg)
        #print(">[",msg,"]",msg=="\n",msg==endline)

    #print(rr.headers)
    rr.connection = sock

    if "content-length" in rr.headers:
        bodysize = int(rr.headers["content-length"])
        #print("bodysize",bodysize)
        contentsize = bodysize
        reset_content = False
        if stream_callback is not None:
            reset_content = True
            contentsize = stream_chunk

        rr.content = bytearray(contentsize)
        tmp = 0
        rdr = 0
        while bodysize > 0:
            if reset_content:
                ofs = 0
            else:
                ofs = tmp
            rdr = sock.recv_into(rr.content,
                                 min(bodysize, contentsize),
                                 ofs=ofs)
            if rdr and stream_callback:
                __elements_set(rr.content, rdr)
                stream_callback(rr.content)
            #print(rdr,rr.content[tmp:tmp+rdr])
            tmp += rdr
            bodysize -= rdr
            if not rdr:
                break
        #print("CLOSED SOCKET A",sock.channel,rdr,tmp,bodysize)
        sock.close()
        rr.connection = None
    elif "transfer-encoding" in rr.headers:
        while True:
            __elements_set(msg, BUFFER_LEN)
            msg = _readline(ssock, buffer, 0, BUFFER_LEN)
            chsize = int(msg, 16)
            #print("chsize",chsize)
            if chsize:
                msg = sock.recv(chsize)
                #print(msg)
                if msg:
                    rr.content.extend(msg)
                else:
                    break
                __elements_set(buffer, BUFFER_LEN)
                msg = _readline(ssock, buffer, 0, BUFFER_LEN)
            else:
                __elements_set(buffer, BUFFER_LEN)
                msg = _readline(ssock, buffer, 0, BUFFER_LEN)  #terminator
                break
        #print("CLOSED SOCKET B",sock.channel)
        sock.close()
    else:
        while True:
            tmp = sock.recv(32)
            if tmp:
                rr.content.extend(tmp)
                #print(tmp)
            else:
                break
        #print("CLOSED SOCKET C",sock.channel)
        sock.close()
        rr.connection = None

    return rr
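For reference, a sketch of the request bytes _verb assembles for a simple GET (header names are lowercased by the header loop above; the URL pieces here are illustrative):

# Illustrative reconstruction of the request _verb writes to the socket.
host, path, query = "example.com", "/data", "x=1"
request = (
    "GET " + path + "?" + query + " HTTP/1.1\r\n"   # request line
    + "Host: " + host + "\r\n"                       # host header
    + "connection: close\r\n"                        # lowercased by the loop
    + "\r\n"                                         # blank line, then body
)
print(request)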
Example #15
def eddy_encode_url(url, txpower):
    """
.. function:: eddy_encode_url(url, txpower)

    Return a bytearray representing an encoded Eddystone payload of type url (not encrypted).

    According to the specifications:

    * :samp:`url` is a string representing a URL to be encoded in the Eddystone format
    * :samp:`txpower` is the power calibration measurement of the beacon (used to calculate distances)

    """
    # convert tx power
    tx = _encode_power(txpower)

    # parse url
    scheme, netloc, path, query, fragment = urlparse.parse(url)

    # find prefix
    prefix = -1
    if scheme == "http":
        if netloc.startswith("www."):
            prefix = 0
            netloc = netloc[4:]
        else:
            prefix = 2
    elif scheme == "https":
        if netloc.startswith("www."):
            prefix = 1
            netloc = netloc[4:]
        else:
            prefix = 3

    #find suffix
    suffix = -1
    if path:
        ntl = netloc + path[0]
    else:
        ntl = netloc
    for i, ext in enumerate(_eddy_suffixes):
        if ntl.endswith(ext):
            suffix = i
            if path:
                netloc = netloc[:-(len(ext) - 1)]
                path = path[1:]
            else:
                netloc = netloc[:-(len(ext))]
            break
    #build packet
    packet = bytearray()
    packet.extend(_eddy_header)  # add header
    packet.append(0x10)  # add frame (URL type)
    packet.append(tx)  # tx power
    if prefix >= 0:
        packet.append(prefix)
    else:
        packet.extend(scheme)
    packet.extend(netloc)
    if suffix >= 0:
        packet.append(suffix)
    packet.extend(path)
    packet.extend(query)
    packet.extend(fragment)

    #patch header length
    packet[7] = len(packet) - 8

    return packet
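A usage sketch; the -20 dBm txpower is an illustrative value, and the byte values in the comment assume _eddy_suffixes and the prefix codes follow the Eddystone-URL spec order:

# Encode a URL as an Eddystone-URL frame; -20 is an illustrative
# calibrated TX power. Assuming spec-ordered tables, "https://www."
# compresses to prefix byte 0x01 and ".com/" to suffix byte 0x00.
packet = eddy_encode_url("https://www.example.com/", -20)
print([hex(b) for b in packet])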