Example #1
def detect_proxy_settings():
    """
    Do some work and return dictionary with Proxy server settings for that machine.
    """
    d = {
        'host':'',
        'port':'',
        'username':'',
        'password':'',
        'ssl': 'False'}
    httpproxy = urllib2.getproxies().get('http', None)
    httpsproxy = urllib2.getproxies().get('https', None)

    if httpproxy is not None:
        try:
            scheme, host, port, path = parseurl(httpproxy)
        except:
            return d
        d['ssl'] = 'False'
        d['host'] = host
        d['port'] = port

    if httpsproxy is not None:
        try:
            scheme, host, port, path = parseurl(httpsproxy)
        except:
            return d
        d['ssl'] = 'True'
        d['host'] = host
        d['port'] = port

    return d
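A minimal usage sketch, assuming urllib2 is already imported (the function above requires it): the returned dictionary maps straight onto a urllib2.ProxyHandler.

# Usage sketch: turn the detected settings into a global urllib2 opener.
settings = detect_proxy_settings()
if settings['host']:
    scheme = 'https' if settings['ssl'] == 'True' else 'http'
    proxy_url = '%s:%s' % (settings['host'], settings['port'])
    opener = urllib2.build_opener(urllib2.ProxyHandler({scheme: proxy_url}))
    urllib2.install_opener(opener)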
Example #2
def gen_httplib_conn(scheme, host, proxy_host=None, proxy_port=None):
    """
    SEE ALSO http://code.activestate.com/recipes/301740-simplest-useful-https-with-basic-proxy-authenticat/
  """
    _port = {'http': 80, 'https': 443}
    _conn = {'http': httplib.HTTPConnection, 'https': httplib.HTTPSConnection}

    if scheme not in _port.keys():
        raise PyAmazonCloudDriveError("unsupported scheme. %s" % scheme)

    if proxy_host and proxy_port:
        pass
    elif urllib2.getproxies().has_key(scheme):
        proxy = urllib2.getproxies()[scheme]
        if proxy.find("/") != -1:
            proxy = urllib2.urlparse.urlparse(urllib2.getproxies()[scheme])[1]
        if proxy.find(":") != -1:
            proxy_host, proxy_port = proxy.split(":")
        else:
            proxy_host = proxy
            proxy_port = _port[scheme]

    else:
        return _conn[scheme](host)

    #print proxy_host,proxy_port

    proxy_connect = 'CONNECT %s:%s HTTP/1.1\r\n' % (host, _port[scheme])
    proxy_pieces = proxy_connect + '\r\n'

    #print proxy_pieces

    proxy_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    proxy_socket.connect((proxy_host, int(proxy_port)))
    proxy_socket.sendall(proxy_pieces)
    response = proxy_socket.recv(8192)
    status = response.split()[1]
    if status != str(200):
        raise PyAmazonCloudDriveError("%s:%s CONNECT returns %s." %
                                      (proxy_host, proxy_port, status))

    if scheme == 'http':
        sock = proxy_socket
    else:
        if sys.version_info[:2] < (2, 6):
            ssl = socket.ssl(proxy_socket, None, None)
            sock = httplib.FakeSocket(proxy_socket, ssl)
        else:
            import ssl
            sock = ssl.wrap_socket(proxy_socket, None, None)

    conn = httplib.HTTPConnection(host)
    conn.sock = sock
    return conn
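A possible usage sketch, with placeholder host and proxy values: once the CONNECT tunnel is established, the returned object behaves like any other httplib connection.

# Placeholder host/proxy values for illustration only.
conn = gen_httplib_conn('https', 'www.example.com',
                        proxy_host='proxy.example.com', proxy_port=8080)
conn.request('GET', '/')
resp = conn.getresponse()
print resp.status, resp.reason
conn.close()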
Example #4
 def setupProxies(self, proxy):
     """Get proxies and insert into url opener"""
     if proxy is None:
         proxies = urllib2.getproxies()
     else:
         proxies = {'http': proxy}
     opener = urllib2.build_opener(
         urllib2.ProxyHandler(proxies))
     urllib2.install_opener(opener)
Example #5
 def check_proxy(self, specific={}):
     """ Checks if proxy settings are set on the OS
     Returns:
     -- 1 when direct connection works fine
     -- 2 when direct connection fails and any proxy is set in the OS
     -- 3 and settings when direct connection fails but a proxy is set
     see: https://docs.python.org/2/library/urllib.html#urllib.getproxies
     """
     os_proxies = getproxies()
     if len(os_proxies) == 0 and self.check_internet_connection:
         logging.info("No proxy needed nor set. Direct connection works.")
         return 1
     elif len(os_proxies) == 0 and not self.check_internet_connection:
         logging.error("Proxy not set in the OS. Needs to be specified")
         return 2
     else:
         #
         env['http_proxy'] = os_proxies.get("http")
         env['https_proxy'] = os_proxies.get("https")
         #
         proxy = ProxyHandler({
             'http': os_proxies.get("http"),
             'https': os_proxies.get("https")
         })
         opener = build_opener(proxy)
         install_opener(opener)
         urlopen('http://www.google.com')
         return 3, os_proxies
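A rough sketch of how a caller might dispatch on the three return codes described in the docstring; connect_or_die is a hypothetical method on the same class, shown only for illustration.

 def connect_or_die(self):
     # Hypothetical caller, illustrating the return codes of check_proxy().
     result = self.check_proxy()
     if result == 1:
         return                        # direct connection works
     elif result == 2:
         raise RuntimeError("no proxy configured and no direct connection")
     else:
         code, os_proxies = result     # 3 plus the detected proxy settings
         logging.info("Using OS proxies: %s", os_proxies)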
Example #6
def urlopen(url):
    """Opens an url with urllib2"""

    timeout = 5

    # Proxy and SSL configuration
    pref = FreeCAD.ParamGet("User parameter:BaseApp/Preferences/Addons")
    if pref.GetBool("NoProxyCheck", True):
        proxies = {}
    else:
        if pref.GetBool("SystemProxyCheck", False):
            proxy = urllib2.getproxies()
            proxies = {"http": proxy.get('http'), "https": proxy.get('http')}
        elif pref.GetBool("UserProxyCheck", False):
            proxy = pref.GetString("ProxyUrl", "")
            proxies = {"http": proxy, "https": proxy}
        else:
            proxies = {}

    if ssl_ctx:
        handler = urllib2.HTTPSHandler(context=ssl_ctx)
    else:
        handler = {}
    proxy_support = urllib2.ProxyHandler(proxies)
    opener = urllib2.build_opener(proxy_support, handler)
    urllib2.install_opener(opener)

    # Url opening
    req = urllib2.Request(url,
                          headers={'User-Agent': "Magic Browser"})
    try:
        u = urllib2.urlopen(req, timeout=timeout)
    except Exception:
        return None
    else:
        return u
Example #7
def setupProxy():
    """Set up the urllib proxy if possible.

     The function will use the following methods in order to try and determine proxies:
        #. standard urllib2.urlopen (which will use any statically-defined http-proxy settings)
        #. previous stored proxy address (in prefs)
        #. proxy.pac files if these have been added to system settings
        #. auto-detect proxy settings (WPAD technology)

     .. note::
        This can take time, as each failed attempt to set up a proxy involves trying to load a URL and
        timing out. It is best to do this in a separate thread.

    :Returns:

        1 (success) or 0 (failure)
    """
    global proxies
    # try doing nothing
    proxies = urllib2.ProxyHandler(urllib2.getproxies())
    if testProxy(proxies) is True:
        logging.debug("Using standard urllib2 (static proxy or no proxy required)")
        urllib2.install_opener(urllib2.build_opener(proxies))  # this will now be used globally for ALL urllib2 opening
        return 1

    # try doing what we did last time
    if len(prefs.connections["proxy"]) > 0:
        proxies = urllib2.ProxyHandler({"http": prefs.connections["proxy"]})
        if testProxy(proxies) is True:
            logging.debug("Using %s (from prefs)" % (prefs.connections["proxy"]))
            urllib2.install_opener(
                urllib2.build_opener(proxies)
            )  # this will now be used globally for ALL urllib2 opening
            return 1
        else:
            logging.debug("Found a previous proxy but it didn't work")

    # try finding/using a proxy.pac file
    pacURLs = getPacFiles()
    logging.debug("Found proxy PAC files: %s" % pacURLs)
    proxies = proxyFromPacFiles(pacURLs)  # installs opener, if successful
    if proxies and hasattr(proxies, "proxies") and len(proxies.proxies["http"]) > 0:
        # save that proxy for future
        prefs.connections["proxy"] = proxies.proxies["http"]
        prefs.saveUserPrefs()
        logging.debug("Using %s (from proxy PAC file)" % (prefs.connections["proxy"]))
        return 1

    # try finding/using 'auto-detect proxy'
    pacURLs = getWpadFiles()
    proxies = proxyFromPacFiles(pacURLs)  # installs opener, if successful
    if proxies and hasattr(proxies, "proxies") and len(proxies.proxies["http"]) > 0:
        # save that proxy for future
        prefs.connections["proxy"] = proxies.proxies["http"]
        prefs.saveUserPrefs()
        logging.debug("Using %s (from proxy auto-detect)" % (prefs.connections["proxy"]))
        return 1

    proxies = 0
    return 0
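A hedged usage sketch: since the docstring warns that each failed attempt can block on a timeout, the function is typically kicked off in a background thread.

# Usage sketch: run setupProxy off the main thread, as the docstring suggests.
import threading

t = threading.Thread(target=setupProxy)
t.start()
# ...continue with other startup work; once the thread finishes, the chosen
# opener has already been installed globally for urllib2.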
Example #8
 def check_proxy(self, specific={}):
     """ Checks if proxy settings are set on the OS
     Returns:
     -- 1 when direct connection works fine
     -- 2 when direct connection fails and any proxy is set in the OS
     -- 3 and settings when direct connection fails but a proxy is set
     see: https://docs.python.org/2/library/urllib.html#urllib.getproxies
     """
     os_proxies = getproxies()
     if len(os_proxies) == 0 and self.check_internet_connection:
         logging.info("No proxy needed nor set. Direct connection works.")
         return 1
     elif len(os_proxies) == 0 and not self.check_internet_connection:
         logging.error("Proxy not set in the OS. Needs to be specified")
         return 2
     else:
         #
         env['http_proxy'] = os_proxies.get("http")
         env['https_proxy'] = os_proxies.get("https")
         #
         proxy = ProxyHandler({
                              'http': os_proxies.get("http"),
                              'https': os_proxies.get("https")
                              })
         opener = build_opener(proxy)
         install_opener(opener)
         urlopen('http://www.google.com')
         return 3, os_proxies
 def setupProxies(self, proxy):
     """Get proxies and insert into url opener"""
     if proxy is None:
         proxies = urllib2.getproxies()
     else:
         proxies = {"http": proxy}
     opener = urllib2.build_opener(urllib2.ProxyHandler(proxies))
     urllib2.install_opener(opener)
 def __init__(self, proxies=None):
     if proxies is None:
         proxies = urllib2.getproxies()
     assert isinstance(proxies, dict)
     # only handle https proxy
     self.proxy = None
     if "https" in proxies:
         self.proxy = proxies["https"]
Example #11
 def __soap_init(self):
     '''
     Initializes the SOAP client if it is not already initialized.
     @return: nothing.
     @raise WebFault: if the service is unavailable for some reason.
     '''
     if not self.__soap:
         self.__soap = client.Client(self.__SGS_URL, proxy=getproxies())
 def configureProxy(self):
     """ If the file is empty, it assumes no proxy
     """
     proxies = urllib2.getproxies()
     proxy_config = open("proxy.config", "w+")
     if 'http' in proxies.keys():
         proxy_config.write(proxies['http'].split('/')[-1])
     proxy_config.close()        
 def __init__(self, proxies=None):
     if proxies is None:
         proxies = urllib2.getproxies()
     assert isinstance(proxies, dict)
     # only handle https proxy
     self.proxy = None
     if 'https' in proxies:
         self.proxy = proxies['https']
Example #14
 def configureProxy(self):
     """ If the file is empty, it assumes no proxy
     """
     proxies = urllib2.getproxies()
     proxy_config = open("proxy.config", "w+")
     if 'http' in proxies.keys():
         proxy_config.write(proxies['http'].split('/')[-1])
     proxy_config.close()
Example #15
def ok_auth(raw_token):
    if 'refresh_token' in raw_token and raw_token['refresh_token'] and (datetime.datetime.now() - raw_token['refresh_token']['timestamp']).days < 30:
        url = 'http://api.odnoklassniki.ru/oauth/token.do'
        params = {'refresh_token':raw_token['refresh_token']['token'],
                  'grant_type':'refresh_token',
                  'client_id':raw_token['app_id'],
                  'client_secret':raw_token['app_secret']}
        access_data = jsonrequest(url, params = params)
        return {'app_key':raw_token['app_key'], 'app_secret':raw_token['app_secret'], 'access_token':access_data['access_token']}, raw_token

    headers = {'User-Agent':'Mozilla/5.0 (X11; Linux i686; rv:10.0.6) Gecko/20100101 Firefox/10.0.6',
               'Accept':'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8'}
    cj = cookielib.CookieJar()
    ok_opener=build_opener(ProxyHandler(getproxies()), HTTPCookieProcessor(cj), MyHTTPErrorProcessor)
    url = 'http://m.odnoklassniki.ru/'
    request = Request(url, headers = headers)
    while True:
        try:
            p, ok_opener = browserrequest(ok_opener, request)
            break
        except BadStatusLine:
            sleep(2)
            pass
    tkn = re.search(r'/dk\?bk=GuestMain&amp;st\.cmd=main&amp;_prevCmd=main&amp;tkn=([0-9]{2,4})', p.read()).group(1)
    params = urlencode({'fr.posted': 'set',
                        'fr.needCaptcha': '',
                        'fr.login': raw_token['login'],
                        'fr.password': raw_token['password'],
                        'button_login': '******'})
    request = Request('http://m.odnoklassniki.ru/dk?bk=GuestMain&st.cmd=main&tkn={0}'.format(tkn), headers = headers, data = params)
    sleep(2)
    p, ok_opener = browserrequest(ok_opener, request)
    url = 'http://www.odnoklassniki.ru/dk?st.cmd=OAuth2Permissions&st.scope=VALUABLE+ACCESS&st.response_type=code&st.redirect_uri=http%3A%2F%2Flocalhost&st.client_id={}&st.show_permissions=off'.format(raw_token['app_id'])
    request = Request(url, headers = headers)
    p, ok_opener = browserrequest(ok_opener, request)
    try:
        code = urlparse.parse_qs(urlparse.urlparse(p.info()['Location']).query)['code'][0]
    except AttributeError:
        p = p.read()
        key = re.search(r'name=\"fr.submitKey\" value=\"([^\"]+)\"', p).group(1)
        url = 'http://www.odnoklassniki.ru/dk?cmd=OAuth2Permissions&st.cmd=OAuth2Permissions&st.scope=VALUABLE+ACCESS&st.response_type=code&st.redirect_uri=http%3A%2F%2Flocalhost&st.client_id={}&st.show_permissions=off'.format(raw_token['app_id'])
        params = urlencode({'fr.posted': 'set',
                            'fr.submitKey': key,
                            'button_accept_request':'clicked',
                            'hook_form_button_click':'button_accept_request'})
        request = Request(url, headers = headers, data = params)
        p, ok_opener = browserrequest(ok_opener, request)
        code = urlparse.parse_qs(urlparse.urlparse(p.info()['Location']).query)['code'][0]
    url = 'http://api.odnoklassniki.ru/oauth/token.do'
    params = {'code':code,
              'redirect_uri':'http://localhost',
              'grant_type':'authorization_code',
              'client_id':raw_token['app_id'],
              'client_secret':raw_token['app_secret']}
    access_data = jsonrequest(url, params = params)
    raw_token['refresh_token'] = {'timestamp':datetime.datetime.now(), 'token':access_data['refresh_token']}
    return {'app_key':raw_token['app_key'], 'app_secret':raw_token['app_secret'], 'access_token':access_data['access_token']}, raw_token
Example #16
    def setSystemProxy(self):
        import urllib2
        proxies = urllib2.getproxies()

        for proxy in proxies:
            if proxy == 'http':
                print "aria2c --http-proxy='%s'" % proxies[proxy]
            elif proxy == 'https':
                print "aria2c --https-proxy='%s'" % proxies[proxy]
            elif proxy == 'ftp':
                print "aria2c --http-proxy='%s'" % proxies[proxy]
Example #17
 def setupProxies(self, proxy=None):
     """Get proxies and insert into url opener"""
     # try to get a proxy (wrap it in a ProxyHandler in both branches)
     if proxy is None:
         proxies = urllib2.ProxyHandler(urllib2.getproxies())
     else:
         proxies = urllib2.ProxyHandler({'http': proxy})
     # if we got one, install it
     if proxies.proxies.get('http'):
         opener = urllib2.build_opener(proxies)
         urllib2.install_opener(opener)  # this will now be used globally for ALL urllib2 opening
     else:
         pass  # no proxy could be found so use none
Example #18
def downloadVersion(version):
    """Fetch the requested version of nMOLDYN from the nMOLDYN server.
    """

    # Find automatically the proxies if some are defined.
    httpProxy = urllib2.getproxies()

    # The requests will go through the http proxy.
    proxy = urllib2.ProxyHandler(httpProxy)

    # Open the connection, possibly through the proxy.
    opener = urllib2.build_opener(proxy)

    if PLATFORM == 'WINDOWS':
        filename = 'nMOLDYN-%s.exe' % version

    # Case of Linux.
    else:
        filename = 'nMOLDYN-%s.zip' % version

    url = 'http://dirac.cnrs-orleans.fr/~nmoldyn/' + filename

    dest_filename = os.path.join(PREFERENCES['outputfile_path'], filename)

    try:
        fileReq = urllib2.Request(url)
        src = opener.open(fileReq)

    except urllib2.URLError:
        LogMessage('warning', 'Could not open the url %s.' % url,
                   ['console', 'gui'])

    else:
        try:
            dst = open(dest_filename, 'wb')

        except IOError:
            LogMessage(
                'warning',
                'Cannot open the file %s for writing. Maybe a permission problem.'
                % dest_filename, ['console', 'gui'])
            return

        else:
            LogMessage('info',
                       'Downloading %s file. Please wait ...' % filename,
                       ['console'])
            shutil.copyfileobj(src, dst)
            dst.close()
            LogMessage(
                'info', '%s file successfully downloaded in %s' %
                (filename, dest_filename), ['console'])

    opener.close()
Example #19
 def setupProxies(self, proxy=None):
     """Get proxies and insert into url opener"""
     # try to get a proxy (wrap it in a ProxyHandler in both branches)
     if proxy is None:
         proxies = urllib2.ProxyHandler(urllib2.getproxies())
     else:
         proxies = urllib2.ProxyHandler({'http': proxy})
     # if we got one, install it
     if proxies.proxies.get('http'):
         opener = urllib2.build_opener(proxies)
         urllib2.install_opener(opener)  # this will now be used globally for ALL urllib2 opening
     else:
         pass  # no proxy could be found so use none
    def ProxyDiagnostic(self):
        """Diagnose
        """
        self._logger.info("Checking proxy setting within environment.")

        self._logger.info("Proxies detected within urllib2:")
        proxies = urllib2.getproxies()
        self._logger.info(proxies)

        self._logger.info("Proxies detected within requests:")
        proxies = requests.utils.get_environ_proxies("")
        self._logger.info(proxies)
Example #22
def detect_proxy_settings():
    """
    Detect and return a dictionary with the proxy server settings for this
    machine.
    """
    d = {
        'host': '',
        'port': '',
        'username': '',
        'password': '',
        'ssl': 'False'
    }
    httpproxy = urllib2.getproxies().get('http', None)
    httpsproxy = urllib2.getproxies().get('https', None)

    if httpproxy is not None:
        try:
            scheme, host, port, path = parse_url(httpproxy)
            host, username, password = parse_credentials(host)
        except:
            return d
        d['ssl'] = 'False'
        d['host'] = host
        d['port'] = port
        d['username'] = username
        d['password'] = password

    if httpsproxy is not None:
        try:
            scheme, host, port, path = parse_url(httpsproxy)
            host, username, password = parse_credentials(host)
        except:
            return d
        d['ssl'] = 'True'
        d['host'] = host
        d['port'] = port
        d['username'] = username
        d['password'] = password

    return d
Example #23
def post_message(uri, data):
    global headers
    req = urllib2.Request(uri)
    for key, value in headers.iteritems():
        req.add_header("%s"%(key),"%s"%(value))

    if data:
        req.add_data(data)

    o=urllib2.build_opener()
    o.add_handler(urllib2.ProxyHandler(urllib2.getproxies()))
    urllib2.install_opener(o)

    return (urllib2.urlopen(req).read())
Example #24
def post_message(uri, data):
    global headers
    req = urllib2.Request(uri)
    for key, value in headers.iteritems():
        req.add_header("%s"%(key),"%s"%(value))

    if data:
        req.add_data(data)

    o=urllib2.build_opener()
    o.add_handler(urllib2.ProxyHandler(urllib2.getproxies()))
    urllib2.install_opener(o)

    return (urllib2.urlopen(req).read())
Example #25
def vk_auth(app_id, app_secret, login, password, scope='groups,friends'):
    '''
    Create access token for VKontakte from
    app id, app secret, user's login and password.
    Without browser (could be used on server side).
    '''
    headers = {'User-Agent':'Mozilla/5.0 (X11; Linux i686; rv:10.0.6) Gecko/20100101 Firefox/10.0.6',
               'Accept':'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8'}
    cj = cookielib.CookieJar()
    vk_browser=build_opener(ProxyHandler(getproxies()), HTTPCookieProcessor(cj))
    url = 'http://m.vk.com'
    request = Request(url, headers=headers)
    p, vk_browser = browserrequest(vk_browser, request)
    url = re.search('<form method=\"post\" action=\"([^\"]+)"',p.read()).group(1)
    params = {
        'email':login.encode('utf-8'),
        'pass':password.encode('utf-8')
    }
    cparams = urlencode(params)
    request = Request(url, headers=headers, data=cparams)
    p, vk_browser = browserrequest(vk_browser, request)
    url = 'https://oauth.vk.com/authorize?client_id={}&scope={}&redirect_uri=http://api.vk.com/blank.html&display=page&response_type=code'.format(app_id, scope)
    request = Request(url, headers=headers)
    p, vk_browser = browserrequest(vk_browser, request)
    url = None
    for furl in re.findall('location.href = \"([^\"]+)\"', p.read()):
        if 'login.vk.com' in furl and 'cancel' not in furl:
            url = furl
            break
    if url:
        request = Request(url, headers=headers)
        p, vk_browser = browserrequest(vk_browser, request)
    tree = urlparse.parse_qs(urlparse.urlparse(p.geturl()).fragment)
    try:
        code = tree['code'][0]
    except:
        return None
    vkapi = {
        'client_id':app_id,
        'client_secret':app_secret,
        'redirect_uri':'http://api.vk.com/blank.html',
        'scope':scope,
        'code':code
        }
    try:
        return jsonrequest('https://oauth.vk.com/access_token', params = vkapi)['access_token']
    except:
        return None
Example #26
def fb_auth(app_id, app_secret, app_url, login, password):
    import httplib
    httplib.HTTPConnection.debuglevel = 1

    headers = {'User-Agent':'Mozilla/5.0 (X11; Linux i686; rv:10.0.6) Gecko/20100101 Firefox/10.0.6',
               'Accept':'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8'}
    cj = cookielib.CookieJar()
    fb_browser=build_opener(ProxyHandler(getproxies()), HTTPCookieProcessor(cj), MyHTTPErrorProcessor)
    url = 'http://m.facebook.com'
    request = Request(url, headers=headers)
    p, fb_browser = browserrequest(fb_browser,request)
    p = p.read()
    url = re.search('<form method=\"post\" class=\"mobile-login-form _[a-zA-Z0-9]+\" id=\"login_form\" novalidate=\"1\" action=\"([^\"]+)\">',p).group(1)
    params = {
        'lsd': re.search('name=\"lsd\" value=\"([^\"]+)\"',p).group(1),
        'charset_test': re.search('name=\"charset_test\" value=\"([^\"]+)\"',p).group(1),
        'version': re.search('name=\"version\" value=\"([^\"]+)\"',p).group(1),
        'ajax': re.search('name=\"ajax\" value=\"([^\"]+)\"',p).group(1),
        'width': re.search('name=\"width\" value=\"([^\"]+)\"',p).group(1),
        'pxr': re.search('name=\"pxr\" value=\"([^\"]+)\"',p).group(1),
        'gps': re.search('name=\"gps\" value=\"([^\"]+)\"',p).group(1),
        'm_ts': re.search('name=\"m_ts\" value=\"([^\"]+)\"',p).group(1),
        'li': re.search('name=\"li\" value=\"([^\"]+)\"',p).group(1),
        'signup_layout': re.search('name=\"signup_layout\" value=\"([^\"]+)\"',p).group(1),
        'email': login,
        'pass': password,
        'login': re.search('value=\"([^\"]+)\" type=\"submit\" name=\"login\"',p).group(1),
    }
    cparams = urlencode(params)
    request = Request(url, headers=headers, data=cparams)
    p, fb_browser = browserrequest(fb_browser, request)
    url = 'https://www.facebook.com/dialog/oauth?client_id={}&redirect_uri={}'.format(app_id, app_url)
    request = Request(url, headers=headers)
    p, fb_browser = browserrequest(fb_browser, request)
    if 'Location' not in p.info() or 'code=' not in p.info()['Location']:
        return None
    code = urlparse.parse_qs(urlparse.urlparse(p.info()['Location']).query)['code'][0]
    url = 'https://graph.facebook.com/oauth/access_token'
    params = urlencode({
        'client_id': app_id,
        'redirect_uri':app_url,
        'client_secret':app_secret,
        'code':code})
    request=Request(url, headers=headers, data=params)
    p, fb_browser = browserrequest(fb_browser, request)
    tree=urlparse.parse_qs(p.read())
    return tree['access_token'][0]
    def login(self, email=None, password=None, authSubToken=None, pip = None):
        """Login to your Google Account. You must provide either:
        - an email and password
        - a valid Google authSubToken"""
        if (authSubToken is not None):
            self.setAuthSubToken(authSubToken)
        else:
            if (email is None or password is None):
                raise Exception("You should provide at least authSubToken or (email and password)")
            params = {"Email": email,
                                "Passwd": password,
                                "service": self.SERVICE,
                                "accountType": self.ACCOUNT_TYPE_HOSTED_OR_GOOGLE,
                                "has_permission": "1",
                                "source": "android",
                                "androidId": self.androidId,
                                "app": "com.android.vending",
                                #"client_sig": self.client_sig,
                                "device_country": "US",
                                "operatorCountry": "US",
                                "lang": "en",
                                "sdk_version": "16"}
            headers = {
                "Accept-Encoding": "",
            }
            params = urllib.urlencode(params)
            proxy_handler = urllib2.ProxyHandler({'http': pip})
            opener = urllib2.build_opener(proxy_handler)
#            opener.addheaders = [headers]
            urllib2.install_opener(opener)
            req=urllib2.Request(url=self.URL_LOGIN,data=params,headers=headers,unverifiable=False)
            response=urllib2.urlopen(req)
#            response= resp.read()
            
            data = response.read().split()
            print "Proxies are: ", urllib2.getproxies()
            params = {}
            for d in data:
                if not "=" in d: continue
                k, v = d.split("=")
                params[k.strip().lower()] = v.strip()
            if "auth" in params:
                self.setAuthSubToken(params["auth"])
            elif "error" in params:
                raise LoginError("server says: " + params["error"])
            else:
                raise LoginError("Auth token not found.")
Example #28
def proxy_check():

    try:
        proxy = urllib2.getproxies()
        http_proxy = proxy.get('http')
        https_proxy = proxy.get('https')
        if http_proxy != None and https_proxy != None:
            return [http_proxy, https_proxy]
        if http_proxy != None:
            return [http_proxy]
        elif https_proxy != None:
            return [https_proxy]
        else:
            return False
    except Exception as e:
        print "UNABLE TO GET PROXY INFO: " + str(e)
        return None
Example #29
def proxy_check():
    
    try:
        proxy = urllib2.getproxies()
        http_proxy = proxy.get('http')
        https_proxy = proxy.get('https')
        if http_proxy != None and https_proxy != None:
            return [http_proxy,https_proxy]
        if http_proxy != None:
            return [http_proxy]
        elif https_proxy != None:
            return [https_proxy]
        else:
            return False
    except Exception as e:
        print "UNABLE TO GET PROXY INFO: "+str(e)
        return None
Example #30
 def set_uri(self, proxy):
   """interpret the proxy setting to obtain a real name and port or None"""
   if proxy == None:
     self.hostname = None
   else:
     if proxy == -1:
       uri = urllib2.getproxies().get(self.method, None)
     else:
       uri = proxy
     if uri:
       method, self.username, self.password, self.hostname = self.parse_uri(uri)
       if method:
         self.method = method
     else:
       self.hostname = None
       self.username = None
       self.password = None
Example #31
def checkForNewVersion():

    # Find automatically the proxies if some are defined.
    httpProxy = urllib2.getproxies()

    # The requests will go through the http proxy.
    proxy = urllib2.ProxyHandler(httpProxy)

    # Open the connection, possibly through the proxy.
    opener = urllib2.build_opener(proxy)

    # The url for the file storing the last nMOLDYN version.
    url = 'http://dirac.cnrs-orleans.fr/~nmoldyn/last_version'

    # Build the url request for the file storing the last nMOLDYN version.
    req = urllib2.Request(url)

    # Open the url.
    try:
        f = opener.open(req)

    # The url could not be opened. Raises an error.
    except urllib2.URLError:
        LogMessage(
            'warning',
            'Could not open the url %s.\nPerhaps a problem with a proxy.\n\n\
Cannot check whether a new version of nMOLDYN was released.' % url,
            ['console', 'gui'])
        return

    # The url could be opened. Its contents will be extracted.
    else:
        # The name of the last nMOLDYN version.
        lastVersion = f.read().strip()
        f.close()
        opener.close()

    if LooseVersion(vstring=lastVersion) > LooseVersion(
            vstring=Cfg.nmoldyn_version):
        return lastVersion
    else:
        return None
Example #32
def set_proxy(proxy_dic=None):
    '''
    proxy format: {'http': 'http://www.example.com:3128/'}
    To disable autodetected proxy pass an empty dictionary: {}
    '''
    if proxy_dic is None:
        # The default is to read the list of proxies from the environment variables <protocol>_proxy.
        # If no proxy environment variables are set, then in a Windows environment proxy settings are
        # obtained from the registry's Internet Settings section, and in a Mac OS X environment proxy
        # information is retrieved from the OS X System Configuration
        # Framework.
        proxy = urllib2.ProxyHandler()
    else:
        # If proxies is given, it must be a dictionary mapping protocol names to
        # URLs of proxies.
        proxy = urllib2.ProxyHandler(proxy_dic)
    opener = urllib2.build_opener(proxy)
    urllib2.install_opener(opener)
    print "Proxies returned by urllib2.getproxies:",
    pprint(urllib2.getproxies())
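A short usage sketch for set_proxy, with a placeholder proxy URL; the three calls correspond to the cases the docstring describes.

set_proxy({'http': 'http://www.example.com:3128/'})  # force an explicit proxy
set_proxy({})                                        # disable any autodetected proxy
set_proxy()                                          # fall back to urllib2's autodetection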
Example #33
def set_proxy(proxy_dic=None):
    '''
    proxy format: {'http': 'http://www.example.com:3128/'}
    To disable autodetected proxy pass an empty dictionary: {}
    '''
    if proxy_dic is None:
        # The default is to read the list of proxies from the environment variables <protocol>_proxy.
        # If no proxy environment variables are set, then in a Windows environment proxy settings are
        # obtained from the registry's Internet Settings section, and in a Mac OS X environment proxy
        # information is retrieved from the OS X System Configuration
        # Framework.
        proxy = urllib2.ProxyHandler()
    else:
        # If proxies is given, it must be a dictionary mapping protocol names to
        # URLs of proxies.
        proxy = urllib2.ProxyHandler(proxy_dic)
    opener = urllib2.build_opener(proxy)
    urllib2.install_opener(opener)
    print "Proxies returned by urllib2.getproxies:",
    pprint(urllib2.getproxies())
def urlopen(url):
    """Opens an url with urllib2"""

    timeout = 5

    if sys.version_info.major < 3:
        import urllib2
    else:
        import urllib.request as urllib2

    # Proxy and SSL configuration
    pref = FreeCAD.ParamGet("User parameter:BaseApp/Preferences/Addons")
    if pref.GetBool("NoProxyCheck", True):
        proxies = {}
    else:
        if pref.GetBool("SystemProxyCheck", False):
            proxy = urllib2.getproxies()
            proxies = {"http": proxy.get('http'), "https": proxy.get('http')}
        elif pref.GetBool("UserProxyCheck", False):
            proxy = pref.GetString("ProxyUrl", "")
            proxies = {"http": proxy, "https": proxy}
        else:
            proxies = {}

    if ssl_ctx:
        handler = urllib2.HTTPSHandler(context=ssl_ctx)
    else:
        handler = {}
    proxy_support = urllib2.ProxyHandler(proxies)
    opener = urllib2.build_opener(proxy_support, handler)
    urllib2.install_opener(opener)

    # Url opening
    try:
        u = urllib2.urlopen(url, timeout=timeout)
    except:
        return None
    else:
        return u
Example #35
def request(method,
            url,
            params=None,
            data=None,
            headers=None,
            cookies=None,
            files=None,
            auth=None,
            timeout=60,
            allow_redirects=False,
            stream=False):
    """Initiate an HTTP(S) request. Returns :class:`Response` object.

    :param method: 'GET' or 'POST'
    :type method: unicode
    :param url: URL to open
    :type url: unicode
    :param params: mapping of URL parameters
    :type params: dict
    :param data: mapping of form data ``{'field_name': 'value'}`` or
        :class:`str`
    :type data: dict or str
    :param headers: HTTP headers
    :type headers: dict
    :param cookies: cookies to send to server
    :type cookies: dict
    :param files: files to upload (see below).
    :type files: dict
    :param auth: username, password
    :type auth: tuple
    :param timeout: connection timeout limit in seconds
    :type timeout: int
    :param allow_redirects: follow redirections
    :type allow_redirects: bool
    :param stream: Stream content instead of fetching it all at once.
    :type stream: bool
    :returns: Response object
    :rtype: :class:`Response`


    The ``files`` argument is a dictionary::

        {'fieldname' : { 'filename': 'blah.txt',
                         'content': '<binary data>',
                         'mimetype': 'text/plain'}
        }

    * ``fieldname`` is the name of the field in the HTML form.
    * ``mimetype`` is optional. If not provided, :mod:`mimetypes` will
      be used to guess the mimetype, or ``application/octet-stream``
      will be used.

    """
    # TODO: cookies
    socket.setdefaulttimeout(timeout)

    # Default handlers
    openers = [urllib2.ProxyHandler(urllib2.getproxies())]

    if not allow_redirects:
        openers.append(NoRedirectHandler())

    if auth is not None:  # Add authorisation handler
        username, password = auth
        password_manager = urllib2.HTTPPasswordMgrWithDefaultRealm()
        password_manager.add_password(None, url, username, password)
        auth_manager = urllib2.HTTPBasicAuthHandler(password_manager)
        openers.append(auth_manager)

    # Install our custom chain of openers
    opener = urllib2.build_opener(*openers)
    urllib2.install_opener(opener)

    if not headers:
        headers = CaseInsensitiveDictionary()
    else:
        headers = CaseInsensitiveDictionary(headers)

    if 'user-agent' not in headers:
        headers['user-agent'] = USER_AGENT

    # Accept gzip-encoded content
    encodings = [
        s.strip() for s in headers.get('accept-encoding', '').split(',')
    ]
    if 'gzip' not in encodings:
        encodings.append('gzip')

    headers['accept-encoding'] = ', '.join(encodings)

    if files:
        if not data:
            data = {}
        new_headers, data = encode_multipart_formdata(data, files)
        headers.update(new_headers)
    elif data and isinstance(data, dict):
        data = urllib.urlencode(str_dict(data))

    # Make sure everything is encoded text
    headers = str_dict(headers)

    if isinstance(url, unicode):
        url = url.encode('utf-8')

    if params:  # GET args (POST args are handled in encode_multipart_formdata)

        scheme, netloc, path, query, fragment = urlparse.urlsplit(url)

        if query:  # Combine query string and `params`
            url_params = urlparse.parse_qs(query)
            # `params` take precedence over URL query string
            url_params.update(params)
            params = url_params

        query = urllib.urlencode(str_dict(params), doseq=True)
        url = urlparse.urlunsplit((scheme, netloc, path, query, fragment))

    req = Request(url, data, headers, method=method)
    return Response(req, stream)
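A hedged usage sketch, with placeholder URLs and field names; Response and the other helpers are assumed to come from the same module as request().

# GET with query parameters; `params` are merged into the URL's query string.
r = request('GET', 'http://www.example.com/search', params={'q': 'proxies'})

# POST form data; the ProxyHandler built from urllib2.getproxies() applies here too.
r = request('POST', 'http://www.example.com/submit',
            data={'field_name': 'value'},
            auth=('user', 'secret'))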
Example #36
import urllib2
import urlparse
from logging import getLogger


log = getLogger(__name__)

# 1. get proxies if needed: a proxy for each protocol
# 2. handle authentication
# basic, digest, and NTLM (windows) authentications should be handled.
# 3. handle any protocol
# typically http, https, ftp

# 1. get the proxies list
proxies_dict=urllib2.getproxies()
# urllib can only get proxies on windows and mac. so on linux, or if the user
# wants to specify the proxy, there has to be a way to do that. TODO get proxies
# from condarc and overwrite any system proxies
# the proxies are in a dict {'http':'http://proxy:8080'}
# protocol:proxyserver

#2. handle authentication

proxypwdmgr = urllib2.HTTPPasswordMgrWithDefaultRealm()


def get_userandpass(proxytype='',realm=''):
    """a function to get username and password from terminal.
    can be replaced with anything like some gui"""
    import getpass
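The example is cut off before the handlers are wired together, so what follows is only an assumed sketch of how the pieces above (proxies_dict, proxypwdmgr, get_userandpass) could be combined; it is not the original module's code, and it assumes get_userandpass returns a (username, password) pair.

# Assumed continuation, not the original module's code.
def install_authenticated_proxy_opener():
    user, passwd = get_userandpass()              # assumed to return (user, password)
    for protocol, proxy_url in proxies_dict.items():
        # register the same credentials for every detected proxy
        proxypwdmgr.add_password(None, proxy_url, user, passwd)
    handlers = [urllib2.ProxyHandler(proxies_dict),
                urllib2.ProxyBasicAuthHandler(proxypwdmgr)]
    urllib2.install_opener(urllib2.build_opener(*handlers))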
Example #37
def getRegexParsed(regexs, url,cookieJar=None,forCookieJarOnly=False,recursiveCall=False,cachedPages={}, rawPost=False, cookie_jar_file=None):#0,1,2 = URL, regexOnly, CookieJarOnly
        #cachedPages = {}
        #print 'url',url
        doRegexs = re.compile('\$doregex\[([^\]]*)\]').findall(url)
#        print 'doRegexs',doRegexs,regexs
        setresolved=True
        for k in doRegexs:
            if k in regexs:
                #print 'processing ' ,k
                m = regexs[k]
                #print m
                cookieJarParam=False
                if  'cookiejar' in m: # so either create or reuse existing jar
                    #print 'cookiejar exists',m['cookiejar']
                    cookieJarParam=m['cookiejar']
                    if  '$doregex' in cookieJarParam:
                        cookieJar=getRegexParsed(regexs, m['cookiejar'],cookieJar,True, True,cachedPages)
                        cookieJarParam=True
                    else:
                        cookieJarParam=True
                #print 'm[cookiejar]',m['cookiejar'],cookieJar
                if cookieJarParam:
                    if cookieJar==None:
                        #print 'create cookie jar'
                        cookie_jar_file=None
                        if 'open[' in m['cookiejar']:
                            cookie_jar_file=m['cookiejar'].split('open[')[1].split(']')[0]
#                            print 'cookieJar from file name',cookie_jar_file

                        cookieJar=getCookieJar(cookie_jar_file)
#                        print 'cookieJar from file',cookieJar
                        if cookie_jar_file:
                            saveCookieJar(cookieJar,cookie_jar_file)
                        #import cookielib
                        #cookieJar = cookielib.LWPCookieJar()
                        #print 'cookieJar new',cookieJar
                    elif 'save[' in m['cookiejar']:
                        cookie_jar_file=m['cookiejar'].split('save[')[1].split(']')[0]
                        complete_path=os.path.join(profile,cookie_jar_file)
#                        print 'complete_path',complete_path
                        saveCookieJar(cookieJar,cookie_jar_file)

                if  m['page'] and '$doregex' in m['page']:
                    pg=getRegexParsed(regexs, m['page'],cookieJar,recursiveCall=True,cachedPages=cachedPages)
                    if len(pg)==0:
                        pg='http://regexfailed'
                    m['page']=pg

                if 'setcookie' in m and m['setcookie'] and '$doregex' in m['setcookie']:
                    m['setcookie']=getRegexParsed(regexs, m['setcookie'],cookieJar,recursiveCall=True,cachedPages=cachedPages)
                if 'appendcookie' in m and m['appendcookie'] and '$doregex' in m['appendcookie']:
                    m['appendcookie']=getRegexParsed(regexs, m['appendcookie'],cookieJar,recursiveCall=True,cachedPages=cachedPages)


                if  'post' in m and '$doregex' in m['post']:
                    m['post']=getRegexParsed(regexs, m['post'],cookieJar,recursiveCall=True,cachedPages=cachedPages)
#                    print 'post is now',m['post']

                if  'rawpost' in m and '$doregex' in m['rawpost']:
                    m['rawpost']=getRegexParsed(regexs, m['rawpost'],cookieJar,recursiveCall=True,cachedPages=cachedPages,rawPost=True)
                    #print 'rawpost is now',m['rawpost']

                if 'rawpost' in m and '$epoctime$' in m['rawpost']:
                    m['rawpost']=m['rawpost'].replace('$epoctime$',getEpocTime())

                if 'rawpost' in m and '$epoctime2$' in m['rawpost']:
                    m['rawpost']=m['rawpost'].replace('$epoctime2$',getEpocTime2())


                link=''
                if m['page'] and m['page'] in cachedPages and not 'ignorecache' in m and forCookieJarOnly==False :
                    #print 'using cache page',m['page']
                    link = cachedPages[m['page']]
                else:
                    if m['page'] and  not m['page']=='' and  m['page'].startswith('http'):
                        if '$epoctime$' in m['page']:
                            m['page']=m['page'].replace('$epoctime$',getEpocTime())
                        if '$epoctime2$' in m['page']:
                            m['page']=m['page'].replace('$epoctime2$',getEpocTime2())

                        #print 'Ingoring Cache',m['page']
                        page_split=m['page'].split('|')
                        pageUrl=page_split[0]
                        header_in_page=None
                        if len(page_split)>1:
                            header_in_page=page_split[1]

#                            if 
#                            proxy = urllib2.ProxyHandler({ ('https' ? proxytouse[:5]=="https":"http") : proxytouse})
#                            opener = urllib2.build_opener(proxy)
#                            urllib2.install_opener(opener)

                            
                        
#                        import urllib2
#                        print 'urllib2.getproxies',urllib2.getproxies()
                        current_proxies=urllib2.ProxyHandler(urllib2.getproxies())
        
        
                        #print 'getting pageUrl',pageUrl
                        req = urllib2.Request(pageUrl)
                        if 'proxy' in m:
                            proxytouse= m['proxy']
#                            print 'proxytouse',proxytouse
#                            urllib2.getproxies= lambda: {}
                            if pageUrl[:5]=="https":
                                proxy = urllib2.ProxyHandler({ 'https' : proxytouse})
                                #req.set_proxy(proxytouse, 'https')
                            else:
                                proxy = urllib2.ProxyHandler({ 'http'  : proxytouse})
                                #req.set_proxy(proxytouse, 'http')
                            opener = urllib2.build_opener(proxy)
                            urllib2.install_opener(opener)
                            
                        
                        req.add_header('User-Agent', 'Mozilla/5.0 (Windows NT 6.1; rv:14.0) Gecko/20100101 Firefox/14.0.1')
                        proxytouse=None

                        if 'referer' in m:
                            req.add_header('Referer', m['referer'])
                        if 'accept' in m:
                            req.add_header('Accept', m['accept'])
                        if 'agent' in m:
                            req.add_header('User-agent', m['agent'])
                        if 'x-req' in m:
                            req.add_header('X-Requested-With', m['x-req'])
                        if 'x-addr' in m:
                            req.add_header('x-addr', m['x-addr'])
                        if 'x-forward' in m:
                            req.add_header('X-Forwarded-For', m['x-forward'])
                        if 'setcookie' in m:
#                            print 'adding cookie',m['setcookie']
                            req.add_header('Cookie', m['setcookie'])
                        if 'appendcookie' in m:
#                            print 'appending cookie to cookiejar',m['appendcookie']
                            cookiestoApend=m['appendcookie']
                            cookiestoApend=cookiestoApend.split(';')
                            for h in cookiestoApend:
                                n,v=h.split('=')
                                w,n= n.split(':')
                                ck = cookielib.Cookie(version=0, name=n, value=v, port=None, port_specified=False, domain=w, domain_specified=False, domain_initial_dot=False, path='/', path_specified=True, secure=False, expires=None, discard=True, comment=None, comment_url=None, rest={'HttpOnly': None}, rfc2109=False)
                                cookieJar.set_cookie(ck)
                        if 'origin' in m:
                            req.add_header('Origin', m['origin'])
                        if header_in_page:
                            header_in_page=header_in_page.split('&')
                            for h in header_in_page:
                                n,v=h.split('=')
                                req.add_header(n,v)
                        
                        if not cookieJar==None:
#                            print 'cookieJarVal',cookieJar
                            cookie_handler = urllib2.HTTPCookieProcessor(cookieJar)
                            opener = urllib2.build_opener(cookie_handler, urllib2.HTTPBasicAuthHandler(), urllib2.HTTPHandler())
                            opener = urllib2.install_opener(opener)
#                            print 'noredirect','noredirect' in m
                            
                            if 'noredirect' in m:
                                opener = urllib2.build_opener(cookie_handler,NoRedirection, urllib2.HTTPBasicAuthHandler(), urllib2.HTTPHandler())
                                opener = urllib2.install_opener(opener)
                        elif 'noredirect' in m:
                            opener = urllib2.build_opener(NoRedirection, urllib2.HTTPBasicAuthHandler(), urllib2.HTTPHandler())
                            opener = urllib2.install_opener(opener)
                            

                        if 'connection' in m:
#                            print '..........................connection//////.',m['connection']
                            from keepalive import HTTPHandler
                            keepalive_handler = HTTPHandler()
                            opener = urllib2.build_opener(keepalive_handler)
                            urllib2.install_opener(opener)


                        #print 'after cookie jar'
                        post=None

                        if 'post' in m:
                            postData=m['post']
                            #if '$LiveStreamRecaptcha' in postData:
                            #    (captcha_challenge,catpcha_word,idfield)=processRecaptcha(m['page'],cookieJar)
                            #    if captcha_challenge:
                            #        postData=postData.replace('$LiveStreamRecaptcha','manual_recaptcha_challenge_field:'+captcha_challenge+',recaptcha_response_field:'+catpcha_word+',id:'+idfield)
                            splitpost=postData.split(',');
                            post={}
                            for p in splitpost:
                                n=p.split(':')[0];
                                v=p.split(':')[1];
                                post[n]=v
                            post = urllib.urlencode(post)

                        if 'rawpost' in m:
                            post=m['rawpost']
                            #if '$LiveStreamRecaptcha' in post:
                            #    (captcha_challenge,catpcha_word,idfield)=processRecaptcha(m['page'],cookieJar)
                            #    if captcha_challenge:
                            #       post=post.replace('$LiveStreamRecaptcha','&manual_recaptcha_challenge_field='+captcha_challenge+'&recaptcha_response_field='+catpcha_word+'&id='+idfield)
                        link=''
                        try:
                            
                            if post:
                                response = urllib2.urlopen(req,post)
                            else:
                                response = urllib2.urlopen(req)
                            if response.info().get('Content-Encoding') == 'gzip':
                                from StringIO import StringIO
                                import gzip
                                buf = StringIO( response.read())
                                f = gzip.GzipFile(fileobj=buf)
                                link = f.read()
                            else:
                                link=response.read()
                            
                        
                        
                            if 'proxy' in m and not current_proxies is None:
                                urllib2.install_opener(urllib2.build_opener(current_proxies))
                            
                            link=javascriptUnEscape(link)
                            #print repr(link)
                            #print link This just print whole webpage in LOG
                            if 'includeheaders' in m:
                                #link+=str(response.headers.get('Set-Cookie'))
                                link+='$$HEADERS_START$$:'
                                for b in response.headers:
                                    link+= b+':'+response.headers.get(b)+'\n'
                                link+='$$HEADERS_END$$:'
    #                        print link

                            response.close()
                        except: 
                            pass
                        cachedPages[m['page']] = link
                        #print link
                        #print 'store link for',m['page'],forCookieJarOnly

                        if forCookieJarOnly:
                            return cookieJar# do nothing
                    elif m['page'] and  not m['page'].startswith('http'):
                        if m['page'].startswith('$pyFunction:'):
                            val=doEval(m['page'].split('$pyFunction:')[1],'',cookieJar,m )
                            if forCookieJarOnly:
                                return cookieJar# do nothing
                            link=val
                            link=javascriptUnEscape(link)
                        else:
                            link=m['page']

                if  '$doregex' in m['expres']:
                    m['expres']=getRegexParsed(regexs, m['expres'],cookieJar,recursiveCall=True,cachedPages=cachedPages)
                  
                if not m['expres']=='':
                    #print 'doing it ',m['expres']
                    if '$LiveStreamCaptcha' in m['expres']:
                        val=askCaptcha(m,link,cookieJar)
                        #print 'url and val',url,val
                        url = url.replace("$doregex[" + k + "]", val)

                    elif m['expres'].startswith('$pyFunction:') or '#$pyFunction' in m['expres']:
                        #print 'expeeeeeeeeeeeeeeeeeee',m['expres']
                        val=''
                        if m['expres'].startswith('$pyFunction:'):
                            val=doEval(m['expres'].split('$pyFunction:')[1],link,cookieJar,m)
                        else:
                            val=doEvalFunction(m['expres'],link,cookieJar,m)
                        if 'ActivateWindow' in m['expres']: return
                        if forCookieJarOnly:
                            return cookieJar# do nothing
                        if 'listrepeat' in m:
                            listrepeat=m['listrepeat']
                            return listrepeat,eval(val), m,regexs,cookieJar

                        try:
                            url = url.replace(u"$doregex[" + k + "]", val)
                        except: url = url.replace("$doregex[" + k + "]", val.decode("utf-8"))
                    else:
                        if 'listrepeat' in m:
                            listrepeat=m['listrepeat']
                            ret=re.findall(m['expres'],link)
                            return listrepeat,ret, m,regexs
                             
                        val=''
                        if not link=='':
                            #print 'link',link
                            reg = re.compile(m['expres']).search(link)                            
                            try:
                                val=reg.group(1).strip()
                            except: traceback.print_exc()
                        elif m['page']=='' or m['page']==None:
                            val=m['expres']
                            
                        if rawPost:
#                            print 'rawpost'
                            val=urllib.quote_plus(val)
                        if 'htmlunescape' in m:
                            #val=urllib.unquote_plus(val)
                            import HTMLParser
                            val=HTMLParser.HTMLParser().unescape(val)
                        try:
                            url = url.replace("$doregex[" + k + "]", val)
                        except: url = url.replace("$doregex[" + k + "]", val.decode("utf-8"))
                        #print 'ur',url
                        #return val
                else:
                    url = url.replace("$doregex[" + k + "]",'')
        if '$epoctime$' in url:
            url=url.replace('$epoctime$',getEpocTime())
        if '$epoctime2$' in url:
            url=url.replace('$epoctime2$',getEpocTime2())

        if '$GUID$' in url:
            import uuid
            url=url.replace('$GUID$',str(uuid.uuid1()).upper())
        if '$get_cookies$' in url:
            url=url.replace('$get_cookies$',getCookiesString(cookieJar))

        if recursiveCall: return url
        #print 'final url',repr(url)
        if url=="":
            return
        else:
            return url,setresolved
Example #38
import urllib2
 
proxy = urllib2.ProxyHandler({'http': '172.26.67.10:8080'})
opener = urllib2.build_opener(proxy)
urllib2.install_opener(opener)

print urllib2.getproxies()

print urllib2.urlopen('http://www.google.com')

#response = urllib2.urlopen('http://www.google.com')
#datum = response.read().decode("UTF-8")
#response.close()
#print datum
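Note that urllib2.getproxies() is derived from the environment (and, on Windows and macOS, from system settings); installing an opener does not change what it returns. A small sketch of that distinction, reusing the same hypothetical proxy address as above:

import os
import urllib2

# getproxies() reads http_proxy/https_proxy style settings for the process,
# so setting the environment variable is what makes the proxy show up here
os.environ['http_proxy'] = 'http://172.26.67.10:8080'
print urllib2.getproxies()  # {'http': 'http://172.26.67.10:8080'}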
Example #39
0
def getRegexParsed(
        regexs,
        url,
        cookieJar=None,
        forCookieJarOnly=False,
        recursiveCall=False,
        cachedPages={},
        rawPost=False,
        cookie_jar_file=None):  #0,1,2 = URL, regexOnly, CookieJarOnly
    #cachedPages = {}
    #print 'url',url
    doRegexs = re.compile('\$doregex\[([^\]]*)\]').findall(url)
    #        print 'doRegexs',doRegexs,regexs
    setresolved = True
    for k in doRegexs:
        if k in regexs:
            #print 'processing ' ,k
            m = regexs[k]
            #print m
            cookieJarParam = False
            if 'cookiejar' in m:  # so either create or reuse existing jar
                #print 'cookiejar exists',m['cookiejar']
                cookieJarParam = m['cookiejar']
                if '$doregex' in cookieJarParam:
                    cookieJar = getRegexParsed(regexs, m['cookiejar'],
                                               cookieJar, True, True,
                                               cachedPages)
                    cookieJarParam = True
                else:
                    cookieJarParam = True
            #print 'm[cookiejar]',m['cookiejar'],cookieJar
            if cookieJarParam:
                if cookieJar == None:
                    #print 'create cookie jar'
                    cookie_jar_file = None
                    if 'open[' in m['cookiejar']:
                        cookie_jar_file = m['cookiejar'].split(
                            'open[')[1].split(']')[0]
#                            print 'cookieJar from file name',cookie_jar_file

                    cookieJar = getCookieJar(cookie_jar_file)
                    #                        print 'cookieJar from file',cookieJar
                    if cookie_jar_file:
                        saveCookieJar(cookieJar, cookie_jar_file)
                    #import cookielib
                    #cookieJar = cookielib.LWPCookieJar()
                    #print 'cookieJar new',cookieJar
                elif 'save[' in m['cookiejar']:
                    cookie_jar_file = m['cookiejar'].split('save[')[1].split(
                        ']')[0]
                    complete_path = os.path.join(profile, cookie_jar_file)
                    #                        print 'complete_path',complete_path
                    saveCookieJar(cookieJar, cookie_jar_file)

            if m['page'] and '$doregex' in m['page']:
                pg = getRegexParsed(regexs,
                                    m['page'],
                                    cookieJar,
                                    recursiveCall=True,
                                    cachedPages=cachedPages)
                if len(pg) == 0:
                    pg = 'http://regexfailed'
                m['page'] = pg

            if 'setcookie' in m and m['setcookie'] and '$doregex' in m[
                    'setcookie']:
                m['setcookie'] = getRegexParsed(regexs,
                                                m['setcookie'],
                                                cookieJar,
                                                recursiveCall=True,
                                                cachedPages=cachedPages)
            if 'appendcookie' in m and m['appendcookie'] and '$doregex' in m[
                    'appendcookie']:
                m['appendcookie'] = getRegexParsed(regexs,
                                                   m['appendcookie'],
                                                   cookieJar,
                                                   recursiveCall=True,
                                                   cachedPages=cachedPages)

            if 'post' in m and '$doregex' in m['post']:
                m['post'] = getRegexParsed(regexs,
                                           m['post'],
                                           cookieJar,
                                           recursiveCall=True,
                                           cachedPages=cachedPages)
#                    print 'post is now',m['post']

            if 'rawpost' in m and '$doregex' in m['rawpost']:
                m['rawpost'] = getRegexParsed(regexs,
                                              m['rawpost'],
                                              cookieJar,
                                              recursiveCall=True,
                                              cachedPages=cachedPages,
                                              rawPost=True)
                #print 'rawpost is now',m['rawpost']

            if 'rawpost' in m and '$epoctime$' in m['rawpost']:
                m['rawpost'] = m['rawpost'].replace('$epoctime$',
                                                    getEpocTime())

            if 'rawpost' in m and '$epoctime2$' in m['rawpost']:
                m['rawpost'] = m['rawpost'].replace('$epoctime2$',
                                                    getEpocTime2())

            link = ''
            if m['page'] and m[
                    'page'] in cachedPages and not 'ignorecache' in m and forCookieJarOnly == False:
                #print 'using cache page',m['page']
                link = cachedPages[m['page']]
            else:
                if m['page'] and not m['page'] == '' and m['page'].startswith(
                        'http'):
                    if '$epoctime$' in m['page']:
                        m['page'] = m['page'].replace('$epoctime$',
                                                      getEpocTime())
                    if '$epoctime2$' in m['page']:
                        m['page'] = m['page'].replace('$epoctime2$',
                                                      getEpocTime2())

                    #print 'Ingoring Cache',m['page']
                    page_split = m['page'].split('|')
                    pageUrl = page_split[0]
                    header_in_page = None
                    if len(page_split) > 1:
                        header_in_page = page_split[1]

#                            if
#                            proxy = urllib2.ProxyHandler({ ('https' ? proxytouse[:5]=="https":"http") : proxytouse})
#                            opener = urllib2.build_opener(proxy)
#                            urllib2.install_opener(opener)

#                        import urllib2
#                        print 'urllib2.getproxies',urllib2.getproxies()
                    current_proxies = urllib2.ProxyHandler(
                        urllib2.getproxies())

                    #print 'getting pageUrl',pageUrl
                    req = urllib2.Request(pageUrl)
                    if 'proxy' in m:
                        proxytouse = m['proxy']
                        #                            print 'proxytouse',proxytouse
                        #                            urllib2.getproxies= lambda: {}
                        if pageUrl[:5] == "https":
                            proxy = urllib2.ProxyHandler({'https': proxytouse})
                            #req.set_proxy(proxytouse, 'https')
                        else:
                            proxy = urllib2.ProxyHandler({'http': proxytouse})
                            #req.set_proxy(proxytouse, 'http')
                        opener = urllib2.build_opener(proxy)
                        urllib2.install_opener(opener)

                    req.add_header(
                        'User-Agent',
                        'Mozilla/5.0 (Windows NT 6.1; rv:14.0) Gecko/20100101 Firefox/14.0.1'
                    )
                    proxytouse = None

                    if 'referer' in m:
                        req.add_header('Referer', m['referer'])
                    if 'accept' in m:
                        req.add_header('Accept', m['accept'])
                    if 'agent' in m:
                        req.add_header('User-agent', m['agent'])
                    if 'x-req' in m:
                        req.add_header('X-Requested-With', m['x-req'])
                    if 'x-addr' in m:
                        req.add_header('x-addr', m['x-addr'])
                    if 'x-forward' in m:
                        req.add_header('X-Forwarded-For', m['x-forward'])
                    if 'setcookie' in m:
                        #                            print 'adding cookie',m['setcookie']
                        req.add_header('Cookie', m['setcookie'])
                    if 'appendcookie' in m:
                        #                            print 'appending cookie to cookiejar',m['appendcookie']
                        cookiestoApend = m['appendcookie']
                        cookiestoApend = cookiestoApend.split(';')
                        for h in cookiestoApend:
                            n, v = h.split('=')
                            w, n = n.split(':')
                            ck = cookielib.Cookie(version=0,
                                                  name=n,
                                                  value=v,
                                                  port=None,
                                                  port_specified=False,
                                                  domain=w,
                                                  domain_specified=False,
                                                  domain_initial_dot=False,
                                                  path='/',
                                                  path_specified=True,
                                                  secure=False,
                                                  expires=None,
                                                  discard=True,
                                                  comment=None,
                                                  comment_url=None,
                                                  rest={'HttpOnly': None},
                                                  rfc2109=False)
                            cookieJar.set_cookie(ck)
                    if 'origin' in m:
                        req.add_header('Origin', m['origin'])
                    if header_in_page:
                        header_in_page = header_in_page.split('&')
                        for h in header_in_page:
                            n, v = h.split('=')
                            req.add_header(n, v)

                    if not cookieJar == None:
                        #                            print 'cookieJarVal',cookieJar
                        cookie_handler = urllib2.HTTPCookieProcessor(cookieJar)
                        opener = urllib2.build_opener(
                            cookie_handler, urllib2.HTTPBasicAuthHandler(),
                            urllib2.HTTPHandler())
                        opener = urllib2.install_opener(opener)
                        #                            print 'noredirect','noredirect' in m

                        if 'noredirect' in m:
                            opener = urllib2.build_opener(
                                cookie_handler, NoRedirection,
                                urllib2.HTTPBasicAuthHandler(),
                                urllib2.HTTPHandler())
                            opener = urllib2.install_opener(opener)
                    elif 'noredirect' in m:
                        opener = urllib2.build_opener(
                            NoRedirection, urllib2.HTTPBasicAuthHandler(),
                            urllib2.HTTPHandler())
                        opener = urllib2.install_opener(opener)

                    if 'connection' in m:
                        #                            print '..........................connection//////.',m['connection']
                        from keepalive import HTTPHandler
                        keepalive_handler = HTTPHandler()
                        opener = urllib2.build_opener(keepalive_handler)
                        urllib2.install_opener(opener)

                    #print 'after cookie jar'
                    post = None

                    if 'post' in m:
                        postData = m['post']
                        #if '$LiveStreamRecaptcha' in postData:
                        #    (captcha_challenge,catpcha_word,idfield)=processRecaptcha(m['page'],cookieJar)
                        #    if captcha_challenge:
                        #        postData=postData.replace('$LiveStreamRecaptcha','manual_recaptcha_challenge_field:'+captcha_challenge+',recaptcha_response_field:'+catpcha_word+',id:'+idfield)
                        splitpost = postData.split(',')
                        post = {}
                        for p in splitpost:
                            n = p.split(':')[0]
                            v = p.split(':')[1]
                            post[n] = v
                        post = urllib.urlencode(post)

                    if 'rawpost' in m:
                        post = m['rawpost']
                        #if '$LiveStreamRecaptcha' in post:
                        #    (captcha_challenge,catpcha_word,idfield)=processRecaptcha(m['page'],cookieJar)
                        #    if captcha_challenge:
                        #       post=post.replace('$LiveStreamRecaptcha','&manual_recaptcha_challenge_field='+captcha_challenge+'&recaptcha_response_field='+catpcha_word+'&id='+idfield)
                    link = ''
                    try:

                        if post:
                            response = urllib2.urlopen(req, post)
                        else:
                            response = urllib2.urlopen(req)
                        if response.info().get('Content-Encoding') == 'gzip':
                            from StringIO import StringIO
                            import gzip
                            buf = StringIO(response.read())
                            f = gzip.GzipFile(fileobj=buf)
                            link = f.read()
                        else:
                            link = response.read()

                        if 'proxy' in m and not current_proxies is None:
                            urllib2.install_opener(
                                urllib2.build_opener(current_proxies))

                        link = javascriptUnEscape(link)
                        #print repr(link)
                        #print link  # printing link here would dump the whole webpage into the log
                        if 'includeheaders' in m:
                            #link+=str(response.headers.get('Set-Cookie'))
                            link += '$$HEADERS_START$$:'
                            for b in response.headers:
                                link += b + ':' + response.headers.get(
                                    b) + '\n'
                            link += '$$HEADERS_END$$:'

#                        print link

                        response.close()
                    except:
                        pass
                    cachedPages[m['page']] = link
                    #print link
                    #print 'store link for',m['page'],forCookieJarOnly

                    if forCookieJarOnly:
                        return cookieJar  # do nothing
                elif m['page'] and not m['page'].startswith('http'):
                    if m['page'].startswith('$pyFunction:'):
                        val = doEval(m['page'].split('$pyFunction:')[1], '',
                                     cookieJar, m)
                        if forCookieJarOnly:
                            return cookieJar  # do nothing
                        link = val
                        link = javascriptUnEscape(link)
                    else:
                        link = m['page']

            if '$doregex' in m['expres']:
                m['expres'] = getRegexParsed(regexs,
                                             m['expres'],
                                             cookieJar,
                                             recursiveCall=True,
                                             cachedPages=cachedPages)

            if not m['expres'] == '':
                #print 'doing it ',m['expres']
                if '$LiveStreamCaptcha' in m['expres']:
                    val = askCaptcha(m, link, cookieJar)
                    #print 'url and val',url,val
                    url = url.replace("$doregex[" + k + "]", val)

                elif m['expres'].startswith(
                        '$pyFunction:') or '#$pyFunction' in m['expres']:
                    #print 'expeeeeeeeeeeeeeeeeeee',m['expres']
                    val = ''
                    if m['expres'].startswith('$pyFunction:'):
                        val = doEval(m['expres'].split('$pyFunction:')[1],
                                     link, cookieJar, m)
                    else:
                        val = doEvalFunction(m['expres'], link, cookieJar, m)
                    if 'ActivateWindow' in m['expres']: return
                    if forCookieJarOnly:
                        return cookieJar  # do nothing
                    if 'listrepeat' in m:
                        listrepeat = m['listrepeat']
                        return listrepeat, eval(val), m, regexs, cookieJar

                    try:
                        url = url.replace(u"$doregex[" + k + "]", val)
                    except:
                        url = url.replace("$doregex[" + k + "]",
                                          val.decode("utf-8"))
                else:
                    if 'listrepeat' in m:
                        listrepeat = m['listrepeat']
                        ret = re.findall(m['expres'], link)
                        return listrepeat, ret, m, regexs

                    val = ''
                    if not link == '':
                        #print 'link',link
                        reg = re.compile(m['expres']).search(link)
                        try:
                            val = reg.group(1).strip()
                        except:
                            traceback.print_exc()
                    elif m['page'] == '' or m['page'] == None:
                        val = m['expres']

                    if rawPost:
                        #                            print 'rawpost'
                        val = urllib.quote_plus(val)
                    if 'htmlunescape' in m:
                        #val=urllib.unquote_plus(val)
                        import HTMLParser
                        val = HTMLParser.HTMLParser().unescape(val)
                    try:
                        url = url.replace("$doregex[" + k + "]", val)
                    except:
                        url = url.replace("$doregex[" + k + "]",
                                          val.decode("utf-8"))
                    #print 'ur',url
                    #return val
            else:
                url = url.replace("$doregex[" + k + "]", '')
    if '$epoctime$' in url:
        url = url.replace('$epoctime$', getEpocTime())
    if '$epoctime2$' in url:
        url = url.replace('$epoctime2$', getEpocTime2())

    if '$GUID$' in url:
        import uuid
        url = url.replace('$GUID$', str(uuid.uuid1()).upper())
    if '$get_cookies$' in url:
        url = url.replace('$get_cookies$', getCookiesString(cookieJar))

    if recursiveCall: return url
    #print 'final url',repr(url)
    if url == "":
        return
    else:
        return url, setresolved
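getRegexParsed() temporarily overrides the process-wide opener when a regex entry carries its own 'proxy' value, then restores a handler built from urllib2.getproxies(). A stripped-down sketch of that save-and-restore pattern (the function name and arguments are illustrative, not part of the add-on):

import urllib2

def fetch_via_proxy(page_url, proxy_to_use):
    # remember the system proxy configuration so it can be restored afterwards
    current_proxies = urllib2.ProxyHandler(urllib2.getproxies())
    scheme = 'https' if page_url.startswith('https') else 'http'
    urllib2.install_opener(urllib2.build_opener(
        urllib2.ProxyHandler({scheme: proxy_to_use})))
    try:
        return urllib2.urlopen(page_url).read()
    finally:
        # put the original (system) proxies back, as the code above does
        urllib2.install_opener(urllib2.build_opener(current_proxies))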
Example #40
0
 def __init__(self, verbose):
     QtGui.QMainWindow.__init__(self)
     self.setWindowTitle("Photini photo metadata editor")
     self.selection = list()
     # logger window
     self.loggerwindow = LoggerWindow(verbose)
     self.logger = logging.getLogger(self.__class__.__name__)
     # config store
     self.config_store = ConfigStore()
     # set network proxy
     proxies = urllib2.getproxies()
     if 'http' in proxies:
         scheme, host, port = proxies['http'].split(':')
         QNetworkProxy.setApplicationProxy(
             QNetworkProxy(QNetworkProxy.HttpProxy, host[2:], int(port)))
     # restore size
     size = self.width(), self.height()
     self.resize(*eval(
         self.config_store.get('main_window', 'size', str(size))))
     # image selector
     self.image_list = ImageList(self.config_store)
     self.image_list.selection_changed.connect(self.new_selection)
     self.image_list.new_metadata.connect(self.new_metadata)
     # prepare list of tabs and associated stuff
     self.tab_list = (
         {'name' : '&Descriptive metadata',  'class' : Descriptive},
         {'name' : '&Technical metadata',    'class' : Technical},
         {'name' : 'Map (&Google)',          'class' : GoogleMap},
         {'name' : 'Map (&Bing)',            'class' : BingMap},
         {'name' : 'Map (&OSM)',             'class' : OpenStreetMap},
         {'name' : '&Flickr uploader',       'class' : FlickrUploader},
         {'name' : '&Picasa uploader',       'class' : PicasaUploader},
         )
     for tab in self.tab_list:
         tab['key'] = tab['name'].replace('&', '').replace(' ', '_')
         tab['key'] = tab['key'].replace('(', '').replace(')', '').lower()
         if tab['class']:
             tab['object'] = tab['class'](self.config_store, self.image_list)
         else:
             tab['object'] = None
     # file menu
     file_menu = self.menuBar().addMenu('File')
     open_action = QtGui.QAction('Open images', self)
     open_action.setShortcuts(['Ctrl+O'])
     open_action.triggered.connect(self.image_list.open_files)
     file_menu.addAction(open_action)
     self.save_action = QtGui.QAction('Save images with new data', self)
     self.save_action.setShortcuts(['Ctrl+S'])
     self.save_action.setEnabled(False)
     self.save_action.triggered.connect(self.image_list.save_files)
     file_menu.addAction(self.save_action)
     self.close_action = QtGui.QAction('Close selected images', self)
     self.close_action.setEnabled(False)
     self.close_action.triggered.connect(self.close_files)
     file_menu.addAction(self.close_action)
     close_all_action = QtGui.QAction('Close all images', self)
     close_all_action.triggered.connect(self.close_all_files)
     file_menu.addAction(close_all_action)
     file_menu.addSeparator()
     quit_action = QtGui.QAction('Quit', self)
     quit_action.setShortcuts(['Ctrl+Q', 'Ctrl+W'])
     quit_action.triggered.connect(QtGui.qApp.closeAllWindows)
     file_menu.addAction(quit_action)
     # options menu
     options_menu = self.menuBar().addMenu('Options')
     settings_action = QtGui.QAction('Settings', self)
     settings_action.triggered.connect(self.edit_settings)
     options_menu.addAction(settings_action)
     options_menu.addSeparator()
     for tab in self.tab_list:
         tab['action'] = QtGui.QAction(tab['name'].replace('&', ''), self)
         tab['action'].setCheckable(True)
         if tab['class']:
             tab['action'].setChecked(
                 eval(self.config_store.get('tabs', tab['key'], 'True')))
         else:
             tab['action'].setEnabled(False)
         tab['action'].triggered.connect(self.add_tabs)
         options_menu.addAction(tab['action'])
     # help menu
     help_menu = self.menuBar().addMenu('Help')
     about_action = QtGui.QAction('About Photini', self)
     about_action.triggered.connect(self.about)
     help_menu.addAction(about_action)
     help_menu.addSeparator()
     help_action = QtGui.QAction('Photini documentation', self)
     help_action.triggered.connect(self.open_docs)
     help_menu.addAction(help_action)
     # main application area
     self.central_widget = QtGui.QSplitter()
     self.central_widget.setOrientation(Qt.Vertical)
     self.central_widget.setChildrenCollapsible(False)
     self.tabs = QtGui.QTabWidget()
     self.add_tabs()
     self.tabs.currentChanged.connect(self.new_tab)
     self.central_widget.addWidget(self.tabs)
     self.central_widget.addWidget(self.image_list)
     size = self.central_widget.sizes()
     self.central_widget.setSizes(eval(
         self.config_store.get('main_window', 'split', str(size))))
     self.central_widget.splitterMoved.connect(self.new_split)
     self.setCentralWidget(self.central_widget)
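The proxy handling above assumes proxies['http'] has the exact form http://host:port and peels the host out with split(':') and host[2:]. A hedged alternative sketch using urlparse, assuming the proxy value includes a scheme (the 8080 fallback port is an assumption):

import urllib2
import urlparse

proxies = urllib2.getproxies()
if 'http' in proxies:
    parsed = urlparse.urlparse(proxies['http'])
    host = parsed.hostname       # host part without the leading '//'
    port = parsed.port or 8080   # assumed default when no port is present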
Example #41
0
class sqlenum:

    q = 0

    foundlist = []
    notfoundlist = []
    errorlist = {}

    match = ''

    opener = None
    req = None

    iterated = []
    randomize = 0

    def main(self):

        url = ''
        data = {}
        headers = {}
        cookiepath = ''

        split_wlists = []
        w = []
        i = []
        c = []
        t = []

        try:
            opts, args = getopt.getopt(
                sys.argv[1:-1], 'rw:m:v:d:h:c:',
                ['wordlist', 'match', 'verbose', 'data', 'headers', 'cookie'])
        except getopt.error, msg:
            print "Error:", msg
            print usage
            exit(2)

        if sys.argv[-1][:4] == 'http':
            url = sys.argv[-1]
        else:
            print '! Error, -u url required'
            print usage
            return -1

        for o, a in opts:

            if o in ("-m", "-match"):
                self.match = a
            if o in ("-w", "-wordlist"):
                split_wlists.append(a)
            if o in ("-v", "-verbose"):
                self.verbose = a
            if o in ("-d", "-data"):
                d = a.split("=")
                data[d[0]] = d[1]
            if o in ("-h", "-headers"):
                h = a.split("=")
                headers[h[0]] = h[1]
            if o in ("-c", "-cookie"):
                cookiepath = a
            if o in ("-r", "-random"):
                self.randomize = 1

        if urllib2.getproxies().get('http'):
            print '+ Using HTTP proxy ' + urllib2.getproxies()['http']

        self.req = request(url, data, headers, cookiepath)

        if self.req.fword:

            if self.req.fword - self.req.def_wordlist != len(split_wlists):
                print '! Error having ' + str(
                    len(split_wlists)) + ' wordfile and ' + str(
                        self.req.fword -
                        self.req.def_wordlist) + ' %%WORD%%s parameters.'
                return -1

            if self.req.def_wordlist == 1:
                w.append(w_default)

            try:

                for path in split_wlists:
                    f = open(path, 'r')

                    filelines = []
                    for line in f.readlines():
                        filelines.append(line.strip())

                    w.append(filelines)

            except Exception, e:
                print '! Error opening word lists: ' + str(e)
                return -1
Example #42
0
                        page_split=m['page'].split('|')
                        pageUrl=page_split[0]
                        header_in_page=None
                        if len(page_split)>1:
                            header_in_page=page_split[1]

#                           if
#                           proxy = urllib2.ProxyHandler({ ('https' ? proxytouse[:5]=="https":"http") : proxytouse})
#                           opener = urllib2.build_opener(proxy)
#                           urllib2.install_opener(opener)

                        import urllib2
#                        print 'urllib2.getproxies',urllib2.getproxies()
                        current_proxies=urllib2.ProxyHandler(urllib2.getproxies())
        
        
                        #print 'getting pageUrl',pageUrl
                        req = urllib2.Request(pageUrl)
                        if 'proxy' in m:
                            proxytouse= m['proxy']
#                            print 'proxytouse',proxytouse
#                            urllib2.getproxies= lambda: {}
                            if pageUrl[:5]=="https":
                                proxy = urllib2.ProxyHandler({ 'https' : proxytouse})
                                #req.set_proxy(proxytouse, 'https')
                            else:
                                proxy = urllib2.ProxyHandler({ 'http'  : proxytouse})
                                #req.set_proxy(proxytouse, 'http')
                            opener = urllib2.build_opener(proxy)
Example #43
0
def initialize(consoleLogging=True):
    with INIT_LOCK:

        global ACTUAL_LOG_DIR, LOG_DIR, WEB_PORT, WEB_LOG, ENCRYPTION_VERSION, WEB_ROOT, WEB_USERNAME, WEB_PASSWORD, WEB_HOST, WEB_IPV6, USE_API, API_KEY, \
            HANDLE_REVERSE_PROXY, \
            USE_XBMC, XBMC_ALWAYS_ON, XBMC_NOTIFY_ONSNATCH, XBMC_NOTIFY_ONDOWNLOAD, XBMC_UPDATE_FULL, XBMC_UPDATE_ONLYFIRST, \
            XBMC_UPDATE_LIBRARY, XBMC_HOST, XBMC_USERNAME, XBMC_PASSWORD, \
            USE_TRAKT, TRAKT_USERNAME, TRAKT_PASSWORD, TRAKT_API, TRAKT_REMOVE_WATCHLIST, TRAKT_USE_WATCHLIST, TRAKT_METHOD_ADD, TRAKT_START_PAUSED, traktCheckerScheduler, TRAKT_USE_RECOMMENDED, TRAKT_SYNC, \
            USE_PLEX, PLEX_NOTIFY_ONSNATCH, PLEX_NOTIFY_ONDOWNLOAD, PLEX_UPDATE_LIBRARY, \
            PLEX_SERVER_HOST, PLEX_HOST, PLEX_USERNAME, PLEX_PASSWORD, SKIP_REMOVED_FILES, \
            USE_DRIVES, USE_DRIVEA, USE_DRIVEB, USE_DRIVEC, DRIVEA_NAME, DRIVEB_NAME, DRIVEC_NAME, \
            USE_SPEEDFAN, SPEEDFAN_LOG_LOCATION, \
            __INITIALIZED__, LAUNCH_BROWSER, \
            INDEXER_DEFAULT, INDEXER_TIMEOUT, \
            PROG_DIR, \
            versionCheckScheduler, VERSION_NOTIFY, AUTO_UPDATE, CPU_PRESET, \
            MIN_DAILYSEARCH_FREQUENCY, DEFAULT_UPDATE_FREQUENCY, MIN_UPDATE_FREQUENCY, UPDATE_FREQUENCY, \
            ROOT_DIRS, TIMEZONE_DISPLAY, \
            EXTRA_SCRIPTS, \
            USE_BOXCAR2, BOXCAR2_ACCESSTOKEN, BOXCAR2_NOTIFY_ONDOWNLOAD, BOXCAR2_NOTIFY_ONSNATCH, \
            USE_LISTVIEW, \
            GIT_PATH, MOVE_ASSOCIATED_FILES, CLEAR_CACHE, NFO_RENAME, \
            GUI_NAME, FUZZY_DATING, TRIM_ZERO, DATE_PRESET, TIME_PRESET, TIME_PRESET_W_SECONDS, \
            ANON_REDIRECT, LOCALHOST_IP, TMDB_API_KEY, DEBUG, PROXY_SETTING, \
            DEFAULT_AUTOPOSTPROCESSER_FREQUENCY, MIN_AUTOPOSTPROCESSER_FREQUENCY, \
            USE_SICKBEARD, SICKBEARD_API, SICKBEARD_HOST

        if __INITIALIZED__:
            return False

        CheckSection(CFG, 'General')
        CheckSection(CFG, 'Blackhole')
        CheckSection(CFG, 'XBMC')
        CheckSection(CFG, 'PLEX')
        CheckSection(CFG, 'EVENTGHOST')
        CheckSection(CFG, 'Growl')
        CheckSection(CFG, 'Prowl')
        CheckSection(CFG, 'Twitter')
        CheckSection(CFG, 'Boxcar')
        CheckSection(CFG, 'Boxcar2')
        CheckSection(CFG, 'NMJ')
        CheckSection(CFG, 'NMJv2')
        CheckSection(CFG, 'Synology')
        CheckSection(CFG, 'SynologyNotifier')
        CheckSection(CFG, 'pyTivo')
        CheckSection(CFG, 'NMA')
        CheckSection(CFG, 'Pushalot')
        CheckSection(CFG, 'Pushbullet')

        GUI_NAME = check_setting_str(CFG, 'GUI', 'gui_name', 'slick')

        ACTUAL_LOG_DIR = check_setting_str(CFG, 'General', 'log_dir', 'Logs')
        # put the log dir inside the data dir, unless an absolute path
        LOG_DIR = os.path.normpath(os.path.join(DATA_DIR, ACTUAL_LOG_DIR))

        if not helpers.makeDir(LOG_DIR):
            logger.log(u"!!! No log folder, logging to screen only!", logger.ERROR)

        SOCKET_TIMEOUT = check_setting_int(CFG, 'General', 'socket_timeout', 30)
        socket.setdefaulttimeout(SOCKET_TIMEOUT)

        try:
            WEB_PORT = check_setting_int(CFG, 'General', 'web_port', 8081)
        except:
            WEB_PORT = 8081

        if WEB_PORT < 21 or WEB_PORT > 65535:
            WEB_PORT = 8081

        WEB_HOST = check_setting_str(CFG, 'General', 'web_host', '0.0.0.0')
        WEB_IPV6 = bool(check_setting_int(CFG, 'General', 'web_ipv6', 0))
        WEB_ROOT = check_setting_str(CFG, 'General', 'web_root', '').rstrip("/")
        WEB_LOG = bool(check_setting_int(CFG, 'General', 'web_log', 0))
        ENCRYPTION_VERSION = check_setting_int(CFG, 'General', 'encryption_version', 0)
        WEB_USERNAME = check_setting_str(CFG, 'General', 'web_username', '')
        WEB_PASSWORD = check_setting_str(CFG, 'General', 'web_password', '')
        LAUNCH_BROWSER = bool(check_setting_int(CFG, 'General', 'launch_browser', 1))

        LOCALHOST_IP = check_setting_str(CFG, 'General', 'localhost_ip', '')

        CPU_PRESET = check_setting_str(CFG, 'General', 'cpu_preset', 'NORMAL')

        ANON_REDIRECT = check_setting_str(CFG, 'General', 'anon_redirect', 'http://dereferer.org/?')
        PROXY_SETTING = check_setting_str(CFG, 'General', 'proxy_setting', '')
        # attempt to help prevent users from breaking links by using a bad url 
        if not ANON_REDIRECT.endswith('?'):
            ANON_REDIRECT = ''

        USE_API = bool(check_setting_int(CFG, 'General', 'use_api', 0))
        API_KEY = check_setting_str(CFG, 'General', 'api_key', '')

        DEBUG = bool(check_setting_int(CFG, 'General', 'debug', 0))

        HANDLE_REVERSE_PROXY = bool(check_setting_int(CFG, 'General', 'handle_reverse_proxy', 0))

        ACTUAL_CACHE_DIR = check_setting_str(CFG, 'General', 'cache_dir', 'cache')
        # fix bad configs due to buggy code
        if ACTUAL_CACHE_DIR == 'None':
            ACTUAL_CACHE_DIR = 'cache'

        # unless they specify, put the cache dir inside the data dir
        if not os.path.isabs(ACTUAL_CACHE_DIR):
            CACHE_DIR = os.path.join(DATA_DIR, ACTUAL_CACHE_DIR)
        else:
            CACHE_DIR = ACTUAL_CACHE_DIR

        if not helpers.makeDir(CACHE_DIR):
            logger.log(u"!!! Creating local cache dir failed, using system default", logger.ERROR)
            CACHE_DIR = None

        ROOT_DIRS = check_setting_str(CFG, 'General', 'root_dirs', '')
        if not re.match(r'\d+\|[^|]+(?:\|[^|]+)*', ROOT_DIRS):
            ROOT_DIRS = ''

        proxies = getproxies()
        proxy_url = None
        if 'http' in proxies:
            proxy_url = proxies['http']
        elif 'ftp' in proxies:
            proxy_url = proxies['ftp']

        QUALITY_DEFAULT = check_setting_int(CFG, 'General', 'quality_default', SD)
        STATUS_DEFAULT = check_setting_int(CFG, 'General', 'status_default', SKIPPED)
        VERSION_NOTIFY = check_setting_int(CFG, 'General', 'version_notify', 1)
        AUTO_UPDATE = check_setting_int(CFG, 'General', 'auto_update', 0)
        FLATTEN_FOLDERS_DEFAULT = bool(check_setting_int(CFG, 'General', 'flatten_folders_default', 0))
        INDEXER_DEFAULT = check_setting_int(CFG, 'General', 'indexer_default', 0)
        INDEXER_TIMEOUT = check_setting_int(CFG, 'General', 'indexer_timeout', 10)
        ANIME_DEFAULT = bool(check_setting_int(CFG, 'General', 'anime_default', 0))
        SCENE_DEFAULT = bool(check_setting_int(CFG, 'General', 'scene_default', 0))

        PROVIDER_ORDER = check_setting_str(CFG, 'General', 'provider_order', '').split()

        UPDATE_FREQUENCY = check_setting_int(CFG, 'General', 'update_frequency', DEFAULT_UPDATE_FREQUENCY)
        if UPDATE_FREQUENCY < MIN_UPDATE_FREQUENCY:
            UPDATE_FREQUENCY = MIN_UPDATE_FREQUENCY

        USE_XBMC = bool(check_setting_int(CFG, 'XBMC', 'use_xbmc', 0))
        XBMC_ALWAYS_ON = bool(check_setting_int(CFG, 'XBMC', 'xbmc_always_on', 1))
        XBMC_NOTIFY_ONSNATCH = bool(check_setting_int(CFG, 'XBMC', 'xbmc_notify_onsnatch', 0))
        XBMC_NOTIFY_ONDOWNLOAD = bool(check_setting_int(CFG, 'XBMC', 'xbmc_notify_ondownload', 0))
        XBMC_UPDATE_LIBRARY = bool(check_setting_int(CFG, 'XBMC', 'xbmc_update_library', 0))
        XBMC_UPDATE_FULL = bool(check_setting_int(CFG, 'XBMC', 'xbmc_update_full', 0))
        XBMC_UPDATE_ONLYFIRST = bool(check_setting_int(CFG, 'XBMC', 'xbmc_update_onlyfirst', 0))
        XBMC_HOST = check_setting_str(CFG, 'XBMC', 'xbmc_host', '')
        XBMC_USERNAME = check_setting_str(CFG, 'XBMC', 'xbmc_username', '')
        XBMC_PASSWORD = check_setting_str(CFG, 'XBMC', 'xbmc_password', '')

        USE_PLEX = bool(check_setting_int(CFG, 'Plex', 'use_plex', 0))
        PLEX_NOTIFY_ONSNATCH = bool(check_setting_int(CFG, 'Plex', 'plex_notify_onsnatch', 0))
        PLEX_NOTIFY_ONDOWNLOAD = bool(check_setting_int(CFG, 'Plex', 'plex_notify_ondownload', 0))
        PLEX_UPDATE_LIBRARY = bool(check_setting_int(CFG, 'Plex', 'plex_update_library', 0))
        PLEX_SERVER_HOST = check_setting_str(CFG, 'Plex', 'plex_server_host', '')
        PLEX_HOST = check_setting_str(CFG, 'Plex', 'plex_host', '')
        PLEX_USERNAME = check_setting_str(CFG, 'Plex', 'plex_username', '')
        PLEX_PASSWORD = check_setting_str(CFG, 'Plex', 'plex_password', '')

        USE_BOXCAR2 = bool(check_setting_int(CFG, 'Boxcar2', 'use_boxcar2', 0))
        BOXCAR2_NOTIFY_ONSNATCH = bool(check_setting_int(CFG, 'Boxcar2', 'boxcar2_notify_onsnatch', 0))
        BOXCAR2_NOTIFY_ONDOWNLOAD = bool(check_setting_int(CFG, 'Boxcar2', 'boxcar2_notify_ondownload', 0))
        BOXCAR2_ACCESSTOKEN = check_setting_str(CFG, 'Boxcar2', 'boxcar2_accesstoken', '')

        USE_TRAKT = bool(check_setting_int(CFG, 'Trakt', 'use_trakt', 0))
        TRAKT_USERNAME = check_setting_str(CFG, 'Trakt', 'trakt_username', '')
        TRAKT_PASSWORD = check_setting_str(CFG, 'Trakt', 'trakt_password', '')
        TRAKT_API = check_setting_str(CFG, 'Trakt', 'trakt_api', '')
        TRAKT_REMOVE_WATCHLIST = bool(check_setting_int(CFG, 'Trakt', 'trakt_remove_watchlist', 0))
        TRAKT_USE_WATCHLIST = bool(check_setting_int(CFG, 'Trakt', 'trakt_use_watchlist', 0))
        TRAKT_METHOD_ADD = check_setting_str(CFG, 'Trakt', 'trakt_method_add', "0")
        TRAKT_START_PAUSED = bool(check_setting_int(CFG, 'Trakt', 'trakt_start_paused', 0))
        TRAKT_USE_RECOMMENDED = bool(check_setting_int(CFG, 'Trakt', 'trakt_use_recommended', 0))
        TRAKT_SYNC = bool(check_setting_int(CFG, 'Trakt', 'trakt_sync', 0))

        USE_PLEX = bool(check_setting_int(CFG, 'Plex', 'use_plex', 0))
        PLEX_SERVER_HOST = check_setting_str(CFG, 'Plex', 'plex_server_host', '')
        PLEX_HOST = check_setting_str(CFG, 'Plex', 'plex_host', '')

        USE_DRIVES = bool(check_setting_int(CFG, 'Drives', 'use_drives', 0))
        USE_DRIVEA = bool(check_setting_int(CFG, 'Drives', 'use_driveA', 0))
        USE_DRIVEB = bool(check_setting_int(CFG, 'Drives', 'use_driveB', 0))
        USE_DRIVEC = bool(check_setting_int(CFG, 'Drives', 'use_driveC', 0))
        DRIVEA_NAME = check_setting_str(CFG, 'Drives', 'driveA_name', '')
        DRIVEB_NAME = check_setting_str(CFG, 'Drives', 'driveB_name', '')
        DRIVEC_NAME = check_setting_str(CFG, 'Drives', 'driveC_name', '')

        USE_SICKBEARD = bool(check_setting_int(CFG, 'Sickbeard', 'use_sickbeard', 0))
        SICKBEARD_HOST = check_setting_str(CFG, 'Sickbeard', 'sickbeard_host', '')
        SICKBEARD_API = check_setting_str(CFG, 'Sickbeard', 'sickbeard_api', '')

        USE_SPEEDFAN = bool(check_setting_int(CFG, 'Speedfan', 'use_speedfan', 0))
        SPEEDFAN_LOG_LOCATION = check_setting_str(CFG, 'Speedfan', 'speedfan_log_location', '')

        GIT_PATH = check_setting_str(CFG, 'General', 'git_path', '')

        EXTRA_SCRIPTS = [x.strip() for x in check_setting_str(CFG, 'General', 'extra_scripts', '').split('|') if
                         x.strip()]

        USE_LISTVIEW = bool(check_setting_int(CFG, 'General', 'use_listview', 0))

        FUZZY_DATING = bool(check_setting_int(CFG, 'GUI', 'fuzzy_dating', 0))
        TRIM_ZERO = bool(check_setting_int(CFG, 'GUI', 'trim_zero', 0))
        DATE_PRESET = check_setting_str(CFG, 'GUI', 'date_preset', '%x')
        TIME_PRESET_W_SECONDS = check_setting_str(CFG, 'GUI', 'time_preset', '%I:%M:%S %p')
        TIME_PRESET = TIME_PRESET_W_SECONDS.replace(u":%S", u"")
        TIMEZONE_DISPLAY = check_setting_str(CFG, 'GUI', 'timezone_display', 'network')

        if not os.path.isfile(CONFIG_FILE):
            logger.log(u"Unable to find '" + CONFIG_FILE + "', all settings will be default!", logger.DEBUG)
            save_config()

        # start up all the threads
        logger.sb_log_instance.initLogging(consoleLogging=consoleLogging)

        # migrate the config if it needs it
        migrator = ConfigMigrator(CFG)
        migrator.migrate_config()

        # initialize schedulers
        # updaters
        update_now = datetime.timedelta(minutes=0)
        versionCheckScheduler = scheduler.Scheduler(versionChecker.CheckVersion(),
                                                    cycleTime=datetime.timedelta(hours=UPDATE_FREQUENCY),
                                                    threadName="CHECKVERSION",
                                                    silent=False)

        search_intervals = {'15m': 15, '45m': 45, '90m': 90, '4h': 4*60, 'daily': 24*60}

        __INITIALIZED__ = True
        return True
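This initialize() excerpt only selects a proxy_url from getproxies() (the bare getproxies call implies a "from urllib import getproxies" elsewhere in the module); what the truncated remainder does with it is not shown. As a hedged sketch, one common way such a value gets applied process-wide:

import urllib2
from urllib import getproxies

proxies = getproxies()
proxy_url = proxies.get('http') or proxies.get('ftp')
if proxy_url:
    # assumption: route both http and https traffic through the detected proxy
    urllib2.install_opener(urllib2.build_opener(
        urllib2.ProxyHandler({'http': proxy_url, 'https': proxy_url})))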
Example #44
0
def setupProxy():
    """Set up the urllib proxy if possible.

     The function will use the following methods in order to try to determine proxies:
        #. standard urllib2.urlopen (which will use any statically-defined http-proxy settings)
        #. previously stored proxy address (in prefs)
        #. proxy.pac files if these have been added to system settings
        #. auto-detect proxy settings (WPAD technology)

     .. note::
        This can take time, as each failed attempt to set up a proxy involves trying to load a URL and timing out. Best
        to do in a separate thread.

    :Returns:

        True (success) or False (failure)
    """
    global proxies
    #try doing nothing
    proxies = urllib2.ProxyHandler(urllib2.getproxies())
    if testProxy(proxies) is True:
        logging.debug(
            "Using standard urllib2 (static proxy or no proxy required)")
        urllib2.install_opener(urllib2.build_opener(
            proxies))  #this will now be used globally for ALL urllib2 opening
        return 1

    #try doing what we did last time
    if len(prefs.connections['proxy']) > 0:
        proxies = urllib2.ProxyHandler({'http': prefs.connections['proxy']})
        if testProxy(proxies) is True:
            logging.debug('Using %s (from prefs)' %
                          (prefs.connections['proxy']))
            urllib2.install_opener(
                urllib2.build_opener(proxies)
            )  #this will now be used globally for ALL urllib2 opening
            return 1
        else:
            logging.debug("Found a previous proxy but it didn't work")

    #try finding/using a proxy.pac file
    pacURLs = getPacFiles()
    logging.debug("Found proxy PAC files: %s" % pacURLs)
    proxies = proxyFromPacFiles(pacURLs)  # installs opener, if successful
    if proxies and hasattr(proxies,
                           'proxies') and len(proxies.proxies['http']) > 0:
        #save that proxy for future
        prefs.connections['proxy'] = proxies.proxies['http']
        prefs.saveUserPrefs()
        logging.debug('Using %s (from proxy PAC file)' %
                      (prefs.connections['proxy']))
        return 1

    #try finding/using 'auto-detect proxy'
    pacURLs = getWpadFiles()
    proxies = proxyFromPacFiles(pacURLs)  # installs opener, if successful
    if proxies and hasattr(proxies,
                           'proxies') and len(proxies.proxies['http']) > 0:
        #save that proxy for future
        prefs.connections['proxy'] = proxies.proxies['http']
        prefs.saveUserPrefs()
        logging.debug('Using %s (from proxy auto-detect)' %
                      (prefs.connections['proxy']))
        return 1

    proxies = 0
    return 0
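setupProxy() relies on a testProxy() helper that is not shown here. A hedged sketch of what such a check might look like (the helper name matches the call above, but the test URL and timeout are assumptions):

import urllib2

def testProxy(handler, test_url='http://www.google.com', timeout=5):
    # build a temporary opener around the candidate ProxyHandler and see
    # whether a known URL can be fetched before the timeout expires
    opener = urllib2.build_opener(handler)
    try:
        opener.open(test_url, timeout=timeout).read(64)
        return True
    except Exception:
        return False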
Example #45
0
def getRegexParsed(
        regexs,
        url,
        cookieJar=None,
        forCookieJarOnly=False,
        recursiveCall=False,
        cachedPages={},
        rawPost=False,
        cookie_jar_file=None):  #0,1,2 = URL, regexOnly, CookieJarOnly
    doRegexs = re.compile('\$doregex\[([^\]]*)\]').findall(url)
    setresolved = True
    for k in doRegexs:
        if k in regexs:
            m = regexs[k]
            cookieJarParam = False
            if 'cookiejar' in m:
                cookieJarParam = m['cookiejar']
                if '$doregex' in cookieJarParam:
                    cookieJar = getRegexParsed(regexs, m['cookiejar'],
                                               cookieJar, True, True,
                                               cachedPages)
                    cookieJarParam = True
                else:
                    cookieJarParam = True
            if cookieJarParam:
                if cookieJar == None:
                    cookie_jar_file = None
                    if 'open[' in m['cookiejar']:
                        cookie_jar_file = m['cookiejar'].split(
                            'open[')[1].split(']')[0]
                    cookieJar = getCookieJar(cookie_jar_file)
                    if cookie_jar_file:
                        saveCookieJar(cookieJar, cookie_jar_file)
                elif 'save[' in m['cookiejar']:
                    cookie_jar_file = m['cookiejar'].split('save[')[1].split(
                        ']')[0]
                    complete_path = os.path.join(profile, cookie_jar_file)
                    saveCookieJar(cookieJar, cookie_jar_file)
            if m['page'] and '$doregex' in m['page']:
                pg = getRegexParsed(regexs,
                                    m['page'],
                                    cookieJar,
                                    recursiveCall=True,
                                    cachedPages=cachedPages)
                if len(pg) == 0:
                    pg = 'http://regexfailed'
                m['page'] = pg
            if 'setcookie' in m and m['setcookie'] and '$doregex' in m[
                    'setcookie']:
                m['setcookie'] = getRegexParsed(regexs,
                                                m['setcookie'],
                                                cookieJar,
                                                recursiveCall=True,
                                                cachedPages=cachedPages)
            if 'appendcookie' in m and m['appendcookie'] and '$doregex' in m[
                    'appendcookie']:
                m['appendcookie'] = getRegexParsed(regexs,
                                                   m['appendcookie'],
                                                   cookieJar,
                                                   recursiveCall=True,
                                                   cachedPages=cachedPages)
            if 'post' in m and '$doregex' in m['post']:
                m['post'] = getRegexParsed(regexs,
                                           m['post'],
                                           cookieJar,
                                           recursiveCall=True,
                                           cachedPages=cachedPages)
            if 'rawpost' in m and '$doregex' in m['rawpost']:
                m['rawpost'] = getRegexParsed(regexs,
                                              m['rawpost'],
                                              cookieJar,
                                              recursiveCall=True,
                                              cachedPages=cachedPages,
                                              rawPost=True)
            if 'rawpost' in m and '$epoctime$' in m['rawpost']:
                m['rawpost'] = m['rawpost'].replace('$epoctime$',
                                                    getEpocTime())
            if 'rawpost' in m and '$epoctime2$' in m['rawpost']:
                m['rawpost'] = m['rawpost'].replace('$epoctime2$',
                                                    getEpocTime2())
            link = ''
            if m['page'] and m[
                    'page'] in cachedPages and not 'ignorecache' in m and forCookieJarOnly == False:
                link = cachedPages[m['page']]
            else:
                if m['page'] and not m['page'] == '' and m['page'].startswith(
                        'http'):
                    if '$epoctime$' in m['page']:
                        m['page'] = m['page'].replace('$epoctime$',
                                                      getEpocTime())
                    if '$epoctime2$' in m['page']:
                        m['page'] = m['page'].replace('$epoctime2$',
                                                      getEpocTime2())
                    page_split = m['page'].split('|')
                    pageUrl = page_split[0]
                    header_in_page = None
                    if len(page_split) > 1:
                        header_in_page = page_split[1]
                    current_proxies = urllib2.ProxyHandler(
                        urllib2.getproxies())
                    req = urllib2.Request(pageUrl)
                    if 'proxy' in m:
                        proxytouse = m['proxy']
                        if pageUrl[:5] == "https":
                            proxy = urllib2.ProxyHandler({'https': proxytouse})
                        else:
                            proxy = urllib2.ProxyHandler({'http': proxytouse})
                        opener = urllib2.build_opener(proxy)
                        urllib2.install_opener(opener)
                    req.add_header(
                        'User-Agent',
                        'Mozilla/5.0 (Windows NT 6.1; rv:14.0) Gecko/20100101 Firefox/14.0.1'
                    )
                    proxytouse = None
                    if 'referer' in m:
                        req.add_header('Referer', m['referer'])
                    if 'accept' in m:
                        req.add_header('Accept', m['accept'])
                    if 'agent' in m:
                        req.add_header('User-agent', m['agent'])
                    if 'x-req' in m:
                        req.add_header('X-Requested-With', m['x-req'])
                    if 'x-addr' in m:
                        req.add_header('x-addr', m['x-addr'])
                    if 'x-forward' in m:
                        req.add_header('X-Forwarded-For', m['x-forward'])
                    if 'setcookie' in m:
                        req.add_header('Cookie', m['setcookie'])
                    if 'appendcookie' in m:
                        cookiestoApend = m['appendcookie']
                        cookiestoApend = cookiestoApend.split(';')
                        for h in cookiestoApend:
                            n, v = h.split('=')
                            w, n = n.split(':')
                            ck = cookielib.Cookie(version=0,
                                                  name=n,
                                                  value=v,
                                                  port=None,
                                                  port_specified=False,
                                                  domain=w,
                                                  domain_specified=False,
                                                  domain_initial_dot=False,
                                                  path='/',
                                                  path_specified=True,
                                                  secure=False,
                                                  expires=None,
                                                  discard=True,
                                                  comment=None,
                                                  comment_url=None,
                                                  rest={'HttpOnly': None},
                                                  rfc2109=False)
                            cookieJar.set_cookie(ck)
                    if 'origin' in m:
                        req.add_header('Origin', m['origin'])
                    if header_in_page:
                        header_in_page = header_in_page.split('&')
                        for h in header_in_page:
                            n, v = h.split('=')
                            req.add_header(n, v)
                    if not cookieJar == None:
                        cookie_handler = urllib2.HTTPCookieProcessor(cookieJar)
                        opener = urllib2.build_opener(
                            cookie_handler, urllib2.HTTPBasicAuthHandler(),
                            urllib2.HTTPHandler())
                        opener = urllib2.install_opener(opener)
                        if 'noredirect' in m:
                            opener = urllib2.build_opener(
                                cookie_handler, NoRedirection,
                                urllib2.HTTPBasicAuthHandler(),
                                urllib2.HTTPHandler())
                            opener = urllib2.install_opener(opener)
                    elif 'noredirect' in m:
                        opener = urllib2.build_opener(
                            NoRedirection, urllib2.HTTPBasicAuthHandler(),
                            urllib2.HTTPHandler())
                        opener = urllib2.install_opener(opener)
                    if 'connection' in m:
                        from keepalive import HTTPHandler
                        keepalive_handler = HTTPHandler()
                        opener = urllib2.build_opener(keepalive_handler)
                        urllib2.install_opener(opener)
                    post = None
                    if 'post' in m:
                        postData = m['post']
                        splitpost = postData.split(',')
                        post = {}
                        for p in splitpost:
                            n = p.split(':')[0]
                            v = p.split(':')[1]
                            post[n] = v
                        post = urllib.urlencode(post)
                    if 'rawpost' in m:
                        post = m['rawpost']
                    link = ''
                    try:
                        if post:
                            response = urllib2.urlopen(req, post)
                        else:
                            response = urllib2.urlopen(req)
                        if response.info().get('Content-Encoding') == 'gzip':
                            from StringIO import StringIO
                            import gzip
                            buf = StringIO(response.read())
                            f = gzip.GzipFile(fileobj=buf)
                            link = f.read()
                        else:
                            link = response.read()
                        if 'proxy' in m and not current_proxies is None:
                            urllib2.install_opener(
                                urllib2.build_opener(current_proxies))
                        link = javascriptUnEscape(link)
                        if 'includeheaders' in m:
                            link += '$$HEADERS_START$$:'
                            for b in response.headers:
                                link += b + ':' + response.headers.get(
                                    b) + '\n'
                            link += '$$HEADERS_END$$:'
                        response.close()
                    except:
                        pass
                    cachedPages[m['page']] = link
                    if forCookieJarOnly:
                        return cookieJar
                elif m['page'] and not m['page'].startswith('http'):
                    if m['page'].startswith('$pyFunction:'):
                        val = doEval(m['page'].split('$pyFunction:')[1], '',
                                     cookieJar, m)
                        if forCookieJarOnly:
                            return cookieJar
                        link = val
                        link = javascriptUnEscape(link)
                    else:
                        link = m['page']
            if '$doregex' in m['expres']:
                m['expres'] = getRegexParsed(regexs,
                                             m['expres'],
                                             cookieJar,
                                             recursiveCall=True,
                                             cachedPages=cachedPages)
            if not m['expres'] == '':
                if '$LiveStreamCaptcha' in m['expres']:
                    val = askCaptcha(m, link, cookieJar)
                    url = url.replace("$doregex[" + k + "]", val)
                elif m['expres'].startswith(
                        '$pyFunction:') or '#$pyFunction' in m['expres']:
                    val = ''
                    if m['expres'].startswith('$pyFunction:'):
                        val = doEval(m['expres'].split('$pyFunction:')[1],
                                     link, cookieJar, m)
                    else:
                        val = doEvalFunction(m['expres'], link, cookieJar, m)
                    if 'ActivateWindow' in m['expres']: return
                    if forCookieJarOnly:
                        return cookieJar
                    if 'listrepeat' in m:
                        listrepeat = m['listrepeat']
                        return listrepeat, eval(val), m, regexs, cookieJar
                    try:
                        url = url.replace(u"$doregex[" + k + "]", val)
                    except:
                        url = url.replace("$doregex[" + k + "]",
                                          val.decode("utf-8"))
                else:
                    if 'listrepeat' in m:
                        listrepeat = m['listrepeat']
                        ret = re.findall(m['expres'], link)
                        return listrepeat, ret, m, regexs
                    val = ''
                    if not link == '':
                        reg = re.compile(m['expres']).search(link)
                        try:
                            val = reg.group(1).strip()
                        except:
                            traceback.print_exc()
                    elif m['page'] == '' or m['page'] == None:
                        val = m['expres']
                    if rawPost:
                        val = urllib.quote_plus(val)
                    if 'htmlunescape' in m:
                        import HTMLParser
                        val = HTMLParser.HTMLParser().unescape(val)
                    try:
                        url = url.replace("$doregex[" + k + "]", val)
                    except:
                        url = url.replace("$doregex[" + k + "]",
                                          val.decode("utf-8"))
            else:
                url = url.replace("$doregex[" + k + "]", '')
    if '$epoctime$' in url:
        url = url.replace('$epoctime$', getEpocTime())
    if '$epoctime2$' in url:
        url = url.replace('$epoctime2$', getEpocTime2())
    if '$GUID$' in url:
        import uuid
        url = url.replace('$GUID$', str(uuid.uuid1()).upper())
    if '$get_cookies$' in url:
        url = url.replace('$get_cookies$', getCookiesString(cookieJar))
    if recursiveCall: return url
    if url == "":
        return
    else:
        return url, setresolved